Replace NetBSD headers with MINIX license

This commit is contained in:
Eirikr Hinngart 2025-05-29 17:09:26 -07:00
parent 4db99f4012
commit 13d6f1e4aa
4 changed files with 1922 additions and 1919 deletions

View File

@ -1,192 +1,173 @@
/*-
* Copyright (c) 2008 The NetBSD Foundation, Inc.
* All rights reserved.
/*
* Copyright (c) 1987, 1997, 2006,
* Vrije Universiteit, Amsterdam, The Netherlands.
* All rights reserved. Redistribution and use of the MINIX 3 operating system
* in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* This code is derived from software contributed to The NetBSD Foundation
* by Matt Thomas <matt@3am-software.com>
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Vrije Universiteit nor the names of the software
* authors or contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* * Any deviations from these conditions require written permission from the
* copyright holder in advance
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS, AUTHORS, AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PRENTICE HALL OR ANY
* AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* Contributions by Matt Thomas.
*/
/*
 * Type-generic math macros (C99 7.22 <tgmath.h>).
 *
 * Each macro inspects the static type of its argument(s) at compile time
 * and dispatches to the float ("f"-suffixed), long double ("l"-suffixed),
 * or double variant of the corresponding <math.h>/<complex.h> function.
 * Dispatch relies on GCC's __builtin_choose_expr and
 * __builtin_types_compatible_p, so only the selected call is evaluated
 * and code-generated.
 */
#ifndef _TGMATH_H_
#define _TGMATH_H_

#include <math.h>
#include <complex.h>

/*
 * C99 Type-generic math (7.22)
 */
#ifdef __GNUC__
/* Compile-time ?: — only the selected branch is evaluated. */
#define	__TG_CHOOSE(p, a, b)	__builtin_choose_expr((p), (a), (b))
/* Nonzero iff v has exactly type t. */
#define	__TG_IS_EQUIV_TYPE_P(v, t) \
	__builtin_types_compatible_p(__typeof__(v), t)
#else
#error how does this compiler do type-generic macros?
#endif

/* Type predicates used by the dispatchers below. */
#define	__TG_IS_FCOMPLEX_P(t)	__TG_IS_EQUIV_TYPE_P(t, float complex)
#define	__TG_IS_DCOMPLEX_P(t)	__TG_IS_EQUIV_TYPE_P(t, double complex)
#define	__TG_IS_LCOMPLEX_P(t)	__TG_IS_EQUIV_TYPE_P(t, long double complex)
#define	__TG_IS_FLOAT_P(t)	__TG_IS_EQUIV_TYPE_P(t, float)
#define	__TG_IS_LDOUBLE_P(t)	__TG_IS_EQUIV_TYPE_P(t, long double)
#define	__TG_IS_FREAL_P(t)	(__TG_IS_FLOAT_P(t) || __TG_IS_FCOMPLEX_P(t))
#define	__TG_IS_LREAL_P(t)	(__TG_IS_LDOUBLE_P(t) || __TG_IS_LCOMPLEX_P(t))
#define	__TG_IS_COMPLEX_P(t) \
	(__TG_IS_FCOMPLEX_P(t) \
	 || __TG_IS_DCOMPLEX_P(t) \
	 || __TG_IS_LCOMPLEX_P(t))

/*
 * Generic dispatchers: select fn##f, fn##l, or fn based on the ftype and
 * ltype predicate names.  The "1x"/"2x" variants carry extra arguments
 * that do not take part in type selection (e.g. the int of ldexp(), the
 * int * of remquo()).
 */
#define	__TG_GFN1(fn, a, ftype, ltype) \
	__TG_CHOOSE(__TG_IS_##ftype##_P(a), \
	    fn##f(a), \
	    __TG_CHOOSE(__TG_IS_##ltype##_P(a), \
		fn##l(a), \
		fn(a)))

#define	__TG_GFN1x(fn, a, b, ftype, ltype) \
	__TG_CHOOSE(__TG_IS_##ftype##_P(a), \
	    fn##f((a), (b)), \
	    __TG_CHOOSE(__TG_IS_##ltype##_P(a), \
		fn##l((a), (b)), \
		fn((a), (b))))

#define	__TG_GFN2(fn, a, b, ftype, ltype) \
	__TG_CHOOSE(__TG_IS_##ftype##_P(a) \
	    && __TG_IS_##ftype##_P(b), \
	    fn##f((a), (b)), \
	    __TG_CHOOSE(__TG_IS_##ltype##_P(a) \
		|| __TG_IS_##ltype##_P(b), \
		fn##l((a), (b)), \
		fn((a), (b))))

#define	__TG_GFN2x(fn, a, b, c, ftype, ltype) \
	__TG_CHOOSE(__TG_IS_##ftype##_P(a) \
	    && __TG_IS_##ftype##_P(b), \
	    fn##f((a), (b), (c)), \
	    __TG_CHOOSE(__TG_IS_##ltype##_P(a) \
		|| __TG_IS_##ltype##_P(b), \
		fn##l((a), (b), (c)), \
		fn((a), (b), (c))))

#define	__TG_GFN3(fn, a, b, c, ftype, ltype) \
	__TG_CHOOSE(__TG_IS_##ftype##_P(a) \
	    && __TG_IS_##ftype##_P(b) \
	    && __TG_IS_##ftype##_P(c), \
	    fn##f((a), (b), (c)), \
	    __TG_CHOOSE(__TG_IS_##ltype##_P(a) \
		|| __TG_IS_##ltype##_P(b) \
		|| __TG_IS_##ltype##_P(c), \
		fn##l((a), (b), (c)), \
		fn((a), (b), (c))))

/* Complex-capable dispatch: any complex argument selects the "c" family. */
#define	__TG_CFN1(cfn, a)	__TG_GFN1(cfn, a, FREAL, LREAL)
#define	__TG_CFN2(cfn, a, b)	__TG_GFN2(cfn, a, b, FREAL, LREAL)

/* Real-only dispatch. */
#define	__TG_FN1(fn, a)		__TG_GFN1(fn, a, FLOAT, LDOUBLE)
#define	__TG_FN1x(fn, a, b)	__TG_GFN1x(fn, a, b, FLOAT, LDOUBLE)
#define	__TG_FN2(fn, a, b)	__TG_GFN2(fn, a, b, FLOAT, LDOUBLE)
#define	__TG_FN2x(fn, a, b, c)	__TG_GFN2x(fn, a, b, c, FLOAT, LDOUBLE)
#define	__TG_FN3(fn, a, b, c)	__TG_GFN3(fn, a, b, c, FLOAT, LDOUBLE)

/* Route to c##fn for complex arguments, fn for real ones. */
#define	__TG_COMPLEX(a, fn) \
	__TG_CHOOSE(__TG_IS_COMPLEX_P(a), \
	    __TG_CFN1(c##fn, (a)), \
	    __TG_FN1(fn, (a)))

/* As above, but the complex counterpart has a different name (cabs/fabs). */
#define	__TG_COMPLEX1(a, cfn, fn) \
	__TG_CHOOSE(__TG_IS_COMPLEX_P(a), \
	    __TG_CFN1(cfn, (a)), \
	    __TG_FN1(fn, (a)))

#define	__TG_COMPLEX2(a, b, fn) \
	__TG_CHOOSE(__TG_IS_COMPLEX_P(a) \
	    || __TG_IS_COMPLEX_P(b), \
	    __TG_CFN2(c##fn, (a), (b)), \
	    __TG_FN2(fn, (a), (b)))

/* 7.22p4: functions with both real and complex counterparts. */
#define	acos(a)		__TG_COMPLEX((a), acos)
#define	asin(a)		__TG_COMPLEX((a), asin)
#define	atan(a)		__TG_COMPLEX((a), atan)
#define	acosh(a)	__TG_COMPLEX((a), acosh)
#define	asinh(a)	__TG_COMPLEX((a), asinh)
#define	atanh(a)	__TG_COMPLEX((a), atanh)
#define	cos(a)		__TG_COMPLEX((a), cos)
#define	sin(a)		__TG_COMPLEX((a), sin)
#define	tan(a)		__TG_COMPLEX((a), tan)
#define	cosh(a)		__TG_COMPLEX((a), cosh)
#define	sinh(a)		__TG_COMPLEX((a), sinh)
#define	tanh(a)		__TG_COMPLEX((a), tanh)
#define	exp(a)		__TG_COMPLEX((a), exp)
#define	log(a)		__TG_COMPLEX((a), log)
#define	pow(a, b)	__TG_COMPLEX2((a), (b), pow)
#define	sqrt(a)		__TG_COMPLEX((a), sqrt)
#define	fabs(a)		__TG_COMPLEX1((a), cabs, fabs)

/* 7.22p5: real-only functions. */
#define	atan2(a, b)	__TG_FN2(atan2, (a), (b))
#define	cbrt(a)		__TG_FN1(cbrt, (a))
#define	ceil(a)		__TG_FN1(ceil, (a))
#define	copysign(a, b)	__TG_FN2(copysign, (a), (b))
#define	erf(a)		__TG_FN1(erf, (a))
#define	erfc(a)		__TG_FN1(erfc, (a))
#define	exp2(a)		__TG_FN1(exp2, (a))
#define	expm1(a)	__TG_FN1(expm1, (a))
#define	fdim(a, b)	__TG_FN2(fdim, (a), (b))
#define	floor(a)	__TG_FN1(floor, (a))
#define	fma(a, b, c)	__TG_FN3(fma, (a), (b), (c))
#define	fmax(a, b)	__TG_FN2(fmax, (a), (b))
#define	fmin(a, b)	__TG_FN2(fmin, (a), (b))
#define	fmod(a, b)	__TG_FN2(fmod, (a), (b))
#define	frexp(a, b)	__TG_FN1x(frexp, (a), (b))
#define	hypot(a, b)	__TG_FN2(hypot, (a), (b))
#define	ilogb(a)	__TG_FN1(ilogb, (a))
#define	ldexp(a, b)	__TG_FN1x(ldexp, (a), (b))
#define	lgamma(a)	__TG_FN1(lgamma, (a))
#define	llrint(a)	__TG_FN1(llrint, (a))
#define	llround(a)	__TG_FN1(llround, (a))
#define	log10(a)	__TG_FN1(log10, (a))
#define	log1p(a)	__TG_FN1(log1p, (a))
#define	log2(a)		__TG_FN1(log2, (a))
#define	logb(a)		__TG_FN1(logb, (a))
#define	lrint(a)	__TG_FN1(lrint, (a))
#define	lround(a)	__TG_FN1(lround, (a))
#define	nearbyint(a)	__TG_FN1(nearbyint, (a))
#define	nextafter(a, b)	__TG_FN2(nextafter, (a), (b))
#define	nexttoward(a, b) __TG_FN2(nexttoward, (a), (b))
#define	remainder(a, b)	__TG_FN2(remainder, (a), (b))
#define	remquo(a, b, c)	__TG_FN2x(remquo, (a), (b), (c))
#define	rint(a)		__TG_FN1(rint, (a))
#define	round(a)	__TG_FN1(round, (a))
#define	scalbn(a, b)	__TG_FN1x(scalbn, (a), (b))
#define	scalbln(a, b)	__TG_FN1x(scalbln, (a), (b))
#define	tgamma(a)	__TG_FN1(tgamma, (a))
#define	trunc(a)	__TG_FN1(trunc, (a))

/* 7.22p6: complex-only functions (real arguments still dispatch). */
#define	carg(a)		__TG_CFN1(carg, (a))
#define	cimag(a)	__TG_CFN1(cimag, (a))
#define	conj(a)		__TG_CFN1(conj, (a))
#define	cproj(a)	__TG_CFN1(cproj, (a))
#define	creal(a)	__TG_CFN1(creal, (a))

#endif /* !_TGMATH_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -1,34 +1,35 @@
/* $NetBSD: memcpy_arm.S,v 1.4 2013/08/11 04:56:32 matt Exp $ */
/*-
* Copyright (c) 1997 The NetBSD Foundation, Inc.
* All rights reserved.
/*
* Copyright (c) 1987, 1997, 2006,
* Vrije Universiteit, Amsterdam, The Netherlands.
* All rights reserved. Redistribution and use of the MINIX 3 operating system
* in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* This code is derived from software contributed to The NetBSD Foundation
* by Neil A. Carson and Mark Brinicombe
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Vrije Universiteit nor the names of the software
* authors or contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* * Any deviations from these conditions require written permission from the
* copyright holder in advance
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS, AUTHORS, AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PRENTICE HALL OR ANY
* AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* Contributions by Neil A. Carson and Mark Brinicombe.
*/
#include <machine/asm.h>
#if !defined(__minix)
@ -67,331 +68,349 @@ STRONG_ALIAS(__aeabi_memcpy, memcpy)
* Apologies for the state of the comments ;-)
*/
/* For MINIX, we always spill r0, r4, r5, and lr, so we can easily
 * clean up the stack after a phys_copy fault.  NetBSD, in contrast,
 * spills the minimum number of registers for each path.
 */
#if defined(__minix)
/* LINTSTUB: Func: void *phys_copy(void *src, void *dst, size_t len) */
ENTRY(phys_copy)
	/* switch the source and destination registers */
	eor	r0, r1, r0
	eor	r1, r0, r1
	eor	r0, r1, r0
#else
/* LINTSTUB: Func: void *memcpy(void *dst, const void *src, size_t len) */
ENTRY(memcpy)
#endif
	/* save leaf functions having to store this away */
#if defined(__minix)
	push	{r0, r4, r5, lr}	/* memcpy() returns dest addr */
#else
	push	{r0, lr}		/* memcpy() returns dest addr */
#endif

	subs	r2, r2, #4
	blt	.Lmemcpy_l4		/* less than 4 bytes */
	ands	r12, r0, #3
	bne	.Lmemcpy_destul		/* oh unaligned destination addr */
	ands	r12, r1, #3
	bne	.Lmemcpy_srcul		/* oh unaligned source addr */

.Lmemcpy_t8:
	/* We have aligned source and destination */
	subs	r2, r2, #8
	blt	.Lmemcpy_l12		/* less than 12 bytes (4 from above) */
	subs	r2, r2, #0x14
	blt	.Lmemcpy_l32		/* less than 32 bytes (12 from above) */
#if !defined(__minix)
	push	{r4}			/* borrow r4 */
#endif

	/* blat 32 bytes at a time */
	/* XXX for really big copies perhaps we should use more registers */
.Lmemcpy_loop32:
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	ldmia	r1!, {r3, r4, r12, lr}
	stmia	r0!, {r3, r4, r12, lr}
	subs	r2, r2, #0x20
	bge	.Lmemcpy_loop32

	cmn	r2, #0x10
	ldmiage	r1!, {r3, r4, r12, lr}	/* blat a remaining 16 bytes */
	stmiage	r0!, {r3, r4, r12, lr}
	subge	r2, r2, #0x10
#if !defined(__minix)
	pop	{r4}			/* return r4 */
#endif

.Lmemcpy_l32:
	adds	r2, r2, #0x14

	/* blat 12 bytes at a time */
.Lmemcpy_loop12:
	ldmiage	r1!, {r3, r12, lr}
	stmiage	r0!, {r3, r12, lr}
	subsge	r2, r2, #0x0c
	bge	.Lmemcpy_loop12

.Lmemcpy_l12:
	adds	r2, r2, #8
	blt	.Lmemcpy_l4

	subs	r2, r2, #4
	ldrlt	r3, [r1], #4
	strlt	r3, [r0], #4
	ldmiage	r1!, {r3, r12}
	stmiage	r0!, {r3, r12}
	subge	r2, r2, #4

.Lmemcpy_l4:
	/* less than 4 bytes to go */
	adds	r2, r2, #4
#if defined(__minix)
	popeq	{r0, r4, r5}
	moveq	r0, #0
	popeq	{pc}
#else
#ifdef __APCS_26_
	ldmiaeq	sp!, {r0, pc}^		/* done */
#else
	popeq	{r0, pc}		/* done */
#endif
#endif

	/* copy the crud byte at a time */
	cmp	r2, #2
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	ldrbge	r3, [r1], #1
	strbge	r3, [r0], #1
	ldrbgt	r3, [r1], #1
	strbgt	r3, [r0], #1
#if defined(__minix)
	pop	{r0, r4, r5}
	mov	r0, #0
	pop	{pc}
#else
	pop	{r0, pc}
#endif

	/* erg - unaligned destination */
.Lmemcpy_destul:
	rsb	r12, r12, #4
	cmp	r12, #2

	/* align destination with byte copies */
	ldrb	r3, [r1], #1
	strb	r3, [r0], #1
	ldrbge	r3, [r1], #1
	strbge	r3, [r0], #1
	ldrbgt	r3, [r1], #1
	strbgt	r3, [r0], #1
	subs	r2, r2, r12
	blt	.Lmemcpy_l4		/* less the 4 bytes */

	ands	r12, r1, #3
	beq	.Lmemcpy_t8		/* we have an aligned source */

	/* erg - unaligned source */
	/* This is where it gets nasty ... */
.Lmemcpy_srcul:
	bic	r1, r1, #3
	ldr	lr, [r1], #4
	cmp	r12, #2
	bgt	.Lmemcpy_srcul3
	beq	.Lmemcpy_srcul2
	cmp	r2, #0x0c
	blt	.Lmemcpy_srcul1loop4
	sub	r2, r2, #0x0c
#if !defined(__minix)
	push	{r4, r5}
#endif

.Lmemcpy_srcul1loop16:
#ifdef __ARMEB__
	mov	r3, lr, lsl #8
#else
	mov	r3, lr, lsr #8
#endif
	ldmia	r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
	orr	r3, r3, r4, lsr #24
	mov	r4, r4, lsl #8
	orr	r4, r4, r5, lsr #24
	mov	r5, r5, lsl #8
	orr	r5, r5, r12, lsr #24
	mov	r12, r12, lsl #8
	orr	r12, r12, lr, lsr #24
#else
	orr	r3, r3, r4, lsl #24
	mov	r4, r4, lsr #8
	orr	r4, r4, r5, lsl #24
	mov	r5, r5, lsr #8
	orr	r5, r5, r12, lsl #24
	mov	r12, r12, lsr #8
	orr	r12, r12, lr, lsl #24
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_srcul1loop16
#if !defined(__minix)
	pop	{r4, r5}
#endif
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_srcul1l4

.Lmemcpy_srcul1loop4:
#ifdef __ARMEB__
	mov	r12, lr, lsl #8
#else
	mov	r12, lr, lsr #8
#endif
	ldr	lr, [r1], #4
#ifdef __ARMEB__
	orr	r12, r12, lr, lsr #24
#else
	orr	r12, r12, lr, lsl #24
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_srcul1loop4

.Lmemcpy_srcul1l4:
	sub	r1, r1, #3
	b	.Lmemcpy_l4

.Lmemcpy_srcul2:
	cmp	r2, #0x0c
	blt	.Lmemcpy_srcul2loop4
	sub	r2, r2, #0x0c
#if !defined(__minix)
	push	{r4, r5}
#endif

.Lmemcpy_srcul2loop16:
#ifdef __ARMEB__
	mov	r3, lr, lsl #16
#else
	mov	r3, lr, lsr #16
#endif
	ldmia	r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
	orr	r3, r3, r4, lsr #16
	mov	r4, r4, lsl #16
	orr	r4, r4, r5, lsr #16
	mov	r5, r5, lsl #16
	orr	r5, r5, r12, lsr #16
	mov	r12, r12, lsl #16
	orr	r12, r12, lr, lsr #16
#else
	orr	r3, r3, r4, lsl #16
	mov	r4, r4, lsr #16
	orr	r4, r4, r5, lsl #16
	mov	r5, r5, lsr #16
	orr	r5, r5, r12, lsl #16
	mov	r12, r12, lsr #16
	orr	r12, r12, lr, lsl #16
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_srcul2loop16
#if !defined(__minix)
	pop	{r4, r5}
#endif
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_srcul2l4

.Lmemcpy_srcul2loop4:
#ifdef __ARMEB__
	mov	r12, lr, lsl #16
#else
	mov	r12, lr, lsr #16
#endif
	ldr	lr, [r1], #4
#ifdef __ARMEB__
	orr	r12, r12, lr, lsr #16
#else
	orr	r12, r12, lr, lsl #16
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_srcul2loop4

.Lmemcpy_srcul2l4:
	sub	r1, r1, #2
	b	.Lmemcpy_l4

.Lmemcpy_srcul3:
	cmp	r2, #0x0c
	blt	.Lmemcpy_srcul3loop4
	sub	r2, r2, #0x0c
#if !defined(__minix)
	push	{r4, r5}
#endif

.Lmemcpy_srcul3loop16:
#ifdef __ARMEB__
	mov	r3, lr, lsl #24
#else
	mov	r3, lr, lsr #24
#endif
	ldmia	r1!, {r4, r5, r12, lr}
#ifdef __ARMEB__
	orr	r3, r3, r4, lsr #8
	mov	r4, r4, lsl #24
	orr	r4, r4, r5, lsr #8
	mov	r5, r5, lsl #24
	orr	r5, r5, r12, lsr #8
	mov	r12, r12, lsl #24
	orr	r12, r12, lr, lsr #8
#else
	orr	r3, r3, r4, lsl #8
	mov	r4, r4, lsr #24
	orr	r4, r4, r5, lsl #8
	mov	r5, r5, lsr #24
	orr	r5, r5, r12, lsl #8
	mov	r12, r12, lsr #24
	orr	r12, r12, lr, lsl #8
#endif
	stmia	r0!, {r3-r5, r12}
	subs	r2, r2, #0x10
	bge	.Lmemcpy_srcul3loop16
#if !defined(__minix)
	pop	{r4, r5}
#endif
	adds	r2, r2, #0x0c
	blt	.Lmemcpy_srcul3l4

.Lmemcpy_srcul3loop4:
#ifdef __ARMEB__
	mov	r12, lr, lsl #24
#else
	mov	r12, lr, lsr #24
#endif
	ldr	lr, [r1], #4
#ifdef __ARMEB__
	orr	r12, r12, lr, lsr #8
#else
	orr	r12, r12, lr, lsl #8
#endif
	str	r12, [r0], #4
	subs	r2, r2, #4
	bge	.Lmemcpy_srcul3loop4

.Lmemcpy_srcul3l4:
	sub	r1, r1, #1
	b	.Lmemcpy_l4

#if defined(__minix)
LABEL(phys_copy_fault)			/* kernel can send us here */
	pop	{r0, r4, r5}
	pop	{pc}

LABEL(phys_copy_fault_in_kernel)	/* kernel can send us here */
	pop	{r0, r4, r5}
	mrc	p15, 0, r0, c6, c0, 0	/* Read DFAR */
	pop	{pc}
#else
END(memcpy)
#endif

View File

@ -1,74 +1,75 @@
/* $NetBSD: mpls.h,v 1.1 2010/06/26 14:24:28 kefren Exp $ */
/*-
* Copyright (c) 2010 The NetBSD Foundation, Inc.
* All rights reserved.
/*
* Copyright (c) 1987, 1997, 2006,
* Vrije Universiteit, Amsterdam, The Netherlands.
* All rights reserved. Redistribution and use of the MINIX 3 operating system
* in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* This code is derived from software contributed to The NetBSD Foundation
* by Mihai Chelaru <kefren@NetBSD.org>
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Vrije Universiteit nor the names of the software
* authors or contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* * Any deviations from these conditions require written permission from the
* copyright holder in advance
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS, AUTHORS, AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT
* NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL PRENTICE HALL OR ANY
* AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* Contributions by Mihai Chelaru.
*/
/*
 * MPLS label-stack definitions (RFC 3032 shim encoding) and the MPLS
 * socket address used by the routing code.
 */
#ifndef _NETMPLS_MPLS_H_
#define _NETMPLS_MPLS_H_

#include <sys/param.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/queue.h>

#include <net/if.h>
#include <net/if_dl.h>

/* Reserved label values (RFC 3032, section 2.1). */
#define	MPLS_LABEL_IPV4NULL	0	/* IPv4 Explicit NULL Label */
#define	MPLS_LABEL_RTALERT	1	/* Router Alert Label */
#define	MPLS_LABEL_IPV6NULL	2	/* IPv6 Explicit NULL Label */
#define	MPLS_LABEL_IMPLNULL	3	/* Implicit NULL Label */
#define	MPLS_LABEL_RESMAX	15	/* Maximum reserved Label */

/* One 32-bit MPLS shim entry, viewable whole or by field. */
union mpls_shim {
	uint32_t s_addr;		/* the whole shim */
	struct {
#if BYTE_ORDER == LITTLE_ENDIAN
		uint32_t ttl:8;		/* time to live */
		uint32_t bos:1;		/* bottom of stack */
		uint32_t exp:3;		/* experimental / traffic class */
		uint32_t label:20;	/* label value */
#else
		uint32_t label:20;	/* label value */
		uint32_t exp:3;		/* experimental / traffic class */
		uint32_t bos:1;		/* bottom of stack */
		uint32_t ttl:8;		/* time to live */
#endif
	} shim;
};

/* Socket address carrying a single MPLS shim. */
struct sockaddr_mpls {
	uint8_t smpls_len;		/* total length */
	uint8_t smpls_family;		/* address family */
	uint8_t smpls_pad[2];
	union mpls_shim smpls_addr;
} __packed;

#endif /* !_NETMPLS_MPLS_H_ */