#ifndef _LINUX_BYTEORDER_SWAB_H
#define _LINUX_BYTEORDER_SWAB_H

/*
 * linux/byteorder/swab.h
 * Byte-swapping, independently of CPU endianness
 *	swabXX[ps]?(foo)
 *
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    separated swab functions from cpu_to_XX,
 *    to clean up support for bizarre-endian architectures.
 *
 * See asm-i386/byteorder.h and the like for examples of how to provide
 * architecture-dependent optimized versions.
 *
 */

/* Casts are necessary for constants, because we never know for sure
 * how U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) <<  8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >>  8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))
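
/*
 * For example, each mask above isolates one byte and the shift moves it
 * to the mirrored position:
 *
 *	___swab16(0x1234)                == 0x3412
 *	___swab32(0x12345678UL)          == 0x78563412
 *	___swab64(0x0123456789abcdefULL) == 0xefcdab8967452301
 *
 * Being pure macros over integer operations, they also remain usable in
 * constant expressions.
 */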

/*
 * provide defaults when no architecture-specific optimization is detected
 */
#ifndef __arch__swab16
#  define __arch__swab16(x) ___swab16(x)
#endif
#ifndef __arch__swab32
#  define __arch__swab32(x) ___swab32(x)
#endif
#ifndef __arch__swab64
#  define __arch__swab64(x) ___swab64(x)
#endif
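
/*
 * An architecture header can pre-define these hooks before this file is
 * included to get an optimized swap.  A hypothetical override using the
 * GCC/Clang byte-swap builtin might look like:
 *
 *	#define __arch__swab32(x) __builtin_bswap32(x)
 *
 * Whether a builtin or inline assembly is appropriate depends on the
 * toolchains the port supports.
 */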

#ifndef __arch__swab16p
#  define __arch__swab16p(x) __swab16(*(x))
#endif
#ifndef __arch__swab32p
#  define __arch__swab32p(x) __swab32(*(x))
#endif
#ifndef __arch__swab64p
#  define __arch__swab64p(x) __swab64(*(x))
#endif

#ifndef __arch__swab16s
#  define __arch__swab16s(x) do { *(x) = __swab16p((x)); } while (0)
#endif
#ifndef __arch__swab32s
#  define __arch__swab32s(x) do { *(x) = __swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
#  define __arch__swab64s(x) do { *(x) = __swab64p((x)); } while (0)
#endif
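
/*
 * The 'p' variants return the byte-swapped value read through a pointer;
 * the 's' variants swap in place.  Illustration:
 *
 *	__u32 v = 0x12345678UL;
 *	__u32 r = __swab32p(&v);	r == 0x78563412, v unchanged
 *	__swab32s(&v);			now v == 0x78563412
 */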


/*
 * Allow constant folding
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
#  define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
#  define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
#  define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
#  define __swab16(x) __fswab16(x)
#  define __swab32(x) __fswab32(x)
#  define __swab64(x) __fswab64(x)
#endif /* OPTIMIZE */
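
/*
 * With GCC and optimization enabled, __builtin_constant_p() routes
 * compile-time constants to the pure ___swabXX() macros so the swap is
 * folded away entirely; only runtime values pay for a __fswabXX() call:
 *
 *	__u32 be = __swab32(0x12345678UL);	folds to 0x78563412
 */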


static __inline__ __attribute__((const)) __u16 __fswab16(__u16 x)
{
	return __arch__swab16(x);
}
static __inline__ __u16 __swab16p(const __u16 *x)
{
	return __arch__swab16p(x);
}
static __inline__ void __swab16s(__u16 *addr)
{
	__arch__swab16s(addr);
}

static __inline__ __attribute__((const)) __u32 __fswab32(__u32 x)
{
	return __arch__swab32(x);
}
static __inline__ __u32 __swab32p(const __u32 *x)
{
	return __arch__swab32p(x);
}
static __inline__ void __swab32s(__u32 *addr)
{
	__arch__swab32s(addr);
}

static __inline__ __attribute__((const)) __u64 __fswab64(__u64 x)
{
#  ifdef __SWAB_64_THRU_32__
	/* No 64-bit arch swab: swap each 32-bit half, then exchange them */
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL << 32) - 1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
#  else
	return __arch__swab64(x);
#  endif
}
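
/*
 * Sketch of the __SWAB_64_THRU_32__ path for x == 0x0123456789abcdefULL:
 * h == 0x01234567 and l == 0x89abcdef; the halves swap to 0x67452301 and
 * 0xefcdab89 respectively, and exchanging them yields 0xefcdab8967452301.
 */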
static __inline__ __u64 __swab64p(const __u64 *x)
{
	return __arch__swab64p(x);
}
static __inline__ void __swab64s(__u64 *addr)
{
	__arch__swab64s(addr);
}

#if defined(__KERNEL__)
#define swab16 __swab16
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab32s __swab32s
#define swab64s __swab64s
#endif
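
/*
 * Typical in-kernel use, byte-reversing a hypothetical buffer of 32-bit
 * words in place:
 *
 *	void fixup_words(__u32 *buf, int n)
 *	{
 *		while (n--)
 *			swab32s(buf++);
 *	}
 */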

#endif /* _LINUX_BYTEORDER_SWAB_H */