#ifndef _LINUX_BYTEORDER_SWAB_H
#define _LINUX_BYTEORDER_SWAB_H

/*
 * linux/byteorder/swab.h
 * Byte-swapping, independently of CPU endianness
 *	swabXX[ps]?(foo)
 *
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    separated swab functions from cpu_to_XX,
 *    to clean up support for bizarre-endian architectures.
 *
 * See asm-i386/byteorder.h and similar headers for examples of how to
 * provide architecture-dependent optimized versions.
 *
 */

/* Casts are necessary for constants, because we never know for sure how
 * U/UL/ULL map to __u16, __u32, __u64. At least not in a portable way.
 */
#define ___swab16(x) \
({ \
	__u16 __x = (x); \
	((__u16)( \
		(((__u16)(__x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(__x) & (__u16)0xff00U) >> 8) )); \
})

#define ___swab24(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		((__x & (__u32)0x000000ffUL) << 16) | \
		 (__x & (__u32)0x0000ff00UL) | \
		((__x & (__u32)0x00ff0000UL) >> 16) )); \
})

#define ___swab32(x) \
({ \
	__u32 __x = (x); \
	((__u32)( \
		(((__u32)(__x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(__x) & (__u32)0x0000ff00UL) << 8) | \
		(((__u32)(__x) & (__u32)0x00ff0000UL) >> 8) | \
		(((__u32)(__x) & (__u32)0xff000000UL) >> 24) )); \
})

#define ___swab64(x) \
({ \
	__u64 __x = (x); \
	((__u64)( \
		(__u64)(((__u64)(__x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00000000ff000000ULL) << 8) | \
		(__u64)(((__u64)(__x) & (__u64)0x000000ff00000000ULL) >> 8) | \
		(__u64)(((__u64)(__x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(__x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(__x) & (__u64)0xff00000000000000ULL) >> 56) )); \
})

#define ___constant_swab16(x) \
	((__u16)( \
		(((__u16)(x) & (__u16)0x00ffU) << 8) | \
		(((__u16)(x) & (__u16)0xff00U) >> 8) ))
#define ___constant_swab24(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffU) << 16) | \
		 ((__u32)(x) & (__u32)0x0000ff00U) | \
		(((__u32)(x) & (__u32)0x00ff0000U) >> 16) ))
#define ___constant_swab32(x) \
	((__u32)( \
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) | \
		(((__u32)(x) & (__u32)0x0000ff00UL) << 8) | \
		(((__u32)(x) & (__u32)0x00ff0000UL) >> 8) | \
		(((__u32)(x) & (__u32)0xff000000UL) >> 24) ))
#define ___constant_swab64(x) \
	((__u64)( \
		(__u64)(((__u64)(x) & (__u64)0x00000000000000ffULL) << 56) | \
		(__u64)(((__u64)(x) & (__u64)0x000000000000ff00ULL) << 40) | \
		(__u64)(((__u64)(x) & (__u64)0x0000000000ff0000ULL) << 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00000000ff000000ULL) << 8) | \
		(__u64)(((__u64)(x) & (__u64)0x000000ff00000000ULL) >> 8) | \
		(__u64)(((__u64)(x) & (__u64)0x0000ff0000000000ULL) >> 24) | \
		(__u64)(((__u64)(x) & (__u64)0x00ff000000000000ULL) >> 40) | \
		(__u64)(((__u64)(x) & (__u64)0xff00000000000000ULL) >> 56) ))

/*
 * Provide defaults when no architecture-specific optimization is detected.
 */
#ifndef __arch__swab16
# define __arch__swab16(x) ({ __u16 __tmp = (x); ___swab16(__tmp); })
#endif
#ifndef __arch__swab24
# define __arch__swab24(x) ({ __u32 __tmp = (x); ___swab24(__tmp); })
#endif
#ifndef __arch__swab32
# define __arch__swab32(x) ({ __u32 __tmp = (x); ___swab32(__tmp); })
#endif
#ifndef __arch__swab64
# define __arch__swab64(x) ({ __u64 __tmp = (x); ___swab64(__tmp); })
#endif
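
/*
 * Illustrative usage sketch (not part of this header's definitions): the
 * plain form returns a swapped copy, the "p" form swaps the value a
 * pointer refers to, and the "s" form swaps in place.
 *
 *	__u32 v = 0x12345678;
 *	__u32 r;
 *
 *	r = __swab32(v);	r == 0x78563412, v unchanged
 *	r = __swab32p(&v);	same result, read through the pointer
 *	__swab32s(&v);		v itself becomes 0x78563412
 */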

#ifndef __arch__swab16p
# define __arch__swab16p(x) __arch__swab16(*(x))
#endif
#ifndef __arch__swab24p
# define __arch__swab24p(x) __arch__swab24(*(x))
#endif
#ifndef __arch__swab32p
# define __arch__swab32p(x) __arch__swab32(*(x))
#endif
#ifndef __arch__swab64p
# define __arch__swab64p(x) __arch__swab64(*(x))
#endif

#ifndef __arch__swab16s
# define __arch__swab16s(x) do { *(x) = __arch__swab16p((x)); } while (0)
#endif
#ifndef __arch__swab24s
# define __arch__swab24s(x) do { *(x) = __arch__swab24p((x)); } while (0)
#endif
#ifndef __arch__swab32s
# define __arch__swab32s(x) do { *(x) = __arch__swab32p((x)); } while (0)
#endif
#ifndef __arch__swab64s
# define __arch__swab64s(x) do { *(x) = __arch__swab64p((x)); } while (0)
#endif


/*
 * Allow constant folding
 */
#if defined(__GNUC__) && (__GNUC__ >= 2) && defined(__OPTIMIZE__)
# define __swab16(x) \
(__builtin_constant_p((__u16)(x)) ? \
 ___swab16((x)) : \
 __fswab16((x)))
# define __swab24(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab24((x)) : \
 __fswab24((x)))
# define __swab32(x) \
(__builtin_constant_p((__u32)(x)) ? \
 ___swab32((x)) : \
 __fswab32((x)))
# define __swab64(x) \
(__builtin_constant_p((__u64)(x)) ? \
 ___swab64((x)) : \
 __fswab64((x)))
#else
# define __swab16(x) __fswab16(x)
# define __swab24(x) __fswab24(x)
# define __swab32(x) __fswab32(x)
# define __swab64(x) __fswab64(x)
#endif /* OPTIMIZE */


static __inline__ __const__ __u16 __fswab16(__u16 x)
{
	return __arch__swab16(x);
}
static __inline__ __u16 __swab16p(__u16 *x)
{
	return __arch__swab16p(x);
}
static __inline__ void __swab16s(__u16 *addr)
{
	__arch__swab16s(addr);
}

static __inline__ __const__ __u32 __fswab24(__u32 x)
{
	return __arch__swab24(x);
}
static __inline__ __u32 __swab24p(__u32 *x)
{
	return __arch__swab24p(x);
}
static __inline__ void __swab24s(__u32 *addr)
{
	__arch__swab24s(addr);
}

static __inline__ __const__ __u32 __fswab32(__u32 x)
{
	return __arch__swab32(x);
}
static __inline__ __u32 __swab32p(__u32 *x)
{
	return __arch__swab32p(x);
}
static __inline__ void __swab32s(__u32 *addr)
{
	__arch__swab32s(addr);
}

#ifdef __BYTEORDER_HAS_U64__
static __inline__ __const__ __u64 __fswab64(__u64 x)
{
# ifdef __SWAB_64_THRU_32__
	__u32 h = x >> 32;
	__u32 l = x & ((1ULL << 32) - 1);
	return (((__u64)__swab32(l)) << 32) | ((__u64)(__swab32(h)));
# else
	return __arch__swab64(x);
# endif
}
static __inline__ __u64 __swab64p(__u64 *x)
{
	return __arch__swab64p(x);
}
static __inline__ void __swab64s(__u64 *addr)
{
	__arch__swab64s(addr);
}
#endif /* __BYTEORDER_HAS_U64__ */

#if defined(__KERNEL__)
#define swab16 __swab16
#define swab24 __swab24
#define swab32 __swab32
#define swab64 __swab64
#define swab16p __swab16p
#define swab24p __swab24p
#define swab32p __swab32p
#define swab64p __swab64p
#define swab16s __swab16s
#define swab24s __swab24s
#define swab32s __swab32s
#define swab64s __swab64s
#endif

#endif /* _LINUX_BYTEORDER_SWAB_H */
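
/*
 * A minimal sketch of how an architecture header can override the generic
 * defaults above, modeled on asm-i386/byteorder.h (the exact asm shown is
 * illustrative). Defining __arch__swabXX before this file is included
 * makes the corresponding #ifndef default above drop out, so callers get
 * the optimized version transparently:
 *
 *	static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
 *	{
 *		__asm__("bswap %0" : "=r" (x) : "0" (x));
 *		return x;
 *	}
 *	#define __arch__swab32(x) ___arch__swab32(x)
 */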