/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#ifndef __BPF_ENDIAN__
#define __BPF_ENDIAN__

/*
 * Isolate byte #n and put it into byte #m, for __u##b type.
 * E.g., moving byte #6 (nnnnnnnn) into byte #1 (mmmmmmmm) for __u64:
 * 1) xxxxxxxx nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx
 * 2) nnnnnnnn xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx mmmmmmmm xxxxxxxx 00000000
 * 3) 00000000 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn
 * 4) 00000000 00000000 00000000 00000000 00000000 00000000 nnnnnnnn 00000000
 */
#define ___bpf_mvb(x, b, n, m) ((__u##b)(x) << (b-(n+1)*8) >> (b-8) << (m*8))

#define ___bpf_swab16(x) ((__u16)(			\
			  ___bpf_mvb(x, 16, 0, 1) |	\
			  ___bpf_mvb(x, 16, 1, 0)))

#define ___bpf_swab32(x) ((__u32)(			\
			  ___bpf_mvb(x, 32, 0, 3) |	\
			  ___bpf_mvb(x, 32, 1, 2) |	\
			  ___bpf_mvb(x, 32, 2, 1) |	\
			  ___bpf_mvb(x, 32, 3, 0)))

#define ___bpf_swab64(x) ((__u64)(			\
			  ___bpf_mvb(x, 64, 0, 7) |	\
			  ___bpf_mvb(x, 64, 1, 6) |	\
			  ___bpf_mvb(x, 64, 2, 5) |	\
			  ___bpf_mvb(x, 64, 3, 4) |	\
			  ___bpf_mvb(x, 64, 4, 3) |	\
			  ___bpf_mvb(x, 64, 5, 2) |	\
			  ___bpf_mvb(x, 64, 6, 1) |	\
			  ___bpf_mvb(x, 64, 7, 0)))

/* LLVM's BPF target selects the endianness of the CPU it compiles on,
 * or whatever the user specifies (bpfel/bpfeb). The __BYTE_ORDER__ used
 * below is defined by the compiler; we cannot rely on __BYTE_ORDER from
 * libc headers, since it does not reflect the actually requested byte
 * order.
 *
 * Note that LLVM's BPF target gives __builtin_bswapX() different
 * semantics: in both the bpfel and bpfeb case it maps to
 * BPF_ALU | BPF_END | BPF_TO_BE, i.e. it behaves like cpu_to_be16()
 * below. We could use it unconditionally in the BPF case, but better
 * not to rely on that, so that this header can be used from both the
 * application and the BPF program side, which use different targets.
 */
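/* Worked example (illustration only): for a compile-time constant on a
 * little-endian (bpfel) target, the bpf_htons() wrapper defined below
 * takes the ___bpf_swab16() path, so the byte swap folds to a constant:
 *
 *	bpf_htons(0x1234)
 *	  -> __bpf_constant_htons(0x1234)
 *	  -> ___bpf_swab16(0x1234)
 *	  == 0x3412
 *
 * Here ___bpf_mvb(0x1234, 16, 0, 1) moves byte #0 (0x34) into byte #1
 * and ___bpf_mvb(0x1234, 16, 1, 0) moves byte #1 (0x12) into byte #0.
 * For a non-constant argument the same wrapper emits __builtin_bswap16()
 * instead, i.e. an actual byte-swap instruction on the BPF target.
 */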
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
# define __bpf_ntohs(x)			__builtin_bswap16(x)
# define __bpf_htons(x)			__builtin_bswap16(x)
# define __bpf_constant_ntohs(x)	___bpf_swab16(x)
# define __bpf_constant_htons(x)	___bpf_swab16(x)
# define __bpf_ntohl(x)			__builtin_bswap32(x)
# define __bpf_htonl(x)			__builtin_bswap32(x)
# define __bpf_constant_ntohl(x)	___bpf_swab32(x)
# define __bpf_constant_htonl(x)	___bpf_swab32(x)
# define __bpf_be64_to_cpu(x)		__builtin_bswap64(x)
# define __bpf_cpu_to_be64(x)		__builtin_bswap64(x)
# define __bpf_constant_be64_to_cpu(x)	___bpf_swab64(x)
# define __bpf_constant_cpu_to_be64(x)	___bpf_swab64(x)
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
# define __bpf_ntohs(x)			(x)
# define __bpf_htons(x)			(x)
# define __bpf_constant_ntohs(x)	(x)
# define __bpf_constant_htons(x)	(x)
# define __bpf_ntohl(x)			(x)
# define __bpf_htonl(x)			(x)
# define __bpf_constant_ntohl(x)	(x)
# define __bpf_constant_htonl(x)	(x)
# define __bpf_be64_to_cpu(x)		(x)
# define __bpf_cpu_to_be64(x)		(x)
# define __bpf_constant_be64_to_cpu(x)	(x)
# define __bpf_constant_cpu_to_be64(x)	(x)
#else
# error "Fix your compiler's __BYTE_ORDER__?!"
#endif

#define bpf_htons(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_htons(x) : __bpf_htons(x))
#define bpf_ntohs(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_ntohs(x) : __bpf_ntohs(x))
#define bpf_htonl(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_htonl(x) : __bpf_htonl(x))
#define bpf_ntohl(x)				\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_ntohl(x) : __bpf_ntohl(x))
#define bpf_cpu_to_be64(x)			\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x))
#define bpf_be64_to_cpu(x)			\
	(__builtin_constant_p(x) ?		\
	 __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x))

#endif /* __BPF_ENDIAN__ */
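
/* Usage sketch (a minimal illustration, assuming a BPF program that has
 * already obtained a bounds-checked struct tcphdr *tcp from the packet;
 * the parsing code and the hypothetical handle_http() helper are not
 * part of this header):
 *
 *	#include <linux/tcp.h>
 *	#include <bpf/bpf_endian.h>
 *
 *	// Network-byte-order header field vs. host-order constant:
 *	// bpf_htons(80) folds to a constant at compile time.
 *	if (tcp->dest == bpf_htons(80))
 *		handle_http();
 *
 *	// Runtime value: bpf_ntohs() emits a byte-swap on little-endian
 *	// targets and is a no-op on big-endian ones.
 *	__u16 dport = bpf_ntohs(tcp->dest);
 */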