1*4882a593Smuzhiyun #ifndef _PPC_BYTEORDER_H
2*4882a593Smuzhiyun #define _PPC_BYTEORDER_H
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include <asm/types.h>
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #ifdef __GNUC__
7*4882a593Smuzhiyun
/*
 * Load a 16-bit value from *addr with the bytes reversed, i.e. a
 * little-endian load on big-endian PowerPC, using the lhbrx instruction.
 * The "m" (*addr) input tells the compiler the memory is read so the
 * load is ordered correctly against surrounding stores.
 */
static __inline__ unsigned ld_le16(const volatile unsigned short *addr)
{
	unsigned v;

	__asm__ __volatile__ ("lhbrx %0,0,%1"
			      : "=r" (v)
			      : "r" (addr), "m" (*addr));
	return v;
}
15*4882a593Smuzhiyun
/*
 * Store the low 16 bits of val to *addr with the bytes reversed
 * (little-endian store on big-endian PowerPC) via the sthbrx
 * instruction.  The "=m" (*addr) output makes the memory write
 * visible to the compiler's dependency tracking.
 */
static __inline__ void st_le16(volatile unsigned short *addr, const unsigned val)
{
	__asm__ __volatile__ ("sthbrx %1,0,%2"
			      : "=m" (*addr)
			      : "r" (val), "r" (addr));
}
20*4882a593Smuzhiyun
/*
 * Load a 32-bit value from *addr with the bytes reversed, i.e. a
 * little-endian load on big-endian PowerPC, using the lwbrx instruction.
 * The "m" (*addr) input tells the compiler the memory is read so the
 * load is ordered correctly against surrounding stores.
 */
static __inline__ unsigned ld_le32(const volatile unsigned *addr)
{
	unsigned v;

	__asm__ __volatile__ ("lwbrx %0,0,%1"
			      : "=r" (v)
			      : "r" (addr), "m" (*addr));
	return v;
}
28*4882a593Smuzhiyun
/*
 * Store val to *addr with the bytes reversed (little-endian store on
 * big-endian PowerPC) via the stwbrx instruction.  The "=m" (*addr)
 * output makes the memory write visible to the compiler's dependency
 * tracking.
 */
static __inline__ void st_le32(volatile unsigned *addr, const unsigned val)
{
	__asm__ __volatile__ ("stwbrx %1,0,%2"
			      : "=m" (*addr)
			      : "r" (val), "r" (addr));
}
33*4882a593Smuzhiyun
/* alas, egcs sounds like it has a bug in this code that doesn't use the
   inline asm correctly, and can cause file corruption.  Until I hear that
   it's fixed, I can live without the extra speed.  I hope. */
37*4882a593Smuzhiyun #if !(__GNUC__ >= 2 && __GNUC_MINOR__ >= 90)
38*4882a593Smuzhiyun #if 0
39*4882a593Smuzhiyun # define __arch_swab16(x) ld_le16(&x)
40*4882a593Smuzhiyun # define __arch_swab32(x) ld_le32(&x)
41*4882a593Smuzhiyun #else
/*
 * Byte-swap a 16-bit value entirely in registers.
 * The tied operand "0" (value >> 8) pre-seeds the result with the high
 * byte shifted into the low position; rlwimi then rotates value left 8
 * and inserts the original low byte into the upper byte of the low
 * halfword (IBM bit positions 16-23).  No memory access, hence the
 * const attribute.
 */
static __inline__ __attribute__((const)) __u16 ___arch__swab16(__u16 value)
{
	__u16 swapped;

	__asm__("rlwimi %0,%1,8,16,23"
		: "=r" (swapped)
		: "r" (value), "0" (value >> 8));
	return swapped;
}
51*4882a593Smuzhiyun
/*
 * Byte-swap a 32-bit value entirely in registers.
 * The tied operand "0" (value >> 24) pre-seeds the result with the top
 * byte in the bottom position; the three rlwimi instructions then
 * rotate value and insert the remaining three bytes into their
 * mirrored positions (IBM bit ranges 16-23, 8-15 and 0-7).  No memory
 * access, hence the const attribute.
 */
static __inline__ __attribute__((const)) __u32 ___arch__swab32(__u32 value)
{
	__u32 swapped;

	__asm__("rlwimi %0,%1,24,16,23\n\t"
		"rlwimi %0,%1,8,8,15\n\t"
		"rlwimi %0,%1,24,0,7"
		: "=r" (swapped)
		: "r" (value), "0" (value >> 24));
	return swapped;
}
63*4882a593Smuzhiyun #define __arch__swab32(x) ___arch__swab32(x)
64*4882a593Smuzhiyun #define __arch__swab16(x) ___arch__swab16(x)
65*4882a593Smuzhiyun #endif /* 0 */
66*4882a593Smuzhiyun
67*4882a593Smuzhiyun #endif
68*4882a593Smuzhiyun
/* The same, but returns the converted value from the location pointed to by addr. */
70*4882a593Smuzhiyun #define __arch__swab16p(addr) ld_le16(addr)
71*4882a593Smuzhiyun #define __arch__swab32p(addr) ld_le32(addr)
72*4882a593Smuzhiyun
73*4882a593Smuzhiyun /* The same, but do the conversion in situ, ie. put the value back to addr. */
74*4882a593Smuzhiyun #define __arch__swab16s(addr) st_le16(addr,*addr)
75*4882a593Smuzhiyun #define __arch__swab32s(addr) st_le32(addr,*addr)
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun #endif /* __GNUC__ */
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
80*4882a593Smuzhiyun #define __BYTEORDER_HAS_U64__
81*4882a593Smuzhiyun #endif
82*4882a593Smuzhiyun #include <linux/byteorder/big_endian.h>
83*4882a593Smuzhiyun
84*4882a593Smuzhiyun #endif /* _PPC_BYTEORDER_H */
85