#ifndef __ASM_SH_UNALIGNED_SH4A_H
#define __ASM_SH_UNALIGNED_SH4A_H

/*
 * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
 * Support for 64-bit accesses is done through shifting and masking
 * relative to the endianness. Unaligned stores are not supported by the
 * instruction encoding, so these continue to use the packed
 * struct.
 *
 * The same note as with the movli.l/movco.l pair applies here: as long
 * as the load is guaranteed to be inlined, nothing else will hook into
 * r0 and we get the return value for free.
 *
 * NOTE: Because we require the r0 encoding, care should be taken to
 * avoid mixing these heavily with other r0 consumers, such as the atomic
 * ops. Failure to adhere to this can result in the compiler running out
 * of spill registers and blowing up when building at low optimization
 * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
{
	unsigned long unaligned;

	__asm__ __volatile__ (
		"movua.l	@%1, %0\n\t"
		 : "=z" (unaligned)
		 : "r" (p)
	);

	return unaligned;
}
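/*
 * A minimal usage sketch (illustrative only, not part of this header's
 * API; the buffer and offset are hypothetical): reading a 32-bit value
 * from an odd offset in a byte stream compiles to a single movua.l
 * instead of four byte loads plus shifts:
 *
 *	u8 pkt[16];
 *	u32 seq = __get_unaligned_cpu32(pkt + 3);
 */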

struct __una_u16 { u16 x __attribute__((packed)); };
struct __una_u32 { u32 x __attribute__((packed)); };
struct __una_u64 { u64 x __attribute__((packed)); };

static inline u16 __get_unaligned_cpu16(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return p[0] | p[1] << 8;
#else
	return p[0] << 8 | p[1];
#endif
}

/*
 * Even though movua.l supports auto-increment on the read side, it can
 * only store to r0 due to instruction encoding constraints, so just let
 * the compiler sort it out on its own.
 */
static inline u64 __get_unaligned_cpu64(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return (u64)__get_unaligned_cpu32(p + 4) << 32 |
		    __get_unaligned_cpu32(p);
#else
	return (u64)__get_unaligned_cpu32(p) << 32 |
		    __get_unaligned_cpu32(p + 4);
#endif
}
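/*
 * Worked example (little-endian, hypothetical bytes): with p[0..7] =
 * 88 77 66 55 44 33 22 11, the low half __get_unaligned_cpu32(p)
 * yields 0x55667788 and the high half __get_unaligned_cpu32(p + 4)
 * yields 0x11223344, composing to 0x1122334455667788.
 */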

static inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpu(__get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpu(__get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpu(__get_unaligned_cpu64(p));
}

static inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpu(__get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpu(__get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpu(__get_unaligned_cpu64(p));
}
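/*
 * Usage sketch (hypothetical wire format): a big-endian length field at
 * byte offset 2 of a frame header can be read in place, with the
 * byteswap folded in only when it differs from CPU endianness:
 *
 *	u16 len = get_unaligned_be16(hdr + 2);
 */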

static inline void __put_le16_noalign(u8 *p, u16 val)
{
	*p++ = val;
	*p++ = val >> 8;
}

static inline void __put_le32_noalign(u8 *p, u32 val)
{
	__put_le16_noalign(p, val);
	__put_le16_noalign(p + 2, val >> 16);
}

static inline void __put_le64_noalign(u8 *p, u64 val)
{
	__put_le32_noalign(p, val);
	__put_le32_noalign(p + 4, val >> 32);
}

static inline void __put_be16_noalign(u8 *p, u16 val)
{
	*p++ = val >> 8;
	*p++ = val;
}

static inline void __put_be32_noalign(u8 *p, u32 val)
{
	__put_be16_noalign(p, val >> 16);
	__put_be16_noalign(p + 2, val);
}

static inline void __put_be64_noalign(u8 *p, u64 val)
{
	__put_be32_noalign(p, val >> 32);
	__put_be32_noalign(p + 4, val);
}
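/*
 * Worked example: __put_be32_noalign(p, 0x11223344) stores the bytes
 * 11 22 33 44 in that order, one at a time, so no alignment is assumed
 * and the result is big-endian regardless of CPU endianness.
 */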

static inline void put_unaligned_le16(u16 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u16 *)p)->x = val;
#else
	__put_le16_noalign(p, val);
#endif
}

static inline void put_unaligned_le32(u32 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u32 *)p)->x = val;
#else
	__put_le32_noalign(p, val);
#endif
}

static inline void put_unaligned_le64(u64 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u64 *)p)->x = val;
#else
	__put_le64_noalign(p, val);
#endif
}

static inline void put_unaligned_be16(u16 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u16 *)p)->x = val;
#else
	__put_be16_noalign(p, val);
#endif
}

static inline void put_unaligned_be32(u32 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u32 *)p)->x = val;
#else
	__put_be32_noalign(p, val);
#endif
}

static inline void put_unaligned_be64(u64 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u64 *)p)->x = val;
#else
	__put_be64_noalign(p, val);
#endif
}
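/*
 * Usage sketch (hypothetical buffer and value): when the requested
 * byte order matches the CPU's, the store degenerates to a single
 * packed-struct assignment; otherwise it falls back to explicit byte
 * stores. On a little-endian kernel:
 *
 *	put_unaligned_le32(crc, buf + 5);	// packed-struct store
 *	put_unaligned_be32(crc, buf + 9);	// four byte stores
 */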

/*
 * Cause a link-time error if we try an unaligned access other than
 * 1, 2, 4 or 8 bytes long
 */
extern void __bad_unaligned_access_size(void);

#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({			\
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),	\
	__bad_unaligned_access_size()))));					\
	}))

#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({			\
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),	\
	__bad_unaligned_access_size()))));					\
	}))
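/*
 * Dispatch sketch: selection is driven purely by sizeof(*(ptr)), so the
 * accessor is chosen at compile time from the pointee type. For a
 * hypothetical u32 *fieldp, __get_unaligned_le(fieldp) resolves to
 * get_unaligned_le32(fieldp); any unsupported size leaves a reference
 * to __bad_unaligned_access_size(), which has no definition anywhere
 * and therefore fails at link time.
 */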

#define __put_unaligned_le(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_le16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_le32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_le64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#define __put_unaligned_be(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_be16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_be32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_be64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#ifdef __LITTLE_ENDIAN
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif
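/*
 * Generic usage sketch (hypothetical packed on-disk record): callers
 * that only want "native order, any alignment" use the generic pair,
 * which expands to the CPU-endian variants selected above:
 *
 *	u32 magic = get_unaligned((u32 *)rec);
 *	put_unaligned(magic, (u32 *)(rec + 12));
 */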

#endif /* __ASM_SH_UNALIGNED_SH4A_H */