/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  Port on Texas Instruments TMS320C6x architecture
 *
 *  Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 *  Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/swab.h>
#include <linux/unaligned/generic.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using special load/store instructions.
 */
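
/*
 * Example (illustration only; "buf" is a hypothetical byte buffer, not
 * something defined in this header): the accessors below allow reads and
 * writes at arbitrary byte offsets without alignment faults, e.g.
 *
 *	u8 buf[16];
 *
 *	u32 v = get_unaligned_le32(&buf[1]);
 *	put_unaligned_be16(0x1234, &buf[3]);
 */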

static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;
	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;
	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;
	_p[0] = val >> 8;
	_p[1] = val;
}

/*
 * 32-bit and 64-bit accesses use the C64x+ non-aligned load/store
 * instructions (LDNW/STNW, LDNDW/STNDW).  Loads on C6x have four
 * delay slots, hence the "nop 4" before the loaded value is used.
 */
static inline u32 get_unaligned32(const void *p)
{
	u32 val = (u32) p;
	asm (" ldnw	.d1t1	*%0,%0\n"
	     " nop     4\n"
	     : "+a"(val));
	return val;
}

static inline void put_unaligned32(u32 val, void *p)
{
	asm volatile (" stnw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

static inline u64 get_unaligned64(const void *p)
{
	u64 val;
	asm volatile (" ldndw	.d1t1	*%1,%0\n"
		      " nop     4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

static inline void put_unaligned64(u64 val, const void *p)
{
	asm volatile (" stndw	.d2t1	%0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned	__get_unaligned_be
#define put_unaligned	__put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned	__get_unaligned_le
#define put_unaligned	__put_unaligned_le

#endif
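
/*
 * Note on the mapping above (a summary, not additional code): on a
 * little-endian build, get_unaligned_be32(p), for example, expands to
 * __swab32(get_unaligned32(p)), i.e. one non-aligned hardware load
 * followed by a byte swap, while get_unaligned_le32(p) is the bare
 * load.  The big-endian case is the mirror image.
 */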

#endif /* _ASM_C6X_UNALIGNED_H */