/*
 * include/asm-xtensa/atomic.h
 *
 * Atomic operations that C can't guarantee us.  Useful for resource counting.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2008 Tensilica Inc.
 */

#ifndef _XTENSA_ATOMIC_H
#define _XTENSA_ATOMIC_H

#include <linux/stringify.h>
#include <linux/types.h>
#include <asm/processor.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * This Xtensa implementation assumes that the right mechanism
 * for exclusion is for locking interrupts to level EXCM_LEVEL.
 *
 * Locking interrupts looks like this:
 *
 *    rsil a15, TOPLEVEL
 *    <code>
 *    wsr  a15, PS
 *    rsync
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed, and a window overflow
 * must not occur between the rsil and wsr instructions. By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */
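
/*
 * Illustrative sketch only (not part of the original header): under the
 * interrupt-locking fallback further below, atomic_add(i, v) expands to
 * roughly the following sequence, where a2/a3/a4 are stand-ins for whatever
 * registers the compiler picks for the loaded value, &v->counter and i:
 *
 *    rsil    a15, TOPLEVEL   # mask interrupts up to EXCM_LEVEL,
 *                            # old PS is saved in a15
 *    l32i    a2, a3, 0       # load v->counter
 *    add     a2, a2, a4      # compute the new value
 *    s32i    a2, a3, 0       # store it back
 *    wsr     a15, ps         # restore the saved PS
 *    rsync                   # wait for the PS write to take effect
 */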

/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)		READ_ONCE((v)->counter)

/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
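
/*
 * Minimal usage sketch (illustrative only; "refcnt" is a hypothetical
 * variable, not something defined by this header):
 *
 *    static atomic_t refcnt = ATOMIC_INIT(0);
 *
 *    atomic_set(&refcnt, 1);
 *    if (atomic_read(&refcnt) == 1)
 *            atomic_add(2, &refcnt);   // refcnt is now 3
 */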

#if XCHAL_HAVE_EXCLUSIVE
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32ex   %[tmp], %[addr]\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32ex   %[result], %[addr]\n"		\
			"       getex   %[result]\n"			\
			"       beqz    %[result], 1b\n"		\
			: [result] "=&a" (result), [tmp] "=&a" (tmp)	\
			: [i] "a" (i), [addr] "a" (v)			\
			: "memory"					\
			);						\
									\
	return tmp;							\
}
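
/*
 * For illustration (not part of the original header): l32ex/s32ex form a
 * load-exclusive/store-exclusive pair, and getex fetches the store-success
 * flag that beqz tests. atomic_fetch_##op returns %[tmp], the value
 * observed before the operation, while atomic_##op##_return repeats the op
 * once more after the loop to return the new value. ATOMIC_OP_RETURN(add)
 * therefore behaves roughly like:
 *
 *    static inline int atomic_add_return(int i, atomic_t *v)
 *    {
 *            int old, ok;
 *            do {
 *                    old = load_exclusive(&v->counter);        // l32ex
 *                    ok = store_exclusive(old + i, &v->counter); // s32ex+getex
 *            } while (!ok);                                    // beqz ..., 1b
 *            return old + i;                                   // final #op
 *    }
 *
 * load_exclusive()/store_exclusive() are hypothetical helpers standing in
 * for the instructions; they do not exist in the kernel.
 */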

#elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			"       " #op " %[result], %[result], %[i]\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	__asm__ __volatile__(						\
			"1:     l32i    %[tmp], %[mem]\n"		\
			"       wsr     %[tmp], scompare1\n"		\
			"       " #op " %[result], %[tmp], %[i]\n"	\
			"       s32c1i  %[result], %[mem]\n"		\
			"       bne     %[result], %[tmp], 1b\n"	\
			: [result] "=&a" (result), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "memory"					\
			);						\
									\
	return result;							\
}
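
/*
 * For reference (illustrative, not part of the original header): s32c1i is
 * a compare-and-swap that writes its operand to %[mem] only if the word
 * there still equals the SCOMPARE1 special register, and always returns
 * the value it observed in memory. In C-like pseudocode the loop above is:
 *
 *    do {
 *            old  = v->counter;               // l32i; wsr old, scompare1
 *            new  = old op i;                 // #op
 *            seen = s32c1i(new, &v->counter); // store iff counter == old
 *    } while (seen != old);                   // bne ..., 1b  (lost a race)
 */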

#else /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			);						\
}									\

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[result], %[result], %[i]\n"	\
			"       s32i    %[result], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [mem] "+m" (*v)	\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			);						\
									\
	return vval;							\
}

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned int tmp, vval;						\
									\
	__asm__ __volatile__(						\
			"       rsil    a15, "__stringify(TOPLEVEL)"\n"	\
			"       l32i    %[result], %[mem]\n"		\
			"       " #op " %[tmp], %[result], %[i]\n"	\
			"       s32i    %[tmp], %[mem]\n"		\
			"       wsr     a15, ps\n"			\
			"       rsync\n"				\
			: [result] "=&a" (vval), [tmp] "=&a" (tmp),	\
			  [mem] "+m" (*v)				\
			: [i] "a" (i)					\
			: "a15", "memory"				\
			);						\
									\
	return vval;							\
}

#endif /* XCHAL_HAVE_S32C1I */

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op) ATOMIC_OP_RETURN(op)
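
/*
 * Each ATOMIC_OPS(op) invocation below stamps out one function per
 * underlying macro: ATOMIC_OPS(add), for example, defines atomic_add(),
 * atomic_fetch_add() and atomic_add_return(), all specialized for
 * whichever code path was selected above.
 */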

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
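
/*
 * Usage sketch (illustrative only; add_unless() is a hypothetical helper
 * in the style of atomic_add_unless()): atomic_cmpxchg() supports
 * lock-free read-modify-write with an arbitrary update rule:
 *
 *    static int add_unless(atomic_t *v, int a, int u)
 *    {
 *            int c = atomic_read(v);
 *            while (c != u) {
 *                    int old = atomic_cmpxchg(v, c, c + a);
 *                    if (old == c)
 *                            return 1;   // our update was installed
 *                    c = old;            // raced; retry with fresh value
 *            }
 *            return 0;                   // counter hit the excluded value u
 *    }
 */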

#endif /* _XTENSA_ATOMIC_H */