xref: /OK3568_Linux_fs/kernel/arch/mips/include/asm/atomic.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04, 06 by Ralf Baechle
 */
#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cmpxchg.h>
#include <asm/llsc.h>
#include <asm/sync.h>
#include <asm/war.h>

#define ATOMIC_OPS(pfx, type)						\
static __always_inline type pfx##_read(const pfx##_t *v)		\
{									\
	return READ_ONCE(v->counter);					\
}									\
									\
static __always_inline void pfx##_set(pfx##_t *v, type i)		\
{									\
	WRITE_ONCE(v->counter, i);					\
}									\
									\
static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n)	\
{									\
	return cmpxchg(&v->counter, o, n);				\
}									\
									\
static __always_inline type pfx##_xchg(pfx##_t *v, type n)		\
{									\
	return xchg(&v->counter, n);					\
}

ATOMIC_OPS(atomic, int)

#ifdef CONFIG_64BIT
# define ATOMIC64_INIT(i)	{ (i) }
ATOMIC_OPS(atomic64, s64)
#endif
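
/*
 * Illustrative sketch (not emitted by the macro itself) of how the basic
 * ops generated by ATOMIC_OPS(atomic, int) above behave; "v", "cur" and
 * "old" are hypothetical locals used only for this example:
 *
 *	atomic_t v = ATOMIC_INIT(0);
 *
 *	atomic_set(&v, 5);			stores 5 with WRITE_ONCE()
 *	int cur = atomic_read(&v);		reads 5 with READ_ONCE()
 *	int old = atomic_xchg(&v, 7);		old == 5, v.counter now 7
 *	old = atomic_cmpxchg(&v, 7, 9);		old == 7, v.counter now 9
 */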

#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
static __inline__ void pfx##_##op(type i, pfx##_t * v)			\
{									\
	type temp;							\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return;							\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%0, %1		# " #pfx "_" #op "	\n"	\
	"	" #asm_op " %0, %2				\n"	\
	"	" #sc "	%0, %1					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	: "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)		\
	: "Ir" (i) : __LLSC_CLOBBER);					\
}
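
/*
 * For reference, a rough sketch of what the ATOMIC_OP(atomic, add, ...)
 * instantiation further below expands to on an LL/SC-capable CPU.  The
 * barrier and ISA directives are omitted and "+m" stands in for the real
 * GCC_OFF_SMALL_ASM() constraint, so this is an approximation, not the
 * exact generated code:
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	ll	%0, %1		# atomic_add	\n"
 *		"	addu	%0, %2				\n"
 *		"	sc	%0, %1				\n"
 *		"	beqz	%0, 1b				\n"
 *		: "=&r" (temp), "+m" (v->counter)
 *		: "Ir" (i));
 *	}
 *
 * The ll/sc pair retries until the store-conditional succeeds, which makes
 * the read-modify-write of v->counter atomic without taking a lock.
 */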

#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result c_op i;						\
		v->counter = result;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_" #op "_return\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
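
/*
 * Note on the *_return loop above: the store-conditional overwrites %0
 * (result) with its success flag, so once sc/scd succeeds the arithmetic
 * op is issued a second time, after the loop, to recompute the new value
 * from the still-live loaded value in %1 and the operand in %3 before it
 * is returned.
 */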

#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)		\
static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		v->counter c_op i;					\
		raw_local_irq_restore(flags);				\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# " #pfx "_fetch_" #op "\n"	\
	"	" #asm_op " %0, %1, %3				\n"	\
	"	" #sc "	%0, %2					\n"	\
	"\t" __SC_BEQZ "%0, 1b					\n"	\
	"	.set	pop					\n"	\
	"	move	%0, %1					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i) : __LLSC_CLOBBER);					\
									\
	return result;							\
}
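
/*
 * Unlike the *_return variant, the _fetch_ loop above hands back the old
 * value: once the store-conditional succeeds, the value originally loaded
 * by ll/lld is still held in %1 and is moved into the result register, so
 * e.g. atomic_fetch_add_relaxed(i, v) returns v->counter as it was before
 * i was added.
 */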

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc)		\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)

ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
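
/*
 * The self-referential #defines above advertise to the generic code behind
 * <linux/atomic.h> that this architecture provides only the _relaxed forms;
 * the fully ordered, _acquire and _release variants (e.g. atomic_add_return())
 * are then generated from these by wrapping them in the appropriate barriers.
 */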

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define atomic64_add_return_relaxed	atomic64_add_return_relaxed
# define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
# define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
# define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */

#undef ATOMIC_OPS
#define ATOMIC_OPS(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc)			\
	ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc)
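
/*
 * ATOMIC_OPS is redefined without ATOMIC_OP_RETURN for the bitwise ops
 * below: the kernel's atomic API has no atomic_and_return()/or/xor, only
 * the void atomic_and()/or()/xor() and the atomic_fetch_*() forms.
 */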
ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
# define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
# define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
#endif

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if @v is greater than or equal to @i.
 * The function returns the old value of @v minus @i, which may be negative
 * if the subtraction was not performed.
 */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc)				\
static __inline__ type pfx##_sub_if_positive(type i, pfx##_t * v)	\
{									\
	type temp, result;						\
									\
	smp_mb__before_atomic();					\
									\
	if (!kernel_uses_llsc) {					\
		unsigned long flags;					\
									\
		raw_local_irq_save(flags);				\
		result = v->counter;					\
		result -= i;						\
		if (result >= 0)					\
			v->counter = result;				\
		raw_local_irq_restore(flags);				\
		smp_mb__after_atomic();					\
		return result;						\
	}								\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" __SYNC(full, loongson3_war) "			\n"	\
	"1:	" #ll "	%1, %2		# atomic_sub_if_positive\n"	\
	"	.set	pop					\n"	\
	"	" #op "	%0, %1, %3				\n"	\
	"	move	%1, %0					\n"	\
	"	bltz	%0, 2f					\n"	\
	"	.set	push					\n"	\
	"	.set	" MIPS_ISA_LEVEL "			\n"	\
	"	" #sc "	%1, %2					\n"	\
	"	" __SC_BEQZ "%1, 1b				\n"	\
	"2:	" __SYNC(full, loongson3_war) "			\n"	\
	"	.set	pop					\n"	\
	: "=&r" (result), "=&r" (temp),					\
	  "+" GCC_OFF_SMALL_ASM() (v->counter)				\
	: "Ir" (i)							\
	: __LLSC_CLOBBER);						\
									\
	/*								\
	 * In the Loongson3 workaround case we already have a		\
	 * completion barrier at 2: above, which is needed due to the	\
	 * bltz that can branch to code outside of the LL/SC loop. As	\
	 * such, we don't need to emit another barrier here.		\
	 */								\
	if (__SYNC_loongson3_war == 0)					\
		smp_mb__after_atomic();					\
									\
	return result;							\
}

ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)
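
/*
 * Illustrative usage sketch (the caller, "refs" and "put_ref" are
 * hypothetical, not part of this header): a reference count that must
 * never drop below zero.  atomic_dec_if_positive() returns the decremented
 * value, or a negative value (with the counter left untouched) if it was
 * already zero.
 *
 *	static atomic_t refs = ATOMIC_INIT(1);
 *
 *	static bool put_ref(void)
 *	{
 *		return atomic_dec_if_positive(&refs) == 0;
 *	}
 *
 * put_ref() returns true exactly when it dropped the final reference.
 */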

#ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)
#endif

#undef ATOMIC_SIP_OP

#endif /* _ASM_ATOMIC_H */