xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/atomic_lse.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LSE_H
#define __ASM_ATOMIC_LSE_H

#define ATOMIC_OP(op, asm_op)						\
static inline void __lse_atomic_##op(int i, atomic_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
"	" #asm_op "	%w[i], %[v]\n"					\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v));							\
}
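
/*
 * Illustrative expansion (a sketch of what the preprocessor generates,
 * not upstream text): ATOMIC_OP(add, stadd) becomes roughly
 *
 *	static inline void __lse_atomic_add(int i, atomic_t *v)
 *	{
 *		asm volatile(
 *		__LSE_PREAMBLE
 *		"	stadd	%w[i], %[v]\n"
 *		: [i] "+r" (i), [v] "+Q" (v->counter)
 *		: "r" (v));
 *	}
 *
 * i.e. a single ARMv8.1 LSE store-form atomic with no return value.
 */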

ATOMIC_OP(andnot, stclr)
ATOMIC_OP(or, stset)
ATOMIC_OP(xor, steor)
ATOMIC_OP(add, stadd)

#undef ATOMIC_OP

#define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
"	" #asm_op #mb "	%w[i], %w[i], %[v]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

#define ATOMIC_FETCH_OPS(op, asm_op)					\
	ATOMIC_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC_FETCH_OP(        , al, op, asm_op, "memory")
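
/*
 * Note on the name/mb pairs above (and in the other *_FETCH_OP* /
 * *_RETURN wrappers below): the empty suffix, "a", "l" and "al" select
 * the relaxed, acquire, release and fully-ordered forms of the LSE
 * instruction (e.g. ldadd, ldadda, ldaddl, ldaddal), and the ordered
 * variants also pass a "memory" clobber so the compiler does not move
 * memory accesses across the asm.
 */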

ATOMIC_FETCH_OPS(andnot, ldclr)
ATOMIC_FETCH_OPS(or, ldset)
ATOMIC_FETCH_OPS(xor, ldeor)
ATOMIC_FETCH_OPS(add, ldadd)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_FETCH_OPS

#define ATOMIC_OP_ADD_RETURN(name, mb, cl...)				\
static inline int __lse_atomic_add_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}
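
/*
 * ldadd writes the value of the counter *before* the addition into the
 * tmp register; adding i back in gives the post-add value, which is
 * what the atomic_add_return*() family is expected to return.
 */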

ATOMIC_OP_ADD_RETURN(_relaxed,   )
ATOMIC_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC_OP_ADD_RETURN

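/*
 * LSE has no atomic AND instruction; stclr/ldclr atomically clear the
 * bits that are set in the source register (v->counter &= ~i), so AND
 * is implemented here by first complementing i with mvn.
 */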
static inline void __lse_atomic_and(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	mvn	%w[i], %w[i]\n"
	"	stclr	%w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC_FETCH_OP_AND(name, mb, cl...)				\
static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mvn	%w[i], %w[i]\n"					\
	"	ldclr" #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_FETCH_OP_AND(_relaxed,   )
ATOMIC_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC_FETCH_OP_AND(_release,  l, "memory")
ATOMIC_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC_FETCH_OP_AND

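/*
 * There is likewise no atomic subtract instruction: sub (and the
 * sub_return/fetch_sub variants below) negate i and reuse the
 * stadd/ldadd forms.
 */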
static inline void __lse_atomic_sub(int i, atomic_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	neg	%w[i], %w[i]\n"
	"	stadd	%w[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC_OP_SUB_RETURN(name, mb, cl...)				\
static inline int __lse_atomic_sub_return##name(int i, atomic_t *v)	\
{									\
	u32 tmp;							\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[tmp], %[v]\n"			\
	"	add	%w[i], %w[i], %w[tmp]"				\
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_OP_SUB_RETURN(_relaxed,   )
ATOMIC_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC_OP_SUB_RETURN

#define ATOMIC_FETCH_OP_SUB(name, mb, cl...)				\
static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%w[i], %w[i]\n"					\
	"	ldadd" #mb "	%w[i], %w[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC_FETCH_OP_SUB(_relaxed,   )
ATOMIC_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC_FETCH_OP_SUB

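/*
 * The atomic64_t variants below mirror the 32-bit helpers above, but
 * take s64 and use the full 64-bit X registers (%[i] / %x[tmp] rather
 * than %w[...]).
 */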
#define ATOMIC64_OP(op, asm_op)						\
static inline void __lse_atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
"	" #asm_op "	%[i], %[v]\n"					\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v));							\
}

ATOMIC64_OP(andnot, stclr)
ATOMIC64_OP(or, stset)
ATOMIC64_OP(xor, steor)
ATOMIC64_OP(add, stadd)

#undef ATOMIC64_OP

#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...)			\
static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
"	" #asm_op #mb "	%[i], %[i], %[v]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

#define ATOMIC64_FETCH_OPS(op, asm_op)					\
	ATOMIC64_FETCH_OP(_relaxed,   , op, asm_op)			\
	ATOMIC64_FETCH_OP(_acquire,  a, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(_release,  l, op, asm_op, "memory")		\
	ATOMIC64_FETCH_OP(        , al, op, asm_op, "memory")

ATOMIC64_FETCH_OPS(andnot, ldclr)
ATOMIC64_FETCH_OPS(or, ldset)
ATOMIC64_FETCH_OPS(xor, ldeor)
ATOMIC64_FETCH_OPS(add, ldadd)

#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_FETCH_OPS

#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...)				\
static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_OP_ADD_RETURN(_relaxed,   )
ATOMIC64_OP_ADD_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_ADD_RETURN(_release,  l, "memory")
ATOMIC64_OP_ADD_RETURN(        , al, "memory")

#undef ATOMIC64_OP_ADD_RETURN

static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	mvn	%[i], %[i]\n"
	"	stclr	%[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_FETCH_OP_AND(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mvn	%[i], %[i]\n"					\
	"	ldclr" #mb "	%[i], %[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_FETCH_OP_AND(_relaxed,   )
ATOMIC64_FETCH_OP_AND(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_AND(_release,  l, "memory")
ATOMIC64_FETCH_OP_AND(        , al, "memory")

#undef ATOMIC64_FETCH_OP_AND

static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
{
	asm volatile(
	__LSE_PREAMBLE
	"	neg	%[i], %[i]\n"
	"	stadd	%[i], %[v]"
	: [i] "+&r" (i), [v] "+Q" (v->counter)
	: "r" (v));
}

#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...)				\
static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v)	\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %x[tmp], %[v]\n"			\
	"	add	%[i], %[i], %x[tmp]"				\
	: [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)	\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_OP_SUB_RETURN(_relaxed,   )
ATOMIC64_OP_SUB_RETURN(_acquire,  a, "memory")
ATOMIC64_OP_SUB_RETURN(_release,  l, "memory")
ATOMIC64_OP_SUB_RETURN(        , al, "memory")

#undef ATOMIC64_OP_SUB_RETURN

#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...)				\
static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v)	\
{									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	neg	%[i], %[i]\n"					\
	"	ldadd" #mb "	%[i], %[i], %[v]"			\
	: [i] "+&r" (i), [v] "+Q" (v->counter)				\
	: "r" (v)							\
	: cl);								\
									\
	return i;							\
}

ATOMIC64_FETCH_OP_SUB(_relaxed,   )
ATOMIC64_FETCH_OP_SUB(_acquire,  a, "memory")
ATOMIC64_FETCH_OP_SUB(_release,  l, "memory")
ATOMIC64_FETCH_OP_SUB(        , al, "memory")

#undef ATOMIC64_FETCH_OP_SUB

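/*
 * Roughly, the loop below works as follows: load the counter, compute
 * value - 1, and bail out (leaving the counter untouched) if the result
 * would be negative.  Otherwise casal attempts to store the decremented
 * value; the two subtractions leave tmp == 0 only if the compare-and-swap
 * observed the value we originally loaded, so a non-zero tmp means another
 * CPU raced with us and we retry.  Note that the register initially
 * holding the v pointer is reused to carry the return value.
 */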
static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long tmp;

	asm volatile(
	__LSE_PREAMBLE
	"1:	ldr	%x[tmp], %[v]\n"
	"	subs	%[ret], %x[tmp], #1\n"
	"	b.lt	2f\n"
	"	casal	%x[tmp], %[ret], %[v]\n"
	"	sub	%x[tmp], %x[tmp], #1\n"
	"	sub	%x[tmp], %x[tmp], %[ret]\n"
	"	cbnz	%x[tmp], 1b\n"
	"2:"
	: [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
	:
	: "cc", "memory");

	return (long)v;
}

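/*
 * cas{a,l,al}{b,h,} compares the value at ptr with the first register
 * operand and, if they are equal, stores the second; in either case the
 * first register is overwritten with the value that was observed in
 * memory, which is exactly what cmpxchg() must return.  The w/x and
 * b/h/"" macro arguments select the register width and access size for
 * the 8/16/32/64-bit cases instantiated below.
 */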
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...)			\
static __always_inline u##sz						\
__lse__cmpxchg_case_##name##sz(volatile void *ptr,			\
					      u##sz old,		\
					      u##sz new)		\
{									\
	register unsigned long x0 asm ("x0") = (unsigned long)ptr;	\
	register u##sz x1 asm ("x1") = old;				\
	register u##sz x2 asm ("x2") = new;				\
	unsigned long tmp;						\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	mov	%" #w "[tmp], %" #w "[old]\n"			\
	"	cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n"	\
	"	mov	%" #w "[ret], %" #w "[tmp]"			\
	: [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr),		\
	  [tmp] "=&r" (tmp)						\
	: [old] "r" (x1), [new] "r" (x2)				\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_CASE(w, b,     ,  8,   )
__CMPXCHG_CASE(w, h,     , 16,   )
__CMPXCHG_CASE(w,  ,     , 32,   )
__CMPXCHG_CASE(x,  ,     , 64,   )
__CMPXCHG_CASE(w, b, acq_,  8,  a, "memory")
__CMPXCHG_CASE(w, h, acq_, 16,  a, "memory")
__CMPXCHG_CASE(w,  , acq_, 32,  a, "memory")
__CMPXCHG_CASE(x,  , acq_, 64,  a, "memory")
__CMPXCHG_CASE(w, b, rel_,  8,  l, "memory")
__CMPXCHG_CASE(w, h, rel_, 16,  l, "memory")
__CMPXCHG_CASE(w,  , rel_, 32,  l, "memory")
__CMPXCHG_CASE(x,  , rel_, 64,  l, "memory")
__CMPXCHG_CASE(w, b,  mb_,  8, al, "memory")
__CMPXCHG_CASE(w, h,  mb_, 16, al, "memory")
__CMPXCHG_CASE(w,  ,  mb_, 32, al, "memory")
__CMPXCHG_CASE(x,  ,  mb_, 64, al, "memory")

#undef __CMPXCHG_CASE

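/*
 * casp compares the {old1, old2} pair against the 128 bits at ptr and,
 * on a match, stores {new1, new2}; the observed pair comes back in
 * x0/x1.  The eor/eor/orr sequence then folds the comparison into a
 * single value, so this helper returns 0 when the swap succeeded and
 * non-zero when the memory contents differed from the expected pair.
 */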
#define __CMPXCHG_DBL(name, mb, cl...)					\
static __always_inline long						\
__lse__cmpxchg_double##name(unsigned long old1,				\
					 unsigned long old2,		\
					 unsigned long new1,		\
					 unsigned long new2,		\
					 volatile void *ptr)		\
{									\
	unsigned long oldval1 = old1;					\
	unsigned long oldval2 = old2;					\
	register unsigned long x0 asm ("x0") = old1;			\
	register unsigned long x1 asm ("x1") = old2;			\
	register unsigned long x2 asm ("x2") = new1;			\
	register unsigned long x3 asm ("x3") = new2;			\
	register unsigned long x4 asm ("x4") = (unsigned long)ptr;	\
									\
	asm volatile(							\
	__LSE_PREAMBLE							\
	"	casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\
	"	eor	%[old1], %[old1], %[oldval1]\n"			\
	"	eor	%[old2], %[old2], %[oldval2]\n"			\
	"	orr	%[old1], %[old1], %[old2]"			\
	: [old1] "+&r" (x0), [old2] "+&r" (x1),				\
	  [v] "+Q" (*(unsigned long *)ptr)				\
	: [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4),		\
	  [oldval1] "r" (oldval1), [oldval2] "r" (oldval2)		\
	: cl);								\
									\
	return x0;							\
}

__CMPXCHG_DBL(   ,   )
__CMPXCHG_DBL(_mb, al, "memory")

#undef __CMPXCHG_DBL

#endif	/* __ASM_ATOMIC_LSE_H */