xref: /OK3568_Linux_fs/kernel/arch/arm64/include/asm/atomic_ll_sc.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/atomic.h
 *
 * Copyright (C) 1996 Russell King.
 * Copyright (C) 2002 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#ifndef __ASM_ATOMIC_LL_SC_H
#define __ASM_ATOMIC_LL_SC_H

#include <linux/stringify.h>

#ifdef CONFIG_ARM64_LSE_ATOMICS
#define __LL_SC_FALLBACK(asm_ops)					\
"	b	3f\n"							\
"	.subsection	1\n"						\
"3:\n"									\
asm_ops "\n"								\
"	b	4f\n"							\
"	.previous\n"							\
"4:\n"
#else
#define __LL_SC_FALLBACK(asm_ops) asm_ops
#endif
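
/*
 * With LSE atomics configured, the LL/SC sequence is emitted into
 * .subsection 1 so that the (normally unused) fallback loop stays out
 * of the straight-line instruction stream. A rough, hand-written
 * sketch of the resulting layout:
 *
 *		b	3f		// branch to the out-of-line fallback
 *		.subsection	1	// assembled away from the hot text
 *	3:	<asm_ops>		// the LL/SC loop itself
 *		b	4f		// rejoin the main section
 *		.previous
 *	4:
 */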

#ifndef CONFIG_CC_HAS_K_CONSTRAINT
#define K
#endif
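
/*
 * When the compiler lacks a usable 'K' constraint, the empty #define
 * above makes a 'K' passed as 'constraint' expand to nothing, so that
 * __stringify(constraint) yields "" and the input operand degrades to
 * a plain "r" (register) constraint.
 */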

/*
 * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
 * store exclusive to ensure that these are atomic.  We may loop
 * to ensure that the update happens.
 */

#define ATOMIC_OP(op, asm_op, constraint)				\
static inline void							\
__ll_sc_atomic_##op(int i, atomic_t *v)					\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "\n"				\
	__LL_SC_FALLBACK(						\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%w0, %2\n"						\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	stxr	%w1, %w0, %2\n"						\
"	cbnz	%w1, 1b\n")						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}
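
/*
 * As a hand-expanded sketch (ignoring the __LL_SC_FALLBACK() wrapping),
 * ATOMIC_OP(add, add, I) generates:
 *
 *	static inline void __ll_sc_atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *		int result;
 *
 *		asm volatile("// atomic_add\n"
 *		"	prfm	pstl1strm, %2\n"	// prefetch for store
 *		"1:	ldxr	%w0, %2\n"		// load-exclusive counter
 *		"	add	%w0, %w0, %w3\n"	// result += i
 *		"	stxr	%w1, %w0, %2\n"		// try store-exclusive
 *		"	cbnz	%w1, 1b\n"		// retry on lost exclusivity
 *		: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
 *		: "Ir" (i));	// __stringify(I) "r" pastes to "Ir"
 *	}
 */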

#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline int							\
__ll_sc_atomic_##op##_return##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int result;							\
									\
	asm volatile("// atomic_" #op "_return" #name "\n"		\
	__LL_SC_FALLBACK(						\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%w0, %2\n"					\
"	" #asm_op "	%w0, %w0, %w3\n"				\
"	st" #rel "xr	%w1, %w0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb )								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}
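
/*
 * The extra template parameters select the ordering variant: 'acq'
 * adds the acquire flavour to the load-exclusive (ld{a}xr), 'rel' the
 * release flavour to the store-exclusive (st{l}xr), 'mb' appends an
 * optional trailing barrier and 'cl' declares the clobbers. Note that
 * the fully ordered variant (instantiated below) uses a release store
 * plus a trailing "dmb ish" rather than an acquire/release pair, since
 * the latter does not provide full-barrier semantics on arm64.
 */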

#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \
static inline int							\
__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
	int val, result;						\
									\
	asm volatile("// atomic_fetch_" #op #name "\n"			\
	__LL_SC_FALLBACK(						\
"	prfm	pstl1strm, %3\n"					\
"1:	ld" #acq "xr	%w0, %3\n"					\
"	" #asm_op "	%w1, %w0, %w4\n"				\
"	st" #rel "xr	%w2, %w1, %3\n"					\
"	cbnz	%w2, 1b\n"						\
"	" #mb )								\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}
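
/*
 * Unlike the _return variants, the fetch variants hand back the value
 * observed *before* the update: %0 ('result') keeps the loaded value,
 * while the new value is computed into the separate %1 ('val') register
 * that feeds the store-exclusive.
 */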

#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_OP_RETURN(        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_OP_RETURN(_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_OP_RETURN(_release,        ,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(add, add, I)
ATOMIC_OPS(sub, sub, J)
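
/*
 * 'I' restricts the immediate to one encodable in an ADD instruction
 * and 'J' to one encodable (after negation) in a SUB, so the compiler
 * may substitute an immediate form of the arithmetic where the value
 * allows. Likewise, the 'K' and 'L' constraints used further down
 * accept 32-bit and 64-bit logical (bitmask) immediates respectively.
 */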

#undef ATOMIC_OPS
#define ATOMIC_OPS(...)							\
	ATOMIC_OP(__VA_ARGS__)						\
	ATOMIC_FETCH_OP (        , dmb ish,  , l, "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_relaxed,        ,  ,  ,         , __VA_ARGS__)\
	ATOMIC_FETCH_OP (_acquire,        , a,  , "memory", __VA_ARGS__)\
	ATOMIC_FETCH_OP (_release,        ,  , l, "memory", __VA_ARGS__)

ATOMIC_OPS(and, and, K)
ATOMIC_OPS(or, orr, K)
ATOMIC_OPS(xor, eor, K)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC_OPS(andnot, bic, )

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define ATOMIC64_OP(op, asm_op, constraint)				\
static inline void							\
__ll_sc_atomic64_##op(s64 i, atomic64_t *v)				\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "\n"				\
	__LL_SC_FALLBACK(						\
"	prfm	pstl1strm, %2\n"					\
"1:	ldxr	%0, %2\n"						\
"	" #asm_op "	%0, %0, %3\n"					\
"	stxr	%w1, %0, %2\n"						\
"	cbnz	%w1, 1b")						\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i));				\
}
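
/*
 * The atomic64_t variants follow the 32-bit templates above, but
 * operate on the full 64-bit X registers: the data operands drop the
 * 'w' modifier (%0, %3), while the store-exclusive status flag (%w1)
 * stays a 32-bit W register.
 */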

#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long							\
__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v)		\
{									\
	s64 result;							\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_" #op "_return" #name "\n"		\
	__LL_SC_FALLBACK(						\
"	prfm	pstl1strm, %2\n"					\
"1:	ld" #acq "xr	%0, %2\n"					\
"	" #asm_op "	%0, %0, %3\n"					\
"	st" #rel "xr	%w1, %0, %2\n"					\
"	cbnz	%w1, 1b\n"						\
"	" #mb )								\
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)		\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\
static inline long							\
__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v)		\
{									\
	s64 result, val;						\
	unsigned long tmp;						\
									\
	asm volatile("// atomic64_fetch_" #op #name "\n"		\
	__LL_SC_FALLBACK(						\
"	prfm	pstl1strm, %3\n"					\
"1:	ld" #acq "xr	%0, %3\n"					\
"	" #asm_op "	%1, %0, %4\n"					\
"	st" #rel "xr	%w2, %1, %3\n"					\
"	cbnz	%w2, 1b\n"						\
"	" #mb )								\
	: "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter)	\
	: __stringify(constraint) "r" (i)				\
	: cl);								\
									\
	return result;							\
}

#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_OP_RETURN(, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_OP_RETURN(_release,,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(add, add, I)
ATOMIC64_OPS(sub, sub, J)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(...)						\
	ATOMIC64_OP(__VA_ARGS__)					\
	ATOMIC64_FETCH_OP (, dmb ish,  , l, "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_relaxed,,  ,  ,         , __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_acquire,, a,  , "memory", __VA_ARGS__)	\
	ATOMIC64_FETCH_OP (_release,,  , l, "memory", __VA_ARGS__)

ATOMIC64_OPS(and, and, L)
ATOMIC64_OPS(or, orr, L)
ATOMIC64_OPS(xor, eor, L)
/*
 * GAS converts the mysterious and undocumented BIC (immediate) alias to
 * an AND (immediate) instruction with the immediate inverted. We don't
 * have a constraint for this, so fall back to register.
 */
ATOMIC64_OPS(andnot, bic, )

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static inline s64
__ll_sc_atomic64_dec_if_positive(atomic64_t *v)
{
	s64 result;
	unsigned long tmp;

	asm volatile("// atomic64_dec_if_positive\n"
	__LL_SC_FALLBACK(
"	prfm	pstl1strm, %2\n"
"1:	ldxr	%0, %2\n"
"	subs	%0, %0, #1\n"
"	b.lt	2f\n"
"	stlxr	%w1, %0, %2\n"
"	cbnz	%w1, 1b\n"
"	dmb	ish\n"
"2:")
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:
	: "cc", "memory");

	return result;
}
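
/*
 * For reference, a non-atomic C sketch of the semantics implemented by
 * the LL/SC loop above (names are illustrative only):
 *
 *	s64 dec_if_positive(atomic64_t *v)
 *	{
 *		s64 new = v->counter - 1;
 *
 *		if (new >= 0)
 *			v->counter = new;	// store only if still >= 0
 *		return new;			// < 0 means no update
 *	}
 *
 * The "dmb ish" runs only on the successful-store path; a negative
 * result branches straight to label 2, past the barrier.
 */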

#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint)	\
static inline u##sz							\
__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr,			\
					 unsigned long old,		\
					 u##sz new)			\
{									\
	unsigned long tmp;						\
	u##sz oldval;							\
									\
	/*								\
	 * Sub-word sizes require explicit casting so that the compare  \
	 * part of the cmpxchg doesn't end up interpreting non-zero	\
	 * upper bits of the register containing "old".			\
	 */								\
	if (sz < 32)							\
		old = (u##sz)old;					\
									\
	asm volatile(							\
	__LL_SC_FALLBACK(						\
	"	prfm	pstl1strm, %[v]\n"				\
	"1:	ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n"		\
	"	eor	%" #w "[tmp], %" #w "[oldval], %" #w "[old]\n"	\
	"	cbnz	%" #w "[tmp], 2f\n"				\
	"	st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n"	\
	"	cbnz	%w[tmp], 1b\n"					\
	"	" #mb "\n"						\
	"2:")								\
	: [tmp] "=&r" (tmp), [oldval] "=&r" (oldval),			\
	  [v] "+Q" (*(u##sz *)ptr)					\
	: [old] __stringify(constraint) "r" (old), [new] "r" (new)	\
	: cl);								\
									\
	return oldval;							\
}
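
/*
 * For instance, the first instantiation below,
 * __CMPXCHG_CASE(w, b, , 8, , , , , K), produces
 * __ll_sc__cmpxchg_case_8(): an unordered byte-wide compare-and-exchange
 * that returns the value found at *ptr; the exchange took place iff that
 * value equals 'old'.
 */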

/*
 * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly
 * handle the 'K' constraint for the value 4294967295 - thus we use no
 * constraint for 32 bit operations.
 */
__CMPXCHG_CASE(w, b,     ,  8,        ,  ,  ,         , K)
__CMPXCHG_CASE(w, h,     , 16,        ,  ,  ,         , K)
__CMPXCHG_CASE(w,  ,     , 32,        ,  ,  ,         , K)
__CMPXCHG_CASE( ,  ,     , 64,        ,  ,  ,         , L)
__CMPXCHG_CASE(w, b, acq_,  8,        , a,  , "memory", K)
__CMPXCHG_CASE(w, h, acq_, 16,        , a,  , "memory", K)
__CMPXCHG_CASE(w,  , acq_, 32,        , a,  , "memory", K)
__CMPXCHG_CASE( ,  , acq_, 64,        , a,  , "memory", L)
__CMPXCHG_CASE(w, b, rel_,  8,        ,  , l, "memory", K)
__CMPXCHG_CASE(w, h, rel_, 16,        ,  , l, "memory", K)
__CMPXCHG_CASE(w,  , rel_, 32,        ,  , l, "memory", K)
__CMPXCHG_CASE( ,  , rel_, 64,        ,  , l, "memory", L)
__CMPXCHG_CASE(w, b,  mb_,  8, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w, h,  mb_, 16, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE(w,  ,  mb_, 32, dmb ish,  , l, "memory", K)
__CMPXCHG_CASE( ,  ,  mb_, 64, dmb ish,  , l, "memory", L)

#undef __CMPXCHG_CASE

#define __CMPXCHG_DBL(name, mb, rel, cl)				\
static inline long							\
__ll_sc__cmpxchg_double##name(unsigned long old1,			\
				      unsigned long old2,		\
				      unsigned long new1,		\
				      unsigned long new2,		\
				      volatile void *ptr)		\
{									\
	unsigned long tmp, ret;						\
									\
	asm volatile("// __cmpxchg_double" #name "\n"			\
	__LL_SC_FALLBACK(						\
	"	prfm	pstl1strm, %2\n"				\
	"1:	ldxp	%0, %1, %2\n"					\
	"	eor	%0, %0, %3\n"					\
	"	eor	%1, %1, %4\n"					\
	"	orr	%1, %0, %1\n"					\
	"	cbnz	%1, 2f\n"					\
	"	st" #rel "xp	%w0, %5, %6, %2\n"			\
	"	cbnz	%w0, 1b\n"					\
	"	" #mb "\n"						\
	"2:")								\
	: "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr)	\
	: "r" (old1), "r" (old2), "r" (new1), "r" (new2)		\
	: cl);								\
									\
	return ret;							\
}

__CMPXCHG_DBL(   ,        ,  ,         )
__CMPXCHG_DBL(_mb, dmb ish, l, "memory")
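
/*
 * The return convention falls out of the eor/orr sequence above:
 * __ll_sc__cmpxchg_double() yields 0 when both loaded words matched
 * old1/old2 and the store-exclusive pair succeeded, and non-zero when
 * either comparison failed (a lost exclusive simply retries the loop).
 */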

#undef __CMPXCHG_DBL
#undef K

#endif	/* __ASM_ATOMIC_LL_SC_H */