/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_ATOMIC_H_
#define _ASM_POWERPC_ATOMIC_H_

/*
 * PowerPC atomic operations
 */

#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Since *_return_relaxed and {cmp,}xchg_relaxed are implemented with
 * a "bne-" instruction at the end, an isync is enough as an acquire
 * barrier on platforms without lwsync.
 */
#define __atomic_acquire_fence()					\
	__asm__ __volatile__(PPC_ACQUIRE_BARRIER "" : : : "memory")

#define __atomic_release_fence()					\
	__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
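
/*
 * Illustrative only: the generic atomic fallbacks combine these fences
 * with the _relaxed operations below to build the _acquire/_release
 * variants, roughly along these lines:
 *
 *	static inline int atomic_add_return_acquire(int i, atomic_t *v)
 *	{
 *		int ret = atomic_add_return_relaxed(i, v);
 *		__atomic_acquire_fence();
 *		return ret;
 *	}
 */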

static __inline__ int atomic_read(const atomic_t *v)
{
	int t;

	__asm__ __volatile__("lwz%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic_set(atomic_t *v, int i)
{
	__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int a, atomic_t *v)			\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v)	\
{									\
	int t;								\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%3		# atomic_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stwcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC_FETCH_OP_RELAXED(op, asm_op)				\
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v)	\
{									\
	int res, t;							\
									\
	__asm__ __volatile__(						\
"1:	lwarx	%0,0,%4		# atomic_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stwcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf)
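
/*
 * For reference (a sketch, not extra definitions): ATOMIC_OPS(add, add)
 * above expands to three functions built on the lwarx/stwcx. loop:
 *
 *	static inline void atomic_add(int a, atomic_t *v);
 *	static inline int atomic_add_return_relaxed(int a, atomic_t *v);
 *	static inline int atomic_fetch_add_relaxed(int a, atomic_t *v);
 *
 * lwarx sets a reservation on v->counter and stwcx. stores only if the
 * reservation still stands; "bne- 1b" retries the update otherwise.
 */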

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed

#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op)						\
	ATOMIC_OP(op, asm_op)						\
	ATOMIC_FETCH_OP_RELAXED(op, asm_op)

ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP

static __inline__ void atomic_inc(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_inc atomic_inc

static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic_dec(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic_dec atomic_dec

static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
"1:	lwarx	%0,0,%2		# atomic_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stwcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic_inc_return_relaxed atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed

#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/*
 * We don't want to override the generic atomic_try_cmpxchg_acquire,
 * because we add a lock hint to the lwarx, which may not be wanted for
 * the _acquire case (and is not used by the other _acquire variants, so
 * it would be a surprise).
 */
static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	__asm__ __volatile__ (
"1:\t"	PPC_LWARX(%0,0,%2,1) "	# atomic_try_cmpxchg_lock\n"
"	cmpw	0,%0,%3\n"
"	bne-	2f\n"
"	stwcx.	%4,0,%2\n"
"	bne-	1b\n"
"\t"	PPC_ACQUIRE_BARRIER "\n"
"2:\n"
	: "=&r" (r), "+m" (v->counter)
	: "r" (&v->counter), "r" (o), "r" (new)
	: "cr0", "memory");

	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
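
/*
 * Usage sketch (illustrative; the lock structure, field and constant
 * names are hypothetical, loosely modelled on a queued-spinlock fast
 * path):
 *
 *	int val = 0;
 *
 *	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, LOCKED)))
 *		return;				(uncontended: acquired)
 *	queued_spin_lock_slowpath(lock, val);	(val holds the old value)
 */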

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_fetch_add_unless\n"
"	cmpw	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
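
/*
 * Usage sketch (illustrative): the generic fallback builds
 * atomic_inc_not_zero() on top of this as, roughly,
 *
 *	atomic_fetch_add_unless(v, 1, 0) != 0
 *
 * powerpc instead provides the hand-tuned version below.
 */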

/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic_inc_not_zero(atomic_t *v)
{
	int t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%2		# atomic_inc_not_zero\n"
"	cmpwi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stwcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1;
}
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static __inline__ int atomic_dec_if_positive(atomic_t *v)
{
	int t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	lwarx	%0,0,%1		# atomic_dec_if_positive\n"
"	cmpwi	%0,1\n"
"	addi	%0,%0,-1\n"
"	blt-	2f\n"
"	stwcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&b" (t)
	: "r" (&v->counter)
	: "cc", "memory");

	return t;
}
#define atomic_dec_if_positive atomic_dec_if_positive
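
/*
 * Usage sketch (illustrative, hypothetical field names): a trylock on a
 * counting semaphore, where a negative result means the count was
 * already zero and nothing was decremented:
 *
 *	if (atomic_dec_if_positive(&sem->count) >= 0)
 *		return 0;		(acquired)
 *	return -EAGAIN;			(count was already 0, *v unchanged)
 */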

#ifdef __powerpc64__

#define ATOMIC64_INIT(i)	{ (i) }

static __inline__ s64 atomic64_read(const atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__("ld%U1%X1 %0,%1" : "=r"(t) : "m"(v->counter));

	return t;
}

static __inline__ void atomic64_set(atomic64_t *v, s64 i)
{
	__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"(v->counter) : "r"(i));
}

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 a, atomic64_t *v)		\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "\n"			\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
}

#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 t;								\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%3		# atomic64_" #op "_return_relaxed\n"	\
	#asm_op " %0,%2,%0\n"						\
"	stdcx.	%0,0,%3\n"						\
"	bne-	1b\n"							\
	: "=&r" (t), "+m" (v->counter)					\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return t;							\
}

#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op)				\
static inline s64							\
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v)			\
{									\
	s64 res, t;							\
									\
	__asm__ __volatile__(						\
"1:	ldarx	%0,0,%4		# atomic64_fetch_" #op "_relaxed\n"	\
	#asm_op " %1,%3,%0\n"						\
"	stdcx.	%1,0,%4\n"						\
"	bne-	1b\n"							\
	: "=&r" (res), "=&r" (t), "+m" (v->counter)			\
	: "r" (a), "r" (&v->counter)					\
	: "cc");							\
									\
	return res;							\
}

#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_OP_RETURN_RELAXED(op, asm_op)				\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf)
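
/*
 * As above (a sketch): ATOMIC64_OPS(add, add) generates the 64-bit
 * counterparts, using ldarx/stdcx. in place of lwarx/stwcx.:
 *
 *	static inline void atomic64_add(s64 a, atomic64_t *v);
 *	static inline s64 atomic64_add_return_relaxed(s64 a, atomic64_t *v);
 *	static inline s64 atomic64_fetch_add_relaxed(s64 a, atomic64_t *v);
 */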

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed

#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op)					\
	ATOMIC64_OP(op, asm_op)						\
	ATOMIC64_FETCH_OP_RELAXED(op, asm_op)

ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor)

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP

static __inline__ void atomic64_inc(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_inc atomic64_inc

static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_inc_return_relaxed\n"
"	addic	%0,%0,1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

static __inline__ void atomic64_dec(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");
}
#define atomic64_dec atomic64_dec

static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
"1:	ldarx	%0,0,%2		# atomic64_dec_return_relaxed\n"
"	addic	%0,%0,-1\n"
"	stdcx.	%0,0,%2\n"
"	bne-	1b"
	: "=&r" (t), "+m" (v->counter)
	: "r" (&v->counter)
	: "cc", "xer");

	return t;
}

#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed

/*
 * Atomically test *v and decrement if it is greater than 0.
 * The function returns the old value of *v minus 1.
 */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 t;

	__asm__ __volatile__(
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_dec_if_positive\n"
"	addic.	%0,%0,-1\n"
"	blt-	2f\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (t)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive

#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_cmpxchg_relaxed(v, o, n) \
	cmpxchg_relaxed(&((v)->counter), (o), (n))
#define atomic64_cmpxchg_acquire(v, o, n) \
	cmpxchg_acquire(&((v)->counter), (o), (n))

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new))

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 t;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%1		# atomic64_fetch_add_unless\n"
"	cmpd	0,%0,%3\n"
"	beq	2f\n"
"	add	%0,%2,%0\n"
"	stdcx.	%0,0,%1\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"	subf	%0,%2,%0\n"
"2:"
	: "=&r" (t)
	: "r" (&v->counter), "r" (a), "r" (u)
	: "cc", "memory");

	return t;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/**
 * atomic64_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1, so long as @v is non-zero.
 * Returns non-zero if @v was non-zero, and zero otherwise.
 */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
	s64 t1, t2;

	__asm__ __volatile__ (
	PPC_ATOMIC_ENTRY_BARRIER
"1:	ldarx	%0,0,%2		# atomic64_inc_not_zero\n"
"	cmpdi	0,%0,0\n"
"	beq-	2f\n"
"	addic	%1,%0,1\n"
"	stdcx.	%1,0,%2\n"
"	bne-	1b\n"
	PPC_ATOMIC_EXIT_BARRIER
"\n2:"
	: "=&r" (t1), "=&r" (t2)
	: "r" (&v->counter)
	: "cc", "xer", "memory");

	return t1 != 0;
}
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v))

#endif /* __powerpc64__ */

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */