/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ALPHA_ATOMIC_H
#define _ALPHA_ATOMIC_H

#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc...
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 */

/*
 * To ensure dependency ordering is preserved for the _relaxed and
 * _release atomics, an smp_mb() is unconditionally inserted into the
 * _relaxed variants, which are used to build the barriered versions.
 * Avoid redundant back-to-back fences in the _acquire and _fence
 * versions.
 */
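/*
 * Both hooks below expand to nothing: the smp_mb() at the tail of
 * every _relaxed operation already provides the ordering that an
 * acquire or post-full fence would otherwise have to add.
 */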
#define __atomic_acquire_fence()
#define __atomic_post_full_fence()

#define ATOMIC64_INIT(i)	{ (i) }

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op, asm_op)						\
static __inline__ void atomic_##op(int i, atomic_t * v)		\
{									\
	unsigned long temp;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

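/*
 * For illustration, ATOMIC_OP(add, addl) above expands to roughly:
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long temp;
 *		__asm__ __volatile__(
 *		"1:	ldl_l %0,%1\n"		load-locked v->counter into temp
 *		"	addl %0,%2,%0\n"	temp += i
 *		"	stl_c %0,%1\n"		store-conditional; temp <- 0 on failure
 *		"	beq %0,2f\n"		reservation lost: retry out of line
 *		".subsection 2\n"
 *		"2:	br 1b\n"
 *		".previous"
 *		:"=&r" (temp), "=m" (v->counter)
 *		:"Ir" (i), "m" (v->counter));
 *	}
 *
 * The ldl_l/stl_c pair is Alpha's load-locked/store-conditional;
 * stl_c writes its success flag back into the source register, so
 * temp doubles as the retry condition.
 */
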
#define ATOMIC_OP_RETURN(op, asm_op)					\
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
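
/*
 * Note the op is issued twice above: once into result (the value to
 * return) and once into temp, so that stl_c can consume temp, whose
 * success flag then drives the retry branch, while result survives
 * holding the *new* value of the counter.
 */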

#define ATOMIC_FETCH_OP(op, asm_op)					\
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v)	\
{									\
	long temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldl_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stl_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}
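
/*
 * Unlike the _return variant, the fetch variant loads straight into
 * result, so the caller gets the *old* value; only temp (old op i)
 * is stored back.
 */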

#define ATOMIC64_OP(op, asm_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t * v)		\
{									\
	s64 temp;							\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%2,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter)				\
	:"Ir" (i), "m" (v->counter));					\
}									\

#define ATOMIC64_OP_RETURN(op, asm_op)					\
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v)	\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %0,%1\n"						\
	"	" #asm_op " %0,%3,%2\n"					\
	"	" #asm_op " %0,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC64_FETCH_OP(op, asm_op)					\
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v)	\
{									\
	s64 temp, result;						\
	__asm__ __volatile__(						\
	"1:	ldq_l %2,%1\n"						\
	"	" #asm_op " %2,%3,%0\n"					\
	"	stq_c %0,%1\n"						\
	"	beq %0,2f\n"						\
	".subsection 2\n"						\
	"2:	br 1b\n"						\
	".previous"							\
	:"=&r" (temp), "=m" (v->counter), "=&r" (result)		\
	:"Ir" (i), "m" (v->counter) : "memory");			\
	smp_mb();							\
	return result;							\
}

#define ATOMIC_OPS(op)							\
	ATOMIC_OP(op, op##l)						\
	ATOMIC_OP_RETURN(op, op##l)					\
	ATOMIC_FETCH_OP(op, op##l)					\
	ATOMIC64_OP(op, op##q)						\
	ATOMIC64_OP_RETURN(op, op##q)					\
	ATOMIC64_FETCH_OP(op, op##q)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)
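
/*
 * ATOMIC_OPS(add) above therefore defines atomic_add(),
 * atomic_add_return_relaxed(), atomic_fetch_add_relaxed() and their
 * atomic64_* counterparts (using addl/addq), and likewise for sub.
 */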

#define atomic_add_return_relaxed	atomic_add_return_relaxed
#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed

#define atomic_andnot			atomic_andnot
#define atomic64_andnot			atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm)						\
	ATOMIC_OP(op, asm)						\
	ATOMIC_FETCH_OP(op, asm)					\
	ATOMIC64_OP(op, asm)						\
	ATOMIC64_FETCH_OP(op, asm)

ATOMIC_OPS(and, and)
ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)
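
/*
 * On Alpha, bis (bit set) is the inclusive-or instruction and bic
 * (bit clear) is and-with-complement, which is why they implement
 * the generic or and andnot operations here.
 */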

#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
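
/*
 * A sketch of typical use: cmpxchg returns the value it read, so a
 * lock-free read-modify-write loop looks like
 *
 *	int old, new;
 *	do {
 *		old = atomic_read(v);
 *		new = old + 1;
 *	} while (atomic_cmpxchg(v, old, new) != old);
 *
 * (illustrative only; most callers reach these through the generic
 * atomic API rather than open-coding such loops).
 */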

/**
 * atomic_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldl_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addl	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stl_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
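
/*
 * Typical use (a sketch): take a reference only if the object is
 * still live, i.e. its count has not already dropped to zero:
 *
 *	if (atomic_fetch_add_unless(&obj->refcount, 1, 0) == 0)
 *		return NULL;	object already dead, don't touch it
 *
 * (obj->refcount is a hypothetical field, for illustration only.)
 */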

/**
 * atomic64_fetch_add_unless - add unless the number is a given value
 * @v: pointer of type atomic64_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v.
 */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
	s64 c, new, old;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	cmpeq	%[old],%[u],%[c]\n"
	"	addq	%[old],%[a],%[new]\n"
	"	bne	%[c],2f\n"
	"	stq_c	%[new],%[mem]\n"
	"	beq	%[new],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
	: "memory");
	smp_mb();
	return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 *
 * The function returns the old value of *v minus 1, even if
 * the atomic variable, v, was not decremented.
 */
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
	s64 old, tmp;
	smp_mb();
	__asm__ __volatile__(
	"1:	ldq_l	%[old],%[mem]\n"
	"	subq	%[old],1,%[tmp]\n"
	"	ble	%[old],2f\n"
	"	stq_c	%[tmp],%[mem]\n"
	"	beq	%[tmp],3f\n"
	"2:\n"
	".subsection 2\n"
	"3:	br	1b\n"
	".previous"
	: [old] "=&r"(old), [tmp] "=&r"(tmp)
	: [mem] "m"(*v)
	: "memory");
	smp_mb();
	return old - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
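
/*
 * Because old - 1 is returned even when no decrement happened, a
 * caller can distinguish the two cases by sign, e.g. (a sketch,
 * with a hypothetical counter):
 *
 *	if (atomic64_dec_if_positive(&sem_count) < 0)
 *		go_to_sleep();	was already <= 0, not decremented
 */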

#endif /* _ALPHA_ATOMIC_H */