/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic_set(v, i)	WRITE_ONCE(((v)->counter), (i))

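/*
 * Illustrative use (a sketch, not part of this header): the counter is
 * initialised and accessed as a plain int, e.g.
 *
 *	atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	atomic_set(&refcnt, 2);
 *	pr_info("refcnt = %d\n", atomic_read(&refcnt));
 *
 * (assuming ATOMIC_INIT() as provided by <linux/types.h>)
 */
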
/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define	ASM_DI	"d"
#else
#define	ASM_DI	"di"
#endif
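
/*
 * A sketch of the difference (illustrative, not compiler output): with
 * "di" the compiler may encode the operand as an immediate, e.g.
 * "addl #4,(%a0)", while ColdFire's "d" forces it through a data
 * register first, e.g. "movel #4,%d0; addl %d0,(%a0)".
 */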

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}									\

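/*
 * For illustration, ATOMIC_OP(add, +=, add) expands to roughly:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * A single read-modify-write instruction needs no further protection on
 * a UP system.
 */
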
#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}
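
/*
 * Annotation of the loop above: %2 (tmp) holds the value last read from
 * the counter; "movel %2,%1" copies it into %1 (t); the arithmetic op
 * updates %1; and "casl %2,%1,%0" stores %1 only if the counter still
 * equals %2, otherwise casl reloads %2 from memory and "jne 1b" retries.
 * The returned t is the *new* counter value.
 */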

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return tmp;							\
}
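
/*
 * Same casl retry loop as ATOMIC_OP_RETURN() above; the only difference
 * is the return value: tmp, the counter value *before* the operation,
 * as the fetch_* family requires.
 */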

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}
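
/*
 * Without cas (CONFIG_RMW_INSNS unset, e.g. on 68000 or ColdFire),
 * atomicity is provided by masking local interrupts.  As the comment at
 * the top of this file notes, there is no SMP m68k, so excluding local
 * interrupt handlers is sufficient.
 */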

#define ATOMIC_FETCH_OP(op, c_op, asm_op)				\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = v->counter;							\
	v->counter c_op i;						\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)				\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
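
/*
 * The two instantiations above generate atomic_add(), atomic_add_return(),
 * atomic_fetch_add(), atomic_sub(), atomic_sub_return() and
 * atomic_fetch_sub().
 */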

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_FETCH_OP(op, c_op, asm_op)

ATOMIC_OPS(and, &=, and)
ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, eor)
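
/*
 * The bitwise ops only need the void and fetch_ variants, hence the
 * narrower ATOMIC_OPS above; note that xor is spelled "eor" in m68k
 * assembler.
 */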

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define atomic_inc atomic_inc

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define atomic_dec atomic_dec

static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define atomic_dec_and_test atomic_dec_and_test
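
/*
 * "seq %0" sets the byte to all ones when the subtraction left the Z
 * flag set, so "c != 0" reports that the counter reached zero.  Typical
 * (illustrative) use is refcounting, with hypothetical names:
 *
 *	if (atomic_dec_and_test(&obj->refcnt))
 *		free_obj(obj);
 */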

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}
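
/*
 * m68k-specific variant: "slt" tests the less-than condition (N ^ V),
 * so this returns true once the decrement takes the counter below zero.
 * There is no generic atomic_dec_and_test_lt() to override, hence no
 * #define alias here.
 */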

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}
#define atomic_inc_and_test atomic_inc_and_test

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
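
/*
 * With RMW instructions available, the generic cmpxchg()/xchg() helpers
 * from <asm/cmpxchg.h> can be used directly; on these CPUs they are
 * built on casl.
 */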

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}
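
/*
 * Like the irq-masking fallbacks above, these are atomic only with
 * respect to the local CPU, which is all that UP m68k requires.
 */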

#endif /* !CONFIG_RMW_INSNS */

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define atomic_sub_and_test atomic_sub_and_test

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}
#define atomic_add_negative atomic_add_negative
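
/*
 * "smi %0" sets the byte when the addition left the N flag set, i.e.
 * the result went negative; atomic_sub_and_test() above likewise uses
 * "seq" to test for a zero result.
 */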

#endif /* __ARCH_M68K_ATOMIC__ */