/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
 * Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
 */

#ifndef _ASM_PARISC_ATOMIC_H_
#define _ASM_PARISC_ATOMIC_H_

#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * And probably incredibly slow on parisc.  OTOH, we don't
 * have to write any serious assembly.   prumpf
 */

#ifdef CONFIG_SMP
#include <asm/spinlock.h>
#include <asm/cache.h>		/* we use L1_CACHE_BYTES */

/* Use an array of spinlocks for our atomic_ts.
 * Hash function to index into a different SPINLOCK.
 * Since "a" is usually an address, use one spinlock per cacheline.
 */
#  define ATOMIC_HASH_SIZE 4
#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))

extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
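
/*
 * Illustrative sketch (not part of the original header): the hash above
 * maps an atomic_t's address to one of the ATOMIC_HASH_SIZE locks, one
 * lock per cache line.  Assuming, for the sake of the example, that
 * L1_CACHE_BYTES == 64 and ATOMIC_HASH_SIZE == 4:
 *
 *	atomic_t *a = (atomic_t *)0x1000;	// cache line 64  -> slot 0
 *	atomic_t *b = (atomic_t *)0x1040;	// cache line 65  -> slot 1
 *	arch_spinlock_t *la = ATOMIC_HASH(a);	// &__atomic_hash[0]
 *	arch_spinlock_t *lb = ATOMIC_HASH(b);	// &__atomic_hash[1]
 *
 * Two atomics in the same cache line always share a lock; atomics in
 * different lines usually do not.
 */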

/* Can't use raw_spin_lock_irq because of #include problems, so
 * this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)

#define _atomic_spin_unlock_irqrestore(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);		\
	arch_spin_unlock(s);				\
	local_irq_restore(f);				\
} while(0)
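
/*
 * Usage sketch (illustrative, not in the original source): every locked
 * primitive below follows the same pattern, bracketing the
 * read-modify-write of v->counter with the hashed lock while interrupts
 * are disabled:
 *
 *	unsigned long flags;
 *
 *	_atomic_spin_lock_irqsave(v, flags);
 *	...modify v->counter...
 *	_atomic_spin_unlock_irqrestore(v, flags);
 */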


#else
#  define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
#  define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif

/*
 * Note that we need not lock read accesses - aligned word writes/reads
 * are atomic, so a reader never sees inconsistent values.
 */

static __inline__ void atomic_set(atomic_t *v, int i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic_set_release(v, i)	atomic_set((v), (i))

static __inline__ int atomic_read(const atomic_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
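
/*
 * Usage sketch (illustrative only): a typical compare-and-swap loop
 * built on atomic_cmpxchg(), which returns the value that was in
 * v->counter before the exchange attempt.  The helper name
 * atomic_add_unless_example() is hypothetical and not part of this
 * header:
 *
 *	static inline int atomic_add_unless_example(atomic_t *v, int a, int u)
 *	{
 *		int c = atomic_read(v);
 *
 *		while (c != u) {
 *			int old = atomic_cmpxchg(v, c, c + a);
 *			if (old == c)
 *				return 1;	// exchange succeeded
 *			c = old;		// retry with the fresh value
 *		}
 *		return 0;			// hit the forbidden value u
 *	}
 */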

#define ATOMIC_OP(op, c_op)						\
static __inline__ void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static __inline__ int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_OP_RETURN(op, c_op)					\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
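
/*
 * For reference (illustrative expansion, not part of the original
 * file): ATOMIC_OPS(add, +=) above generates roughly
 *
 *	static __inline__ void atomic_add(int i, atomic_t *v);
 *	static __inline__ int  atomic_add_return(int i, atomic_t *v);
 *	static __inline__ int  atomic_fetch_add(int i, atomic_t *v);
 *
 * and ATOMIC_OPS(sub, -=) the matching atomic_sub(),
 * atomic_sub_return() and atomic_fetch_sub(), each protected by the
 * hashed spinlock (or plain IRQ disabling on UP).
 */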

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
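
/*
 * Usage sketch (illustrative): the bitwise variants provide only the
 * void and fetch forms (no _return form), e.g. setting a flag bit and
 * learning whether it was already set.  MY_FLAG and state are
 * hypothetical names:
 *
 *	#define MY_FLAG 0x1
 *	atomic_t state = ATOMIC_INIT(0);
 *
 *	if (atomic_fetch_or(MY_FLAG, &state) & MY_FLAG)
 *		;	// the bit was already set before this call
 */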

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#ifdef CONFIG_64BIT

#define ATOMIC64_INIT(i) { (i) }

#define ATOMIC64_OP(op, c_op)						\
static __inline__ void atomic64_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
}

#define ATOMIC64_OP_RETURN(op, c_op)					\
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v)	\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = (v->counter c_op i);					\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	unsigned long flags;						\
	s64 ret;							\
									\
	_atomic_spin_lock_irqsave(v, flags);				\
	ret = v->counter;						\
	v->counter c_op i;						\
	_atomic_spin_unlock_irqrestore(v, flags);			\
									\
	return ret;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_OP_RETURN(op, c_op)					\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)
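
/*
 * Illustrative note (not in the original source): these generators
 * mirror the 32-bit ones above, e.g. ATOMIC64_OPS(add, +=) produces
 * atomic64_add(), atomic64_add_return() and atomic64_fetch_add(), all
 * taking an s64 operand.  A hypothetical counter:
 *
 *	atomic64_t bytes = ATOMIC64_INIT(0);
 *	s64 total = atomic64_add_return(4096, &bytes);
 */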

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

static __inline__ void
atomic64_set(atomic64_t *v, s64 i)
{
	unsigned long flags;
	_atomic_spin_lock_irqsave(v, flags);

	v->counter = i;

	_atomic_spin_unlock_irqrestore(v, flags);
}

#define atomic64_set_release(v, i)	atomic64_set((v), (i))

static __inline__ s64
atomic64_read(const atomic64_t *v)
{
	return READ_ONCE((v)->counter);
}

/* exported interface */
#define atomic64_cmpxchg(v, o, n) \
	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

#endif /* CONFIG_64BIT */


#endif /* _ASM_PARISC_ATOMIC_H_ */