xref: /OK3568_Linux_fs/kernel/arch/ia64/include/asm/atomic.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_ATOMIC_H
#define _ASM_IA64_ATOMIC_H

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc.
 *
 * NOTE: don't mess with the types below!  The "unsigned long" and
 * "int" types were carefully placed so as to ensure proper operation
 * of the macros.
 *
 * Copyright (C) 1998, 1999, 2002-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/types.h>

#include <asm/intrinsics.h>
#include <asm/barrier.h>


#define ATOMIC64_INIT(i)	{ (i) }

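/*
 * Plain loads/stores of the counter: READ_ONCE()/WRITE_ONCE() keep the
 * compiler from tearing, caching or reordering the access, but imply no
 * memory barriers.
 */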
#define atomic_read(v)		READ_ONCE((v)->counter)
#define atomic64_read(v)	READ_ONCE((v)->counter)

#define atomic_set(v,i)		WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i)	WRITE_ONCE(((v)->counter), (i))

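/*
 * Generic update: compute the new value from a snapshot of the counter
 * and retry with an acquire cmpxchg until no other CPU raced with us.
 * CMPXCHG_BUGCHECK() is a debug hook (a no-op unless
 * CONFIG_IA64_DEBUG_CMPXCHG is enabled) that catches loops failing to
 * make forward progress.
 */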
#define ATOMIC_OP(op, c_op)						\
static __inline__ int							\
ia64_atomic_##op (int i, atomic_t *v)					\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return new;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static __inline__ int							\
ia64_atomic_fetch_##op (int i, atomic_t *v)				\
{									\
	__s32 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
	return old;							\
}

#define ATOMIC_OPS(op, c_op)						\
	ATOMIC_OP(op, c_op)						\
	ATOMIC_FETCH_OP(op, c_op)

ATOMIC_OPS(add, +)
ATOMIC_OPS(sub, -)

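/*
 * ia64's fetchadd instruction can only encode the immediates -16, -8,
 * -4, -1, 1, 4, 8 and 16.  When optimizing, this expression folds to 1
 * for such compile-time constants and to 0 otherwise, steering the
 * macros below between the fetchadd fast path and the cmpxchg loop.
 */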
#ifdef __OPTIMIZE__
#define __ia64_atomic_const(i)						\
	static const int __ia64_atomic_p = __builtin_constant_p(i) ?	\
		((i) == 1 || (i) == 4 || (i) == 8 || (i) == 16 ||	\
		 (i) == -1 || (i) == -4 || (i) == -8 || (i) == -16) : 0;\
	__ia64_atomic_p
#else
#define __ia64_atomic_const(i)	0
#endif

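/*
 * Fast path when the delta is a fetchadd-encodable constant:
 * ia64_fetch_and_add() returns the new value (for the *_return() forms),
 * while ia64_fetchadd() returns the old one (for the fetch_*() forms).
 * Everything else takes the cmpxchg loop above.
 */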
#define atomic_add_return(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic_add(__ia64_aar_i, v);			\
})

#define atomic_sub_return(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic_sub(__ia64_asr_i, v);			\
})

#define atomic_fetch_add(i,v)						\
({									\
	int __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_add(__ia64_aar_i, v);		\
})

#define atomic_fetch_sub(i,v)						\
({									\
	int __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic_fetch_sub(__ia64_asr_i, v);		\
})

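/* No fetchadd form exists for and/or/xor; these always use the cmpxchg loop. */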
ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and(i,v)	(void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v)	(void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v)	(void)ia64_atomic_fetch_xor(i,v)

#define atomic_fetch_and(i,v)	ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v)	ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v)	ia64_atomic_fetch_xor(i,v)

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP

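/*
 * The 64-bit counterparts mirror the 32-bit versions above, using s64
 * arithmetic and an 8-byte cmpxchg.
 */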
#define ATOMIC64_OP(op, c_op)						\
static __inline__ s64							\
ia64_atomic64_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return new;							\
}

#define ATOMIC64_FETCH_OP(op, c_op)					\
static __inline__ s64							\
ia64_atomic64_fetch_##op (s64 i, atomic64_t *v)				\
{									\
	s64 old, new;							\
	CMPXCHG_BUGCHECK_DECL						\
									\
	do {								\
		CMPXCHG_BUGCHECK(v);					\
		old = atomic64_read(v);					\
		new = old c_op i;					\
	} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
	return old;							\
}

#define ATOMIC64_OPS(op, c_op)						\
	ATOMIC64_OP(op, c_op)						\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -)

#define atomic64_add_return(i,v)					\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(__ia64_aar_i, &(v)->counter)	\
		: ia64_atomic64_add(__ia64_aar_i, v);			\
})

#define atomic64_sub_return(i,v)					\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetch_and_add(-__ia64_asr_i, &(v)->counter)	\
		: ia64_atomic64_sub(__ia64_asr_i, v);			\
})

#define atomic64_fetch_add(i,v)						\
({									\
	s64 __ia64_aar_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(__ia64_aar_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_add(__ia64_aar_i, v);		\
})

#define atomic64_fetch_sub(i,v)						\
({									\
	s64 __ia64_asr_i = (i);						\
	__ia64_atomic_const(i)						\
		? ia64_fetchadd(-__ia64_asr_i, &(v)->counter, acq)	\
		: ia64_atomic64_fetch_sub(__ia64_asr_i, v);		\
})

ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^)

#define atomic64_and(i,v)	(void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v)	(void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v)	(void)ia64_atomic64_fetch_xor(i,v)

#define atomic64_fetch_and(i,v)	ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v)	ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v)	ia64_atomic64_fetch_xor(i,v)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP

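/* Exchange and compare-and-exchange operate directly on the counter word. */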
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

#define atomic64_cmpxchg(v, old, new) \
	(cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

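/* The non-value-returning forms just discard the *_return() result. */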
#define atomic_add(i,v)			(void)atomic_add_return((i), (v))
#define atomic_sub(i,v)			(void)atomic_sub_return((i), (v))

#define atomic64_add(i,v)		(void)atomic64_add_return((i), (v))
#define atomic64_sub(i,v)		(void)atomic64_sub_return((i), (v))

#endif /* _ASM_IA64_ATOMIC_H */