/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright IBM Corp. 1999, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Denis Joseph Barrow,
 *	      Arnd Bergmann,
 */

#ifndef __ARCH_S390_ATOMIC__
#define __ARCH_S390_ATOMIC__

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/atomic_ops.h>
#include <asm/barrier.h>
#include <asm/cmpxchg.h>

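/*
 * atomic_read()/atomic_set() compile to a single 32-bit load ("l") and
 * store ("st"). The asm volatile with a "Q" memory operand forces a real
 * memory access, so the compiler cannot cache or elide the counter value.
 */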
static inline int atomic_read(const atomic_t *v)
{
	int c;

	asm volatile(
		"	l	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic_set(atomic_t *v, int i)
{
	asm volatile(
		"	st	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

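/*
 * __atomic_add_barrier() (asm/atomic_ops.h) is a fully ordered add that
 * returns the previous counter value, so atomic_add_return() adds i once
 * more to yield the new value, while atomic_fetch_add() returns the old
 * value unchanged.
 */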
static inline int atomic_add_return(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter) + i;
}

static inline int atomic_fetch_add(int i, atomic_t *v)
{
	return __atomic_add_barrier(i, &v->counter);
}

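/*
 * If the operand is a compile-time constant that fits a signed 8-bit
 * immediate (-128..127), __atomic_add_const() can be used; on z196 and
 * newer it maps to an interlocked add-immediate ("asi") that updates the
 * counter in storage without needing the old value in a register.
 */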
static inline void atomic_add(int i, atomic_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic_add_const(i, &v->counter);
		return;
	}
#endif
	__atomic_add(i, &v->counter);
}

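/* Subtraction is expressed as addition of the negated operand. */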
#define atomic_sub(_i, _v)		atomic_add(-(int)(_i), _v)
#define atomic_sub_return(_i, _v)	atomic_add_return(-(int)(_i), _v)
#define atomic_fetch_sub(_i, _v)	atomic_fetch_add(-(int)(_i), _v)

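/*
 * ATOMIC_OPS() generates atomic_{and,or,xor}() and the corresponding
 * atomic_fetch_*() variants; the fetch forms use the fully ordered
 * *_barrier helpers and return the previous counter value.
 */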
#define ATOMIC_OPS(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__atomic_##op(i, &v->counter);					\
}									\
static inline int atomic_fetch_##op(int i, atomic_t *v)			\
{									\
	return __atomic_##op##_barrier(i, &v->counter);			\
}

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)

#undef ATOMIC_OPS

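/*
 * xchg() comes from asm/cmpxchg.h; __atomic_cmpxchg() wraps the s390
 * compare-and-swap ("cs") instruction and returns the value found in
 * memory, which equals 'old' on success.
 */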
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	return __atomic_cmpxchg(&v->counter, old, new);
}
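
/*
 * Illustrative sketch only, not part of this header: a typical cmpxchg
 * retry loop built on the primitives above, here doubling a counter
 * atomically (atomic_double() is a hypothetical helper).
 *
 *	static inline void atomic_double(atomic_t *v)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = atomic_read(v);
 *			new = old * 2;
 *		} while (atomic_cmpxchg(v, old, new) != old);
 *	}
 */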

#define ATOMIC64_INIT(i)  { (i) }

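/*
 * 64-bit counterparts, using 64-bit load/store ("lg"/"stg"). The casts to
 * (long *) are needed because atomic64_t.counter is declared s64 while
 * the __atomic64_* helpers in asm/atomic_ops.h operate on long.
 */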
static inline s64 atomic64_read(const atomic64_t *v)
{
	s64 c;

	asm volatile(
		"	lg	%0,%1\n"
		: "=d" (c) : "Q" (v->counter));
	return c;
}

static inline void atomic64_set(atomic64_t *v, s64 i)
{
	asm volatile(
		"	stg	%1,%0\n"
		: "=Q" (v->counter) : "d" (i));
}

static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter) + i;
}

static inline s64 atomic64_fetch_add(s64 i, atomic64_t *v)
{
	return __atomic64_add_barrier(i, (long *)&v->counter);
}

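/*
 * Same constant-operand optimization as atomic_add() above; on z196 and
 * newer __atomic64_add_const() maps to the 64-bit add-immediate ("agsi").
 */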
static inline void atomic64_add(s64 i, atomic64_t *v)
{
#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
	/*
	 * Order of conditions is important to circumvent gcc 10 bug:
	 * https://gcc.gnu.org/pipermail/gcc-patches/2020-July/549318.html
	 */
	if ((i > -129) && (i < 128) && __builtin_constant_p(i)) {
		__atomic64_add_const(i, (long *)&v->counter);
		return;
	}
#endif
	__atomic64_add(i, (long *)&v->counter);
}

#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))

static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	return __atomic64_cmpxchg((long *)&v->counter, old, new);
}

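/*
 * 64-bit bitwise ops, generated the same way as ATOMIC_OPS() above. The
 * fetch variants return long rather than s64; both are 64 bits wide on
 * s390.
 */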
#define ATOMIC64_OPS(op)						\
static inline void atomic64_##op(s64 i, atomic64_t *v)			\
{									\
	__atomic64_##op(i, (long *)&v->counter);			\
}									\
static inline long atomic64_fetch_##op(s64 i, atomic64_t *v)		\
{									\
	return __atomic64_##op##_barrier(i, (long *)&v->counter);	\
}

ATOMIC64_OPS(and)
ATOMIC64_OPS(or)
ATOMIC64_OPS(xor)

#undef ATOMIC64_OPS

#define atomic64_sub_return(_i, _v)	atomic64_add_return(-(s64)(_i), _v)
#define atomic64_fetch_sub(_i, _v)	atomic64_fetch_add(-(s64)(_i), _v)
#define atomic64_sub(_i, _v)		atomic64_add(-(s64)(_i), _v)

#endif /* __ARCH_S390_ATOMIC__ */