/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__

#include <linux/preempt.h>
#include <asm/cmpxchg.h>

/*
 * s390 uses its own implementation for per cpu data: the offset of
 * the cpu's local data area is cached in its lowcore memory.
 */
#define __my_cpu_offset S390_lowcore.percpu_offset

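/*
 * Illustrative sketch (not from this header): with the definition
 * above, the generic per cpu accessors resolve a per cpu address
 * roughly as
 *
 *	ptr = RELOC_HIDE(&var, S390_lowcore.percpu_offset);
 *
 * i.e. the base offset is a single load from lowcore instead of a
 * function call or a per cpu offset array lookup.
 */
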
/*
 * For 64 bit module code the module may be loaded more than 4G above
 * the per cpu area; use weak definitions to force the compiler to
 * generate external references.
 */
#if defined(MODULE)
#define ARCH_NEEDS_WEAK_PER_CPU
#endif

/*
 * We use a compare-and-swap loop since that uses fewer cpu cycles than
 * disabling and enabling interrupts as the generic variant would do.
 */
#define arch_this_cpu_to_op_simple(pcp, val, op)			\
({									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ old__, new__, prev__;				\
	pcp_op_T__ *ptr__;						\
	preempt_disable_notrace();					\
	ptr__ = raw_cpu_ptr(&(pcp));					\
	prev__ = *ptr__;						\
	do {								\
		old__ = prev__;						\
		new__ = old__ op (val);					\
		prev__ = cmpxchg(ptr__, old__, new__);			\
	} while (prev__ != old__);					\
	preempt_enable_notrace();					\
	new__;								\
})
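/*
 * Illustrative expansion (not part of the header): for example
 * this_cpu_add_1(pcp, val), defined below in terms of this macro,
 * behaves roughly like
 *
 *	preempt_disable_notrace();
 *	do {
 *		old = *ptr;
 *	} while (cmpxchg(ptr, old, old + val) != old);
 *	preempt_enable_notrace();
 *
 * retrying only if the value changed between the load and the
 * compare-and-swap.
 */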

#define this_cpu_add_1(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_2(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_1(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_2(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_and_1(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_and_2(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_or_1(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_2(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
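/*
 * The 1- and 2-byte variants always take the compare-and-swap path:
 * the interlocked-access instructions used below exist only for 4-
 * and 8-byte operands.
 */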

#ifndef CONFIG_HAVE_MARCH_Z196_FEATURES

#define this_cpu_add_4(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_8(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_4(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_add_return_8(pcp, val) arch_this_cpu_to_op_simple(pcp, val, +)
#define this_cpu_and_4(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_and_8(pcp, val)	arch_this_cpu_to_op_simple(pcp, val, &)
#define this_cpu_or_4(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)
#define this_cpu_or_8(pcp, val)		arch_this_cpu_to_op_simple(pcp, val, |)

#else /* CONFIG_HAVE_MARCH_Z196_FEATURES */

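/*
 * The z196 interlocked-access facility performs the update with a
 * single atomic instruction instead of a cmpxchg loop: add-immediate
 * (asi/agsi) if the value is a small constant, otherwise load-and-add
 * (laa/laag), which also returns the old value.
 */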
#define arch_this_cpu_add(pcp, val, op1, op2, szcast)			\
{									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ val__ = (val);					\
	pcp_op_T__ old__, *ptr__;					\
	preempt_disable_notrace();					\
	ptr__ = raw_cpu_ptr(&(pcp));					\
	if (__builtin_constant_p(val__) &&				\
	    ((szcast)val__ > -129) && ((szcast)val__ < 128)) {		\
		asm volatile(						\
			op2 "   %[ptr__],%[val__]\n"			\
			: [ptr__] "+Q" (*ptr__)				\
			: [val__] "i" ((szcast)val__)			\
			: "cc");					\
	} else {							\
		asm volatile(						\
			op1 "   %[old__],%[val__],%[ptr__]\n"		\
			: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)	\
			: [val__] "d" (val__)				\
			: "cc");					\
	}								\
	preempt_enable_notrace();					\
}
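/*
 * Example (illustrative only): with a compile-time constant that fits
 * a signed byte the immediate form is chosen, otherwise the register
 * form:
 *
 *	this_cpu_add_4(x, 1);	 emits "asi" with immediate 1
 *	this_cpu_add_4(x, n);	 emits "laa" with n in a register
 */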

#define this_cpu_add_4(pcp, val) arch_this_cpu_add(pcp, val, "laa", "asi", int)
#define this_cpu_add_8(pcp, val) arch_this_cpu_add(pcp, val, "laag", "agsi", long)

#define arch_this_cpu_add_return(pcp, val, op)				\
({									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ val__ = (val);					\
	pcp_op_T__ old__, *ptr__;					\
	preempt_disable_notrace();					\
	ptr__ = raw_cpu_ptr(&(pcp));					\
	asm volatile(							\
		op "    %[old__],%[val__],%[ptr__]\n"			\
		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)		\
		: [val__] "d" (val__)					\
		: "cc");						\
	preempt_enable_notrace();					\
	old__ + val__;							\
})

#define this_cpu_add_return_4(pcp, val) arch_this_cpu_add_return(pcp, val, "laa")
#define this_cpu_add_return_8(pcp, val) arch_this_cpu_add_return(pcp, val, "laag")
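/*
 * laa/laag return the old value of the storage operand in the first
 * register operand, so "old__ + val__" above reconstructs the post-add
 * value that this_cpu_add_return() is expected to return.
 */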

#define arch_this_cpu_to_op(pcp, val, op)				\
{									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ val__ = (val);					\
	pcp_op_T__ old__, *ptr__;					\
	preempt_disable_notrace();					\
	ptr__ = raw_cpu_ptr(&(pcp));					\
	asm volatile(							\
		op "    %[old__],%[val__],%[ptr__]\n"			\
		: [old__] "=d" (old__), [ptr__] "+Q" (*ptr__)		\
		: [val__] "d" (val__)					\
		: "cc");						\
	preempt_enable_notrace();					\
}

#define this_cpu_and_4(pcp, val)	arch_this_cpu_to_op(pcp, val, "lan")
#define this_cpu_and_8(pcp, val)	arch_this_cpu_to_op(pcp, val, "lang")
#define this_cpu_or_4(pcp, val)		arch_this_cpu_to_op(pcp, val, "lao")
#define this_cpu_or_8(pcp, val)		arch_this_cpu_to_op(pcp, val, "laog")

#endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */

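/*
 * cmpxchg() and xchg() are atomic by themselves; the wrappers below
 * only disable preemption so that the task cannot migrate to another
 * cpu between calculating the per cpu address and operating on it.
 */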
#define arch_this_cpu_cmpxchg(pcp, oval, nval)				\
({									\
	typedef typeof(pcp) pcp_op_T__;					\
	pcp_op_T__ ret__;						\
	pcp_op_T__ *ptr__;						\
	preempt_disable_notrace();					\
	ptr__ = raw_cpu_ptr(&(pcp));					\
	ret__ = cmpxchg(ptr__, oval, nval);				\
	preempt_enable_notrace();					\
	ret__;								\
})

#define this_cpu_cmpxchg_1(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_2(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_4(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)
#define this_cpu_cmpxchg_8(pcp, oval, nval) arch_this_cpu_cmpxchg(pcp, oval, nval)

#define arch_this_cpu_xchg(pcp, nval)					\
({									\
	typeof(pcp) *ptr__;						\
	typeof(pcp) ret__;						\
	preempt_disable_notrace();					\
	ptr__ = raw_cpu_ptr(&(pcp));					\
	ret__ = xchg(ptr__, nval);					\
	preempt_enable_notrace();					\
	ret__;								\
})
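/*
 * Usage sketch (hypothetical variable and value):
 *
 *	old = this_cpu_xchg_4(state, NEW_STATE);
 *
 * atomically replaces this cpu's copy of "state" and returns the
 * previous value.
 */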

#define this_cpu_xchg_1(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_2(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_4(pcp, nval) arch_this_cpu_xchg(pcp, nval)
#define this_cpu_xchg_8(pcp, nval) arch_this_cpu_xchg(pcp, nval)

#define arch_this_cpu_cmpxchg_double(pcp1, pcp2, o1, o2, n1, n2)	\
({									\
	typeof(pcp1) o1__ = (o1), n1__ = (n1);				\
	typeof(pcp2) o2__ = (o2), n2__ = (n2);				\
	typeof(pcp1) *p1__;						\
	typeof(pcp2) *p2__;						\
	int ret__;							\
	preempt_disable_notrace();					\
	p1__ = raw_cpu_ptr(&(pcp1));					\
	p2__ = raw_cpu_ptr(&(pcp2));					\
	ret__ = __cmpxchg_double(p1__, p2__, o1__, o2__, n1__, n2__);	\
	preempt_enable_notrace();					\
	ret__;								\
})

#define this_cpu_cmpxchg_double_8 arch_this_cpu_cmpxchg_double
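/*
 * Note: on s390 __cmpxchg_double() is implemented with cdsg, which
 * updates a single 16-byte quantity, so the two per cpu variables are
 * expected to be adjacent doublewords.
 */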

#include <asm-generic/percpu.h>

#endif /* __ARCH_S390_PERCPU__ */