/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */
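
/*
 * Rough sketch of the semantics of the LL/SC loops below, where
 * __load_linked() and __store_conditional() are hypothetical stand-ins
 * for movli.l and movco.l (an illustration, not real code):
 *
 *	do {
 *		tmp = __load_linked(&v->counter);
 *		tmp = <op>(tmp, i);
 *	} while (!__store_conditional(&v->counter, tmp));
 *
 * movco.l sets the T bit only if the reservation taken by movli.l is
 * still intact, so "bf 1b" (branch if false) retries the sequence
 * whenever the store-conditional fails.
 */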

#define ATOMIC_OP(op)							\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "\n"			\
"	" #op "	%1, %0				\n"			\
"	movco.l	%0, @%2				\n"			\
"	bf	1b				\n"			\
	: "=&z" (tmp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
}
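
/*
 * For illustration, ATOMIC_OP(add) expands to roughly the following
 * function: a read-modify-write with no return value and no memory
 * barrier (whitespace adjusted):
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned long tmp;
 *
 *		__asm__ __volatile__ (
 *	"1:	movli.l @%2, %0		! atomic_add\n"
 *	"	add	%1, %0				\n"
 *	"	movco.l	%0, @%2				\n"
 *	"	bf	1b				\n"
 *		: "=&z" (tmp)
 *		: "r" (i), "r" (&v->counter)
 *		: "t");
 *	}
 */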

#define ATOMIC_OP_RETURN(op)						\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long temp;						\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%2, %0		! atomic_" #op "_return	\n"		\
"	" #op "	%1, %0					\n"		\
"	movco.l	%0, @%2					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp)							\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return temp;							\
}
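
/*
 * atomic_add_return()/atomic_sub_return() hand back the *new* counter
 * value left in r0 by the op, and the trailing synco is there to give
 * the ordering expected of value-returning atomics. A minimal, purely
 * hypothetical caller:
 *
 *	static atomic_t refcnt = ATOMIC_INIT(1);
 *
 *	if (atomic_sub_return(1, &refcnt) == 0)
 *		release_object();	// hypothetical cleanup helper
 */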

#define ATOMIC_FETCH_OP(op)						\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long res, temp;					\
									\
	__asm__ __volatile__ (						\
"1:	movli.l @%3, %0		! atomic_fetch_" #op "	\n"		\
"	mov %0, %1					\n"		\
"	" #op "	%2, %0					\n"		\
"	movco.l	%0, @%3					\n"		\
"	bf	1b					\n"		\
"	synco						\n"		\
	: "=&z" (temp), "=&r" (res)					\
	: "r" (i), "r" (&v->counter)					\
	: "t");								\
									\
	return res;							\
}
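
/*
 * ATOMIC_FETCH_OP differs from ATOMIC_OP_RETURN only in the extra
 * "mov %0, %1": the value loaded by movli.l is saved to res before the
 * operation, so atomic_fetch_##op() returns the *old* value. A minimal,
 * hypothetical example with a flag word:
 *
 *	static atomic_t flags = ATOMIC_INIT(0);
 *
 *	int old = atomic_fetch_or(0x1, &flags);
 *	if (old & 0x1)
 *		return;		// some other path already set the bit
 */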

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_FETCH_OP(op)

ATOMIC_OPS(and)
ATOMIC_OPS(or)
ATOMIC_OPS(xor)
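
/*
 * After the expansions above this header provides, roughly:
 *
 *	atomic_add(),  atomic_add_return(),  atomic_fetch_add()
 *	atomic_sub(),  atomic_sub_return(),  atomic_fetch_sub()
 *	atomic_and(),                        atomic_fetch_and()
 *	atomic_or(),                         atomic_fetch_or()
 *	atomic_xor(),                        atomic_fetch_xor()
 *
 * The bitwise ops deliberately get no _return variant, matching the
 * kernel-wide atomic API, which defines only the fetch forms for them.
 */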

#undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#endif /* __ASM_SH_ATOMIC_LLSC_H */