/* SPDX-License-Identifier: GPL-2.0
 *
 * include/asm-sh/spinlock-llsc.h
 *
 * Copyright (C) 2002, 2003 Paul Mundt
 * Copyright (C) 2006, 2007 Akio Idehara
 */
#ifndef __ASM_SH_SPINLOCK_LLSC_H
#define __ASM_SH_SPINLOCK_LLSC_H

#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

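/*
 * A free lock holds 1 and a held lock holds 0 (arch_spin_lock() stores
 * 0, arch_spin_unlock() stores 1 back), so anything <= 0 counts as
 * locked.
 */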
#define arch_spin_is_locked(x)		((x)->lock <= 0)

/*
 * Simple spin lock operations.  There are two variants, one clears IRQs
 * on the local processor, one does not.
 *
 * We make no fairness assumptions.  They have a cost.
 */
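/*
 * All of these primitives are built on the SH-4A movli.l/movco.l
 * (load-linked/store-conditional) pair: movli.l loads the lock word and
 * sets the reservation, movco.l stores only if the reservation is still
 * intact and reports the result in the T bit, and "bf 1b" retries the
 * whole sequence whenever the conditional store fails.
 */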
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned long tmp;
	unsigned long oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_lock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"cmp/pl		%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

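/*
 * Release is a plain store of 1; the mmiowb() keeps MMIO writes done
 * inside the critical section from being reordered past the unlock as
 * seen by the next lock holder.
 */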
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned long tmp;

	/* This could be optimised with ARCH_HAS_MMIOWB */
	mmiowb();
	__asm__ __volatile__ (
		"mov		#1, %0 ! arch_spin_unlock	\n\t"
		"mov.l		%0, @%1				\n\t"
		: "=&z" (tmp)
		: "r" (&lock->lock)
		: "t", "memory"
	);
}

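/*
 * Returns the previous lock value, so nonzero means the lock was free
 * and has now been taken; the trailing synco is the memory barrier for
 * the successful acquire.
 */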
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_spin_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"mov		#0, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&lock->lock)
		: "t", "memory"
	);

	return oldval;
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get an irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */
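/*
 * An unlocked rwlock holds RW_LOCK_BIAS in rw->lock (arch_write_unlock()
 * stores it back).  Each reader subtracts 1 and a writer subtracts the
 * whole bias, so readers get in while the count is positive, but a
 * writer has to wait for the full bias, i.e. no readers and no writer.
 */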

static inline void arch_read_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_lock	\n\t"
		"cmp/pl		%0				\n\t"
		"bf		1b				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_read_unlock	\n\t"
		"add		#1, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock)
		: "t", "memory"
	);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%1, %0	! arch_write_lock	\n\t"
		"cmp/hs		%2, %0				\n\t"
		"bf		1b				\n\t"
		"sub		%2, %0				\n\t"
		"movco.l	%0, @%1				\n\t"
		"bf		1b				\n\t"
		: "=&z" (tmp)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
	__asm__ __volatile__ (
		"mov.l		%1, @%0 ! arch_write_unlock	\n\t"
		:
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);
}

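/*
 * Nonzero return means a read hold was taken; if the count is already
 * zero (writer held) this bails out without touching the lock word.
 */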
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_read_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/pl		%0				\n\t"
		"bf		2f				\n\t"
		"add		#-1, %0				\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"2:						\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock)
		: "t", "memory"
	);

	return (oldval > 0);
}

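/*
 * Succeeds (nonzero return) only if the old count was the full
 * RW_LOCK_BIAS, i.e. no readers and no writer held the lock.
 */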
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
	unsigned long tmp, oldval;

	__asm__ __volatile__ (
		"1:						\n\t"
		"movli.l	@%2, %0	! arch_write_trylock	\n\t"
		"mov		%0, %1				\n\t"
		"cmp/hs		%3, %0				\n\t"
		"bf		2f				\n\t"
		"sub		%3, %0				\n\t"
		"2:						\n\t"
		"movco.l	%0, @%2				\n\t"
		"bf		1b				\n\t"
		"synco						\n\t"
		: "=&z" (tmp), "=&r" (oldval)
		: "r" (&rw->lock), "r" (RW_LOCK_BIAS)
		: "t", "memory"
	);

	return (oldval > (RW_LOCK_BIAS - 1));
}

#endif /* __ASM_SH_SPINLOCK_LLSC_H */