/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_SPINLOCK_H
#define __LINUX_SPINLOCK_H

/*
 * include/linux/spinlock.h - generic spinlock/rwlock declarations
 *
 * here's the role of the various spinlock/rwlock related include files:
 *
 * on SMP builds:
 *
 *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
 *                        initializers
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  asm/spinlock.h:       contains the arch_spin_*()/etc. lowlevel
 *                        implementations, mostly inline assembly code
 *
 *   (also included on UP-debug builds:)
 *
 *  linux/spinlock_api_smp.h:
 *                        contains the prototypes for the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 *
 * on UP builds:
 *
 *  linux/spinlock_types_up.h:
 *                        contains the generic, simplified UP spinlock type.
 *                        (which is an empty structure on non-debug builds)
 *
 *  linux/spinlock_types.h:
 *                        defines the generic type and initializers
 *
 *  linux/spinlock_up.h:
 *                        contains the arch_spin_*()/etc. versions used on UP
 *                        builds. (which are NOPs on non-debug, non-preempt
 *                        builds)
 *
 *   (included on UP-non-debug builds:)
 *
 *  linux/spinlock_api_up.h:
 *                        builds the _spin_*() APIs.
 *
 *  linux/spinlock.h:     builds the final spin_*() APIs.
 */
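
/*
 * For orientation, a minimal usage sketch of the final spin_*() API that
 * this header stack produces; the lock and counter names are illustrative
 * only, not part of any kernel API:
 *
 *	static DEFINE_SPINLOCK(demo_lock);	// statically initialized
 *	static unsigned long demo_counter;	// state protected by demo_lock
 *
 *	static void demo_increment(void)
 *	{
 *		spin_lock(&demo_lock);		// enter critical section
 *		demo_counter++;			// protected update
 *		spin_unlock(&demo_lock);	// leave critical section
 *	}
 */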

#include <linux/typecheck.h>
#include <linux/preempt.h>
#include <linux/linkage.h>
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/thread_info.h>
#include <linux/kernel.h>
#include <linux/stringify.h>
#include <linux/bottom_half.h>
#include <linux/lockdep.h>
#include <asm/barrier.h>
#include <asm/mmiowb.h>


/*
 * Must define these before including other files, inline functions need them
 */
#define LOCK_SECTION_NAME ".text..lock."KBUILD_BASENAME

#define LOCK_SECTION_START(extra)               \
        ".subsection 1\n\t"                     \
        extra                                   \
        ".ifndef " LOCK_SECTION_NAME "\n\t"     \
        LOCK_SECTION_NAME ":\n\t"               \
        ".endif\n"

#define LOCK_SECTION_END                        \
        ".previous\n\t"

#define __lockfunc __section(".spinlock.text")

/*
 * Pull the arch_spinlock_t and arch_rwlock_t definitions:
 */
#include <linux/spinlock_types.h>

/*
 * Pull the arch_spin*() functions/declarations (UP-nondebug doesn't need them):
 */
#ifdef CONFIG_SMP
# include <asm/spinlock.h>
#else
# include <linux/spinlock_up.h>
#endif

#ifdef CONFIG_DEBUG_SPINLOCK
  extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				   struct lock_class_key *key, short inner);

# define raw_spin_lock_init(lock)					\
do {									\
	static struct lock_class_key __key;				\
									\
	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
} while (0)

#else
# define raw_spin_lock_init(lock)				\
	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
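
/*
 * Usage sketch (hypothetical structure and field names): locks embedded in
 * dynamically allocated objects must be initialized at runtime with
 * raw_spin_lock_init(); under CONFIG_DEBUG_SPINLOCK this also registers a
 * lockdep class keyed to the init site:
 *
 *	struct demo_dev {
 *		raw_spinlock_t	hw_lock;	// protects device state
 *		u32		regs_shadow;
 *	};
 *
 *	static void demo_dev_setup(struct demo_dev *dev)
 *	{
 *		raw_spin_lock_init(&dev->hw_lock);	// before first use
 *		dev->regs_shadow = 0;
 *	}
 */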

#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)

#ifdef arch_spin_is_contended
#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
#else
#define raw_spin_is_contended(lock)	(((void)(lock), 0))
#endif /*arch_spin_is_contended*/

/*
 * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
 * between program-order earlier lock acquisitions and program-order later
 * memory accesses.
 *
 * This guarantees that the following two properties hold:
 *
 *   1) Given the snippet:
 *
 *	  { X = 0;  Y = 0; }
 *
 *	  CPU0				CPU1
 *
 *	  WRITE_ONCE(X, 1);		WRITE_ONCE(Y, 1);
 *	  spin_lock(S);			smp_mb();
 *	  smp_mb__after_spinlock();	r1 = READ_ONCE(X);
 *	  r0 = READ_ONCE(Y);
 *	  spin_unlock(S);
 *
 *      it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
 *      and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
 *      preceding the call to smp_mb__after_spinlock() in __schedule() and in
 *      try_to_wake_up().
 *
 *   2) Given the snippet:
 *
 *  { X = 0;  Y = 0; }
 *
 *  CPU0		CPU1				CPU2
 *
 *  spin_lock(S);	spin_lock(S);			r1 = READ_ONCE(Y);
 *  WRITE_ONCE(X, 1);	smp_mb__after_spinlock();	smp_rmb();
 *  spin_unlock(S);	r0 = READ_ONCE(X);		r2 = READ_ONCE(X);
 *			WRITE_ONCE(Y, 1);
 *			spin_unlock(S);
 *
 *      it is forbidden that CPU0's critical section executes before CPU1's
 *      critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
 *      and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
 *      preceding the calls to smp_rmb() in try_to_wake_up() for similar
 *      snippets but "projected" onto two CPUs.
 *
 *      Property (2) upgrades the lock to an RCsc lock.
 *
 * Since most load-store architectures implement ACQUIRE with an smp_mb() after
 * the LL/SC loop, they need no further barriers. Similarly all our TSO
 * architectures imply an smp_mb() for each atomic instruction and equally don't
 * need more.
 *
 * Architectures that can implement ACQUIRE better need to take care.
 */
#ifndef smp_mb__after_spinlock
#define smp_mb__after_spinlock()	do { } while (0)
#endif
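
/*
 * A sketch of litmus test (1) above written out as C code; the demo_* names
 * are illustrative. The two functions are assumed to run concurrently on
 * different CPUs, and the forbidden outcome is r0 == 0 && r1 == 0:
 *
 *	static int X, Y;
 *	static DEFINE_RAW_SPINLOCK(S);
 *
 *	static int demo_cpu0(void)
 *	{
 *		int r0;
 *
 *		WRITE_ONCE(X, 1);
 *		raw_spin_lock(&S);
 *		smp_mb__after_spinlock();	// full barrier: X vs. Y
 *		r0 = READ_ONCE(Y);
 *		raw_spin_unlock(&S);
 *		return r0;
 *	}
 *
 *	static int demo_cpu1(void)
 *	{
 *		int r1;
 *
 *		WRITE_ONCE(Y, 1);
 *		smp_mb();
 *		r1 = READ_ONCE(X);
 *		return r1;
 *	}
 */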

#ifdef CONFIG_DEBUG_SPINLOCK
 extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
#define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
 extern int do_raw_spin_trylock(raw_spinlock_t *lock);
 extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
#else
static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock(&lock->raw_lock);
	mmiowb_spin_lock();
}

#ifndef arch_spin_lock_flags
#define arch_spin_lock_flags(lock, flags)	arch_spin_lock(lock)
#endif

static inline void
do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
{
	__acquire(lock);
	arch_spin_lock_flags(&lock->raw_lock, *flags);
	mmiowb_spin_lock();
}

static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
{
	int ret = arch_spin_trylock(&(lock)->raw_lock);

	if (ret)
		mmiowb_spin_lock();

	return ret;
}

static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
{
	mmiowb_spin_unlock();
	arch_spin_unlock(&lock->raw_lock);
	__release(lock);
}
#endif

/*
 * Define the various spin_lock methods.  Note we define these
 * regardless of whether CONFIG_SMP or CONFIG_PREEMPTION are set. The
 * various methods are defined as nops in the case they are not
 * required.
 */
#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))

#define raw_spin_lock(lock)	_raw_spin_lock(lock)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
	_raw_spin_lock_nested(lock, subclass)

# define raw_spin_lock_nest_lock(lock, nest_lock)			\
	 do {								\
		 typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
		 _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
	 } while (0)
#else
/*
 * Always evaluate the 'subclass' argument to avoid that the compiler
 * warns about set-but-not-used variables when building with
 * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
 */
# define raw_spin_lock_nested(lock, subclass)		\
	_raw_spin_lock(((void)(subclass), (lock)))
# define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
#endif
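
/*
 * Usage sketch for raw_spin_lock_nested(): taking two locks of the same
 * lock class without a false-positive lockdep recursion report. The helper
 * below is illustrative; ordering by address gives concurrent callers a
 * stable lock order and so avoids a real ABBA deadlock:
 *
 *	static void demo_double_lock(raw_spinlock_t *a, raw_spinlock_t *b)
 *	{
 *		if (a > b)
 *			swap(a, b);	// stable lock order
 *		raw_spin_lock(a);
 *		raw_spin_lock_nested(b, SINGLE_DEPTH_NESTING);
 *	}
 */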

#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		flags = _raw_spin_lock_irqsave(lock);	\
	} while (0)

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
	do {								\
		typecheck(unsigned long, flags);			\
		flags = _raw_spin_lock_irqsave_nested(lock, subclass);	\
	} while (0)
#else
#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	do {							\
		typecheck(unsigned long, flags);		\
		flags = _raw_spin_lock_irqsave(lock);		\
	} while (0)
#endif

#else

#define raw_spin_lock_irqsave(lock, flags)		\
	do {						\
		typecheck(unsigned long, flags);	\
		_raw_spin_lock_irqsave(lock, flags);	\
	} while (0)

#define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
	raw_spin_lock_irqsave(lock, flags)

#endif

#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
#define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)

#define raw_spin_unlock_irqrestore(lock, flags)		\
	do {							\
		typecheck(unsigned long, flags);		\
		_raw_spin_unlock_irqrestore(lock, flags);	\
	} while (0)
#define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)

#define raw_spin_trylock_bh(lock) \
	__cond_lock(lock, _raw_spin_trylock_bh(lock))

#define raw_spin_trylock_irq(lock) \
({ \
	local_irq_disable(); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_enable(); 0;  }); \
})

#define raw_spin_trylock_irqsave(lock, flags) \
({ \
	local_irq_save(flags); \
	raw_spin_trylock(lock) ? \
	1 : ({ local_irq_restore(flags); 0; }); \
})
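
/*
 * Usage sketch for raw_spin_trylock_irqsave() (hypothetical helper): on
 * success it returns with the lock held and interrupts disabled, so the
 * caller must pair it with raw_spin_unlock_irqrestore():
 *
 *	static bool demo_try_update(raw_spinlock_t *lock, int *state)
 *	{
 *		unsigned long flags;
 *
 *		if (!raw_spin_trylock_irqsave(lock, flags))
 *			return false;
 *
 *		*state ^= 1;	// protected update
 *		raw_spin_unlock_irqrestore(lock, flags);
 *		return true;
 *	}
 */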

/* Include rwlock functions */
#include <linux/rwlock.h>

/*
 * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
# include <linux/spinlock_api_smp.h>
#else
# include <linux/spinlock_api_up.h>
#endif

/*
 * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
 */

static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
{
	return &lock->rlock;
}

#ifdef CONFIG_DEBUG_SPINLOCK

# define spin_lock_init(lock)					\
do {								\
	static struct lock_class_key __key;			\
								\
	__raw_spin_lock_init(spinlock_check(lock),		\
			     #lock, &__key, LD_WAIT_CONFIG);	\
} while (0)

#else

# define spin_lock_init(_lock)			\
do {						\
	spinlock_check(_lock);			\
	*(_lock) = __SPIN_LOCK_UNLOCKED(_lock);	\
} while (0)

#endif
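
/*
 * Usage sketch for spin_lock_init() (invented names; kzalloc() requires
 * <linux/slab.h>): a spinlock embedded in a dynamically allocated object
 * must be initialized before first use:
 *
 *	struct demo_queue {
 *		spinlock_t	lock;		// protects 'pending'
 *		unsigned int	pending;
 *	};
 *
 *	static struct demo_queue *demo_queue_create(gfp_t gfp)
 *	{
 *		struct demo_queue *q = kzalloc(sizeof(*q), gfp);
 *
 *		if (q)
 *			spin_lock_init(&q->lock);
 *		return q;
 *	}
 */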

static __always_inline void spin_lock(spinlock_t *lock)
{
	raw_spin_lock(&lock->rlock);
}

static __always_inline void spin_lock_bh(spinlock_t *lock)
{
	raw_spin_lock_bh(&lock->rlock);
}

static __always_inline int spin_trylock(spinlock_t *lock)
{
	return raw_spin_trylock(&lock->rlock);
}

#define spin_lock_nested(lock, subclass)			\
do {								\
	raw_spin_lock_nested(spinlock_check(lock), subclass);	\
} while (0)

#define spin_lock_nest_lock(lock, nest_lock)				\
do {									\
	raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);	\
} while (0)

static __always_inline void spin_lock_irq(spinlock_t *lock)
{
	raw_spin_lock_irq(&lock->rlock);
}

#define spin_lock_irqsave(lock, flags)				\
do {								\
	raw_spin_lock_irqsave(spinlock_check(lock), flags);	\
} while (0)

#define spin_lock_irqsave_nested(lock, flags, subclass)			\
do {									\
	raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
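
/*
 * Usage sketch: spin_lock_irqsave() is the safe default when the same lock
 * may also be taken from interrupt context; the names are illustrative:
 *
 *	static DEFINE_SPINLOCK(demo_stat_lock);
 *	static u64 demo_events;
 *
 *	static void demo_account_event(void)	// process or irq context
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&demo_stat_lock, flags);
 *		demo_events++;
 *		spin_unlock_irqrestore(&demo_stat_lock, flags);
 *	}
 */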

static __always_inline void spin_unlock(spinlock_t *lock)
{
	raw_spin_unlock(&lock->rlock);
}

static __always_inline void spin_unlock_bh(spinlock_t *lock)
{
	raw_spin_unlock_bh(&lock->rlock);
}

static __always_inline void spin_unlock_irq(spinlock_t *lock)
{
	raw_spin_unlock_irq(&lock->rlock);
}

static __always_inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&lock->rlock, flags);
}

static __always_inline int spin_trylock_bh(spinlock_t *lock)
{
	return raw_spin_trylock_bh(&lock->rlock);
}

static __always_inline int spin_trylock_irq(spinlock_t *lock)
{
	return raw_spin_trylock_irq(&lock->rlock);
}

#define spin_trylock_irqsave(lock, flags)			\
({								\
	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
})

/**
 * spin_is_locked() - Check whether a spinlock is locked.
 * @lock: Pointer to the spinlock.
 *
 * This function is NOT required to provide any memory ordering
 * guarantees; it could be used for debugging purposes or, when
 * additional synchronization is needed, accompanied with other
 * constructs (memory barriers) enforcing the synchronization.
 *
 * Returns: 1 if @lock is locked, 0 otherwise.
 *
 * Note that the function only tells you that the spinlock is
 * seen to be locked, not that it is locked on your CPU.
 *
 * Further, on CONFIG_SMP=n builds with CONFIG_DEBUG_SPINLOCK=n,
 * the return value is always 0 (see include/linux/spinlock_up.h).
 * Therefore you should not rely heavily on the return value.
 */
static __always_inline int spin_is_locked(spinlock_t *lock)
{
	return raw_spin_is_locked(&lock->rlock);
}

static __always_inline int spin_is_contended(spinlock_t *lock)
{
	return raw_spin_is_contended(&lock->rlock);
}

#define assert_spin_locked(lock)	assert_raw_spin_locked(&(lock)->rlock)
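
/*
 * Given the caveats above, spin_is_locked() is best confined to
 * assertions. A sketch of documenting a locking precondition with
 * assert_spin_locked() (the demo_queue type is the invented example
 * from the spin_lock_init() sketch above):
 *
 *	static unsigned int demo_queue_len_locked(struct demo_queue *q)
 *	{
 *		assert_spin_locked(&q->lock);	// caller must hold q->lock
 *		return q->pending;
 *	}
 */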

/*
 * Pull the atomic_t declaration:
 * (asm-mips/atomic.h needs above definitions)
 */
#include <linux/atomic.h>
/**
 * atomic_dec_and_lock - lock on reaching reference count zero
 * @atomic: the atomic counter
 * @lock: the spinlock in question
 *
 * Decrements @atomic by 1.  If the result is 0, returns true and locks
 * @lock.  Returns false for all other cases.
 */
extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))

extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
					unsigned long *flags);
#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
		__cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
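
/*
 * The canonical atomic_dec_and_lock() user is an object-put path that must
 * unlink the object from a locked list exactly when the last reference is
 * dropped. A sketch with invented names (list_del() and kfree() require
 * <linux/list.h> and <linux/slab.h>):
 *
 *	struct demo_obj {
 *		atomic_t		refcnt;
 *		struct list_head	node;	// protected by list_lock
 *	};
 *
 *	static void demo_obj_put(struct demo_obj *obj, spinlock_t *list_lock)
 *	{
 *		if (!atomic_dec_and_lock(&obj->refcnt, list_lock))
 *			return;
 *		// only the final put reaches here, with list_lock held
 *		list_del(&obj->node);
 *		spin_unlock(list_lock);
 *		kfree(obj);
 *	}
 */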

int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
			     size_t max_size, unsigned int cpu_mult,
			     gfp_t gfp, const char *name,
			     struct lock_class_key *key);

#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp)    \
	({								     \
		static struct lock_class_key key;			     \
		int ret;						     \
									     \
		ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size,   \
					       cpu_mult, gfp, #locks, &key); \
		ret;							     \
	})

void free_bucket_spinlocks(spinlock_t *locks);
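
/*
 * A sketch of per-bucket locking for a hash table built on this allocator;
 * the table size and names are illustrative, and cpu_mult == 0 is assumed
 * to request max_size locks. *lock_mask is set to the (power-of-two)
 * number of allocated locks minus one:
 *
 *	static spinlock_t *demo_bucket_locks;
 *	static unsigned int demo_lock_mask;
 *
 *	static int demo_table_init(void)
 *	{
 *		return alloc_bucket_spinlocks(&demo_bucket_locks,
 *					      &demo_lock_mask, 256, 0,
 *					      GFP_KERNEL);
 *	}
 *
 *	static spinlock_t *demo_bucket_lock(u32 hash)
 *	{
 *		return &demo_bucket_locks[hash & demo_lock_mask];
 *	}
 */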

#endif /* __LINUX_SPINLOCK_H */