/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 * locked wrt the lockref code, to keep the lockref code from stealing the
 * lock and changing things underneath it. This also allows some
 * optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !atomic_read(&lock.val);
}
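
/*
 * Illustrative note (not upstream documentation): unlike
 * queued_spin_is_locked(), this helper takes the lock *by value*, so it can
 * be applied to a snapshot of the lock word without touching the live
 * cacheline. A sketch of the kind of lockless check lockref-style code can
 * make, assuming the asm-generic layout where val == 0 means free with no
 * waiters (try_lockless_update() is a hypothetical helper):
 *
 *	struct qspinlock snap = READ_ONCE(*lock);
 *
 *	if (queued_spin_value_unlocked(snap))
 *		try_lockless_update();
 */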

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}
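
/*
 * Illustrative note (not upstream documentation): with the asm-generic
 * qspinlock_types.h layout, the low byte (_Q_LOCKED_MASK) is the locked
 * byte and the bits above it hold the pending bit and the MCS tail (the
 * exact split depends on CONFIG_NR_CPUS). So, roughly:
 *
 *	val == _Q_LOCKED_VAL	-> held, nobody waiting: not contended
 *	val & ~_Q_LOCKED_MASK	-> pending/tail bits set: CPUs are queued
 */
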
/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	u32 val = atomic_read(&lock->val);

	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}
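
/*
 * Illustrative note (not upstream documentation): the plain atomic_read()
 * above lets a contended trylock fail without issuing an atomic RMW, so a
 * busy lock's cacheline is not pulled into exclusive state just to report
 * failure; only an apparently-free lock pays for the cmpxchg.
 */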

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#endif
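
/*
 * Illustrative sketch (not part of this header): the fast path above only
 * succeeds when the observed lock word is 0; otherwise the value that the
 * cmpxchg read back is handed to queued_spin_lock_slowpath(), which does
 * the pending-bit and MCS-queue handling. Assuming the asm-generic
 * qspinlock_types.h definitions (where arch_spinlock_t is this struct
 * qspinlock), a direct caller would look like:
 *
 *	arch_spinlock_t lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	queued_spin_lock(&lock);
 *	... critical section ...
 *	queued_spin_unlock(&lock);
 *
 * Kernel code normally reaches these helpers through spin_lock() and
 * spin_unlock() rather than calling them directly.
 */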

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics:
	 */
	smp_store_release(&lock->locked, 0);
}
#endif
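
/*
 * Illustrative note (not upstream documentation): unlock can be a plain
 * release store to the locked byte because nobody else writes that byte
 * while the lock is held; waiters maintain the pending/tail bits of the
 * word themselves. The release ordering makes the critical section visible
 * before the lock is observed to be free.
 */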

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif
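
/*
 * Illustrative note (not upstream documentation): queued_spin_lock_slowpath()
 * calls virt_spin_lock() early and returns if it reports that it took the
 * lock. This generic stub returns false ("not handled"), so the normal MCS
 * queueing runs. An architecture running under a hypervisor may override it
 * (x86 does, in its asm/qspinlock.h) to fall back to a simple test-and-set
 * spin, since strict FIFO queueing performs poorly when waiting vCPUs can be
 * preempted.
 */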

/*
 * Remap the architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)
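
/*
 * Illustrative note (not upstream documentation): an architecture typically
 * opts in by selecting ARCH_USE_QUEUED_SPINLOCKS and having its asm/spinlock.h
 * include asm/qspinlock.h, which defines any overrides (for example a custom
 * queued_spin_unlock) before including this asm-generic header, so the
 * #ifndef guards above pick up the architecture's versions.
 */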

#endif /* __ASM_GENERIC_QSPINLOCK_H */