/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

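/*
 * Bound on the number of spins the generic slowpath performs while waiting
 * for an in-progress pending->locked hand-over to complete; x86 allows a
 * longer wait than the generic default of a single iteration.
 */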
#define _Q_PENDING_LOOPS	(1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
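/*
 * Set the pending bit with LOCK BTS; the locked instruction provides the
 * required acquire ordering.  Return the previous state of the pending bit
 * combined with the current locked/tail fields, as the generic slowpath
 * expects from this fetch-and-set.
 */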
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

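/*
 * Route the slowpath and unlock through the paravirt ops so that a
 * hypervisor can substitute its own implementation.
 */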
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

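/*
 * Ask the hypervisor whether the given vCPU is currently preempted, so
 * callers can avoid spinning on a lock whose holder is not running.
 */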
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#endif

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_spin_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */