/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_QSPINLOCK_H
#define _ASM_POWERPC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>

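/*
 * _Q_PENDING_LOOPS bounds how long the generic slowpath spins waiting
 * for a pending->locked hand-over before queueing up; the value below is
 * a placeholder and, as its comment says, has not been tuned for POWER.
 */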
#define _Q_PENDING_LOOPS	(1 << 9) /* not tuned */

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

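/*
 * With paravirt spinlocks enabled, pick the slowpath at run time: bare
 * metal and dedicated-processor guests spin natively in the MCS queue,
 * while shared-processor (SPLPAR) guests take the paravirt slowpath so a
 * waiter can yield its vCPU to the hypervisor rather than burn cycles.
 */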
static __always_inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	if (!is_shared_processor())
		native_queued_spin_lock_slowpath(lock, val);
	else
		__pv_queued_spin_lock_slowpath(lock, val);
}

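/*
 * Unlock must mirror the choice made in the lock slowpath: the native
 * case is a plain store-release of the locked byte, while the paravirt
 * case must also look up and kick any vCPU sleeping in pv_wait() on
 * this lock.
 */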
#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	if (!is_shared_processor())
		smp_store_release(&lock->locked, 0);
	else
		__pv_queued_spin_unlock(lock);
}

#else
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
#endif

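/*
 * Fast path: try to move the whole lock word from 0 (unlocked, no
 * pending bit, empty queue) straight to _Q_LOCKED_VAL. On failure the
 * cmpxchg writes the value it observed back into @val, which the
 * slowpath takes as its starting point.
 */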
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	u32 val = 0;

	if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

	queued_spin_lock_slowpath(lock, val);
}
#define queued_spin_lock queued_spin_lock

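/*
 * Taking the lock has only acquire semantics, which do not order prior
 * stores against later loads, hence the full barrier here.
 */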
#define smp_mb__after_spinlock() smp_mb()

static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * This barrier was added to simple spinlocks by commit 51d7d5205d338,
	 * but it should now be possible to remove it, as arm64 has done with
	 * commit c6f5d02b6a0f.
	 */
	smp_mb();
	return atomic_read(&lock->val);
}
#define queued_spin_is_locked queued_spin_is_locked

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#define SPIN_THRESHOLD (1<<15) /* not tuned */

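/*
 * pv_wait() and pv_kick() are the two hooks the generic paravirt
 * slowpath drives once a waiter has spun SPIN_THRESHOLD times without
 * making progress: the waiter gives up its vCPU to the hypervisor, and
 * the unlocker later prods it back onto a CPU.
 */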
static __always_inline void pv_wait(u8 *ptr, u8 val)
{
	if (*ptr != val)
		return;
	yield_to_any();
	/*
	 * We could pass in a CPU here if waiting in the queue and yield to
	 * the previous CPU in the queue.
	 */
}

static __always_inline void pv_kick(int cpu)
{
	prod_cpu(cpu);
}

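/*
 * The paravirt unlock path finds sleeping waiters via a global hash
 * table keyed by lock address; pv_spinlocks_init() allocates that hash
 * during boot.
 */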
extern void __pv_init_lock_hash(void);

static inline void pv_spinlocks_init(void)
{
	__pv_init_lock_hash();
}

#endif

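/*
 * The generic header honours the overrides defined above (marked with
 * the "#define queued_spin_lock queued_spin_lock" pattern), provides
 * defaults for the rest such as queued_spin_trylock(), and maps the
 * arch_spin_*() API onto the queued primitives.
 */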
#include <asm-generic/qspinlock.h>

#endif /* _ASM_POWERPC_QSPINLOCK_H */