/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);
	return be32_to_cpu(yield_count);
}
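
/*
 * Illustrative note, not part of this header's API: the hypervisor is
 * understood to bump the yield count on each preempt and dispatch of the
 * vCPU, so an odd value means "currently preempted" and two differing
 * snapshots mean the vCPU has run in between. A minimal check using the
 * helpers in this file might look like:
 *
 *	u32 yield_count = yield_count_of(cpu);
 *
 *	if (yield_count & 1)
 *		yield_to_preempted(cpu, yield_count);
 *
 * Passing the same snapshot to H_CONFER lets the hypervisor drop the
 * confer if the target has already been dispatched again.
 */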

/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it cannot acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so then
 * recursing on that lock will cause the task to then queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif
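
/*
 * Illustrative sketch, not the kernel's actual lock code: a paravirt-aware
 * spin loop can confer its time slice to a preempted lock holder instead of
 * burning cycles, and the unlock side can prod a waiter that yielded. The
 * demo_lock type and its fields below are hypothetical.
 *
 *	struct demo_lock {
 *		int locked;
 *		int owner_cpu;
 *	};
 *
 *	static void demo_spin_wait(struct demo_lock *lock)
 *	{
 *		while (READ_ONCE(lock->locked)) {
 *			int owner = READ_ONCE(lock->owner_cpu);
 *			u32 yield_count = yield_count_of(owner);
 *
 *			if (yield_count & 1)
 *				yield_to_preempted(owner, yield_count);
 *			else
 *				cpu_relax();
 *		}
 *	}
 *
 * The matching unlock slowpath would call prod_cpu() on a waiter that has
 * conferred its cycles, which is why both hcall wrappers above use the
 * notrace variant.
 */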

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}
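
/*
 * Illustrative sketch, assuming a hypothetical busy-wait on a flag set by
 * code running on holder_cpu: generic code can use vcpu_is_preempted() to
 * stop spinning on a vCPU that the hypervisor is not currently running and
 * fall back to a sleeping wait instead.
 *
 *	while (!READ_ONCE(*done)) {
 *		if (vcpu_is_preempted(holder_cpu))
 *			break;
 *		cpu_relax();
 *	}
 */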

static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */