1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_X86_KVM_PARA_H
3*4882a593Smuzhiyun #define _ASM_X86_KVM_PARA_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <asm/processor.h>
6*4882a593Smuzhiyun #include <asm/alternative.h>
7*4882a593Smuzhiyun #include <linux/interrupt.h>
8*4882a593Smuzhiyun #include <uapi/asm/kvm_para.h>
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #ifdef CONFIG_KVM_GUEST
11*4882a593Smuzhiyun bool kvm_check_and_clear_guest_paused(void);
12*4882a593Smuzhiyun #else
/*
 * !CONFIG_KVM_GUEST stub: without KVM guest support the guest can never
 * have been paused by a hypervisor, so always report false.
 */
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
17*4882a593Smuzhiyun #endif /* CONFIG_KVM_GUEST */
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun #define KVM_HYPERCALL \
20*4882a593Smuzhiyun ALTERNATIVE("vmcall", "vmmcall", X86_FEATURE_VMMCALL)
21*4882a593Smuzhiyun
/*
 * For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction.  The hypervisor may replace it with something else
 * but only the instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
31*4882a593Smuzhiyun
/*
 * kvm_hypercall0() - issue a KVM hypercall that takes no arguments.
 * @nr: hypercall number, passed to the hypervisor in rax.
 *
 * Returns the hypervisor's result, read back from rax.  The "memory"
 * clobber prevents the compiler from caching memory contents across
 * the transition to the hypervisor.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}
41*4882a593Smuzhiyun
/*
 * kvm_hypercall1() - issue a KVM hypercall with one argument.
 * @nr: hypercall number, passed in rax.
 * @p1: first argument, passed in rbx (the "b" constraint).
 *
 * Returns the hypervisor's result from rax.
 */
static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}
51*4882a593Smuzhiyun
/*
 * kvm_hypercall2() - issue a KVM hypercall with two arguments.
 * @nr: hypercall number, passed in rax.
 * @p1: first argument, passed in rbx.
 * @p2: second argument, passed in rcx (the "c" constraint).
 *
 * Returns the hypervisor's result from rax.
 */
static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}
62*4882a593Smuzhiyun
/*
 * kvm_hypercall3() - issue a KVM hypercall with three arguments.
 * @nr: hypercall number, passed in rax.
 * @p1: first argument, passed in rbx.
 * @p2: second argument, passed in rcx.
 * @p3: third argument, passed in rdx (the "d" constraint).
 *
 * Returns the hypervisor's result from rax.
 */
static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}
73*4882a593Smuzhiyun
/*
 * kvm_hypercall4() - issue a KVM hypercall with four arguments.
 * @nr: hypercall number, passed in rax.
 * @p1: first argument, passed in rbx.
 * @p2: second argument, passed in rcx.
 * @p3: third argument, passed in rdx.
 * @p4: fourth argument, passed in rsi (the "S" constraint).
 *
 * Returns the hypervisor's result from rax.
 */
static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;

	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun #ifdef CONFIG_KVM_GUEST
87*4882a593Smuzhiyun void kvmclock_init(void);
88*4882a593Smuzhiyun void kvmclock_disable(void);
89*4882a593Smuzhiyun bool kvm_para_available(void);
90*4882a593Smuzhiyun unsigned int kvm_arch_para_features(void);
91*4882a593Smuzhiyun unsigned int kvm_arch_para_hints(void);
92*4882a593Smuzhiyun void kvm_async_pf_task_wait_schedule(u32 token);
93*4882a593Smuzhiyun void kvm_async_pf_task_wake(u32 token);
94*4882a593Smuzhiyun u32 kvm_read_and_reset_apf_flags(void);
95*4882a593Smuzhiyun bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token);
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun DECLARE_STATIC_KEY_FALSE(kvm_async_pf_enabled);
98*4882a593Smuzhiyun
kvm_handle_async_pf(struct pt_regs * regs,u32 token)99*4882a593Smuzhiyun static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
100*4882a593Smuzhiyun {
101*4882a593Smuzhiyun if (static_branch_unlikely(&kvm_async_pf_enabled))
102*4882a593Smuzhiyun return __kvm_handle_async_pf(regs, token);
103*4882a593Smuzhiyun else
104*4882a593Smuzhiyun return false;
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun #ifdef CONFIG_PARAVIRT_SPINLOCKS
108*4882a593Smuzhiyun void __init kvm_spinlock_init(void);
109*4882a593Smuzhiyun #else /* !CONFIG_PARAVIRT_SPINLOCKS */
/* !CONFIG_PARAVIRT_SPINLOCKS stub: no paravirt spinlock setup needed. */
static inline void kvm_spinlock_init(void)
{
}
113*4882a593Smuzhiyun #endif /* CONFIG_PARAVIRT_SPINLOCKS */
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun #else /* CONFIG_KVM_GUEST */
116*4882a593Smuzhiyun #define kvm_async_pf_task_wait_schedule(T) do {} while(0)
117*4882a593Smuzhiyun #define kvm_async_pf_task_wake(T) do {} while(0)
118*4882a593Smuzhiyun
/* !CONFIG_KVM_GUEST stub: KVM paravirt features are never available. */
static inline bool kvm_para_available(void)
{
	return false;
}
123*4882a593Smuzhiyun
/* !CONFIG_KVM_GUEST stub: no paravirt feature bits are advertised. */
static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}
128*4882a593Smuzhiyun
/* !CONFIG_KVM_GUEST stub: no paravirt hint bits are advertised. */
static inline unsigned int kvm_arch_para_hints(void)
{
	return 0;
}
133*4882a593Smuzhiyun
/* !CONFIG_KVM_GUEST stub: there are never any async-PF flags to read. */
static inline u32 kvm_read_and_reset_apf_flags(void)
{
	return 0;
}
138*4882a593Smuzhiyun
/*
 * !CONFIG_KVM_GUEST stub: async page faults cannot occur, so the event
 * is never handled here.
 */
static __always_inline bool kvm_handle_async_pf(struct pt_regs *regs, u32 token)
{
	return false;
}
143*4882a593Smuzhiyun #endif
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun #endif /* _ASM_X86_KVM_PARA_H */
146