/* SPDX-License-Identifier: GPL-2.0 */

/*
 * Linux-specific definitions for managing interactions with Microsoft's
 * Hyper-V hypervisor. The definitions in this file are architecture
 * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
 * that are specific to architecture <arch>.
 *
 * Definitions that are specified in the Hyper-V Top Level Functional
 * Spec (TLFS) should not go in this file, but should instead go in
 * hyperv-tlfs.h.
 *
 * Copyright (C) 2019, Microsoft, Inc.
 *
 * Author : Michael Kelley <mikelley@microsoft.com>
 */

#ifndef _ASM_GENERIC_MSHYPERV_H
#define _ASM_GENERIC_MSHYPERV_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <asm/ptrace.h>
#include <asm/hyperv-tlfs.h>

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};
extern struct ms_hyperv_info ms_hyperv;

extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);

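/*
 * Illustrative sketch (not part of this header): a hypercall with an 8-byte
 * input and no output can use the "fast" form, which passes the input in
 * registers instead of a memory page. For example, VMBus signals a host
 * event roughly as:
 *
 *	hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
 *
 * Hypercalls with larger inputs or outputs go through hv_do_hypercall()
 * with hypervisor-visible input/output pages.
 */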

/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id = 0;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}
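/*
 * Illustrative sketch (not part of this header): on x86, for example, the
 * guest ID is typically built from the running kernel version and written
 * to the guest OS ID synthetic MSR during Hyper-V initialization:
 *
 *	guest_id = generate_guest_id(0, LINUX_VERSION_CODE, 0);
 *	wrmsrl(HV_X64_MSR_GUEST_OS_ID, guest_id);
 */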

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may have already cleared the header
	 * and the host may have already delivered some other message there.
	 * If we blindly write msg->header.message_type, we're going to lose
	 * it. We can still lose a message of the same type, but we count on
	 * the fact that there can only be one CHANNELMSG_UNLOAD_RESPONSE and
	 * we don't care about other messages on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (i.e. set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_signal_eom();
	}
}
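/*
 * Illustrative sketch (not part of this header): a per-CPU message handler
 * consumes a message and then releases its slot so the hypervisor can
 * deliver the next one, roughly:
 *
 *	vmbus_signal_eom(msg, message_type);
 */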

int hv_setup_vmbus_irq(int irq, void (*handler)(void));
void hv_remove_vmbus_irq(void);
void hv_enable_vmbus_irq(void);
void hv_disable_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

extern int vmbus_interrupt;

#if IS_ENABLED(CONFIG_HYPERV)
/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, which is
 * useful when making hypercalls that target specific processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}

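/*
 * Illustrative sketch (not part of this header): a caller targeting the
 * current CPU would look up its own VP number as:
 *
 *	vp = hv_cpu_number_to_vp_number(smp_processor_id());
 */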
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank, as hv_tlb_flush_ex
	 * structs are not cleared between calls; we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
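/*
 * Illustrative sketch (not part of this header): callers issuing "ex"
 * hypercalls that take a sparse VP set, such as the x86 TLB-flush helpers,
 * convert the cpumask first and fall back to a full-set hypercall if any
 * CPU does not yet have a valid VP index:
 *
 *	nr_bank = cpumask_to_vpset(&flush->hv_vp_set, cpus);
 *	if (nr_bank < 0)
 *		goto do_full_flush;
 */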

void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */

#if IS_ENABLED(CONFIG_HYPERV)
extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
extern void hv_remove_stimer0_irq(int irq);
#endif

#endif /* _ASM_GENERIC_MSHYPERV_H */