// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/asm-prototypes.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

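/*
 * Granularity of the KVM CMA region: the reservation is tracked in
 * 2^18-byte (256 KiB) chunks (this is the order_per_bit argument
 * passed to cma_declare_contiguous() below).
 */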
#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads.
 */
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

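/*
 * Allocate nr_pages of physically contiguous memory for a guest hashed
 * page table from the KVM CMA region.  The allocation is aligned to
 * HPT_ALIGN_PAGES and must be at least one CMA chunk in size.
 */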
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 false);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash page table
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	phys_addr_t selected_size;

	/*
	 * We need CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;

	selected_size = PAGE_ALIGN(memblock_phys_mem_size() * kvm_cma_resv_ratio / 100);
	if (selected_size) {
		pr_info("%s: reserving %ld MiB for global area\n", __func__,
			(unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

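/*
 * Hypercall numbers are multiples of 4, so divide by 4 to index into
 * hcall_real_table; a non-zero entry means that hcall has a real-mode
 * handler.
 */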
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

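/*
 * H_RANDOM hypercall: hand the guest a random number from the PowerNV
 * hardware RNG, returned in GPR4.
 */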
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	int r;

	/* Only need to do the expensive mfmsr() on radix */
	if (kvm_is_radix(vcpu->kvm) && (mfmsr() & MSR_IR))
		r = powernv_get_random_long(&vcpu->arch.regs.gpr[4]);
	else
		r = powernv_get_random_real_mode(&vcpu->arch.regs.gpr[4]);
	if (r)
		return H_SUCCESS;

	return H_HARDWARE;
}

/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* For a nested hypervisor, use the XICS via hcall */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		plpar_hcall_raw(H_IPI, retbuf, get_hard_smp_processor_id(cpu),
				IPI_PRIORITY);
		return;
	}

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xics_on_xive()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
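/*
 * Interrupt every thread whose bit is set in 'active', a bitmap of
 * threads within this virtual core, counting up from vc->pcpu.
 */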
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i, t;
	int cpu0;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->vc[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}

	/*
	 * On POWER9 when running a HPT guest on a radix host (sip != NULL),
	 * we have to interrupt inactive CPU threads to get them to
	 * restore the host LPCR value.
	 */
	if (sip->lpcr_req) {
		if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
			vc = local_paca->kvm_hstate.kvm_vcore;
			cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
			for (t = 1; t < threads_per_core; ++t) {
				if (sip->napped[t])
					kvmhv_rm_send_ipi(cpu0 + t);
			}
		}
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	if (kvmhv_on_pseries()) {
		unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

		rc = plpar_hcall_raw(H_XIRR, retbuf, 0xFF);
		xirr = cpu_to_be32(retbuf[0]);
	} else {
		xics_phys = local_paca->kvm_hstate.xics_phys;
		rc = 0;
		if (!xics_phys)
			rc = opal_int_get_xirr(&xirr, false);
		else
			xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	}
	if (rc < 0)
		return 1;

	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed.
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (kvmhv_on_pseries()) {
			unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

			plpar_hcall_raw(H_IPI, retbuf,
					hard_smp_processor_id(), 0xff);
			plpar_hcall_raw(H_EOI, retbuf, h_xirr);
		} else if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/*
			 * We raced with the host; we need to resend that
			 * IPI, bummer.
			 */
			if (kvmhv_on_pseries()) {
				unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

				plpar_hcall_raw(H_IPI, retbuf,
						hard_smp_processor_id(),
						IPI_PRIORITY);
			} else if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
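/* Are we executing in real mode, i.e. with MSR[DR] clear? */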
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	vcpu->arch.regs.gpr[5] = get_tb();
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (!kvmppc_xics_enabled(vcpu))
		return H_TOO_HARD;
	if (xics_on_xive()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */

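/*
 * Handle an interrupt taken at a point in the KVM entry/exit path
 * where we cannot continue: pass system resets and machine checks to
 * the host handlers, then panic.
 */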
void kvmppc_bad_interrupt(struct pt_regs *regs)
{
	/*
	 * 100 could happen at any time, 200 can happen due to invalid real
	 * address access for example (or any time due to a hardware problem).
	 */
	if (TRAP(regs) == 0x100) {
		get_paca()->in_nmi++;
		system_reset_exception(regs);
		get_paca()->in_nmi--;
	} else if (TRAP(regs) == 0x200) {
		machine_check_exception(regs);
	} else {
		die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
	}
	panic("Bad KVM trap");
}

/*
 * Functions used to switch LPCR HR and UPRT bits on all threads
 * when entering and exiting HPT guests on a radix host.
 */

#define PHASE_REALMODE		1	/* in real mode */
#define PHASE_SET_LPCR		2	/* have set LPCR */
#define PHASE_OUT_OF_GUEST	4	/* have finished executing in guest */
#define PHASE_RESET_LPCR	8	/* have reset LPCR to host value */

#define ALL(p)		(((p) << 24) | ((p) << 16) | ((p) << 8) | (p))

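/*
 * Each thread owns one byte of lpcr_sync.phase[], which aliases the
 * 32-bit lpcr_sync.allphases, so comparing against ALL(phase), which
 * replicates the phase bit into all four bytes, lets us spin until
 * every thread in the core has reported the given phase.
 */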
static void wait_for_sync(struct kvm_split_mode *sip, int phase)
{
	int thr = local_paca->kvm_hstate.tid;

	sip->lpcr_sync.phase[thr] |= phase;
	phase = ALL(phase);
	while ((sip->lpcr_sync.allphases & phase) != phase) {
		HMT_low();
		barrier();
	}
	HMT_medium();
}

void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
{
	unsigned long rb, set;

	/* wait for every other thread to get to real mode */
	wait_for_sync(sip, PHASE_REALMODE);

	/* Set LPCR and LPIDR */
	mtspr(SPRN_LPCR, sip->lpcr_req);
	mtspr(SPRN_LPID, sip->lpidr_req);
	isync();

	/* Invalidate the TLB on thread 0 */
	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_set = 0;
		asm volatile("ptesync" : : : "memory");
		for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
			rb = TLBIEL_INVAL_SET_LPID +
				(set << TLBIEL_INVAL_SET_SHIFT);
			asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
				     "r" (rb), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}

	/* indicate that we have done so and wait for others */
	wait_for_sync(sip, PHASE_SET_LPCR);
	/* order read of sip->lpcr_sync.allphases vs. sip->do_set */
	smp_rmb();
}

/*
 * Called when a thread that has been in the guest needs
 * to reload the host LPCR value - but only on POWER9 when
 * running a HPT guest on a radix host.
 */
void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
{
	/* we're out of the guest... */
	wait_for_sync(sip, PHASE_OUT_OF_GUEST);

	mtspr(SPRN_LPID, 0);
	mtspr(SPRN_LPCR, sip->host_lpcr);
	isync();

	if (local_paca->kvm_hstate.tid == 0) {
		sip->do_restore = 0;
		smp_wmb();	/* order store of do_restore vs. phase */
	}

	wait_for_sync(sip, PHASE_RESET_LPCR);
	smp_mb();
	local_paca->kvm_hstate.kvm_split_mode = NULL;
}

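/*
 * Bring the vcpu out of ceded state and cancel its decrementer wakeup
 * timer if it is running.
 */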
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

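/*
 * Set the guest MSR, forcing an illegal transactional-state (TS) bit
 * combination to a safe value first, and end any cede in progress.
 */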
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr_hv);

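/*
 * Deliver interrupt vector 'vec' to the guest: save the current PC and
 * MSR in SRR0/SRR1 (with srr1_flags OR'd into SRR1), then redirect the
 * vcpu to the vector with a suitably adjusted MSR.
 */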
static void inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	unsigned long msr, pc, new_msr, new_pc;

	msr = kvmppc_get_msr(vcpu);
	pc = kvmppc_get_pc(vcpu);
	new_msr = vcpu->arch.intr_msr;
	new_pc = vec;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(msr))
		new_msr |= MSR_TS_S;
	else
		new_msr |= msr & MSR_TS_MASK;

	/*
	 * Perform MSR and PC adjustment for LPCR[AIL]=3 if it is set and
	 * applicable. AIL=2 is not supported.
	 *
	 * AIL does not apply to SRESET, MCE, or HMI (which is never
	 * delivered to the guest), and does not apply if IR=0 or DR=0.
	 */
	if (vec != BOOK3S_INTERRUPT_SYSTEM_RESET &&
	    vec != BOOK3S_INTERRUPT_MACHINE_CHECK &&
	    (vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&
	    (msr & (MSR_IR|MSR_DR)) == (MSR_IR|MSR_DR)) {
		new_msr |= MSR_IR | MSR_DR;
		new_pc += 0xC000000000004000ULL;
	}

	kvmppc_set_srr0(vcpu, pc);
	kvmppc_set_srr1(vcpu, (msr & SRR1_MSR_BITS) | srr1_flags);
	kvmppc_set_pc(vcpu, new_pc);
	vcpu->arch.shregs.msr = new_msr;
}

void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags)
{
	inject_interrupt(vcpu, vec, srr1_flags);
	kvmppc_end_cede(vcpu);
}
EXPORT_SYMBOL_GPL(kvmppc_inject_interrupt_hv);

/*
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
 */
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
{
	int ext;
	unsigned long lpcr;

	/* Insert EXTERNAL bit into LPCR at the MER bit position */
	ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;
	lpcr = mfspr(SPRN_LPCR);
	lpcr |= ext << LPCR_MER_SH;
	mtspr(SPRN_LPCR, lpcr);
	isync();

	if (vcpu->arch.shregs.msr & MSR_EE) {
		if (ext) {
			inject_interrupt(vcpu, BOOK3S_INTERRUPT_EXTERNAL, 0);
		} else {
			long int dec = mfspr(SPRN_DEC);
			if (!(lpcr & LPCR_LD))
				dec = (int) dec;
			if (dec < 0)
				inject_interrupt(vcpu,
					BOOK3S_INTERRUPT_DECREMENTER, 0);
		}
	}

	if (vcpu->arch.doorbell_request) {
		mtspr(SPRN_DPDES, 1);
		vcpu->arch.vcore->dpdes = 1;
		smp_wmb();
		vcpu->arch.doorbell_request = 0;
	}
}

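/*
 * Flush all of this guest's TLB entries from the core, set by set,
 * using tlbiel, then flush the ERAT where the hardware requires it.
 */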
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}

void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;

	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pcpu = cpu_first_tlb_thread_sibling(pcpu);

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (cpumask_test_cpu(pcpu, need_tlb_flush)) {
		flush_guest_tlb(kvm);

		/* Clear the bit after the TLB flush */
		cpumask_clear_cpu(pcpu, need_tlb_flush);
	}
}
EXPORT_SYMBOL_GPL(kvmppc_check_need_tlb_flush);