// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/time.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"


/*
 * Virtual mode variants of the hcalls for use on radix/radix
 * with AIL. They require the VCPU's VP to be "pushed"
 *
 * We still instantiate them here because we use some of the
 * generated utility functions as well in this file.
 */
#define XIVE_RUNTIME_CHECKS
#define X_PFX xive_vm_
#define X_STATIC static
#define X_STAT_PFX stat_vm_
#define __x_tima		xive_tima
#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_mmio))
#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_mmio))
#define __x_writeb	__raw_writeb
#define __x_readw	__raw_readw
#define __x_readq	__raw_readq
#define __x_writeq	__raw_writeq

#include "book3s_xive_template.c"
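/*
 * Note: with the X_PFX definition above, the template expands into the
 * xive_vm_* flavour of the hcall handlers and helpers, e.g.
 * xive_vm_esb_load() and xive_vm_source_eoi() used throughout this file.
 */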

/*
 * We leave a gap of a couple of interrupts in the queue to
 * account for the IPI and additional safety guard.
 */
#define XIVE_Q_GAP	2
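/*
 * For instance, with a 256-entry queue this leaves at most 254
 * accounted interrupts (see xive_try_pick_queue() below).
 */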

/*
 * Push a vcpu's context to the XIVE on guest entry.
 * This assumes we are in virtual mode (MMU on)
 */
void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu)
{
	void __iomem *tima = local_paca->kvm_hstate.xive_tima_virt;
	u64 pq;

	/*
	 * Nothing to do if the platform doesn't have a XIVE
	 * or this vCPU doesn't have its own XIVE context
	 * (e.g. because it's not using an in-kernel interrupt controller).
	 */
	if (!tima || !vcpu->arch.xive_cam_word)
		return;

	eieio();
	__raw_writeq(vcpu->arch.xive_saved_state.w01, tima + TM_QW1_OS);
	__raw_writel(vcpu->arch.xive_cam_word, tima + TM_QW1_OS + TM_WORD2);
	vcpu->arch.xive_pushed = 1;
	eieio();

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	vcpu->arch.irq_pending = 0;

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	if (vcpu->arch.xive_esc_on) {
		pq = __raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
						  XIVE_ESB_SET_PQ_01));
		mb();

		/*
		 * We have a possible subtle race here: The escalation
		 * interrupt might have fired and be on its way to the
		 * host queue while we mask it, and if we unmask it
		 * early enough (re-cede right away), there is a
		 * theoretical possibility that it fires again, thus
		 * landing in the target queue more than once which is
		 * a big no-no.
		 *
		 * Fortunately, solving this is rather easy. If the
		 * above load setting PQ to 01 returns a previous
		 * value where P is set, then we know the escalation
		 * interrupt is somewhere on its way to the host. In
		 * that case we simply don't clear the xive_esc_on
		 * flag below. It will be eventually cleared by the
		 * handler for the escalation interrupt.
		 *
		 * Then, when doing a cede, we check that flag again
		 * before re-enabling the escalation interrupt, and if
		 * set, we abort the cede.
		 */
		if (!(pq & XIVE_ESB_VAL_P))
			/* Now P is 0, we can clear the flag */
			vcpu->arch.xive_esc_on = 0;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_xive_push_vcpu);

/*
 * This is a simple trigger for a generic XIVE IRQ. This must
 * only be called for interrupts that support a trigger page
 */
static bool xive_irq_trigger(struct xive_irq_data *xd)
{
	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))
		return false;

	/* Those interrupts should always have a trigger page */
	if (WARN_ON(!xd->trig_mmio))
		return false;

	out_be64(xd->trig_mmio, 0);

	return true;
}

static irqreturn_t xive_esc_irq(int irq, void *data)
{
	struct kvm_vcpu *vcpu = data;

	vcpu->arch.irq_pending = 1;
	smp_mb();
	if (vcpu->arch.ceded)
		kvmppc_fast_vcpu_kick(vcpu);

	/* Since we have the no-EOI flag, the interrupt is effectively
	 * disabled now. Clearing xive_esc_on means we won't bother
	 * doing so on the next entry.
	 *
	 * This also allows the entry code to know that if a PQ combination
	 * of 10 is observed while xive_esc_on is true, it means the queue
	 * contains an unprocessed escalation interrupt. We don't make use of
	 * that knowledge today but might (see comment in book3s_hv_rmhandlers.S)
	 */
	vcpu->arch.xive_esc_on = false;

	/* This orders xive_esc_on = false vs. subsequent stale_p = true */
	smp_wmb();	/* goes with smp_mb() in cleanup_single_escalation */

	return IRQ_HANDLED;
}

int kvmppc_xive_attach_escalation(struct kvm_vcpu *vcpu, u8 prio,
				  bool single_escalation)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];
	char *name = NULL;
	int rc;

	/* Already there ? */
	if (xc->esc_virq[prio])
		return 0;

	/* Hook up the escalation interrupt */
	xc->esc_virq[prio] = irq_create_mapping(NULL, q->esc_irq);
	if (!xc->esc_virq[prio]) {
		pr_err("Failed to map escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		return -EIO;
	}

	if (single_escalation)
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num);
	else
		name = kasprintf(GFP_KERNEL, "kvm-%d-%d-%d",
				 vcpu->kvm->arch.lpid, xc->server_num, prio);
	if (!name) {
		pr_err("Failed to allocate escalation irq name for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		rc = -ENOMEM;
		goto error;
	}

	pr_devel("Escalation %s irq %d (prio %d)\n", name, xc->esc_virq[prio], prio);

	rc = request_irq(xc->esc_virq[prio], xive_esc_irq,
			 IRQF_NO_THREAD, name, vcpu);
	if (rc) {
		pr_err("Failed to request escalation interrupt for queue %d of VCPU %d\n",
		       prio, xc->server_num);
		goto error;
	}
	xc->esc_virq_names[prio] = name;

	/* In single escalation mode, we grab the ESB MMIO of the
	 * interrupt and mask it. Also populate the VCPU v/raddr
	 * of the ESB page for use by asm entry/exit code. Finally
	 * set the XIVE_IRQ_NO_EOI flag which will prevent the
	 * core code from performing an EOI on the escalation
	 * interrupt, thus leaving it effectively masked after
	 * it fires once.
	 */
	if (single_escalation) {
		struct irq_data *d = irq_get_irq_data(xc->esc_virq[prio]);
		struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
		vcpu->arch.xive_esc_raddr = xd->eoi_page;
		vcpu->arch.xive_esc_vaddr = (__force u64)xd->eoi_mmio;
		xd->flags |= XIVE_IRQ_NO_EOI;
	}

	return 0;
error:
	irq_dispose_mapping(xc->esc_virq[prio]);
	xc->esc_virq[prio] = 0;
	kfree(name);
	return rc;
}

static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = xc->xive;
	struct xive_q *q = &xc->queues[prio];
	void *qpage;
	int rc;

	if (WARN_ON(q->qpage))
		return 0;

	/* Allocate the queue and retrieve info on current node for now */
	qpage = (__be32 *)__get_free_pages(GFP_KERNEL, xive->q_page_order);
	if (!qpage) {
		pr_err("Failed to allocate queue %d for VCPU %d\n",
		       prio, xc->server_num);
		return -ENOMEM;
	}
	memset(qpage, 0, 1 << xive->q_order);

	/*
	 * Reconfigure the queue. This will set q->qpage only once the
	 * queue is fully configured. This is a requirement for prio 0
	 * as we will stop doing EOIs for every IPI as soon as we observe
	 * qpage being non-NULL, and instead will only EOI when we receive
	 * corresponding queue 0 entries
	 */
	rc = xive_native_configure_queue(xc->vp_id, q, prio, qpage,
					 xive->q_order, true);
	if (rc)
		pr_err("Failed to configure queue %d for VCPU %d\n",
		       prio, xc->server_num);
	return rc;
}

/* Called with xive->lock held */
static int xive_check_provisioning(struct kvm *kvm, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvm_vcpu *vcpu;
	int i, rc;

	lockdep_assert_held(&xive->lock);

	/* Already provisioned ? */
	if (xive->qmap & (1 << prio))
		return 0;

	pr_devel("Provisioning prio... %d\n", prio);

	/* Provision each VCPU and enable escalations if needed */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_provision_queue(vcpu, prio);
		if (rc == 0 && !xive->single_escalation)
			kvmppc_xive_attach_escalation(vcpu, prio,
						      xive->single_escalation);
		if (rc)
			return rc;
	}

	/* Order previous stores and mark it as provisioned */
	mb();
	xive->qmap |= (1 << prio);
	return 0;
}

static void xive_inc_q_pending(struct kvm *kvm, u32 server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_warn("%s: Can't find server %d\n", __func__, server);
		return;
	}
	xc = vcpu->arch.xive_vcpu;
	if (WARN_ON(!xc))
		return;

	q = &xc->queues[prio];
	atomic_inc(&q->pending_count);
}

static int xive_try_pick_queue(struct kvm_vcpu *vcpu, u8 prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q;
	u32 max;

	if (WARN_ON(!xc))
		return -ENXIO;
	if (!xc->valid)
		return -ENXIO;

	q = &xc->queues[prio];
	if (WARN_ON(!q->qpage))
		return -ENXIO;

	/* Calculate max number of interrupts in that queue. */
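	/* q->msk is the queue index mask (entry count minus one), so
	 * q->msk + 1 is the queue size; XIVE_Q_GAP slots stay free for
	 * the IPI and as a safety margin.
	 */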
	max = (q->msk + 1) - XIVE_Q_GAP;
	return atomic_add_unless(&q->count, 1, max) ? 0 : -EBUSY;
}

int kvmppc_xive_select_target(struct kvm *kvm, u32 *server, u8 prio)
{
	struct kvm_vcpu *vcpu;
	int i, rc;

	/* Locate target server */
	vcpu = kvmppc_xive_find_server(kvm, *server);
	if (!vcpu) {
		pr_devel("Can't find server %d\n", *server);
		return -EINVAL;
	}

	pr_devel("Finding irq target on 0x%x/%d...\n", *server, prio);

	/* Try pick it */
	rc = xive_try_pick_queue(vcpu, prio);
	if (rc == 0)
		return rc;

	pr_devel(" .. failed, looking up candidate...\n");

	/* Failed, pick another VCPU */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!vcpu->arch.xive_vcpu)
			continue;
		rc = xive_try_pick_queue(vcpu, prio);
		if (rc == 0) {
			*server = vcpu->arch.xive_vcpu->server_num;
			pr_devel(" found on 0x%x/%d\n", *server, prio);
			return rc;
		}
	}
	pr_devel(" no available target !\n");

	/* No available target ! */
	return -EBUSY;
}

static u8 xive_lock_and_mask(struct kvmppc_xive *xive,
			     struct kvmppc_xive_src_block *sb,
			     struct kvmppc_xive_irq_state *state)
{
	struct xive_irq_data *xd;
	u32 hw_num;
	u8 old_prio;
	u64 val;

	/*
	 * Take the lock, set masked, try again if racing
	 * with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		old_prio = state->guest_priority;
		state->guest_priority = MASKED;
		mb();
		if (!state->in_eoi)
			break;
		state->guest_priority = old_prio;
		arch_spin_unlock(&sb->lock);
	}

	/* No change ? Bail */
	if (old_prio == MASKED)
		return old_prio;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * If the interrupt is marked as needing masking via
	 * firmware, we do it here. Firmware masking however
	 * is "lossy", it won't return the old p and q bits
	 * and won't set the interrupt to a state where it will
	 * record queued ones. If this is an issue we should do
	 * lazy masking instead.
	 *
	 * For now, we work around this in unmask by forcing
	 * an interrupt whenever we unmask a non-LSI via FW
	 * (if ever).
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  kvmppc_xive_vp(xive, state->act_server),
					  MASKED, state->number);
		/* set old_p so we can track if an H_EOI was done */
		state->old_p = true;
		state->old_q = false;
	} else {
		/* Set PQ to 10, return old P and old Q and remember them */
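		/*
		 * In the value returned by the ESB load, bit 1
		 * (XIVE_ESB_VAL_P) is P and bit 0 (XIVE_ESB_VAL_Q) is Q.
		 */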
		val = xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_10);
		state->old_p = !!(val & 2);
		state->old_q = !!(val & 1);

		/*
		 * Synchronize hardware to ensure the queues are updated
		 * when masking
		 */
		xive_native_sync_source(hw_num);
	}

	return old_prio;
}

static void xive_lock_for_unmask(struct kvmppc_xive_src_block *sb,
				 struct kvmppc_xive_irq_state *state)
{
	/*
	 * Take the lock, try again if racing with H_EOI
	 */
	for (;;) {
		arch_spin_lock(&sb->lock);
		if (!state->in_eoi)
			break;
		arch_spin_unlock(&sb->lock);
	}
}

static void xive_finish_unmask(struct kvmppc_xive *xive,
			       struct kvmppc_xive_src_block *sb,
			       struct kvmppc_xive_irq_state *state,
			       u8 prio)
{
	struct xive_irq_data *xd;
	u32 hw_num;

	/* If we aren't changing a thing, move on */
	if (state->guest_priority != MASKED)
		goto bail;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	/*
	 * See comment in xive_lock_and_mask() concerning masking
	 * via firmware.
	 */
	if (xd->flags & OPAL_XIVE_IRQ_MASK_VIA_FW) {
		xive_native_configure_irq(hw_num,
					  kvmppc_xive_vp(xive, state->act_server),
					  state->act_priority, state->number);
		/* If an EOI is needed, do it here */
		if (!state->old_p)
			xive_vm_source_eoi(hw_num, xd);
		/* If this is not an LSI, force a trigger */
		if (!(xd->flags & OPAL_XIVE_IRQ_LSI))
			xive_irq_trigger(xd);
		goto bail;
	}

	/* Old Q set, set PQ to 11 */
	if (state->old_q)
		xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * If not old P, then perform an "effective" EOI,
	 * on the source. This will handle the cases where
	 * FW EOI is needed.
	 */
	if (!state->old_p)
		xive_vm_source_eoi(hw_num, xd);

	/* Synchronize ordering and mark unmasked */
	mb();
bail:
	state->guest_priority = prio;
}

/*
 * Target an interrupt to a given server/prio, this will fall back
 * to another server if necessary and perform the HW targeting
 * updates as needed
 *
 * NOTE: Must be called with the state lock held
 */
static int xive_target_interrupt(struct kvm *kvm,
				 struct kvmppc_xive_irq_state *state,
				 u32 server, u8 prio)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	u32 hw_num;
	int rc;

	/*
	 * This will return a tentative server and actual
	 * priority. The count for that new target will have
	 * already been incremented.
	 */
	rc = kvmppc_xive_select_target(kvm, &server, prio);

	/*
	 * We failed to find a target ? Not much we can do
	 * at least until we support the GIQ.
	 */
	if (rc)
		return rc;

	/*
	 * Increment the old queue pending count if there
	 * was one so that the old queue count gets adjusted later
	 * when observed to be empty.
	 */
	if (state->act_priority != MASKED)
		xive_inc_q_pending(kvm,
				   state->act_server,
				   state->act_priority);
	/*
	 * Update state and HW
	 */
	state->act_priority = prio;
	state->act_server = server;

	/* Get the right irq */
	kvmppc_xive_select_irq(state, &hw_num, NULL);

	return xive_native_configure_irq(hw_num,
					 kvmppc_xive_vp(xive, server),
					 prio, state->number);
}

/*
 * Targeting rules: In order to avoid losing track of
 * pending interrupts across mask and unmask, which would
 * allow queue overflows, we implement the following rules:
 *
 * - Unless it was never enabled (or we run out of capacity)
 *   an interrupt is always targeted at a valid server/queue
 *   pair even when "masked" by the guest. This pair tends to
 *   be the last one used but it can be changed under some
 *   circumstances. That allows us to separate targeting
 *   from masking, we only handle accounting during (re)targeting,
 *   this also allows us to let an interrupt drain into its target
 *   queue after masking, avoiding complex schemes to remove
 *   interrupts out of remote processor queues.
 *
 * - When masking, we set PQ to 10 and save the previous value
 *   of P and Q.
 *
 * - When unmasking, if saved Q was set, we set PQ to 11
 *   otherwise we leave PQ to the HW state which will be either
 *   10 if nothing happened or 11 if the interrupt fired while
 *   masked. Effectively we are OR'ing the previous Q into the
 *   HW Q.
 *
 *   Then if saved P is clear, we do an effective EOI (Q->P->Trigger)
 *   which will unmask the interrupt and shoot a new one if Q was
 *   set.
 *
 *   Otherwise (saved P is set) we leave PQ unchanged (so 10 or 11,
 *   effectively meaning an H_EOI from the guest is still expected
 *   for that interrupt).
 *
 * - If H_EOI occurs while masked, we clear the saved P.
 *
 * - When changing target, we account on the new target and
 *   increment a separate "pending" counter on the old one.
 *   This pending counter will be used to decrement the old
 *   target's count when its queue has been observed empty.
 */
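/*
 * A worked example of the rules above (a sketch, not from the
 * original comment): an unmasked interrupt sits at PQ=00. Masking
 * loads PQ=10 and saves the returned P/Q (here 0/0). If the
 * interrupt fires while masked, the ESB latches Q, moving the HW
 * state to 11. On unmask, saved Q is 0 so PQ is left as-is, and
 * since saved P is 0 we do the effective EOI, which re-enables the
 * source and shoots a new interrupt because Q was set.
 */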

int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
			 u32 priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u8 new_act_prio;
	int rc = 0;
	u16 idx;

	if (!xive)
		return -ENODEV;

	pr_devel("set_xive ! irq 0x%x server 0x%x prio %d\n",
		 irq, server, priority);

	/* First, check provisioning of queues */
	if (priority != MASKED) {
		mutex_lock(&xive->lock);
		rc = xive_check_provisioning(xive->kvm,
					     xive_prio_from_guest(priority));
		mutex_unlock(&xive->lock);
	}
	if (rc) {
		pr_devel(" provisioning failure %d !\n", rc);
		return rc;
	}

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * We first handle masking/unmasking since the locking
	 * might need to be retried due to EOIs, we'll handle
	 * targeting changes later. These functions will return
	 * with the SB lock held.
	 *
	 * xive_lock_and_mask() will also set state->guest_priority
	 * but won't otherwise change other fields of the state.
	 *
	 * xive_lock_for_unmask will not actually unmask, this will
	 * be done later by xive_finish_unmask() once the targeting
	 * has been done, so we don't try to unmask an interrupt
	 * that hasn't yet been targeted.
	 */
	if (priority == MASKED)
		xive_lock_and_mask(xive, sb, state);
	else
		xive_lock_for_unmask(sb, state);


	/*
	 * Then we handle targeting.
	 *
	 * First calculate a new "actual priority"
	 */
	new_act_prio = state->act_priority;
	if (priority != MASKED)
		new_act_prio = xive_prio_from_guest(priority);

	pr_devel(" new_act_prio=%x act_server=%x act_prio=%x\n",
		 new_act_prio, state->act_server, state->act_priority);

	/*
	 * Then check if we actually need to change anything,
	 *
	 * The condition for re-targeting the interrupt is that
	 * we have a valid new priority (new_act_prio is not 0xff)
	 * and either the server or the priority changed.
	 *
	 * Note: If act_priority was ff and the new priority is
	 * also ff, we don't do anything and leave the interrupt
	 * untargeted. An attempt of doing an int_on on an
	 * untargeted interrupt will fail. If that is a problem
	 * we could initialize interrupts with a valid default
	 */

	if (new_act_prio != MASKED &&
	    (state->act_server != server ||
	     state->act_priority != new_act_prio))
		rc = xive_target_interrupt(kvm, state, server, new_act_prio);

	/*
	 * Perform the final unmasking of the interrupt source
	 * if necessary
	 */
	if (priority != MASKED)
		xive_finish_unmask(xive, sb, state, priority);

	/*
	 * Finally, update saved_priority to match. Only int_on/off
	 * set this field to a different value.
	 */
	state->saved_priority = priority;

	arch_spin_unlock(&sb->lock);
	return rc;
}

int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
			 u32 *priority)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];
	arch_spin_lock(&sb->lock);
	*server = state->act_server;
	*priority = state->guest_priority;
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_on(irq=0x%x)\n", irq);

	/*
	 * Check if interrupt was not targeted
	 */
	if (state->act_priority == MASKED) {
		pr_devel("int_on on untargeted interrupt\n");
		return -EINVAL;
	}

	/* If saved_priority is 0xff, do nothing */
	if (state->saved_priority == MASKED)
		return 0;

	/*
	 * Lock and unmask it.
	 */
	xive_lock_for_unmask(sb, state);
	xive_finish_unmask(xive, sb, state, state->saved_priority);
	arch_spin_unlock(&sb->lock);

	return 0;
}

int kvmppc_xive_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	if (!xive)
		return -ENODEV;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	pr_devel("int_off(irq=0x%x)\n", irq);

	/*
	 * Lock and mask
	 */
	state->saved_priority = xive_lock_and_mask(xive, sb, state);
	arch_spin_unlock(&sb->lock);

	return 0;
}

static bool xive_restore_pending_irq(struct kvmppc_xive *xive, u32 irq)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u16 idx;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb)
		return false;
	state = &sb->irq_state[idx];
	if (!state->valid)
		return false;

	/*
	 * Trigger the IPI. This assumes we never restore a pass-through
	 * interrupt which should be safe enough
	 */
	xive_irq_trigger(&state->ipi_data);

	return true;
}

u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	if (!xc)
		return 0;

	/* Return the per-cpu state for state saving/migration */
	return (u64)xc->cppr << KVM_REG_PPC_ICP_CPPR_SHIFT |
	       (u64)xc->mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT |
	       (u64)0xff << KVM_REG_PPC_ICP_PPRI_SHIFT;
}

int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	u8 cppr, mfrr;
	u32 xisr;

	if (!xc || !xive)
		return -ENOENT;

	/* Grab individual state fields. We don't use pending_pri */
	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;

	pr_devel("set_icp vcpu %d cppr=0x%x mfrr=0x%x xisr=0x%x\n",
		 xc->server_num, cppr, mfrr, xisr);

	/*
	 * We can't update the state of a "pushed" VCPU, but that
	 * shouldn't happen because the vcpu->mutex makes running a
	 * vcpu mutually exclusive with doing one_reg get/set on it.
	 */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EIO;

	/* Update VCPU HW saved state */
	vcpu->arch.xive_saved_state.cppr = cppr;
	xc->hw_cppr = xc->cppr = cppr;

	/*
	 * Update MFRR state. If it's not 0xff, we mark the VCPU as
	 * having a pending MFRR change, which will re-evaluate the
	 * target. The VCPU will thus potentially get a spurious
	 * interrupt but that's not a big deal.
	 */
	xc->mfrr = mfrr;
	if (mfrr < cppr)
		xive_irq_trigger(&xc->vp_ipi_data);

	/*
	 * Now saved XIRR is "interesting". It means there's something in
	 * the legacy "1 element" queue... for an IPI we simply ignore it,
	 * as the MFRR restore will handle that. For anything else we need
	 * to force a resend of the source.
	 * However the source may not have been set up yet. If that's the
	 * case, we keep that info and increment a counter in the xive to
	 * tell subsequent xive_set_source() to go look.
	 */
	if (xisr > XICS_IPI && !xive_restore_pending_irq(xive, xisr)) {
		xc->delayed_irq = xisr;
		xive->delayed_irqs++;
		pr_devel(" xisr restore delayed\n");
	}

	return 0;
}

int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct irq_data *host_data = irq_desc_get_irq_data(host_desc);
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(host_data);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("set_mapped girq 0x%lx host HW irq 0x%x...\n", guest_irq, hw_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mark the passed-through interrupt as going to a VCPU,
	 * this will prevent further EOIs and similar operations
	 * from the XIVE code. It will also mask the interrupt
	 * to either PQ=10 or 11 state, the latter if the interrupt
	 * is pending. This will allow us to unmask or retrigger it
	 * after routing it to the guest with a simple EOI.
	 *
	 * The "state" argument is a "token", all it needs is to be
	 * non-NULL to switch to passed-through or NULL for the
	 * other way around. We may not yet have an actual VCPU
	 * target here and we don't really care.
	 */
	rc = irq_set_vcpu_affinity(host_irq, state);
	if (rc) {
		pr_err("Failed to set VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/*
	 * Mask and read state of IPI. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IPI prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/* Turn the IPI hard off */
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped)
		xive->ops->reset_mapped(kvm, guest_irq);

	/* Grab info about irq */
	state->pt_number = hw_irq;
	state->pt_data = irq_data_get_irq_handler_data(host_data);

	/*
	 * Configure the IRQ to match the existing configuration of
	 * the IPI if it was already targeted. Otherwise this will
	 * mask the interrupt in a lossy way (act_priority is 0xff)
	 * which is fine for a never started interrupt.
	 */
	xive_native_configure_irq(hw_irq,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * We do an EOI to enable the interrupt (and retrigger if needed)
	 * if the guest has the interrupt unmasked and the P bit was *not*
	 * set in the IPI. If it was set, we know a slot may still be in
	 * use in the target queue thus we have to wait for a guest
	 * originated EOI
	 */
	if (prio != MASKED && !state->old_p)
		xive_vm_source_eoi(hw_irq, state->pt_data);

	/* Clear old_p/old_q as they are no longer relevant */
	state->old_p = state->old_q = false;

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_set_mapped);

int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
			   struct irq_desc *host_desc)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	unsigned int host_irq = irq_desc_get_irq(host_desc);
	u16 idx;
	u8 prio;
	int rc;

	if (!xive)
		return -ENODEV;

	pr_devel("clr_mapped girq 0x%lx...\n", guest_irq);

	sb = kvmppc_xive_find_source(xive, guest_irq, &idx);
	if (!sb)
		return -EINVAL;
	state = &sb->irq_state[idx];

	/*
	 * Mask and read state of IRQ. We need to know if its P bit
	 * is set as that means it's potentially already using a
	 * queue entry in the target
	 */
	prio = xive_lock_and_mask(xive, sb, state);
	pr_devel(" old IRQ prio %02x P:%d Q:%d\n", prio,
		 state->old_p, state->old_q);

	/*
	 * If old_p is set, the interrupt is pending, we switch it to
	 * PQ=11. This will force a resend in the host so the interrupt
	 * isn't lost to whatever host driver may pick it up
	 */
	if (state->old_p)
		xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_11);

	/* Release the passed-through interrupt to the host */
	rc = irq_set_vcpu_affinity(host_irq, NULL);
	if (rc) {
		pr_err("Failed to clr VCPU affinity for irq %d\n", host_irq);
		return rc;
	}

	/* Forget about the IRQ */
	state->pt_number = 0;
	state->pt_data = NULL;

	/*
	 * Reset ESB guest mapping. Needed when ESB pages are exposed
	 * to the guest in XIVE native mode
	 */
	if (xive->ops && xive->ops->reset_mapped) {
		xive->ops->reset_mapped(kvm, guest_irq);
	}

	/* Reconfigure the IPI */
	xive_native_configure_irq(state->ipi_number,
				  kvmppc_xive_vp(xive, state->act_server),
				  state->act_priority, state->number);

	/*
	 * If old_p is set (we have a queue entry potentially
	 * occupied) or the interrupt is masked, we set the IPI
	 * to PQ=10 state. Otherwise we just re-enable it (PQ=00).
	 */
	if (prio == MASKED || state->old_p)
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_10);
	else
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_00);

	/* Restore guest prio (unlocks EOI) */
	mb();
	state->guest_priority = prio;
	arch_spin_unlock(&sb->lock);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xive_clr_mapped);

void kvmppc_xive_disable_vcpu_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_xive *xive = kvm->arch.xive;
	int i, j;

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (!sb)
			continue;
		for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
			struct kvmppc_xive_irq_state *state = &sb->irq_state[j];

			if (!state->valid)
				continue;
			if (state->act_priority == MASKED)
				continue;
			if (state->act_server != xc->server_num)
				continue;

			/* Clean it up */
			arch_spin_lock(&sb->lock);
			state->act_priority = MASKED;
			xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
			if (state->pt_number) {
				xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
				xive_native_configure_irq(state->pt_number, 0, MASKED, 0);
			}
			arch_spin_unlock(&sb->lock);
		}
	}

	/* Disable vcpu's escalation interrupt */
	if (vcpu->arch.xive_esc_on) {
		__raw_readq((void __iomem *)(vcpu->arch.xive_esc_vaddr +
					     XIVE_ESB_SET_PQ_01));
		vcpu->arch.xive_esc_on = false;
	}

	/*
	 * Clear pointers to escalation interrupt ESB.
	 * This is safe because the vcpu->mutex is held, preventing
	 * any other CPU from concurrently executing a KVM_RUN ioctl.
	 */
	vcpu->arch.xive_esc_vaddr = 0;
	vcpu->arch.xive_esc_raddr = 0;
}

/*
 * In single escalation mode, the escalation interrupt is marked so
 * that EOI doesn't re-enable it, but just sets the stale_p flag to
 * indicate that the P bit has already been dealt with. However, the
 * assembly code that enters the guest sets PQ to 00 without clearing
 * stale_p (because it has no easy way to address it). Hence we have
 * to adjust stale_p before shutting down the interrupt.
 */
void xive_cleanup_single_escalation(struct kvm_vcpu *vcpu,
				    struct kvmppc_xive_vcpu *xc, int irq)
{
	struct irq_data *d = irq_get_irq_data(irq);
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * This slightly odd sequence gives the right result
	 * (i.e. stale_p set if xive_esc_on is false) even if
	 * we race with xive_esc_irq() and xive_irq_eoi().
	 */
	xd->stale_p = false;
	smp_mb();	/* paired with smp_wmb in xive_esc_irq */
	if (!vcpu->arch.xive_esc_on)
		xd->stale_p = true;
}
1149*4882a593Smuzhiyun
kvmppc_xive_cleanup_vcpu(struct kvm_vcpu * vcpu)1150*4882a593Smuzhiyun void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu)
1151*4882a593Smuzhiyun {
1152*4882a593Smuzhiyun struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1153*4882a593Smuzhiyun struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
1154*4882a593Smuzhiyun int i;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun if (!kvmppc_xics_enabled(vcpu))
1157*4882a593Smuzhiyun return;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun if (!xc)
1160*4882a593Smuzhiyun return;
1161*4882a593Smuzhiyun
1162*4882a593Smuzhiyun pr_devel("cleanup_vcpu(cpu=%d)\n", xc->server_num);
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun /* Ensure no interrupt is still routed to that VP */
1165*4882a593Smuzhiyun xc->valid = false;
1166*4882a593Smuzhiyun kvmppc_xive_disable_vcpu_interrupts(vcpu);
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun /* Mask the VP IPI */
1169*4882a593Smuzhiyun xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_01);
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun /* Free escalations */
1172*4882a593Smuzhiyun for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1173*4882a593Smuzhiyun if (xc->esc_virq[i]) {
1174*4882a593Smuzhiyun if (xc->xive->single_escalation)
1175*4882a593Smuzhiyun xive_cleanup_single_escalation(vcpu, xc,
1176*4882a593Smuzhiyun xc->esc_virq[i]);
1177*4882a593Smuzhiyun free_irq(xc->esc_virq[i], vcpu);
1178*4882a593Smuzhiyun irq_dispose_mapping(xc->esc_virq[i]);
1179*4882a593Smuzhiyun kfree(xc->esc_virq_names[i]);
1180*4882a593Smuzhiyun }
1181*4882a593Smuzhiyun }
1182*4882a593Smuzhiyun
1183*4882a593Smuzhiyun /* Disable the VP */
1184*4882a593Smuzhiyun xive_native_disable_vp(xc->vp_id);
1185*4882a593Smuzhiyun
1186*4882a593Smuzhiyun /* Clear the cam word so guest entry won't try to push context */
1187*4882a593Smuzhiyun vcpu->arch.xive_cam_word = 0;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun /* Free the queues */
1190*4882a593Smuzhiyun for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1191*4882a593Smuzhiyun struct xive_q *q = &xc->queues[i];
1192*4882a593Smuzhiyun
1193*4882a593Smuzhiyun xive_native_disable_queue(xc->vp_id, q, i);
1194*4882a593Smuzhiyun if (q->qpage) {
1195*4882a593Smuzhiyun free_pages((unsigned long)q->qpage,
1196*4882a593Smuzhiyun xive->q_page_order);
1197*4882a593Smuzhiyun q->qpage = NULL;
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun
1201*4882a593Smuzhiyun /* Free the IPI */
1202*4882a593Smuzhiyun if (xc->vp_ipi) {
1203*4882a593Smuzhiyun xive_cleanup_irq_data(&xc->vp_ipi_data);
1204*4882a593Smuzhiyun xive_native_free_irq(xc->vp_ipi);
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun /* Free the VP */
1207*4882a593Smuzhiyun kfree(xc);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun /* Cleanup the vcpu */
1210*4882a593Smuzhiyun vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
1211*4882a593Smuzhiyun vcpu->arch.xive_vcpu = NULL;
1212*4882a593Smuzhiyun }
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun static bool kvmppc_xive_vcpu_id_valid(struct kvmppc_xive *xive, u32 cpu)
1215*4882a593Smuzhiyun {
1216*4882a593Smuzhiyun /* We have a block of xive->nr_servers VPs. We just need to check
1217*4882a593Smuzhiyun * that packed vCPU ids are below that.
1218*4882a593Smuzhiyun */
1219*4882a593Smuzhiyun return kvmppc_pack_vcpu_id(xive->kvm, cpu) < xive->nr_servers;
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun int kvmppc_xive_compute_vp_id(struct kvmppc_xive *xive, u32 cpu, u32 *vp)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun u32 vp_id;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun if (!kvmppc_xive_vcpu_id_valid(xive, cpu)) {
1227*4882a593Smuzhiyun pr_devel("Out of bounds !\n");
1228*4882a593Smuzhiyun return -EINVAL;
1229*4882a593Smuzhiyun }
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun if (xive->vp_base == XIVE_INVALID_VP) {
1232*4882a593Smuzhiyun xive->vp_base = xive_native_alloc_vp_block(xive->nr_servers);
1233*4882a593Smuzhiyun pr_devel("VP_Base=%x nr_servers=%d\n", xive->vp_base, xive->nr_servers);
1234*4882a593Smuzhiyun
1235*4882a593Smuzhiyun if (xive->vp_base == XIVE_INVALID_VP)
1236*4882a593Smuzhiyun return -ENOSPC;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun vp_id = kvmppc_xive_vp(xive, cpu);
1240*4882a593Smuzhiyun if (kvmppc_xive_vp_in_use(xive->kvm, vp_id)) {
1241*4882a593Smuzhiyun pr_devel("Duplicate !\n");
1242*4882a593Smuzhiyun return -EEXIST;
1243*4882a593Smuzhiyun }
1244*4882a593Smuzhiyun
1245*4882a593Smuzhiyun *vp = vp_id;
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun return 0;
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun
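/*
 * Worked example with hypothetical numbers: if the VP block was
 * allocated at vp_base = 0x40 and the vCPU's packed id is 3, then,
 * assuming kvmppc_xive_vp() is simply vp_base plus the packed vCPU
 * id, the function above yields vp_id = 0x43. The id_valid check is
 * what keeps that result inside the nr_servers-sized block.
 */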
1250*4882a593Smuzhiyun int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
1251*4882a593Smuzhiyun struct kvm_vcpu *vcpu, u32 cpu)
1252*4882a593Smuzhiyun {
1253*4882a593Smuzhiyun struct kvmppc_xive *xive = dev->private;
1254*4882a593Smuzhiyun struct kvmppc_xive_vcpu *xc;
1255*4882a593Smuzhiyun int i, r = -EBUSY;
1256*4882a593Smuzhiyun u32 vp_id;
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun pr_devel("connect_vcpu(cpu=%d)\n", cpu);
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun if (dev->ops != &kvm_xive_ops) {
1261*4882a593Smuzhiyun pr_devel("Wrong ops !\n");
1262*4882a593Smuzhiyun return -EPERM;
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun if (xive->kvm != vcpu->kvm)
1265*4882a593Smuzhiyun return -EPERM;
1266*4882a593Smuzhiyun if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
1267*4882a593Smuzhiyun return -EBUSY;
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /* We need to synchronize with queue provisioning */
1270*4882a593Smuzhiyun mutex_lock(&xive->lock);
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun r = kvmppc_xive_compute_vp_id(xive, cpu, &vp_id);
1273*4882a593Smuzhiyun if (r)
1274*4882a593Smuzhiyun goto bail;
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun xc = kzalloc(sizeof(*xc), GFP_KERNEL);
1277*4882a593Smuzhiyun if (!xc) {
1278*4882a593Smuzhiyun r = -ENOMEM;
1279*4882a593Smuzhiyun goto bail;
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun vcpu->arch.xive_vcpu = xc;
1283*4882a593Smuzhiyun xc->xive = xive;
1284*4882a593Smuzhiyun xc->vcpu = vcpu;
1285*4882a593Smuzhiyun xc->server_num = cpu;
1286*4882a593Smuzhiyun xc->vp_id = vp_id;
1287*4882a593Smuzhiyun xc->mfrr = 0xff;
1288*4882a593Smuzhiyun xc->valid = true;
1289*4882a593Smuzhiyun
1290*4882a593Smuzhiyun r = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
1291*4882a593Smuzhiyun if (r)
1292*4882a593Smuzhiyun goto bail;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun /* Configure VCPU fields for use by assembly push/pull */
1295*4882a593Smuzhiyun vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
1296*4882a593Smuzhiyun vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun /* Allocate IPI */
1299*4882a593Smuzhiyun xc->vp_ipi = xive_native_alloc_irq();
1300*4882a593Smuzhiyun if (!xc->vp_ipi) {
1301*4882a593Smuzhiyun pr_err("Failed to allocate xive irq for VCPU IPI\n");
1302*4882a593Smuzhiyun r = -EIO;
1303*4882a593Smuzhiyun goto bail;
1304*4882a593Smuzhiyun }
1305*4882a593Smuzhiyun pr_devel(" IPI=0x%x\n", xc->vp_ipi);
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun r = xive_native_populate_irq_data(xc->vp_ipi, &xc->vp_ipi_data);
1308*4882a593Smuzhiyun if (r)
1309*4882a593Smuzhiyun goto bail;
1310*4882a593Smuzhiyun
1311*4882a593Smuzhiyun /*
1312*4882a593Smuzhiyun * Enable the VP first, as the single escalation mode will
1313*4882a593Smuzhiyun * affect the numbering of the escalation interrupts
1314*4882a593Smuzhiyun */
1315*4882a593Smuzhiyun r = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
1316*4882a593Smuzhiyun if (r) {
1317*4882a593Smuzhiyun pr_err("Failed to enable VP in OPAL, err %d\n", r);
1318*4882a593Smuzhiyun goto bail;
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun /*
1322*4882a593Smuzhiyun * Initialize queues. Initially we set them all for no queueing
1323*4882a593Smuzhiyun * and we enable escalation for queue 0 only which we'll use for
1324*4882a593Smuzhiyun * our mfrr change notifications. If the VCPU is hot-plugged,
1325*4882a593Smuzhiyun * however, we do handle provisioning based on the existing "map"
1326*4882a593Smuzhiyun * of enabled queues.
1327*4882a593Smuzhiyun */
1328*4882a593Smuzhiyun for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
1329*4882a593Smuzhiyun struct xive_q *q = &xc->queues[i];
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun /* Single escalation, no queue 7 */
1332*4882a593Smuzhiyun if (i == 7 && xive->single_escalation)
1333*4882a593Smuzhiyun break;
1334*4882a593Smuzhiyun
1335*4882a593Smuzhiyun /* Is queue already enabled ? Provision it */
1336*4882a593Smuzhiyun if (xive->qmap & (1 << i)) {
1337*4882a593Smuzhiyun r = xive_provision_queue(vcpu, i);
1338*4882a593Smuzhiyun if (r == 0 && !xive->single_escalation)
1339*4882a593Smuzhiyun kvmppc_xive_attach_escalation(
1340*4882a593Smuzhiyun vcpu, i, xive->single_escalation);
1341*4882a593Smuzhiyun if (r)
1342*4882a593Smuzhiyun goto bail;
1343*4882a593Smuzhiyun } else {
1344*4882a593Smuzhiyun r = xive_native_configure_queue(xc->vp_id,
1345*4882a593Smuzhiyun q, i, NULL, 0, true);
1346*4882a593Smuzhiyun if (r) {
1347*4882a593Smuzhiyun pr_err("Failed to configure queue %d for VCPU %d\n",
1348*4882a593Smuzhiyun i, cpu);
1349*4882a593Smuzhiyun goto bail;
1350*4882a593Smuzhiyun }
1351*4882a593Smuzhiyun }
1352*4882a593Smuzhiyun }
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun /* If not done above, attach priority 0 escalation */
1355*4882a593Smuzhiyun r = kvmppc_xive_attach_escalation(vcpu, 0, xive->single_escalation);
1356*4882a593Smuzhiyun if (r)
1357*4882a593Smuzhiyun goto bail;
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /* Route the IPI */
1360*4882a593Smuzhiyun r = xive_native_configure_irq(xc->vp_ipi, xc->vp_id, 0, XICS_IPI);
1361*4882a593Smuzhiyun if (!r)
1362*4882a593Smuzhiyun xive_vm_esb_load(&xc->vp_ipi_data, XIVE_ESB_SET_PQ_00);
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun bail:
1365*4882a593Smuzhiyun mutex_unlock(&xive->lock);
1366*4882a593Smuzhiyun if (r) {
1367*4882a593Smuzhiyun kvmppc_xive_cleanup_vcpu(vcpu);
1368*4882a593Smuzhiyun return r;
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun vcpu->arch.irq_type = KVMPPC_IRQ_XICS;
1372*4882a593Smuzhiyun return 0;
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun
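/*
 * For reference, a minimal sketch of how userspace typically reaches
 * kvmppc_xive_connect_vcpu() (QEMU-style; a given VMM may differ).
 * The vCPU is bound to the in-kernel XICS-on-XIVE device with
 * KVM_ENABLE_CAP on the vcpu fd, passing the device fd and the
 * server number:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_IRQ_XICS,
 *		.args = { xics_device_fd, server_num },
 *	};
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 *
 * which ends up here with @cpu set to server_num.
 */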
1375*4882a593Smuzhiyun /*
1376*4882a593Smuzhiyun * Scanning of queues before/after migration save
1377*4882a593Smuzhiyun */
1378*4882a593Smuzhiyun static void xive_pre_save_set_queued(struct kvmppc_xive *xive, u32 irq)
1379*4882a593Smuzhiyun {
1380*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb;
1381*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state;
1382*4882a593Smuzhiyun u16 idx;
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun sb = kvmppc_xive_find_source(xive, irq, &idx);
1385*4882a593Smuzhiyun if (!sb)
1386*4882a593Smuzhiyun return;
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun state = &sb->irq_state[idx];
1389*4882a593Smuzhiyun
1390*4882a593Smuzhiyun /* Some sanity checking */
1391*4882a593Smuzhiyun if (!state->valid) {
1392*4882a593Smuzhiyun pr_err("invalid irq 0x%x in cpu queue!\n", irq);
1393*4882a593Smuzhiyun return;
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun
1396*4882a593Smuzhiyun /*
1397*4882a593Smuzhiyun * If the interrupt is in a queue it should have P set.
1398*4882a593Smuzhiyun * We log an error so that it gets reported. A backtrace isn't
1399*4882a593Smuzhiyun * useful, so there is no need for a WARN_ON.
1400*4882a593Smuzhiyun */
1401*4882a593Smuzhiyun if (!state->saved_p)
1402*4882a593Smuzhiyun pr_err("Interrupt 0x%x is marked in a queue but P not set !\n", irq);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun /* Set flag */
1405*4882a593Smuzhiyun state->in_queue = true;
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun static void xive_pre_save_mask_irq(struct kvmppc_xive *xive,
1409*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb,
1410*4882a593Smuzhiyun u32 irq)
1411*4882a593Smuzhiyun {
1412*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun if (!state->valid)
1415*4882a593Smuzhiyun return;
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun /* Mask and save state, this will also sync HW queues */
1418*4882a593Smuzhiyun state->saved_scan_prio = xive_lock_and_mask(xive, sb, state);
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun /* Transfer P and Q */
1421*4882a593Smuzhiyun state->saved_p = state->old_p;
1422*4882a593Smuzhiyun state->saved_q = state->old_q;
1423*4882a593Smuzhiyun
1424*4882a593Smuzhiyun /* Unlock */
1425*4882a593Smuzhiyun arch_spin_unlock(&sb->lock);
1426*4882a593Smuzhiyun }
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun static void xive_pre_save_unmask_irq(struct kvmppc_xive *xive,
1429*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb,
1430*4882a593Smuzhiyun u32 irq)
1431*4882a593Smuzhiyun {
1432*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state = &sb->irq_state[irq];
1433*4882a593Smuzhiyun
1434*4882a593Smuzhiyun if (!state->valid)
1435*4882a593Smuzhiyun return;
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun /*
1438*4882a593Smuzhiyun * Lock / exclude EOI (not technically necessary if the
1439*4882a593Smuzhiyun * guest isn't running concurrently). If this becomes a
1440*4882a593Smuzhiyun * performance issue we can probably remove the lock.
1441*4882a593Smuzhiyun */
1442*4882a593Smuzhiyun xive_lock_for_unmask(sb, state);
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun /* Restore mask/prio if it wasn't masked */
1445*4882a593Smuzhiyun if (state->saved_scan_prio != MASKED)
1446*4882a593Smuzhiyun xive_finish_unmask(xive, sb, state, state->saved_scan_prio);
1447*4882a593Smuzhiyun
1448*4882a593Smuzhiyun /* Unlock */
1449*4882a593Smuzhiyun arch_spin_unlock(&sb->lock);
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun static void xive_pre_save_queue(struct kvmppc_xive *xive, struct xive_q *q)
1453*4882a593Smuzhiyun {
1454*4882a593Smuzhiyun u32 idx = q->idx;
1455*4882a593Smuzhiyun u32 toggle = q->toggle;
1456*4882a593Smuzhiyun u32 irq;
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun do {
1459*4882a593Smuzhiyun irq = __xive_read_eq(q->qpage, q->msk, &idx, &toggle);
1460*4882a593Smuzhiyun if (irq > XICS_IPI)
1461*4882a593Smuzhiyun xive_pre_save_set_queued(xive, irq);
1462*4882a593Smuzhiyun } while (irq);
1463*4882a593Smuzhiyun }
1464*4882a593Smuzhiyun
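/*
 * Note on the scan above: EQ entries are 32-bit big-endian words
 * whose top bit is a generation ("toggle") flag that flips each time
 * the producer wraps the ring. __xive_read_eq() is expected to
 * return 0 once an entry's toggle no longer matches, which is what
 * terminates the do/while loop without an explicit entry count.
 */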
1465*4882a593Smuzhiyun static void xive_pre_save_scan(struct kvmppc_xive *xive)
1466*4882a593Smuzhiyun {
1467*4882a593Smuzhiyun struct kvm_vcpu *vcpu = NULL;
1468*4882a593Smuzhiyun int i, j;
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun /*
1471*4882a593Smuzhiyun * See the comment in xive_get_source() about how this
1472*4882a593Smuzhiyun * works. Collect a stable state for all interrupts.
1473*4882a593Smuzhiyun */
1474*4882a593Smuzhiyun for (i = 0; i <= xive->max_sbid; i++) {
1475*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1476*4882a593Smuzhiyun if (!sb)
1477*4882a593Smuzhiyun continue;
1478*4882a593Smuzhiyun for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1479*4882a593Smuzhiyun xive_pre_save_mask_irq(xive, sb, j);
1480*4882a593Smuzhiyun }
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun /* Then scan the queues and update the "in_queue" flag */
1483*4882a593Smuzhiyun kvm_for_each_vcpu(i, vcpu, xive->kvm) {
1484*4882a593Smuzhiyun struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1485*4882a593Smuzhiyun if (!xc)
1486*4882a593Smuzhiyun continue;
1487*4882a593Smuzhiyun for (j = 0; j < KVMPPC_XIVE_Q_COUNT; j++) {
1488*4882a593Smuzhiyun if (xc->queues[j].qpage)
1489*4882a593Smuzhiyun xive_pre_save_queue(xive, &xc->queues[j]);
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun /* Finally restore interrupt states */
1494*4882a593Smuzhiyun for (i = 0; i <= xive->max_sbid; i++) {
1495*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1496*4882a593Smuzhiyun if (!sb)
1497*4882a593Smuzhiyun continue;
1498*4882a593Smuzhiyun for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1499*4882a593Smuzhiyun xive_pre_save_unmask_irq(xive, sb, j);
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun
1503*4882a593Smuzhiyun static void xive_post_save_scan(struct kvmppc_xive *xive)
1504*4882a593Smuzhiyun {
1505*4882a593Smuzhiyun u32 i, j;
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun /* Clear all the in_queue flags */
1508*4882a593Smuzhiyun for (i = 0; i <= xive->max_sbid; i++) {
1509*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb = xive->src_blocks[i];
1510*4882a593Smuzhiyun if (!sb)
1511*4882a593Smuzhiyun continue;
1512*4882a593Smuzhiyun for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++)
1513*4882a593Smuzhiyun sb->irq_state[j].in_queue = false;
1514*4882a593Smuzhiyun }
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun /* Next get_source() will do a new scan */
1517*4882a593Smuzhiyun xive->saved_src_count = 0;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun /*
1521*4882a593Smuzhiyun * This returns the source configuration and state to user space.
1522*4882a593Smuzhiyun */
1523*4882a593Smuzhiyun static int xive_get_source(struct kvmppc_xive *xive, long irq, u64 addr)
1524*4882a593Smuzhiyun {
1525*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb;
1526*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state;
1527*4882a593Smuzhiyun u64 __user *ubufp = (u64 __user *) addr;
1528*4882a593Smuzhiyun u64 val, prio;
1529*4882a593Smuzhiyun u16 idx;
1530*4882a593Smuzhiyun
1531*4882a593Smuzhiyun sb = kvmppc_xive_find_source(xive, irq, &idx);
1532*4882a593Smuzhiyun if (!sb)
1533*4882a593Smuzhiyun return -ENOENT;
1534*4882a593Smuzhiyun
1535*4882a593Smuzhiyun state = &sb->irq_state[idx];
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun if (!state->valid)
1538*4882a593Smuzhiyun return -ENOENT;
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun pr_devel("get_source(%ld)...\n", irq);
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun /*
1543*4882a593Smuzhiyun * So to properly save the state into something that looks like a
1544*4882a593Smuzhiyun * XICS migration stream we cannot treat interrupts individually.
1545*4882a593Smuzhiyun *
1546*4882a593Smuzhiyun * We need, instead, to mask them all (and save their previous PQ state)
1547*4882a593Smuzhiyun * to get a stable state in the HW, then sync them to ensure that
1548*4882a593Smuzhiyun * any interrupt that had already fired hits its queue, and finally
1549*4882a593Smuzhiyun * scan all the queues to collect which interrupts are still present
1550*4882a593Smuzhiyun * in the queues, so we can set the "pending" flag on them and
1551*4882a593Smuzhiyun * they can be resent on restore.
1552*4882a593Smuzhiyun *
1553*4882a593Smuzhiyun * So we do it all when the "first" interrupt gets saved; all the
1554*4882a593Smuzhiyun * state is collected at that point, and the rest of xive_get_source()
1555*4882a593Smuzhiyun * merely collects and converts that state to the expected
1556*4882a593Smuzhiyun * userspace bit mask.
1557*4882a593Smuzhiyun */
1558*4882a593Smuzhiyun if (xive->saved_src_count == 0)
1559*4882a593Smuzhiyun xive_pre_save_scan(xive);
1560*4882a593Smuzhiyun xive->saved_src_count++;
1561*4882a593Smuzhiyun
1562*4882a593Smuzhiyun /* Convert saved state into something compatible with xics */
1563*4882a593Smuzhiyun val = state->act_server;
1564*4882a593Smuzhiyun prio = state->saved_scan_prio;
1565*4882a593Smuzhiyun
1566*4882a593Smuzhiyun if (prio == MASKED) {
1567*4882a593Smuzhiyun val |= KVM_XICS_MASKED;
1568*4882a593Smuzhiyun prio = state->saved_priority;
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun val |= prio << KVM_XICS_PRIORITY_SHIFT;
1571*4882a593Smuzhiyun if (state->lsi) {
1572*4882a593Smuzhiyun val |= KVM_XICS_LEVEL_SENSITIVE;
1573*4882a593Smuzhiyun if (state->saved_p)
1574*4882a593Smuzhiyun val |= KVM_XICS_PENDING;
1575*4882a593Smuzhiyun } else {
1576*4882a593Smuzhiyun if (state->saved_p)
1577*4882a593Smuzhiyun val |= KVM_XICS_PRESENTED;
1578*4882a593Smuzhiyun
1579*4882a593Smuzhiyun if (state->saved_q)
1580*4882a593Smuzhiyun val |= KVM_XICS_QUEUED;
1581*4882a593Smuzhiyun
1582*4882a593Smuzhiyun /*
1583*4882a593Smuzhiyun * We mark it pending (which will attempt a re-delivery)
1584*4882a593Smuzhiyun * if we are in a queue *or* we were masked and had
1585*4882a593Smuzhiyun * Q set, which is equivalent to the XICS "masked pending"
1586*4882a593Smuzhiyun * state.
1587*4882a593Smuzhiyun */
1588*4882a593Smuzhiyun if (state->in_queue || (prio == MASKED && state->saved_q))
1589*4882a593Smuzhiyun val |= KVM_XICS_PENDING;
1590*4882a593Smuzhiyun }
1591*4882a593Smuzhiyun
1592*4882a593Smuzhiyun /*
1593*4882a593Smuzhiyun * If that was the last interrupt saved, reset the
1594*4882a593Smuzhiyun * in_queue flags
1595*4882a593Smuzhiyun */
1596*4882a593Smuzhiyun if (xive->saved_src_count == xive->src_count)
1597*4882a593Smuzhiyun xive_post_save_scan(xive);
1598*4882a593Smuzhiyun
1599*4882a593Smuzhiyun /* Copy the result to userspace */
1600*4882a593Smuzhiyun if (put_user(val, ubufp))
1601*4882a593Smuzhiyun return -EFAULT;
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun return 0;
1604*4882a593Smuzhiyun }
1605*4882a593Smuzhiyun
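/*
 * A sketch of the matching userspace side (illustrative): each
 * source is fetched with KVM_GET_DEVICE_ATTR on the device fd, with
 * the irq number in .attr and a pointer to a u64 in .addr:
 *
 *	__u64 state;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *		.addr  = (__u64)(uintptr_t)&state,
 *	};
 *	ioctl(xics_device_fd, KVM_GET_DEVICE_ATTR, &attr);
 *
 * The first such call triggers the pre-save scan described above and
 * the last one resets the in_queue flags via xive_post_save_scan().
 */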
1606*4882a593Smuzhiyun struct kvmppc_xive_src_block *kvmppc_xive_create_src_block(
1607*4882a593Smuzhiyun struct kvmppc_xive *xive, int irq)
1608*4882a593Smuzhiyun {
1609*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb;
1610*4882a593Smuzhiyun int i, bid;
1611*4882a593Smuzhiyun
1612*4882a593Smuzhiyun bid = irq >> KVMPPC_XICS_ICS_SHIFT;
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun mutex_lock(&xive->lock);
1615*4882a593Smuzhiyun
1616*4882a593Smuzhiyun /* block already exists - somebody else got here first */
1617*4882a593Smuzhiyun if (xive->src_blocks[bid])
1618*4882a593Smuzhiyun goto out;
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun /* Create the ICS */
1621*4882a593Smuzhiyun sb = kzalloc(sizeof(*sb), GFP_KERNEL);
1622*4882a593Smuzhiyun if (!sb)
1623*4882a593Smuzhiyun goto out;
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun sb->id = bid;
1626*4882a593Smuzhiyun
1627*4882a593Smuzhiyun for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1628*4882a593Smuzhiyun sb->irq_state[i].number = (bid << KVMPPC_XICS_ICS_SHIFT) | i;
1629*4882a593Smuzhiyun sb->irq_state[i].eisn = 0;
1630*4882a593Smuzhiyun sb->irq_state[i].guest_priority = MASKED;
1631*4882a593Smuzhiyun sb->irq_state[i].saved_priority = MASKED;
1632*4882a593Smuzhiyun sb->irq_state[i].act_priority = MASKED;
1633*4882a593Smuzhiyun }
1634*4882a593Smuzhiyun smp_wmb();
1635*4882a593Smuzhiyun xive->src_blocks[bid] = sb;
1636*4882a593Smuzhiyun
1637*4882a593Smuzhiyun if (bid > xive->max_sbid)
1638*4882a593Smuzhiyun xive->max_sbid = bid;
1639*4882a593Smuzhiyun
1640*4882a593Smuzhiyun out:
1641*4882a593Smuzhiyun mutex_unlock(&xive->lock);
1642*4882a593Smuzhiyun return xive->src_blocks[bid];
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun
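/*
 * Worked example: assuming KVMPPC_XICS_ICS_SHIFT is 12 (i.e. 4096
 * sources per block), guest irq 0x1234 lands in block bid = 1 at
 * index 0x234, and irq_state[i].number reconstructs the full number
 * as (1 << 12) | 0x234 == 0x1234.
 */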
1645*4882a593Smuzhiyun static bool xive_check_delayed_irq(struct kvmppc_xive *xive, u32 irq)
1646*4882a593Smuzhiyun {
1647*4882a593Smuzhiyun struct kvm *kvm = xive->kvm;
1648*4882a593Smuzhiyun struct kvm_vcpu *vcpu = NULL;
1649*4882a593Smuzhiyun int i;
1650*4882a593Smuzhiyun
1651*4882a593Smuzhiyun kvm_for_each_vcpu(i, vcpu, kvm) {
1652*4882a593Smuzhiyun struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun if (!xc)
1655*4882a593Smuzhiyun continue;
1656*4882a593Smuzhiyun
1657*4882a593Smuzhiyun if (xc->delayed_irq == irq) {
1658*4882a593Smuzhiyun xc->delayed_irq = 0;
1659*4882a593Smuzhiyun xive->delayed_irqs--;
1660*4882a593Smuzhiyun return true;
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun return false;
1664*4882a593Smuzhiyun }
1665*4882a593Smuzhiyun
1666*4882a593Smuzhiyun static int xive_set_source(struct kvmppc_xive *xive, long irq, u64 addr)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb;
1669*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state;
1670*4882a593Smuzhiyun u64 __user *ubufp = (u64 __user *) addr;
1671*4882a593Smuzhiyun u16 idx;
1672*4882a593Smuzhiyun u64 val;
1673*4882a593Smuzhiyun u8 act_prio, guest_prio;
1674*4882a593Smuzhiyun u32 server;
1675*4882a593Smuzhiyun int rc = 0;
1676*4882a593Smuzhiyun
1677*4882a593Smuzhiyun if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
1678*4882a593Smuzhiyun return -ENOENT;
1679*4882a593Smuzhiyun
1680*4882a593Smuzhiyun pr_devel("set_source(irq=0x%lx)\n", irq);
1681*4882a593Smuzhiyun
1682*4882a593Smuzhiyun /* Find the source */
1683*4882a593Smuzhiyun sb = kvmppc_xive_find_source(xive, irq, &idx);
1684*4882a593Smuzhiyun if (!sb) {
1685*4882a593Smuzhiyun pr_devel("No source, creating source block...\n");
1686*4882a593Smuzhiyun sb = kvmppc_xive_create_src_block(xive, irq);
1687*4882a593Smuzhiyun if (!sb) {
1688*4882a593Smuzhiyun pr_devel("Failed to create block...\n");
1689*4882a593Smuzhiyun return -ENOMEM;
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun state = &sb->irq_state[idx];
1693*4882a593Smuzhiyun
1694*4882a593Smuzhiyun /* Read user passed data */
1695*4882a593Smuzhiyun if (get_user(val, ubufp)) {
1696*4882a593Smuzhiyun pr_devel("fault getting user info !\n");
1697*4882a593Smuzhiyun return -EFAULT;
1698*4882a593Smuzhiyun }
1699*4882a593Smuzhiyun
1700*4882a593Smuzhiyun server = val & KVM_XICS_DESTINATION_MASK;
1701*4882a593Smuzhiyun guest_prio = val >> KVM_XICS_PRIORITY_SHIFT;
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun pr_devel(" val=0x016%llx (server=0x%x, guest_prio=%d)\n",
1704*4882a593Smuzhiyun val, server, guest_prio);
1705*4882a593Smuzhiyun
1706*4882a593Smuzhiyun /*
1707*4882a593Smuzhiyun * If the source doesn't already have an IPI, allocate
1708*4882a593Smuzhiyun * one and get the corresponding data
1709*4882a593Smuzhiyun */
1710*4882a593Smuzhiyun if (!state->ipi_number) {
1711*4882a593Smuzhiyun state->ipi_number = xive_native_alloc_irq();
1712*4882a593Smuzhiyun if (state->ipi_number == 0) {
1713*4882a593Smuzhiyun pr_devel("Failed to allocate IPI !\n");
1714*4882a593Smuzhiyun return -ENOMEM;
1715*4882a593Smuzhiyun }
1716*4882a593Smuzhiyun xive_native_populate_irq_data(state->ipi_number, &state->ipi_data);
1717*4882a593Smuzhiyun pr_devel(" src_ipi=0x%x\n", state->ipi_number);
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun
1720*4882a593Smuzhiyun /*
1721*4882a593Smuzhiyun * We use lock_and_mask() to set us in the right masked
1722*4882a593Smuzhiyun * state. We will override that state from the saved state
1723*4882a593Smuzhiyun * further down, but this will handle the cases of interrupts
1724*4882a593Smuzhiyun * that need FW masking. We set the initial guest_priority to
1725*4882a593Smuzhiyun * 0 before calling it to ensure it actually performs the masking.
1726*4882a593Smuzhiyun */
1727*4882a593Smuzhiyun state->guest_priority = 0;
1728*4882a593Smuzhiyun xive_lock_and_mask(xive, sb, state);
1729*4882a593Smuzhiyun
1730*4882a593Smuzhiyun /*
1731*4882a593Smuzhiyun * Now, we select a target if we have one. If we don't we
1732*4882a593Smuzhiyun * leave the interrupt untargeted. It means that an interrupt
1733*4882a593Smuzhiyun * can become "untargeted" across migration if it was masked
1734*4882a593Smuzhiyun * by set_xive() but there is little we can do about it.
1735*4882a593Smuzhiyun */
1736*4882a593Smuzhiyun
1737*4882a593Smuzhiyun /* First convert prio and mark interrupt as untargeted */
1738*4882a593Smuzhiyun act_prio = xive_prio_from_guest(guest_prio);
1739*4882a593Smuzhiyun state->act_priority = MASKED;
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun /*
1742*4882a593Smuzhiyun * We need to drop the lock due to the mutex below. Hopefully
1743*4882a593Smuzhiyun * nothing is touching that interrupt yet, since it hasn't been
1744*4882a593Smuzhiyun * advertised to a running guest
1745*4882a593Smuzhiyun */
1746*4882a593Smuzhiyun arch_spin_unlock(&sb->lock);
1747*4882a593Smuzhiyun
1748*4882a593Smuzhiyun /* If we have a priority target the interrupt */
1749*4882a593Smuzhiyun if (act_prio != MASKED) {
1750*4882a593Smuzhiyun /* First, check provisioning of queues */
1751*4882a593Smuzhiyun mutex_lock(&xive->lock);
1752*4882a593Smuzhiyun rc = xive_check_provisioning(xive->kvm, act_prio);
1753*4882a593Smuzhiyun mutex_unlock(&xive->lock);
1754*4882a593Smuzhiyun
1755*4882a593Smuzhiyun /* Target interrupt */
1756*4882a593Smuzhiyun if (rc == 0)
1757*4882a593Smuzhiyun rc = xive_target_interrupt(xive->kvm, state,
1758*4882a593Smuzhiyun server, act_prio);
1759*4882a593Smuzhiyun /*
1760*4882a593Smuzhiyun * If provisioning or targeting failed, leave it
1761*4882a593Smuzhiyun * alone and masked. It will remain disabled until
1762*4882a593Smuzhiyun * the guest re-targets it.
1763*4882a593Smuzhiyun */
1764*4882a593Smuzhiyun }
1765*4882a593Smuzhiyun
1766*4882a593Smuzhiyun /*
1767*4882a593Smuzhiyun * Find out if this was a delayed irq stashed in an ICP,
1768*4882a593Smuzhiyun * in which case, treat it as pending
1769*4882a593Smuzhiyun */
1770*4882a593Smuzhiyun if (xive->delayed_irqs && xive_check_delayed_irq(xive, irq)) {
1771*4882a593Smuzhiyun val |= KVM_XICS_PENDING;
1772*4882a593Smuzhiyun pr_devel(" Found delayed ! forcing PENDING !\n");
1773*4882a593Smuzhiyun }
1774*4882a593Smuzhiyun
1775*4882a593Smuzhiyun /* Cleanup the SW state */
1776*4882a593Smuzhiyun state->old_p = false;
1777*4882a593Smuzhiyun state->old_q = false;
1778*4882a593Smuzhiyun state->lsi = false;
1779*4882a593Smuzhiyun state->asserted = false;
1780*4882a593Smuzhiyun
1781*4882a593Smuzhiyun /* Restore LSI state */
1782*4882a593Smuzhiyun if (val & KVM_XICS_LEVEL_SENSITIVE) {
1783*4882a593Smuzhiyun state->lsi = true;
1784*4882a593Smuzhiyun if (val & KVM_XICS_PENDING)
1785*4882a593Smuzhiyun state->asserted = true;
1786*4882a593Smuzhiyun pr_devel(" LSI ! Asserted=%d\n", state->asserted);
1787*4882a593Smuzhiyun }
1788*4882a593Smuzhiyun
1789*4882a593Smuzhiyun /*
1790*4882a593Smuzhiyun * Restore P and Q. If the interrupt was pending, we
1791*4882a593Smuzhiyun * force Q and !P, which will trigger a resend.
1792*4882a593Smuzhiyun *
1793*4882a593Smuzhiyun * That means that a guest that had both an interrupt
1794*4882a593Smuzhiyun * pending (queued) and Q set will restore with only
1795*4882a593Smuzhiyun * one instance of that interrupt instead of 2, but that
1796*4882a593Smuzhiyun * is perfectly fine as coalescing interrupts that haven't
1797*4882a593Smuzhiyun * been presented yet is always allowed.
1798*4882a593Smuzhiyun */
1799*4882a593Smuzhiyun if (val & KVM_XICS_PRESENTED && !(val & KVM_XICS_PENDING))
1800*4882a593Smuzhiyun state->old_p = true;
1801*4882a593Smuzhiyun if (val & KVM_XICS_QUEUED || val & KVM_XICS_PENDING)
1802*4882a593Smuzhiyun state->old_q = true;
1803*4882a593Smuzhiyun
1804*4882a593Smuzhiyun pr_devel(" P=%d, Q=%d\n", state->old_p, state->old_q);
1805*4882a593Smuzhiyun
1806*4882a593Smuzhiyun /*
1807*4882a593Smuzhiyun * If the interrupt was unmasked, update guest priority and
1808*4882a593Smuzhiyun * perform the appropriate state transition and do a
1809*4882a593Smuzhiyun * re-trigger if necessary.
1810*4882a593Smuzhiyun */
1811*4882a593Smuzhiyun if (val & KVM_XICS_MASKED) {
1812*4882a593Smuzhiyun pr_devel(" masked, saving prio\n");
1813*4882a593Smuzhiyun state->guest_priority = MASKED;
1814*4882a593Smuzhiyun state->saved_priority = guest_prio;
1815*4882a593Smuzhiyun } else {
1816*4882a593Smuzhiyun pr_devel(" unmasked, restoring to prio %d\n", guest_prio);
1817*4882a593Smuzhiyun xive_finish_unmask(xive, sb, state, guest_prio);
1818*4882a593Smuzhiyun state->saved_priority = guest_prio;
1819*4882a593Smuzhiyun }
1820*4882a593Smuzhiyun
1821*4882a593Smuzhiyun /* Increment the number of valid sources and mark this one valid */
1822*4882a593Smuzhiyun if (!state->valid)
1823*4882a593Smuzhiyun xive->src_count++;
1824*4882a593Smuzhiyun state->valid = true;
1825*4882a593Smuzhiyun
1826*4882a593Smuzhiyun return 0;
1827*4882a593Smuzhiyun }
1828*4882a593Smuzhiyun
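/*
 * Restore-side sketch (illustrative): userspace replays each source
 * with KVM_SET_DEVICE_ATTR, encoding the server in the low bits, the
 * priority at KVM_XICS_PRIORITY_SHIFT and the state flags on top:
 *
 *	__u64 state = server
 *		| ((__u64)prio << KVM_XICS_PRIORITY_SHIFT)
 *		| (pending ? KVM_XICS_PENDING : 0);
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_SOURCES,
 *		.attr  = irq,
 *		.addr  = (__u64)(uintptr_t)&state,
 *	};
 *	ioctl(xics_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 */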
1829*4882a593Smuzhiyun int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1830*4882a593Smuzhiyun bool line_status)
1831*4882a593Smuzhiyun {
1832*4882a593Smuzhiyun struct kvmppc_xive *xive = kvm->arch.xive;
1833*4882a593Smuzhiyun struct kvmppc_xive_src_block *sb;
1834*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state;
1835*4882a593Smuzhiyun u16 idx;
1836*4882a593Smuzhiyun
1837*4882a593Smuzhiyun if (!xive)
1838*4882a593Smuzhiyun return -ENODEV;
1839*4882a593Smuzhiyun
1840*4882a593Smuzhiyun sb = kvmppc_xive_find_source(xive, irq, &idx);
1841*4882a593Smuzhiyun if (!sb)
1842*4882a593Smuzhiyun return -EINVAL;
1843*4882a593Smuzhiyun
1844*4882a593Smuzhiyun /* Perform this locklessly (we need to do some RCUisms here...) */
1845*4882a593Smuzhiyun state = &sb->irq_state[idx];
1846*4882a593Smuzhiyun if (!state->valid)
1847*4882a593Smuzhiyun return -EINVAL;
1848*4882a593Smuzhiyun
1849*4882a593Smuzhiyun /* We don't allow a trigger on a passed-through interrupt */
1850*4882a593Smuzhiyun if (state->pt_number)
1851*4882a593Smuzhiyun return -EINVAL;
1852*4882a593Smuzhiyun
1853*4882a593Smuzhiyun if ((level == 1 && state->lsi) || level == KVM_INTERRUPT_SET_LEVEL)
1854*4882a593Smuzhiyun state->asserted = 1;
1855*4882a593Smuzhiyun else if (level == 0 || level == KVM_INTERRUPT_UNSET) {
1856*4882a593Smuzhiyun state->asserted = 0;
1857*4882a593Smuzhiyun return 0;
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun
1860*4882a593Smuzhiyun /* Trigger the IPI */
1861*4882a593Smuzhiyun xive_irq_trigger(&state->ipi_data);
1862*4882a593Smuzhiyun
1863*4882a593Smuzhiyun return 0;
1864*4882a593Smuzhiyun }
1865*4882a593Smuzhiyun
1866*4882a593Smuzhiyun int kvmppc_xive_set_nr_servers(struct kvmppc_xive *xive, u64 addr)
1867*4882a593Smuzhiyun {
1868*4882a593Smuzhiyun u32 __user *ubufp = (u32 __user *) addr;
1869*4882a593Smuzhiyun u32 nr_servers;
1870*4882a593Smuzhiyun int rc = 0;
1871*4882a593Smuzhiyun
1872*4882a593Smuzhiyun if (get_user(nr_servers, ubufp))
1873*4882a593Smuzhiyun return -EFAULT;
1874*4882a593Smuzhiyun
1875*4882a593Smuzhiyun pr_devel("%s nr_servers=%u\n", __func__, nr_servers);
1876*4882a593Smuzhiyun
1877*4882a593Smuzhiyun if (!nr_servers || nr_servers > KVM_MAX_VCPU_ID)
1878*4882a593Smuzhiyun return -EINVAL;
1879*4882a593Smuzhiyun
1880*4882a593Smuzhiyun mutex_lock(&xive->lock);
1881*4882a593Smuzhiyun if (xive->vp_base != XIVE_INVALID_VP)
1882*4882a593Smuzhiyun /* The VP block is allocated once and freed when the device
1883*4882a593Smuzhiyun * is released. Better not to allow changing its size, since it's
1884*4882a593Smuzhiyun * used by connect_vcpu to validate vCPU ids (e.g.,
1885*4882a593Smuzhiyun * setting it back to a higher value could allow connect_vcpu
1886*4882a593Smuzhiyun * to come up with a VP id that goes beyond the VP block, which
1887*4882a593Smuzhiyun * is likely to cause a crash in OPAL).
1888*4882a593Smuzhiyun */
1889*4882a593Smuzhiyun rc = -EBUSY;
1890*4882a593Smuzhiyun else if (nr_servers > KVM_MAX_VCPUS)
1891*4882a593Smuzhiyun /* We don't need more servers. Higher vCPU ids get packed
1892*4882a593Smuzhiyun * down below KVM_MAX_VCPUS by kvmppc_pack_vcpu_id().
1893*4882a593Smuzhiyun */
1894*4882a593Smuzhiyun xive->nr_servers = KVM_MAX_VCPUS;
1895*4882a593Smuzhiyun else
1896*4882a593Smuzhiyun xive->nr_servers = nr_servers;
1897*4882a593Smuzhiyun
1898*4882a593Smuzhiyun mutex_unlock(&xive->lock);
1899*4882a593Smuzhiyun
1900*4882a593Smuzhiyun return rc;
1901*4882a593Smuzhiyun }
1902*4882a593Smuzhiyun
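/*
 * Usage sketch (illustrative): userspace can size the VP block
 * before any vCPU is connected by writing a u32 through the control
 * group:
 *
 *	__u32 nr = n_guest_cpus;	// hypothetical VMM variable
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_XICS_GRP_CTRL,
 *		.attr  = KVM_DEV_XICS_NR_SERVERS,
 *		.addr  = (__u64)(uintptr_t)&nr,
 *	};
 *	ioctl(xics_device_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * Once vp_base has been allocated, this fails with -EBUSY as above.
 */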
1903*4882a593Smuzhiyun static int xive_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1904*4882a593Smuzhiyun {
1905*4882a593Smuzhiyun struct kvmppc_xive *xive = dev->private;
1906*4882a593Smuzhiyun
1907*4882a593Smuzhiyun /* We honor the existing XICS ioctl */
1908*4882a593Smuzhiyun switch (attr->group) {
1909*4882a593Smuzhiyun case KVM_DEV_XICS_GRP_SOURCES:
1910*4882a593Smuzhiyun return xive_set_source(xive, attr->attr, attr->addr);
1911*4882a593Smuzhiyun case KVM_DEV_XICS_GRP_CTRL:
1912*4882a593Smuzhiyun switch (attr->attr) {
1913*4882a593Smuzhiyun case KVM_DEV_XICS_NR_SERVERS:
1914*4882a593Smuzhiyun return kvmppc_xive_set_nr_servers(xive, attr->addr);
1915*4882a593Smuzhiyun }
1916*4882a593Smuzhiyun }
1917*4882a593Smuzhiyun return -ENXIO;
1918*4882a593Smuzhiyun }
1919*4882a593Smuzhiyun
1920*4882a593Smuzhiyun static int xive_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1921*4882a593Smuzhiyun {
1922*4882a593Smuzhiyun struct kvmppc_xive *xive = dev->private;
1923*4882a593Smuzhiyun
1924*4882a593Smuzhiyun /* We honor the existing XICS ioctl */
1925*4882a593Smuzhiyun switch (attr->group) {
1926*4882a593Smuzhiyun case KVM_DEV_XICS_GRP_SOURCES:
1927*4882a593Smuzhiyun return xive_get_source(xive, attr->attr, attr->addr);
1928*4882a593Smuzhiyun }
1929*4882a593Smuzhiyun return -ENXIO;
1930*4882a593Smuzhiyun }
1931*4882a593Smuzhiyun
1932*4882a593Smuzhiyun static int xive_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1933*4882a593Smuzhiyun {
1934*4882a593Smuzhiyun /* We honor the same limits as XICS, at least for now */
1935*4882a593Smuzhiyun switch (attr->group) {
1936*4882a593Smuzhiyun case KVM_DEV_XICS_GRP_SOURCES:
1937*4882a593Smuzhiyun if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
1938*4882a593Smuzhiyun attr->attr < KVMPPC_XICS_NR_IRQS)
1939*4882a593Smuzhiyun return 0;
1940*4882a593Smuzhiyun break;
1941*4882a593Smuzhiyun case KVM_DEV_XICS_GRP_CTRL:
1942*4882a593Smuzhiyun switch (attr->attr) {
1943*4882a593Smuzhiyun case KVM_DEV_XICS_NR_SERVERS:
1944*4882a593Smuzhiyun return 0;
1945*4882a593Smuzhiyun }
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun return -ENXIO;
1948*4882a593Smuzhiyun }
1949*4882a593Smuzhiyun
1950*4882a593Smuzhiyun static void kvmppc_xive_cleanup_irq(u32 hw_num, struct xive_irq_data *xd)
1951*4882a593Smuzhiyun {
1952*4882a593Smuzhiyun xive_vm_esb_load(xd, XIVE_ESB_SET_PQ_01);
1953*4882a593Smuzhiyun xive_native_configure_irq(hw_num, 0, MASKED, 0);
1954*4882a593Smuzhiyun }
1955*4882a593Smuzhiyun
1956*4882a593Smuzhiyun void kvmppc_xive_free_sources(struct kvmppc_xive_src_block *sb)
1957*4882a593Smuzhiyun {
1958*4882a593Smuzhiyun int i;
1959*4882a593Smuzhiyun
1960*4882a593Smuzhiyun for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
1961*4882a593Smuzhiyun struct kvmppc_xive_irq_state *state = &sb->irq_state[i];
1962*4882a593Smuzhiyun
1963*4882a593Smuzhiyun if (!state->valid)
1964*4882a593Smuzhiyun continue;
1965*4882a593Smuzhiyun
1966*4882a593Smuzhiyun kvmppc_xive_cleanup_irq(state->ipi_number, &state->ipi_data);
1967*4882a593Smuzhiyun xive_cleanup_irq_data(&state->ipi_data);
1968*4882a593Smuzhiyun xive_native_free_irq(state->ipi_number);
1969*4882a593Smuzhiyun
1970*4882a593Smuzhiyun /* Pass-through, cleanup too but keep IRQ hw data */
1971*4882a593Smuzhiyun if (state->pt_number)
1972*4882a593Smuzhiyun kvmppc_xive_cleanup_irq(state->pt_number, state->pt_data);
1973*4882a593Smuzhiyun
1974*4882a593Smuzhiyun state->valid = false;
1975*4882a593Smuzhiyun }
1976*4882a593Smuzhiyun }
1977*4882a593Smuzhiyun
1978*4882a593Smuzhiyun /*
1979*4882a593Smuzhiyun * Called when device fd is closed. kvm->lock is held.
1980*4882a593Smuzhiyun */
1981*4882a593Smuzhiyun static void kvmppc_xive_release(struct kvm_device *dev)
1982*4882a593Smuzhiyun {
1983*4882a593Smuzhiyun struct kvmppc_xive *xive = dev->private;
1984*4882a593Smuzhiyun struct kvm *kvm = xive->kvm;
1985*4882a593Smuzhiyun struct kvm_vcpu *vcpu;
1986*4882a593Smuzhiyun int i;
1987*4882a593Smuzhiyun
1988*4882a593Smuzhiyun pr_devel("Releasing xive device\n");
1989*4882a593Smuzhiyun
1990*4882a593Smuzhiyun /*
1991*4882a593Smuzhiyun * Since this is the device release function, we know that
1992*4882a593Smuzhiyun * userspace does not have any open fd referring to the
1993*4882a593Smuzhiyun * device. Therefore none of the device attribute set/get
1994*4882a593Smuzhiyun * functions can be executing concurrently, and similarly
1995*4882a593Smuzhiyun * the connect_vcpu and set/clr_mapped functions cannot be
1996*4882a593Smuzhiyun * executing either.
1997*4882a593Smuzhiyun */
1998*4882a593Smuzhiyun
1999*4882a593Smuzhiyun debugfs_remove(xive->dentry);
2000*4882a593Smuzhiyun
2001*4882a593Smuzhiyun /*
2002*4882a593Smuzhiyun * We should clean up the vCPU interrupt presenters first.
2003*4882a593Smuzhiyun */
2004*4882a593Smuzhiyun kvm_for_each_vcpu(i, vcpu, kvm) {
2005*4882a593Smuzhiyun /*
2006*4882a593Smuzhiyun * Take vcpu->mutex to ensure that no one_reg get/set ioctl
2007*4882a593Smuzhiyun * (i.e. kvmppc_xive_[gs]et_icp) can be done concurrently.
2008*4882a593Smuzhiyun * Holding the vcpu->mutex also means that the vcpu cannot
2009*4882a593Smuzhiyun * be executing the KVM_RUN ioctl, and therefore it cannot
2010*4882a593Smuzhiyun * be executing the XIVE push or pull code or accessing
2011*4882a593Smuzhiyun * the XIVE MMIO regions.
2012*4882a593Smuzhiyun */
2013*4882a593Smuzhiyun mutex_lock(&vcpu->mutex);
2014*4882a593Smuzhiyun kvmppc_xive_cleanup_vcpu(vcpu);
2015*4882a593Smuzhiyun mutex_unlock(&vcpu->mutex);
2016*4882a593Smuzhiyun }
2017*4882a593Smuzhiyun
2018*4882a593Smuzhiyun /*
2019*4882a593Smuzhiyun * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
2020*4882a593Smuzhiyun * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
2021*4882a593Smuzhiyun * against xive code getting called during vcpu execution or
2022*4882a593Smuzhiyun * set/get one_reg operations.
2023*4882a593Smuzhiyun */
2024*4882a593Smuzhiyun kvm->arch.xive = NULL;
2025*4882a593Smuzhiyun
2026*4882a593Smuzhiyun /* Mask and free interrupts */
2027*4882a593Smuzhiyun for (i = 0; i <= xive->max_sbid; i++) {
2028*4882a593Smuzhiyun if (xive->src_blocks[i])
2029*4882a593Smuzhiyun kvmppc_xive_free_sources(xive->src_blocks[i]);
2030*4882a593Smuzhiyun kfree(xive->src_blocks[i]);
2031*4882a593Smuzhiyun xive->src_blocks[i] = NULL;
2032*4882a593Smuzhiyun }
2033*4882a593Smuzhiyun
2034*4882a593Smuzhiyun if (xive->vp_base != XIVE_INVALID_VP)
2035*4882a593Smuzhiyun xive_native_free_vp_block(xive->vp_base);
2036*4882a593Smuzhiyun
2037*4882a593Smuzhiyun /*
2038*4882a593Smuzhiyun * A reference to the kvmppc_xive pointer is now kept under
2039*4882a593Smuzhiyun * the xive_devices struct of the machine for reuse. For now it
2040*4882a593Smuzhiyun * is freed when the VM is destroyed, until we fix all the
2041*4882a593Smuzhiyun * execution paths.
2042*4882a593Smuzhiyun */
2043*4882a593Smuzhiyun
2044*4882a593Smuzhiyun kfree(dev);
2045*4882a593Smuzhiyun }
2046*4882a593Smuzhiyun
2047*4882a593Smuzhiyun /*
2048*4882a593Smuzhiyun * When the guest chooses the interrupt mode (XICS legacy or XIVE
2049*4882a593Smuzhiyun * native), the VM will switch KVM devices. The previous device will
2050*4882a593Smuzhiyun * be "released" before the new one is created.
2051*4882a593Smuzhiyun *
2052*4882a593Smuzhiyun * Until we are sure all execution paths are well protected, provide a
2053*4882a593Smuzhiyun * fail safe (transitional) method for device destruction, in which
2054*4882a593Smuzhiyun * the XIVE device pointer is recycled and not directly freed.
2055*4882a593Smuzhiyun */
2056*4882a593Smuzhiyun struct kvmppc_xive *kvmppc_xive_get_device(struct kvm *kvm, u32 type)
2057*4882a593Smuzhiyun {
2058*4882a593Smuzhiyun struct kvmppc_xive **kvm_xive_device = type == KVM_DEV_TYPE_XIVE ?
2059*4882a593Smuzhiyun &kvm->arch.xive_devices.native :
2060*4882a593Smuzhiyun &kvm->arch.xive_devices.xics_on_xive;
2061*4882a593Smuzhiyun struct kvmppc_xive *xive = *kvm_xive_device;
2062*4882a593Smuzhiyun
2063*4882a593Smuzhiyun if (!xive) {
2064*4882a593Smuzhiyun xive = kzalloc(sizeof(*xive), GFP_KERNEL);
2065*4882a593Smuzhiyun *kvm_xive_device = xive;
2066*4882a593Smuzhiyun } else {
2067*4882a593Smuzhiyun memset(xive, 0, sizeof(*xive));
2068*4882a593Smuzhiyun }
2069*4882a593Smuzhiyun
2070*4882a593Smuzhiyun return xive;
2071*4882a593Smuzhiyun }
2072*4882a593Smuzhiyun
2073*4882a593Smuzhiyun /*
2074*4882a593Smuzhiyun * Create a XICS device with XIVE backend. kvm->lock is held.
2075*4882a593Smuzhiyun */
2076*4882a593Smuzhiyun static int kvmppc_xive_create(struct kvm_device *dev, u32 type)
2077*4882a593Smuzhiyun {
2078*4882a593Smuzhiyun struct kvmppc_xive *xive;
2079*4882a593Smuzhiyun struct kvm *kvm = dev->kvm;
2080*4882a593Smuzhiyun
2081*4882a593Smuzhiyun pr_devel("Creating xive for partition\n");
2082*4882a593Smuzhiyun
2083*4882a593Smuzhiyun /* Already there ? */
2084*4882a593Smuzhiyun if (kvm->arch.xive)
2085*4882a593Smuzhiyun return -EEXIST;
2086*4882a593Smuzhiyun
2087*4882a593Smuzhiyun xive = kvmppc_xive_get_device(kvm, type);
2088*4882a593Smuzhiyun if (!xive)
2089*4882a593Smuzhiyun return -ENOMEM;
2090*4882a593Smuzhiyun
2091*4882a593Smuzhiyun dev->private = xive;
2092*4882a593Smuzhiyun xive->dev = dev;
2093*4882a593Smuzhiyun xive->kvm = kvm;
2094*4882a593Smuzhiyun mutex_init(&xive->lock);
2095*4882a593Smuzhiyun
2096*4882a593Smuzhiyun /* We use the default queue size set by the host */
2097*4882a593Smuzhiyun xive->q_order = xive_native_default_eq_shift();
2098*4882a593Smuzhiyun if (xive->q_order < PAGE_SHIFT)
2099*4882a593Smuzhiyun xive->q_page_order = 0;
2100*4882a593Smuzhiyun else
2101*4882a593Smuzhiyun xive->q_page_order = xive->q_order - PAGE_SHIFT;
2102*4882a593Smuzhiyun
2103*4882a593Smuzhiyun /* VP allocation is delayed to the first call to connect_vcpu */
2104*4882a593Smuzhiyun xive->vp_base = XIVE_INVALID_VP;
2105*4882a593Smuzhiyun /* KVM_MAX_VCPUS limits the number of vCPUs to roughly 64 per socket
2106*4882a593Smuzhiyun * on a POWER9 system.
2107*4882a593Smuzhiyun */
2108*4882a593Smuzhiyun xive->nr_servers = KVM_MAX_VCPUS;
2109*4882a593Smuzhiyun
2110*4882a593Smuzhiyun xive->single_escalation = xive_native_has_single_escalation();
2111*4882a593Smuzhiyun
2112*4882a593Smuzhiyun kvm->arch.xive = xive;
2113*4882a593Smuzhiyun return 0;
2114*4882a593Smuzhiyun }
2115*4882a593Smuzhiyun
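/*
 * Creation sketch (illustrative): the device itself comes into being
 * via KVM_CREATE_DEVICE on the VM fd, which supplies the device fd
 * used in the attribute and connect examples above:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	int xics_device_fd = cd.fd;
 */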
2116*4882a593Smuzhiyun int kvmppc_xive_debug_show_queues(struct seq_file *m, struct kvm_vcpu *vcpu)
2117*4882a593Smuzhiyun {
2118*4882a593Smuzhiyun struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2119*4882a593Smuzhiyun unsigned int i;
2120*4882a593Smuzhiyun
2121*4882a593Smuzhiyun for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
2122*4882a593Smuzhiyun struct xive_q *q = &xc->queues[i];
2123*4882a593Smuzhiyun u32 i0, i1, idx;
2124*4882a593Smuzhiyun
2125*4882a593Smuzhiyun if (!q->qpage && !xc->esc_virq[i])
2126*4882a593Smuzhiyun continue;
2127*4882a593Smuzhiyun
2128*4882a593Smuzhiyun seq_printf(m, " [q%d]: ", i);
2129*4882a593Smuzhiyun
2130*4882a593Smuzhiyun if (q->qpage) {
2131*4882a593Smuzhiyun idx = q->idx;
2132*4882a593Smuzhiyun i0 = be32_to_cpup(q->qpage + idx);
2133*4882a593Smuzhiyun idx = (idx + 1) & q->msk;
2134*4882a593Smuzhiyun i1 = be32_to_cpup(q->qpage + idx);
2135*4882a593Smuzhiyun seq_printf(m, "T=%d %08x %08x...\n", q->toggle,
2136*4882a593Smuzhiyun i0, i1);
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun if (xc->esc_virq[i]) {
2139*4882a593Smuzhiyun struct irq_data *d = irq_get_irq_data(xc->esc_virq[i]);
2140*4882a593Smuzhiyun struct xive_irq_data *xd =
2141*4882a593Smuzhiyun irq_data_get_irq_handler_data(d);
2142*4882a593Smuzhiyun u64 pq = xive_vm_esb_load(xd, XIVE_ESB_GET);
2143*4882a593Smuzhiyun
2144*4882a593Smuzhiyun seq_printf(m, "E:%c%c I(%d:%llx:%llx)",
2145*4882a593Smuzhiyun (pq & XIVE_ESB_VAL_P) ? 'P' : 'p',
2146*4882a593Smuzhiyun (pq & XIVE_ESB_VAL_Q) ? 'Q' : 'q',
2147*4882a593Smuzhiyun xc->esc_virq[i], pq, xd->eoi_page);
2148*4882a593Smuzhiyun seq_puts(m, "\n");
2149*4882a593Smuzhiyun }
2150*4882a593Smuzhiyun }
2151*4882a593Smuzhiyun return 0;
2152*4882a593Smuzhiyun }
2153*4882a593Smuzhiyun
2154*4882a593Smuzhiyun static int xive_debug_show(struct seq_file *m, void *private)
2155*4882a593Smuzhiyun {
2156*4882a593Smuzhiyun struct kvmppc_xive *xive = m->private;
2157*4882a593Smuzhiyun struct kvm *kvm = xive->kvm;
2158*4882a593Smuzhiyun struct kvm_vcpu *vcpu;
2159*4882a593Smuzhiyun u64 t_rm_h_xirr = 0;
2160*4882a593Smuzhiyun u64 t_rm_h_ipoll = 0;
2161*4882a593Smuzhiyun u64 t_rm_h_cppr = 0;
2162*4882a593Smuzhiyun u64 t_rm_h_eoi = 0;
2163*4882a593Smuzhiyun u64 t_rm_h_ipi = 0;
2164*4882a593Smuzhiyun u64 t_vm_h_xirr = 0;
2165*4882a593Smuzhiyun u64 t_vm_h_ipoll = 0;
2166*4882a593Smuzhiyun u64 t_vm_h_cppr = 0;
2167*4882a593Smuzhiyun u64 t_vm_h_eoi = 0;
2168*4882a593Smuzhiyun u64 t_vm_h_ipi = 0;
2169*4882a593Smuzhiyun unsigned int i;
2170*4882a593Smuzhiyun
2171*4882a593Smuzhiyun if (!kvm)
2172*4882a593Smuzhiyun return 0;
2173*4882a593Smuzhiyun
2174*4882a593Smuzhiyun seq_printf(m, "=========\nVCPU state\n=========\n");
2175*4882a593Smuzhiyun
2176*4882a593Smuzhiyun kvm_for_each_vcpu(i, vcpu, kvm) {
2177*4882a593Smuzhiyun struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
2178*4882a593Smuzhiyun
2179*4882a593Smuzhiyun if (!xc)
2180*4882a593Smuzhiyun continue;
2181*4882a593Smuzhiyun
2182*4882a593Smuzhiyun seq_printf(m, "cpu server %#x VP:%#x CPPR:%#x HWCPPR:%#x"
2183*4882a593Smuzhiyun " MFRR:%#x PEND:%#x h_xirr: R=%lld V=%lld\n",
2184*4882a593Smuzhiyun xc->server_num, xc->vp_id, xc->cppr, xc->hw_cppr,
2185*4882a593Smuzhiyun xc->mfrr, xc->pending,
2186*4882a593Smuzhiyun xc->stat_rm_h_xirr, xc->stat_vm_h_xirr);
2187*4882a593Smuzhiyun
2188*4882a593Smuzhiyun kvmppc_xive_debug_show_queues(m, vcpu);
2189*4882a593Smuzhiyun
2190*4882a593Smuzhiyun t_rm_h_xirr += xc->stat_rm_h_xirr;
2191*4882a593Smuzhiyun t_rm_h_ipoll += xc->stat_rm_h_ipoll;
2192*4882a593Smuzhiyun t_rm_h_cppr += xc->stat_rm_h_cppr;
2193*4882a593Smuzhiyun t_rm_h_eoi += xc->stat_rm_h_eoi;
2194*4882a593Smuzhiyun t_rm_h_ipi += xc->stat_rm_h_ipi;
2195*4882a593Smuzhiyun t_vm_h_xirr += xc->stat_vm_h_xirr;
2196*4882a593Smuzhiyun t_vm_h_ipoll += xc->stat_vm_h_ipoll;
2197*4882a593Smuzhiyun t_vm_h_cppr += xc->stat_vm_h_cppr;
2198*4882a593Smuzhiyun t_vm_h_eoi += xc->stat_vm_h_eoi;
2199*4882a593Smuzhiyun t_vm_h_ipi += xc->stat_vm_h_ipi;
2200*4882a593Smuzhiyun }
2201*4882a593Smuzhiyun
2202*4882a593Smuzhiyun seq_printf(m, "Hcalls totals\n");
2203*4882a593Smuzhiyun seq_printf(m, " H_XIRR R=%10lld V=%10lld\n", t_rm_h_xirr, t_vm_h_xirr);
2204*4882a593Smuzhiyun seq_printf(m, " H_IPOLL R=%10lld V=%10lld\n", t_rm_h_ipoll, t_vm_h_ipoll);
2205*4882a593Smuzhiyun seq_printf(m, " H_CPPR R=%10lld V=%10lld\n", t_rm_h_cppr, t_vm_h_cppr);
2206*4882a593Smuzhiyun seq_printf(m, " H_EOI R=%10lld V=%10lld\n", t_rm_h_eoi, t_vm_h_eoi);
2207*4882a593Smuzhiyun seq_printf(m, " H_IPI R=%10lld V=%10lld\n", t_rm_h_ipi, t_vm_h_ipi);
2208*4882a593Smuzhiyun
2209*4882a593Smuzhiyun return 0;
2210*4882a593Smuzhiyun }
2211*4882a593Smuzhiyun
2212*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(xive_debug);
2213*4882a593Smuzhiyun
2214*4882a593Smuzhiyun static void xive_debugfs_init(struct kvmppc_xive *xive)
2215*4882a593Smuzhiyun {
2216*4882a593Smuzhiyun char *name;
2217*4882a593Smuzhiyun
2218*4882a593Smuzhiyun name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
2219*4882a593Smuzhiyun if (!name) {
2220*4882a593Smuzhiyun pr_err("%s: no memory for name\n", __func__);
2221*4882a593Smuzhiyun return;
2222*4882a593Smuzhiyun }
2223*4882a593Smuzhiyun
2224*4882a593Smuzhiyun xive->dentry = debugfs_create_file(name, S_IRUGO, powerpc_debugfs_root,
2225*4882a593Smuzhiyun xive, &xive_debug_fops);
2226*4882a593Smuzhiyun
2227*4882a593Smuzhiyun pr_debug("%s: created %s\n", __func__, name);
2228*4882a593Smuzhiyun kfree(name);
2229*4882a593Smuzhiyun }
2230*4882a593Smuzhiyun
2231*4882a593Smuzhiyun static void kvmppc_xive_init(struct kvm_device *dev)
2232*4882a593Smuzhiyun {
2233*4882a593Smuzhiyun struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;
2234*4882a593Smuzhiyun
2235*4882a593Smuzhiyun /* Register some debug interfaces */
2236*4882a593Smuzhiyun xive_debugfs_init(xive);
2237*4882a593Smuzhiyun }
2238*4882a593Smuzhiyun
2239*4882a593Smuzhiyun struct kvm_device_ops kvm_xive_ops = {
2240*4882a593Smuzhiyun .name = "kvm-xive",
2241*4882a593Smuzhiyun .create = kvmppc_xive_create,
2242*4882a593Smuzhiyun .init = kvmppc_xive_init,
2243*4882a593Smuzhiyun .release = kvmppc_xive_release,
2244*4882a593Smuzhiyun .set_attr = xive_set_attr,
2245*4882a593Smuzhiyun .get_attr = xive_get_attr,
2246*4882a593Smuzhiyun .has_attr = xive_has_attr,
2247*4882a593Smuzhiyun };
2248*4882a593Smuzhiyun
2249*4882a593Smuzhiyun void kvmppc_xive_init_module(void)
2250*4882a593Smuzhiyun {
2251*4882a593Smuzhiyun __xive_vm_h_xirr = xive_vm_h_xirr;
2252*4882a593Smuzhiyun __xive_vm_h_ipoll = xive_vm_h_ipoll;
2253*4882a593Smuzhiyun __xive_vm_h_ipi = xive_vm_h_ipi;
2254*4882a593Smuzhiyun __xive_vm_h_cppr = xive_vm_h_cppr;
2255*4882a593Smuzhiyun __xive_vm_h_eoi = xive_vm_h_eoi;
2256*4882a593Smuzhiyun }
2257*4882a593Smuzhiyun
2258*4882a593Smuzhiyun void kvmppc_xive_exit_module(void)
2259*4882a593Smuzhiyun {
2260*4882a593Smuzhiyun __xive_vm_h_xirr = NULL;
2261*4882a593Smuzhiyun __xive_vm_h_ipoll = NULL;
2262*4882a593Smuzhiyun __xive_vm_h_ipi = NULL;
2263*4882a593Smuzhiyun __xive_vm_h_cppr = NULL;
2264*4882a593Smuzhiyun __xive_vm_h_eoi = NULL;
2265*4882a593Smuzhiyun }
2266