// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
#include <linux/spinlock.h>

#include <linux/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debugfs.h>
#include <asm/time.h>

#include <linux/seq_file.h>

#include "book3s_xics.h"

#if 1
#define XICS_DBG(fmt...) do { } while (0)
#else
#define XICS_DBG(fmt...) trace_printk(fmt)
#endif
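/*
 * Note: with "#if 1" above, XICS_DBG() compiles to a no-op; switching it
 * to "#if 0" routes the messages through trace_printk() instead.
 */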

#define ENABLE_REALMODE	true
#define DEBUG_REALMODE	false

/*
 * LOCKING
 * =======
 *
 * Each ICS has a spin lock protecting the information about the IRQ
 * sources and avoiding simultaneous deliveries of the same interrupt.
 *
 * ICP operations are done via a single compare & swap transaction
 * (most ICP state fits in the union kvmppc_icp_state)
 */
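
/*
 * Illustrative sketch (comment only) of the lock-free ICP update pattern
 * that the hcall handlers below all follow, built around icp_try_update():
 *
 *	do {
 *		old_state = new_state = READ_ONCE(icp->state);
 *		... derive new_state from old_state ...
 *	} while (!icp_try_update(icp, old_state, new_state, change_self));
 *
 * icp_try_update() recomputes out_ee and fails (causing a retry) if the
 * ICP state changed under us, via cmpxchg64() on the raw state word.
 */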

/*
 * TODO
 * ====
 *
 * - To speed up resends, keep a bitmap of "resend" set bits in the
 *   ICS
 *
 * - Speed up server# -> ICP lookup (array ? hash table ?)
 *
 * - Make ICS lockless as well, or at least a per-interrupt lock or hashed
 *   locks array to improve scalability
 */

/* -- ICS routines -- */

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend);

/*
 * Return value ideally indicates how the interrupt was handled, but no
 * callers look at it (given that we don't implement KVM_IRQ_LINE_STATUS),
 * so just return 0.
 */
static int ics_deliver_irq(struct kvmppc_xics *xics, u32 irq, u32 level)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u16 src;
	u32 pq_old, pq_new;

	XICS_DBG("ics deliver %#x (level: %d)\n", irq, level);

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_deliver_irq: IRQ 0x%06x not found !\n", irq);
		return -EINVAL;
	}
	state = &ics->irq_state[src];
	if (!state->exists)
		return -EINVAL;

	if (level == KVM_INTERRUPT_SET_LEVEL || level == KVM_INTERRUPT_SET)
		level = 1;
	else if (level == KVM_INTERRUPT_UNSET)
		level = 0;
	/*
	 * Take other values the same as 1, consistent with original code.
	 * maybe WARN here?
	 */

	if (!state->lsi && level == 0) /* noop for MSI */
		return 0;

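	/*
	 * For an MSI the two PQ bits below behave like a small shift
	 * register: each new occurrence moves the old P value into Q and
	 * sets P, e.g. (P=0,Q=0) -> (P=1,Q=0) -> (P=1,Q=1), which then
	 * saturates. Only the (P=1,Q=0) result actually presents the
	 * interrupt to an ICP; a queued occurrence is replayed at EOI
	 * time (see ics_eoi()).
	 */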
	do {
		pq_old = state->pq_state;
		if (state->lsi) {
			if (level) {
				if (pq_old & PQ_PRESENTED)
					/* Setting already set LSI ... */
					return 0;

				pq_new = PQ_PRESENTED;
			} else
				pq_new = 0;
		} else
			pq_new = ((pq_old << 1) & 3) | PQ_PRESENTED;
	} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	/* Test P=1, Q=0, this is the only case where we present */
	if (pq_new == PQ_PRESENTED)
		icp_deliver_irq(xics, NULL, irq, false);

	/* Record which CPU this arrived on for passed-through interrupts */
	if (state->host_irq)
		state->intr_cpu = raw_smp_processor_id();

	return 0;
}

static void ics_check_resend(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
			     struct kvmppc_icp *icp)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];
		if (state->resend) {
			XICS_DBG("resend %#x prio %#x\n", state->number,
				 state->priority);
			icp_deliver_irq(xics, icp, state->number, true);
		}
	}
}

static bool write_xive(struct kvmppc_xics *xics, struct kvmppc_ics *ics,
		       struct ics_irq_state *state,
		       u32 server, u32 priority, u32 saved_priority)
{
	bool deliver;
	unsigned long flags;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	state->server = server;
	state->priority = priority;
	state->saved_priority = saved_priority;
	deliver = false;
	if ((state->masked_pending || state->resend) && priority != MASKED) {
		state->masked_pending = 0;
		state->resend = 0;
		deliver = true;
	}

	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return deliver;
}

int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server, u32 priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, server);
	if (!icp)
		return -EINVAL;

	XICS_DBG("set_xive %#x server %#x prio %#x MP:%d RS:%d\n",
		 irq, server, priority,
		 state->masked_pending, state->resend);

	if (write_xive(xics, ics, state, server, priority, priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server, u32 *priority)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	unsigned long flags;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	*server = state->server;
	*priority = state->priority;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	return 0;
}

int kvmppc_xics_int_on(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_icp *icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	icp = kvmppc_xics_find_server(kvm, state->server);
	if (!icp)
		return -EINVAL;

	if (write_xive(xics, ics, state, state->server, state->saved_priority,
		       state->saved_priority))
		icp_deliver_irq(xics, icp, irq, false);

	return 0;
}

int kvmppc_xics_int_off(struct kvm *kvm, u32 irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;

	if (!xics)
		return -ENODEV;

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		return -EINVAL;
	state = &ics->irq_state[src];

	write_xive(xics, ics, state, state->server, MASKED, state->priority);

	return 0;
}

/* -- ICP routines, including hcalls -- */

static inline bool icp_try_update(struct kvmppc_icp *icp,
				  union kvmppc_icp_state old,
				  union kvmppc_icp_state new,
				  bool change_self)
{
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	XICS_DBG("UPD [%04lx] - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 icp->server_num,
		 old.cppr, old.mfrr, old.pending_pri, old.xisr,
		 old.need_resend, old.out_ee);
	XICS_DBG("UPD - C:%02x M:%02x PP: %02x PI:%06x R:%d O:%d\n",
		 new.cppr, new.mfrr, new.pending_pri, new.xisr,
		 new.need_resend, new.out_ee);
	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee) {
		kvmppc_book3s_queue_irqprio(icp->vcpu,
					    BOOK3S_INTERRUPT_EXTERNAL);
		if (!change_self)
			kvmppc_fast_vcpu_kick(icp->vcpu);
	}
bail:
	return success;
}

static void icp_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_check_resend(xics, ics, icp);
	}
}

static bool icp_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
			       u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	XICS_DBG("try deliver %#x(P:%#x) to server %#lx\n", irq, priority,
		 icp->server_num);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_try_update(icp, old_state, new_state, false));

	return success;
}

static void icp_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq, bool check_resend)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;
	unsigned long flags;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		XICS_DBG("icp_deliver_irq: IRQ 0x%06x not found !\n", new_irq);
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			pr_warn("icp_deliver_irq: IRQ 0x%06x server 0x%x not found !\n",
				new_irq, state->server);
			goto out;
		}
	}

	if (check_resend)
		if (!state->resend)
			goto out;

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		XICS_DBG("irq %#x masked pending\n", new_irq);
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			new_irq = reject;
			check_resend = 0;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt, so we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		state->resend = 1;

		/*
		 * Make sure when checking resend, we don't miss the resend
		 * if resend_map bit is seen and cleared.
		 */
		smp_wmb();
		set_bit(ics->icsid, icp->resend_map);

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			state->resend = 0;
			arch_spin_unlock(&ics->lock);
			local_irq_restore(flags);
			check_resend = 0;
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);
}

static void icp_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			  u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			WARN_ON(new_state.xisr != XICS_IPI &&
				new_state.xisr != 0);
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here too
	 */
	if (resend)
		icp_check_resend(xics, icp);
}

static noinline unsigned long kvmppc_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	/* First, remove EE from the processor */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
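	/*
	 * The XIRR value handed back to the guest packs the old CPPR into
	 * the top byte and the interrupt source number (XISR) into the low
	 * 24 bits, matching the xirr & 0x00ffffff / xirr >> 24 split used
	 * in kvmppc_h_eoi() below.
	 */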
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_try_update(icp, old_state, new_state, true));

	XICS_DBG("h_xirr vcpu %d xirr %#x\n", vcpu->vcpu_id, xirr);

	return xirr;
}

static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
				 unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp;
	u32 reject;
	bool resend;
	bool local;

	XICS_DBG("h_ipi vcpu %d to server %lu mfrr %#lx\n",
		 vcpu->vcpu_id, server, mfrr);

	icp = vcpu->arch.icp;
	local = icp->server_num == server;
	if (!local) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be rejected as there can be no XISR to
	 * reject. If the MFRR is being made less favored then
	 * there might be a previously-rejected interrupt needing
	 * to be resent.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
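	/*
	 * Reminder for the comparisons below: XICS priorities are "more
	 * favored" when numerically smaller, with 0xff meaning least
	 * favored / masked. For example, an MFRR of 5 can be presented
	 * against a fully open CPPR of 0xff, but not against a CPPR that
	 * has been pulled down to 3.
	 */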
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, local));

	/* Handle reject */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);

	/* Handle resend */
	if (resend)
		icp_check_resend(xics, icp);

	return H_SUCCESS;
}

static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	union kvmppc_icp_state state;
	struct kvmppc_icp *icp;

	icp = vcpu->arch.icp;
	if (icp->server_num != server) {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}
	state = READ_ONCE(icp->state);
	kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr);
	kvmppc_set_gpr(vcpu, 5, state.mfrr);
	return H_SUCCESS;
}

static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	XICS_DBG("h_cppr vcpu %d cppr %#lx\n", vcpu->vcpu_id, cppr);

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr)
		icp_down_cppr(xics, icp, cppr);
	else if (cppr == icp->state.cppr)
		return;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_try_update(icp, old_state, new_state, true));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_deliver_irq).
	 */
	if (reject && reject != XICS_IPI)
		icp_deliver_irq(xics, icp, reject, false);
}

static int ics_eoi(struct kvm_vcpu *vcpu, u32 irq)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u16 src;
	u32 pq_old, pq_new;

	/*
	 * ICS EOI handling: For LSI, if P bit is still set, we need to
	 * resend it.
	 *
	 * For MSI, we move Q bit into P (and clear Q). If it is set,
	 * resend it.
	 */
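	/*
	 * For example, an MSI that fired twice before the guest EOIed it
	 * sits at P=1,Q=1; the shift below moves Q into P, so the second
	 * occurrence is presented again straight away via icp_deliver_irq().
	 */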

	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics) {
		XICS_DBG("ics_eoi: IRQ 0x%06x not found !\n", irq);
		return H_PARAMETER;
	}
	state = &ics->irq_state[src];

	if (state->lsi)
		pq_new = state->pq_state;
	else
		do {
			pq_old = state->pq_state;
			pq_new = pq_old >> 1;
		} while (cmpxchg(&state->pq_state, pq_old, pq_new) != pq_old);

	if (pq_new & PQ_PRESENTED)
		icp_deliver_irq(xics, icp, irq, false);

	kvm_notify_acked_irq(vcpu->kvm, 0, irq);

	return H_SUCCESS;
}

static noinline int kvmppc_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 irq = xirr & 0x00ffffff;

	XICS_DBG("h_eoi vcpu %d eoi %#lx\n", vcpu->vcpu_id, xirr);

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		return H_SUCCESS;

	return ics_eoi(vcpu, irq);
}

int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;

	XICS_DBG("XICS_RM: H_%x completing, act: %x state: %lx tgt: %p\n",
		 hcall, icp->rm_action, icp->rm_dbgstate.raw, icp->rm_dbgtgt);

	if (icp->rm_action & XICS_RM_KICK_VCPU) {
		icp->n_rm_kick_vcpu++;
		kvmppc_fast_vcpu_kick(icp->rm_kick_target);
	}
	if (icp->rm_action & XICS_RM_CHECK_RESEND) {
		icp->n_rm_check_resend++;
		icp_check_resend(xics, icp->rm_resend_icp);
	}
	if (icp->rm_action & XICS_RM_NOTIFY_EOI) {
		icp->n_rm_notify_eoi++;
		kvm_notify_acked_irq(vcpu->kvm, 0, icp->rm_eoied_irq);
	}

	icp->rm_action = 0;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_rm_complete);

int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	unsigned long res;
	int rc = H_SUCCESS;

	/* Check if we have an ICP */
	if (!xics || !vcpu->arch.icp)
		return H_HARDWARE;

	/* These requests don't have real-mode implementations at present */
	switch (req) {
	case H_XIRR_X:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		kvmppc_set_gpr(vcpu, 5, get_tb());
		return rc;
	case H_IPOLL:
		rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4));
		return rc;
	}

	/* Check for real mode returning too hard */
	if (xics->real_mode && is_kvmppc_hv_enabled(vcpu->kvm))
		return kvmppc_xics_rm_complete(vcpu, req);

	switch (req) {
	case H_XIRR:
		res = kvmppc_h_xirr(vcpu);
		kvmppc_set_gpr(vcpu, 4, res);
		break;
	case H_CPPR:
		kvmppc_h_cppr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_EOI:
		rc = kvmppc_h_eoi(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_IPI:
		rc = kvmppc_h_ipi(vcpu, kvmppc_get_gpr(vcpu, 4),
				  kvmppc_get_gpr(vcpu, 5));
		break;
	}

	return rc;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_hcall);

/* -- Initialisation code etc. -- */

static void xics_debugfs_irqmap(struct seq_file *m,
				struct kvmppc_passthru_irqmap *pimap)
{
	int i;

	if (!pimap)
		return;
	seq_printf(m, "========\nPIRQ mappings: %d maps\n===========\n",
		   pimap->n_mapped);
	for (i = 0; i < pimap->n_mapped; i++) {
		seq_printf(m, "r_hwirq=%x, v_hwirq=%x\n",
			   pimap->mapped[i].r_hwirq, pimap->mapped[i].v_hwirq);
	}
}

static int xics_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xics *xics = m->private;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;
	int icsid, i;
	unsigned long flags;
	unsigned long t_rm_kick_vcpu, t_rm_check_resend;
	unsigned long t_rm_notify_eoi;
	unsigned long t_reject, t_check_resend;

	if (!kvm)
		return 0;

	t_rm_kick_vcpu = 0;
	t_rm_notify_eoi = 0;
	t_rm_check_resend = 0;
	t_check_resend = 0;
	t_reject = 0;

	xics_debugfs_irqmap(m, kvm->arch.pimap);

	seq_printf(m, "=========\nICP state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_icp *icp = vcpu->arch.icp;
		union kvmppc_icp_state state;

		if (!icp)
			continue;

		state.raw = READ_ONCE(icp->state.raw);
		seq_printf(m, "cpu server %#lx XIRR:%#x PPRI:%#x CPPR:%#x MFRR:%#x OUT:%d NR:%d\n",
			   icp->server_num, state.xisr,
			   state.pending_pri, state.cppr, state.mfrr,
			   state.out_ee, state.need_resend);
		t_rm_kick_vcpu += icp->n_rm_kick_vcpu;
		t_rm_notify_eoi += icp->n_rm_notify_eoi;
		t_rm_check_resend += icp->n_rm_check_resend;
		t_check_resend += icp->n_check_resend;
		t_reject += icp->n_reject;
	}

	seq_printf(m, "ICP Guest->Host totals: kick_vcpu=%lu check_resend=%lu notify_eoi=%lu\n",
		   t_rm_kick_vcpu, t_rm_check_resend,
		   t_rm_notify_eoi);
	seq_printf(m, "ICP Real Mode totals: check_resend=%lu reject=%lu\n",
		   t_check_resend, t_reject);
	for (icsid = 0; icsid <= KVMPPC_XICS_MAX_ICS_ID; icsid++) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!ics)
			continue;

		seq_printf(m, "=========\nICS state for ICS 0x%x\n=========\n",
			   icsid);

		local_irq_save(flags);
		arch_spin_lock(&ics->lock);

		for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
			struct ics_irq_state *irq = &ics->irq_state[i];

			seq_printf(m, "irq 0x%06x: server %#x prio %#x save prio %#x pq_state %d resend %d masked pending %d\n",
				   irq->number, irq->server, irq->priority,
				   irq->saved_priority, irq->pq_state,
				   irq->resend, irq->masked_pending);

		}
		arch_spin_unlock(&ics->lock);
		local_irq_restore(flags);
	}
	return 0;
}

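/*
 * DEFINE_SHOW_ATTRIBUTE() (from <linux/seq_file.h>) generates the
 * xics_debug_open() wrapper and the xics_debug_fops file operations
 * around xics_debug_show() above, which xics_debugfs_init() below
 * registers with debugfs.
 */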
DEFINE_SHOW_ATTRIBUTE(xics_debug);

static void xics_debugfs_init(struct kvmppc_xics *xics)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xics-%p", xics);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xics->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
					   xics, &xics_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

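/*
 * Guest interrupt numbers encode the owning ICS in their upper bits
 * (irq >> KVMPPC_XICS_ICS_SHIFT gives the icsid) and the source index
 * within that ICS in the low bits, which is how each source number is
 * rebuilt in the initialisation loop below.
 */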
static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
					struct kvmppc_xics *xics, int irq)
{
	struct kvmppc_ics *ics;
	int i, icsid;

	icsid = irq >> KVMPPC_XICS_ICS_SHIFT;

	mutex_lock(&kvm->lock);

	/* ICS already exists - somebody else got here first */
	if (xics->ics[icsid])
		goto out;

	/* Create the ICS */
	ics = kzalloc(sizeof(struct kvmppc_ics), GFP_KERNEL);
	if (!ics)
		goto out;

	ics->icsid = icsid;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		ics->irq_state[i].number = (icsid << KVMPPC_XICS_ICS_SHIFT) | i;
		ics->irq_state[i].priority = MASKED;
		ics->irq_state[i].saved_priority = MASKED;
	}
	smp_wmb();
	xics->ics[icsid] = ics;

	if (icsid > xics->max_icsid)
		xics->max_icsid = icsid;

 out:
	mutex_unlock(&kvm->lock);
	return xics->ics[icsid];
}

static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
{
	struct kvmppc_icp *icp;

	if (!vcpu->kvm->arch.xics)
		return -ENODEV;

	if (kvmppc_xics_find_server(vcpu->kvm, server_num))
		return -EEXIST;

	icp = kzalloc(sizeof(struct kvmppc_icp), GFP_KERNEL);
	if (!icp)
		return -ENOMEM;

	icp->vcpu = vcpu;
	icp->server_num = server_num;
	icp->state.mfrr = MASKED;
	icp->state.pending_pri = MASKED;
	vcpu->arch.icp = icp;

	XICS_DBG("created server for vcpu %d\n", vcpu->vcpu_id);

	return 0;
}

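/*
 * kvmppc_xics_get_icp()/kvmppc_xics_set_icp() below pack and unpack the
 * ICP register state into a single u64 using the KVM_REG_PPC_ICP_*_SHIFT
 * layout; this is what userspace uses (via the KVM_REG_PPC_ICP_STATE
 * one_reg) to save and restore the ICP, e.g. across migration.
 */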
u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	union kvmppc_icp_state state;

	if (!icp)
		return 0;
	state = icp->state;
	return ((u64)state.cppr << KVM_REG_PPC_ICP_CPPR_SHIFT) |
		((u64)state.xisr << KVM_REG_PPC_ICP_XISR_SHIFT) |
		((u64)state.mfrr << KVM_REG_PPC_ICP_MFRR_SHIFT) |
		((u64)state.pending_pri << KVM_REG_PPC_ICP_PPRI_SHIFT);
}

int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
{
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_ics *ics;
	u8 cppr, mfrr, pending_pri;
	u32 xisr;
	u16 src;
	bool resend;

	if (!icp || !xics)
		return -ENOENT;

	cppr = icpval >> KVM_REG_PPC_ICP_CPPR_SHIFT;
	xisr = (icpval >> KVM_REG_PPC_ICP_XISR_SHIFT) &
		KVM_REG_PPC_ICP_XISR_MASK;
	mfrr = icpval >> KVM_REG_PPC_ICP_MFRR_SHIFT;
	pending_pri = icpval >> KVM_REG_PPC_ICP_PPRI_SHIFT;

	/* Require the new state to be internally consistent */
	if (xisr == 0) {
		if (pending_pri != 0xff)
			return -EINVAL;
	} else if (xisr == XICS_IPI) {
		if (pending_pri != mfrr || pending_pri >= cppr)
			return -EINVAL;
	} else {
		if (pending_pri >= mfrr || pending_pri >= cppr)
			return -EINVAL;
		ics = kvmppc_xics_find_ics(xics, xisr, &src);
		if (!ics)
			return -EINVAL;
	}

	new_state.raw = 0;
	new_state.cppr = cppr;
	new_state.xisr = xisr;
	new_state.mfrr = mfrr;
	new_state.pending_pri = pending_pri;

	/*
	 * Deassert the CPU interrupt request.
	 * icp_try_update will reassert it if necessary.
	 */
	kvmppc_book3s_dequeue_irqprio(icp->vcpu, BOOK3S_INTERRUPT_EXTERNAL);

	/*
	 * Note that if we displace an interrupt from old_state.xisr,
	 * we don't mark it as rejected. We expect userspace to set
	 * the state of the interrupt sources to be consistent with
	 * the ICP states (either before or afterwards, which doesn't
	 * matter). We do handle resends due to CPPR becoming less
	 * favoured because that is necessary to end up with a
	 * consistent state in the situation where userspace restores
	 * the ICS states before the ICP states.
	 */
	do {
		old_state = READ_ONCE(icp->state);

		if (new_state.mfrr <= old_state.mfrr) {
			resend = false;
			new_state.need_resend = old_state.need_resend;
		} else {
			resend = old_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_try_update(icp, old_state, new_state, false));

	if (resend)
		icp_check_resend(xics, icp);

	return 0;
}

static int xics_get_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	int ret;
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val, prio;
	unsigned long flags;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return -ENOENT;

	irqp = &ics->irq_state[idx];
	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	ret = -ENOENT;
	if (irqp->exists) {
		val = irqp->server;
		prio = irqp->priority;
		if (prio == MASKED) {
			val |= KVM_XICS_MASKED;
			prio = irqp->saved_priority;
		}
		val |= prio << KVM_XICS_PRIORITY_SHIFT;
		if (irqp->lsi) {
			val |= KVM_XICS_LEVEL_SENSITIVE;
			if (irqp->pq_state & PQ_PRESENTED)
				val |= KVM_XICS_PENDING;
		} else if (irqp->masked_pending || irqp->resend)
			val |= KVM_XICS_PENDING;

		if (irqp->pq_state & PQ_PRESENTED)
			val |= KVM_XICS_PRESENTED;

		if (irqp->pq_state & PQ_QUEUED)
			val |= KVM_XICS_QUEUED;

		ret = 0;
	}
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (!ret && put_user(val, ubufp))
		ret = -EFAULT;

	return ret;
}
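
/*
 * Reading aid, derived from xics_get_source() above and
 * xics_set_source() below: layout of the 64-bit per-source state word
 * exchanged with userspace through KVM_DEV_XICS_GRP_SOURCES.
 *
 *	KVM_XICS_DESTINATION_MASK bits	: server (ICP) number
 *	KVM_XICS_PRIORITY_SHIFT		: priority (saved priority if masked)
 *	KVM_XICS_MASKED			: source is masked
 *	KVM_XICS_LEVEL_SENSITIVE	: source is level sensitive (LSI)
 *	KVM_XICS_PENDING		: an interrupt is pending
 *	KVM_XICS_PRESENTED		: "P" bit of the PQ state
 *	KVM_XICS_QUEUED			: "Q" bit of the PQ state
 */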

static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
{
	struct kvmppc_ics *ics;
	struct ics_irq_state *irqp;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 idx;
	u64 val;
	u8 prio;
	u32 server;
	unsigned long flags;

	if (irq < KVMPPC_XICS_FIRST_IRQ || irq >= KVMPPC_XICS_NR_IRQS)
		return -ENOENT;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics) {
		ics = kvmppc_xics_create_ics(xics->kvm, xics, irq);
		if (!ics)
			return -ENOMEM;
	}
	irqp = &ics->irq_state[idx];
	if (get_user(val, ubufp))
		return -EFAULT;

	server = val & KVM_XICS_DESTINATION_MASK;
	prio = val >> KVM_XICS_PRIORITY_SHIFT;
	if (prio != MASKED &&
	    kvmppc_xics_find_server(xics->kvm, server) == NULL)
		return -EINVAL;

	local_irq_save(flags);
	arch_spin_lock(&ics->lock);
	irqp->server = server;
	irqp->saved_priority = prio;
	if (val & KVM_XICS_MASKED)
		prio = MASKED;
	irqp->priority = prio;
	irqp->resend = 0;
	irqp->masked_pending = 0;
	irqp->lsi = 0;
	irqp->pq_state = 0;
	if (val & KVM_XICS_LEVEL_SENSITIVE)
		irqp->lsi = 1;
	/* If PENDING is set, also set P in case P was not saved by older code */
	if (val & KVM_XICS_PRESENTED || val & KVM_XICS_PENDING)
		irqp->pq_state |= PQ_PRESENTED;
	if (val & KVM_XICS_QUEUED)
		irqp->pq_state |= PQ_QUEUED;
	irqp->exists = 1;
	arch_spin_unlock(&ics->lock);
	local_irq_restore(flags);

	if (val & KVM_XICS_PENDING)
		icp_deliver_irq(xics, NULL, irqp->number, false);

	return 0;
}
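
/*
 * Minimal userspace-side sketch (an assumption about the calling
 * convention, not kernel code) of restoring one source through the
 * attribute interface served by xics_set_source(); xics_fd, irq,
 * server and prio are hypothetical names:
 *
 *	u64 val = server | ((u64)prio << KVM_XICS_PRIORITY_SHIFT) |
 *		  KVM_XICS_PRESENTED;		// plus other flags as needed
 *	struct kvm_device_attr attr = {
 *		.group	= KVM_DEV_XICS_GRP_SOURCES,
 *		.attr	= irq,
 *		.addr	= (u64)(unsigned long)&val,
 *	};
 *	ioctl(xics_fd, KVM_SET_DEVICE_ATTR, &attr);
 */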

int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
			bool line_status)
{
	struct kvmppc_xics *xics = kvm->arch.xics;

	if (!xics)
		return -ENODEV;
	return ics_deliver_irq(xics, irq, level);
}

static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_set_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	struct kvmppc_xics *xics = dev->private;

	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		return xics_get_source(xics, attr->attr, attr->addr);
	}
	return -ENXIO;
}

static int xics_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XICS_GRP_SOURCES:
		if (attr->attr >= KVMPPC_XICS_FIRST_IRQ &&
		    attr->attr < KVMPPC_XICS_NR_IRQS)
			return 0;
		break;
	}
	return -ENXIO;
}
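
/*
 * Summary of the three attribute handlers above: only one group,
 * KVM_DEV_XICS_GRP_SOURCES, is implemented. "attr" is the global
 * interrupt number, accepted when it lies in
 * [KVMPPC_XICS_FIRST_IRQ, KVMPPC_XICS_NR_IRQS), and "addr" points to
 * the u64 per-source state word described earlier.
 */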

/*
 * Called when device fd is closed. kvm->lock is held.
 */
static void kvmppc_xics_release(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = dev->private;
	int i;
	struct kvm *kvm = xics->kvm;
	struct kvm_vcpu *vcpu;

	pr_devel("Releasing xics device\n");

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd referring to the
	 * device. Therefore none of the device attribute set/get
	 * functions can be executing concurrently, and likewise the
	 * connect_vcpu and set/clr_mapped functions cannot be
	 * executing either.
	 */

	debugfs_remove(xics->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xics_[gs]et_icp) can be done concurrently.
		 * Holding the vcpu->mutex also excludes the vcpu from
		 * executing until the ICP has been freed. Once the vcpu
		 * can execute again, vcpu->arch.icp and vcpu->arch.irq_type
		 * have been cleared and the vcpu will not go into the
		 * XICS code anymore.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xics_free_icp(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	if (kvm)
		kvm->arch.xics = NULL;

	for (i = 0; i <= xics->max_icsid; i++) {
		kfree(xics->ics[i]);
		xics->ics[i] = NULL;
	}
	/*
	 * A reference to the kvmppc_xics pointer is now kept under
	 * the xics_device pointer of the machine for reuse. For now it
	 * is freed when the VM is destroyed, until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}

static struct kvmppc_xics *kvmppc_xics_get_device(struct kvm *kvm)
{
	struct kvmppc_xics **kvm_xics_device = &kvm->arch.xics_device;
	struct kvmppc_xics *xics = *kvm_xics_device;

	if (!xics) {
		xics = kzalloc(sizeof(*xics), GFP_KERNEL);
		*kvm_xics_device = xics;
	} else {
		memset(xics, 0, sizeof(*xics));
	}

	return xics;
}

static int kvmppc_xics_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xics *xics;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xics for partition\n");

	/* Already there ? */
	if (kvm->arch.xics)
		return -EEXIST;

	xics = kvmppc_xics_get_device(kvm);
	if (!xics)
		return -ENOMEM;

	dev->private = xics;
	xics->dev = dev;
	xics->kvm = kvm;
	kvm->arch.xics = xics;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
	    cpu_has_feature(CPU_FTR_HVMODE)) {
		/* Enable real mode support */
		xics->real_mode = ENABLE_REALMODE;
		xics->real_mode_dbg = DEBUG_REALMODE;
	}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	return 0;
}

static void kvmppc_xics_init(struct kvm_device *dev)
{
	struct kvmppc_xics *xics = (struct kvmppc_xics *)dev->private;

	xics_debugfs_init(xics);
}

struct kvm_device_ops kvm_xics_ops = {
	.name = "kvm-xics",
	.create = kvmppc_xics_create,
	.init = kvmppc_xics_init,
	.release = kvmppc_xics_release,
	.set_attr = xics_set_attr,
	.get_attr = xics_get_attr,
	.has_attr = xics_has_attr,
};
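
/*
 * kvm_xics_ops is registered with the KVM device framework outside this
 * file (via kvm_register_device_ops() for the KVM_DEV_TYPE_XICS type).
 * A minimal userspace sketch of bringing the device up, with
 * hypothetical variable names:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_XICS };
 *	ioctl(vm_fd, KVM_CREATE_DEVICE, &cd);
 *	// cd.fd can then be used for KVM_SET/GET_DEVICE_ATTR, and passed
 *	// to KVM_ENABLE_CAP(KVM_CAP_IRQ_XICS) on each vcpu, which ends
 *	// up in kvmppc_xics_connect_vcpu() below.
 */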

int kvmppc_xics_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 xcpu)
{
	struct kvmppc_xics *xics = dev->private;
	int r = -EBUSY;

	if (dev->ops != &kvm_xics_ops)
		return -EPERM;
	if (xics->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	r = kvmppc_xics_create_icp(vcpu, xcpu);
	if (!r)
		vcpu->arch.irq_type = KVMPPC_IRQ_XICS;

	return r;
}

void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
{
	if (!vcpu->arch.icp)
		return;
	kfree(vcpu->arch.icp);
	vcpu->arch.icp = NULL;
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
}

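/*
 * Record (and, in the clr variant below, forget) the host interrupt
 * number backing a guest source. The callers live outside this file,
 * in the interrupt passthrough setup/teardown paths; this note is a
 * summary, not a contract.
 */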
void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = host_irq;
	ics->irq_state[idx].intr_cpu = -1;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_set_mapped);

void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long irq,
			    unsigned long host_irq)
{
	struct kvmppc_xics *xics = kvm->arch.xics;
	struct kvmppc_ics *ics;
	u16 idx;

	ics = kvmppc_xics_find_ics(xics, irq, &idx);
	if (!ics)
		return;

	ics->irq_state[idx].host_irq = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_xics_clr_mapped);