// SPDX-License-Identifier: GPL-2.0-only
/*
 * irqchip.c: Common API for in kernel interrupt controllers
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (c) 2013, Alexander Graf <agraf@suse.de>
 *
 * This file is derived from virt/kvm/irq_comm.c.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Alexander Graf <agraf@suse.de>
 */
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <linux/kvm_host.h>
16*4882a593Smuzhiyun #include <linux/slab.h>
17*4882a593Smuzhiyun #include <linux/srcu.h>
18*4882a593Smuzhiyun #include <linux/export.h>
19*4882a593Smuzhiyun #include <trace/events/kvm.h>
20*4882a593Smuzhiyun #include "irq.h"
21*4882a593Smuzhiyun
kvm_irq_map_gsi(struct kvm * kvm,struct kvm_kernel_irq_routing_entry * entries,int gsi)22*4882a593Smuzhiyun int kvm_irq_map_gsi(struct kvm *kvm,
23*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry *entries, int gsi)
24*4882a593Smuzhiyun {
25*4882a593Smuzhiyun struct kvm_irq_routing_table *irq_rt;
26*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry *e;
27*4882a593Smuzhiyun int n = 0;
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
30*4882a593Smuzhiyun lockdep_is_held(&kvm->irq_lock));
31*4882a593Smuzhiyun if (irq_rt && gsi < irq_rt->nr_rt_entries) {
32*4882a593Smuzhiyun hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
33*4882a593Smuzhiyun entries[n] = *e;
34*4882a593Smuzhiyun ++n;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun }
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun return n;
39*4882a593Smuzhiyun }
40*4882a593Smuzhiyun
kvm_irq_map_chip_pin(struct kvm * kvm,unsigned irqchip,unsigned pin)41*4882a593Smuzhiyun int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
42*4882a593Smuzhiyun {
43*4882a593Smuzhiyun struct kvm_irq_routing_table *irq_rt;
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
46*4882a593Smuzhiyun return irq_rt->chip[irqchip][pin];
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
kvm_send_userspace_msi(struct kvm * kvm,struct kvm_msi * msi)49*4882a593Smuzhiyun int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry route;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
54*4882a593Smuzhiyun return -EINVAL;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun route.msi.address_lo = msi->address_lo;
57*4882a593Smuzhiyun route.msi.address_hi = msi->address_hi;
58*4882a593Smuzhiyun route.msi.data = msi->data;
59*4882a593Smuzhiyun route.msi.flags = msi->flags;
60*4882a593Smuzhiyun route.msi.devid = msi->devid;
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun
65*4882a593Smuzhiyun /*
66*4882a593Smuzhiyun * Return value:
67*4882a593Smuzhiyun * < 0 Interrupt was ignored (masked or not delivered for other reasons)
68*4882a593Smuzhiyun * = 0 Interrupt was coalesced (previous irq is still pending)
69*4882a593Smuzhiyun * > 0 Number of CPUs interrupt was delivered to
70*4882a593Smuzhiyun */
kvm_set_irq(struct kvm * kvm,int irq_source_id,u32 irq,int level,bool line_status)71*4882a593Smuzhiyun int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
72*4882a593Smuzhiyun bool line_status)
73*4882a593Smuzhiyun {
74*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
75*4882a593Smuzhiyun int ret = -1, i, idx;
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun trace_kvm_set_irq(irq, level, irq_source_id);
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun /* Not possible to detect if the guest uses the PIC or the
80*4882a593Smuzhiyun * IOAPIC. So set the bit in both. The guest will ignore
81*4882a593Smuzhiyun * writes to the unused one.
82*4882a593Smuzhiyun */
83*4882a593Smuzhiyun idx = srcu_read_lock(&kvm->irq_srcu);
84*4882a593Smuzhiyun i = kvm_irq_map_gsi(kvm, irq_set, irq);
85*4882a593Smuzhiyun srcu_read_unlock(&kvm->irq_srcu, idx);
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun while (i--) {
88*4882a593Smuzhiyun int r;
89*4882a593Smuzhiyun r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
90*4882a593Smuzhiyun line_status);
91*4882a593Smuzhiyun if (r < 0)
92*4882a593Smuzhiyun continue;
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun ret = r + ((ret < 0) ? 0 : ret);
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun return ret;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun
free_irq_routing_table(struct kvm_irq_routing_table * rt)100*4882a593Smuzhiyun static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
101*4882a593Smuzhiyun {
102*4882a593Smuzhiyun int i;
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun if (!rt)
105*4882a593Smuzhiyun return;
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun for (i = 0; i < rt->nr_rt_entries; ++i) {
108*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry *e;
109*4882a593Smuzhiyun struct hlist_node *n;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
112*4882a593Smuzhiyun hlist_del(&e->link);
113*4882a593Smuzhiyun kfree(e);
114*4882a593Smuzhiyun }
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun kfree(rt);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun
kvm_free_irq_routing(struct kvm * kvm)120*4882a593Smuzhiyun void kvm_free_irq_routing(struct kvm *kvm)
121*4882a593Smuzhiyun {
122*4882a593Smuzhiyun /* Called only during vm destruction. Nobody can use the pointer
123*4882a593Smuzhiyun at this stage */
124*4882a593Smuzhiyun struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);
125*4882a593Smuzhiyun free_irq_routing_table(rt);
126*4882a593Smuzhiyun }
127*4882a593Smuzhiyun
setup_routing_entry(struct kvm * kvm,struct kvm_irq_routing_table * rt,struct kvm_kernel_irq_routing_entry * e,const struct kvm_irq_routing_entry * ue)128*4882a593Smuzhiyun static int setup_routing_entry(struct kvm *kvm,
129*4882a593Smuzhiyun struct kvm_irq_routing_table *rt,
130*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry *e,
131*4882a593Smuzhiyun const struct kvm_irq_routing_entry *ue)
132*4882a593Smuzhiyun {
133*4882a593Smuzhiyun struct kvm_kernel_irq_routing_entry *ei;
134*4882a593Smuzhiyun int r;
135*4882a593Smuzhiyun u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun /*
138*4882a593Smuzhiyun * Do not allow GSI to be mapped to the same irqchip more than once.
139*4882a593Smuzhiyun * Allow only one to one mapping between GSI and non-irqchip routing.
140*4882a593Smuzhiyun */
141*4882a593Smuzhiyun hlist_for_each_entry(ei, &rt->map[gsi], link)
142*4882a593Smuzhiyun if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
143*4882a593Smuzhiyun ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
144*4882a593Smuzhiyun ue->u.irqchip.irqchip == ei->irqchip.irqchip)
145*4882a593Smuzhiyun return -EINVAL;
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun e->gsi = gsi;
148*4882a593Smuzhiyun e->type = ue->type;
149*4882a593Smuzhiyun r = kvm_set_routing_entry(kvm, e, ue);
150*4882a593Smuzhiyun if (r)
151*4882a593Smuzhiyun return r;
152*4882a593Smuzhiyun if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
153*4882a593Smuzhiyun rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun hlist_add_head(&e->link, &rt->map[e->gsi]);
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun return 0;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
kvm_arch_irq_routing_update(struct kvm * kvm)160*4882a593Smuzhiyun void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun
/*
 * Arch hook: may userspace change this VM's irq routing right now?
 * The default (weak) implementation always permits it; architectures
 * override this to refuse routing updates in unsupported configurations.
 */
bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
	return true;
}
168*4882a593Smuzhiyun
/*
 * Replace the VM's irq routing table with one built from the @nr
 * userspace entries at @ue.  On success the old table is freed after an
 * SRCU grace period; on failure the partially built table is freed and
 * the previously installed table remains in effect.
 */
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *ue,
			unsigned nr,
			unsigned flags)
{
	struct kvm_irq_routing_table *new, *old;
	struct kvm_kernel_irq_routing_entry *e;
	u32 i, j, nr_rt_entries = 0;
	int r;

	/* Reject out-of-range GSIs and size the table to the largest one. */
	for (i = 0; i < nr; ++i) {
		if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
			return -EINVAL;
		nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
	}

	nr_rt_entries += 1;

	new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT);
	if (!new)
		return -ENOMEM;

	new->nr_rt_entries = nr_rt_entries;
	/* -1 marks an irqchip pin that has no GSI routed to it. */
	for (i = 0; i < KVM_NR_IRQCHIPS; i++)
		for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
			new->chip[i][j] = -1;

	for (i = 0; i < nr; ++i) {
		r = -ENOMEM;
		e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
		if (!e)
			goto out;

		r = -EINVAL;
		switch (ue->type) {
		case KVM_IRQ_ROUTING_MSI:
			/* MSI entries may carry only the devid-valid flag. */
			if (ue->flags & ~KVM_MSI_VALID_DEVID)
				goto free_entry;
			break;
		default:
			/* Every other routing type accepts no flags. */
			if (ue->flags)
				goto free_entry;
			break;
		}
		/* On success 'e' is linked into 'new', which now owns it. */
		r = setup_routing_entry(kvm, new, e, ue);
		if (r)
			goto free_entry;
		++ue;
	}

	mutex_lock(&kvm->irq_lock);
	old = rcu_dereference_protected(kvm->irq_routing, 1);
	rcu_assign_pointer(kvm->irq_routing, new);
	kvm_irq_routing_update(kvm);
	kvm_arch_irq_routing_update(kvm);
	mutex_unlock(&kvm->irq_lock);

	kvm_arch_post_irq_routing_update(kvm);

	/* Wait out all irq_srcu readers still using the old table. */
	synchronize_srcu_expedited(&kvm->irq_srcu);

	/* Reuse the cleanup path below to free the table we swapped out. */
	new = old;
	r = 0;
	goto out;

free_entry:
	kfree(e);
out:
	free_irq_routing_table(new);

	return r;
}
241