xref: /OK3568_Linux_fs/kernel/arch/arm64/kvm/vgic/vgic-its.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * GICv3 ITS emulation
 *
 * Copyright (C) 2015,2016 ARM Ltd.
 * Author: Andre Przywara <andre.przywara@arm.com>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/list_sort.h>

#include <linux/irqchip/arm-gic-v3.h>

#include <asm/kvm_emulate.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#include "vgic.h"
#include "vgic-mmio.h"

static int vgic_its_save_tables_v0(struct vgic_its *its);
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv);

/*
 * Creates a new (reference to a) struct vgic_irq for a given LPI.
 * If this LPI is already mapped on another ITS, we increase its refcount
 * and return a pointer to the existing structure.
 * If this is a "new" LPI, we allocate and initialize a new struct vgic_irq.
 * This function returns a pointer to the _unlocked_ structure.
 */
static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid,
				     struct kvm_vcpu *vcpu)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = vgic_get_irq(kvm, NULL, intid), *oldirq;
	unsigned long flags;
	int ret;

	/* In this case there is no put, since we keep the reference. */
	if (irq)
		return irq;

	irq = kzalloc(sizeof(struct vgic_irq), GFP_KERNEL);
	if (!irq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&irq->lpi_list);
	INIT_LIST_HEAD(&irq->ap_list);
	raw_spin_lock_init(&irq->irq_lock);

	irq->config = VGIC_CONFIG_EDGE;
	kref_init(&irq->refcount);
	irq->intid = intid;
	irq->target_vcpu = vcpu;
	irq->group = 1;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	/*
	 * There could be a race with another vgic_add_lpi(), so we need to
	 * check that we don't add a second list entry with the same LPI.
	 */
	list_for_each_entry(oldirq, &dist->lpi_list_head, lpi_list) {
		if (oldirq->intid != intid)
			continue;

		/* Someone was faster with adding this LPI, let's use that. */
		kfree(irq);
		irq = oldirq;

		/*
		 * This increases the refcount, the caller is expected to
		 * call vgic_put_irq() on the returned pointer once it's
		 * finished with the IRQ.
		 */
		vgic_get_irq_kref(irq);

		goto out_unlock;
	}

	list_add_tail(&irq->lpi_list, &dist->lpi_list_head);
	dist->lpi_list_count++;

out_unlock:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	/*
	 * We "cache" the configuration table entries in our struct vgic_irq's.
	 * However we only have those structs for mapped IRQs, so we read in
	 * the respective config data from memory here upon mapping the LPI.
	 *
	 * Should any of these fail, behave as if we couldn't create the LPI
	 * by dropping the refcount and returning the error.
	 */
	ret = update_lpi_config(kvm, irq, NULL, false);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
	if (ret) {
		vgic_put_irq(kvm, irq);
		return ERR_PTR(ret);
	}

	return irq;
}
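
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the reference returned by vgic_add_lpi() must either be kept (as the
 * MAPTI handler below does via ite->irq) or dropped with vgic_put_irq():
 *
 *	struct vgic_irq *irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
 *
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *	ite->irq = irq;		// keep the reference until its_free_ite()
 */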

struct its_device {
	struct list_head dev_list;

	/* the head for the list of ITTEs */
	struct list_head itt_head;
	u32 num_eventid_bits;
	gpa_t itt_addr;
	u32 device_id;
};

#define COLLECTION_NOT_MAPPED ((u32)~0)

struct its_collection {
	struct list_head coll_list;

	u32 collection_id;
	u32 target_addr;
};

#define its_is_collection_mapped(coll) ((coll) && \
				((coll)->target_addr != COLLECTION_NOT_MAPPED))

struct its_ite {
	struct list_head ite_list;

	struct vgic_irq *irq;
	struct its_collection *collection;
	u32 event_id;
};

struct vgic_translation_cache_entry {
	struct list_head	entry;
	phys_addr_t		db;
	u32			devid;
	u32			eventid;
	struct vgic_irq		*irq;
};

/**
 * struct vgic_its_abi - ITS abi ops and settings
 * @cte_esz: collection table entry size
 * @dte_esz: device table entry size
 * @ite_esz: interrupt translation table entry size
 * @save_tables: save the ITS tables into guest RAM
 * @restore_tables: restore the ITS internal structs from tables
 *  stored in guest RAM
 * @commit: initialize the registers which expose the ABI settings,
 *  especially the entry sizes
 */
struct vgic_its_abi {
	int cte_esz;
	int dte_esz;
	int ite_esz;
	int (*save_tables)(struct vgic_its *its);
	int (*restore_tables)(struct vgic_its *its);
	int (*commit)(struct vgic_its *its);
};

#define ABI_0_ESZ	8
#define ESZ_MAX		ABI_0_ESZ

static const struct vgic_its_abi its_table_abi_versions[] = {
	[0] = {
	 .cte_esz = ABI_0_ESZ,
	 .dte_esz = ABI_0_ESZ,
	 .ite_esz = ABI_0_ESZ,
	 .save_tables = vgic_its_save_tables_v0,
	 .restore_tables = vgic_its_restore_tables_v0,
	 .commit = vgic_its_commit_v0,
	},
};

#define NR_ITS_ABIS	ARRAY_SIZE(its_table_abi_versions)

inline const struct vgic_its_abi *vgic_its_get_abi(struct vgic_its *its)
{
	return &its_table_abi_versions[its->abi_rev];
}

static int vgic_its_set_abi(struct vgic_its *its, u32 rev)
{
	const struct vgic_its_abi *abi;

	its->abi_rev = rev;
	abi = vgic_its_get_abi(its);
	return abi->commit(its);
}

/*
 * Finds and returns a device in the device table for an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_device *find_its_device(struct vgic_its *its, u32 device_id)
{
	struct its_device *device;

	list_for_each_entry(device, &its->device_list, dev_list)
		if (device_id == device->device_id)
			return device;

	return NULL;
}

/*
 * Finds and returns an interrupt translation table entry (ITTE) for a given
 * Device ID/Event ID pair on an ITS.
 * Must be called with the its_lock mutex held.
 */
static struct its_ite *find_ite(struct vgic_its *its, u32 device_id,
				  u32 event_id)
{
	struct its_device *device;
	struct its_ite *ite;

	device = find_its_device(its, device_id);
	if (device == NULL)
		return NULL;

	list_for_each_entry(ite, &device->itt_head, ite_list)
		if (ite->event_id == event_id)
			return ite;

	return NULL;
}

/* To be used as an iterator, this macro omits the enclosing parentheses */
#define for_each_lpi_its(dev, ite, its) \
	list_for_each_entry(dev, &(its)->device_list, dev_list) \
		list_for_each_entry(ite, &(dev)->itt_head, ite_list)
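
/*
 * Editor's illustration (not part of the original file): the macro
 * above expands to two nested loops, so a multi-statement body at the
 * call site needs its own braces, e.g.:
 *
 *	for_each_lpi_its(dev, ite, its) {
 *		if (!ite->collection)
 *			continue;
 *		update_affinity_ite(kvm, ite);
 *	}
 */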

#define GIC_LPI_OFFSET 8192

#define VITS_TYPER_IDBITS 16
#define VITS_TYPER_DEVBITS 16
#define VITS_DTE_MAX_DEVID_OFFSET	(BIT(14) - 1)
#define VITS_ITE_MAX_EVENTID_OFFSET	(BIT(16) - 1)

/*
 * Finds and returns a collection in the ITS collection table.
 * Must be called with the its_lock mutex held.
 */
static struct its_collection *find_collection(struct vgic_its *its, int coll_id)
{
	struct its_collection *collection;

	list_for_each_entry(collection, &its->collection_list, coll_list) {
		if (coll_id == collection->collection_id)
			return collection;
	}

	return NULL;
}

#define LPI_PROP_ENABLE_BIT(p)	((p) & LPI_PROP_ENABLED)
#define LPI_PROP_PRIORITY(p)	((p) & 0xfc)
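
/*
 * Worked example (editor's illustration): for a configuration byte of
 * 0xa3, LPI_PROP_PRIORITY() yields 0xa0 (bits [7:2] hold the priority)
 * and LPI_PROP_ENABLE_BIT() yields 1 (bit 0 is the enable bit).
 */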

/*
 * Reads the configuration data for a given LPI from guest memory and
 * updates the fields in struct vgic_irq.
 * If filter_vcpu is not NULL, applies only if the IRQ is targeting this
 * VCPU. Unconditionally applies if filter_vcpu is NULL.
 */
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
			     struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
	u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
	u8 prop;
	int ret;
	unsigned long flags;

	ret = kvm_read_guest_lock(kvm, propbase + irq->intid - GIC_LPI_OFFSET,
				  &prop, 1);

	if (ret)
		return ret;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!filter_vcpu || filter_vcpu == irq->target_vcpu) {
		irq->priority = LPI_PROP_PRIORITY(prop);
		irq->enabled = LPI_PROP_ENABLE_BIT(prop);

		if (!irq->hw) {
			vgic_queue_irq_unlock(kvm, irq, flags);
			return 0;
		}
	}

	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw)
		return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);

	return 0;
}

/*
 * Create a snapshot of the current LPIs targeting @vcpu, so that we can
 * enumerate those LPIs without holding any lock.
 * Returns their number and puts the kmalloc'ed array into intid_ptr.
 */
int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;
	u32 *intids;
	int irq_count, i = 0;

	/*
	 * There is an obvious race between allocating the array and LPIs
	 * being mapped/unmapped. If we ended up here as a result of a
	 * command, we're safe (locks are held, preventing another
	 * command). If coming from another path (such as enabling LPIs),
	 * we must be careful not to overrun the array.
	 */
	irq_count = READ_ONCE(dist->lpi_list_count);
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;
		/* We don't need to "get" the IRQ, as we hold the list lock. */
		if (vcpu && irq->target_vcpu != vcpu)
			continue;
		intids[i++] = irq->intid;
	}
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	*intid_ptr = intids;
	return i;
}

static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
{
	int ret = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->target_vcpu = vcpu;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	if (irq->hw) {
		struct its_vlpi_map map;

		ret = its_get_vlpi(irq->host_irq, &map);
		if (ret)
			return ret;

		if (map.vpe)
			atomic_dec(&map.vpe->vlpi_count);
		map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
		atomic_inc(&map.vpe->vlpi_count);

		ret = its_map_vlpi(irq->host_irq, &map);
	}

	return ret;
}

/*
 * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
 * is targeting) to the VGIC's view, which deals with target VCPUs.
 * Needs to be called whenever either the collection for an LPI has
 * changed or the collection itself got retargeted.
 */
static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite)
{
	struct kvm_vcpu *vcpu;

	if (!its_is_collection_mapped(ite->collection))
		return;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	update_affinity(ite->irq, vcpu);
}

/*
 * Updates the target VCPU for every LPI targeting this collection.
 * Must be called with the its_lock mutex held.
 */
static void update_affinity_collection(struct kvm *kvm, struct vgic_its *its,
				       struct its_collection *coll)
{
	struct its_device *device;
	struct its_ite *ite;

	for_each_lpi_its(device, ite, its) {
		if (!ite->collection || coll != ite->collection)
			continue;

		update_affinity_ite(kvm, ite);
	}
}

static u32 max_lpis_propbaser(u64 propbaser)
{
	int nr_idbits = (propbaser & 0x1f) + 1;

	return 1U << min(nr_idbits, INTERRUPT_ID_BITS_ITS);
}
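
/*
 * Worked example (editor's illustration): GICR_PROPBASER.IDbits holds
 * "number of interrupt ID bits minus one". A value of 13 gives
 * nr_idbits = 14 and thus 1 << 14 = 16384 interrupt IDs. Anything
 * larger is capped at INTERRUPT_ID_BITS_ITS (16 in this emulation),
 * so the result never exceeds 1 << 16 = 65536.
 */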

/*
 * Sync the pending table pending bit of LPIs targeting @vcpu
 * with our own data structures. This relies on the LPI being
 * mapped before.
 */
static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
{
	gpa_t pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
	struct vgic_irq *irq;
	int last_byte_offset = -1;
	int ret = 0;
	u32 *intids;
	int nr_irqs, i;
	unsigned long flags;
	u8 pendmask;

	nr_irqs = vgic_copy_lpi_list(vcpu->kvm, vcpu, &intids);
	if (nr_irqs < 0)
		return nr_irqs;

	for (i = 0; i < nr_irqs; i++) {
		int byte_offset, bit_nr;

		byte_offset = intids[i] / BITS_PER_BYTE;
		bit_nr = intids[i] % BITS_PER_BYTE;

		/*
		 * For contiguously allocated LPIs chances are we just read
		 * this very same byte in the last iteration. Reuse that.
		 */
		if (byte_offset != last_byte_offset) {
			ret = kvm_read_guest_lock(vcpu->kvm,
						  pendbase + byte_offset,
						  &pendmask, 1);
			if (ret) {
				kfree(intids);
				return ret;
			}
			last_byte_offset = byte_offset;
		}

		irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]);
		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		irq->pending_latch = pendmask & (1U << bit_nr);
		vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
		vgic_put_irq(vcpu->kvm, irq);
	}

	kfree(intids);

	return ret;
}
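
/*
 * Worked example (editor's illustration): the pending table holds one
 * bit per INTID. For the first LPI, intid = 8192 (GIC_LPI_OFFSET), so
 * byte_offset = 8192 / 8 = 1024 and bit_nr = 8192 % 8 = 0; intid 8195
 * lands in the same byte at bit 3, which is why the byte just read is
 * reused for contiguous LPIs.
 */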

static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
					      struct vgic_its *its,
					      gpa_t addr, unsigned int len)
{
	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
	u64 reg = GITS_TYPER_PLPIS;

	/*
	 * We use linear CPU numbers for redistributor addressing,
	 * so GITS_TYPER.PTA is 0.
	 * Also we force all PROPBASER registers to be the same, so
	 * CommonLPIAff is 0 as well.
	 * To avoid memory waste in the guest, we keep the number of IDBits and
	 * DevBits low - at least for the time being.
	 */
	reg |= GIC_ENCODE_SZ(VITS_TYPER_DEVBITS, 5) << GITS_TYPER_DEVBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(VITS_TYPER_IDBITS, 5) << GITS_TYPER_IDBITS_SHIFT;
	reg |= GIC_ENCODE_SZ(abi->ite_esz, 4) << GITS_TYPER_ITT_ENTRY_SIZE_SHIFT;

	return extract_bytes(reg, addr & 7, len);
}

static unsigned long vgic_mmio_read_its_iidr(struct kvm *kvm,
					     struct vgic_its *its,
					     gpa_t addr, unsigned int len)
{
	u32 val;

	val = (its->abi_rev << GITS_IIDR_REV_SHIFT) & GITS_IIDR_REV_MASK;
	val |= (PRODUCT_ID_KVM << GITS_IIDR_PRODUCTID_SHIFT) | IMPLEMENTER_ARM;
	return val;
}

static int vgic_mmio_uaccess_write_its_iidr(struct kvm *kvm,
					    struct vgic_its *its,
					    gpa_t addr, unsigned int len,
					    unsigned long val)
{
	u32 rev = GITS_IIDR_REV(val);

	if (rev >= NR_ITS_ABIS)
		return -EINVAL;
	return vgic_its_set_abi(its, rev);
}

static unsigned long vgic_mmio_read_its_idregs(struct kvm *kvm,
					       struct vgic_its *its,
					       gpa_t addr, unsigned int len)
{
	switch (addr & 0xffff) {
	case GITS_PIDR0:
		return 0x92;	/* part number, bits[7:0] */
	case GITS_PIDR1:
		return 0xb4;	/* part number, bits[11:8] */
	case GITS_PIDR2:
		return GIC_PIDR2_ARCH_GICv3 | 0x0b;
	case GITS_PIDR4:
		return 0x40;	/* This is a 64K software visible page */
	/* The following are the ID registers for (any) GIC. */
	case GITS_CIDR0:
		return 0x0d;
	case GITS_CIDR1:
		return 0xf0;
	case GITS_CIDR2:
		return 0x05;
	case GITS_CIDR3:
		return 0xb1;
	}

	return 0;
}

static struct vgic_irq *__vgic_its_check_cache(struct vgic_dist *dist,
					       phys_addr_t db,
					       u32 devid, u32 eventid)
{
	struct vgic_translation_cache_entry *cte;

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		if (cte->db != db || cte->devid != devid ||
		    cte->eventid != eventid)
			continue;

		/*
		 * Move this entry to the head, as it is the most
		 * recently used.
		 */
		if (!list_is_first(&cte->entry, &dist->lpi_translation_cache))
			list_move(&cte->entry, &dist->lpi_translation_cache);

		return cte->irq;
	}

	return NULL;
}

static struct vgic_irq *vgic_its_check_cache(struct kvm *kvm, phys_addr_t db,
					     u32 devid, u32 eventid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);
	irq = __vgic_its_check_cache(dist, db, devid, eventid);
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);

	return irq;
}

static void vgic_its_cache_translation(struct kvm *kvm, struct vgic_its *its,
				       u32 devid, u32 eventid,
				       struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;
	phys_addr_t db;

	/* Do not cache a directly injected interrupt */
	if (irq->hw)
		return;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	if (unlikely(list_empty(&dist->lpi_translation_cache)))
		goto out;

	/*
	 * We could have raced with another CPU caching the same
	 * translation behind our back, so let's check that it is not
	 * in there already.
	 */
	db = its->vgic_its_base + GITS_TRANSLATER;
	if (__vgic_its_check_cache(dist, db, devid, eventid))
		goto out;

	/* Always reuse the last entry (LRU policy) */
	cte = list_last_entry(&dist->lpi_translation_cache,
			      typeof(*cte), entry);

	/*
	 * Caching the translation implies having an extra reference
	 * to the interrupt, so drop the potential reference on what
	 * was in the cache, and increment it on the new interrupt.
	 */
	if (cte->irq)
		__vgic_put_lpi_locked(kvm, cte->irq);

	vgic_get_irq_kref(irq);

	cte->db		= db;
	cte->devid	= devid;
	cte->eventid	= eventid;
	cte->irq	= irq;

	/* Move the new translation to the head of the list */
	list_move(&cte->entry, &dist->lpi_translation_cache);

out:
	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}
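
/*
 * Editor's note on the design (not part of the original file): the
 * translation cache is a pre-allocated list used as an LRU. Hits are
 * moved to the head by __vgic_its_check_cache(), so the tail is always
 * the least recently used entry, and it is the one recycled above.
 */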

void vgic_its_invalidate_cache(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_translation_cache_entry *cte;
	unsigned long flags;

	raw_spin_lock_irqsave(&dist->lpi_list_lock, flags);

	list_for_each_entry(cte, &dist->lpi_translation_cache, entry) {
		/*
		 * If we hit a NULL entry, there is nothing after this
		 * point.
		 */
		if (!cte->irq)
			break;

		__vgic_put_lpi_locked(kvm, cte->irq);
		cte->irq = NULL;
	}

	raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags);
}

int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
			 u32 devid, u32 eventid, struct vgic_irq **irq)
{
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;

	if (!its->enabled)
		return -EBUSY;

	ite = find_ite(its, devid, eventid);
	if (!ite || !its_is_collection_mapped(ite->collection))
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
	if (!vcpu)
		return E_ITS_INT_UNMAPPED_INTERRUPT;

	if (!vcpu->arch.vgic_cpu.lpis_enabled)
		return -EBUSY;

	vgic_its_cache_translation(kvm, its, devid, eventid, ite->irq);

	*irq = ite->irq;
	return 0;
}

struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
	u64 address;
	struct kvm_io_device *kvm_io_dev;
	struct vgic_io_device *iodev;

	if (!vgic_has_its(kvm))
		return ERR_PTR(-ENODEV);

	if (!(msi->flags & KVM_MSI_VALID_DEVID))
		return ERR_PTR(-EINVAL);

	address = (u64)msi->address_hi << 32 | msi->address_lo;

	kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
	if (!kvm_io_dev)
		return ERR_PTR(-EINVAL);

	if (kvm_io_dev->ops != &kvm_io_gic_ops)
		return ERR_PTR(-EINVAL);

	iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
	if (iodev->iodev_type != IODEV_ITS)
		return ERR_PTR(-EINVAL);

	return iodev->its;
}

/*
 * Find the target VCPU and the LPI number for a given devid/eventid pair
 * and make this IRQ pending, possibly injecting it.
 * Must be called with the its_lock mutex held.
 * Returns 0 on success, a positive error value for any ITS mapping-related
 * error, and negative error values for generic errors.
 */
static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
				u32 devid, u32 eventid)
{
	struct vgic_irq *irq = NULL;
	unsigned long flags;
	int err;

	err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
	if (err)
		return err;

	if (irq->hw)
		return irq_set_irqchip_state(irq->host_irq,
					     IRQCHIP_STATE_PENDING, true);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

int vgic_its_inject_cached_translation(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_irq *irq;
	unsigned long flags;
	phys_addr_t db;

	db = (u64)msi->address_hi << 32 | msi->address_lo;
	irq = vgic_its_check_cache(kvm, db, msi->devid, msi->data);
	if (!irq)
		return -EWOULDBLOCK;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->pending_latch = true;
	vgic_queue_irq_unlock(kvm, irq, flags);

	return 0;
}

/*
 * Queries the KVM IO bus framework to get the ITS pointer from the given
 * doorbell address.
 * We then call vgic_its_trigger_msi() with the decoded data.
 * According to the KVM_SIGNAL_MSI API description, this returns 1 on success.
 */
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	struct vgic_its *its;
	int ret;

	if (!vgic_its_inject_cached_translation(kvm, msi))
		return 1;

	its = vgic_msi_to_its(kvm, msi);
	if (IS_ERR(its))
		return PTR_ERR(its);

	mutex_lock(&its->its_lock);
	ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
	mutex_unlock(&its->its_lock);

	if (ret < 0)
		return ret;

	/*
	 * KVM_SIGNAL_MSI demands a return value > 0 for success and 0
	 * if the guest has blocked the MSI. So we map any LPI mapping
	 * related error to that.
	 */
	if (ret)
		return 0;
	else
		return 1;
}
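
/*
 * Editor's illustration (not part of the original file) of the return
 * value mapping implemented above:
 *
 *	cache hit or successful injection            -> 1
 *	positive ITS error, e.g.
 *	E_ITS_INT_UNMAPPED_INTERRUPT                 -> 0 (MSI blocked)
 *	negative error, e.g. -EBUSY or -EINVAL       -> returned unchanged
 */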

/* Requires the its_lock to be held. */
static void its_free_ite(struct kvm *kvm, struct its_ite *ite)
{
	list_del(&ite->ite_list);

	/* This put matches the get in vgic_add_lpi. */
	if (ite->irq) {
		if (ite->irq->hw)
			WARN_ON(its_unmap_vlpi(ite->irq->host_irq));

		vgic_put_irq(kvm, ite->irq);
	}

	kfree(ite);
}

static u64 its_cmd_mask_field(u64 *its_cmd, int word, int shift, int size)
{
	return (le64_to_cpu(its_cmd[word]) >> shift) & (BIT_ULL(size) - 1);
}

#define its_cmd_get_command(cmd)	its_cmd_mask_field(cmd, 0,  0,  8)
#define its_cmd_get_deviceid(cmd)	its_cmd_mask_field(cmd, 0, 32, 32)
#define its_cmd_get_size(cmd)		(its_cmd_mask_field(cmd, 1,  0,  5) + 1)
#define its_cmd_get_id(cmd)		its_cmd_mask_field(cmd, 1,  0, 32)
#define its_cmd_get_physical_id(cmd)	its_cmd_mask_field(cmd, 1, 32, 32)
#define its_cmd_get_collection(cmd)	its_cmd_mask_field(cmd, 2,  0, 16)
#define its_cmd_get_ittaddr(cmd)	(its_cmd_mask_field(cmd, 2,  8, 44) << 8)
#define its_cmd_get_target_addr(cmd)	its_cmd_mask_field(cmd, 2, 16, 32)
#define its_cmd_get_validbit(cmd)	its_cmd_mask_field(cmd, 2, 63,  1)
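
/*
 * Worked example (editor's illustration): per the accessors above, a
 * MAPTI command is decoded from its little-endian doublewords as
 *
 *	DW0[7:0]    command number
 *	DW0[63:32]  device ID
 *	DW1[31:0]   event ID
 *	DW1[63:32]  physical interrupt ID (the LPI number)
 *	DW2[15:0]   collection ID
 *
 * while its_cmd_get_size() (DW1[4:0] + 1) is used by MAPD, where that
 * field encodes the number of event ID bits minus one.
 */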

/*
 * The DISCARD command frees an Interrupt Translation Table Entry (ITTE).
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_discard(struct kvm *kvm, struct vgic_its *its,
				       u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	struct its_ite *ite;

	ite = find_ite(its, device_id, event_id);
	if (ite && its_is_collection_mapped(ite->collection)) {
		/*
		 * Though the spec talks about removing the pending state, we
		 * don't bother here since we clear the ITTE anyway and the
		 * pending state is a property of the ITTE struct.
		 */
		vgic_its_invalidate_cache(kvm);

		its_free_ite(kvm, ite);
		return 0;
	}

	return E_ITS_DISCARD_UNMAPPED_INTERRUPT;
}

/*
 * The MOVI command moves an ITTE to a different collection.
 * Must be called with the its_lock mutex held.
 */
static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct kvm_vcpu *vcpu;
	struct its_ite *ite;
	struct its_collection *collection;

	ite = find_ite(its, device_id, event_id);
	if (!ite)
		return E_ITS_MOVI_UNMAPPED_INTERRUPT;

	if (!its_is_collection_mapped(ite->collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	collection = find_collection(its, coll_id);
	if (!its_is_collection_mapped(collection))
		return E_ITS_MOVI_UNMAPPED_COLLECTION;

	ite->collection = collection;
	vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	vgic_its_invalidate_cache(kvm);

	return update_affinity(ite->irq, vcpu);
}

/*
 * Check whether an ID can be stored into the corresponding guest table.
 * For a direct table this is pretty easy, but gets a bit nasty for
 * indirect tables. We check whether the resulting guest physical address
 * is actually valid (covered by a memslot and guest accessible).
 * For this we have to read the respective first level entry.
 */
static bool vgic_its_check_id(struct vgic_its *its, u64 baser, u32 id,
			      gpa_t *eaddr)
{
	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
	u64 indirect_ptr, type = GITS_BASER_TYPE(baser);
	phys_addr_t base = GITS_BASER_ADDR_48_to_52(baser);
	int esz = GITS_BASER_ENTRY_SIZE(baser);
	int index, idx;
	gfn_t gfn;
	bool ret;

	switch (type) {
	case GITS_BASER_TYPE_DEVICE:
		if (id >= BIT_ULL(VITS_TYPER_DEVBITS))
			return false;
		break;
	case GITS_BASER_TYPE_COLLECTION:
		/* as GITS_TYPER.CIL == 0, ITS supports 16-bit collection ID */
		if (id >= BIT_ULL(16))
			return false;
		break;
	default:
		return false;
	}

	if (!(baser & GITS_BASER_INDIRECT)) {
		phys_addr_t addr;

		if (id >= (l1_tbl_size / esz))
			return false;

		addr = base + id * esz;
		gfn = addr >> PAGE_SHIFT;

		if (eaddr)
			*eaddr = addr;

		goto out;
	}

	/* calculate and check the index into the 1st level */
	index = id / (SZ_64K / esz);
	if (index >= (l1_tbl_size / sizeof(u64)))
		return false;

	/* Each 1st level entry is represented by a 64-bit value. */
	if (kvm_read_guest_lock(its->dev->kvm,
			   base + index * sizeof(indirect_ptr),
			   &indirect_ptr, sizeof(indirect_ptr)))
		return false;

	indirect_ptr = le64_to_cpu(indirect_ptr);

	/* check the valid bit of the first level entry */
	if (!(indirect_ptr & BIT_ULL(63)))
		return false;

	/* Mask the guest physical address and calculate the frame number. */
	indirect_ptr &= GENMASK_ULL(51, 16);

	/* Find the address of the actual entry */
	index = id % (SZ_64K / esz);
	indirect_ptr += index * esz;
	gfn = indirect_ptr >> PAGE_SHIFT;

	if (eaddr)
		*eaddr = indirect_ptr;

out:
	idx = srcu_read_lock(&its->dev->kvm->srcu);
	ret = kvm_is_visible_gfn(its->dev->kvm, gfn);
	srcu_read_unlock(&its->dev->kvm->srcu, idx);
	return ret;
}
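
/*
 * Worked example (editor's illustration): with the ABI 0 entry size of
 * 8 bytes, one 64K second-level page holds 64K / 8 = 8192 entries. For
 * an indirect table and id = 20000, the first-level index is
 * 20000 / 8192 = 2 and the entry sits at offset (20000 % 8192) * 8 =
 * 28928 bytes into the page pointed to by that first-level entry.
 */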

static int vgic_its_alloc_collection(struct vgic_its *its,
				     struct its_collection **colp,
				     u32 coll_id)
{
	struct its_collection *collection;

	if (!vgic_its_check_id(its, its->baser_coll_table, coll_id, NULL))
		return E_ITS_MAPC_COLLECTION_OOR;

	collection = kzalloc(sizeof(*collection), GFP_KERNEL);
	if (!collection)
		return -ENOMEM;

	collection->collection_id = coll_id;
	collection->target_addr = COLLECTION_NOT_MAPPED;

	list_add_tail(&collection->coll_list, &its->collection_list);
	*colp = collection;

	return 0;
}

static void vgic_its_free_collection(struct vgic_its *its, u32 coll_id)
{
	struct its_collection *collection;
	struct its_device *device;
	struct its_ite *ite;

	/*
	 * Clearing the mapping for that collection ID removes the
	 * entry from the list. If there wasn't any before, we can
	 * go home early.
	 */
	collection = find_collection(its, coll_id);
	if (!collection)
		return;

	for_each_lpi_its(device, ite, its)
		if (ite->collection &&
		    ite->collection->collection_id == coll_id)
			ite->collection = NULL;

	list_del(&collection->coll_list);
	kfree(collection);
}

/* Must be called with its_lock mutex held */
static struct its_ite *vgic_its_alloc_ite(struct its_device *device,
					  struct its_collection *collection,
					  u32 event_id)
{
	struct its_ite *ite;

	ite = kzalloc(sizeof(*ite), GFP_KERNEL);
	if (!ite)
		return ERR_PTR(-ENOMEM);

	ite->event_id	= event_id;
	ite->collection = collection;

	list_add_tail(&ite->ite_list, &device->itt_head);
	return ite;
}

/*
 * The MAPTI and MAPI commands map LPIs to ITTEs.
 * Must be called with its_lock mutex held.
 */
static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its,
				    u64 *its_cmd)
{
	u32 device_id = its_cmd_get_deviceid(its_cmd);
	u32 event_id = its_cmd_get_id(its_cmd);
	u32 coll_id = its_cmd_get_collection(its_cmd);
	struct its_ite *ite;
	struct kvm_vcpu *vcpu = NULL;
	struct its_device *device;
	struct its_collection *collection, *new_coll = NULL;
	struct vgic_irq *irq;
	int lpi_nr;

	device = find_its_device(its, device_id);
	if (!device)
		return E_ITS_MAPTI_UNMAPPED_DEVICE;

	if (event_id >= BIT_ULL(device->num_eventid_bits))
		return E_ITS_MAPTI_ID_OOR;

	if (its_cmd_get_command(its_cmd) == GITS_CMD_MAPTI)
		lpi_nr = its_cmd_get_physical_id(its_cmd);
	else
		lpi_nr = event_id;
	if (lpi_nr < GIC_LPI_OFFSET ||
	    lpi_nr >= max_lpis_propbaser(kvm->arch.vgic.propbaser))
		return E_ITS_MAPTI_PHYSICALID_OOR;

	/* If there is an existing mapping, behavior is UNPREDICTABLE. */
	if (find_ite(its, device_id, event_id))
		return 0;

	collection = find_collection(its, coll_id);
	if (!collection) {
		int ret = vgic_its_alloc_collection(its, &collection, coll_id);
		if (ret)
			return ret;
		new_coll = collection;
	}

	ite = vgic_its_alloc_ite(device, collection, event_id);
	if (IS_ERR(ite)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		return PTR_ERR(ite);
	}

	if (its_is_collection_mapped(collection))
		vcpu = kvm_get_vcpu(kvm, collection->target_addr);

	irq = vgic_add_lpi(kvm, lpi_nr, vcpu);
	if (IS_ERR(irq)) {
		if (new_coll)
			vgic_its_free_collection(its, coll_id);
		its_free_ite(kvm, ite);
		return PTR_ERR(irq);
	}
	ite->irq = irq;

	return 0;
}
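
/*
 * Editor's illustration (not part of the original file): MAPTI names
 * the LPI explicitly while MAPI reuses the event ID, so
 *
 *	MAPTI device=2, event=16, pINTID=8200, collection=0
 *
 * maps event 16 of device 2 to LPI 8200, whereas
 *
 *	MAPI device=2, event=8200, collection=0
 *
 * maps event 8200 to LPI 8200 (the event ID doubles as the LPI number).
 */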

/* Requires the its_lock to be held. */
static void vgic_its_free_device(struct kvm *kvm, struct its_device *device)
{
	struct its_ite *ite, *temp;

	/*
	 * The spec says that unmapping a device with valid ITTEs still
	 * associated is UNPREDICTABLE. We remove all ITTEs, since we
	 * cannot leave the memory unreferenced.
	 */
	list_for_each_entry_safe(ite, temp, &device->itt_head, ite_list)
		its_free_ite(kvm, ite);

	vgic_its_invalidate_cache(kvm);

	list_del(&device->dev_list);
	kfree(device);
}
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun /* its lock must be held */
vgic_its_free_device_list(struct kvm * kvm,struct vgic_its * its)1129*4882a593Smuzhiyun static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its)
1130*4882a593Smuzhiyun {
1131*4882a593Smuzhiyun 	struct its_device *cur, *temp;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	list_for_each_entry_safe(cur, temp, &its->device_list, dev_list)
1134*4882a593Smuzhiyun 		vgic_its_free_device(kvm, cur);
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun /* its lock must be held */
1138*4882a593Smuzhiyun static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	struct its_collection *cur, *temp;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list)
1143*4882a593Smuzhiyun 		vgic_its_free_collection(its, cur->collection_id);
1144*4882a593Smuzhiyun }
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun /* Must be called with its_lock mutex held */
1147*4882a593Smuzhiyun static struct its_device *vgic_its_alloc_device(struct vgic_its *its,
1148*4882a593Smuzhiyun 						u32 device_id, gpa_t itt_addr,
1149*4882a593Smuzhiyun 						u8 num_eventid_bits)
1150*4882a593Smuzhiyun {
1151*4882a593Smuzhiyun 	struct its_device *device;
1152*4882a593Smuzhiyun 
1153*4882a593Smuzhiyun 	device = kzalloc(sizeof(*device), GFP_KERNEL);
1154*4882a593Smuzhiyun 	if (!device)
1155*4882a593Smuzhiyun 		return ERR_PTR(-ENOMEM);
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	device->device_id = device_id;
1158*4882a593Smuzhiyun 	device->itt_addr = itt_addr;
1159*4882a593Smuzhiyun 	device->num_eventid_bits = num_eventid_bits;
1160*4882a593Smuzhiyun 	INIT_LIST_HEAD(&device->itt_head);
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	list_add_tail(&device->dev_list, &its->device_list);
1163*4882a593Smuzhiyun 	return device;
1164*4882a593Smuzhiyun }
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun /*
1167*4882a593Smuzhiyun  * MAPD maps or unmaps a device ID to Interrupt Translation Tables (ITTs).
1168*4882a593Smuzhiyun  * Must be called with the its_lock mutex held.
1169*4882a593Smuzhiyun  */
1170*4882a593Smuzhiyun static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its,
1171*4882a593Smuzhiyun 				    u64 *its_cmd)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1174*4882a593Smuzhiyun 	bool valid = its_cmd_get_validbit(its_cmd);
1175*4882a593Smuzhiyun 	u8 num_eventid_bits = its_cmd_get_size(its_cmd);
1176*4882a593Smuzhiyun 	gpa_t itt_addr = its_cmd_get_ittaddr(its_cmd);
1177*4882a593Smuzhiyun 	struct its_device *device;
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	if (!vgic_its_check_id(its, its->baser_device_table, device_id, NULL))
1180*4882a593Smuzhiyun 		return E_ITS_MAPD_DEVICE_OOR;
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	if (valid && num_eventid_bits > VITS_TYPER_IDBITS)
1183*4882a593Smuzhiyun 		return E_ITS_MAPD_ITTSIZE_OOR;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	device = find_its_device(its, device_id);
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun 	/*
1188*4882a593Smuzhiyun 	 * The spec says that calling MAPD on an already mapped device
1189*4882a593Smuzhiyun 	 * invalidates all cached data for this device. We implement this
1190*4882a593Smuzhiyun 	 * by removing the mapping and re-establishing it.
1191*4882a593Smuzhiyun 	 */
1192*4882a593Smuzhiyun 	if (device)
1193*4882a593Smuzhiyun 		vgic_its_free_device(kvm, device);
1194*4882a593Smuzhiyun 
1195*4882a593Smuzhiyun 	/*
1196*4882a593Smuzhiyun 	 * The spec does not say whether unmapping an unmapped device
1197*4882a593Smuzhiyun 	 * is an error, so we are done in any case.
1198*4882a593Smuzhiyun 	 */
1199*4882a593Smuzhiyun 	if (!valid)
1200*4882a593Smuzhiyun 		return 0;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	device = vgic_its_alloc_device(its, device_id, itt_addr,
1203*4882a593Smuzhiyun 				       num_eventid_bits);
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	return PTR_ERR_OR_ZERO(device);
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun /*
1209*4882a593Smuzhiyun  * The MAPC command maps collection IDs to redistributors.
1210*4882a593Smuzhiyun  * Must be called with the its_lock mutex held.
1211*4882a593Smuzhiyun  */
1212*4882a593Smuzhiyun static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its,
1213*4882a593Smuzhiyun 				    u64 *its_cmd)
1214*4882a593Smuzhiyun {
1215*4882a593Smuzhiyun 	u16 coll_id;
1216*4882a593Smuzhiyun 	u32 target_addr;
1217*4882a593Smuzhiyun 	struct its_collection *collection;
1218*4882a593Smuzhiyun 	bool valid;
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 	valid = its_cmd_get_validbit(its_cmd);
1221*4882a593Smuzhiyun 	coll_id = its_cmd_get_collection(its_cmd);
1222*4882a593Smuzhiyun 	target_addr = its_cmd_get_target_addr(its_cmd);
1223*4882a593Smuzhiyun 
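	/*
	 * We report GITS_TYPER.PTA == 0, so the RDbase field in MAPC is a
	 * linear processor number, which maps directly onto a vcpu index.
	 */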
1224*4882a593Smuzhiyun 	if (target_addr >= atomic_read(&kvm->online_vcpus))
1225*4882a593Smuzhiyun 		return E_ITS_MAPC_PROCNUM_OOR;
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun 	if (!valid) {
1228*4882a593Smuzhiyun 		vgic_its_free_collection(its, coll_id);
1229*4882a593Smuzhiyun 		vgic_its_invalidate_cache(kvm);
1230*4882a593Smuzhiyun 	} else {
1231*4882a593Smuzhiyun 		collection = find_collection(its, coll_id);
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 		if (!collection) {
1234*4882a593Smuzhiyun 			int ret;
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 			ret = vgic_its_alloc_collection(its, &collection,
1237*4882a593Smuzhiyun 							coll_id);
1238*4882a593Smuzhiyun 			if (ret)
1239*4882a593Smuzhiyun 				return ret;
1240*4882a593Smuzhiyun 			collection->target_addr = target_addr;
1241*4882a593Smuzhiyun 		} else {
1242*4882a593Smuzhiyun 			collection->target_addr = target_addr;
1243*4882a593Smuzhiyun 			update_affinity_collection(kvm, its, collection);
1244*4882a593Smuzhiyun 		}
1245*4882a593Smuzhiyun 	}
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	return 0;
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun /*
1251*4882a593Smuzhiyun  * The CLEAR command removes the pending state for a particular LPI.
1252*4882a593Smuzhiyun  * Must be called with the its_lock mutex held.
1253*4882a593Smuzhiyun  */
1254*4882a593Smuzhiyun static int vgic_its_cmd_handle_clear(struct kvm *kvm, struct vgic_its *its,
1255*4882a593Smuzhiyun 				     u64 *its_cmd)
1256*4882a593Smuzhiyun {
1257*4882a593Smuzhiyun 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1258*4882a593Smuzhiyun 	u32 event_id = its_cmd_get_id(its_cmd);
1259*4882a593Smuzhiyun 	struct its_ite *ite;
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	ite = find_ite(its, device_id, event_id);
1263*4882a593Smuzhiyun 	if (!ite)
1264*4882a593Smuzhiyun 		return E_ITS_CLEAR_UNMAPPED_INTERRUPT;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	ite->irq->pending_latch = false;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	if (ite->irq->hw)
1269*4882a593Smuzhiyun 		return irq_set_irqchip_state(ite->irq->host_irq,
1270*4882a593Smuzhiyun 					     IRQCHIP_STATE_PENDING, false);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	return 0;
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun /*
1276*4882a593Smuzhiyun  * The INV command syncs the configuration bits from the memory table.
1277*4882a593Smuzhiyun  * Must be called with the its_lock mutex held.
1278*4882a593Smuzhiyun  */
1279*4882a593Smuzhiyun static int vgic_its_cmd_handle_inv(struct kvm *kvm, struct vgic_its *its,
1280*4882a593Smuzhiyun 				   u64 *its_cmd)
1281*4882a593Smuzhiyun {
1282*4882a593Smuzhiyun 	u32 device_id = its_cmd_get_deviceid(its_cmd);
1283*4882a593Smuzhiyun 	u32 event_id = its_cmd_get_id(its_cmd);
1284*4882a593Smuzhiyun 	struct its_ite *ite;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	ite = find_ite(its, device_id, event_id);
1288*4882a593Smuzhiyun 	if (!ite)
1289*4882a593Smuzhiyun 		return E_ITS_INV_UNMAPPED_INTERRUPT;
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	return update_lpi_config(kvm, ite->irq, NULL, true);
1292*4882a593Smuzhiyun }
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun /*
1295*4882a593Smuzhiyun  * The INVALL command requests flushing of all IRQ data in this collection.
1296*4882a593Smuzhiyun  * Find the VCPU mapped to that collection, then iterate over the VM's list
1297*4882a593Smuzhiyun  * of mapped LPIs and update the configuration for each IRQ which targets
1298*4882a593Smuzhiyun  * the specified vcpu. The configuration will be read from the in-memory
1299*4882a593Smuzhiyun  * configuration table.
1300*4882a593Smuzhiyun  * Must be called with the its_lock mutex held.
1301*4882a593Smuzhiyun  */
1302*4882a593Smuzhiyun static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1303*4882a593Smuzhiyun 				      u64 *its_cmd)
1304*4882a593Smuzhiyun {
1305*4882a593Smuzhiyun 	u32 coll_id = its_cmd_get_collection(its_cmd);
1306*4882a593Smuzhiyun 	struct its_collection *collection;
1307*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu;
1308*4882a593Smuzhiyun 	struct vgic_irq *irq;
1309*4882a593Smuzhiyun 	u32 *intids;
1310*4882a593Smuzhiyun 	int irq_count, i;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 	collection = find_collection(its, coll_id);
1313*4882a593Smuzhiyun 	if (!its_is_collection_mapped(collection))
1314*4882a593Smuzhiyun 		return E_ITS_INVALL_UNMAPPED_COLLECTION;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 	vcpu = kvm_get_vcpu(kvm, collection->target_addr);
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	irq_count = vgic_copy_lpi_list(kvm, vcpu, &intids);
1319*4882a593Smuzhiyun 	if (irq_count < 0)
1320*4882a593Smuzhiyun 		return irq_count;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	for (i = 0; i < irq_count; i++) {
1323*4882a593Smuzhiyun 		irq = vgic_get_irq(kvm, NULL, intids[i]);
1324*4882a593Smuzhiyun 		if (!irq)
1325*4882a593Smuzhiyun 			continue;
1326*4882a593Smuzhiyun 		update_lpi_config(kvm, irq, vcpu, false);
1327*4882a593Smuzhiyun 		vgic_put_irq(kvm, irq);
1328*4882a593Smuzhiyun 	}
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	kfree(intids);
1331*4882a593Smuzhiyun 
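	/*
	 * If this vcpu is backed by a GICv4 vPE, some LPIs may be forwarded
	 * directly to hardware, so propagate the invalidation there too.
	 */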
1332*4882a593Smuzhiyun 	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
1333*4882a593Smuzhiyun 		its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	return 0;
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun /*
1339*4882a593Smuzhiyun  * The MOVALL command moves the pending state of all IRQs targeting one
1340*4882a593Smuzhiyun  * redistributor to another. We don't hold the pending state in the VCPUs,
1341*4882a593Smuzhiyun  * but in the IRQs instead, so there is really not much to do for us here.
1342*4882a593Smuzhiyun  * However, the spec says that no IRQ must target the old redistributor
1343*4882a593Smuzhiyun  * afterwards, so we make sure that no LPI is using the associated target_vcpu.
1344*4882a593Smuzhiyun  * This command affects all LPIs in the system that target that redistributor.
1345*4882a593Smuzhiyun  */
1346*4882a593Smuzhiyun static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1347*4882a593Smuzhiyun 				      u64 *its_cmd)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun 	u32 target1_addr = its_cmd_get_target_addr(its_cmd);
1350*4882a593Smuzhiyun 	u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
1351*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu1, *vcpu2;
1352*4882a593Smuzhiyun 	struct vgic_irq *irq;
1353*4882a593Smuzhiyun 	u32 *intids;
1354*4882a593Smuzhiyun 	int irq_count, i;
1355*4882a593Smuzhiyun 
1356*4882a593Smuzhiyun 	if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
1357*4882a593Smuzhiyun 	    target2_addr >= atomic_read(&kvm->online_vcpus))
1358*4882a593Smuzhiyun 		return E_ITS_MOVALL_PROCNUM_OOR;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	if (target1_addr == target2_addr)
1361*4882a593Smuzhiyun 		return 0;
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	vcpu1 = kvm_get_vcpu(kvm, target1_addr);
1364*4882a593Smuzhiyun 	vcpu2 = kvm_get_vcpu(kvm, target2_addr);
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	irq_count = vgic_copy_lpi_list(kvm, vcpu1, &intids);
1367*4882a593Smuzhiyun 	if (irq_count < 0)
1368*4882a593Smuzhiyun 		return irq_count;
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	for (i = 0; i < irq_count; i++) {
1371*4882a593Smuzhiyun 		irq = vgic_get_irq(kvm, NULL, intids[i]);
		if (!irq)
			continue;
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 		update_affinity(irq, vcpu2);
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun 		vgic_put_irq(kvm, irq);
1376*4882a593Smuzhiyun 	}
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 	vgic_its_invalidate_cache(kvm);
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun 	kfree(intids);
1381*4882a593Smuzhiyun 	return 0;
1382*4882a593Smuzhiyun }
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun /*
1385*4882a593Smuzhiyun  * The INT command injects the LPI associated with that DevID/EvID pair.
1386*4882a593Smuzhiyun  * Must be called with the its_lock mutex held.
1387*4882a593Smuzhiyun  */
1388*4882a593Smuzhiyun static int vgic_its_cmd_handle_int(struct kvm *kvm, struct vgic_its *its,
1389*4882a593Smuzhiyun 				   u64 *its_cmd)
1390*4882a593Smuzhiyun {
1391*4882a593Smuzhiyun 	u32 msi_data = its_cmd_get_id(its_cmd);
1392*4882a593Smuzhiyun 	u64 msi_devid = its_cmd_get_deviceid(its_cmd);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	return vgic_its_trigger_msi(kvm, its, msi_devid, msi_data);
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun /*
1398*4882a593Smuzhiyun  * This function is called with the its_cmd lock held, but the ITS data
1399*4882a593Smuzhiyun  * structure lock dropped.
1400*4882a593Smuzhiyun  */
1401*4882a593Smuzhiyun static int vgic_its_handle_command(struct kvm *kvm, struct vgic_its *its,
1402*4882a593Smuzhiyun 				   u64 *its_cmd)
1403*4882a593Smuzhiyun {
1404*4882a593Smuzhiyun 	int ret = -ENODEV;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	mutex_lock(&its->its_lock);
1407*4882a593Smuzhiyun 	switch (its_cmd_get_command(its_cmd)) {
1408*4882a593Smuzhiyun 	case GITS_CMD_MAPD:
1409*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_mapd(kvm, its, its_cmd);
1410*4882a593Smuzhiyun 		break;
1411*4882a593Smuzhiyun 	case GITS_CMD_MAPC:
1412*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_mapc(kvm, its, its_cmd);
1413*4882a593Smuzhiyun 		break;
1414*4882a593Smuzhiyun 	case GITS_CMD_MAPI:
1415*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1416*4882a593Smuzhiyun 		break;
1417*4882a593Smuzhiyun 	case GITS_CMD_MAPTI:
1418*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_mapi(kvm, its, its_cmd);
1419*4882a593Smuzhiyun 		break;
1420*4882a593Smuzhiyun 	case GITS_CMD_MOVI:
1421*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_movi(kvm, its, its_cmd);
1422*4882a593Smuzhiyun 		break;
1423*4882a593Smuzhiyun 	case GITS_CMD_DISCARD:
1424*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_discard(kvm, its, its_cmd);
1425*4882a593Smuzhiyun 		break;
1426*4882a593Smuzhiyun 	case GITS_CMD_CLEAR:
1427*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_clear(kvm, its, its_cmd);
1428*4882a593Smuzhiyun 		break;
1429*4882a593Smuzhiyun 	case GITS_CMD_MOVALL:
1430*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_movall(kvm, its, its_cmd);
1431*4882a593Smuzhiyun 		break;
1432*4882a593Smuzhiyun 	case GITS_CMD_INT:
1433*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_int(kvm, its, its_cmd);
1434*4882a593Smuzhiyun 		break;
1435*4882a593Smuzhiyun 	case GITS_CMD_INV:
1436*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_inv(kvm, its, its_cmd);
1437*4882a593Smuzhiyun 		break;
1438*4882a593Smuzhiyun 	case GITS_CMD_INVALL:
1439*4882a593Smuzhiyun 		ret = vgic_its_cmd_handle_invall(kvm, its, its_cmd);
1440*4882a593Smuzhiyun 		break;
1441*4882a593Smuzhiyun 	case GITS_CMD_SYNC:
1442*4882a593Smuzhiyun 		/* we ignore this command: we are in sync all of the time */
1443*4882a593Smuzhiyun 		ret = 0;
1444*4882a593Smuzhiyun 		break;
1445*4882a593Smuzhiyun 	}
1446*4882a593Smuzhiyun 	mutex_unlock(&its->its_lock);
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	return ret;
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun static u64 vgic_sanitise_its_baser(u64 reg)
1452*4882a593Smuzhiyun {
1453*4882a593Smuzhiyun 	reg = vgic_sanitise_field(reg, GITS_BASER_SHAREABILITY_MASK,
1454*4882a593Smuzhiyun 				  GITS_BASER_SHAREABILITY_SHIFT,
1455*4882a593Smuzhiyun 				  vgic_sanitise_shareability);
1456*4882a593Smuzhiyun 	reg = vgic_sanitise_field(reg, GITS_BASER_INNER_CACHEABILITY_MASK,
1457*4882a593Smuzhiyun 				  GITS_BASER_INNER_CACHEABILITY_SHIFT,
1458*4882a593Smuzhiyun 				  vgic_sanitise_inner_cacheability);
1459*4882a593Smuzhiyun 	reg = vgic_sanitise_field(reg, GITS_BASER_OUTER_CACHEABILITY_MASK,
1460*4882a593Smuzhiyun 				  GITS_BASER_OUTER_CACHEABILITY_SHIFT,
1461*4882a593Smuzhiyun 				  vgic_sanitise_outer_cacheability);
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	/* We support only one (ITS) page size: 64K */
1464*4882a593Smuzhiyun 	reg = (reg & ~GITS_BASER_PAGE_SIZE_MASK) | GITS_BASER_PAGE_SIZE_64K;
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	return reg;
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun static u64 vgic_sanitise_its_cbaser(u64 reg)
1470*4882a593Smuzhiyun {
1471*4882a593Smuzhiyun 	reg = vgic_sanitise_field(reg, GITS_CBASER_SHAREABILITY_MASK,
1472*4882a593Smuzhiyun 				  GITS_CBASER_SHAREABILITY_SHIFT,
1473*4882a593Smuzhiyun 				  vgic_sanitise_shareability);
1474*4882a593Smuzhiyun 	reg = vgic_sanitise_field(reg, GITS_CBASER_INNER_CACHEABILITY_MASK,
1475*4882a593Smuzhiyun 				  GITS_CBASER_INNER_CACHEABILITY_SHIFT,
1476*4882a593Smuzhiyun 				  vgic_sanitise_inner_cacheability);
1477*4882a593Smuzhiyun 	reg = vgic_sanitise_field(reg, GITS_CBASER_OUTER_CACHEABILITY_MASK,
1478*4882a593Smuzhiyun 				  GITS_CBASER_OUTER_CACHEABILITY_SHIFT,
1479*4882a593Smuzhiyun 				  vgic_sanitise_outer_cacheability);
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	/* Sanitise the physical address to be 64k aligned. */
1482*4882a593Smuzhiyun 	reg &= ~GENMASK_ULL(15, 12);
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	return reg;
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun static unsigned long vgic_mmio_read_its_cbaser(struct kvm *kvm,
1488*4882a593Smuzhiyun 					       struct vgic_its *its,
1489*4882a593Smuzhiyun 					       gpa_t addr, unsigned int len)
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun 	return extract_bytes(its->cbaser, addr & 7, len);
1492*4882a593Smuzhiyun }
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
1495*4882a593Smuzhiyun 				       gpa_t addr, unsigned int len,
1496*4882a593Smuzhiyun 				       unsigned long val)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	/* When GITS_CTLR.Enable is 1, this register is RO. */
1499*4882a593Smuzhiyun 	if (its->enabled)
1500*4882a593Smuzhiyun 		return;
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	mutex_lock(&its->cmd_lock);
1503*4882a593Smuzhiyun 	its->cbaser = update_64bit_reg(its->cbaser, addr & 7, len, val);
1504*4882a593Smuzhiyun 	its->cbaser = vgic_sanitise_its_cbaser(its->cbaser);
1505*4882a593Smuzhiyun 	its->creadr = 0;
1506*4882a593Smuzhiyun 	/*
1507*4882a593Smuzhiyun 	 * CWRITER is architecturally UNKNOWN on reset, but we need to reset
1508*4882a593Smuzhiyun 	 * it to CREADR to make sure we start with an empty command buffer.
1509*4882a593Smuzhiyun 	 */
1510*4882a593Smuzhiyun 	its->cwriter = its->creadr;
1511*4882a593Smuzhiyun 	mutex_unlock(&its->cmd_lock);
1512*4882a593Smuzhiyun }
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun #define ITS_CMD_BUFFER_SIZE(baser)	((((baser) & 0xff) + 1) << 12)
1515*4882a593Smuzhiyun #define ITS_CMD_SIZE			32
1516*4882a593Smuzhiyun #define ITS_CMD_OFFSET(reg)		((reg) & GENMASK(19, 5))
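/*
 * GITS_CBASER.Size (bits [7:0]) encodes the number of 4K pages of the
 * command queue minus one, and each command is 32 bytes, so the queue
 * holds up to 32768 commands. Masking bits [19:5] keeps offsets 32-byte
 * aligned within the architected 1MB maximum queue size.
 */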
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun /* Must be called with the cmd_lock held. */
1519*4882a593Smuzhiyun static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
1520*4882a593Smuzhiyun {
1521*4882a593Smuzhiyun 	gpa_t cbaser;
1522*4882a593Smuzhiyun 	u64 cmd_buf[4];
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	/* Commands are only processed when the ITS is enabled. */
1525*4882a593Smuzhiyun 	if (!its->enabled)
1526*4882a593Smuzhiyun 		return;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	cbaser = GITS_CBASER_ADDRESS(its->cbaser);
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	while (its->cwriter != its->creadr) {
1531*4882a593Smuzhiyun 		int ret = kvm_read_guest_lock(kvm, cbaser + its->creadr,
1532*4882a593Smuzhiyun 					      cmd_buf, ITS_CMD_SIZE);
1533*4882a593Smuzhiyun 		/*
1534*4882a593Smuzhiyun 		 * If kvm_read_guest_lock() fails, this could be due to the
1535*4882a593Smuzhiyun 		 * guest programming a bogus value in CBASER or something
1536*4882a593Smuzhiyun 		 * else going wrong from which we cannot easily recover.
1537*4882a593Smuzhiyun 		 * According to section 6.3.2 in the GICv3 spec we can just
1538*4882a593Smuzhiyun 		 * ignore that command.
1539*4882a593Smuzhiyun 		 */
1540*4882a593Smuzhiyun 		if (!ret)
1541*4882a593Smuzhiyun 			vgic_its_handle_command(kvm, its, cmd_buf);
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 		its->creadr += ITS_CMD_SIZE;
1544*4882a593Smuzhiyun 		if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
1545*4882a593Smuzhiyun 			its->creadr = 0;
1546*4882a593Smuzhiyun 	}
1547*4882a593Smuzhiyun }
1548*4882a593Smuzhiyun 
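/*
 * For reference, a guest enqueues commands roughly as follows (a sketch,
 * with "queue" being the CBASER-backed ring and "cwriter" the current
 * write offset):
 *
 *	memcpy(queue + cwriter, cmd, ITS_CMD_SIZE);
 *	writeq(next_offset(cwriter), gits_base + GITS_CWRITER);
 *
 * Since we process the whole ring synchronously in the CWRITER handler
 * below, CREADR has caught up with CWRITER by the time the trapped MMIO
 * write returns, and GITS_CTLR reads back as quiescent.
 */
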
1549*4882a593Smuzhiyun /*
1550*4882a593Smuzhiyun  * By writing to CWRITER the guest announces new commands to be processed.
1551*4882a593Smuzhiyun  * To avoid any races in the first place, we take the its_cmd lock, which
1552*4882a593Smuzhiyun  * protects our ring buffer variables, so that there is only one user
1553*4882a593Smuzhiyun  * per ITS handling commands at a given time.
1554*4882a593Smuzhiyun  */
1555*4882a593Smuzhiyun static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
1556*4882a593Smuzhiyun 					gpa_t addr, unsigned int len,
1557*4882a593Smuzhiyun 					unsigned long val)
1558*4882a593Smuzhiyun {
1559*4882a593Smuzhiyun 	u64 reg;
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	if (!its)
1562*4882a593Smuzhiyun 		return;
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	mutex_lock(&its->cmd_lock);
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
1567*4882a593Smuzhiyun 	reg = ITS_CMD_OFFSET(reg);
1568*4882a593Smuzhiyun 	if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1569*4882a593Smuzhiyun 		mutex_unlock(&its->cmd_lock);
1570*4882a593Smuzhiyun 		return;
1571*4882a593Smuzhiyun 	}
1572*4882a593Smuzhiyun 	its->cwriter = reg;
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	vgic_its_process_commands(kvm, its);
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	mutex_unlock(&its->cmd_lock);
1577*4882a593Smuzhiyun }
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun static unsigned long vgic_mmio_read_its_cwriter(struct kvm *kvm,
1580*4882a593Smuzhiyun 						struct vgic_its *its,
1581*4882a593Smuzhiyun 						gpa_t addr, unsigned int len)
1582*4882a593Smuzhiyun {
1583*4882a593Smuzhiyun 	return extract_bytes(its->cwriter, addr & 0x7, len);
1584*4882a593Smuzhiyun }
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun static unsigned long vgic_mmio_read_its_creadr(struct kvm *kvm,
1587*4882a593Smuzhiyun 					       struct vgic_its *its,
1588*4882a593Smuzhiyun 					       gpa_t addr, unsigned int len)
1589*4882a593Smuzhiyun {
1590*4882a593Smuzhiyun 	return extract_bytes(its->creadr, addr & 0x7, len);
1591*4882a593Smuzhiyun }
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun static int vgic_mmio_uaccess_write_its_creadr(struct kvm *kvm,
1594*4882a593Smuzhiyun 					      struct vgic_its *its,
1595*4882a593Smuzhiyun 					      gpa_t addr, unsigned int len,
1596*4882a593Smuzhiyun 					      unsigned long val)
1597*4882a593Smuzhiyun {
1598*4882a593Smuzhiyun 	u32 cmd_offset;
1599*4882a593Smuzhiyun 	int ret = 0;
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	mutex_lock(&its->cmd_lock);
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	if (its->enabled) {
1604*4882a593Smuzhiyun 		ret = -EBUSY;
1605*4882a593Smuzhiyun 		goto out;
1606*4882a593Smuzhiyun 	}
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 	cmd_offset = ITS_CMD_OFFSET(val);
1609*4882a593Smuzhiyun 	if (cmd_offset >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
1610*4882a593Smuzhiyun 		ret = -EINVAL;
1611*4882a593Smuzhiyun 		goto out;
1612*4882a593Smuzhiyun 	}
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	its->creadr = cmd_offset;
1615*4882a593Smuzhiyun out:
1616*4882a593Smuzhiyun 	mutex_unlock(&its->cmd_lock);
1617*4882a593Smuzhiyun 	return ret;
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun #define BASER_INDEX(addr) (((addr) / sizeof(u64)) & 0x7)
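/*
 * The eight GITS_BASER<n> registers are consecutive 64-bit registers;
 * BASER_INDEX() turns an MMIO offset into the register number. Only
 * BASER0 (device table) and BASER1 (collection table) are backed here,
 * the others are RAZ/WI.
 */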
1621*4882a593Smuzhiyun static unsigned long vgic_mmio_read_its_baser(struct kvm *kvm,
1622*4882a593Smuzhiyun 					      struct vgic_its *its,
1623*4882a593Smuzhiyun 					      gpa_t addr, unsigned int len)
1624*4882a593Smuzhiyun {
1625*4882a593Smuzhiyun 	u64 reg;
1626*4882a593Smuzhiyun 
1627*4882a593Smuzhiyun 	switch (BASER_INDEX(addr)) {
1628*4882a593Smuzhiyun 	case 0:
1629*4882a593Smuzhiyun 		reg = its->baser_device_table;
1630*4882a593Smuzhiyun 		break;
1631*4882a593Smuzhiyun 	case 1:
1632*4882a593Smuzhiyun 		reg = its->baser_coll_table;
1633*4882a593Smuzhiyun 		break;
1634*4882a593Smuzhiyun 	default:
1635*4882a593Smuzhiyun 		reg = 0;
1636*4882a593Smuzhiyun 		break;
1637*4882a593Smuzhiyun 	}
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 	return extract_bytes(reg, addr & 7, len);
1640*4882a593Smuzhiyun }
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun #define GITS_BASER_RO_MASK	(GENMASK_ULL(52, 48) | GENMASK_ULL(58, 56))
1643*4882a593Smuzhiyun static void vgic_mmio_write_its_baser(struct kvm *kvm,
1644*4882a593Smuzhiyun 				      struct vgic_its *its,
1645*4882a593Smuzhiyun 				      gpa_t addr, unsigned int len,
1646*4882a593Smuzhiyun 				      unsigned long val)
1647*4882a593Smuzhiyun {
1648*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
1649*4882a593Smuzhiyun 	u64 entry_size, table_type;
1650*4882a593Smuzhiyun 	u64 reg, *regptr, clearbits = 0;
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	/* When GITS_CTLR.Enable is 1, we ignore write accesses. */
1653*4882a593Smuzhiyun 	if (its->enabled)
1654*4882a593Smuzhiyun 		return;
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	switch (BASER_INDEX(addr)) {
1657*4882a593Smuzhiyun 	case 0:
1658*4882a593Smuzhiyun 		regptr = &its->baser_device_table;
1659*4882a593Smuzhiyun 		entry_size = abi->dte_esz;
1660*4882a593Smuzhiyun 		table_type = GITS_BASER_TYPE_DEVICE;
1661*4882a593Smuzhiyun 		break;
1662*4882a593Smuzhiyun 	case 1:
1663*4882a593Smuzhiyun 		regptr = &its->baser_coll_table;
1664*4882a593Smuzhiyun 		entry_size = abi->cte_esz;
1665*4882a593Smuzhiyun 		table_type = GITS_BASER_TYPE_COLLECTION;
1666*4882a593Smuzhiyun 		clearbits = GITS_BASER_INDIRECT;
1667*4882a593Smuzhiyun 		break;
1668*4882a593Smuzhiyun 	default:
1669*4882a593Smuzhiyun 		return;
1670*4882a593Smuzhiyun 	}
1671*4882a593Smuzhiyun 
1672*4882a593Smuzhiyun 	reg = update_64bit_reg(*regptr, addr & 7, len, val);
1673*4882a593Smuzhiyun 	reg &= ~GITS_BASER_RO_MASK;
1674*4882a593Smuzhiyun 	reg &= ~clearbits;
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT;
1677*4882a593Smuzhiyun 	reg |= table_type << GITS_BASER_TYPE_SHIFT;
1678*4882a593Smuzhiyun 	reg = vgic_sanitise_its_baser(reg);
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	*regptr = reg;
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	if (!(reg & GITS_BASER_VALID)) {
1683*4882a593Smuzhiyun 		/* Take the its_lock to prevent a race with a save/restore */
1684*4882a593Smuzhiyun 		mutex_lock(&its->its_lock);
1685*4882a593Smuzhiyun 		switch (table_type) {
1686*4882a593Smuzhiyun 		case GITS_BASER_TYPE_DEVICE:
1687*4882a593Smuzhiyun 			vgic_its_free_device_list(kvm, its);
1688*4882a593Smuzhiyun 			break;
1689*4882a593Smuzhiyun 		case GITS_BASER_TYPE_COLLECTION:
1690*4882a593Smuzhiyun 			vgic_its_free_collection_list(kvm, its);
1691*4882a593Smuzhiyun 			break;
1692*4882a593Smuzhiyun 		}
1693*4882a593Smuzhiyun 		mutex_unlock(&its->its_lock);
1694*4882a593Smuzhiyun 	}
1695*4882a593Smuzhiyun }
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun static unsigned long vgic_mmio_read_its_ctlr(struct kvm *kvm,
1698*4882a593Smuzhiyun 					     struct vgic_its *its,
1699*4882a593Smuzhiyun 					     gpa_t addr, unsigned int len)
1700*4882a593Smuzhiyun {
1701*4882a593Smuzhiyun 	u32 reg = 0;
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	mutex_lock(&its->cmd_lock);
1704*4882a593Smuzhiyun 	if (its->creadr == its->cwriter)
1705*4882a593Smuzhiyun 		reg |= GITS_CTLR_QUIESCENT;
1706*4882a593Smuzhiyun 	if (its->enabled)
1707*4882a593Smuzhiyun 		reg |= GITS_CTLR_ENABLE;
1708*4882a593Smuzhiyun 	mutex_unlock(&its->cmd_lock);
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	return reg;
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
1714*4882a593Smuzhiyun 				     gpa_t addr, unsigned int len,
1715*4882a593Smuzhiyun 				     unsigned long val)
1716*4882a593Smuzhiyun {
1717*4882a593Smuzhiyun 	mutex_lock(&its->cmd_lock);
1718*4882a593Smuzhiyun 
1719*4882a593Smuzhiyun 	/*
1720*4882a593Smuzhiyun 	 * It is UNPREDICTABLE to enable the ITS if CBASER or the
1721*4882a593Smuzhiyun 	 * device/collection BASERs are invalid.
1722*4882a593Smuzhiyun 	 */
1723*4882a593Smuzhiyun 	if (!its->enabled && (val & GITS_CTLR_ENABLE) &&
1724*4882a593Smuzhiyun 		(!(its->baser_device_table & GITS_BASER_VALID) ||
1725*4882a593Smuzhiyun 		 !(its->baser_coll_table & GITS_BASER_VALID) ||
1726*4882a593Smuzhiyun 		 !(its->cbaser & GITS_CBASER_VALID)))
1727*4882a593Smuzhiyun 		goto out;
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	its->enabled = !!(val & GITS_CTLR_ENABLE);
1730*4882a593Smuzhiyun 	if (!its->enabled)
1731*4882a593Smuzhiyun 		vgic_its_invalidate_cache(kvm);
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	/*
1734*4882a593Smuzhiyun 	 * Try to process any pending commands. This function bails out early
1735*4882a593Smuzhiyun 	 * if the ITS is disabled or no commands have been queued.
1736*4882a593Smuzhiyun 	 */
1737*4882a593Smuzhiyun 	vgic_its_process_commands(kvm, its);
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun out:
1740*4882a593Smuzhiyun 	mutex_unlock(&its->cmd_lock);
1741*4882a593Smuzhiyun }
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun #define REGISTER_ITS_DESC(off, rd, wr, length, acc)		\
1744*4882a593Smuzhiyun {								\
1745*4882a593Smuzhiyun 	.reg_offset = off,					\
1746*4882a593Smuzhiyun 	.len = length,						\
1747*4882a593Smuzhiyun 	.access_flags = acc,					\
1748*4882a593Smuzhiyun 	.its_read = rd,						\
1749*4882a593Smuzhiyun 	.its_write = wr,					\
1750*4882a593Smuzhiyun }
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun #define REGISTER_ITS_DESC_UACCESS(off, rd, wr, uwr, length, acc)\
1753*4882a593Smuzhiyun {								\
1754*4882a593Smuzhiyun 	.reg_offset = off,					\
1755*4882a593Smuzhiyun 	.len = length,						\
1756*4882a593Smuzhiyun 	.access_flags = acc,					\
1757*4882a593Smuzhiyun 	.its_read = rd,						\
1758*4882a593Smuzhiyun 	.its_write = wr,					\
1759*4882a593Smuzhiyun 	.uaccess_its_write = uwr,				\
1760*4882a593Smuzhiyun }
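/*
 * The _UACCESS variant registers a separate handler for userspace
 * (KVM_DEV_ARM_VGIC_GRP_ITS_REGS) writes, which have different rules
 * than guest MMIO: e.g. GITS_CREADR is only writable from userspace.
 */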
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun static void its_mmio_write_wi(struct kvm *kvm, struct vgic_its *its,
1763*4882a593Smuzhiyun 			      gpa_t addr, unsigned int len, unsigned long val)
1764*4882a593Smuzhiyun {
1765*4882a593Smuzhiyun 	/* Ignore */
1766*4882a593Smuzhiyun }
1767*4882a593Smuzhiyun 
1768*4882a593Smuzhiyun static struct vgic_register_region its_registers[] = {
1769*4882a593Smuzhiyun 	REGISTER_ITS_DESC(GITS_CTLR,
1770*4882a593Smuzhiyun 		vgic_mmio_read_its_ctlr, vgic_mmio_write_its_ctlr, 4,
1771*4882a593Smuzhiyun 		VGIC_ACCESS_32bit),
1772*4882a593Smuzhiyun 	REGISTER_ITS_DESC_UACCESS(GITS_IIDR,
1773*4882a593Smuzhiyun 		vgic_mmio_read_its_iidr, its_mmio_write_wi,
1774*4882a593Smuzhiyun 		vgic_mmio_uaccess_write_its_iidr, 4,
1775*4882a593Smuzhiyun 		VGIC_ACCESS_32bit),
1776*4882a593Smuzhiyun 	REGISTER_ITS_DESC(GITS_TYPER,
1777*4882a593Smuzhiyun 		vgic_mmio_read_its_typer, its_mmio_write_wi, 8,
1778*4882a593Smuzhiyun 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1779*4882a593Smuzhiyun 	REGISTER_ITS_DESC(GITS_CBASER,
1780*4882a593Smuzhiyun 		vgic_mmio_read_its_cbaser, vgic_mmio_write_its_cbaser, 8,
1781*4882a593Smuzhiyun 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1782*4882a593Smuzhiyun 	REGISTER_ITS_DESC(GITS_CWRITER,
1783*4882a593Smuzhiyun 		vgic_mmio_read_its_cwriter, vgic_mmio_write_its_cwriter, 8,
1784*4882a593Smuzhiyun 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1785*4882a593Smuzhiyun 	REGISTER_ITS_DESC_UACCESS(GITS_CREADR,
1786*4882a593Smuzhiyun 		vgic_mmio_read_its_creadr, its_mmio_write_wi,
1787*4882a593Smuzhiyun 		vgic_mmio_uaccess_write_its_creadr, 8,
1788*4882a593Smuzhiyun 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1789*4882a593Smuzhiyun 	REGISTER_ITS_DESC(GITS_BASER,
1790*4882a593Smuzhiyun 		vgic_mmio_read_its_baser, vgic_mmio_write_its_baser, 0x40,
1791*4882a593Smuzhiyun 		VGIC_ACCESS_64bit | VGIC_ACCESS_32bit),
1792*4882a593Smuzhiyun 	REGISTER_ITS_DESC(GITS_IDREGS_BASE,
1793*4882a593Smuzhiyun 		vgic_mmio_read_its_idregs, its_mmio_write_wi, 0x30,
1794*4882a593Smuzhiyun 		VGIC_ACCESS_32bit),
1795*4882a593Smuzhiyun };
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun /* This is called on setting the LPI enable bit in the redistributor. */
1798*4882a593Smuzhiyun void vgic_enable_lpis(struct kvm_vcpu *vcpu)
1799*4882a593Smuzhiyun {
1800*4882a593Smuzhiyun 	if (!(vcpu->arch.vgic_cpu.pendbaser & GICR_PENDBASER_PTZ))
1801*4882a593Smuzhiyun 		its_sync_lpi_pending_table(vcpu);
1802*4882a593Smuzhiyun }
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun static int vgic_register_its_iodev(struct kvm *kvm, struct vgic_its *its,
1805*4882a593Smuzhiyun 				   u64 addr)
1806*4882a593Smuzhiyun {
1807*4882a593Smuzhiyun 	struct vgic_io_device *iodev = &its->iodev;
1808*4882a593Smuzhiyun 	int ret;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	mutex_lock(&kvm->slots_lock);
1811*4882a593Smuzhiyun 	if (!IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1812*4882a593Smuzhiyun 		ret = -EBUSY;
1813*4882a593Smuzhiyun 		goto out;
1814*4882a593Smuzhiyun 	}
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	its->vgic_its_base = addr;
1817*4882a593Smuzhiyun 	iodev->regions = its_registers;
1818*4882a593Smuzhiyun 	iodev->nr_regions = ARRAY_SIZE(its_registers);
1819*4882a593Smuzhiyun 	kvm_iodevice_init(&iodev->dev, &kvm_io_gic_ops);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	iodev->base_addr = its->vgic_its_base;
1822*4882a593Smuzhiyun 	iodev->iodev_type = IODEV_ITS;
1823*4882a593Smuzhiyun 	iodev->its = its;
1824*4882a593Smuzhiyun 	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, iodev->base_addr,
1825*4882a593Smuzhiyun 				      KVM_VGIC_V3_ITS_SIZE, &iodev->dev);
1826*4882a593Smuzhiyun out:
1827*4882a593Smuzhiyun 	mutex_unlock(&kvm->slots_lock);
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	return ret;
1830*4882a593Smuzhiyun }
1831*4882a593Smuzhiyun 
1832*4882a593Smuzhiyun /* Default is 16 cached LPIs per vcpu */
1833*4882a593Smuzhiyun #define LPI_DEFAULT_PCPU_CACHE_SIZE	16
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun void vgic_lpi_translation_cache_init(struct kvm *kvm)
1836*4882a593Smuzhiyun {
1837*4882a593Smuzhiyun 	struct vgic_dist *dist = &kvm->arch.vgic;
1838*4882a593Smuzhiyun 	unsigned int sz;
1839*4882a593Smuzhiyun 	int i;
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	if (!list_empty(&dist->lpi_translation_cache))
1842*4882a593Smuzhiyun 		return;
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun 	sz = atomic_read(&kvm->online_vcpus) * LPI_DEFAULT_PCPU_CACHE_SIZE;
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	for (i = 0; i < sz; i++) {
1847*4882a593Smuzhiyun 		struct vgic_translation_cache_entry *cte;
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 		/* An allocation failure is not fatal */
1850*4882a593Smuzhiyun 		cte = kzalloc(sizeof(*cte), GFP_KERNEL);
1851*4882a593Smuzhiyun 		if (WARN_ON(!cte))
1852*4882a593Smuzhiyun 			break;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 		INIT_LIST_HEAD(&cte->entry);
1855*4882a593Smuzhiyun 		list_add(&cte->entry, &dist->lpi_translation_cache);
1856*4882a593Smuzhiyun 	}
1857*4882a593Smuzhiyun }
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
1860*4882a593Smuzhiyun {
1861*4882a593Smuzhiyun 	struct vgic_dist *dist = &kvm->arch.vgic;
1862*4882a593Smuzhiyun 	struct vgic_translation_cache_entry *cte, *tmp;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	vgic_its_invalidate_cache(kvm);
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	list_for_each_entry_safe(cte, tmp,
1867*4882a593Smuzhiyun 				 &dist->lpi_translation_cache, entry) {
1868*4882a593Smuzhiyun 		list_del(&cte->entry);
1869*4882a593Smuzhiyun 		kfree(cte);
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun }
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun #define INITIAL_BASER_VALUE						  \
1874*4882a593Smuzhiyun 	(GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb)		| \
1875*4882a593Smuzhiyun 	 GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, SameAsInner)		| \
1876*4882a593Smuzhiyun 	 GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)		| \
1877*4882a593Smuzhiyun 	 GITS_BASER_PAGE_SIZE_64K)
1878*4882a593Smuzhiyun 
1879*4882a593Smuzhiyun #define INITIAL_PROPBASER_VALUE						  \
1880*4882a593Smuzhiyun 	(GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb)		| \
1881*4882a593Smuzhiyun 	 GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, SameAsInner)	| \
1882*4882a593Smuzhiyun 	 GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable))
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun static int vgic_its_create(struct kvm_device *dev, u32 type)
1885*4882a593Smuzhiyun {
1886*4882a593Smuzhiyun 	struct vgic_its *its;
1887*4882a593Smuzhiyun 
1888*4882a593Smuzhiyun 	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
1889*4882a593Smuzhiyun 		return -ENODEV;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	its = kzalloc(sizeof(struct vgic_its), GFP_KERNEL);
1892*4882a593Smuzhiyun 	if (!its)
1893*4882a593Smuzhiyun 		return -ENOMEM;
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun 	if (vgic_initialized(dev->kvm)) {
1896*4882a593Smuzhiyun 		int ret = vgic_v4_init(dev->kvm);
1897*4882a593Smuzhiyun 		if (ret < 0) {
1898*4882a593Smuzhiyun 			kfree(its);
1899*4882a593Smuzhiyun 			return ret;
1900*4882a593Smuzhiyun 		}
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 		vgic_lpi_translation_cache_init(dev->kvm);
1903*4882a593Smuzhiyun 	}
1904*4882a593Smuzhiyun 
1905*4882a593Smuzhiyun 	mutex_init(&its->its_lock);
1906*4882a593Smuzhiyun 	mutex_init(&its->cmd_lock);
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	its->vgic_its_base = VGIC_ADDR_UNDEF;
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 	INIT_LIST_HEAD(&its->device_list);
1911*4882a593Smuzhiyun 	INIT_LIST_HEAD(&its->collection_list);
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun 	dev->kvm->arch.vgic.msis_require_devid = true;
1914*4882a593Smuzhiyun 	dev->kvm->arch.vgic.has_its = true;
1915*4882a593Smuzhiyun 	its->enabled = false;
1916*4882a593Smuzhiyun 	its->dev = dev;
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	its->baser_device_table = INITIAL_BASER_VALUE			|
1919*4882a593Smuzhiyun 		((u64)GITS_BASER_TYPE_DEVICE << GITS_BASER_TYPE_SHIFT);
1920*4882a593Smuzhiyun 	its->baser_coll_table = INITIAL_BASER_VALUE |
1921*4882a593Smuzhiyun 		((u64)GITS_BASER_TYPE_COLLECTION << GITS_BASER_TYPE_SHIFT);
1922*4882a593Smuzhiyun 	dev->kvm->arch.vgic.propbaser = INITIAL_PROPBASER_VALUE;
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	dev->private = its;
1925*4882a593Smuzhiyun 
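	/* Register with the most recent (highest) table-layout ABI revision. */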
1926*4882a593Smuzhiyun 	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
1927*4882a593Smuzhiyun }
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun static void vgic_its_destroy(struct kvm_device *kvm_dev)
1930*4882a593Smuzhiyun {
1931*4882a593Smuzhiyun 	struct kvm *kvm = kvm_dev->kvm;
1932*4882a593Smuzhiyun 	struct vgic_its *its = kvm_dev->private;
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	mutex_lock(&its->its_lock);
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	vgic_its_free_device_list(kvm, its);
1937*4882a593Smuzhiyun 	vgic_its_free_collection_list(kvm, its);
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	mutex_unlock(&its->its_lock);
1940*4882a593Smuzhiyun 	kfree(its);
1941*4882a593Smuzhiyun 	kfree(kvm_dev); /* allocated by kvm_ioctl_create_device(), freed by .destroy */
1942*4882a593Smuzhiyun }
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun static int vgic_its_has_attr_regs(struct kvm_device *dev,
1945*4882a593Smuzhiyun 				  struct kvm_device_attr *attr)
1946*4882a593Smuzhiyun {
1947*4882a593Smuzhiyun 	const struct vgic_register_region *region;
1948*4882a593Smuzhiyun 	gpa_t offset = attr->attr;
1949*4882a593Smuzhiyun 	int align;
1950*4882a593Smuzhiyun 
1951*4882a593Smuzhiyun 	align = (offset < GITS_TYPER) || (offset >= GITS_PIDR4) ? 0x3 : 0x7;
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun 	if (offset & align)
1954*4882a593Smuzhiyun 		return -EINVAL;
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	region = vgic_find_mmio_region(its_registers,
1957*4882a593Smuzhiyun 				       ARRAY_SIZE(its_registers),
1958*4882a593Smuzhiyun 				       offset);
1959*4882a593Smuzhiyun 	if (!region)
1960*4882a593Smuzhiyun 		return -ENXIO;
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	return 0;
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun static int vgic_its_attr_regs_access(struct kvm_device *dev,
1966*4882a593Smuzhiyun 				     struct kvm_device_attr *attr,
1967*4882a593Smuzhiyun 				     u64 *reg, bool is_write)
1968*4882a593Smuzhiyun {
1969*4882a593Smuzhiyun 	const struct vgic_register_region *region;
1970*4882a593Smuzhiyun 	struct vgic_its *its;
1971*4882a593Smuzhiyun 	gpa_t addr, offset;
1972*4882a593Smuzhiyun 	unsigned int len;
1973*4882a593Smuzhiyun 	int align, ret = 0;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	its = dev->private;
1976*4882a593Smuzhiyun 	offset = attr->attr;
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	/*
1979*4882a593Smuzhiyun 	 * Although the spec supports upper/lower 32-bit accesses to
1980*4882a593Smuzhiyun 	 * 64-bit ITS registers, the userspace ABI requires 64-bit
1981*4882a593Smuzhiyun 	 * accesses to all 64-bit wide registers. We therefore only
1982*4882a593Smuzhiyun 	 * support 32-bit accesses to GITS_CTLR, GITS_IIDR and the GITS
1983*4882a593Smuzhiyun 	 * ID registers.
1984*4882a593Smuzhiyun 	 */
1985*4882a593Smuzhiyun 	if ((offset < GITS_TYPER) || (offset >= GITS_PIDR4))
1986*4882a593Smuzhiyun 		align = 0x3;
1987*4882a593Smuzhiyun 	else
1988*4882a593Smuzhiyun 		align = 0x7;
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 	if (offset & align)
1991*4882a593Smuzhiyun 		return -EINVAL;
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	mutex_lock(&dev->kvm->lock);
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	if (IS_VGIC_ADDR_UNDEF(its->vgic_its_base)) {
1996*4882a593Smuzhiyun 		ret = -ENXIO;
1997*4882a593Smuzhiyun 		goto out;
1998*4882a593Smuzhiyun 	}
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 	region = vgic_find_mmio_region(its_registers,
2001*4882a593Smuzhiyun 				       ARRAY_SIZE(its_registers),
2002*4882a593Smuzhiyun 				       offset);
2003*4882a593Smuzhiyun 	if (!region) {
2004*4882a593Smuzhiyun 		ret = -ENXIO;
2005*4882a593Smuzhiyun 		goto out;
2006*4882a593Smuzhiyun 	}
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun 	if (!lock_all_vcpus(dev->kvm)) {
2009*4882a593Smuzhiyun 		ret = -EBUSY;
2010*4882a593Smuzhiyun 		goto out;
2011*4882a593Smuzhiyun 	}
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	addr = its->vgic_its_base + offset;
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 	len = region->access_flags & VGIC_ACCESS_64bit ? 8 : 4;
2016*4882a593Smuzhiyun 
2017*4882a593Smuzhiyun 	if (is_write) {
2018*4882a593Smuzhiyun 		if (region->uaccess_its_write)
2019*4882a593Smuzhiyun 			ret = region->uaccess_its_write(dev->kvm, its, addr,
2020*4882a593Smuzhiyun 							len, *reg);
2021*4882a593Smuzhiyun 		else
2022*4882a593Smuzhiyun 			region->its_write(dev->kvm, its, addr, len, *reg);
2023*4882a593Smuzhiyun 	} else {
2024*4882a593Smuzhiyun 		*reg = region->its_read(dev->kvm, its, addr, len);
2025*4882a593Smuzhiyun 	}
2026*4882a593Smuzhiyun 	unlock_all_vcpus(dev->kvm);
2027*4882a593Smuzhiyun out:
2028*4882a593Smuzhiyun 	mutex_unlock(&dev->kvm->lock);
2029*4882a593Smuzhiyun 	return ret;
2030*4882a593Smuzhiyun }
2031*4882a593Smuzhiyun 
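/*
 * The saved device and interrupt translation tables use delta encoding:
 * each entry records the distance to the next populated ID so that the
 * restore path can skip holes. The offset is clamped to the width of
 * the "next" field in the KVM ABI entry layout.
 */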
2032*4882a593Smuzhiyun static u32 compute_next_devid_offset(struct list_head *h,
2033*4882a593Smuzhiyun 				     struct its_device *dev)
2034*4882a593Smuzhiyun {
2035*4882a593Smuzhiyun 	struct its_device *next;
2036*4882a593Smuzhiyun 	u32 next_offset;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	if (list_is_last(&dev->dev_list, h))
2039*4882a593Smuzhiyun 		return 0;
2040*4882a593Smuzhiyun 	next = list_next_entry(dev, dev_list);
2041*4882a593Smuzhiyun 	next_offset = next->device_id - dev->device_id;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	return min_t(u32, next_offset, VITS_DTE_MAX_DEVID_OFFSET);
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun static u32 compute_next_eventid_offset(struct list_head *h, struct its_ite *ite)
2047*4882a593Smuzhiyun {
2048*4882a593Smuzhiyun 	struct its_ite *next;
2049*4882a593Smuzhiyun 	u32 next_offset;
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	if (list_is_last(&ite->ite_list, h))
2052*4882a593Smuzhiyun 		return 0;
2053*4882a593Smuzhiyun 	next = list_next_entry(ite, ite_list);
2054*4882a593Smuzhiyun 	next_offset = next->event_id - ite->event_id;
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun 	return min_t(u32, next_offset, VITS_ITE_MAX_EVENTID_OFFSET);
2057*4882a593Smuzhiyun }
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun /**
2060*4882a593Smuzhiyun  * entry_fn_t - Callback called on a table entry restore path
2061*4882a593Smuzhiyun  * @its: its handle
2062*4882a593Smuzhiyun  * @id: id of the entry
2063*4882a593Smuzhiyun  * @entry: pointer to the entry
2064*4882a593Smuzhiyun  * @opaque: pointer to opaque data passed through to the callback
2065*4882a593Smuzhiyun  *
2066*4882a593Smuzhiyun  * Return: < 0 on error, 0 if last element was identified, id offset to next
2067*4882a593Smuzhiyun  * element otherwise
2068*4882a593Smuzhiyun  */
2069*4882a593Smuzhiyun typedef int (*entry_fn_t)(struct vgic_its *its, u32 id, void *entry,
2070*4882a593Smuzhiyun 			  void *opaque);
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun /**
2073*4882a593Smuzhiyun  * scan_its_table - Scan a contiguous table in guest RAM and apply a function
2074*4882a593Smuzhiyun  * to each entry
2075*4882a593Smuzhiyun  *
2076*4882a593Smuzhiyun  * @its: its handle
2077*4882a593Smuzhiyun  * @base: base gpa of the table
2078*4882a593Smuzhiyun  * @size: size of the table in bytes
2079*4882a593Smuzhiyun  * @esz: entry size in bytes
2080*4882a593Smuzhiyun  * @start_id: the ID of the first entry in the table
2081*4882a593Smuzhiyun  * (non-zero for second level tables)
2082*4882a593Smuzhiyun  * @fn: function to apply on each entry
 * @opaque: opaque data passed through to @fn
2083*4882a593Smuzhiyun  *
2084*4882a593Smuzhiyun  * Return: < 0 on error, 0 if last element was identified, 1 otherwise
2085*4882a593Smuzhiyun  * (the last element may not be found on second level tables)
2086*4882a593Smuzhiyun  */
2087*4882a593Smuzhiyun static int scan_its_table(struct vgic_its *its, gpa_t base, int size, u32 esz,
2088*4882a593Smuzhiyun 			  int start_id, entry_fn_t fn, void *opaque)
2089*4882a593Smuzhiyun {
2090*4882a593Smuzhiyun 	struct kvm *kvm = its->dev->kvm;
2091*4882a593Smuzhiyun 	unsigned long len = size;
2092*4882a593Smuzhiyun 	int id = start_id;
2093*4882a593Smuzhiyun 	gpa_t gpa = base;
2094*4882a593Smuzhiyun 	char entry[ESZ_MAX];
2095*4882a593Smuzhiyun 	int ret;
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	memset(entry, 0, esz);
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 	while (true) {
2100*4882a593Smuzhiyun 		int next_offset;
2101*4882a593Smuzhiyun 		size_t byte_offset;
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 		ret = kvm_read_guest_lock(kvm, gpa, entry, esz);
2104*4882a593Smuzhiyun 		if (ret)
2105*4882a593Smuzhiyun 			return ret;
2106*4882a593Smuzhiyun 
2107*4882a593Smuzhiyun 		next_offset = fn(its, id, entry, opaque);
2108*4882a593Smuzhiyun 		if (next_offset <= 0)
2109*4882a593Smuzhiyun 			return next_offset;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 		byte_offset = next_offset * esz;
2112*4882a593Smuzhiyun 		if (byte_offset >= len)
2113*4882a593Smuzhiyun 			break;
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 		id += next_offset;
2116*4882a593Smuzhiyun 		gpa += byte_offset;
2117*4882a593Smuzhiyun 		len -= byte_offset;
2118*4882a593Smuzhiyun 	}
2119*4882a593Smuzhiyun 	return 1;
2120*4882a593Smuzhiyun }
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun /**
2123*4882a593Smuzhiyun  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
2124*4882a593Smuzhiyun  * vgic_its_save_ite - Save an interrupt translation entry at @gpa
 * @its: its handle
 * @dev: ITS device owning @ite
 * @ite: interrupt translation entry to save
 * @gpa: GPA to write the entry to
 * @ite_esz: entry size in bytes
2125*4882a593Smuzhiyun static int vgic_its_save_ite(struct vgic_its *its, struct its_device *dev,
2126*4882a593Smuzhiyun 			      struct its_ite *ite, gpa_t gpa, int ite_esz)
2127*4882a593Smuzhiyun {
2128*4882a593Smuzhiyun 	struct kvm *kvm = its->dev->kvm;
2129*4882a593Smuzhiyun 	u32 next_offset;
2130*4882a593Smuzhiyun 	u64 val;
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 	next_offset = compute_next_eventid_offset(&dev->itt_head, ite);
2133*4882a593Smuzhiyun 	val = ((u64)next_offset << KVM_ITS_ITE_NEXT_SHIFT) |
2134*4882a593Smuzhiyun 	       ((u64)ite->irq->intid << KVM_ITS_ITE_PINTID_SHIFT) |
2135*4882a593Smuzhiyun 		ite->collection->collection_id;
2136*4882a593Smuzhiyun 	val = cpu_to_le64(val);
2137*4882a593Smuzhiyun 	return kvm_write_guest_lock(kvm, gpa, &val, ite_esz);
2138*4882a593Smuzhiyun }
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun /**
2141*4882a593Smuzhiyun  * vgic_its_restore_ite - restore an interrupt translation entry
2142*4882a593Smuzhiyun  * @event_id: id used for indexing
 * @its: its handle
2143*4882a593Smuzhiyun  * @event_id: id used for indexing
2144*4882a593Smuzhiyun  * @opaque: pointer to the its_device
2145*4882a593Smuzhiyun  */
2146*4882a593Smuzhiyun static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id,
2147*4882a593Smuzhiyun 				void *ptr, void *opaque)
2148*4882a593Smuzhiyun {
2149*4882a593Smuzhiyun 	struct its_device *dev = (struct its_device *)opaque;
2150*4882a593Smuzhiyun 	struct its_collection *collection;
2151*4882a593Smuzhiyun 	struct kvm *kvm = its->dev->kvm;
2152*4882a593Smuzhiyun 	struct kvm_vcpu *vcpu = NULL;
2153*4882a593Smuzhiyun 	u64 val;
2154*4882a593Smuzhiyun 	u64 *p = (u64 *)ptr;
2155*4882a593Smuzhiyun 	struct vgic_irq *irq;
2156*4882a593Smuzhiyun 	u32 coll_id, lpi_id;
2157*4882a593Smuzhiyun 	struct its_ite *ite;
2158*4882a593Smuzhiyun 	u32 offset;
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	val = *p;
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 	val = le64_to_cpu(val);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun 	coll_id = val & KVM_ITS_ITE_ICID_MASK;
2165*4882a593Smuzhiyun 	lpi_id = (val & KVM_ITS_ITE_PINTID_MASK) >> KVM_ITS_ITE_PINTID_SHIFT;
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	if (!lpi_id)
2168*4882a593Smuzhiyun 		return 1; /* invalid entry, no choice but to scan next entry */
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	if (lpi_id < VGIC_MIN_LPI)
2171*4882a593Smuzhiyun 		return -EINVAL;
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	offset = val >> KVM_ITS_ITE_NEXT_SHIFT;
2174*4882a593Smuzhiyun 	if (event_id + offset >= BIT_ULL(dev->num_eventid_bits))
2175*4882a593Smuzhiyun 		return -EINVAL;
2176*4882a593Smuzhiyun 
2177*4882a593Smuzhiyun 	collection = find_collection(its, coll_id);
2178*4882a593Smuzhiyun 	if (!collection)
2179*4882a593Smuzhiyun 		return -EINVAL;
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	ite = vgic_its_alloc_ite(dev, collection, event_id);
2182*4882a593Smuzhiyun 	if (IS_ERR(ite))
2183*4882a593Smuzhiyun 		return PTR_ERR(ite);
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 	if (its_is_collection_mapped(collection))
2186*4882a593Smuzhiyun 		vcpu = kvm_get_vcpu(kvm, collection->target_addr);
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 	irq = vgic_add_lpi(kvm, lpi_id, vcpu);
2189*4882a593Smuzhiyun 	if (IS_ERR(irq))
2190*4882a593Smuzhiyun 		return PTR_ERR(irq);
2191*4882a593Smuzhiyun 	ite->irq = irq;
2192*4882a593Smuzhiyun 
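	/* The saved next-EventID delta tells scan_its_table() how far to advance. */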
2193*4882a593Smuzhiyun 	return offset;
2194*4882a593Smuzhiyun }
2195*4882a593Smuzhiyun 
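/*
 * list_sort() comparator: orders ITEs by ascending event_id so that the
 * next-EventID deltas computed at save time are always positive.
 */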
2196*4882a593Smuzhiyun static int vgic_its_ite_cmp(void *priv, struct list_head *a,
2197*4882a593Smuzhiyun 			    struct list_head *b)
2198*4882a593Smuzhiyun {
2199*4882a593Smuzhiyun 	struct its_ite *itea = container_of(a, struct its_ite, ite_list);
2200*4882a593Smuzhiyun 	struct its_ite *iteb = container_of(b, struct its_ite, ite_list);
2201*4882a593Smuzhiyun 
2202*4882a593Smuzhiyun 	if (itea->event_id < iteb->event_id)
2203*4882a593Smuzhiyun 		return -1;
2204*4882a593Smuzhiyun 	else
2205*4882a593Smuzhiyun 		return 1;
2206*4882a593Smuzhiyun }
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun static int vgic_its_save_itt(struct vgic_its *its, struct its_device *device)
2209*4882a593Smuzhiyun {
2210*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2211*4882a593Smuzhiyun 	gpa_t base = device->itt_addr;
2212*4882a593Smuzhiyun 	struct its_ite *ite;
2213*4882a593Smuzhiyun 	int ret;
2214*4882a593Smuzhiyun 	int ite_esz = abi->ite_esz;
2215*4882a593Smuzhiyun 
2216*4882a593Smuzhiyun 	list_sort(NULL, &device->itt_head, vgic_its_ite_cmp);
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 	list_for_each_entry(ite, &device->itt_head, ite_list) {
2219*4882a593Smuzhiyun 		gpa_t gpa = base + ite->event_id * ite_esz;
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun 		/*
2222*4882a593Smuzhiyun 		 * If an LPI carries the HW bit, the interrupt is under
2223*4882a593Smuzhiyun 		 * GICv4 control and we have no direct access to its
2224*4882a593Smuzhiyun 		 * state, so simply fail the save operation.
2226*4882a593Smuzhiyun 		 */
2227*4882a593Smuzhiyun 		if (ite->irq->hw)
2228*4882a593Smuzhiyun 			return -EACCES;
2229*4882a593Smuzhiyun 
2230*4882a593Smuzhiyun 		ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
2231*4882a593Smuzhiyun 		if (ret)
2232*4882a593Smuzhiyun 			return ret;
2233*4882a593Smuzhiyun 	}
2234*4882a593Smuzhiyun 	return 0;
2235*4882a593Smuzhiyun }
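
/*
 * Note on the list_sort() above: each ITE saved by vgic_its_save_ite()
 * encodes the distance to the next valid event ID, so the in-memory
 * list must be walked in ascending event_id order for those "next"
 * offsets to be computed correctly.
 */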
2236*4882a593Smuzhiyun 
2237*4882a593Smuzhiyun /**
2238*4882a593Smuzhiyun  * vgic_its_restore_itt - restore the ITT of a device
2239*4882a593Smuzhiyun  *
2240*4882a593Smuzhiyun  * @its: its handle
2241*4882a593Smuzhiyun  * @dev: device handle
2242*4882a593Smuzhiyun  *
2243*4882a593Smuzhiyun  * Return: 0 on success, < 0 on error
2244*4882a593Smuzhiyun  */
2245*4882a593Smuzhiyun static int vgic_its_restore_itt(struct vgic_its *its, struct its_device *dev)
2246*4882a593Smuzhiyun {
2247*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2248*4882a593Smuzhiyun 	gpa_t base = dev->itt_addr;
2249*4882a593Smuzhiyun 	int ret;
2250*4882a593Smuzhiyun 	int ite_esz = abi->ite_esz;
2251*4882a593Smuzhiyun 	size_t max_size = BIT_ULL(dev->num_eventid_bits) * ite_esz;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	ret = scan_its_table(its, base, max_size, ite_esz, 0,
2254*4882a593Smuzhiyun 			     vgic_its_restore_ite, dev);
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	/* scan_its_table returns +1 if all ITEs are invalid */
2257*4882a593Smuzhiyun 	if (ret > 0)
2258*4882a593Smuzhiyun 		ret = 0;
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 	return ret;
2261*4882a593Smuzhiyun }
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun /**
2264*4882a593Smuzhiyun  * vgic_its_save_dte - Save a device table entry at a given GPA
2265*4882a593Smuzhiyun  *
2266*4882a593Smuzhiyun  * @its: ITS handle
2267*4882a593Smuzhiyun  * @dev: ITS device
2268*4882a593Smuzhiyun  * @ptr: GPA
 * @dte_esz: device table entry size
2269*4882a593Smuzhiyun  */
2270*4882a593Smuzhiyun static int vgic_its_save_dte(struct vgic_its *its, struct its_device *dev,
2271*4882a593Smuzhiyun 			     gpa_t ptr, int dte_esz)
2272*4882a593Smuzhiyun {
2273*4882a593Smuzhiyun 	struct kvm *kvm = its->dev->kvm;
2274*4882a593Smuzhiyun 	u64 val, itt_addr_field;
2275*4882a593Smuzhiyun 	u32 next_offset;
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	itt_addr_field = dev->itt_addr >> 8;
2278*4882a593Smuzhiyun 	next_offset = compute_next_devid_offset(&its->device_list, dev);
2279*4882a593Smuzhiyun 	val = (1ULL << KVM_ITS_DTE_VALID_SHIFT |
2280*4882a593Smuzhiyun 	       ((u64)next_offset << KVM_ITS_DTE_NEXT_SHIFT) |
2281*4882a593Smuzhiyun 	       (itt_addr_field << KVM_ITS_DTE_ITTADDR_SHIFT) |
2282*4882a593Smuzhiyun 		(dev->num_eventid_bits - 1));
2283*4882a593Smuzhiyun 	val = cpu_to_le64(val);
2284*4882a593Smuzhiyun 	return kvm_write_guest_lock(kvm, ptr, &val, dte_esz);
2285*4882a593Smuzhiyun }
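
/*
 * Worked example, not part of the original file: the v0 ABI DTE
 * encoding produced above, assuming the KVM_ITS_DTE_* layout from
 * include/linux/irqchip/arm-gic-v3.h (valid bit 63, next offset in
 * bits [62:49], ITT address bits [48:5] holding itt_addr >> 8, and
 * "num event ID bits minus one" in bits [4:0]). With
 * itt_addr = 0x8008000, next_offset = 1 and num_eventid_bits = 16:
 *
 *   val = (1ULL << 63) | (1ULL << 49) | ((0x8008000 >> 8) << 5) | 15
 *       = 0x800200000100100F
 */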
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun /**
2288*4882a593Smuzhiyun  * vgic_its_restore_dte - restore a device table entry
2289*4882a593Smuzhiyun  *
2290*4882a593Smuzhiyun  * @its: its handle
2291*4882a593Smuzhiyun  * @id: device id the DTE corresponds to
2292*4882a593Smuzhiyun  * @ptr: kernel VA where the 8 byte DTE is located
2293*4882a593Smuzhiyun  * @opaque: unused
2294*4882a593Smuzhiyun  *
2295*4882a593Smuzhiyun  * Return: < 0 on error, 0 if the dte is the last one, id offset to the
2296*4882a593Smuzhiyun  * next dte otherwise
2297*4882a593Smuzhiyun  */
2298*4882a593Smuzhiyun static int vgic_its_restore_dte(struct vgic_its *its, u32 id,
2299*4882a593Smuzhiyun 				void *ptr, void *opaque)
2300*4882a593Smuzhiyun {
2301*4882a593Smuzhiyun 	struct its_device *dev;
2302*4882a593Smuzhiyun 	gpa_t itt_addr;
2303*4882a593Smuzhiyun 	u8 num_eventid_bits;
2304*4882a593Smuzhiyun 	u64 entry = *(u64 *)ptr;
2305*4882a593Smuzhiyun 	bool valid;
2306*4882a593Smuzhiyun 	u32 offset;
2307*4882a593Smuzhiyun 	int ret;
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun 	entry = le64_to_cpu(entry);
2310*4882a593Smuzhiyun 
2311*4882a593Smuzhiyun 	valid = entry >> KVM_ITS_DTE_VALID_SHIFT;
2312*4882a593Smuzhiyun 	num_eventid_bits = (entry & KVM_ITS_DTE_SIZE_MASK) + 1;
2313*4882a593Smuzhiyun 	itt_addr = ((entry & KVM_ITS_DTE_ITTADDR_MASK)
2314*4882a593Smuzhiyun 			>> KVM_ITS_DTE_ITTADDR_SHIFT) << 8;
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	if (!valid)
2317*4882a593Smuzhiyun 		return 1;
2318*4882a593Smuzhiyun 
2319*4882a593Smuzhiyun 	/* dte entry is valid */
2320*4882a593Smuzhiyun 	offset = (entry & KVM_ITS_DTE_NEXT_MASK) >> KVM_ITS_DTE_NEXT_SHIFT;
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun 	dev = vgic_its_alloc_device(its, id, itt_addr, num_eventid_bits);
2323*4882a593Smuzhiyun 	if (IS_ERR(dev))
2324*4882a593Smuzhiyun 		return PTR_ERR(dev);
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun 	ret = vgic_its_restore_itt(its, dev);
2327*4882a593Smuzhiyun 	if (ret) {
2328*4882a593Smuzhiyun 		vgic_its_free_device(its->dev->kvm, dev);
2329*4882a593Smuzhiyun 		return ret;
2330*4882a593Smuzhiyun 	}
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	return offset;
2333*4882a593Smuzhiyun }
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun static int vgic_its_device_cmp(void *priv, struct list_head *a,
2336*4882a593Smuzhiyun 			       struct list_head *b)
2337*4882a593Smuzhiyun {
2338*4882a593Smuzhiyun 	struct its_device *deva = container_of(a, struct its_device, dev_list);
2339*4882a593Smuzhiyun 	struct its_device *devb = container_of(b, struct its_device, dev_list);
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun 	if (deva->device_id < devb->device_id)
2342*4882a593Smuzhiyun 		return -1;
2343*4882a593Smuzhiyun 	else
2344*4882a593Smuzhiyun 		return 1;
2345*4882a593Smuzhiyun }
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun /**
2348*4882a593Smuzhiyun  * vgic_its_save_device_tables - Save the device table and all ITTs
2349*4882a593Smuzhiyun  * into guest RAM
2350*4882a593Smuzhiyun  *
2351*4882a593Smuzhiyun  * L1/L2 handling is hidden by vgic_its_check_id() helper which directly
2352*4882a593Smuzhiyun  * returns the GPA of the device entry
2353*4882a593Smuzhiyun  */
2354*4882a593Smuzhiyun static int vgic_its_save_device_tables(struct vgic_its *its)
2355*4882a593Smuzhiyun {
2356*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2357*4882a593Smuzhiyun 	u64 baser = its->baser_device_table;
2358*4882a593Smuzhiyun 	struct its_device *dev;
2359*4882a593Smuzhiyun 	int dte_esz = abi->dte_esz;
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun 	if (!(baser & GITS_BASER_VALID))
2362*4882a593Smuzhiyun 		return 0;
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	list_sort(NULL, &its->device_list, vgic_its_device_cmp);
2365*4882a593Smuzhiyun 
2366*4882a593Smuzhiyun 	list_for_each_entry(dev, &its->device_list, dev_list) {
2367*4882a593Smuzhiyun 		int ret;
2368*4882a593Smuzhiyun 		gpa_t eaddr;
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun 		if (!vgic_its_check_id(its, baser,
2371*4882a593Smuzhiyun 				       dev->device_id, &eaddr))
2372*4882a593Smuzhiyun 			return -EINVAL;
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun 		ret = vgic_its_save_itt(its, dev);
2375*4882a593Smuzhiyun 		if (ret)
2376*4882a593Smuzhiyun 			return ret;
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 		ret = vgic_its_save_dte(its, dev, eaddr, dte_esz);
2379*4882a593Smuzhiyun 		if (ret)
2380*4882a593Smuzhiyun 			return ret;
2381*4882a593Smuzhiyun 	}
2382*4882a593Smuzhiyun 	return 0;
2383*4882a593Smuzhiyun }
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun /**
2386*4882a593Smuzhiyun  * handle_l1_dte - callback used for L1 device table entries (2 stage case)
2387*4882a593Smuzhiyun  *
2388*4882a593Smuzhiyun  * @its: its handle
2389*4882a593Smuzhiyun  * @id: index of the entry in the L1 table
2390*4882a593Smuzhiyun  * @addr: kernel VA
2391*4882a593Smuzhiyun  * @opaque: unused
2392*4882a593Smuzhiyun  *
2393*4882a593Smuzhiyun  * L1 table entries are scanned by steps of 1 entry
2394*4882a593Smuzhiyun  * Return < 0 if error, 0 if last dte was found when scanning the L2
2395*4882a593Smuzhiyun  * table, +1 otherwise (meaning next L1 entry must be scanned)
2396*4882a593Smuzhiyun  */
2397*4882a593Smuzhiyun static int handle_l1_dte(struct vgic_its *its, u32 id, void *addr,
2398*4882a593Smuzhiyun 			 void *opaque)
2399*4882a593Smuzhiyun {
2400*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2401*4882a593Smuzhiyun 	int l2_start_id = id * (SZ_64K / abi->dte_esz);
2402*4882a593Smuzhiyun 	u64 entry = *(u64 *)addr;
2403*4882a593Smuzhiyun 	int dte_esz = abi->dte_esz;
2404*4882a593Smuzhiyun 	gpa_t gpa;
2405*4882a593Smuzhiyun 	int ret;
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 	entry = le64_to_cpu(entry);
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	if (!(entry & KVM_ITS_L1E_VALID_MASK))
2410*4882a593Smuzhiyun 		return 1;
2411*4882a593Smuzhiyun 
2412*4882a593Smuzhiyun 	gpa = entry & KVM_ITS_L1E_ADDR_MASK;
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	ret = scan_its_table(its, gpa, SZ_64K, dte_esz,
2415*4882a593Smuzhiyun 			     l2_start_id, vgic_its_restore_dte, NULL);
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun 	return ret;
2418*4882a593Smuzhiyun }
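
/*
 * Worked example for the l2_start_id computation above: with the v0
 * ABI's 8-byte DTEs, one 64K L2 page holds SZ_64K / 8 = 8192 entries,
 * so L1 entry N covers device IDs [N * 8192, N * 8192 + 8191].
 */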
2419*4882a593Smuzhiyun 
2420*4882a593Smuzhiyun /**
2421*4882a593Smuzhiyun  * vgic_its_restore_device_tables - Restore the device table and all ITTs
2422*4882a593Smuzhiyun  * from guest RAM to internal data structs
2423*4882a593Smuzhiyun  */
2424*4882a593Smuzhiyun static int vgic_its_restore_device_tables(struct vgic_its *its)
2425*4882a593Smuzhiyun {
2426*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2427*4882a593Smuzhiyun 	u64 baser = its->baser_device_table;
2428*4882a593Smuzhiyun 	int l1_esz, ret;
2429*4882a593Smuzhiyun 	int l1_tbl_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2430*4882a593Smuzhiyun 	gpa_t l1_gpa;
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	if (!(baser & GITS_BASER_VALID))
2433*4882a593Smuzhiyun 		return 0;
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun 	l1_gpa = GITS_BASER_ADDR_48_to_52(baser);
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 	if (baser & GITS_BASER_INDIRECT) {
2438*4882a593Smuzhiyun 		l1_esz = GITS_LVL1_ENTRY_SIZE;
2439*4882a593Smuzhiyun 		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2440*4882a593Smuzhiyun 				     handle_l1_dte, NULL);
2441*4882a593Smuzhiyun 	} else {
2442*4882a593Smuzhiyun 		l1_esz = abi->dte_esz;
2443*4882a593Smuzhiyun 		ret = scan_its_table(its, l1_gpa, l1_tbl_size, l1_esz, 0,
2444*4882a593Smuzhiyun 				     vgic_its_restore_dte, NULL);
2445*4882a593Smuzhiyun 	}
2446*4882a593Smuzhiyun 
2447*4882a593Smuzhiyun 	/* scan_its_table returns +1 if all entries are invalid */
2448*4882a593Smuzhiyun 	if (ret > 0)
2449*4882a593Smuzhiyun 		ret = 0;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	return ret;
2452*4882a593Smuzhiyun }
2453*4882a593Smuzhiyun 
2454*4882a593Smuzhiyun static int vgic_its_save_cte(struct vgic_its *its,
2455*4882a593Smuzhiyun 			     struct its_collection *collection,
2456*4882a593Smuzhiyun 			     gpa_t gpa, int esz)
2457*4882a593Smuzhiyun {
2458*4882a593Smuzhiyun 	u64 val;
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	val = (1ULL << KVM_ITS_CTE_VALID_SHIFT |
2461*4882a593Smuzhiyun 	       ((u64)collection->target_addr << KVM_ITS_CTE_RDBASE_SHIFT) |
2462*4882a593Smuzhiyun 	       collection->collection_id);
2463*4882a593Smuzhiyun 	val = cpu_to_le64(val);
2464*4882a593Smuzhiyun 	return kvm_write_guest_lock(its->dev->kvm, gpa, &val, esz);
2465*4882a593Smuzhiyun }
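
/*
 * Worked example, not part of the original file: the CTE layout written
 * above, assuming the KVM_ITS_CTE_* definitions from
 * include/linux/irqchip/arm-gic-v3.h (valid bit 63, RDBASE, i.e. the
 * target VCPU index, from bit 16 up, ICID in bits [15:0]). A collection
 * with target_addr = 1 and collection_id = 3 is saved as
 * 0x8000000000010003.
 */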
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz)
2468*4882a593Smuzhiyun {
2469*4882a593Smuzhiyun 	struct its_collection *collection;
2470*4882a593Smuzhiyun 	struct kvm *kvm = its->dev->kvm;
2471*4882a593Smuzhiyun 	u32 target_addr, coll_id;
2472*4882a593Smuzhiyun 	u64 val;
2473*4882a593Smuzhiyun 	int ret;
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 	BUG_ON(esz > sizeof(val));
2476*4882a593Smuzhiyun 	ret = kvm_read_guest_lock(kvm, gpa, &val, esz);
2477*4882a593Smuzhiyun 	if (ret)
2478*4882a593Smuzhiyun 		return ret;
2479*4882a593Smuzhiyun 	val = le64_to_cpu(val);
2480*4882a593Smuzhiyun 	if (!(val & KVM_ITS_CTE_VALID_MASK))
2481*4882a593Smuzhiyun 		return 0;
2482*4882a593Smuzhiyun 
2483*4882a593Smuzhiyun 	target_addr = (u32)(val >> KVM_ITS_CTE_RDBASE_SHIFT);
2484*4882a593Smuzhiyun 	coll_id = val & KVM_ITS_CTE_ICID_MASK;
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun 	if (target_addr != COLLECTION_NOT_MAPPED &&
2487*4882a593Smuzhiyun 	    target_addr >= atomic_read(&kvm->online_vcpus))
2488*4882a593Smuzhiyun 		return -EINVAL;
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 	collection = find_collection(its, coll_id);
2491*4882a593Smuzhiyun 	if (collection)
2492*4882a593Smuzhiyun 		return -EEXIST;
2493*4882a593Smuzhiyun 	ret = vgic_its_alloc_collection(its, &collection, coll_id);
2494*4882a593Smuzhiyun 	if (ret)
2495*4882a593Smuzhiyun 		return ret;
2496*4882a593Smuzhiyun 	collection->target_addr = target_addr;
2497*4882a593Smuzhiyun 	return 1;
2498*4882a593Smuzhiyun }
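
/*
 * Note on the return convention of vgic_its_restore_cte(): 1 means a
 * valid CTE was restored and the caller should keep scanning, 0 means
 * an invalid (terminator) entry was hit, and < 0 is an error. The v0
 * ABI uses cte_esz == 8, so kvm_read_guest_lock() fills val entirely.
 */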
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun /**
2501*4882a593Smuzhiyun  * vgic_its_save_collection_table - Save the collection table into
2502*4882a593Smuzhiyun  * guest RAM
2503*4882a593Smuzhiyun  */
2504*4882a593Smuzhiyun static int vgic_its_save_collection_table(struct vgic_its *its)
2505*4882a593Smuzhiyun {
2506*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2507*4882a593Smuzhiyun 	u64 baser = its->baser_coll_table;
2508*4882a593Smuzhiyun 	gpa_t gpa = GITS_BASER_ADDR_48_to_52(baser);
2509*4882a593Smuzhiyun 	struct its_collection *collection;
2510*4882a593Smuzhiyun 	u64 val;
2511*4882a593Smuzhiyun 	size_t max_size, filled = 0;
2512*4882a593Smuzhiyun 	int ret, cte_esz = abi->cte_esz;
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun 	if (!(baser & GITS_BASER_VALID))
2515*4882a593Smuzhiyun 		return 0;
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 	list_for_each_entry(collection, &its->collection_list, coll_list) {
2520*4882a593Smuzhiyun 		ret = vgic_its_save_cte(its, collection, gpa, cte_esz);
2521*4882a593Smuzhiyun 		if (ret)
2522*4882a593Smuzhiyun 			return ret;
2523*4882a593Smuzhiyun 		gpa += cte_esz;
2524*4882a593Smuzhiyun 		filled += cte_esz;
2525*4882a593Smuzhiyun 	}
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	if (filled == max_size)
2528*4882a593Smuzhiyun 		return 0;
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	/*
2531*4882a593Smuzhiyun 	 * table is not fully filled, add a last dummy element
2532*4882a593Smuzhiyun 	 * with valid bit unset
2533*4882a593Smuzhiyun 	 */
2534*4882a593Smuzhiyun 	val = 0;
2535*4882a593Smuzhiyun 	BUG_ON(cte_esz > sizeof(val));
2536*4882a593Smuzhiyun 	ret = kvm_write_guest_lock(its->dev->kvm, gpa, &val, cte_esz);
2537*4882a593Smuzhiyun 	return ret;
2538*4882a593Smuzhiyun }
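
/*
 * The zero "dummy" entry written above has the valid bit clear, which
 * is what makes vgic_its_restore_cte() return 0 and stop the restore
 * loop before it runs off the end of the live entries.
 */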
2539*4882a593Smuzhiyun 
2540*4882a593Smuzhiyun /**
2541*4882a593Smuzhiyun  * vgic_its_restore_collection_table - reads the collection table
2542*4882a593Smuzhiyun  * in guest memory and restores the ITS internal state. Requires the
2543*4882a593Smuzhiyun  * BASER registers to have been restored beforehand.
2544*4882a593Smuzhiyun  */
2545*4882a593Smuzhiyun static int vgic_its_restore_collection_table(struct vgic_its *its)
2546*4882a593Smuzhiyun {
2547*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2548*4882a593Smuzhiyun 	u64 baser = its->baser_coll_table;
2549*4882a593Smuzhiyun 	int cte_esz = abi->cte_esz;
2550*4882a593Smuzhiyun 	size_t max_size, read = 0;
2551*4882a593Smuzhiyun 	gpa_t gpa;
2552*4882a593Smuzhiyun 	int ret;
2553*4882a593Smuzhiyun 
2554*4882a593Smuzhiyun 	if (!(baser & GITS_BASER_VALID))
2555*4882a593Smuzhiyun 		return 0;
2556*4882a593Smuzhiyun 
2557*4882a593Smuzhiyun 	gpa = GITS_BASER_ADDR_48_to_52(baser);
2558*4882a593Smuzhiyun 
2559*4882a593Smuzhiyun 	max_size = GITS_BASER_NR_PAGES(baser) * SZ_64K;
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun 	while (read < max_size) {
2562*4882a593Smuzhiyun 		ret = vgic_its_restore_cte(its, gpa, cte_esz);
2563*4882a593Smuzhiyun 		if (ret <= 0)
2564*4882a593Smuzhiyun 			break;
2565*4882a593Smuzhiyun 		gpa += cte_esz;
2566*4882a593Smuzhiyun 		read += cte_esz;
2567*4882a593Smuzhiyun 	}
2568*4882a593Smuzhiyun 
2569*4882a593Smuzhiyun 	if (ret > 0)
2570*4882a593Smuzhiyun 		return 0;
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun 	return ret;
2573*4882a593Smuzhiyun }
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun /**
2576*4882a593Smuzhiyun  * vgic_its_save_tables_v0 - Save the ITS tables into guest RAM
2577*4882a593Smuzhiyun  * according to v0 ABI
2578*4882a593Smuzhiyun  */
2579*4882a593Smuzhiyun static int vgic_its_save_tables_v0(struct vgic_its *its)
2580*4882a593Smuzhiyun {
2581*4882a593Smuzhiyun 	int ret;
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun 	ret = vgic_its_save_device_tables(its);
2584*4882a593Smuzhiyun 	if (ret)
2585*4882a593Smuzhiyun 		return ret;
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 	return vgic_its_save_collection_table(its);
2588*4882a593Smuzhiyun }
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun /**
2591*4882a593Smuzhiyun  * vgic_its_restore_tables_v0 - Restore the ITS tables from guest RAM
2592*4882a593Smuzhiyun  * to internal data structs according to V0 ABI
2593*4882a593Smuzhiyun  *
2594*4882a593Smuzhiyun  */
2595*4882a593Smuzhiyun static int vgic_its_restore_tables_v0(struct vgic_its *its)
2596*4882a593Smuzhiyun {
2597*4882a593Smuzhiyun 	int ret;
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	ret = vgic_its_restore_collection_table(its);
2600*4882a593Smuzhiyun 	if (ret)
2601*4882a593Smuzhiyun 		return ret;
2602*4882a593Smuzhiyun 
2603*4882a593Smuzhiyun 	return vgic_its_restore_device_tables(its);
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun static int vgic_its_commit_v0(struct vgic_its *its)
2607*4882a593Smuzhiyun {
2608*4882a593Smuzhiyun 	const struct vgic_its_abi *abi;
2609*4882a593Smuzhiyun 
2610*4882a593Smuzhiyun 	abi = vgic_its_get_abi(its);
2611*4882a593Smuzhiyun 	its->baser_coll_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2612*4882a593Smuzhiyun 	its->baser_device_table &= ~GITS_BASER_ENTRY_SIZE_MASK;
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun 	its->baser_coll_table |= (GIC_ENCODE_SZ(abi->cte_esz, 5)
2615*4882a593Smuzhiyun 					<< GITS_BASER_ENTRY_SIZE_SHIFT);
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun 	its->baser_device_table |= (GIC_ENCODE_SZ(abi->dte_esz, 5)
2618*4882a593Smuzhiyun 					<< GITS_BASER_ENTRY_SIZE_SHIFT);
2619*4882a593Smuzhiyun 	return 0;
2620*4882a593Smuzhiyun }
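
/*
 * Note: GIC_ENCODE_SZ(n, w) stores (n - 1) in a w-bit field, matching
 * the architectural "entry size minus one" convention of GITS_BASER;
 * e.g. the v0 ABI's 8-byte entries are committed as
 * GIC_ENCODE_SZ(8, 5) = 7.
 */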
2621*4882a593Smuzhiyun 
2622*4882a593Smuzhiyun static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its)
2623*4882a593Smuzhiyun {
2624*4882a593Smuzhiyun 	/* We need to keep the ABI specific field values */
2625*4882a593Smuzhiyun 	its->baser_coll_table &= ~GITS_BASER_VALID;
2626*4882a593Smuzhiyun 	its->baser_device_table &= ~GITS_BASER_VALID;
2627*4882a593Smuzhiyun 	its->cbaser = 0;
2628*4882a593Smuzhiyun 	its->creadr = 0;
2629*4882a593Smuzhiyun 	its->cwriter = 0;
2630*4882a593Smuzhiyun 	its->enabled = 0;
2631*4882a593Smuzhiyun 	vgic_its_free_device_list(kvm, its);
2632*4882a593Smuzhiyun 	vgic_its_free_collection_list(kvm, its);
2633*4882a593Smuzhiyun }
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun static int vgic_its_has_attr(struct kvm_device *dev,
2636*4882a593Smuzhiyun 			     struct kvm_device_attr *attr)
2637*4882a593Smuzhiyun {
2638*4882a593Smuzhiyun 	switch (attr->group) {
2639*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_ADDR:
2640*4882a593Smuzhiyun 		switch (attr->attr) {
2641*4882a593Smuzhiyun 		case KVM_VGIC_ITS_ADDR_TYPE:
2642*4882a593Smuzhiyun 			return 0;
2643*4882a593Smuzhiyun 		}
2644*4882a593Smuzhiyun 		break;
2645*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
2646*4882a593Smuzhiyun 		switch (attr->attr) {
2647*4882a593Smuzhiyun 		case KVM_DEV_ARM_VGIC_CTRL_INIT:
2648*4882a593Smuzhiyun 			return 0;
2649*4882a593Smuzhiyun 		case KVM_DEV_ARM_ITS_CTRL_RESET:
2650*4882a593Smuzhiyun 			return 0;
2651*4882a593Smuzhiyun 		case KVM_DEV_ARM_ITS_SAVE_TABLES:
2652*4882a593Smuzhiyun 			return 0;
2653*4882a593Smuzhiyun 		case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2654*4882a593Smuzhiyun 			return 0;
2655*4882a593Smuzhiyun 		}
2656*4882a593Smuzhiyun 		break;
2657*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS:
2658*4882a593Smuzhiyun 		return vgic_its_has_attr_regs(dev, attr);
2659*4882a593Smuzhiyun 	}
2660*4882a593Smuzhiyun 	return -ENXIO;
2661*4882a593Smuzhiyun }
2662*4882a593Smuzhiyun 
2663*4882a593Smuzhiyun static int vgic_its_ctrl(struct kvm *kvm, struct vgic_its *its, u64 attr)
2664*4882a593Smuzhiyun {
2665*4882a593Smuzhiyun 	const struct vgic_its_abi *abi = vgic_its_get_abi(its);
2666*4882a593Smuzhiyun 	int ret = 0;
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun 	if (attr == KVM_DEV_ARM_VGIC_CTRL_INIT) /* Nothing to do */
2669*4882a593Smuzhiyun 		return 0;
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun 	mutex_lock(&kvm->lock);
2672*4882a593Smuzhiyun 	mutex_lock(&its->its_lock);
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	if (!lock_all_vcpus(kvm)) {
2675*4882a593Smuzhiyun 		mutex_unlock(&its->its_lock);
2676*4882a593Smuzhiyun 		mutex_unlock(&kvm->lock);
2677*4882a593Smuzhiyun 		return -EBUSY;
2678*4882a593Smuzhiyun 	}
2679*4882a593Smuzhiyun 
2680*4882a593Smuzhiyun 	switch (attr) {
2681*4882a593Smuzhiyun 	case KVM_DEV_ARM_ITS_CTRL_RESET:
2682*4882a593Smuzhiyun 		vgic_its_reset(kvm, its);
2683*4882a593Smuzhiyun 		break;
2684*4882a593Smuzhiyun 	case KVM_DEV_ARM_ITS_SAVE_TABLES:
2685*4882a593Smuzhiyun 		ret = abi->save_tables(its);
2686*4882a593Smuzhiyun 		break;
2687*4882a593Smuzhiyun 	case KVM_DEV_ARM_ITS_RESTORE_TABLES:
2688*4882a593Smuzhiyun 		ret = abi->restore_tables(its);
2689*4882a593Smuzhiyun 		break;
2690*4882a593Smuzhiyun 	}
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun 	unlock_all_vcpus(kvm);
2693*4882a593Smuzhiyun 	mutex_unlock(&its->its_lock);
2694*4882a593Smuzhiyun 	mutex_unlock(&kvm->lock);
2695*4882a593Smuzhiyun 	return ret;
2696*4882a593Smuzhiyun }
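
/*
 * Locking note for vgic_its_ctrl(): the acquisition order above is
 * kvm->lock, then its->its_lock, then all VCPU locks via
 * lock_all_vcpus(), so no VCPU can run while the tables are being
 * saved, restored or reset.
 */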
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun static int vgic_its_set_attr(struct kvm_device *dev,
2699*4882a593Smuzhiyun 			     struct kvm_device_attr *attr)
2700*4882a593Smuzhiyun {
2701*4882a593Smuzhiyun 	struct vgic_its *its = dev->private;
2702*4882a593Smuzhiyun 	int ret;
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	switch (attr->group) {
2705*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2706*4882a593Smuzhiyun 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2707*4882a593Smuzhiyun 		unsigned long type = (unsigned long)attr->attr;
2708*4882a593Smuzhiyun 		u64 addr;
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
2711*4882a593Smuzhiyun 			return -ENODEV;
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 		if (copy_from_user(&addr, uaddr, sizeof(addr)))
2714*4882a593Smuzhiyun 			return -EFAULT;
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 		ret = vgic_check_ioaddr(dev->kvm, &its->vgic_its_base,
2717*4882a593Smuzhiyun 					addr, SZ_64K);
2718*4882a593Smuzhiyun 		if (ret)
2719*4882a593Smuzhiyun 			return ret;
2720*4882a593Smuzhiyun 
2721*4882a593Smuzhiyun 		return vgic_register_its_iodev(dev->kvm, its, addr);
2722*4882a593Smuzhiyun 	}
2723*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_CTRL:
2724*4882a593Smuzhiyun 		return vgic_its_ctrl(dev->kvm, its, attr->attr);
2725*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2726*4882a593Smuzhiyun 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2727*4882a593Smuzhiyun 		u64 reg;
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun 		if (get_user(reg, uaddr))
2730*4882a593Smuzhiyun 			return -EFAULT;
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 		return vgic_its_attr_regs_access(dev, attr, &reg, true);
2733*4882a593Smuzhiyun 	}
2734*4882a593Smuzhiyun 	}
2735*4882a593Smuzhiyun 	return -ENXIO;
2736*4882a593Smuzhiyun }
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun static int vgic_its_get_attr(struct kvm_device *dev,
2739*4882a593Smuzhiyun 			     struct kvm_device_attr *attr)
2740*4882a593Smuzhiyun {
2741*4882a593Smuzhiyun 	switch (attr->group) {
2742*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_ADDR: {
2743*4882a593Smuzhiyun 		struct vgic_its *its = dev->private;
2744*4882a593Smuzhiyun 		u64 addr = its->vgic_its_base;
2745*4882a593Smuzhiyun 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2746*4882a593Smuzhiyun 		unsigned long type = (unsigned long)attr->attr;
2747*4882a593Smuzhiyun 
2748*4882a593Smuzhiyun 		if (type != KVM_VGIC_ITS_ADDR_TYPE)
2749*4882a593Smuzhiyun 			return -ENODEV;
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun 		if (copy_to_user(uaddr, &addr, sizeof(addr)))
2752*4882a593Smuzhiyun 			return -EFAULT;
2753*4882a593Smuzhiyun 		break;
2754*4882a593Smuzhiyun 	}
2755*4882a593Smuzhiyun 	case KVM_DEV_ARM_VGIC_GRP_ITS_REGS: {
2756*4882a593Smuzhiyun 		u64 __user *uaddr = (u64 __user *)(long)attr->addr;
2757*4882a593Smuzhiyun 		u64 reg;
2758*4882a593Smuzhiyun 		int ret;
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun 		ret = vgic_its_attr_regs_access(dev, attr, &reg, false);
2761*4882a593Smuzhiyun 		if (ret)
2762*4882a593Smuzhiyun 			return ret;
2763*4882a593Smuzhiyun 		return put_user(reg, uaddr);
2764*4882a593Smuzhiyun 	}
2765*4882a593Smuzhiyun 	default:
2766*4882a593Smuzhiyun 		return -ENXIO;
2767*4882a593Smuzhiyun 	}
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun 	return 0;
2770*4882a593Smuzhiyun }
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun static struct kvm_device_ops kvm_arm_vgic_its_ops = {
2773*4882a593Smuzhiyun 	.name = "kvm-arm-vgic-its",
2774*4882a593Smuzhiyun 	.create = vgic_its_create,
2775*4882a593Smuzhiyun 	.destroy = vgic_its_destroy,
2776*4882a593Smuzhiyun 	.set_attr = vgic_its_set_attr,
2777*4882a593Smuzhiyun 	.get_attr = vgic_its_get_attr,
2778*4882a593Smuzhiyun 	.has_attr = vgic_its_has_attr,
2779*4882a593Smuzhiyun };
2780*4882a593Smuzhiyun 
2781*4882a593Smuzhiyun int kvm_vgic_register_its_device(void)
2782*4882a593Smuzhiyun {
2783*4882a593Smuzhiyun 	return kvm_register_device_ops(&kvm_arm_vgic_its_ops,
2784*4882a593Smuzhiyun 				       KVM_DEV_TYPE_ARM_VGIC_ITS);
2785*4882a593Smuzhiyun }
2786
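
/*
 * Illustrative userspace sketch, not part of the original file: how the
 * device registered above is typically driven. vm_fd is an assumed,
 * already-created VM file descriptor; error handling is elided. Wrapped
 * in #if 0 since it is host userspace code, not kernel code.
 */
#if 0
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int its_create_and_save(int vm_fd, __u64 its_base)
{
	struct kvm_create_device cd = {
		.type = KVM_DEV_TYPE_ARM_VGIC_ITS,
	};
	struct kvm_device_attr addr_attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_ADDR,
		.attr = KVM_VGIC_ITS_ADDR_TYPE,
		.addr = (__u64)(unsigned long)&its_base,
	};
	struct kvm_device_attr save_attr = {
		.group = KVM_DEV_ARM_VGIC_GRP_CTRL,
		.attr = KVM_DEV_ARM_ITS_SAVE_TABLES,
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd))
		return -1;
	/* 64K-aligned GPA, checked by vgic_its_set_attr() above */
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &addr_attr))
		return -1;
	/* ... run the guest; quiesce it before saving ... */
	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &save_attr);
}
#endif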