xref: /OK3568_Linux_fs/kernel/virt/kvm/coalesced_mmio.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is it in a batchable area?
	 * (addr, len) must be fully included in (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
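	/* Reject ranges whose end wraps past the top of the address space. */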
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned int avail;

	/* Are we able to batch it? */

	/*
	 * last is the first free entry; check that we do not run into the
	 * first used entry.  There is always one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
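	/*
	 * The subtraction may wrap; the unsigned modulo by
	 * KVM_COALESCED_MMIO_MAX yields the number of free slots either way.
	 */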
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

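	/*
	 * The ring page is shared with (and writable by) userspace, so
	 * ring->last cannot be trusted: read it once and bounds-check it
	 * before using it as an index.
	 */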
	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data in first free entry of the ring */

	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
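	/*
	 * Make the new entry visible before advancing ring->last; userspace
	 * consumes entries up to ring->last without taking ring_lock.
	 */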
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

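	/*
	 * A single zeroed page backs the ring; userspace maps it (see
	 * KVM_COALESCED_MMIO_PAGE_OFFSET) and drains the queued writes.
	 */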
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
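	/*
	 * Register the zone on the PIO or MMIO bus as requested; writes that
	 * hit it are then queued in the ring instead of causing an exit to
	 * userspace.
	 */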
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;
	int r;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

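	/*
	 * Drop every zone of the requested type whose registered range fully
	 * contains the range being unregistered.
	 */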
	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			r = kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);

			/*
			 * On failure, unregister destroys all devices on the
			 * bus _except_ the target device, i.e. coalesced_zones
			 * has been modified.  No need to restart the walk as
			 * there aren't any zones left.
			 */
			if (r)
				break;
			kvm_iodevice_destructor(&dev->dev);
		}
	}

	mutex_unlock(&kvm->slots_lock);

	/*
	 * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
	 * perspective, the coalesced MMIO is most definitely unregistered.
	 */
	return 0;
}
209*4882a593Smuzhiyun }
210