// SPDX-License-Identifier: GPL-2.0-or-later
/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#define LAST_ADD_TIME_UPDATE(_vq)				\
	do {							\
		ktime_t now = ktime_get();			\
								\
		/* No kick or get, with 0.1 seconds between? Warn. */ \
		if ((_vq)->last_add_time_valid)			\
			WARN_ON(ktime_to_ms(ktime_sub(now,	\
				(_vq)->last_add_time)) > 100);	\
		(_vq)->last_add_time = now;			\
		(_vq)->last_add_time_valid = true;		\
	} while (0)
#define LAST_ADD_TIME_CHECK(_vq)				\
	do {							\
		if ((_vq)->last_add_time_valid) {		\
			WARN_ON(ktime_to_ms(ktime_sub(ktime_get(), \
				(_vq)->last_add_time)) > 100);	\
		}						\
	} while (0)
#define LAST_ADD_TIME_INVALID(_vq)				\
	((_vq)->last_add_time_valid = false)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#define LAST_ADD_TIME_UPDATE(vq)
#define LAST_ADD_TIME_CHECK(vq)
#define LAST_ADD_TIME_INVALID(vq)
#endif
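
/*
 * Illustrative sketch (not part of the original file): every externally
 * visible operation below brackets its ring accesses with
 * START_USE()/END_USE(), so a DEBUG build panics on unintended reentry:
 *
 *	void *example_op(struct virtqueue *_vq)
 *	{
 *		struct vring_virtqueue *vq = to_vvq(_vq);
 *		void *ret;
 *
 *		START_USE(vq);
 *		ret = do_ring_work(vq);	(do_ring_work is hypothetical)
 *		END_USE(vq);
 *		return ret;
 *	}
 */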

struct vring_desc_state_split {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_desc_state_packed {
	void *data;			/* Data for callback. */
	struct vring_packed_desc *indir_desc; /* Indirect descriptor, if any. */
	u16 num;			/* Descriptor list length. */
	u16 next;			/* The next desc state in a list. */
	u16 last;			/* The last desc state in a list. */
};

struct vring_desc_extra_packed {
	dma_addr_t addr;		/* Buffer DMA addr. */
	u32 len;			/* Buffer length. */
	u16 flags;			/* Descriptor flags. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Is this a packed ring? */
	bool packed_ring;

	/* Is DMA API used? */
	bool use_dma_api;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	union {
		/* Available for split ring */
		struct {
			/* Actual memory layout for this queue. */
			struct vring vring;

			/* Last written value to avail->flags */
			u16 avail_flags_shadow;

			/*
			 * Last written value to avail->idx in
			 * guest byte order.
			 */
			u16 avail_idx_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_split *desc_state;

			/* DMA address and size information */
			dma_addr_t queue_dma_addr;
			size_t queue_size_in_bytes;
		} split;

		/* Available for packed ring */
		struct {
			/* Actual memory layout for this queue. */
			struct {
				unsigned int num;
				struct vring_packed_desc *desc;
				struct vring_packed_desc_event *driver;
				struct vring_packed_desc_event *device;
			} vring;

			/* Driver ring wrap counter. */
			bool avail_wrap_counter;

			/* Device ring wrap counter. */
			bool used_wrap_counter;

			/* Avail used flags. */
			u16 avail_used_flags;

			/* Index of the next avail descriptor. */
			u16 next_avail_idx;

			/*
			 * Last written value to driver->flags in
			 * guest byte order.
			 */
			u16 event_flags_shadow;

			/* Per-descriptor state. */
			struct vring_desc_state_packed *desc_state;
			struct vring_desc_extra_packed *desc_extra;

			/* DMA address and size information */
			dma_addr_t ring_dma_addr;
			dma_addr_t driver_event_dma_addr;
			dma_addr_t device_event_dma_addr;
			size_t ring_size_in_bytes;
			size_t event_size_in_bytes;
		} packed;
	};

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif
};


/*
 * Helpers.
 */

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

static inline bool virtqueue_use_indirect(struct virtqueue *_vq,
					  unsigned int total_sg)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/*
	 * If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold
	 */
	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
}
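
/*
 * For example (illustrative): with VIRTIO_RING_F_INDIRECT_DESC negotiated,
 * a 3-entry scatterlist consumes one ring slot plus a 3-descriptor
 * indirect table allocated on the fly:
 *
 *	if (virtqueue_use_indirect(_vq, 3))
 *		desc = alloc_indirect_split(_vq, 3, gfp);	(one slot used)
 *
 * whereas total_sg == 1 always takes the direct path.
 */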

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on the data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_dma_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time. On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge. Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

size_t virtio_max_dma_size(struct virtio_device *vdev)
{
	size_t max_segment_size = SIZE_MAX;

	if (vring_use_dma_api(vdev))
		max_segment_size = dma_max_mapping_size(vdev->dev.parent);

	return max_segment_size;
}
EXPORT_SYMBOL_GPL(virtio_max_dma_size);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);

		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address. The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine. Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}
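
/*
 * Illustrative note: on the quirked (non-DMA-API) path the handle returned
 * here is simply the physical address, i.e. after
 *
 *	queue = vring_alloc_queue(vdev, size, &dma, GFP_KERNEL | __GFP_ZERO);
 *
 * dma == virt_to_phys(queue) holds.
 */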

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev))
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	else
		free_pages_exact(queue, PAGE_ALIGN(size));
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess. For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vq->use_dma_api)
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vq->use_dma_api)
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}


/*
 * Split ring specific functions - *_split().
 */

static void vring_unmap_one_split(const struct vring_virtqueue *vq,
				  struct vring_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_desc *alloc_indirect_split(struct virtqueue *_vq,
					       unsigned int total_sg,
					       gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}
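
/*
 * Note (illustrative): the final entry's ->next intentionally points one
 * past the table; this is harmless because virtqueue_add_split() clears
 * VRING_DESC_F_NEXT on the last descriptor it fills, so the chain is never
 * walked that far.
 */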

static inline int virtqueue_add_split(struct virtqueue *_vq,
				      struct scatterlist *sgs[],
				      unsigned int total_sg,
				      unsigned int out_sgs,
				      unsigned int in_sgs,
				      void *data,
				      void *ctx,
				      gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, prev, err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	head = vq->free_head;

	if (virtqueue_use_indirect(_vq, total_sg))
		desc = alloc_indirect_split(_vq, total_sg, gfp);
	else {
		desc = NULL;
		WARN_ON_ONCE(total_sg > vq->split.vring.num && !vq->indirect);
	}

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->split.vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer. Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->split.vring.desc[head].flags = cpu_to_virtio16(_vq->vdev,
				VRING_DESC_F_INDIRECT);
		vq->split.vring.desc[head].addr = cpu_to_virtio64(_vq->vdev,
				addr);

		vq->split.vring.desc[head].len = cpu_to_virtio32(_vq->vdev,
				total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev,
					vq->split.vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->split.desc_state[head].data = data;
	if (indirect)
		vq->split.desc_state[head].indir_desc = desc;
	else
		vq->split.desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
	vq->split.vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->split.avail_idx_shadow++;
	vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
						vq->split.avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible. Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one_split(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
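
/*
 * Usage sketch (illustrative; drivers reach this via the public wrappers
 * in <linux/virtio.h>, e.g. virtqueue_add_outbuf(), not directly):
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	if (!err && virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 */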

static bool virtqueue_kick_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->split.avail_idx_shadow - vq->num_added;
	new = vq->split.avail_idx_shadow;
	vq->num_added = 0;

	LAST_ADD_TIME_CHECK(vq);
	LAST_ADD_TIME_INVALID(vq);

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev,
					vring_avail_event(&vq->split.vring)),
					      new, old);
	} else {
		needs_kick = !(vq->split.vring.used->flags &
					cpu_to_virtio16(_vq->vdev,
						VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
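
/*
 * Kick pattern sketch (illustrative): separating prepare from notify lets
 * a driver batch several adds under its own lock and take at most one
 * (expensive) exit to the host:
 *
 *	spin_lock(&lock);
 *	while (have_buffers())			(driver-specific test)
 *		virtqueue_add_outbuf(vq, ...);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock(&lock);
 *	if (kick)
 *		virtqueue_notify(vq);
 */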

static void detach_buf_split(struct vring_virtqueue *vq, unsigned int head,
			     void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->split.desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->split.vring.desc[i].flags & nextflag) {
		vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->split.vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one_split(vq, &vq->split.vring.desc[i]);
	vq->split.vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev,
						       vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc =
				vq->split.desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev,
				      vq->split.vring.desc[head].len);

		BUG_ON(!(vq->split.vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one_split(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->split.desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->split.desc_state[head].indir_desc;
	}
}

static inline bool more_used_split(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev,
			vq->split.vring.used->idx);
}

static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
					 unsigned int *len,
					 void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used_split(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->split.vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev,
			    vq->split.vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev,
			       vq->split.vring.used->ring[last_used].len);

	if (unlikely(i >= vq->split.vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->split.desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf_split clears data, so grab it now. */
	ret = vq->split.desc_state[i].data;
	detach_buf_split(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->split.vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

	LAST_ADD_TIME_INVALID(vq);

	END_USE(vq);
	return ret;
}
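
/*
 * Completion sketch (illustrative): a driver's callback typically drains
 * the used ring until NULL is returned:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);	(hypothetical helper)
 */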

static void virtqueue_disable_cb_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
}

static unsigned virtqueue_enable_cb_prepare_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	vring_used_event(&vq->split.vring) = cpu_to_virtio16(_vq->vdev,
			last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}

static bool virtqueue_poll_split(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev,
			vq->split.vring.used->idx);
}

static bool virtqueue_enable_cb_delayed_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->split.avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->split.avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->split.vring.avail->flags =
				cpu_to_virtio16(_vq->vdev,
						vq->split.avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->split.avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->split.vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->split.vring.used->idx)
					- vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
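
/*
 * Illustrative use: the delayed variant suits TX-completion style
 * processing, where interrupting after every single buffer is wasteful:
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		process_more(vq);	(buffers already pending; hypothetical)
 */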

static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->split.vring.num; i++) {
		if (!vq->split.desc_state[i].data)
			continue;
		/* detach_buf_split clears data, so grab it now. */
		buf = vq->split.desc_state[i].data;
		detach_buf_split(vq, i, NULL);
		vq->split.avail_idx_shadow--;
		vq->split.vring.avail->idx = cpu_to_virtio16(_vq->vdev,
				vq->split.avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->split.vring.num);

	END_USE(vq);
	return NULL;
}

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
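
/*
 * Allocation sketch (illustrative): a transport asks for the advertised
 * queue size and lets this helper shrink it until the ring fits:
 *
 *	vq = vring_create_virtqueue(index, num, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    notify, callback, "rx");
 *
 * vring_create_virtqueue() is the public wrapper that dispatches here for
 * split rings (and to the packed variant when VIRTIO_F_RING_PACKED is
 * negotiated).
 */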


/*
 * Packed ring specific functions - *_packed().
 */

static void vring_unmap_state_packed(const struct vring_virtqueue *vq,
				     struct vring_desc_extra_packed *state)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = state->flags;

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 state->addr, state->len,
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       state->addr, state->len,
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static void vring_unmap_desc_packed(const struct vring_virtqueue *vq,
				    struct vring_packed_desc *desc)
{
	u16 flags;

	if (!vq->use_dma_api)
		return;

	flags = le16_to_cpu(desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 le64_to_cpu(desc->addr),
				 le32_to_cpu(desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       le64_to_cpu(desc->addr),
			       le32_to_cpu(desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
						       gfp_t gfp)
{
	struct vring_packed_desc *desc;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc_array(total_sg, sizeof(struct vring_packed_desc), gfp);

	return desc;
}

static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
					 struct scatterlist *sgs[],
					 unsigned int total_sg,
					 unsigned int out_sgs,
					 unsigned int in_sgs,
					 void *data,
					 gfp_t gfp)
{
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, err_idx;
	u16 head, id;
	dma_addr_t addr;

	head = vq->packed.next_avail_idx;
	desc = alloc_indirect_packed(total_sg, gfp);
	if (!desc)
		return -ENOMEM;

	if (unlikely(vq->vq.num_free < 1)) {
		pr_debug("Can't add buf len 1 - avail = 0\n");
		kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	i = 0;
	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_le16(n < out_sgs ?
						0 : VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			i++;
		}
	}

	/* Now that the indirect table is filled in, map it. */
	addr = vring_map_single(vq, desc,
			total_sg * sizeof(struct vring_packed_desc),
			DMA_TO_DEVICE);
	if (vring_mapping_error(vq, addr))
		goto unmap_release;

	vq->packed.vring.desc[head].addr = cpu_to_le64(addr);
	vq->packed.vring.desc[head].len = cpu_to_le32(total_sg *
				sizeof(struct vring_packed_desc));
	vq->packed.vring.desc[head].id = cpu_to_le16(id);

	if (vq->use_dma_api) {
		vq->packed.desc_extra[id].addr = addr;
		vq->packed.desc_extra[id].len = total_sg *
				sizeof(struct vring_packed_desc);
		vq->packed.desc_extra[id].flags = VRING_DESC_F_INDIRECT |
						  vq->packed.avail_used_flags;
	}

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = cpu_to_le16(VRING_DESC_F_INDIRECT |
						vq->packed.avail_used_flags);

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= 1;

	/* Update free pointer */
	n = head + 1;
	if (n >= vq->packed.vring.num) {
		n = 0;
		vq->packed.avail_wrap_counter ^= 1;
		vq->packed.avail_used_flags ^=
				1 << VRING_PACKED_DESC_F_AVAIL |
				1 << VRING_PACKED_DESC_F_USED;
	}
	vq->packed.next_avail_idx = n;
	vq->free_head = vq->packed.desc_state[id].next;

	/* Store token and indirect buffer state. */
	vq->packed.desc_state[id].num = 1;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = desc;
	vq->packed.desc_state[id].last = id;

	vq->num_added += 1;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;

	for (i = 0; i < err_idx; i++)
		vring_unmap_desc_packed(vq, &desc[i]);

	kfree(desc);

	END_USE(vq);
	return -ENOMEM;
}
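
/*
 * Wrap-flag sketch (illustrative): in a packed ring a descriptor written
 * by the driver is "available" when its AVAIL bit equals the driver's wrap
 * counter and its USED bit differs, so flipping both bits on wrap-around,
 * as above, keeps fresh descriptors distinguishable from stale ones:
 *
 *	avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
 *	used  = !!(flags & (1 << VRING_PACKED_DESC_F_USED));
 *	is_avail = (avail != used) && (avail == wrap_counter);
 */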

static inline int virtqueue_add_packed(struct virtqueue *_vq,
				       struct scatterlist *sgs[],
				       unsigned int total_sg,
				       unsigned int out_sgs,
				       unsigned int in_sgs,
				       void *data,
				       void *ctx,
				       gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct vring_packed_desc *desc;
	struct scatterlist *sg;
	unsigned int i, n, c, descs_used, err_idx;
	__le16 head_flags, flags;
	u16 head, id, prev, curr, avail_used_flags;
	int err;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

	LAST_ADD_TIME_UPDATE(vq);

	BUG_ON(total_sg == 0);

	if (virtqueue_use_indirect(_vq, total_sg)) {
		err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
						    in_sgs, data, gfp);
		if (err != -ENOMEM) {
			END_USE(vq);
			return err;
		}

		/* fall back on direct */
	}

	head = vq->packed.next_avail_idx;
	avail_used_flags = vq->packed.avail_used_flags;

	WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

	desc = vq->packed.vring.desc;
	i = head;
	descs_used = total_sg;

	if (unlikely(vq->vq.num_free < descs_used)) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		END_USE(vq);
		return -ENOSPC;
	}

	id = vq->free_head;
	BUG_ON(id == vq->packed.vring.num);

	curr = id;
	c = 0;
	for (n = 0; n < out_sgs + in_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			flags = cpu_to_le16(vq->packed.avail_used_flags |
				    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
				    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
			if (i == head)
				head_flags = flags;
			else
				desc[i].flags = flags;

			desc[i].addr = cpu_to_le64(addr);
			desc[i].len = cpu_to_le32(sg->length);
			desc[i].id = cpu_to_le16(id);

			if (unlikely(vq->use_dma_api)) {
				vq->packed.desc_extra[curr].addr = addr;
				vq->packed.desc_extra[curr].len = sg->length;
				vq->packed.desc_extra[curr].flags =
					le16_to_cpu(flags);
			}
			prev = curr;
			curr = vq->packed.desc_state[curr].next;

			if ((unlikely(++i >= vq->packed.vring.num))) {
				i = 0;
				vq->packed.avail_used_flags ^=
					1 << VRING_PACKED_DESC_F_AVAIL |
					1 << VRING_PACKED_DESC_F_USED;
			}
		}
	}

	if (i < head)
		vq->packed.avail_wrap_counter ^= 1;

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	vq->packed.next_avail_idx = i;
	vq->free_head = curr;

	/* Store token. */
	vq->packed.desc_state[id].num = descs_used;
	vq->packed.desc_state[id].data = data;
	vq->packed.desc_state[id].indir_desc = ctx;
	vq->packed.desc_state[id].last = prev;

	/*
	 * A driver MUST NOT make the first descriptor in the list
	 * available before all subsequent descriptors comprising
	 * the list are made available.
	 */
	virtio_wmb(vq->weak_barriers);
	vq->packed.vring.desc[head].flags = head_flags;
	vq->num_added += descs_used;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return 0;

unmap_release:
	err_idx = i;
	i = head;

	vq->packed.avail_used_flags = avail_used_flags;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_desc_packed(vq, &desc[i]);
		i++;
		if (i >= vq->packed.vring.num)
			i = 0;
	}

	END_USE(vq);
	return -EIO;
}

static bool virtqueue_kick_prepare_packed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old, off_wrap, flags, wrap_counter, event_idx;
	bool needs_kick;
	union {
		struct {
			__le16 off_wrap;
			__le16 flags;
		};
		u32 u32;
	} snapshot;

	START_USE(vq);

	/*
	 * We need to expose the new flags value before checking notification
	 * suppressions.
	 */
	virtio_mb(vq->weak_barriers);

	old = vq->packed.next_avail_idx - vq->num_added;
1264*4882a593Smuzhiyun new = vq->packed.next_avail_idx;
1265*4882a593Smuzhiyun vq->num_added = 0;
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun snapshot.u32 = *(u32 *)vq->packed.vring.device;
1268*4882a593Smuzhiyun flags = le16_to_cpu(snapshot.flags);
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun LAST_ADD_TIME_CHECK(vq);
1271*4882a593Smuzhiyun LAST_ADD_TIME_INVALID(vq);
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun if (flags != VRING_PACKED_EVENT_FLAG_DESC) {
1274*4882a593Smuzhiyun needs_kick = (flags != VRING_PACKED_EVENT_FLAG_DISABLE);
1275*4882a593Smuzhiyun goto out;
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun off_wrap = le16_to_cpu(snapshot.off_wrap);
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
1281*4882a593Smuzhiyun event_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);
1282*4882a593Smuzhiyun if (wrap_counter != vq->packed.avail_wrap_counter)
1283*4882a593Smuzhiyun event_idx -= vq->packed.vring.num;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun needs_kick = vring_need_event(event_idx, new, old);
1286*4882a593Smuzhiyun out:
1287*4882a593Smuzhiyun END_USE(vq);
1288*4882a593Smuzhiyun return needs_kick;
1289*4882a593Smuzhiyun }
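
/*
 * Worked example of the off_wrap decoding above (illustrative values,
 * not from this file): VRING_PACKED_EVENT_F_WRAP_CTR is 15, so a
 * device-published off_wrap of 0x8005 decodes to wrap_counter = 1 and
 * event_idx = 5. If our avail_wrap_counter is 0, the counters differ,
 * meaning the event index belongs to a different pass around the ring;
 * with num = 256 we shift it by the ring size (event_idx = 5 - 256,
 * taken modulo 2^16) so that vring_need_event() can compare it against
 * the old and new avail positions in one consistent index space.
 */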

static void detach_buf_packed(struct vring_virtqueue *vq,
                              unsigned int id, void **ctx)
{
        struct vring_desc_state_packed *state = NULL;
        struct vring_packed_desc *desc;
        unsigned int i, curr;

        state = &vq->packed.desc_state[id];

        /* Clear data ptr. */
        state->data = NULL;

        vq->packed.desc_state[state->last].next = vq->free_head;
        vq->free_head = id;
        vq->vq.num_free += state->num;

        if (unlikely(vq->use_dma_api)) {
                curr = id;
                for (i = 0; i < state->num; i++) {
                        vring_unmap_state_packed(vq,
                                                 &vq->packed.desc_extra[curr]);
                        curr = vq->packed.desc_state[curr].next;
                }
        }

        if (vq->indirect) {
                u32 len;

                /* Free the indirect table, if any, now that it's unmapped. */
                desc = state->indir_desc;
                if (!desc)
                        return;

                if (vq->use_dma_api) {
                        len = vq->packed.desc_extra[id].len;
                        for (i = 0; i < len / sizeof(struct vring_packed_desc);
                                        i++)
                                vring_unmap_desc_packed(vq, &desc[i]);
                }
                kfree(desc);
                state->indir_desc = NULL;
        } else if (ctx) {
                *ctx = state->indir_desc;
        }
}

static inline bool is_used_desc_packed(const struct vring_virtqueue *vq,
                                       u16 idx, bool used_wrap_counter)
{
        bool avail, used;
        u16 flags;

        flags = le16_to_cpu(vq->packed.vring.desc[idx].flags);
        avail = !!(flags & (1 << VRING_PACKED_DESC_F_AVAIL));
        used = !!(flags & (1 << VRING_PACKED_DESC_F_USED));

        return avail == used && used == used_wrap_counter;
}
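
/*
 * Example of the flag polarity (illustrative): on the first pass a
 * descriptor made available by the driver carries AVAIL = 1, USED = 0,
 * so avail != used and it does not read as used. Once the device
 * consumes it, it writes AVAIL = USED = 1, which matches a
 * used_wrap_counter of 1 and the descriptor reads as used. On the
 * next wrap the polarity flips: the driver publishes AVAIL = 0,
 * USED = 1, and the device signals completion with AVAIL = USED = 0
 * against a used_wrap_counter of 0.
 */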

static inline bool more_used_packed(const struct vring_virtqueue *vq)
{
        return is_used_desc_packed(vq, vq->last_used_idx,
                        vq->packed.used_wrap_counter);
}

static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
                                          unsigned int *len,
                                          void **ctx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 last_used, id;
        void *ret;

        START_USE(vq);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return NULL;
        }

        if (!more_used_packed(vq)) {
                pr_debug("No more buffers in queue\n");
                END_USE(vq);
                return NULL;
        }

        /* Only get used elements after they have been exposed by host. */
        virtio_rmb(vq->weak_barriers);

        last_used = vq->last_used_idx;
        id = le16_to_cpu(vq->packed.vring.desc[last_used].id);
        *len = le32_to_cpu(vq->packed.vring.desc[last_used].len);

        if (unlikely(id >= vq->packed.vring.num)) {
                BAD_RING(vq, "id %u out of range\n", id);
                return NULL;
        }
        if (unlikely(!vq->packed.desc_state[id].data)) {
                BAD_RING(vq, "id %u is not a head!\n", id);
                return NULL;
        }

        /* detach_buf_packed clears data, so grab it now. */
        ret = vq->packed.desc_state[id].data;
        detach_buf_packed(vq, id, ctx);

        vq->last_used_idx += vq->packed.desc_state[id].num;
        if (unlikely(vq->last_used_idx >= vq->packed.vring.num)) {
                vq->last_used_idx -= vq->packed.vring.num;
                vq->packed.used_wrap_counter ^= 1;
        }

        /*
         * If we expect an interrupt for the next entry, tell host
         * by writing event index and flush out the write before
         * the read in the next get_buf call.
         */
        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DESC)
                virtio_store_mb(vq->weak_barriers,
                                &vq->packed.vring.driver->off_wrap,
                                cpu_to_le16(vq->last_used_idx |
                                        (vq->packed.used_wrap_counter <<
                                         VRING_PACKED_EVENT_F_WRAP_CTR)));

        LAST_ADD_TIME_INVALID(vq);

        END_USE(vq);
        return ret;
}

static void virtqueue_disable_cb_packed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (vq->packed.event_flags_shadow != VRING_PACKED_EVENT_FLAG_DISABLE) {
                vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
                vq->packed.vring.driver->flags =
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }
}

static unsigned virtqueue_enable_cb_prepare_packed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        START_USE(vq);

        /*
         * We optimistically turn back on interrupts, then check if there was
         * more to do.
         */

        if (vq->event) {
                vq->packed.vring.driver->off_wrap =
                        cpu_to_le16(vq->last_used_idx |
                                (vq->packed.used_wrap_counter <<
                                 VRING_PACKED_EVENT_F_WRAP_CTR));
                /*
                 * We need to update event offset and event wrap
                 * counter first before updating event flags.
                 */
                virtio_wmb(vq->weak_barriers);
        }

        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
                vq->packed.event_flags_shadow = vq->event ?
                                VRING_PACKED_EVENT_FLAG_DESC :
                                VRING_PACKED_EVENT_FLAG_ENABLE;
                vq->packed.vring.driver->flags =
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }

        END_USE(vq);
        return vq->last_used_idx | ((u16)vq->packed.used_wrap_counter <<
                        VRING_PACKED_EVENT_F_WRAP_CTR);
}

static bool virtqueue_poll_packed(struct virtqueue *_vq, u16 off_wrap)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        bool wrap_counter;
        u16 used_idx;

        wrap_counter = off_wrap >> VRING_PACKED_EVENT_F_WRAP_CTR;
        used_idx = off_wrap & ~(1 << VRING_PACKED_EVENT_F_WRAP_CTR);

        return is_used_desc_packed(vq, used_idx, wrap_counter);
}

static bool virtqueue_enable_cb_delayed_packed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        u16 used_idx, wrap_counter;
        u16 bufs;

        START_USE(vq);

        /*
         * We optimistically turn back on interrupts, then check if there was
         * more to do.
         */

        if (vq->event) {
                /* TODO: tune this threshold */
                bufs = (vq->packed.vring.num - vq->vq.num_free) * 3 / 4;
                wrap_counter = vq->packed.used_wrap_counter;

                used_idx = vq->last_used_idx + bufs;
                if (used_idx >= vq->packed.vring.num) {
                        used_idx -= vq->packed.vring.num;
                        wrap_counter ^= 1;
                }

                vq->packed.vring.driver->off_wrap = cpu_to_le16(used_idx |
                        (wrap_counter << VRING_PACKED_EVENT_F_WRAP_CTR));

                /*
                 * We need to update event offset and event wrap
                 * counter first before updating event flags.
                 */
                virtio_wmb(vq->weak_barriers);
        }

        if (vq->packed.event_flags_shadow == VRING_PACKED_EVENT_FLAG_DISABLE) {
                vq->packed.event_flags_shadow = vq->event ?
                                VRING_PACKED_EVENT_FLAG_DESC :
                                VRING_PACKED_EVENT_FLAG_ENABLE;
                vq->packed.vring.driver->flags =
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }

        /*
         * We need to update event suppression structure first
         * before re-checking for more used buffers.
         */
        virtio_mb(vq->weak_barriers);

        if (is_used_desc_packed(vq,
                                vq->last_used_idx,
                                vq->packed.used_wrap_counter)) {
                END_USE(vq);
                return false;
        }

        END_USE(vq);
        return true;
}

static void *virtqueue_detach_unused_buf_packed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        unsigned int i;
        void *buf;

        START_USE(vq);

        for (i = 0; i < vq->packed.vring.num; i++) {
                if (!vq->packed.desc_state[i].data)
                        continue;
                /* detach_buf clears data, so grab it now. */
                buf = vq->packed.desc_state[i].data;
                detach_buf_packed(vq, i, NULL);
                END_USE(vq);
                return buf;
        }
        /* That should have freed everything. */
        BUG_ON(vq->vq.num_free != vq->packed.vring.num);

        END_USE(vq);
        return NULL;
}

static struct virtqueue *vring_create_virtqueue_packed(
        unsigned int index,
        unsigned int num,
        unsigned int vring_align,
        struct virtio_device *vdev,
        bool weak_barriers,
        bool may_reduce_num,
        bool context,
        bool (*notify)(struct virtqueue *),
        void (*callback)(struct virtqueue *),
        const char *name)
{
        struct vring_virtqueue *vq;
        struct vring_packed_desc *ring;
        struct vring_packed_desc_event *driver, *device;
        dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
        size_t ring_size_in_bytes, event_size_in_bytes;
        unsigned int i;

        ring_size_in_bytes = num * sizeof(struct vring_packed_desc);

        ring = vring_alloc_queue(vdev, ring_size_in_bytes,
                                 &ring_dma_addr,
                                 GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
        if (!ring)
                goto err_ring;

        event_size_in_bytes = sizeof(struct vring_packed_desc_event);

        driver = vring_alloc_queue(vdev, event_size_in_bytes,
                                   &driver_event_dma_addr,
                                   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
        if (!driver)
                goto err_driver;

        device = vring_alloc_queue(vdev, event_size_in_bytes,
                                   &device_event_dma_addr,
                                   GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
        if (!device)
                goto err_device;

        vq = kmalloc(sizeof(*vq), GFP_KERNEL);
        if (!vq)
                goto err_vq;

        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = num;
        vq->vq.index = index;
        vq->we_own_ring = true;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        vq->packed_ring = true;
        vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
                !context;
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
                vq->weak_barriers = false;

        vq->packed.ring_dma_addr = ring_dma_addr;
        vq->packed.driver_event_dma_addr = driver_event_dma_addr;
        vq->packed.device_event_dma_addr = device_event_dma_addr;

        vq->packed.ring_size_in_bytes = ring_size_in_bytes;
        vq->packed.event_size_in_bytes = event_size_in_bytes;

        vq->packed.vring.num = num;
        vq->packed.vring.desc = ring;
        vq->packed.vring.driver = driver;
        vq->packed.vring.device = device;

        vq->packed.next_avail_idx = 0;
        vq->packed.avail_wrap_counter = 1;
        vq->packed.used_wrap_counter = 1;
        vq->packed.event_flags_shadow = 0;
        vq->packed.avail_used_flags = 1 << VRING_PACKED_DESC_F_AVAIL;

        vq->packed.desc_state = kmalloc_array(num,
                        sizeof(struct vring_desc_state_packed),
                        GFP_KERNEL);
        if (!vq->packed.desc_state)
                goto err_desc_state;

        memset(vq->packed.desc_state, 0,
                num * sizeof(struct vring_desc_state_packed));

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < num-1; i++)
                vq->packed.desc_state[i].next = i + 1;

        vq->packed.desc_extra = kmalloc_array(num,
                        sizeof(struct vring_desc_extra_packed),
                        GFP_KERNEL);
        if (!vq->packed.desc_extra)
                goto err_desc_extra;

        memset(vq->packed.desc_extra, 0,
                num * sizeof(struct vring_desc_extra_packed));

        /* No callback? Tell other side not to bother us. */
        if (!callback) {
                vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
                vq->packed.vring.driver->flags =
                        cpu_to_le16(vq->packed.event_flags_shadow);
        }

        list_add_tail(&vq->vq.list, &vdev->vqs);
        return &vq->vq;

err_desc_extra:
        kfree(vq->packed.desc_state);
err_desc_state:
        kfree(vq);
err_vq:
        vring_free_queue(vdev, event_size_in_bytes, device, device_event_dma_addr);
err_device:
        vring_free_queue(vdev, event_size_in_bytes, driver, driver_event_dma_addr);
err_driver:
        vring_free_queue(vdev, ring_size_in_bytes, ring, ring_dma_addr);
err_ring:
        return NULL;
}


/*
 * Generic functions and exported symbols.
 */

static inline int virtqueue_add(struct virtqueue *_vq,
                                struct scatterlist *sgs[],
                                unsigned int total_sg,
                                unsigned int out_sgs,
                                unsigned int in_sgs,
                                void *data,
                                void *ctx,
                                gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? virtqueue_add_packed(_vq, sgs, total_sg,
                                        out_sgs, in_sgs, data, ctx, gfp) :
                                 virtqueue_add_split(_vq, sgs, total_sg,
                                        out_sgs, in_sgs, data, ctx, gfp);
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
                      struct scatterlist *sgs[],
                      unsigned int out_sgs,
                      unsigned int in_sgs,
                      void *data,
                      gfp_t gfp)
{
        unsigned int i, total_sg = 0;

        /* Count them first. */
        for (i = 0; i < out_sgs + in_sgs; i++) {
                struct scatterlist *sg;

                for (sg = sgs[i]; sg; sg = sg_next(sg))
                        total_sg++;
        }
        return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
                             data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
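
/*
 * Usage sketch (hypothetical caller, not part of this file): queue a
 * request header the device reads, followed by a status byte the
 * device writes, then kick. The req structure and its fields are
 * illustrative stand-ins for a driver's own request type.
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &status;
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, req, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */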

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
                         struct scatterlist *sg, unsigned int num,
                         void *data,
                         gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
                        struct scatterlist *sg, unsigned int num,
                        void *data,
                        gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
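
/*
 * Usage sketch (hypothetical receive path): keep the device stocked
 * with writable buffers and kick once per batch.
 * alloc_recv_buf()/free_recv_buf()/BUF_LEN are stand-ins for the
 * driver's own buffer management, not functions defined here.
 *
 *	struct scatterlist sg;
 *
 *	while ((buf = alloc_recv_buf()) != NULL) {
 *		sg_init_one(&sg, buf, BUF_LEN);
 *		if (virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL) < 0) {
 *			free_recv_buf(buf);
 *			break;
 *		}
 *	}
 *	virtqueue_kick(vq);
 */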

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (ie. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
                            struct scatterlist *sg, unsigned int num,
                            void *data,
                            void *ctx,
                            gfp_t gfp)
{
        return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? virtqueue_kick_prepare_packed(_vq) :
                                 virtqueue_kick_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
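
/*
 * Sketch of the intended split (hypothetical caller): prepare under
 * the lock that serializes queue operations, notify after dropping
 * it, since the notification itself may be slow (e.g. a VM exit).
 * vq_lock, sg and buf are the caller's own, shown for illustration.
 *
 *	spin_lock_irqsave(&vq_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, buf, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&vq_lock, flags);
 *	if (kick)
 *		virtqueue_notify(vq);
 */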

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (unlikely(vq->broken))
                return false;

        /* Prod other side to tell it about changes. */
        if (!vq->notify(_vq)) {
                vq->broken = true;
                return false;
        }
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
        if (virtqueue_kick_prepare(vq))
                return virtqueue_notify(vq);
        return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written. This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
                            void **ctx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? virtqueue_get_buf_ctx_packed(_vq, len, ctx) :
                                 virtqueue_get_buf_ctx_split(_vq, len, ctx);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
        return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
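
/*
 * Usage sketch (hypothetical completion path): drain everything the
 * device has marked used. complete_request() stands in for the
 * driver's own handling; @len is only meaningful for buffers the
 * device wrote into.
 *
 *	unsigned int len;
 *	void *buf;
 *
 *	while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *		complete_request(buf, len);
 */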

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (vq->packed_ring)
                virtqueue_disable_cb_packed(_vq);
        else
                virtqueue_disable_cb_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? virtqueue_enable_cb_prepare_packed(_vq) :
                                 virtqueue_enable_cb_prepare_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);
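
/*
 * Usage sketch (hypothetical poll loop): close the race between
 * "no more work" and "interrupts back on" with the prepare/poll pair.
 * If a buffer became used between the last get_buf and re-enabling,
 * virtqueue_poll() reports it and the caller goes back to processing.
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		goto process_more;
 *	}
 */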

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (unlikely(vq->broken))
                return false;

        virtio_mb(vq->weak_barriers);
        return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
                                 virtqueue_poll_split(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
        unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);

        return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
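
/*
 * Usage sketch (hypothetical virtqueue callback): process used
 * buffers with callbacks off, then re-enable and re-check; a false
 * return from virtqueue_enable_cb() means buffers arrived in the
 * window and another pass is needed. handle() is a stand-in.
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
 *			handle(buf, len);
 *	} while (!virtqueue_enable_cb(vq));
 */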

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? virtqueue_enable_cb_delayed_packed(_vq) :
                                 virtqueue_enable_cb_delayed_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
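
/*
 * Usage sketch (hypothetical transmit path): after reclaiming
 * completed buffers, ask for an interrupt only once most of the
 * in-flight buffers are done rather than on the very next one;
 * a false return means more completed meanwhile, so reclaim again.
 * free_old_xmit_bufs() is a stand-in for the driver's own cleanup.
 *
 *	free_old_xmit_bufs(vq);
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		free_old_xmit_bufs(vq);
 */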

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? virtqueue_detach_unused_buf_packed(_vq) :
                                 virtqueue_detach_unused_buf_split(_vq);
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
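
/*
 * Usage sketch (hypothetical device teardown): once the device has
 * been stopped, reclaim buffers that were queued but never used.
 * free_buf() stands in for the driver's own release function.
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_buf(buf);
 */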

static inline bool more_used(const struct vring_virtqueue *vq)
{
        return vq->packed_ring ? more_used_packed(vq) : more_used_split(vq);
}

irqreturn_t vring_interrupt(int irq, void *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (!more_used(vq)) {
                pr_debug("virtqueue interrupt with no work for %p\n", vq);
                return IRQ_NONE;
        }

        if (unlikely(vq->broken))
                return IRQ_HANDLED;

        pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
        if (vq->vq.callback)
                vq->vq.callback(&vq->vq);

        return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

/* Only available for split ring */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
                                        struct vring vring,
                                        struct virtio_device *vdev,
                                        bool weak_barriers,
                                        bool context,
                                        bool (*notify)(struct virtqueue *),
                                        void (*callback)(struct virtqueue *),
                                        const char *name)
{
        unsigned int i;
        struct vring_virtqueue *vq;

        if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
                return NULL;

        vq = kmalloc(sizeof(*vq), GFP_KERNEL);
        if (!vq)
                return NULL;

        vq->packed_ring = false;
        vq->vq.callback = callback;
        vq->vq.vdev = vdev;
        vq->vq.name = name;
        vq->vq.num_free = vring.num;
        vq->vq.index = index;
        vq->we_own_ring = false;
        vq->notify = notify;
        vq->weak_barriers = weak_barriers;
        vq->broken = false;
        vq->last_used_idx = 0;
        vq->num_added = 0;
        vq->use_dma_api = vring_use_dma_api(vdev);
#ifdef DEBUG
        vq->in_use = false;
        vq->last_add_time_valid = false;
#endif

        vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
                !context;
        vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

        if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM))
                vq->weak_barriers = false;

        vq->split.queue_dma_addr = 0;
        vq->split.queue_size_in_bytes = 0;

        vq->split.vring = vring;
        vq->split.avail_flags_shadow = 0;
        vq->split.avail_idx_shadow = 0;

        /* No callback? Tell other side not to bother us. */
        if (!callback) {
                vq->split.avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
                if (!vq->event)
                        vq->split.vring.avail->flags = cpu_to_virtio16(vdev,
                                        vq->split.avail_flags_shadow);
        }

        vq->split.desc_state = kmalloc_array(vring.num,
                        sizeof(struct vring_desc_state_split), GFP_KERNEL);
        if (!vq->split.desc_state) {
                kfree(vq);
                return NULL;
        }

        /* Put everything in free lists. */
        vq->free_head = 0;
        for (i = 0; i < vring.num-1; i++)
                vq->split.vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
        memset(vq->split.desc_state, 0, vring.num *
                        sizeof(struct vring_desc_state_split));

        list_add_tail(&vq->vq.list, &vdev->vqs);
        return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

struct virtqueue *vring_create_virtqueue(
        unsigned int index,
        unsigned int num,
        unsigned int vring_align,
        struct virtio_device *vdev,
        bool weak_barriers,
        bool may_reduce_num,
        bool context,
        bool (*notify)(struct virtqueue *),
        void (*callback)(struct virtqueue *),
        const char *name)
{
        if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
                return vring_create_virtqueue_packed(index, num, vring_align,
                                vdev, weak_barriers, may_reduce_num,
                                context, notify, callback, name);

        return vring_create_virtqueue_split(index, num, vring_align,
                        vdev, weak_barriers, may_reduce_num,
                        context, notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);

/* Only available for split ring */
struct virtqueue *vring_new_virtqueue(unsigned int index,
                                      unsigned int num,
                                      unsigned int vring_align,
                                      struct virtio_device *vdev,
                                      bool weak_barriers,
                                      bool context,
                                      void *pages,
                                      bool (*notify)(struct virtqueue *vq),
                                      void (*callback)(struct virtqueue *vq),
                                      const char *name)
{
        struct vring vring;

        if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
                return NULL;

        vring_init(&vring, num, pages, vring_align);
        return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
                                     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        if (vq->we_own_ring) {
                if (vq->packed_ring) {
                        vring_free_queue(vq->vq.vdev,
                                         vq->packed.ring_size_in_bytes,
                                         vq->packed.vring.desc,
                                         vq->packed.ring_dma_addr);

                        vring_free_queue(vq->vq.vdev,
                                         vq->packed.event_size_in_bytes,
                                         vq->packed.vring.driver,
                                         vq->packed.driver_event_dma_addr);

                        vring_free_queue(vq->vq.vdev,
                                         vq->packed.event_size_in_bytes,
                                         vq->packed.vring.device,
                                         vq->packed.device_event_dma_addr);

                        kfree(vq->packed.desc_state);
                        kfree(vq->packed.desc_extra);
                } else {
                        vring_free_queue(vq->vq.vdev,
                                         vq->split.queue_size_in_bytes,
                                         vq->split.vring.desc,
                                         vq->split.queue_dma_addr);
                }
        }
        if (!vq->packed_ring)
                kfree(vq->split.desc_state);
        list_del(&_vq->list);
        kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
        unsigned int i;

        for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
                switch (i) {
                case VIRTIO_RING_F_INDIRECT_DESC:
                        break;
                case VIRTIO_RING_F_EVENT_IDX:
                        break;
                case VIRTIO_F_VERSION_1:
                        break;
                case VIRTIO_F_ACCESS_PLATFORM:
                        break;
                case VIRTIO_F_RING_PACKED:
                        break;
                case VIRTIO_F_ORDER_PLATFORM:
                        break;
                default:
                        /* We don't understand this bit. */
                        __virtio_clear_bit(vdev, i);
                }
        }
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring. This is mainly used for boasting to
 * userspace. Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return vq->packed_ring ? vq->packed.vring.num : vq->split.vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        return READ_ONCE(vq->broken);
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover. You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
        struct virtqueue *_vq;

        list_for_each_entry(_vq, &dev->vqs, list) {
                struct vring_virtqueue *vq = to_vvq(_vq);

                /* Pairs with READ_ONCE() in virtqueue_is_broken(). */
                WRITE_ONCE(vq->broken, true);
        }
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        BUG_ON(!vq->we_own_ring);

        if (vq->packed_ring)
                return vq->packed.ring_dma_addr;

        return vq->split.queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        BUG_ON(!vq->we_own_ring);

        if (vq->packed_ring)
                return vq->packed.driver_event_dma_addr;

        return vq->split.queue_dma_addr +
                ((char *)vq->split.vring.avail - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
        struct vring_virtqueue *vq = to_vvq(_vq);

        BUG_ON(!vq->we_own_ring);

        if (vq->packed_ring)
                return vq->packed.device_event_dma_addr;

        return vq->split.queue_dma_addr +
                ((char *)vq->split.vring.used - (char *)vq->split.vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

/* Only available for split ring */
const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
        return &to_vvq(vq)->split.vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");