// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers for the host side of a virtio ring.
 *
 * Since these may be in userspace, we use (inline) accessors.
 */
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/vringh.h>
#include <linux/virtio_ring.h>
#include <linux/kernel.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/export.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/bvec.h>
#include <linux/highmem.h>
#include <linux/vhost_iotlb.h>
#endif
#include <uapi/linux/virtio_config.h>

static __printf(1,2) __cold void vringh_bad(const char *fmt, ...)
{
	static DEFINE_RATELIMIT_STATE(vringh_rs,
				      DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);
	if (__ratelimit(&vringh_rs)) {
		va_list ap;
		va_start(ap, fmt);
		printk(KERN_NOTICE "vringh:");
		vprintk(fmt, ap);
		va_end(ap);
	}
}

/* Returns vring->num if empty, -ve on error. */
static inline int __vringh_get_head(const struct vringh *vrh,
				    int (*getu16)(const struct vringh *vrh,
						  u16 *val, const __virtio16 *p),
				    u16 *last_avail_idx)
{
	u16 avail_idx, i, head;
	int err;

	err = getu16(vrh, &avail_idx, &vrh->vring.avail->idx);
	if (err) {
		vringh_bad("Failed to access avail idx at %p",
			   &vrh->vring.avail->idx);
		return err;
	}

	if (*last_avail_idx == avail_idx)
		return vrh->vring.num;

	/* Only get avail ring entries after they have been exposed by guest. */
	virtio_rmb(vrh->weak_barriers);

	i = *last_avail_idx & (vrh->vring.num - 1);

	err = getu16(vrh, &head, &vrh->vring.avail->ring[i]);
	if (err) {
		vringh_bad("Failed to read head: idx %d address %p",
			   *last_avail_idx, &vrh->vring.avail->ring[i]);
		return err;
	}

	if (head >= vrh->vring.num) {
		vringh_bad("Guest says index %u > %u is available",
			   head, vrh->vring.num);
		return -EINVAL;
	}

	(*last_avail_idx)++;
	return head;
}

/* Copy some bytes to/from the iovec.  Returns num copied. */
static inline ssize_t vringh_iov_xfer(struct vringh *vrh,
				      struct vringh_kiov *iov,
				      void *ptr, size_t len,
				      int (*xfer)(const struct vringh *vrh,
						  void *addr, void *ptr,
						  size_t len))
{
	int err, done = 0;

	while (len && iov->i < iov->used) {
		size_t partlen;

		partlen = min(iov->iov[iov->i].iov_len, len);
		err = xfer(vrh, iov->iov[iov->i].iov_base, ptr, partlen);
		if (err)
			return err;
		done += partlen;
		len -= partlen;
		ptr += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;

		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	return done;
}

/* May reduce *len if range is shorter. */
static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len,
			       struct vringh_range *range,
			       bool (*getrange)(struct vringh *,
						u64, struct vringh_range *))
{
	if (addr < range->start || addr > range->end_incl) {
		if (!getrange(vrh, addr, range))
			return false;
	}
	BUG_ON(addr < range->start || addr > range->end_incl);

	/* To end of memory? */
	if (unlikely(addr + *len == 0)) {
		if (range->end_incl == -1ULL)
			return true;
		goto truncate;
	}

	/* Otherwise, don't wrap. */
	if (addr + *len < addr) {
		vringh_bad("Wrapping descriptor %zu@0x%llx",
			   *len, (unsigned long long)addr);
		return false;
	}

	if (unlikely(addr + *len - 1 > range->end_incl))
		goto truncate;
	return true;

truncate:
	*len = range->end_incl + 1 - addr;
	return true;
}

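/*
 * Illustrative note (not from the original source): if a descriptor covers
 * addr 0x1000 with *len 0x3000 but the matching range ends at
 * end_incl 0x1fff, range_check() truncates *len to 0x1fff + 1 - 0x1000 =
 * 0x1000 and still returns true; __vringh_iov() below then notices the
 * shortened length and loops ("goto again") to map the remainder against
 * the next range.
 */
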
static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len,
				  struct vringh_range *range,
				  bool (*getrange)(struct vringh *,
						   u64, struct vringh_range *))
{
	return true;
}

/* No reason for this code to be inline. */
static int move_to_indirect(const struct vringh *vrh,
			    int *up_next, u16 *i, void *addr,
			    const struct vring_desc *desc,
			    struct vring_desc **descs, int *desc_max)
{
	u32 len;

	/* Indirect tables can't have indirect. */
	if (*up_next != -1) {
		vringh_bad("Multilevel indirect %u->%u", *up_next, *i);
		return -EINVAL;
	}

	len = vringh32_to_cpu(vrh, desc->len);
	if (unlikely(len % sizeof(struct vring_desc))) {
		vringh_bad("Strange indirect len %u", len);
		return -EINVAL;
	}

	/* We will check this when we follow it! */
	if (desc->flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT))
		*up_next = vringh16_to_cpu(vrh, desc->next);
	else
		*up_next = -2;
	*descs = addr;
	*desc_max = len / sizeof(struct vring_desc);

	/* Now, start at the first indirect. */
	*i = 0;
	return 0;
}

static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp)
{
	struct kvec *new;
	unsigned int flag, new_num = (iov->max_num & ~VRINGH_IOV_ALLOCATED) * 2;

	if (new_num < 8)
		new_num = 8;

	flag = (iov->max_num & VRINGH_IOV_ALLOCATED);
	if (flag)
		new = krealloc(iov->iov, new_num * sizeof(struct iovec), gfp);
	else {
		new = kmalloc_array(new_num, sizeof(struct iovec), gfp);
		if (new) {
			memcpy(new, iov->iov,
			       iov->max_num * sizeof(struct iovec));
			flag = VRINGH_IOV_ALLOCATED;
		}
	}
	if (!new)
		return -ENOMEM;
	iov->iov = new;
	iov->max_num = (new_num | flag);
	return 0;
}

static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next,
				       struct vring_desc **descs, int *desc_max)
{
	u16 i = *up_next;

	*up_next = -1;
	*descs = vrh->vring.desc;
	*desc_max = vrh->vring.num;
	return i;
}

static int slow_copy(struct vringh *vrh, void *dst, const void *src,
		     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
				    struct vringh_range *range,
				    bool (*getrange)(struct vringh *vrh,
						     u64,
						     struct vringh_range *)),
		     bool (*getrange)(struct vringh *vrh,
				      u64 addr,
				      struct vringh_range *r),
		     struct vringh_range *range,
		     int (*copy)(const struct vringh *vrh,
				 void *dst, const void *src, size_t len))
{
	size_t part, len = sizeof(struct vring_desc);

	do {
		u64 addr;
		int err;

		part = len;
		addr = (u64)(unsigned long)src - range->offset;

		if (!rcheck(vrh, addr, &part, range, getrange))
			return -EINVAL;

		err = copy(vrh, dst, src, part);
		if (err)
			return err;

		dst += part;
		src += part;
		len -= part;
	} while (len);
	return 0;
}

static inline int
__vringh_iov(struct vringh *vrh, u16 i,
	     struct vringh_kiov *riov,
	     struct vringh_kiov *wiov,
	     bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len,
			    struct vringh_range *range,
			    bool (*getrange)(struct vringh *, u64,
					     struct vringh_range *)),
	     bool (*getrange)(struct vringh *, u64, struct vringh_range *),
	     gfp_t gfp,
	     int (*copy)(const struct vringh *vrh,
			 void *dst, const void *src, size_t len))
{
	int err, count = 0, indirect_count = 0, up_next, desc_max;
	struct vring_desc desc, *descs;
	struct vringh_range range = { -1ULL, 0 }, slowrange;
	bool slow = false;

	/* We start traversing vring's descriptor table. */
	descs = vrh->vring.desc;
	desc_max = vrh->vring.num;
	up_next = -1;

	/* You must want something! */
	if (WARN_ON(!riov && !wiov))
		return -EINVAL;

	if (riov)
		riov->i = riov->used = 0;
	if (wiov)
		wiov->i = wiov->used = 0;

	for (;;) {
		void *addr;
		struct vringh_kiov *iov;
		size_t len;

		if (unlikely(slow))
			err = slow_copy(vrh, &desc, &descs[i], rcheck, getrange,
					&slowrange, copy);
		else
			err = copy(vrh, &desc, &descs[i], sizeof(desc));
		if (unlikely(err))
			goto fail;

		if (unlikely(desc.flags &
			     cpu_to_vringh16(vrh, VRING_DESC_F_INDIRECT))) {
			u64 a = vringh64_to_cpu(vrh, desc.addr);

			/* Make sure it's OK, and get offset. */
			len = vringh32_to_cpu(vrh, desc.len);
			if (!rcheck(vrh, a, &len, &range, getrange)) {
				err = -EINVAL;
				goto fail;
			}

			if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
				slow = true;
				/* We need to save this range to use offset */
				slowrange = range;
			}

			addr = (void *)(long)(a + range.offset);
			err = move_to_indirect(vrh, &up_next, &i, addr, &desc,
					       &descs, &desc_max);
			if (err)
				goto fail;
			continue;
		}

		if (up_next == -1)
			count++;
		else
			indirect_count++;

		if (count > vrh->vring.num || indirect_count > desc_max) {
			vringh_bad("Descriptor loop in %p", descs);
			err = -ELOOP;
			goto fail;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_WRITE))
			iov = wiov;
		else {
			iov = riov;
			if (unlikely(wiov && wiov->used)) {
				vringh_bad("Readable desc %p after writable",
					   &descs[i]);
				err = -EINVAL;
				goto fail;
			}
		}

		if (!iov) {
			vringh_bad("Unexpected %s desc",
				   !wiov ? "writable" : "readable");
			err = -EPROTO;
			goto fail;
		}

	again:
		/* Make sure it's OK, and get offset. */
		len = vringh32_to_cpu(vrh, desc.len);
		if (!rcheck(vrh, vringh64_to_cpu(vrh, desc.addr), &len, &range,
			    getrange)) {
			err = -EINVAL;
			goto fail;
		}
		addr = (void *)(unsigned long)(vringh64_to_cpu(vrh, desc.addr) +
					       range.offset);

		if (unlikely(iov->used == (iov->max_num & ~VRINGH_IOV_ALLOCATED))) {
			err = resize_iovec(iov, gfp);
			if (err)
				goto fail;
		}

		iov->iov[iov->used].iov_base = addr;
		iov->iov[iov->used].iov_len = len;
		iov->used++;

		if (unlikely(len != vringh32_to_cpu(vrh, desc.len))) {
			desc.len = cpu_to_vringh32(vrh,
				   vringh32_to_cpu(vrh, desc.len) - len);
			desc.addr = cpu_to_vringh64(vrh,
				    vringh64_to_cpu(vrh, desc.addr) + len);
			goto again;
		}

		if (desc.flags & cpu_to_vringh16(vrh, VRING_DESC_F_NEXT)) {
			i = vringh16_to_cpu(vrh, desc.next);
		} else {
			/* Just in case we need to finish traversing above. */
			if (unlikely(up_next > 0)) {
				i = return_from_indirect(vrh, &up_next,
							 &descs, &desc_max);
				slow = false;
				indirect_count = 0;
			} else
				break;
		}

		if (i >= desc_max) {
			vringh_bad("Chained index %u > %u", i, desc_max);
			err = -EINVAL;
			goto fail;
		}
	}

	return 0;

fail:
	return err;
}

static inline int __vringh_complete(struct vringh *vrh,
				    const struct vring_used_elem *used,
				    unsigned int num_used,
				    int (*putu16)(const struct vringh *vrh,
						  __virtio16 *p, u16 val),
				    int (*putused)(const struct vringh *vrh,
						   struct vring_used_elem *dst,
						   const struct vring_used_elem
						   *src, unsigned num))
{
	struct vring_used *used_ring;
	int err;
	u16 used_idx, off;

	used_ring = vrh->vring.used;
	used_idx = vrh->last_used_idx + vrh->completed;

	off = used_idx % vrh->vring.num;

	/* Compiler knows num_used == 1 sometimes, hence extra check */
	if (num_used > 1 && unlikely(off + num_used >= vrh->vring.num)) {
		u16 part = vrh->vring.num - off;
		err = putused(vrh, &used_ring->ring[off], used, part);
		if (!err)
			err = putused(vrh, &used_ring->ring[0], used + part,
				      num_used - part);
	} else
		err = putused(vrh, &used_ring->ring[off], used, num_used);

	if (err) {
		vringh_bad("Failed to write %u used entries %u at %p",
			   num_used, off, &used_ring->ring[off]);
		return err;
	}

	/* Make sure buffer is written before we update index. */
	virtio_wmb(vrh->weak_barriers);

	err = putu16(vrh, &vrh->vring.used->idx, used_idx + num_used);
	if (err) {
		vringh_bad("Failed to update used index at %p",
			   &vrh->vring.used->idx);
		return err;
	}

	vrh->completed += num_used;
	return 0;
}


static inline int __vringh_need_notify(struct vringh *vrh,
				       int (*getu16)(const struct vringh *vrh,
						     u16 *val,
						     const __virtio16 *p))
{
	bool notify;
	u16 used_event;
	int err;

	/* Flush out used index update. This is paired with the
	 * barrier that the Guest executes when enabling
	 * interrupts. */
	virtio_mb(vrh->weak_barriers);

	/* Old-style, without event indices. */
	if (!vrh->event_indices) {
		u16 flags;
		err = getu16(vrh, &flags, &vrh->vring.avail->flags);
		if (err) {
			vringh_bad("Failed to get flags at %p",
				   &vrh->vring.avail->flags);
			return err;
		}
		return (!(flags & VRING_AVAIL_F_NO_INTERRUPT));
	}

	/* Modern: we know when other side wants to know. */
	err = getu16(vrh, &used_event, &vring_used_event(&vrh->vring));
	if (err) {
		vringh_bad("Failed to get used event idx at %p",
			   &vring_used_event(&vrh->vring));
		return err;
	}

	/* Just in case we added so many that we wrap. */
	if (unlikely(vrh->completed > 0xffff))
		notify = true;
	else
		notify = vring_need_event(used_event,
					  vrh->last_used_idx + vrh->completed,
					  vrh->last_used_idx);

	vrh->last_used_idx += vrh->completed;
	vrh->completed = 0;
	return notify;
}

static inline bool __vringh_notify_enable(struct vringh *vrh,
					  int (*getu16)(const struct vringh *vrh,
							u16 *val, const __virtio16 *p),
					  int (*putu16)(const struct vringh *vrh,
							__virtio16 *p, u16 val))
{
	u16 avail;

	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags, 0) != 0) {
			vringh_bad("Clearing used flags %p",
				   &vrh->vring.used->flags);
			return true;
		}
	} else {
		if (putu16(vrh, &vring_avail_event(&vrh->vring),
			   vrh->last_avail_idx) != 0) {
			vringh_bad("Updating avail event index %p",
				   &vring_avail_event(&vrh->vring));
			return true;
		}
	}

	/* They could have slipped one in as we were doing that: make
	 * sure it's written, then check again. */
	virtio_mb(vrh->weak_barriers);

	if (getu16(vrh, &avail, &vrh->vring.avail->idx) != 0) {
		vringh_bad("Failed to check avail idx at %p",
			   &vrh->vring.avail->idx);
		return true;
	}

	/* This is unlikely, so we just leave notifications enabled
	 * (if we're using event_indices, we'll only get one
	 * notification anyway). */
	return avail == vrh->last_avail_idx;
}

static inline void __vringh_notify_disable(struct vringh *vrh,
					   int (*putu16)(const struct vringh *vrh,
							 __virtio16 *p, u16 val))
{
	if (!vrh->event_indices) {
		/* Old-school; update flags. */
		if (putu16(vrh, &vrh->vring.used->flags,
			   VRING_USED_F_NO_NOTIFY)) {
			vringh_bad("Setting used flags %p",
				   &vrh->vring.used->flags);
		}
	}
}

/* Userspace access helpers: in this case, addresses are really userspace. */
static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p)
{
	__virtio16 v = 0;
	int rc = get_user(v, (__force __virtio16 __user *)p);
	*val = vringh16_to_cpu(vrh, v);
	return rc;
}

static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	__virtio16 v = cpu_to_vringh16(vrh, val);
	return put_user(v, (__force __virtio16 __user *)p);
}

static inline int copydesc_user(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int putused_user(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	return copy_to_user((__force void __user *)dst, src,
			    sizeof(*dst) * num) ? -EFAULT : 0;
}

static inline int xfer_from_user(const struct vringh *vrh, void *src,
				 void *dst, size_t len)
{
	return copy_from_user(dst, (__force void __user *)src, len) ?
		-EFAULT : 0;
}

static inline int xfer_to_user(const struct vringh *vrh,
			       void *dst, void *src, size_t len)
{
	return copy_to_user((__force void __user *)dst, src, len) ?
		-EFAULT : 0;
}

/**
 * vringh_init_user - initialize a vringh for a userspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid: you should check pointers
 * yourself!
 */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	/* vring expects kernel addresses, but only used via accessors. */
	vrh->vring.desc = (__force struct vring_desc *)desc;
	vrh->vring.avail = (__force struct vring_avail *)avail;
	vrh->vring.used = (__force struct vring_used *)used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_user);

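/*
 * Illustrative sketch (not part of the original file): a host-side user of
 * this API wires the three guest-visible ring pointers into a vringh once,
 * then serves descriptors from it.  The ring size and the userspace
 * pointers below (udesc, uavail, uused) are hypothetical.
 *
 *	struct vringh vrh;
 *	int err;
 *
 *	err = vringh_init_user(&vrh, features, 256, true,
 *			       (vring_desc_t __user *)udesc,
 *			       (vring_avail_t __user *)uavail,
 *			       (vring_used_t __user *)uused);
 *	if (err)
 *		return err;
 *
 * The feature bits matter: VIRTIO_F_VERSION_1 selects little-endian ring
 * layout and VIRTIO_RING_F_EVENT_IDX selects event-index suppression in
 * the notify helpers below.
 */
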
/**
 * vringh_getdesc_user - get next available descriptor from userspace ring.
 * @vrh: the userspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @getrange: function to call to check ranges.
 * @head: head index we received, for passing to vringh_complete_user().
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head)
{
	int err;

	*head = vrh->vring.num;
	err = __vringh_get_head(vrh, getu16_user, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	/* We need the layouts to be identical for this to work */
	BUILD_BUG_ON(sizeof(struct vringh_kiov) != sizeof(struct vringh_iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, iov) !=
		     offsetof(struct vringh_iov, iov));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, i) !=
		     offsetof(struct vringh_iov, i));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, used) !=
		     offsetof(struct vringh_iov, used));
	BUILD_BUG_ON(offsetof(struct vringh_kiov, max_num) !=
		     offsetof(struct vringh_iov, max_num));
	BUILD_BUG_ON(sizeof(struct iovec) != sizeof(struct kvec));
	BUILD_BUG_ON(offsetof(struct iovec, iov_base) !=
		     offsetof(struct kvec, iov_base));
	BUILD_BUG_ON(offsetof(struct iovec, iov_len) !=
		     offsetof(struct kvec, iov_len));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_base)
		     != sizeof(((struct kvec *)NULL)->iov_base));
	BUILD_BUG_ON(sizeof(((struct iovec *)NULL)->iov_len)
		     != sizeof(((struct kvec *)NULL)->iov_len));

	*head = err;
	err = __vringh_iov(vrh, *head, (struct vringh_kiov *)riov,
			   (struct vringh_kiov *)wiov,
			   range_check, getrange, GFP_KERNEL, copydesc_user);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_user);

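/*
 * Illustrative sketch (not part of the original file): a minimal service
 * loop over a userspace ring.  The backing arrays, "buf", "reply",
 * "reply_len" and "getrange" are hypothetical; a real user keeps riov/wiov
 * around and lets __vringh_iov() grow them (with GFP_KERNEL) when a chain
 * is longer than the on-stack arrays.
 *
 *	struct vringh_iov riov, wiov;
 *	struct iovec rstack[8], wstack[8];
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, rstack, ARRAY_SIZE(rstack));
 *	vringh_iov_init(&wiov, wstack, ARRAY_SIZE(wstack));
 *
 *	while ((err = vringh_getdesc_user(&vrh, &riov, &wiov,
 *					  getrange, &head)) == 1) {
 *		vringh_iov_pull_user(&riov, buf, sizeof(buf));
 *		(handle the request, then fill the writable buffers)
 *		vringh_iov_push_user(&wiov, reply, reply_len);
 *		vringh_complete_user(&vrh, head, reply_len);
 *	}
 */
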
/**
 * vringh_iov_pull_user - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)riov,
			       dst, len, xfer_from_user);
}
EXPORT_SYMBOL(vringh_iov_pull_user);

/**
 * vringh_iov_push_user - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, (struct vringh_kiov *)wiov,
			       (void *)src, len, xfer_to_user);
}
EXPORT_SYMBOL(vringh_iov_push_user);

/**
 * vringh_abandon_user - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. num
 *	 vringh_getdesc_user() calls to undo).
 *
 * The next vringh_getdesc_user() will return the old descriptor(s) again.
 */
void vringh_abandon_user(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_user);

/**
 * vringh_complete_user - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_user.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);
	return __vringh_complete(vrh, &used, 1, putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_user);

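/*
 * Illustrative sketch (not part of the original file): completions update
 * the used ring immediately, but the interrupt decision can be deferred and
 * batched; vrh->completed tracks how much has been published since the last
 * check.  "head", "written" and "kick_guest()" are hypothetical.
 *
 *	err = vringh_complete_user(&vrh, head, written);
 *	if (err < 0)
 *		return err;
 *	if (vringh_need_notify_user(&vrh) > 0)
 *		kick_guest();
 */
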
/**
 * vringh_complete_multi_user - we've finished with many descriptors.
 * @vrh: the vring.
 * @used: the head, length pairs.
 * @num_used: the number of used elements.
 *
 * You should check vringh_need_notify_user() after one or more calls
 * to this function.
 */
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used)
{
	return __vringh_complete(vrh, used, num_used,
				 putu16_user, putused_user);
}
EXPORT_SYMBOL(vringh_complete_multi_user);

/**
 * vringh_notify_enable_user - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_user(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_user, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_enable_user);

/**
 * vringh_notify_disable_user - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_user(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_user);
}
EXPORT_SYMBOL(vringh_notify_disable_user);

/**
 * vringh_need_notify_user - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_user() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_user(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_user);
}
EXPORT_SYMBOL(vringh_need_notify_user);

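/*
 * Illustrative sketch (not part of the original file): the usual interrupt
 * mitigation pattern with these helpers.  Notifications stay disabled while
 * the ring is being drained and are only re-enabled just before sleeping;
 * the return value of vringh_notify_enable_user() closes the race with a
 * descriptor that slipped in meanwhile.  "drain_ring()" and
 * "wait_for_guest_kick()" are hypothetical.
 *
 *	for (;;) {
 *		vringh_notify_disable_user(&vrh);
 *		drain_ring();
 *		if (!vringh_notify_enable_user(&vrh))
 *			continue;	(new buffers raced in; keep draining)
 *		wait_for_guest_kick();
 *	}
 */
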
/* Kernelspace access helpers. */
static inline int getu16_kern(const struct vringh *vrh,
			      u16 *val, const __virtio16 *p)
{
	*val = vringh16_to_cpu(vrh, READ_ONCE(*p));
	return 0;
}

static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val)
{
	WRITE_ONCE(*p, cpu_to_vringh16(vrh, val));
	return 0;
}

static inline int copydesc_kern(const struct vringh *vrh,
				void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int putused_kern(const struct vringh *vrh,
			       struct vring_used_elem *dst,
			       const struct vring_used_elem *src,
			       unsigned int num)
{
	memcpy(dst, src, num * sizeof(*dst));
	return 0;
}

static inline int xfer_kern(const struct vringh *vrh, void *src,
			    void *dst, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

static inline int kern_xfer(const struct vringh *vrh, void *dst,
			    void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/**
 * vringh_init_kern - initialize a vringh for a kernelspace vring.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the kernelspace descriptor pointer.
 * @avail: the kernelspace avail pointer.
 * @used: the kernelspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used)
{
	/* Sane power of 2 please! */
	if (!num || num > 0xffff || (num & (num - 1))) {
		vringh_bad("Bad ring size %u", num);
		return -EINVAL;
	}

	vrh->little_endian = (features & (1ULL << VIRTIO_F_VERSION_1));
	vrh->event_indices = (features & (1 << VIRTIO_RING_F_EVENT_IDX));
	vrh->weak_barriers = weak_barriers;
	vrh->completed = 0;
	vrh->last_avail_idx = 0;
	vrh->last_used_idx = 0;
	vrh->vring.num = num;
	vrh->vring.desc = desc;
	vrh->vring.avail = avail;
	vrh->vring.used = used;
	return 0;
}
EXPORT_SYMBOL(vringh_init_kern);

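/*
 * Illustrative sketch (not part of the original file): when both sides live
 * in the kernel (e.g. a vring laid out in shared memory), the ring can be
 * described with the stock vring_init() layout and then handed to
 * vringh_init_kern().  "ring_mem", "num" and "align" are hypothetical.
 *
 *	struct vring vring;
 *	struct vringh vrh;
 *	int err;
 *
 *	vring_init(&vring, num, ring_mem, align);
 *	err = vringh_init_kern(&vrh, features, num, false,
 *			       vring.desc, vring.avail, vring.used);
 */
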
/**
 * vringh_getdesc_kern - get next available descriptor from kernelspace ring.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_kern().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_kern, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_kern);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_kern);

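/*
 * Illustrative sketch (not part of the original file): the kernel-side loop
 * mirrors the userspace one but works on vringh_kiov/kvec and lets the
 * caller pick the allocation flags.  The stack arrays, "req", "reply" and
 * "reply_len" are hypothetical.
 *
 *	struct vringh_kiov riov, wiov;
 *	struct kvec rstack[8], wstack[8];
 *	u16 head;
 *
 *	vringh_kiov_init(&riov, rstack, ARRAY_SIZE(rstack));
 *	vringh_kiov_init(&wiov, wstack, ARRAY_SIZE(wstack));
 *
 *	while (vringh_getdesc_kern(&vrh, &riov, &wiov, &head,
 *				   GFP_KERNEL) == 1) {
 *		vringh_iov_pull_kern(&riov, req, sizeof(*req));
 *		(handle the request, fill a reply)
 *		vringh_iov_push_kern(&wiov, reply, reply_len);
 *		vringh_complete_kern(&vrh, head, reply_len);
 *	}
 */
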
/**
 * vringh_iov_pull_kern - copy bytes from vring_iov.
 * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len)
{
	return vringh_iov_xfer(NULL, riov, dst, len, xfer_kern);
}
EXPORT_SYMBOL(vringh_iov_pull_kern);

/**
 * vringh_iov_push_kern - copy bytes into vring_iov.
 * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len)
{
	return vringh_iov_xfer(NULL, wiov, (void *)src, len, kern_xfer);
}
EXPORT_SYMBOL(vringh_iov_push_kern);

/**
 * vringh_abandon_kern - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (i.e. num
 *	 vringh_getdesc_kern() calls to undo).
 *
 * The next vringh_getdesc_kern() will return the old descriptor(s) again.
 */
void vringh_abandon_kern(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet. */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_kern);

/**
 * vringh_complete_kern - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_kern.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_kern() after one or more calls
 * to this function.
 */
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_kern, putused_kern);
}
EXPORT_SYMBOL(vringh_complete_kern);

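/*
 * Illustrative sketch (not part of the original file): kernel-side
 * completions can also be batched, with the interrupt decision deferred
 * until the batch is done; that is what vrh->completed tracks internally.
 * "send_interrupt()" stands in for whatever notification hook the caller
 * has (irq injection, eventfd signal, ...) and is hypothetical.
 *
 *	(for each finished request)
 *		vringh_complete_kern(&vrh, req->head, req->written);
 *
 *	if (vringh_need_notify_kern(&vrh) > 0)
 *		send_interrupt();
 */
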
1033*4882a593Smuzhiyun /**
1034*4882a593Smuzhiyun * vringh_notify_enable_kern - we want to know if something changes.
1035*4882a593Smuzhiyun * @vrh: the vring.
1036*4882a593Smuzhiyun *
1037*4882a593Smuzhiyun * This always enables notifications, but returns false if there are
1038*4882a593Smuzhiyun * now more buffers available in the vring.
1039*4882a593Smuzhiyun */
vringh_notify_enable_kern(struct vringh * vrh)1040*4882a593Smuzhiyun bool vringh_notify_enable_kern(struct vringh *vrh)
1041*4882a593Smuzhiyun {
1042*4882a593Smuzhiyun return __vringh_notify_enable(vrh, getu16_kern, putu16_kern);
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun EXPORT_SYMBOL(vringh_notify_enable_kern);
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun /**
1047*4882a593Smuzhiyun * vringh_notify_disable_kern - don't tell us if something changes.
1048*4882a593Smuzhiyun * @vrh: the vring.
1049*4882a593Smuzhiyun *
1050*4882a593Smuzhiyun * This is our normal running state: we disable and then only enable when
1051*4882a593Smuzhiyun * we're going to sleep.
1052*4882a593Smuzhiyun */
vringh_notify_disable_kern(struct vringh * vrh)1053*4882a593Smuzhiyun void vringh_notify_disable_kern(struct vringh *vrh)
1054*4882a593Smuzhiyun {
1055*4882a593Smuzhiyun __vringh_notify_disable(vrh, putu16_kern);
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun EXPORT_SYMBOL(vringh_notify_disable_kern);
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun /**
1060*4882a593Smuzhiyun * vringh_need_notify_kern - must we tell the other side about used buffers?
1061*4882a593Smuzhiyun * @vrh: the vring we've called vringh_complete_kern() on.
1062*4882a593Smuzhiyun *
1063*4882a593Smuzhiyun * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
1064*4882a593Smuzhiyun */
vringh_need_notify_kern(struct vringh * vrh)1065*4882a593Smuzhiyun int vringh_need_notify_kern(struct vringh *vrh)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun return __vringh_need_notify(vrh, getu16_kern);
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun EXPORT_SYMBOL(vringh_need_notify_kern);

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

static int iotlb_translate(const struct vringh *vrh,
			   u64 addr, u64 len, struct bio_vec iov[],
			   int iov_size, u32 perm)
{
	struct vhost_iotlb_map *map;
	struct vhost_iotlb *iotlb = vrh->iotlb;
	int ret = 0;
	u64 s = 0;

	while (len > s) {
		u64 size, pa, pfn;

		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}

		map = vhost_iotlb_itree_first(iotlb, addr,
					      addr + len - 1);
		if (!map || map->start > addr) {
			ret = -EINVAL;
			break;
		} else if (!(map->perm & perm)) {
			ret = -EPERM;
			break;
		}

		size = map->size - addr + map->start;
		pa = map->addr + addr - map->start;
		pfn = pa >> PAGE_SHIFT;
		iov[ret].bv_page = pfn_to_page(pfn);
		iov[ret].bv_len = min(len - s, size);
		iov[ret].bv_offset = pa & (PAGE_SIZE - 1);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}
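
/*
 * Worked example (added for illustration; all values are hypothetical and
 * PAGE_SIZE is assumed to be 4096): suppose the IOTLB holds two mappings,
 * [0x0000, 0x4fff] -> PA 0x10000 and [0x5000, 0x8fff] -> PA 0x40000, and a
 * caller asks for addr = 0x4800, len = 0x1000.  The first iteration matches
 * the first mapping and emits the bio_vec
 * { page of PA 0x14000, offset 0x800, len 0x800 }; the second iteration
 * matches the second mapping and emits
 * { page of PA 0x40000, offset 0x0, len 0x800 }; iotlb_translate() then
 * returns 2.  If more than iov_size entries would be needed the caller gets
 * -ENOBUFS; a hole or a permission mismatch yields -EINVAL or -EPERM.
 */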

static inline int copy_from_iotlb(const struct vringh *vrh, void *dst,
				  void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)src,
			      len, iov, 16, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, READ, iov, ret, len);

	ret = copy_from_iter(dst, len, &iter);

	return ret;
}

static inline int copy_to_iotlb(const struct vringh *vrh, void *dst,
				void *src, size_t len)
{
	struct iov_iter iter;
	struct bio_vec iov[16];
	int ret;

	ret = iotlb_translate(vrh, (u64)(uintptr_t)dst,
			      len, iov, 16, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	iov_iter_bvec(&iter, WRITE, iov, ret, len);

	return copy_to_iter(src, len, &iter);
}

static inline int getu16_iotlb(const struct vringh *vrh,
			       u16 *val, const __virtio16 *p)
{
	struct bio_vec iov;
	void *kaddr, *from;
	int ret;

	/* Atomic read is needed for getu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_RO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	from = kaddr + iov.bv_offset;
	*val = vringh16_to_cpu(vrh, READ_ONCE(*(__virtio16 *)from));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int putu16_iotlb(const struct vringh *vrh,
			       __virtio16 *p, u16 val)
{
	struct bio_vec iov;
	void *kaddr, *to;
	int ret;

	/* Atomic write is needed for putu16 */
	ret = iotlb_translate(vrh, (u64)(uintptr_t)p, sizeof(*p),
			      &iov, 1, VHOST_MAP_WO);
	if (ret < 0)
		return ret;

	kaddr = kmap_atomic(iov.bv_page);
	to = kaddr + iov.bv_offset;
	WRITE_ONCE(*(__virtio16 *)to, cpu_to_vringh16(vrh, val));
	kunmap_atomic(kaddr);

	return 0;
}

static inline int copydesc_iotlb(const struct vringh *vrh,
				 void *dst, const void *src, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, (void *)src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_from_iotlb(const struct vringh *vrh, void *src,
				  void *dst, size_t len)
{
	int ret;

	ret = copy_from_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int xfer_to_iotlb(const struct vringh *vrh,
				void *dst, void *src, size_t len)
{
	int ret;

	ret = copy_to_iotlb(vrh, dst, src, len);
	if (ret != len)
		return -EFAULT;

	return 0;
}

static inline int putused_iotlb(const struct vringh *vrh,
				struct vring_used_elem *dst,
				const struct vring_used_elem *src,
				unsigned int num)
{
	int size = num * sizeof(*dst);
	int ret;

	ret = copy_to_iotlb(vrh, dst, (void *)src, num * sizeof(*dst));
	if (ret != size)
		return -EFAULT;

	return 0;
}

/**
 * vringh_init_iotlb - initialize a vringh for a ring with IOTLB.
 * @vrh: the vringh to initialize.
 * @features: the feature bits for this ring.
 * @num: the number of elements.
 * @weak_barriers: true if we only need memory barriers, not I/O.
 * @desc: the userspace descriptor pointer.
 * @avail: the userspace avail pointer.
 * @used: the userspace used pointer.
 *
 * Returns an error if num is invalid.
 */
int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used)
{
	return vringh_init_kern(vrh, features, num, weak_barriers,
				desc, avail, used);
}
EXPORT_SYMBOL(vringh_init_iotlb);

/**
 * vringh_set_iotlb - set the IOTLB used to translate addresses for this vring.
 * @vrh: the vring
 * @iotlb: iotlb associated with this vring
 */
void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb)
{
	vrh->iotlb = iotlb;
}
EXPORT_SYMBOL(vringh_set_iotlb);
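
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * a typical vDPA-style setup pairs vringh_init_iotlb() with
 * vringh_set_iotlb().  The ring size (256) and the use of weak barriers are
 * hypothetical choices; desc/avail/used are the driver-supplied addresses
 * that will later be translated through the IOTLB.
 */
#if 0	/* usage sketch only, not compiled */
static int example_setup_iotlb_ring(struct vringh *vrh,
				    struct vhost_iotlb *iotlb, u64 features,
				    struct vring_desc *desc,
				    struct vring_avail *avail,
				    struct vring_used *used)
{
	int err;

	/* 256 entries; weak (SMP) barriers suffice for a software device. */
	err = vringh_init_iotlb(vrh, features, 256, true, desc, avail, used);
	if (err)
		return err;

	/* Every subsequent ring access is translated through this IOTLB. */
	vringh_set_iotlb(vrh, iotlb);
	return 0;
}
#endif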

/**
 * vringh_getdesc_iotlb - get next available descriptor from ring with
 * IOTLB.
 * @vrh: the kernelspace vring.
 * @riov: where to put the readable descriptors (or NULL)
 * @wiov: where to put the writable descriptors (or NULL)
 * @head: head index we received, for passing to vringh_complete_iotlb().
 * @gfp: flags for allocating larger riov/wiov.
 *
 * Returns 0 if there was no descriptor, 1 if there was, or -errno.
 *
 * Note that on error return, you can tell the difference between an
 * invalid ring and a single invalid descriptor: in the former case,
 * *head will be vrh->vring.num.  You may be able to ignore an invalid
 * descriptor, but there's not much you can do with an invalid ring.
 *
 * Note that you may need to clean up riov and wiov, even on error!
 */
int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp)
{
	int err;

	err = __vringh_get_head(vrh, getu16_iotlb, &vrh->last_avail_idx);
	if (err < 0)
		return err;

	/* Empty... */
	if (err == vrh->vring.num)
		return 0;

	*head = err;
	err = __vringh_iov(vrh, *head, riov, wiov, no_range_check, NULL,
			   gfp, copydesc_iotlb);
	if (err)
		return err;

	return 1;
}
EXPORT_SYMBOL(vringh_getdesc_iotlb);
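
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the usual life cycle of one request on an IOTLB ring.  The kiov array
 * sizes, the request buffer and the one-byte status reply are hypothetical;
 * the vringh_* calls are the real helpers from this file and from
 * linux/vringh.h.
 */
#if 0	/* usage sketch only, not compiled */
static int example_handle_one_iotlb(struct vringh *vrh)
{
	struct vringh_kiov riov, wiov;
	struct kvec rkvec[8], wkvec[8];
	u8 req[64], resp = 0;
	ssize_t got, put;
	u16 head;
	int err;

	vringh_kiov_init(&riov, rkvec, ARRAY_SIZE(rkvec));
	vringh_kiov_init(&wiov, wkvec, ARRAY_SIZE(wkvec));

	err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_KERNEL);
	if (err != 1)
		return err;	/* 0: ring empty, < 0: ring is broken */

	got = vringh_iov_pull_iotlb(vrh, &riov, req, sizeof(req));
	if (got < 0) {
		err = got;
		goto out;
	}

	/* ...process the request here, then push a one-byte status... */
	put = vringh_iov_push_iotlb(vrh, &wiov, &resp, sizeof(resp));
	if (put < 0) {
		err = put;
		goto out;
	}

	err = vringh_complete_iotlb(vrh, head, put);
out:
	vringh_kiov_cleanup(&riov);
	vringh_kiov_cleanup(&wiov);
	return err;
}
#endif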

/**
 * vringh_iov_pull_iotlb - copy bytes from vring_iov.
 * @vrh: the vring.
 * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @dst: the place to copy.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len)
{
	return vringh_iov_xfer(vrh, riov, dst, len, xfer_from_iotlb);
}
EXPORT_SYMBOL(vringh_iov_pull_iotlb);

/**
 * vringh_iov_push_iotlb - copy bytes into vring_iov.
 * @vrh: the vring.
 * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume)
 * @src: the place to copy from.
 * @len: the maximum length to copy.
 *
 * Returns the bytes copied <= len or a negative errno.
 */
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len)
{
	return vringh_iov_xfer(vrh, wiov, (void *)src, len, xfer_to_iotlb);
}
EXPORT_SYMBOL(vringh_iov_push_iotlb);

/**
 * vringh_abandon_iotlb - we've decided not to handle the descriptor(s).
 * @vrh: the vring.
 * @num: the number of descriptors to put back (ie. num
 *	 vringh_getdesc_iotlb() calls to undo).
 *
 * The next vringh_getdesc_iotlb() will return the old descriptor(s) again.
 */
void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num)
{
	/* We only update vring_avail_event(vr) when we want to be notified,
	 * so we haven't changed that yet.
	 */
	vrh->last_avail_idx -= num;
}
EXPORT_SYMBOL(vringh_abandon_iotlb);
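
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * the undo path.  If a descriptor was fetched but cannot be handled right
 * now, vringh_abandon_iotlb() puts it back so the next vringh_getdesc_iotlb()
 * returns the same head again.  The "busy" flag is a hypothetical stand-in
 * for whatever condition makes the caller back off.
 */
#if 0	/* usage sketch only, not compiled */
static int example_try_one_iotlb(struct vringh *vrh, bool busy)
{
	struct vringh_kiov riov, wiov;
	struct kvec rkvec[4], wkvec[4];
	u16 head;
	int err;

	vringh_kiov_init(&riov, rkvec, ARRAY_SIZE(rkvec));
	vringh_kiov_init(&wiov, wkvec, ARRAY_SIZE(wkvec));

	err = vringh_getdesc_iotlb(vrh, &riov, &wiov, &head, GFP_KERNEL);
	if (err == 1 && busy) {
		/* Put the head back; it will be offered again next time. */
		vringh_abandon_iotlb(vrh, 1);
		err = -EAGAIN;
	}

	vringh_kiov_cleanup(&riov);
	vringh_kiov_cleanup(&wiov);
	return err;
}
#endif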

/**
 * vringh_complete_iotlb - we've finished with descriptor, publish it.
 * @vrh: the vring.
 * @head: the head as filled in by vringh_getdesc_iotlb.
 * @len: the length of data we have written.
 *
 * You should check vringh_need_notify_iotlb() after one or more calls
 * to this function.
 */
int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len)
{
	struct vring_used_elem used;

	used.id = cpu_to_vringh32(vrh, head);
	used.len = cpu_to_vringh32(vrh, len);

	return __vringh_complete(vrh, &used, 1, putu16_iotlb, putused_iotlb);
}
EXPORT_SYMBOL(vringh_complete_iotlb);

/**
 * vringh_notify_enable_iotlb - we want to know if something changes.
 * @vrh: the vring.
 *
 * This always enables notifications, but returns false if there are
 * now more buffers available in the vring.
 */
bool vringh_notify_enable_iotlb(struct vringh *vrh)
{
	return __vringh_notify_enable(vrh, getu16_iotlb, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_enable_iotlb);

/**
 * vringh_notify_disable_iotlb - don't tell us if something changes.
 * @vrh: the vring.
 *
 * This is our normal running state: we disable and then only enable when
 * we're going to sleep.
 */
void vringh_notify_disable_iotlb(struct vringh *vrh)
{
	__vringh_notify_disable(vrh, putu16_iotlb);
}
EXPORT_SYMBOL(vringh_notify_disable_iotlb);

/**
 * vringh_need_notify_iotlb - must we tell the other side about used buffers?
 * @vrh: the vring we've called vringh_complete_iotlb() on.
 *
 * Returns -errno or 0 if we don't need to tell the other side, 1 if we do.
 */
int vringh_need_notify_iotlb(struct vringh *vrh)
{
	return __vringh_need_notify(vrh, getu16_iotlb);
}
EXPORT_SYMBOL(vringh_need_notify_iotlb);
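
/*
 * Illustrative sketch (added for this edit, not part of the original file):
 * suppressing redundant kicks.  vringh_need_notify_iotlb() is meant to be
 * checked after one or more vringh_complete_iotlb() calls, so the other side
 * is only notified when it asked for it (or, with VIRTIO_RING_F_EVENT_IDX,
 * when the used index crosses the event threshold).  The kick itself is
 * transport specific, so it is only indicated by a comment.
 */
#if 0	/* usage sketch only, not compiled */
static int example_publish_iotlb(struct vringh *vrh, u16 head, u32 written)
{
	int err;

	err = vringh_complete_iotlb(vrh, head, written);
	if (err)
		return err;

	err = vringh_need_notify_iotlb(vrh);
	if (err < 0)
		return err;
	if (err) {
		/* Kick the other side here (doorbell, irq injection, ...). */
	}
	return 0;
}
#endif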

#endif

MODULE_LICENSE("GPL");