/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Linux host-side vring helpers; for when the kernel needs to access
 * someone else's vring.
 *
 * Copyright IBM Corporation, 2013.
 * Parts taken from drivers/vhost/vhost.c Copyright 2009 Red Hat, Inc.
 *
 * Written by: Rusty Russell <rusty@rustcorp.com.au>
 */
#ifndef _LINUX_VRINGH_H
#define _LINUX_VRINGH_H
#include <uapi/linux/virtio_ring.h>
#include <linux/virtio_byteorder.h>
#include <linux/uio.h>
#include <linux/slab.h>
#if IS_REACHABLE(CONFIG_VHOST_IOTLB)
#include <linux/dma-direction.h>
#include <linux/vhost_iotlb.h>
#endif
#include <asm/barrier.h>

/* virtio_ring with information needed for host access. */
struct vringh {
	/* Everything is little endian */
	bool little_endian;

	/* Guest publishes used event idx (note: we always do). */
	bool event_indices;

	/* Can we get away with weak barriers? */
	bool weak_barriers;

	/* Last available index we saw (ie. where we're up to). */
	u16 last_avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* How many descriptors we've completed since last need_notify(). */
	u32 completed;

	/* The vring (note: it may contain user pointers!) */
	struct vring vring;

	/* IOTLB for this vring */
	struct vhost_iotlb *iotlb;

	/* The function to call to notify the guest about added buffers */
	void (*notify)(struct vringh *);
};

/**
 * struct vringh_config_ops - ops for creating a host vring from a virtio driver
 * @find_vrhs: find the host vrings and instantiate them
 *	vdev: the virtio_device
 *	nhvrs: the number of host vrings to find
 *	hvrs: on success, filled with the new host vrings
 *	callbacks: array of driver callbacks, one per host vring;
 *		include a NULL entry for vqs that do not need a callback
 *	Returns 0 on success or a negative error status
 * @del_vrhs: free the host vrings found by find_vrhs().
 */
struct virtio_device;
typedef void vrh_callback_t(struct virtio_device *, struct vringh *);
struct vringh_config_ops {
	int (*find_vrhs)(struct virtio_device *vdev, unsigned nhvrs,
			 struct vringh *vrhs[], vrh_callback_t *callbacks[]);
	void (*del_vrhs)(struct virtio_device *vdev);
};
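
/*
 * Illustrative sketch (not part of this header): a transport driver that
 * exposes host-side vrings might wire these ops up roughly as below; the
 * my_transport_* names are hypothetical.
 *
 *	static int my_transport_find_vrhs(struct virtio_device *vdev,
 *					  unsigned nhvrs,
 *					  struct vringh *vrhs[],
 *					  vrh_callback_t *callbacks[])
 *	{
 *		// For each of the nhvrs rings: locate the ring memory,
 *		// initialise it with vringh_init_kern(), publish it via
 *		// vrhs[i], and stash callbacks[i] to invoke on a kick.
 *		return 0;
 *	}
 *
 *	static const struct vringh_config_ops my_transport_vringh_ops = {
 *		.find_vrhs = my_transport_find_vrhs,
 *		.del_vrhs  = my_transport_del_vrhs,
 *	};
 */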

/* The memory the vring can access, and what offset to apply. */
struct vringh_range {
	u64 start, end_incl;
	u64 offset;
};

/**
 * struct vringh_iov - iovec mangler.
 *
 * Mangles iovec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_iov {
	struct iovec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/**
 * struct vringh_kiov - kvec mangler.
 *
 * Mangles kvec in place, and restores it.
 * Remaining data is iov + i, of used - i elements.
 */
struct vringh_kiov {
	struct kvec *iov;
	size_t consumed; /* Within iov[i] */
	unsigned i, used, max_num;
};

/* Flag on max_num to indicate the iov array was kmalloced. */
#define VRINGH_IOV_ALLOCATED 0x8000000

/* Helpers for userspace vrings. */
int vringh_init_user(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     vring_desc_t __user *desc,
		     vring_avail_t __user *avail,
		     vring_used_t __user *used);

static inline void vringh_iov_init(struct vringh_iov *iov,
				   struct iovec *iovec, unsigned num)
{
	iov->used = iov->i = 0;
	iov->consumed = 0;
	iov->max_num = num;
	iov->iov = iovec;
}

static inline void vringh_iov_reset(struct vringh_iov *iov)
{
	iov->iov[iov->i].iov_len += iov->consumed;
	iov->iov[iov->i].iov_base -= iov->consumed;
	iov->consumed = 0;
	iov->i = 0;
}

static inline void vringh_iov_cleanup(struct vringh_iov *iov)
{
	if (iov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(iov->iov);
	iov->max_num = iov->used = iov->i = iov->consumed = 0;
	iov->iov = NULL;
}
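
/*
 * Illustrative lifecycle sketch (assumption: a caller-owned iovec array on
 * the stack; not part of this header):
 *
 *	struct iovec stack_iov[4];
 *	struct vringh_iov riov;
 *
 *	vringh_iov_init(&riov, stack_iov, ARRAY_SIZE(stack_iov));
 *	// ... vringh_getdesc_user() fills riov, pull helpers consume it ...
 *	vringh_iov_reset(&riov);	// restore iov[i] and rewind to element 0
 *	vringh_iov_cleanup(&riov);	// kfree()s iov only if getdesc had to
 *					// allocate (VRINGH_IOV_ALLOCATED)
 */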

/* Convert a descriptor into iovecs. */
int vringh_getdesc_user(struct vringh *vrh,
			struct vringh_iov *riov,
			struct vringh_iov *wiov,
			bool (*getrange)(struct vringh *vrh,
					 u64 addr, struct vringh_range *r),
			u16 *head);
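
/*
 * Illustrative getrange() sketch (assumptions: one guest memory region of
 * guest_size bytes at guest address guest_base, remapped into this process
 * at a fixed delta host_offset; all three names are hypothetical):
 *
 *	static bool my_getrange(struct vringh *vrh, u64 addr,
 *				struct vringh_range *r)
 *	{
 *		if (addr < guest_base || addr >= guest_base + guest_size)
 *			return false;	// descriptor points outside guest RAM
 *		r->start = guest_base;
 *		r->end_incl = guest_base + guest_size - 1;
 *		r->offset = host_offset;	// applied to descriptor addresses
 *		return true;
 *	}
 */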

/* Copy bytes from readable vsg, consuming it (and incrementing riov->i). */
ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len);

/* Copy bytes into writable vsg, consuming it (and incrementing wiov->i). */
ssize_t vringh_iov_push_user(struct vringh_iov *wiov,
			     const void *src, size_t len);

/* Mark a descriptor as used. */
int vringh_complete_user(struct vringh *vrh, u16 head, u32 len);
int vringh_complete_multi_user(struct vringh *vrh,
			       const struct vring_used_elem used[],
			       unsigned num_used);

/* Pretend we've never seen the descriptor(s) (for easy error handling). */
void vringh_abandon_user(struct vringh *vrh, unsigned int num);

/* Do we need to fire the eventfd to notify the other side? */
int vringh_need_notify_user(struct vringh *vrh);

bool vringh_notify_enable_user(struct vringh *vrh);
void vringh_notify_disable_user(struct vringh *vrh);
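
/*
 * Illustrative service-loop sketch for a userspace vring (assumptions: vrh
 * was set up with vringh_init_user(), my_getrange is the caller's range
 * callback as sketched above, and req/reply are hypothetical buffers):
 *
 *	struct vringh_iov riov, wiov;
 *	struct iovec r[8], w[8];
 *	u16 head;
 *	int err;
 *
 *	vringh_iov_init(&riov, r, ARRAY_SIZE(r));
 *	vringh_iov_init(&wiov, w, ARRAY_SIZE(w));
 *
 *	err = vringh_getdesc_user(&vrh, &riov, &wiov, my_getrange, &head);
 *	if (err == 1) {		// 1: got a descriptor, 0: ring empty, <0: error
 *		vringh_iov_pull_user(&riov, &req, sizeof(req));
 *		// ... handle the request ...
 *		vringh_iov_push_user(&wiov, &reply, sizeof(reply));
 *		vringh_complete_user(&vrh, head, sizeof(reply));
 *		if (vringh_need_notify_user(&vrh) > 0)
 *			vringh_notify(&vrh);
 *	}
 */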

/* Helpers for kernelspace vrings. */
int vringh_init_kern(struct vringh *vrh, u64 features,
		     unsigned int num, bool weak_barriers,
		     struct vring_desc *desc,
		     struct vring_avail *avail,
		     struct vring_used *used);

static inline void vringh_kiov_init(struct vringh_kiov *kiov,
				    struct kvec *kvec, unsigned num)
{
	kiov->used = kiov->i = 0;
	kiov->consumed = 0;
	kiov->max_num = num;
	kiov->iov = kvec;
}

static inline void vringh_kiov_reset(struct vringh_kiov *kiov)
{
	kiov->iov[kiov->i].iov_len += kiov->consumed;
	kiov->iov[kiov->i].iov_base -= kiov->consumed;
	kiov->consumed = 0;
	kiov->i = 0;
}

static inline void vringh_kiov_cleanup(struct vringh_kiov *kiov)
{
	if (kiov->max_num & VRINGH_IOV_ALLOCATED)
		kfree(kiov->iov);
	kiov->max_num = kiov->used = kiov->i = kiov->consumed = 0;
	kiov->iov = NULL;
}

int vringh_getdesc_kern(struct vringh *vrh,
			struct vringh_kiov *riov,
			struct vringh_kiov *wiov,
			u16 *head,
			gfp_t gfp);

ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len);
ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov,
			     const void *src, size_t len);
void vringh_abandon_kern(struct vringh *vrh, unsigned int num);
int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_kern(struct vringh *vrh);
void vringh_notify_disable_kern(struct vringh *vrh);

int vringh_need_notify_kern(struct vringh *vrh);
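
/*
 * Illustrative kernelspace sketch (assumptions: vrh was set up with
 * vringh_init_kern() over rings the kernel can address directly, and the
 * ring only carries readable buffers here, hence the NULL wiov; buf is a
 * hypothetical scratch buffer):
 *
 *	struct vringh_kiov riov;
 *	struct kvec kv[4];
 *	u16 head;
 *	int err;
 *
 *	vringh_kiov_init(&riov, kv, ARRAY_SIZE(kv));
 *	err = vringh_getdesc_kern(&vrh, &riov, NULL, &head, GFP_KERNEL);
 *	if (err == 1) {
 *		vringh_iov_pull_kern(&riov, buf, sizeof(buf));
 *		vringh_complete_kern(&vrh, head, 0);
 *		if (vringh_need_notify_kern(&vrh) > 0)
 *			vringh_notify(&vrh);
 *	}
 *	vringh_kiov_cleanup(&riov);
 */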

/* Notify the guest about buffers added to the used ring */
static inline void vringh_notify(struct vringh *vrh)
{
	if (vrh->notify)
		vrh->notify(vrh);
}

static inline bool vringh_is_little_endian(const struct vringh *vrh)
{
	return vrh->little_endian ||
		virtio_legacy_is_little_endian();
}

static inline u16 vringh16_to_cpu(const struct vringh *vrh, __virtio16 val)
{
	return __virtio16_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio16 cpu_to_vringh16(const struct vringh *vrh, u16 val)
{
	return __cpu_to_virtio16(vringh_is_little_endian(vrh), val);
}

static inline u32 vringh32_to_cpu(const struct vringh *vrh, __virtio32 val)
{
	return __virtio32_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio32 cpu_to_vringh32(const struct vringh *vrh, u32 val)
{
	return __cpu_to_virtio32(vringh_is_little_endian(vrh), val);
}

static inline u64 vringh64_to_cpu(const struct vringh *vrh, __virtio64 val)
{
	return __virtio64_to_cpu(vringh_is_little_endian(vrh), val);
}

static inline __virtio64 cpu_to_vringh64(const struct vringh *vrh, u64 val)
{
	return __cpu_to_virtio64(vringh_is_little_endian(vrh), val);
}
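
/*
 * Illustrative sketch: ring fields are stored as __virtio16/32/64, so any
 * direct access must go through these accessors to stay correct for
 * legacy big-endian guests, e.g. on a kernel-addressable ring:
 *
 *	u16 avail = vringh16_to_cpu(vrh, READ_ONCE(vrh->vring.avail->idx));
 */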

#if IS_REACHABLE(CONFIG_VHOST_IOTLB)

void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb);

int vringh_init_iotlb(struct vringh *vrh, u64 features,
		      unsigned int num, bool weak_barriers,
		      struct vring_desc *desc,
		      struct vring_avail *avail,
		      struct vring_used *used);

int vringh_getdesc_iotlb(struct vringh *vrh,
			 struct vringh_kiov *riov,
			 struct vringh_kiov *wiov,
			 u16 *head,
			 gfp_t gfp);

ssize_t vringh_iov_pull_iotlb(struct vringh *vrh,
			      struct vringh_kiov *riov,
			      void *dst, size_t len);
ssize_t vringh_iov_push_iotlb(struct vringh *vrh,
			      struct vringh_kiov *wiov,
			      const void *src, size_t len);

void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num);

int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len);

bool vringh_notify_enable_iotlb(struct vringh *vrh);
void vringh_notify_disable_iotlb(struct vringh *vrh);

int vringh_need_notify_iotlb(struct vringh *vrh);
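
/*
 * Illustrative IOTLB setup sketch (assumptions: a vDPA-style device whose
 * ring and buffer addresses must be translated through a caller-populated
 * struct vhost_iotlb; iotlb, features, num, desc, avail and used come
 * from the device):
 *
 *	vringh_set_iotlb(&vrh, iotlb);
 *	err = vringh_init_iotlb(&vrh, features, num, false,
 *				desc, avail, used);
 *	// From here on, use only the *_iotlb variants: descriptors come
 *	// via vringh_getdesc_iotlb() and payloads via
 *	// vringh_iov_pull_iotlb()/vringh_iov_push_iotlb(), each of which
 *	// translates addresses through the IOTLB on every access.
 */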

#endif /* CONFIG_VHOST_IOTLB */

#endif /* _LINUX_VRINGH_H */