/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>
#include <linux/vhost_iotlb.h>
#include <linux/irqbypass.h>

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

#define VHOST_WORK_QUEUED 1
struct vhost_work {
	struct llist_node	  node;
	vhost_work_fn_t		  fn;
	unsigned long		  flags;
};

/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table                table;
	wait_queue_head_t        *wqh;
	wait_queue_entry_t        wait;
	struct vhost_work	  work;
	__poll_t		  mask;
	struct vhost_dev	 *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
bool vhost_has_work(struct vhost_dev *dev);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     __poll_t mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);

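/*
 * Example (illustrative sketch only; "my_handle_backend" and "my_attach_backend"
 * are hypothetical driver names, not part of this header): a backend driver
 * typically wraps a vhost_work in a vhost_poll so that the work runs in the
 * vhost worker whenever a backend file becomes ready.
 */
#if 0
static void my_handle_backend(struct vhost_work *work)
{
	struct vhost_poll *poll = container_of(work, struct vhost_poll, work);

	/* Runs in the vhost worker thread; process the ready file here. */
}

static int my_attach_backend(struct vhost_dev *dev, struct vhost_poll *poll,
			     struct file *backend)
{
	vhost_poll_init(poll, my_handle_backend, EPOLLIN, dev);
	return vhost_poll_start(poll, backend);	/* 0 on success */
}
#endif
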
struct vhost_log {
	u64 addr;
	u64 len;
};

enum vhost_uaddr_type {
	VHOST_ADDR_DESC = 0,
	VHOST_ADDR_AVAIL = 1,
	VHOST_ADDR_USED = 2,
	VHOST_NUM_ADDRS = 3,
};

struct vhost_vring_call {
	struct eventfd_ctx *ctx;
	struct irq_bypass_producer producer;
};

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	vring_desc_t __user *desc;
	vring_avail_t __user *avail;
	vring_used_t __user *used;
	const struct vhost_iotlb_map *meta_iotlb[VHOST_NUM_ADDRS];
	struct file *kick;
	struct vhost_vring_call call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether signalled_used is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	struct iovec iotlb_iov[64];
	struct iovec *indirect;
	struct vring_used_elem *heads;
	/* Protected by virtqueue mutex. */
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	void *private_data;
	u64 acked_features;
	u64 acked_backend_features;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
	struct iovec log_iov[64];

	/* Ring endianness. Defaults to legacy native endianness.
	 * Set to true when starting a modern virtio device. */
	bool is_le;
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
	/* Ring endianness requested by userspace for cross-endian support. */
	bool user_be;
#endif
	u32 busyloop_timeout;
};

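/*
 * Illustrative sketch (the "my_*" name is hypothetical): drivers usually
 * embed struct vhost_virtqueue in their own per-queue state and recover it
 * with container_of() from poll.work in the kick handler.
 */
#if 0
struct my_virtqueue {
	struct vhost_virtqueue vq;
	/* driver-private per-queue fields follow */
};
#endif
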
struct vhost_msg_node {
	union {
		struct vhost_msg msg;
		struct vhost_msg_v2 msg_v2;
	};
	struct vhost_virtqueue *vq;
	struct list_head node;
};

struct vhost_dev {
	struct mm_struct *mm;
	struct mutex mutex;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct eventfd_ctx *log_ctx;
	struct llist_head work_list;
	struct task_struct *worker;
	struct vhost_iotlb *umem;
	struct vhost_iotlb *iotlb;
	spinlock_t iotlb_lock;
	struct list_head read_list;
	struct list_head pending_list;
	wait_queue_head_t wait;
	int iov_limit;
	int weight;
	int byte_weight;
	u64 kcov_handle;
	bool use_worker;
	int (*msg_handler)(struct vhost_dev *dev,
			   struct vhost_iotlb_msg *msg);
};

bool vhost_exceeds_weight(struct vhost_virtqueue *vq, int pkts, int total_len);
void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs,
		    int nvqs, int iov_limit, int weight, int byte_weight,
		    bool use_worker,
		    int (*msg_handler)(struct vhost_dev *dev,
				       struct vhost_iotlb_msg *msg));
long vhost_dev_set_owner(struct vhost_dev *dev);
bool vhost_dev_has_owner(struct vhost_dev *dev);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_iotlb *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_iotlb *iotlb);
void vhost_dev_cleanup(struct vhost_dev *);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp);
bool vhost_vq_access_ok(struct vhost_virtqueue *vq);
bool vhost_log_access_ok(struct vhost_dev *);

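/*
 * Illustrative open() sketch (the "my_*" names and limits are hypothetical):
 * the driver allocates its device plus a vqs[] array, points each
 * vq->handle_kick at its kick handler, then hands everything to
 * vhost_dev_init(). The worker thread itself is only created later, when
 * userspace issues VHOST_SET_OWNER and the driver calls
 * vhost_dev_set_owner() from its ioctl path.
 */
#if 0
static int my_open(struct inode *inode, struct file *f)
{
	struct my_dev *n = kvzalloc(sizeof(*n), GFP_KERNEL);
	struct vhost_virtqueue **vqs;
	int i;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(MY_NVQS, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}
	for (i = 0; i < MY_NVQS; i++) {
		vqs[i] = &n->vqs[i].vq;
		vqs[i]->handle_kick = my_handle_kick;
	}
	vhost_dev_init(&n->dev, vqs, MY_NVQS, UIO_MAXIOV,
		       MY_PKT_WEIGHT, MY_BYTE_WEIGHT, true, NULL);
	f->private_data = n;
	return 0;
}
#endif
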
int vhost_get_vq_desc(struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

bool vhost_vq_is_setup(struct vhost_virtqueue *vq);
int vhost_vq_init_access(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
			       struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_vq_avail_empty(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

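/*
 * Illustrative kick-handler sketch ("my_handle_kick" is hypothetical): the
 * usual pattern is to disable guest notifications, drain the available ring
 * with vhost_get_vq_desc(), complete each buffer with
 * vhost_add_used_and_signal(), and re-enable notifications once the ring
 * looks empty, re-checking for a race. vhost_exceeds_weight() requeues the
 * work itself when the quota is hit, so the handler just breaks out.
 */
#if 0
static void my_handle_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	unsigned int out, in;
	int head, len, pkts = 0, total_len = 0;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(vq->dev, vq);
	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (unlikely(head < 0))
			break;			/* access error, already vq_err()ed */
		if (head == vq->num) {		/* nothing left in the ring */
			if (unlikely(vhost_enable_notify(vq->dev, vq))) {
				vhost_disable_notify(vq->dev, vq);
				continue;	/* raced with a new buffer */
			}
			break;
		}
		len = 0;	/* ... consume vq->iov[0..out+in), set bytes written ... */
		vhost_add_used_and_signal(vq->dev, vq, head, len);
		total_len += len;
		if (unlikely(vhost_exceeds_weight(vq, ++pkts, total_len)))
			break;
	}
	mutex_unlock(&vq->mutex);
}
#endif
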
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len,
		    struct iovec *iov, int count);
int vq_meta_prefetch(struct vhost_virtqueue *vq);

struct vhost_msg_node *vhost_new_msg(struct vhost_virtqueue *vq, int type);
void vhost_enqueue_msg(struct vhost_dev *dev,
		       struct list_head *head,
		       struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
					 struct list_head *head);
void vhost_set_backend_features(struct vhost_dev *dev, u64 features);

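/*
 * Illustrative sketch of the kernel half of the IOTLB protocol (error
 * handling trimmed, surrounding variables assumed): on a translation miss,
 * a message is built with vhost_new_msg() and queued on dev->read_list via
 * vhost_enqueue_msg(); userspace then reads it through the chardev helpers
 * below and replies with an IOTLB update.
 */
#if 0
	struct vhost_msg_node *node;
	struct vhost_iotlb_msg *msg;
	bool v2 = vhost_backend_has_feature(vq, VHOST_BACKEND_F_IOTLB_MSG_V2);

	node = vhost_new_msg(vq, v2 ? VHOST_IOTLB_MSG_V2 : VHOST_IOTLB_MSG);
	if (!node)
		return -ENOMEM;

	msg = v2 ? &node->msg_v2.iotlb : &node->msg.iotlb;
	msg->type = VHOST_IOTLB_MISS;
	msg->iova = iova;
	msg->perm = access;

	vhost_enqueue_msg(vq->dev, &vq->dev->read_list, node);
#endif
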
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
			    poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
			    int noblock);
ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
			     struct iov_iter *from);
int vhost_init_device_iotlb(struct vhost_dev *d, bool enabled);

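/*
 * Illustrative sketch ("my_*" names are hypothetical): drivers that expose
 * the IOTLB message interface simply forward their file_operations callbacks
 * to the helpers above, e.g. for .read_iter:
 */
#if 0
static ssize_t my_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct my_dev *n = iocb->ki_filp->private_data;

	return vhost_chr_read_iter(&n->dev, to,
				   iocb->ki_filp->f_flags & O_NONBLOCK);
}
#endif
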
void vhost_iotlb_map_free(struct vhost_iotlb *iotlb,
			  struct vhost_iotlb_map *map);

#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
				eventfd_signal((vq)->error_ctx, 1);\
	} while (0)

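/*
 * Illustrative use of vq_err() (values and context are just examples):
 * report a malformed ring and signal the error eventfd so userspace can
 * react.
 */
#if 0
	if (unlikely(head >= vq->num)) {
		vq_err(vq, "Bogus descriptor head %u (max %u)\n",
		       head, vq->num);
		return -EINVAL;
	}
#endif
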
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL) |
			 (1ULL << VIRTIO_F_ANY_LAYOUT) |
			 (1ULL << VIRTIO_F_VERSION_1)
};

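/*
 * Illustrative sketch: individual drivers usually build their advertised
 * feature set on top of VHOST_FEATURES ("MY_VHOST_FEATURES" and the extra
 * bit chosen here are hypothetical).
 */
#if 0
enum {
	MY_VHOST_FEATURES = VHOST_FEATURES |
			    (1ULL << VIRTIO_F_ACCESS_PLATFORM),
};
#endif
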
/**
 * vhost_vq_set_backend - Set backend.
 *
 * @vq:           Virtqueue.
 * @private_data: The private data.
 *
 * Context: Needs to be called with vq->mutex acquired.
 */
static inline void vhost_vq_set_backend(struct vhost_virtqueue *vq,
					void *private_data)
{
	vq->private_data = private_data;
}

/**
 * vhost_vq_get_backend - Get backend.
 *
 * @vq:           Virtqueue.
 *
 * Context: Needs to be called with vq->mutex acquired.
 * Return: Private data previously set with vhost_vq_set_backend.
 */
static inline void *vhost_vq_get_backend(struct vhost_virtqueue *vq)
{
	return vq->private_data;
}

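/*
 * Illustrative sketch of attaching a backend ("priv" stands for whatever
 * object the driver hands to its datapath): take vq->mutex, install the
 * backend, then validate and prime the ring.
 */
#if 0
	mutex_lock(&vq->mutex);
	vhost_vq_set_backend(vq, priv);
	r = vhost_vq_init_access(vq);
	if (r)
		vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);
#endif
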
static inline bool vhost_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_features & (1ULL << bit);
}

static inline bool vhost_backend_has_feature(struct vhost_virtqueue *vq, int bit)
{
	return vq->acked_backend_features & (1ULL << bit);
}

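/*
 * Illustrative sketch (surrounding variables are hypothetical): datapath
 * code branches on negotiated features, e.g. only collecting dirty-log
 * entries when the guest acked VHOST_F_LOG_ALL.
 */
#if 0
	struct vhost_log *log = vhost_has_feature(vq, VHOST_F_LOG_ALL) ?
				vq->log : NULL;

	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
				 &out, &in, log, &log_num);
#endif
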
#ifdef CONFIG_VHOST_CROSS_ENDIAN_LEGACY
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return vq->is_le;
}
#else
static inline bool vhost_is_little_endian(struct vhost_virtqueue *vq)
{
	return virtio_legacy_is_little_endian() || vq->is_le;
}
#endif

/* Memory accessors */
static inline u16 vhost16_to_cpu(struct vhost_virtqueue *vq, __virtio16 val)
{
	return __virtio16_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio16 cpu_to_vhost16(struct vhost_virtqueue *vq, u16 val)
{
	return __cpu_to_virtio16(vhost_is_little_endian(vq), val);
}

static inline u32 vhost32_to_cpu(struct vhost_virtqueue *vq, __virtio32 val)
{
	return __virtio32_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio32 cpu_to_vhost32(struct vhost_virtqueue *vq, u32 val)
{
	return __cpu_to_virtio32(vhost_is_little_endian(vq), val);
}

static inline u64 vhost64_to_cpu(struct vhost_virtqueue *vq, __virtio64 val)
{
	return __virtio64_to_cpu(vhost_is_little_endian(vq), val);
}

static inline __virtio64 cpu_to_vhost64(struct vhost_virtqueue *vq, u64 val)
{
	return __cpu_to_virtio64(vhost_is_little_endian(vq), val);
}
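
/*
 * Illustrative sketch (error handling trimmed): ring fields are stored in
 * guest/virtio byte order, so every load or store goes through the helpers
 * above, e.g. when refreshing the cached available index.
 */
#if 0
	__virtio16 idx;

	if (get_user(idx, &vq->avail->idx))
		return -EFAULT;
	vq->avail_idx = vhost16_to_cpu(vq, idx);
#endif
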
#endif /* _VHOST_H */