/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers. Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (e.g. other heterogeneous
 * CPUs) we do need real barriers. In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */
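
/*
 * Typical use of virtio_wmb() (an illustrative sketch; the names below
 * are simplified from the split ring code in virtio_ring.c): the
 * producer fills in a descriptor, orders those writes, and only then
 * publishes the new available index:
 *
 *	vring.desc[head].addr = cpu_to_virtio64(vdev, addr);
 *	...
 *	virtio_wmb(weak_barriers);
 *	vring.avail->idx = cpu_to_virtio16(vdev, avail_idx + 1);
 */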

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		dma_rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		dma_wmb();
}

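/*
 * Store a value and then execute a full memory barrier, so the store
 * is ordered before any subsequent loads and stores (used e.g. when
 * publishing an event index that the other side may already be
 * polling).
 */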
#define virtio_store_mb(weak_barriers, p, v) \
do { \
	if (weak_barriers) { \
		virt_store_mb(*p, v); \
	} else { \
		WRITE_ONCE(*p, v); \
		mb(); \
	} \
} while (0)

struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring. If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected. The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
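
/*
 * Illustrative sketch of creating a ring (my_notify and my_callback
 * are hypothetical transport hooks; error handling abbreviated):
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true, true, false,
 *				    my_notify, my_callback, "requestq");
 *	if (!vq)
 *		return -ENOMEM;
 *	num = virtqueue_get_vring_size(vq);
 *
 * num may be less than the 256 requested, since may_reduce_num above
 * is true.
 */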

/*
 * Creates a virtqueue with a custom layout: the caller passes in a
 * struct vring it has already initialized (e.g. with vring_init())
 * over memory it manages itself.
 */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);
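
/*
 * Illustrative sketch (queue_mem, my_notify and my_callback are
 * hypothetical; the caller owns the ring memory):
 *
 *	struct vring vring;
 *
 *	vring_init(&vring, num, queue_mem, vring_align);
 *	vq = __vring_new_virtqueue(0, vring, vdev, true, false,
 *				   my_notify, my_callback, "ctrlq");
 */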

/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring: pages must point to at least vring_size(num, vring_align)
 * bytes of contiguous memory.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
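
/*
 * Sizing sketch (illustrative): one way for the caller to obtain a
 * suitable ring is
 *
 *	pages = alloc_pages_exact(vring_size(num, vring_align),
 *				  GFP_KERNEL | __GFP_ZERO);
 *
 * before calling vring_new_virtqueue().
 */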

/*
 * Destroys a virtqueue. If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

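/*
 * Called from the transport's interrupt handler when a virtqueue is
 * signalled; runs the virtqueue's callback if there is work pending.
 */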
irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* _LINUX_VIRTIO_RING_H */