#ifndef _UAPI_LINUX_VIRTIO_RING_H
#define _UAPI_LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM,
 * but hopefully others soon.  Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */
#ifndef __KERNEL__
#include <stdint.h>
#endif
#include <linux/types.h>
#include <linux/virtio_types.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT	1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE	2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT	4

/*
 * Mark a descriptor as available or used in a packed ring.
 * Note: these are defined as bit positions (shift counts), not as
 * shifted values.
 */
#define VRING_PACKED_DESC_F_AVAIL	7
#define VRING_PACKED_DESC_F_USED	15
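
/*
 * Illustrative sketch (a hypothetical helper, not part of the UAPI):
 * since F_AVAIL and F_USED are bit positions, a driver shifts them into
 * the flags word instead of OR-ing the macros in directly.
 */
static inline __u16 vring_packed_flags(int avail, int used, __u16 type_flags)
{
	return type_flags |
	       ((__u16)!!avail << VRING_PACKED_DESC_F_AVAIL) |
	       ((__u16)!!used << VRING_PACKED_DESC_F_USED);
}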

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer.  It's unreliable, so it's simply an optimization.  Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY	1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer.  It's unreliable, so it's simply an
 * optimization.  */
#define VRING_AVAIL_F_NO_INTERRUPT	1
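
/*
 * Illustrative sketch (hypothetical helper): the guest-side check before a
 * kick, taking used->flags already converted to native endianness.  The
 * flag is only advisory, so a guest that has run out of buffers kicks
 * anyway.
 */
static inline int vring_host_wants_kick(__u16 used_flags, int out_of_buffers)
{
	return out_of_buffers || !(used_flags & VRING_USED_F_NO_NOTIFY);
}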

/* Enable events in a packed ring. */
#define VRING_PACKED_EVENT_FLAG_ENABLE	0x0
/* Disable events in a packed ring. */
#define VRING_PACKED_EVENT_FLAG_DISABLE	0x1
/*
 * Enable events for a specific descriptor in a packed ring
 * (as specified by the Descriptor Ring Change Event Offset/Wrap Counter).
 * Only valid if VIRTIO_RING_F_EVENT_IDX has been negotiated.
 */
#define VRING_PACKED_EVENT_FLAG_DESC	0x2

/*
 * Wrap counter bit shift in the event suppression structure
 * of a packed ring.
 */
#define VRING_PACKED_EVENT_F_WRAP_CTR	15
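
/*
 * Illustrative sketch (hypothetical helper): compose the off_wrap field of
 * the event suppression structure from a descriptor offset and the wrap
 * counter, which occupies bit VRING_PACKED_EVENT_F_WRAP_CTR.
 */
static inline __u16 vring_packed_event_off_wrap(__u16 off, int wrap)
{
	return (__u16)(off & ((1 << VRING_PACKED_EVENT_F_WRAP_CTR) - 1)) |
	       ((__u16)!!wrap << VRING_PACKED_EVENT_F_WRAP_CTR);
}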

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX		29
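
/*
 * Illustrative sketch (hypothetical helper): ring feature bits are plain
 * bit positions in the 64-bit feature word exchanged during negotiation,
 * e.g. vring_has_feature(features, VIRTIO_RING_F_EVENT_IDX).
 */
static inline int vring_has_feature(__u64 features, unsigned int fbit)
{
	return !!(features & (1ULL << fbit));
}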

/* Alignment requirements for vring elements.
 * When using pre-virtio 1.0 layout, these fall out naturally.
 */
#define VRING_AVAIL_ALIGN_SIZE 2
#define VRING_USED_ALIGN_SIZE 4
#define VRING_DESC_ALIGN_SIZE 16

/* Virtio ring descriptors: 16 bytes.  These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical). */
	__virtio64 addr;
	/* Length. */
	__virtio32 len;
	/* The flags as indicated above. */
	__virtio16 flags;
	/* We chain unused descriptors via this, too */
	__virtio16 next;
};
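
/*
 * Illustrative sketch (hypothetical legacy-interface code): a two-element
 * chain, a device-readable request followed by a device-writable response;
 * field values need the virtio16/32/64 endian helpers of the negotiated
 * transport:
 *
 *	desc[i].addr  = req_phys;
 *	desc[i].len   = req_len;
 *	desc[i].flags = VRING_DESC_F_NEXT;
 *	desc[i].next  = j;
 *	desc[j].addr  = resp_phys;
 *	desc[j].len   = resp_len;
 *	desc[j].flags = VRING_DESC_F_WRITE;	(device writes here)
 *	desc[j].next  = 0;			(ignored: no F_NEXT)
 */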

struct vring_avail {
	__virtio16 flags;
	__virtio16 idx;
	__virtio16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	__virtio32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__virtio32 len;
};

typedef struct vring_used_elem __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_elem_t;

struct vring_used {
	__virtio16 flags;
	__virtio16 idx;
	vring_used_elem_t ring[];
};

/*
 * The ring element addresses are passed between components with different
 * alignment assumptions. Thus, we might need to decrease the compiler-selected
 * alignment, and so must use a typedef to make sure the aligned attribute
 * actually takes hold:
 *
 * https://gcc.gnu.org/onlinedocs//gcc/Common-Type-Attributes.html#Common-Type-Attributes
 *
 * When used on a struct, or struct member, the aligned attribute can only
 * increase the alignment; in order to decrease it, the packed attribute must
 * be specified as well. When used as part of a typedef, the aligned attribute
 * can both increase and decrease alignment, and specifying the packed
 * attribute generates a warning.
 */
typedef struct vring_desc __attribute__((aligned(VRING_DESC_ALIGN_SIZE)))
	vring_desc_t;
typedef struct vring_avail __attribute__((aligned(VRING_AVAIL_ALIGN_SIZE)))
	vring_avail_t;
typedef struct vring_used __attribute__((aligned(VRING_USED_ALIGN_SIZE)))
	vring_used_t;
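
/*
 * Build-time sketch of the effect (assumes a C11 compiler; not part of the
 * UAPI): the vring_desc_t typedef raises the alignment to 16, while
 * vring_avail_t keeps the natural 2-byte alignment of its members.
 */
_Static_assert(__alignof__(vring_desc_t) == VRING_DESC_ALIGN_SIZE,
	       "typedef sets vring_desc_t alignment to 16");
_Static_assert(__alignof__(vring_avail_t) == VRING_AVAIL_ALIGN_SIZE,
	       "__virtio16 members are naturally 2-byte aligned");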

struct vring {
	unsigned int num;

	vring_desc_t *desc;

	vring_avail_t *avail;

	vring_used_t *used;
};

#ifndef VIRTIO_RING_NO_LEGACY

/* The standard layout for the ring is a contiguous chunk of memory which
 * looks like this.  We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__virtio16 avail_flags;
 *	__virtio16 avail_idx;
 *	__virtio16 available[num];
 *	__virtio16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__virtio16 used_flags;
 *	__virtio16 used_idx;
 *	struct vring_used_elem used[num];
 *	__virtio16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr) ((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr) (*(__virtio16 *)&(vr)->used->ring[(vr)->num])
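
/*
 * Illustrative sketch (hypothetical, with VIRTIO_RING_F_EVENT_IDX
 * negotiated): a guest that wants an interrupt once used entry "wanted"
 * is filled stores, with the proper endian conversion:
 *
 *	vring_used_event(&vr) = wanted;
 */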

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
	vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
		+ align - 1) & ~(align - 1));
}

static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(__virtio16) * (3 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(__virtio16) * 3 + sizeof(struct vring_used_elem) * num;
}
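
/*
 * Usage sketch (hypothetical userspace code; posix_memalign() and memset()
 * need <stdlib.h> and <string.h>): lay out a 256-entry legacy ring with
 * 4096-byte alignment.
 *
 *	struct vring vr;
 *	void *p;
 *
 *	if (posix_memalign(&p, 4096, vring_size(256, 4096)))
 *		return -1;
 *	memset(p, 0, vring_size(256, 4096));
 *	vring_init(&vr, 256, p, 4096);
 */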

#endif /* VIRTIO_RING_NO_LEGACY */

/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0. */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
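
/*
 * Worked example: with old = 5, new_idx = 7 and the other side's
 * event_idx = 6, (__u16)(7 - 6 - 1) = 0 < (__u16)(7 - 5) = 2, so index 6
 * was just crossed and an event is due; with event_idx = 8 the left-hand
 * side wraps to 65534 and no event is triggered.
 */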

struct vring_packed_desc_event {
	/* Descriptor Ring Change Event Offset/Wrap Counter. */
	__le16 off_wrap;
	/* Descriptor Ring Change Event Flags. */
	__le16 flags;
};

struct vring_packed_desc {
	/* Buffer Address. */
	__le64 addr;
	/* Buffer Length. */
	__le32 len;
	/* Buffer ID. */
	__le16 id;
	/* The flags depending on descriptor type. */
	__le16 flags;
};
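
/*
 * Illustrative sketch (hypothetical driver-side code, reusing the
 * vring_packed_flags() sketch above; htole*() from <endian.h> in
 * userspace, cpu_to_le*() in the kernel): make one buffer available,
 * where "wrap" is the driver's current wrap counter, so the avail bit is
 * set to "wrap" and the used bit to its inverse:
 *
 *	desc->addr  = htole64(buf_phys);
 *	desc->len   = htole32(buf_len);
 *	desc->id    = htole16(buf_id);
 *	desc->flags = htole16(vring_packed_flags(wrap, !wrap,
 *						 VRING_DESC_F_WRITE));
 */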

#endif /* _UAPI_LINUX_VIRTIO_RING_H */