/* SPDX-License-Identifier: GPL-2.0 */
/******************************************************************************
 * blkif.h
 *
 * Unified block-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_BLKIF_H__
#define __XEN_PUBLIC_IO_BLKIF_H__

#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/*
 * Front->back notifications: When enqueuing a new request, sending a
 * notification can be made conditional on req_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Backends must set
 * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
 *
 * Back->front notifications: When enqueuing a new response, sending a
 * notification can be made conditional on rsp_event (i.e., the generic
 * hold-off mechanism provided by the ring macros). Frontends must set
 * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
 */

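/*
 * Illustrative sketch (not part of the ABI): a backend request-drain loop
 * that uses the hold-off mechanism, assuming a hypothetical
 * struct blkif_back_ring "ring" and a hypothetical handler handle_request():
 *
 *     struct blkif_request req;
 *     int more_to_do;
 *
 *     RING_FINAL_CHECK_FOR_REQUESTS(&ring, more_to_do);
 *     while (more_to_do) {
 *         RING_COPY_REQUEST(&ring, ring.req_cons, &req);
 *         ring.req_cons++;
 *         handle_request(&req);
 *         RING_FINAL_CHECK_FOR_REQUESTS(&ring, more_to_do);
 *     }
 *
 * RING_FINAL_CHECK_FOR_REQUESTS() updates req_event as a side effect, so
 * the frontend's next RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() only raises a
 * notification if the backend has really stopped polling.
 */
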
typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

/*
 * Multiple hardware queues/rings:
 * If supported, the backend will write the key "multi-queue-max-queues" to
 * the directory for that vbd, and set its value to the maximum supported
 * number of queues.
 * Frontends that are aware of this feature and wish to use it can write the
 * key "multi-queue-num-queues" with the number they wish to use, which must be
 * greater than zero, and no more than the value reported by the backend in
 * "multi-queue-max-queues".
 *
 * For frontends requesting just one queue, the usual event-channel and
 * ring-ref keys are written as before, simplifying the backend processing
 * to avoid distinguishing between a frontend that doesn't understand the
 * multi-queue feature, and one that does, but requested only one queue.
 *
 * Frontends requesting two or more queues must not write the toplevel
 * event-channel and ring-ref keys, instead writing those keys under sub-keys
 * having the name "queue-N" where N is the integer ID of the queue/ring for
 * which those keys belong. Queues are indexed from zero.
 * For example, a frontend with two queues must write the following set of
 * queue-related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 * It is also possible to use multiple queues/rings together with the
 * multi-page ring-buffer feature.
 * For example, a frontend requesting two queues/rings, where each ring
 * buffer is two pages in size, must write the following set of related keys:
 *
 * /local/domain/1/device/vbd/0/multi-queue-num-queues = "2"
 * /local/domain/1/device/vbd/0/ring-page-order = "1"
 * /local/domain/1/device/vbd/0/queue-0 = ""
 * /local/domain/1/device/vbd/0/queue-0/ring-ref0 = "<ring-ref#0>"
 * /local/domain/1/device/vbd/0/queue-0/ring-ref1 = "<ring-ref#1>"
 * /local/domain/1/device/vbd/0/queue-0/event-channel = "<evtchn#0>"
 * /local/domain/1/device/vbd/0/queue-1 = ""
 * /local/domain/1/device/vbd/0/queue-1/ring-ref0 = "<ring-ref#2>"
 * /local/domain/1/device/vbd/0/queue-1/ring-ref1 = "<ring-ref#3>"
 * /local/domain/1/device/vbd/0/queue-1/event-channel = "<evtchn#1>"
 *
 */

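/*
 * Illustrative sketch (not part of the ABI): a frontend negotiating two
 * queues inside a xenbus transaction, assuming a hypothetical
 * struct xenbus_device "dev" with already-granted ring references and
 * already-bound event channels:
 *
 *     struct xenbus_transaction xbt;
 *     unsigned int max_queues;
 *
 *     max_queues = xenbus_read_unsigned(dev->otherend,
 *                                       "multi-queue-max-queues", 1);
 *     xenbus_transaction_start(&xbt);
 *     xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u", 2);
 *     xenbus_printf(xbt, dev->nodename, "queue-0/ring-ref", "%u", ref0);
 *     xenbus_printf(xbt, dev->nodename, "queue-0/event-channel", "%u", evt0);
 *     xenbus_printf(xbt, dev->nodename, "queue-1/ring-ref", "%u", ref1);
 *     xenbus_printf(xbt, dev->nodename, "queue-1/event-channel", "%u", evt1);
 *     xenbus_transaction_end(xbt, 0);
 *
 * Error handling is omitted: the requested count must first be clamped to
 * max_queues, and a real frontend must retry the transaction when
 * xenbus_transaction_end() returns -EAGAIN.
 */
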
/*
 * REQUEST CODES.
 */
#define BLKIF_OP_READ              0
#define BLKIF_OP_WRITE             1
/*
 * Recognised only if "feature-barrier" is present in backend xenbus info.
 * The "feature-barrier" node contains a boolean indicating whether barrier
 * requests are likely to succeed or fail. Either way, a barrier request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt barrier requests.
 * If a backend does not recognise BLKIF_OP_WRITE_BARRIER, it should *not*
 * create the "feature-barrier" node!
 */
#define BLKIF_OP_WRITE_BARRIER     2

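/*
 * Illustrative sketch (not part of the ABI): probing the node from a
 * frontend, assuming a hypothetical struct xenbus_device "dev":
 *
 *     unsigned int barrier_ok;
 *
 *     barrier_ok = xenbus_read_unsigned(dev->otherend, "feature-barrier", 0);
 *
 * A missing node reads back as the default 0 here, so a frontend using
 * this pattern never issues BLKIF_OP_WRITE_BARRIER unless the backend has
 * advertised that barriers are worthwhile.
 */
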
/*
 * Recognised if "feature-flush-cache" is present in backend xenbus
 * info.  A flush will ask the underlying storage hardware to flush its
 * non-volatile caches as appropriate.  The "feature-flush-cache" node
 * contains a boolean indicating whether flush requests are likely to
 * succeed or fail. Either way, a flush request may fail at any time
 * with BLKIF_RSP_EOPNOTSUPP if it is unsupported by the underlying
 * block-device hardware. The boolean simply indicates whether or not it
 * is worthwhile for the frontend to attempt flushes.  If a backend does
 * not recognise BLKIF_OP_FLUSH_DISKCACHE, it should *not* create the
 * "feature-flush-cache" node!
 */
#define BLKIF_OP_FLUSH_DISKCACHE   3

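/*
 * Illustrative sketch (not part of the ABI): a flush is an ordinary
 * request with no data segments. Assuming a hypothetical
 * struct blkif_front_ring "ring" (struct blkif_request is defined later
 * in this header):
 *
 *     struct blkif_request *req =
 *         RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *
 *     req->operation          = BLKIF_OP_FLUSH_DISKCACHE;
 *     req->u.rw.nr_segments   = 0;
 *     req->u.rw.handle        = handle;
 *     req->u.rw.id            = id;
 *     req->u.rw.sector_number = 0;
 *     ring.req_prod_pvt++;
 *
 * Reusing the u.rw layout with nr_segments == 0 matches what existing
 * frontends do; a BLKIF_RSP_EOPNOTSUPP completion tells the frontend to
 * stop sending flushes, it does not indicate data loss.
 */
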
/*
 * Recognised only if "feature-discard" is present in backend xenbus info.
 * The "feature-discard" node contains a boolean indicating whether trim
 * (ATA) or unmap (SCSI) - conveniently called discard - requests are
 * likely to succeed or fail. Either way, a discard request
 * may fail at any time with BLKIF_RSP_EOPNOTSUPP if it is unsupported by
 * the underlying block-device hardware. The boolean simply indicates whether
 * or not it is worthwhile for the frontend to attempt discard requests.
 * If a backend does not recognise BLKIF_OP_DISCARD, it should *not*
 * create the "feature-discard" node!
 *
 * A discard operation is a request for the underlying block device to mark
 * extents to be erased. However, discard does not guarantee that the blocks
 * will be erased from the device - it is just a hint to the device
 * controller that these blocks are no longer in use. What the device
 * controller does with that information is left to the controller.
 * Discard operations are passed with sector_number as the
 * sector index to begin discard operations at and nr_sectors as the number of
 * sectors to be discarded. The specified sectors should be discarded if the
 * underlying block device supports trim (ATA) or unmap (SCSI) operations,
 * or a BLKIF_RSP_EOPNOTSUPP should be returned.
 * More information about trim/unmap operations at:
 * http://t13.org/Documents/UploadedDocuments/docs2008/
 *     e07154r6-Data_Set_Management_Proposal_for_ATA-ACS2.doc
 * http://www.seagate.com/staticfiles/support/disc/manuals/
 *     Interface%20manuals/100293068c.pdf
 * The backend can optionally provide three extra XenBus attributes to
 * further optimize the discard functionality:
 * 'discard-alignment' - Devices that support discard functionality may
 * internally allocate space in units that are bigger than the exported
 * logical block size. The discard-alignment parameter indicates how many bytes
 * the beginning of the partition is offset from the internal allocation unit's
 * natural alignment.
 * 'discard-granularity' - Devices that support discard functionality may
 * internally allocate space using units that are bigger than the logical block
 * size. The discard-granularity parameter indicates the size of the internal
 * allocation unit in bytes if reported by the device. Otherwise the
 * discard-granularity will be set to match the device's physical block size.
 * 'discard-secure' - All copies of the discarded sectors (potentially created
 * by garbage collection) must also be erased.  To use this feature, the flag
 * BLKIF_DISCARD_SECURE must be set in the blkif_request_discard.
 */
#define BLKIF_OP_DISCARD           5

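/*
 * Illustrative sketch (not part of the ABI): discarding nr_sects sectors
 * starting at start_sect, assuming a hypothetical struct blkif_front_ring
 * "ring" (struct blkif_request_discard is defined later in this header):
 *
 *     struct blkif_request *req =
 *         RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *
 *     req->operation               = BLKIF_OP_DISCARD;
 *     req->u.discard.flag          = secure ? BLKIF_DISCARD_SECURE : 0;
 *     req->u.discard.id            = id;
 *     req->u.discard.sector_number = start_sect;
 *     req->u.discard.nr_sectors    = nr_sects;
 *     ring.req_prod_pvt++;
 *
 * BLKIF_DISCARD_SECURE may only be set when the backend advertises
 * "discard-secure"; backends ignore it otherwise.
 */
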
/*
 * Recognized if "feature-max-indirect-segments" is present in the backend
 * xenbus info. The "feature-max-indirect-segments" node contains the maximum
 * number of segments allowed by the backend per request. If the node is
 * present, the frontend might use blkif_request_indirect structs in order to
 * issue requests with more than BLKIF_MAX_SEGMENTS_PER_REQUEST (11). The
 * maximum number of indirect segments is fixed by the backend, but the
 * frontend can issue requests with any number of indirect segments as long as
 * it's less than the number provided by the backend. The indirect_grefs field
 * in blkif_request_indirect should be filled by the frontend with the
 * grant references of the pages that are holding the indirect segments.
 * These pages are filled with an array of blkif_request_segment that hold the
 * information about the segments. The number of indirect pages to use is
 * determined by the number of segments an indirect request contains. Every
 * indirect page can contain a maximum of
 * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
 * calculate the number of indirect pages to use we have to do
 * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
 *
 * If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
 * create the "feature-max-indirect-segments" node!
 */
#define BLKIF_OP_INDIRECT          6

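/*
 * Illustrative sketch (not part of the ABI) of the arithmetic above,
 * using the kernel's DIV_ROUND_UP() for the ceiling division:
 *
 *     #define SEGS_PER_INDIRECT_FRAME \
 *         (PAGE_SIZE / sizeof(struct blkif_request_segment))
 *
 *     nr_indirect_pages = DIV_ROUND_UP(nr_segments, SEGS_PER_INDIRECT_FRAME);
 *
 * With 4096-byte pages and the 8-byte blkif_request_segment defined below,
 * each indirect page holds 512 segments, so e.g. 600 segments need
 * DIV_ROUND_UP(600, 512) == 2 indirect pages.
 */
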
/*
 * Maximum scatter/gather segments per request.
 * This is carefully chosen so that sizeof(struct blkif_ring) <= PAGE_SIZE.
 * NB. This could be 12 if the ring indexes weren't stored in the same page.
 */
#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11

#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8

struct blkif_request_segment {
		grant_ref_t gref;        /* reference to I/O buffer frame        */
		/* @first_sect: first sector in frame to transfer (inclusive).   */
		/* @last_sect: last sector in frame to transfer (inclusive).     */
		uint8_t     first_sect, last_sect;
};

struct blkif_request_rw {
	uint8_t        nr_segments;  /* number of segments                   */
	blkif_vdev_t   handle;       /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad1;	     /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_request_discard {
	uint8_t        flag;         /* BLKIF_DISCARD_SECURE or zero.        */
#define BLKIF_DISCARD_SECURE (1<<0)  /* ignored if discard-secure=0          */
	blkif_vdev_t   _pad1;        /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t       _pad2;        /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
	uint64_t       id;           /* private guest value, echoed in resp  */
	blkif_sector_t sector_number;
	uint64_t       nr_sectors;
	uint8_t        _pad3;
} __attribute__((__packed__));

struct blkif_request_other {
	uint8_t      _pad1;
	blkif_vdev_t _pad2;        /* only for read/write requests         */
#ifndef CONFIG_X86_32
	uint32_t     _pad3;        /* offsetof(blkif_req..,u.other.id)==8*/
#endif
	uint64_t     id;           /* private guest value, echoed in resp  */
} __attribute__((__packed__));

struct blkif_request_indirect {
	uint8_t        indirect_op;
	uint16_t       nr_segments;
#ifndef CONFIG_X86_32
	uint32_t       _pad1;        /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
	uint64_t       id;
	blkif_sector_t sector_number;
	blkif_vdev_t   handle;
	uint16_t       _pad2;
	grant_ref_t    indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
#ifndef CONFIG_X86_32
	uint32_t      _pad3;         /* make it 64 byte aligned */
#else
	uint64_t      _pad3;         /* make it 64 byte aligned */
#endif
} __attribute__((__packed__));

struct blkif_request {
	uint8_t        operation;    /* BLKIF_OP_???                         */
	union {
		struct blkif_request_rw rw;
		struct blkif_request_discard discard;
		struct blkif_request_other other;
		struct blkif_request_indirect indirect;
	} u;
} __attribute__((__packed__));

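/*
 * Illustrative sketch (not part of the ABI): queuing a one-segment read
 * and notifying the backend, assuming a hypothetical struct
 * blkif_front_ring "ring", a granted frame "gref" and an event-channel
 * irq "irq":
 *
 *     struct blkif_request *req =
 *         RING_GET_REQUEST(&ring, ring.req_prod_pvt);
 *     int notify;
 *
 *     req->operation              = BLKIF_OP_READ;
 *     req->u.rw.nr_segments       = 1;
 *     req->u.rw.handle            = handle;
 *     req->u.rw.id                = id;
 *     req->u.rw.sector_number     = sector;
 *     req->u.rw.seg[0].gref       = gref;
 *     req->u.rw.seg[0].first_sect = 0;
 *     req->u.rw.seg[0].last_sect  = 7;
 *     ring.req_prod_pvt++;
 *
 *     RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *     if (notify)
 *         notify_remote_via_irq(irq);
 *
 * first_sect == 0 and last_sect == 7 transfer the whole 4096-byte frame
 * in 512-byte sectors.
 */
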
struct blkif_response {
	uint64_t        id;              /* copied from request */
	uint8_t         operation;       /* copied from request */
	int16_t         status;          /* BLKIF_RSP_???       */
};

/*
 * STATUS RETURN CODES.
 */
 /* Operation not supported (only happens on barrier writes). */
#define BLKIF_RSP_EOPNOTSUPP  -2
 /* Operation failed for some unspecified reason (-EIO). */
#define BLKIF_RSP_ERROR       -1
 /* Operation completed successfully. */
#define BLKIF_RSP_OKAY         0

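/*
 * Illustrative sketch (not part of the ABI): consuming responses on the
 * frontend, assuming a hypothetical struct blkif_front_ring "ring" and
 * hypothetical helpers complete_request() and disable_feature():
 *
 *     struct blkif_response *rsp;
 *     RING_IDX i, rp;
 *     int more_to_do;
 *
 *     rp = ring.sring->rsp_prod;
 *     rmb();
 *     for (i = ring.rsp_cons; i != rp; i++) {
 *         rsp = RING_GET_RESPONSE(&ring, i);
 *         if (rsp->status == BLKIF_RSP_EOPNOTSUPP)
 *             disable_feature(rsp->operation);
 *         complete_request(rsp->id, rsp->status);
 *     }
 *     ring.rsp_cons = i;
 *     RING_FINAL_CHECK_FOR_RESPONSES(&ring, more_to_do);
 *
 * The rmb() orders the read of rsp_prod before the response bodies, and
 * BLKIF_RSP_EOPNOTSUPP marks the operation as unsupported rather than
 * failing the device.
 */
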
/*
 * Generate blkif ring structures and types.
 */

DEFINE_RING_TYPES(blkif, struct blkif_request, struct blkif_response);

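/*
 * Illustrative sketch (not part of the ABI): DEFINE_RING_TYPES() above
 * expands to struct blkif_sring (the shared page) plus the private
 * struct blkif_front_ring and struct blkif_back_ring wrappers. A frontend
 * typically initialises a zeroed, granted page as follows:
 *
 *     struct blkif_sring *sring = page;
 *     struct blkif_front_ring ring;
 *
 *     SHARED_RING_INIT(sring);
 *     FRONT_RING_INIT(&ring, sring, PAGE_SIZE);
 *
 * The backend maps the same page and runs BACK_RING_INIT() on its side.
 */
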
#define VDISK_CDROM        0x1
#define VDISK_REMOVABLE    0x2
#define VDISK_READONLY     0x4

/* Xen-defined major numbers for virtual disks, they look strangely
 * familiar */
#define XEN_IDE0_MAJOR	3
#define XEN_IDE1_MAJOR	22
#define XEN_SCSI_DISK0_MAJOR	8
#define XEN_SCSI_DISK1_MAJOR	65
#define XEN_SCSI_DISK2_MAJOR	66
#define XEN_SCSI_DISK3_MAJOR	67
#define XEN_SCSI_DISK4_MAJOR	68
#define XEN_SCSI_DISK5_MAJOR	69
#define XEN_SCSI_DISK6_MAJOR	70
#define XEN_SCSI_DISK7_MAJOR	71
#define XEN_SCSI_DISK8_MAJOR	128
#define XEN_SCSI_DISK9_MAJOR	129
#define XEN_SCSI_DISK10_MAJOR	130
#define XEN_SCSI_DISK11_MAJOR	131
#define XEN_SCSI_DISK12_MAJOR	132
#define XEN_SCSI_DISK13_MAJOR	133
#define XEN_SCSI_DISK14_MAJOR	134
#define XEN_SCSI_DISK15_MAJOR	135

#endif /* __XEN_PUBLIC_IO_BLKIF_H__ */