/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_BLKIF__BACKEND__COMMON_H__
#define __XEN_BLKIF__BACKEND__COMMON_H__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/io.h>
#include <linux/rbtree.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <xen/grant_table.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>
#include <xen/interface/io/blkif.h>
#include <xen/interface/io/protocols.h>

extern unsigned int xen_blkif_max_ring_order;
extern unsigned int xenblk_max_queues;
/*
 * Maximum number of segments allowed in an indirect request. This value is
 * also advertised to the frontend.
 */
#define MAX_INDIRECT_SEGMENTS 256

/*
 * Xen uses 4K pages. The guest may use a different page size (4K or 64K).
 * Number of Xen pages per segment:
 */
#define XEN_PAGES_PER_SEGMENT	(PAGE_SIZE / XEN_PAGE_SIZE)

#define XEN_PAGES_PER_INDIRECT_FRAME \
	(XEN_PAGE_SIZE/sizeof(struct blkif_request_segment))
#define SEGS_PER_INDIRECT_FRAME	\
	(XEN_PAGES_PER_INDIRECT_FRAME / XEN_PAGES_PER_SEGMENT)

#define MAX_INDIRECT_PAGES \
	((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) DIV_ROUND_UP(_segs, XEN_PAGES_PER_INDIRECT_FRAME)
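
/*
 * Worked example (a sketch only; it assumes XEN_PAGE_SIZE is 4 KiB and
 * sizeof(struct blkif_request_segment) is 8 bytes):
 *
 *   4 KiB guest pages:  XEN_PAGES_PER_SEGMENT        = 1
 *                       XEN_PAGES_PER_INDIRECT_FRAME = 4096 / 8 = 512
 *                       SEGS_PER_INDIRECT_FRAME      = 512 / 1  = 512
 *                       MAX_INDIRECT_PAGES           = (256 + 511) / 512 = 1
 *
 *   64 KiB guest pages: XEN_PAGES_PER_SEGMENT        = 16
 *                       SEGS_PER_INDIRECT_FRAME      = 512 / 16 = 32
 *                       MAX_INDIRECT_PAGES           = (256 + 31) / 32 = 8
 */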

/* Not a real protocol. Used to generate ring structs which contain
 * the elements common to all protocols only. This way we get a
 * compiler-checkable way to use common struct elements, so we can
 * avoid using switch(protocol) in a number of places. */
struct blkif_common_request {
	char dummy;
};

/* i386 protocol version */

struct blkif_x86_32_request_rw {
	uint8_t		nr_segments;	/* number of segments */
	blkif_vdev_t	handle;		/* only for read/write requests */
	uint64_t	id;		/* private guest value, echoed in resp */
	blkif_sector_t	sector_number;	/* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_32_request_discard {
	uint8_t		flag;		/* BLKIF_DISCARD_SECURE or zero */
	blkif_vdev_t	_pad1;		/* was "handle" for read/write requests */
	uint64_t	id;		/* private guest value, echoed in resp */
	blkif_sector_t	sector_number;	/* start sector idx on disk (r/w only) */
	uint64_t	nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_32_request_other {
	uint8_t		_pad1;
	blkif_vdev_t	_pad2;
	uint64_t	id;		/* private guest value, echoed in resp */
} __attribute__((__packed__));

struct blkif_x86_32_request_indirect {
	uint8_t		indirect_op;
	uint16_t	nr_segments;
	uint64_t	id;
	blkif_sector_t	sector_number;
	blkif_vdev_t	handle;
	uint16_t	_pad1;
	grant_ref_t	indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint64_t	_pad2;		/* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_32_request {
	uint8_t		operation;	/* BLKIF_OP_??? */
	union {
		struct blkif_x86_32_request_rw rw;
		struct blkif_x86_32_request_discard discard;
		struct blkif_x86_32_request_other other;
		struct blkif_x86_32_request_indirect indirect;
	} u;
} __attribute__((__packed__));

/* x86_64 protocol version */

struct blkif_x86_64_request_rw {
	uint8_t		nr_segments;	/* number of segments */
	blkif_vdev_t	handle;		/* only for read/write requests */
	uint32_t	_pad1;		/* offsetof(blkif_request..,u.rw.id)==8 */
	uint64_t	id;
	blkif_sector_t	sector_number;	/* start sector idx on disk (r/w only) */
	struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));

struct blkif_x86_64_request_discard {
	uint8_t		flag;		/* BLKIF_DISCARD_SECURE or zero */
	blkif_vdev_t	_pad1;		/* was "handle" for read/write requests */
	uint32_t	_pad2;		/* offsetof(blkif_..,u.discard.id)==8 */
	uint64_t	id;
	blkif_sector_t	sector_number;	/* start sector idx on disk (r/w only) */
	uint64_t	nr_sectors;
} __attribute__((__packed__));

struct blkif_x86_64_request_other {
	uint8_t		_pad1;
	blkif_vdev_t	_pad2;
	uint32_t	_pad3;		/* offsetof(blkif_..,u.other.id)==8 */
	uint64_t	id;		/* private guest value, echoed in resp */
} __attribute__((__packed__));

struct blkif_x86_64_request_indirect {
	uint8_t		indirect_op;
	uint16_t	nr_segments;
	uint32_t	_pad1;		/* offsetof(blkif_..,u.indirect.id)==8 */
	uint64_t	id;
	blkif_sector_t	sector_number;
	blkif_vdev_t	handle;
	uint16_t	_pad2;
	grant_ref_t	indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
	/*
	 * The maximum number of indirect segments (and pages) that will
	 * be used is determined by MAX_INDIRECT_SEGMENTS; this value
	 * is also exported to the guest (via the xenstore
	 * feature-max-indirect-segments entry), so the frontend knows how
	 * many indirect segments the backend supports.
	 */
	uint32_t	_pad3;		/* make it 64 byte aligned */
} __attribute__((__packed__));

struct blkif_x86_64_request {
	uint8_t		operation;	/* BLKIF_OP_??? */
	union {
		struct blkif_x86_64_request_rw rw;
		struct blkif_x86_64_request_discard discard;
		struct blkif_x86_64_request_other other;
		struct blkif_x86_64_request_indirect indirect;
	} u;
} __attribute__((__packed__));

DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
		  struct blkif_response);
DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
		  struct blkif_response __packed);
DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
		  struct blkif_response);

union blkif_back_rings {
	struct blkif_back_ring		native;
	struct blkif_common_back_ring	common;
	struct blkif_x86_32_back_ring	x86_32;
	struct blkif_x86_64_back_ring	x86_64;
};

enum blkif_protocol {
	BLKIF_PROTOCOL_NATIVE = 1,
	BLKIF_PROTOCOL_X86_32 = 2,
	BLKIF_PROTOCOL_X86_64 = 3,
};

/*
 * Default protocol if the frontend doesn't specify one.
 */
#ifdef CONFIG_X86
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_X86_32
#else
#  define BLKIF_PROTOCOL_DEFAULT BLKIF_PROTOCOL_NATIVE
#endif

struct xen_vbd {
	/* What the domain refers to this vbd as. */
	blkif_vdev_t		handle;
	/* Non-zero -> read-only */
	unsigned char		readonly;
	/* VDISK_xxx */
	unsigned char		type;
	/* phys device that this vbd maps to. */
	u32			pdevice;
	struct block_device	*bdev;
	/* Cached size parameter. */
	sector_t		size;
	unsigned int		flush_support:1;
	unsigned int		discard_secure:1;
	/* Connect-time cached feature_persistent parameter value */
	unsigned int		feature_gnt_persistent_parm:1;
	/* Persistent grants feature negotiation result */
	unsigned int		feature_gnt_persistent:1;
	unsigned int		overflow_max_grants:1;
};

struct backend_info;

/* Number of requests that we can fit in a ring */
#define XEN_BLKIF_REQS_PER_PAGE 32
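
/*
 * Note (a sketch of how this constant is typically scaled, not a guarantee
 * made by this header): the backend preallocates its 'pending_req' pool per
 * ring as roughly XEN_BLKIF_REQS_PER_PAGE multiplied by the number of
 * negotiated ring pages; the actual allocation lives in the xenbus setup
 * code, not here.
 */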

struct persistent_gnt {
	struct page		*page;
	grant_ref_t		gnt;
	grant_handle_t		handle;
	unsigned long		last_used;
	bool			active;
	struct rb_node		node;
	struct list_head	remove_node;
};

/* Per-ring information. */
struct xen_blkif_ring {
	/* Physical parameters of the comms window. */
	unsigned int		irq;
	union blkif_back_rings	blk_rings;
	void			*blk_ring;
	/* Private fields. */
	spinlock_t		blk_ring_lock;

	wait_queue_head_t	wq;
	atomic_t		inflight;
	bool			active;
	/* One thread per blkif ring. */
	struct task_struct	*xenblkd;
	unsigned int		waiting_reqs;

	/* List of all 'pending_req' available */
	struct list_head	pending_free;
	/* And its spinlock. */
	spinlock_t		pending_free_lock;
	wait_queue_head_t	pending_free_wq;

	/* Tree to store persistent grants. */
	struct rb_root		persistent_gnts;
	unsigned int		persistent_gnt_c;
	atomic_t		persistent_gnt_in_use;
	unsigned long		next_lru;

	/* Statistics. */
	unsigned long		st_print;
	unsigned long long	st_rd_req;
	unsigned long long	st_wr_req;
	unsigned long long	st_oo_req;
	unsigned long long	st_f_req;
	unsigned long long	st_ds_req;
	unsigned long long	st_rd_sect;
	unsigned long long	st_wr_sect;

	/* Used by the kworker that offloads work from the persistent purge. */
	struct list_head	persistent_purge_list;
	struct work_struct	persistent_purge_work;

	/* Buffer of free pages to map grant refs. */
	struct gnttab_page_cache free_pages;

	struct work_struct	free_work;
	/* Thread shutdown wait queue. */
	wait_queue_head_t	shutdown_wq;
	struct xen_blkif	*blkif;
};

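/*
 * Per-device state: one xen_blkif is instantiated for each exported virtual
 * block device, and it owns the array of xen_blkif_ring structures above
 * (nr_rings entries) when the frontend negotiates multiple queues.
 */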
struct xen_blkif {
	/* Unique identifier for this interface. */
	domid_t			domid;
	unsigned int		handle;
	/* Comms information. */
	enum blkif_protocol	blk_protocol;
	/* The VBD attached to this interface. */
	struct xen_vbd		vbd;
	/* Back pointer to the backend_info. */
	struct backend_info	*be;
	atomic_t		refcnt;
	/* for barrier (drain) requests */
	struct completion	drain_complete;
	atomic_t		drain;

	struct work_struct	free_work;
	unsigned int		nr_ring_pages;
	bool			multi_ref;
	/* All rings for this device. */
	struct xen_blkif_ring	*rings;
	unsigned int		nr_rings;
	unsigned long		buffer_squeeze_end;
};

struct seg_buf {
	unsigned long	offset;
	unsigned int	nsec;
};

struct grant_page {
	struct page		*page;
	struct persistent_gnt	*persistent_gnt;
	grant_handle_t		handle;
	grant_ref_t		gref;
};

/*
 * Each outstanding request that we've passed to the lower device layers has a
 * 'pending_req' allocated to it. Each bio that completes decrements the
 * pendcnt towards zero. When it hits zero, the specified domain has a
 * response queued for it, with the saved 'id' passed back.
 */
struct pending_req {
	struct xen_blkif_ring	*ring;
	u64			id;
	int			nr_segs;
	atomic_t		pendcnt;
	unsigned short		operation;
	int			status;
	struct list_head	free_list;
	struct grant_page	*segments[MAX_INDIRECT_SEGMENTS];
	/* Indirect descriptors */
	struct grant_page	*indirect_pages[MAX_INDIRECT_PAGES];
	struct seg_buf		seg[MAX_INDIRECT_SEGMENTS];
	struct bio		*biolist[MAX_INDIRECT_SEGMENTS];
	struct gnttab_unmap_grant_ref unmap[MAX_INDIRECT_SEGMENTS];
	struct page		*unmap_pages[MAX_INDIRECT_SEGMENTS];
	struct gntab_unmap_queue_data gnttab_unmap_data;
};

#define vbd_sz(_v)	((_v)->bdev->bd_part ? \
			 (_v)->bdev->bd_part->nr_sects : \
			 get_capacity((_v)->bdev->bd_disk))

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b)				\
	do {						\
		if (atomic_dec_and_test(&(_b)->refcnt))	\
			schedule_work(&(_b)->free_work);\
	} while (0)
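
/*
 * Reference counting sketch (illustrative only): take a reference while a
 * ring or an in-flight request holds on to the blkif, and drop it when done.
 * The final xen_blkif_put() does not free synchronously; it schedules
 * blkif->free_work so the teardown runs from process context:
 *
 *	xen_blkif_get(blkif);
 *	... hand blkif to a ring / in-flight request ...
 *	xen_blkif_put(blkif);	<-- last put schedules blkif->free_work
 */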

struct phys_req {
	unsigned short		dev;
	blkif_sector_t		nr_sects;
	struct block_device	*bdev;
	blkif_sector_t		sector_number;
};

int xen_blkif_interface_init(void);
void xen_blkif_interface_fini(void);

int xen_blkif_xenbus_init(void);
void xen_blkif_xenbus_fini(void);

irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
void xen_blkbk_free_caches(struct xen_blkif_ring *ring);

int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
			      struct backend_info *be, int state);

int xen_blkbk_barrier(struct xenbus_transaction xbt,
		      struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
void xen_blkbk_unmap_purged_grants(struct work_struct *work);

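/*
 * Copy a request from the 32-bit ABI layout on the shared ring into the
 * native layout. The operation field is read exactly once (READ_ONCE) and
 * the header fields are copied before the barrier(), so the segment/gref
 * copy loops below are bounded by the already-copied counts rather than by
 * values the frontend could still be changing in shared memory.
 */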
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

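/* Same as blkif_get_x86_32_req(), but for the x86_64 ABI layout. */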
static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
{
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
	dst->operation = READ_ONCE(src->operation);
	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
	case BLKIF_OP_FLUSH_DISKCACHE:
		dst->u.rw.nr_segments = src->u.rw.nr_segments;
		dst->u.rw.handle = src->u.rw.handle;
		dst->u.rw.id = src->u.rw.id;
		dst->u.rw.sector_number = src->u.rw.sector_number;
		barrier();
		if (n > dst->u.rw.nr_segments)
			n = dst->u.rw.nr_segments;
		for (i = 0; i < n; i++)
			dst->u.rw.seg[i] = src->u.rw.seg[i];
		break;
	case BLKIF_OP_DISCARD:
		dst->u.discard.flag = src->u.discard.flag;
		dst->u.discard.id = src->u.discard.id;
		dst->u.discard.sector_number = src->u.discard.sector_number;
		dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
		break;
	case BLKIF_OP_INDIRECT:
		dst->u.indirect.indirect_op = src->u.indirect.indirect_op;
		dst->u.indirect.nr_segments = src->u.indirect.nr_segments;
		dst->u.indirect.handle = src->u.indirect.handle;
		dst->u.indirect.id = src->u.indirect.id;
		dst->u.indirect.sector_number = src->u.indirect.sector_number;
		barrier();
		j = min(MAX_INDIRECT_PAGES, INDIRECT_PAGES(dst->u.indirect.nr_segments));
		for (i = 0; i < j; i++)
			dst->u.indirect.indirect_grefs[i] =
				src->u.indirect.indirect_grefs[i];
		break;
	default:
		/*
		 * Don't know how to translate this op. Only get the
		 * ID so failure can be reported to the frontend.
		 */
		dst->u.other.id = src->u.other.id;
		break;
	}
}

#endif /* __XEN_BLKIF__BACKEND__COMMON_H__ */