/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen-blkback: " fmt

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include <xen/grant_table.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this to a value too low will reduce memory used in each backend,
 * but can have a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST, but it
 * can be set to a lower value, which might degrade performance on some
 * I/O-intensive workloads.
 */

static int max_buffer_pages = 1024;
module_param_named(max_buffer_pages, max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");
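
/*
 * Illustrative sizing only (the in-flight request count below is an
 * assumption, not something defined in this file): with
 * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 segments per request, a backend
 * expected to keep 64 requests in flight would want roughly
 * 64 * 11 = 704 cached free pages, comfortably below the 1024 default.
 */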

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int max_pgrants = 1056;
module_param_named(max_persistent_grants, max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
		 "Maximum number of grants to map persistently");

/*
 * How long a persistent grant is allowed to remain allocated without being in
 * use. The time is in seconds; 0 means indefinitely long.
 */

static unsigned int pgrant_timeout = 60;
module_param_named(persistent_grant_unused_seconds, pgrant_timeout,
		   uint, 0644);
MODULE_PARM_DESC(persistent_grant_unused_seconds,
		 "Time in seconds an unused persistent grant is allowed to "
		 "remain allocated. Default is 60, 0 means unlimited.");

/*
 * Maximum number of rings/queues blkback supports; if the user has not
 * specified a value, allow as many queues as there are CPUs.
 */
unsigned int xenblk_max_queues;
module_param_named(max_queues, xenblk_max_queues, uint, 0644);
MODULE_PARM_DESC(max_queues,
		 "Maximum number of hardware queues per virtual disk." \
		 " By default it is the number of online CPUs.");

/*
 * Maximum order of pages to be used for the shared ring between frontend and
 * backend; 4KB page granularity is used.
 */
unsigned int xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed at each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5

/* Run-time switchable: /sys/module/blkback/parameters/ */
static unsigned int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

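/*
 * True when an idle persistent grant has been unused for longer than the
 * configured pgrant_timeout (a timeout of 0 disables this check).
 */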
static inline bool persistent_gnt_timeout(struct persistent_gnt *persistent_gnt)
{
	return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
			HZ * pgrant_timeout);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req);
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st);

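/*
 * Walk a red-black tree of persistent grants in sorted order while allowing
 * the current entry to be erased and freed: the next node is looked up
 * before the loop body runs, so removing 'pos' from the tree is safe.
 */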
#define foreach_grant_safe(pos, n, rbtree, node) \
	for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
	     &(pos)->node != NULL; \
	     (pos) = container_of(n, typeof(*(pos)), node), \
	     (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)


/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called recursively.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif_ring *ring,
			      struct persistent_gnt *persistent_gnt)
{
	struct rb_node **new = NULL, *parent = NULL;
	struct persistent_gnt *this;
	struct xen_blkif *blkif = ring->blkif;

	if (ring->persistent_gnt_c >= max_pgrants) {
		if (!blkif->vbd.overflow_max_grants)
			blkif->vbd.overflow_max_grants = 1;
		return -EBUSY;
	}
	/* Figure out where to put new node */
	new = &ring->persistent_gnts.rb_node;
	while (*new) {
		this = container_of(*new, struct persistent_gnt, node);

		parent = *new;
		if (persistent_gnt->gnt < this->gnt)
			new = &((*new)->rb_left);
		else if (persistent_gnt->gnt > this->gnt)
			new = &((*new)->rb_right);
		else {
			pr_alert_ratelimited("trying to add a gref that's already in the tree\n");
			return -EINVAL;
		}
	}

	persistent_gnt->active = true;
	/* Add new node and rebalance tree. */
	rb_link_node(&(persistent_gnt->node), parent, new);
	rb_insert_color(&(persistent_gnt->node), &ring->persistent_gnts);
	ring->persistent_gnt_c++;
	atomic_inc(&ring->persistent_gnt_in_use);
	return 0;
}

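/*
 * Look up a grant reference in the ring's tree of persistently mapped
 * grants. On success the grant is marked active and returned; NULL is
 * returned if the gref is not in the tree or is already in use.
 */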
static struct persistent_gnt *get_persistent_gnt(struct xen_blkif_ring *ring,
						 grant_ref_t gref)
{
	struct persistent_gnt *data;
	struct rb_node *node = NULL;

	node = ring->persistent_gnts.rb_node;
	while (node) {
		data = container_of(node, struct persistent_gnt, node);

		if (gref < data->gnt)
			node = node->rb_left;
		else if (gref > data->gnt)
			node = node->rb_right;
		else {
			if (data->active) {
				pr_alert_ratelimited("requesting a grant already in use\n");
				return NULL;
			}
			data->active = true;
			atomic_inc(&ring->persistent_gnt_in_use);
			return data;
		}
	}
	return NULL;
}

static void put_persistent_gnt(struct xen_blkif_ring *ring,
			       struct persistent_gnt *persistent_gnt)
{
	if (!persistent_gnt->active)
		pr_alert_ratelimited("freeing a grant already unused\n");
	persistent_gnt->last_used = jiffies;
	persistent_gnt->active = false;
	atomic_dec(&ring->persistent_gnt_in_use);
}

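/*
 * Unmap and free every grant in the tree, issuing the grant-table unmap
 * operations in batches of at most BLKIF_MAX_SEGMENTS_PER_REQUEST.
 */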
static void free_persistent_gnts(struct xen_blkif_ring *ring, struct rb_root *root,
				 unsigned int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	int segs_to_unmap = 0;
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);
		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			(unsigned long) pfn_to_kaddr(page_to_pfn(
				persistent_gnt->page)),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
			!rb_next(&persistent_gnt->node)) {

			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));

			gnttab_page_cache_put(&ring->free_pages, pages,
					      segs_to_unmap);
			segs_to_unmap = 0;
		}

		rb_erase(&persistent_gnt->node, root);
		kfree(persistent_gnt);
		num--;
	}
	BUG_ON(num != 0);
}

void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt;
	int segs_to_unmap = 0;
	struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work);
	struct gntab_unmap_queue_data unmap_data;

	unmap_data.pages = pages;
	unmap_data.unmap_ops = unmap;
	unmap_data.kunmap_ops = NULL;

	while (!list_empty(&ring->persistent_purge_list)) {
		persistent_gnt = list_first_entry(&ring->persistent_purge_list,
						  struct persistent_gnt,
						  remove_node);
		list_del(&persistent_gnt->remove_node);

		gnttab_set_unmap_op(&unmap[segs_to_unmap],
			vaddr(persistent_gnt->page),
			GNTMAP_host_map,
			persistent_gnt->handle);

		pages[segs_to_unmap] = persistent_gnt->page;

		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
			unmap_data.count = segs_to_unmap;
			BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
			gnttab_page_cache_put(&ring->free_pages, pages,
					      segs_to_unmap);
			segs_to_unmap = 0;
		}
		kfree(persistent_gnt);
	}
	if (segs_to_unmap > 0) {
		unmap_data.count = segs_to_unmap;
		BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
		gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
	}
}

static void purge_persistent_gnt(struct xen_blkif_ring *ring)
{
	struct persistent_gnt *persistent_gnt;
	struct rb_node *n;
	unsigned int num_clean, total;
	bool scan_used = false;
	struct rb_root *root;

	if (work_busy(&ring->persistent_purge_work)) {
		pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
		goto out;
	}

	if (ring->persistent_gnt_c < max_pgrants ||
	    (ring->persistent_gnt_c == max_pgrants &&
	    !ring->blkif->vbd.overflow_max_grants)) {
		num_clean = 0;
	} else {
		num_clean = (max_pgrants / 100) * LRU_PERCENT_CLEAN;
		num_clean = ring->persistent_gnt_c - max_pgrants + num_clean;
		num_clean = min(ring->persistent_gnt_c, num_clean);
		pr_debug("Going to purge at least %u persistent grants\n",
			 num_clean);
	}
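	/*
	 * Worked example with illustrative numbers: if max_pgrants == 1056
	 * and persistent_gnt_c == 1100, the 5% slack is (1056 / 100) * 5 ==
	 * 50, so num_clean == 1100 - 1056 + 50 == 94, i.e. enough to get
	 * back below the limit plus some headroom.
	 */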

	/*
	 * At this point, we can be sure that there will be no calls
	 * to get_persistent_gnt (because we are executing this code from
	 * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
	 * which means that the number of currently used grants will go down,
	 * but never up, so we will always be able to remove the requested
	 * number of grants.
	 */

	total = 0;

	BUG_ON(!list_empty(&ring->persistent_purge_list));
	root = &ring->persistent_gnts;
purge_list:
	foreach_grant_safe(persistent_gnt, n, root, node) {
		BUG_ON(persistent_gnt->handle ==
			BLKBACK_INVALID_HANDLE);

		if (persistent_gnt->active)
			continue;
		if (!scan_used && !persistent_gnt_timeout(persistent_gnt))
			continue;
		if (scan_used && total >= num_clean)
			continue;

		rb_erase(&persistent_gnt->node, root);
		list_add(&persistent_gnt->remove_node,
			 &ring->persistent_purge_list);
		total++;
	}
	/*
	 * Check whether we also need to start cleaning
	 * grants that were used since the last purge in order to cope
	 * with the requested number.
	 */
	if (!scan_used && total < num_clean) {
		pr_debug("Still missing %u purged frames\n", num_clean - total);
		scan_used = true;
		goto purge_list;
	}

	if (total) {
		ring->persistent_gnt_c -= total;
		ring->blkif->vbd.overflow_max_grants = 0;

		/* We can defer this work */
		schedule_work(&ring->persistent_purge_work);
		pr_debug("Purged %u/%u\n", num_clean, total);
	}

out:
	return;
}

/*
 * Retrieve from the 'pending_reqs' a free pending_req structure to be used.
 */
static struct pending_req *alloc_req(struct xen_blkif_ring *ring)
{
	struct pending_req *req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	if (!list_empty(&ring->pending_free)) {
		req = list_entry(ring->pending_free.next, struct pending_req,
				 free_list);
		list_del(&req->free_list);
	}
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free request.
 */
static void free_req(struct xen_blkif_ring *ring, struct pending_req *req)
{
	unsigned long flags;
	int was_empty;

	spin_lock_irqsave(&ring->pending_free_lock, flags);
	was_empty = list_empty(&ring->pending_free);
	list_add(&req->free_list, &ring->pending_free);
	spin_unlock_irqrestore(&ring->pending_free_lock, flags);
	if (was_empty)
		wake_up(&ring->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
			     int operation)
{
	struct xen_vbd *vbd = &blkif->vbd;
	int rc = -EACCES;

	if ((operation != REQ_OP_READ) && vbd->readonly)
		goto out;

	if (likely(req->nr_sects)) {
		blkif_sector_t end = req->sector_number + req->nr_sects;

		if (unlikely(end < req->sector_number))
			goto out;
		if (unlikely(end > vbd_sz(vbd)))
			goto out;
	}

	req->dev = vbd->pdevice;
	req->bdev = vbd->bdev;
	rc = 0;

out:
	return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
	struct xen_vbd *vbd = &blkif->vbd;
	struct xenbus_transaction xbt;
	int err;
	struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
	unsigned long long new_size = vbd_sz(vbd);

	pr_info("VBD Resize: Domid: %d, Device: (%d, %d)\n",
		blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
	pr_info("VBD Resize: new size %llu\n", new_size);
	vbd->size = new_size;
again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("Error starting transaction\n");
		return;
	}
	err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
			    (unsigned long long)vbd_sz(vbd));
	if (err) {
		pr_warn("Error writing new size\n");
		goto abort;
	}
	/*
	 * Write the current state; we will use this to synchronize
	 * the front-end. If the current state is "connected" the
	 * front-end will get the new size information online.
	 */
	err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
	if (err) {
		pr_warn("Error writing the state\n");
		goto abort;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err == -EAGAIN)
		goto again;
	if (err)
		pr_warn("Error ending transaction\n");
	return;
abort:
	xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif_ring *ring)
{
	ring->waiting_reqs = 1;
	wake_up(&ring->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
	blkif_notify_work(dev_id);
	return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif_ring *ring)
{
	pr_info("(%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
		" | ds %4llu | pg: %4u/%4d\n",
		current->comm, ring->st_oo_req,
		ring->st_rd_req, ring->st_wr_req,
		ring->st_f_req, ring->st_ds_req,
		ring->persistent_gnt_c, max_pgrants);
	ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
	ring->st_rd_req = 0;
	ring->st_wr_req = 0;
	ring->st_oo_req = 0;
	ring->st_ds_req = 0;
}

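/*
 * Per-ring kernel thread: wait until the frontend posts requests (or a
 * pending_req becomes free), process the ring, periodically purge the
 * persistent-grant LRU and shrink the free-page cache, and only issue the
 * event-channel EOI once all outstanding work has been picked up.
 */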
int xen_blkif_schedule(void *arg)
{
	struct xen_blkif_ring *ring = arg;
	struct xen_blkif *blkif = ring->blkif;
	struct xen_vbd *vbd = &blkif->vbd;
	unsigned long timeout;
	int ret;
	bool do_eoi;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	set_freezable();
	while (!kthread_should_stop()) {
		if (try_to_freeze())
			continue;
		if (unlikely(vbd->size != vbd_sz(vbd)))
			xen_vbd_resize(blkif);

		timeout = msecs_to_jiffies(LRU_INTERVAL);

		timeout = wait_event_interruptible_timeout(
			ring->wq,
			ring->waiting_reqs || kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;
		timeout = wait_event_interruptible_timeout(
			ring->pending_free_wq,
			!list_empty(&ring->pending_free) ||
			kthread_should_stop(),
			timeout);
		if (timeout == 0)
			goto purge_gnt_list;

		do_eoi = ring->waiting_reqs;

		ring->waiting_reqs = 0;
		smp_mb(); /* clear flag *before* checking for work */

		ret = do_block_io_op(ring, &eoi_flags);
		if (ret > 0)
			ring->waiting_reqs = 1;
		if (ret == -EACCES)
			wait_event_interruptible(ring->shutdown_wq,
						 kthread_should_stop());

		if (do_eoi && !ring->waiting_reqs) {
			xen_irq_lateeoi(ring->irq, eoi_flags);
			eoi_flags |= XEN_EOI_FLAG_SPURIOUS;
		}

purge_gnt_list:
		if (blkif->vbd.feature_gnt_persistent &&
		    time_after(jiffies, ring->next_lru)) {
			purge_persistent_gnt(ring);
			ring->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
		}

		/* Shrink the free pages pool if it is too large. */
		if (time_before(jiffies, blkif->buffer_squeeze_end))
			gnttab_page_cache_shrink(&ring->free_pages, 0);
		else
			gnttab_page_cache_shrink(&ring->free_pages,
						 max_buffer_pages);

		if (log_stats && time_after(jiffies, ring->st_print))
			print_stats(ring);
	}

	/* Drain pending purge work */
	flush_work(&ring->persistent_purge_work);

	if (log_stats)
		print_stats(ring);

	ring->xenblkd = NULL;

	return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif_ring *ring)
{
	/* Free all persistent grant pages */
	if (!RB_EMPTY_ROOT(&ring->persistent_gnts))
		free_persistent_gnts(ring, &ring->persistent_gnts,
			ring->persistent_gnt_c);

	BUG_ON(!RB_EMPTY_ROOT(&ring->persistent_gnts));
	ring->persistent_gnt_c = 0;

	/* Since we are shutting down remove all pages from the buffer */
	gnttab_page_cache_shrink(&ring->free_pages, 0 /* All */);
}

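/*
 * Build the unmap operations for a request's pages: persistent grants are
 * only released via put_persistent_gnt() (their mapping stays in place),
 * while non-persistent mappings are queued for an actual grant unmap.
 * Returns the number of unmap operations prepared.
 */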
static unsigned int xen_blkbk_unmap_prepare(
	struct xen_blkif_ring *ring,
	struct grant_page **pages,
	unsigned int num,
	struct gnttab_unmap_grant_ref *unmap_ops,
	struct page **unmap_pages)
{
	unsigned int i, invcount = 0;

	for (i = 0; i < num; i++) {
		if (pages[i]->persistent_gnt != NULL) {
			put_persistent_gnt(ring, pages[i]->persistent_gnt);
			continue;
		}
		if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
			continue;
		unmap_pages[invcount] = pages[i]->page;
		gnttab_set_unmap_op(&unmap_ops[invcount], vaddr(pages[i]->page),
				    GNTMAP_host_map, pages[i]->handle);
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
		invcount++;
	}

	return invcount;
}

static void xen_blkbk_unmap_and_respond_callback(int result, struct gntab_unmap_queue_data *data)
{
	struct pending_req *pending_req = (struct pending_req *)(data->data);
	struct xen_blkif_ring *ring = pending_req->ring;
	struct xen_blkif *blkif = ring->blkif;

	/*
	 * BUG_ON used to reproduce existing behaviour,
	 * but is this the best way to deal with this?
	 */
	BUG_ON(result);

	gnttab_page_cache_put(&ring->free_pages, data->pages, data->count);
	make_response(ring, pending_req->id,
		      pending_req->operation, pending_req->status);
	free_req(ring, pending_req);
	/*
	 * Make sure the request is freed before releasing blkif,
	 * or there could be a race between free_req and the
	 * cleanup done in xen_blkif_free during shutdown.
	 *
	 * NB: The fact that we might try to wake up pending_free_wq
	 * before drain_complete (in case there's a drain going on)
	 * is not a problem with our current implementation, because
	 * we can be sure there's no thread waiting on pending_free_wq
	 * if there's a drain going on, but it has to be taken into
	 * account if the current model is changed.
	 */
	if (atomic_dec_and_test(&ring->inflight) && atomic_read(&blkif->drain)) {
		complete(&blkif->drain_complete);
	}
	xen_blkif_put(blkif);
}

static void xen_blkbk_unmap_and_respond(struct pending_req *req)
{
	struct gntab_unmap_queue_data *work = &req->gnttab_unmap_data;
	struct xen_blkif_ring *ring = req->ring;
	struct grant_page **pages = req->segments;
	unsigned int invcount;

	invcount = xen_blkbk_unmap_prepare(ring, pages, req->nr_segs,
					   req->unmap, req->unmap_pages);

	work->data = req;
	work->done = xen_blkbk_unmap_and_respond_callback;
	work->unmap_ops = req->unmap;
	work->kunmap_ops = NULL;
	work->pages = req->unmap_pages;
	work->count = invcount;

	gnttab_unmap_refs_async(&req->gnttab_unmap_data);
}


/*
 * Unmap the grant references.
 *
 * This could accumulate ops up to the batch size to reduce the number
 * of hypercalls, but since this is only used in error paths there's
 * no real need.
 */
static void xen_blkbk_unmap(struct xen_blkif_ring *ring,
			    struct grant_page *pages[],
			    int num)
{
	struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	unsigned int invcount = 0;
	int ret;

	while (num) {
		unsigned int batch = min(num, BLKIF_MAX_SEGMENTS_PER_REQUEST);

		invcount = xen_blkbk_unmap_prepare(ring, pages, batch,
						   unmap, unmap_pages);
		if (invcount) {
			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
			BUG_ON(ret);
			gnttab_page_cache_put(&ring->free_pages, unmap_pages,
					      invcount);
		}
		pages += batch;
		num -= batch;
	}
}

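/*
 * Map the frontend's grant references into this domain, in batches of at
 * most BLKIF_MAX_SEGMENTS_PER_REQUEST. Already-persistent grants are reused
 * without a new hypercall; newly mapped grants are added to the persistent
 * tree opportunistically while there is room for them.
 */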
static int xen_blkbk_map(struct xen_blkif_ring *ring,
			 struct grant_page *pages[],
			 int num, bool ro)
{
	struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
	struct persistent_gnt *persistent_gnt = NULL;
	phys_addr_t addr = 0;
	int i, seg_idx, new_map_idx;
	int segs_to_map = 0;
	int ret = 0;
	int last_map = 0, map_until = 0;
	int use_persistent_gnts;
	struct xen_blkif *blkif = ring->blkif;

	use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

	/*
	 * Fill out preq.nr_sects with the proper number of sectors, and set up
	 * map[..] with the PFN of the page in our domain together with the
	 * corresponding grant reference for each page.
	 */
again:
	for (i = map_until; i < num; i++) {
		uint32_t flags;

		if (use_persistent_gnts) {
			persistent_gnt = get_persistent_gnt(
				ring,
				pages[i]->gref);
		}

		if (persistent_gnt) {
			/*
			 * We are using persistent grants and
			 * the grant is already mapped
			 */
			pages[i]->page = persistent_gnt->page;
			pages[i]->persistent_gnt = persistent_gnt;
		} else {
			if (gnttab_page_cache_get(&ring->free_pages,
						  &pages[i]->page)) {
				gnttab_page_cache_put(&ring->free_pages,
						      pages_to_gnt,
						      segs_to_map);
				ret = -ENOMEM;
				goto out;
			}
			addr = vaddr(pages[i]->page);
			pages_to_gnt[segs_to_map] = pages[i]->page;
			pages[i]->persistent_gnt = NULL;
			flags = GNTMAP_host_map;
			if (!use_persistent_gnts && ro)
				flags |= GNTMAP_readonly;
			gnttab_set_map_op(&map[segs_to_map++], addr,
					  flags, pages[i]->gref,
					  blkif->domid);
		}
		map_until = i + 1;
		if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
			break;
	}

	if (segs_to_map)
		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);

	/*
	 * Now swizzle the MFN in our domain with the MFN from the other domain
	 * so that when we access vaddr(pending_req,i) it has the contents of
	 * the page from the other domain.
	 */
	for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
		if (!pages[seg_idx]->persistent_gnt) {
			/* This is a newly mapped grant */
			BUG_ON(new_map_idx >= segs_to_map);
			if (unlikely(map[new_map_idx].status != 0)) {
				pr_debug("invalid buffer -- could not remap it\n");
				gnttab_page_cache_put(&ring->free_pages,
						      &pages[seg_idx]->page, 1);
				pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
				ret |= !ret;
				goto next;
			}
			pages[seg_idx]->handle = map[new_map_idx].handle;
		} else {
			continue;
		}
		if (use_persistent_gnts &&
		    ring->persistent_gnt_c < max_pgrants) {
			/*
			 * We are using persistent grants, the grant is
			 * not mapped but we might have room for it.
			 */
			persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
						 GFP_KERNEL);
			if (!persistent_gnt) {
				/*
				 * If we don't have enough memory to
				 * allocate the persistent_gnt struct,
				 * map this grant non-persistently.
				 */
				goto next;
			}
			persistent_gnt->gnt = map[new_map_idx].ref;
			persistent_gnt->handle = map[new_map_idx].handle;
			persistent_gnt->page = pages[seg_idx]->page;
			if (add_persistent_gnt(ring,
					       persistent_gnt)) {
				kfree(persistent_gnt);
				persistent_gnt = NULL;
				goto next;
			}
			pages[seg_idx]->persistent_gnt = persistent_gnt;
			pr_debug("grant %u added to the tree of persistent grants, using %u/%u\n",
				 persistent_gnt->gnt, ring->persistent_gnt_c,
				 max_pgrants);
			goto next;
		}
		if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
			blkif->vbd.overflow_max_grants = 1;
			pr_debug("domain %u, device %#x is using maximum number of persistent grants\n",
				 blkif->domid, blkif->vbd.handle);
		}
		/*
		 * We could not map this grant persistently, so use it as
		 * a non-persistent grant.
		 */
next:
		new_map_idx++;
	}
	segs_to_map = 0;
	last_map = map_until;
	if (!ret && map_until != num)
		goto again;

out:
	for (i = last_map; i < num; i++) {
		/* Don't zap current batch's valid persistent grants. */
		if (i >= map_until)
			pages[i]->persistent_gnt = NULL;
		pages[i]->handle = BLKBACK_INVALID_HANDLE;
	}

	return ret;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
	int rc;

	rc = xen_blkbk_map(pending_req->ring, pending_req->segments,
			   pending_req->nr_segs,
			   (pending_req->operation != BLKIF_OP_READ));

	return rc;
}

static int xen_blkbk_parse_indirect(struct blkif_request *req,
				    struct pending_req *pending_req,
				    struct seg_buf seg[],
				    struct phys_req *preq)
{
	struct grant_page **pages = pending_req->indirect_pages;
	struct xen_blkif_ring *ring = pending_req->ring;
	int indirect_grefs, rc, n, nseg, i;
	struct blkif_request_segment *segments = NULL;

	nseg = pending_req->nr_segs;
	indirect_grefs = INDIRECT_PAGES(nseg);
	BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

	for (i = 0; i < indirect_grefs; i++)
		pages[i]->gref = req->u.indirect.indirect_grefs[i];

	rc = xen_blkbk_map(ring, pages, indirect_grefs, true);
	if (rc)
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
		uint8_t first_sect, last_sect;

		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
				kunmap_atomic(segments);
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;

		pending_req->segments[n]->gref = segments[i].gref;

		first_sect = READ_ONCE(segments[i].first_sect);
		last_sect = READ_ONCE(segments[i].last_sect);
		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}

		seg[n].nsec = last_sect - first_sect + 1;
		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

unmap:
	if (segments)
		kunmap_atomic(segments);
	xen_blkbk_unmap(ring, pages, indirect_grefs);
	return rc;
}

static int dispatch_discard_io(struct xen_blkif_ring *ring,
			       struct blkif_request *req)
{
	int err = 0;
	int status = BLKIF_RSP_OKAY;
	struct xen_blkif *blkif = ring->blkif;
	struct block_device *bdev = blkif->vbd.bdev;
	unsigned long secure;
	struct phys_req preq;

	xen_blkif_get(blkif);

	preq.sector_number = req->u.discard.sector_number;
	preq.nr_sects = req->u.discard.nr_sectors;

	err = xen_vbd_translate(&preq, blkif, REQ_OP_WRITE);
	if (err) {
		pr_warn("access denied: DISCARD [%llu->%llu] on dev=%04x\n",
			preq.sector_number,
			preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
		goto fail_response;
	}
	ring->st_ds_req++;

	secure = (blkif->vbd.discard_secure &&
		  (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
		 BLKDEV_DISCARD_SECURE : 0;

	err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
				   req->u.discard.nr_sectors,
				   GFP_KERNEL, secure);
fail_response:
	if (err == -EOPNOTSUPP) {
		pr_debug("discard op failed, not supported\n");
		status = BLKIF_RSP_EOPNOTSUPP;
	} else if (err)
		status = BLKIF_RSP_ERROR;

	make_response(ring, req->u.discard.id, req->operation, status);
	xen_blkif_put(blkif);
	return err;
}

static int dispatch_other_io(struct xen_blkif_ring *ring,
			     struct blkif_request *req,
			     struct pending_req *pending_req)
{
	free_req(ring, pending_req);
	make_response(ring, req->u.other.id, req->operation,
		      BLKIF_RSP_EOPNOTSUPP);
	return -EIO;
}

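/*
 * Block until every request in flight on this ring has completed; used when
 * a request needs all previously submitted I/O to have finished (e.g. the
 * drain performed for write-barrier handling).
 */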
static void xen_blk_drain_io(struct xen_blkif_ring *ring)
{
	struct xen_blkif *blkif = ring->blkif;

	atomic_set(&blkif->drain, 1);
	do {
		if (atomic_read(&ring->inflight) == 0)
			break;
		wait_for_completion_interruptible_timeout(
				&blkif->drain_complete, HZ);

		if (!atomic_read(&blkif->drain))
			break;
	} while (!kthread_should_stop());
	atomic_set(&blkif->drain, 0);
}

static void __end_block_io_op(struct pending_req *pending_req,
			      blk_status_t error)
{
	/* An error fails the entire request. */
	if (pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE &&
	    error == BLK_STS_NOTSUPP) {
		pr_debug("flush diskcache op failed, not supported\n");
		xen_blkbk_flush_diskcache(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (pending_req->operation == BLKIF_OP_WRITE_BARRIER &&
		   error == BLK_STS_NOTSUPP) {
		pr_debug("write barrier op failed, not supported\n");
		xen_blkbk_barrier(XBT_NIL, pending_req->ring->blkif->be, 0);
		pending_req->status = BLKIF_RSP_EOPNOTSUPP;
	} else if (error) {
		pr_debug("Buffer not up-to-date at end of operation,"
			 " error=%d\n", error);
		pending_req->status = BLKIF_RSP_ERROR;
	}

	/*
	 * If all of the bio's have completed it is time to unmap
	 * the grant references associated with 'request' and provide
	 * the proper response on the ring.
	 */
	if (atomic_dec_and_test(&pending_req->pendcnt))
		xen_blkbk_unmap_and_respond(pending_req);
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio)
{
	__end_block_io_op(bio->bi_private, bio->bi_status);
	bio_put(bio);
}

/*
 * Function to copy from the ring buffer the 'struct blkif_request' (which
 * has the sectors we want, the number of them, grant references, etc.),
 * and transmute it to the block API to hand it over to the proper block disk.
 */
static int
__do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
1083*4882a593Smuzhiyun {
1084*4882a593Smuzhiyun union blkif_back_rings *blk_rings = &ring->blk_rings;
1085*4882a593Smuzhiyun struct blkif_request req;
1086*4882a593Smuzhiyun struct pending_req *pending_req;
1087*4882a593Smuzhiyun RING_IDX rc, rp;
1088*4882a593Smuzhiyun int more_to_do = 0;
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun rc = blk_rings->common.req_cons;
1091*4882a593Smuzhiyun rp = blk_rings->common.sring->req_prod;
1092*4882a593Smuzhiyun rmb(); /* Ensure we see queued requests up to 'rp'. */
1093*4882a593Smuzhiyun
1094*4882a593Smuzhiyun if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
1095*4882a593Smuzhiyun rc = blk_rings->common.rsp_prod_pvt;
1096*4882a593Smuzhiyun pr_warn("Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
1097*4882a593Smuzhiyun rp, rc, rp - rc, ring->blkif->vbd.pdevice);
1098*4882a593Smuzhiyun return -EACCES;
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun while (rc != rp) {
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
1103*4882a593Smuzhiyun break;
1104*4882a593Smuzhiyun
1105*4882a593Smuzhiyun /* We've seen a request, so clear spurious eoi flag. */
1106*4882a593Smuzhiyun *eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun if (kthread_should_stop()) {
1109*4882a593Smuzhiyun more_to_do = 1;
1110*4882a593Smuzhiyun break;
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun pending_req = alloc_req(ring);
1114*4882a593Smuzhiyun if (NULL == pending_req) {
1115*4882a593Smuzhiyun ring->st_oo_req++;
1116*4882a593Smuzhiyun more_to_do = 1;
1117*4882a593Smuzhiyun break;
1118*4882a593Smuzhiyun }
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun switch (ring->blkif->blk_protocol) {
1121*4882a593Smuzhiyun case BLKIF_PROTOCOL_NATIVE:
1122*4882a593Smuzhiyun memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
1123*4882a593Smuzhiyun break;
1124*4882a593Smuzhiyun case BLKIF_PROTOCOL_X86_32:
1125*4882a593Smuzhiyun blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
1126*4882a593Smuzhiyun break;
1127*4882a593Smuzhiyun case BLKIF_PROTOCOL_X86_64:
1128*4882a593Smuzhiyun blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
1129*4882a593Smuzhiyun break;
1130*4882a593Smuzhiyun default:
1131*4882a593Smuzhiyun BUG();
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun blk_rings->common.req_cons = ++rc; /* before make_response() */
1134*4882a593Smuzhiyun
1135*4882a593Smuzhiyun /* Apply all sanity checks to /private copy/ of request. */
1136*4882a593Smuzhiyun barrier();
1137*4882a593Smuzhiyun
1138*4882a593Smuzhiyun switch (req.operation) {
1139*4882a593Smuzhiyun case BLKIF_OP_READ:
1140*4882a593Smuzhiyun case BLKIF_OP_WRITE:
1141*4882a593Smuzhiyun case BLKIF_OP_WRITE_BARRIER:
1142*4882a593Smuzhiyun case BLKIF_OP_FLUSH_DISKCACHE:
1143*4882a593Smuzhiyun case BLKIF_OP_INDIRECT:
1144*4882a593Smuzhiyun if (dispatch_rw_block_io(ring, &req, pending_req))
1145*4882a593Smuzhiyun goto done;
1146*4882a593Smuzhiyun break;
1147*4882a593Smuzhiyun case BLKIF_OP_DISCARD:
1148*4882a593Smuzhiyun free_req(ring, pending_req);
1149*4882a593Smuzhiyun if (dispatch_discard_io(ring, &req))
1150*4882a593Smuzhiyun goto done;
1151*4882a593Smuzhiyun break;
1152*4882a593Smuzhiyun default:
1153*4882a593Smuzhiyun if (dispatch_other_io(ring, &req, pending_req))
1154*4882a593Smuzhiyun goto done;
1155*4882a593Smuzhiyun break;
1156*4882a593Smuzhiyun }
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun /* Yield point for this unbounded loop. */
1159*4882a593Smuzhiyun cond_resched();
1160*4882a593Smuzhiyun }
1161*4882a593Smuzhiyun done:
1162*4882a593Smuzhiyun return more_to_do;
1163*4882a593Smuzhiyun }
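
/*
 * Drain the shared ring. __do_block_io_op() consumes whatever requests are
 * currently visible; RING_FINAL_CHECK_FOR_REQUESTS() then re-arms the
 * frontend notification and re-checks for requests, so a request posted just
 * as the inner pass finishes is not missed.
 */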
static int
do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags)
{
	union blkif_back_rings *blk_rings = &ring->blk_rings;
	int more_to_do;

	do {
		more_to_do = __do_block_io_op(ring, eoi_flags);
		if (more_to_do)
			break;

		RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
	} while (more_to_do);

	return more_to_do;
}
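
/*
 * Illustrative sketch (not the authoritative caller): in this backend the
 * ring is driven by the per-ring service thread, which clears the spurious
 * flag via __do_block_io_op() above and acknowledges the event channel with
 * a late EOI once a pass is done.  Roughly:
 *
 *	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;
 *
 *	if (do_block_io_op(ring, &eoi_flags))
 *		ring->waiting_reqs = 1;
 *	xen_irq_lateeoi(ring->irq, eoi_flags);
 *
 * The real loop lives elsewhere in this file and also handles shutdown and
 * error returns; treat the snippet above as an assumption-laden outline.
 */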

/*
 * Turn the 'struct blkif_request' into a proper 'struct bio' (or several)
 * and call submit_bio() to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
				struct blkif_request *req,
				struct pending_req *pending_req)
{
	struct phys_req preq;
	struct seg_buf *seg = pending_req->seg;
	unsigned int nseg;
	struct bio *bio = NULL;
	struct bio **biolist = pending_req->biolist;
	int i, nbio = 0;
	int operation;
	int operation_flags = 0;
	struct blk_plug plug;
	bool drain = false;
	struct grant_page **pages = pending_req->segments;
	unsigned short req_operation;

	req_operation = req->operation == BLKIF_OP_INDIRECT ?
			req->u.indirect.indirect_op : req->operation;

	if ((req->operation == BLKIF_OP_INDIRECT) &&
	    (req_operation != BLKIF_OP_READ) &&
	    (req_operation != BLKIF_OP_WRITE)) {
		pr_debug("Invalid indirect operation (%u)\n", req_operation);
		goto fail_response;
	}

	switch (req_operation) {
	case BLKIF_OP_READ:
		ring->st_rd_req++;
		operation = REQ_OP_READ;
		break;
	case BLKIF_OP_WRITE:
		ring->st_wr_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_SYNC | REQ_IDLE;
		break;
	case BLKIF_OP_WRITE_BARRIER:
		drain = true;
		fallthrough;
	case BLKIF_OP_FLUSH_DISKCACHE:
		ring->st_f_req++;
		operation = REQ_OP_WRITE;
		operation_flags = REQ_PREFLUSH;
		break;
	default:
		operation = 0; /* make gcc happy */
		goto fail_response;
		break;
	}
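
	/*
	 * At this point the blkif operation has been mapped onto block layer
	 * primitives:
	 *   BLKIF_OP_READ            -> REQ_OP_READ
	 *   BLKIF_OP_WRITE           -> REQ_OP_WRITE | REQ_SYNC | REQ_IDLE
	 *   BLKIF_OP_FLUSH_DISKCACHE -> REQ_OP_WRITE | REQ_PREFLUSH
	 *                               (typically an empty bio, see below)
	 *   BLKIF_OP_WRITE_BARRIER   -> as the flush, but only after draining
	 *                               all in-flight I/O (drain == true)
	 */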

	/* Check that the number of segments is sane. */
	nseg = req->operation == BLKIF_OP_INDIRECT ?
	       req->u.indirect.nr_segments : req->u.rw.nr_segments;

	if (unlikely(nseg == 0 && operation_flags != REQ_PREFLUSH) ||
	    unlikely((req->operation != BLKIF_OP_INDIRECT) &&
		     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
	    unlikely((req->operation == BLKIF_OP_INDIRECT) &&
		     (nseg > MAX_INDIRECT_SEGMENTS))) {
		pr_debug("Bad number of segments in request (%d)\n", nseg);
		/* Haven't submitted any bios yet. */
		goto fail_response;
	}
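
	/*
	 * For scale: a direct request carries at most
	 * BLKIF_MAX_SEGMENTS_PER_REQUEST segments (11 in the blkif protocol
	 * headers), while an indirect request may carry up to
	 * MAX_INDIRECT_SEGMENTS as configured by this backend.  A request with
	 * zero segments is only accepted for the flush case (REQ_PREFLUSH).
	 */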

	preq.nr_sects = 0;

	pending_req->ring = ring;
	pending_req->id = req->u.rw.id;
	pending_req->operation = req_operation;
	pending_req->status = BLKIF_RSP_OKAY;
	pending_req->nr_segs = nseg;

	if (req->operation != BLKIF_OP_INDIRECT) {
		preq.dev = req->u.rw.handle;
		preq.sector_number = req->u.rw.sector_number;
		for (i = 0; i < nseg; i++) {
			pages[i]->gref = req->u.rw.seg[i].gref;
			seg[i].nsec = req->u.rw.seg[i].last_sect -
				req->u.rw.seg[i].first_sect + 1;
			seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
			if ((req->u.rw.seg[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
			    (req->u.rw.seg[i].last_sect <
			     req->u.rw.seg[i].first_sect))
				goto fail_response;
			preq.nr_sects += seg[i].nsec;
		}
	} else {
		preq.dev = req->u.indirect.handle;
		preq.sector_number = req->u.indirect.sector_number;
		if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
			goto fail_response;
	}
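
	/*
	 * Worked example of the per-segment math above: first_sect and
	 * last_sect are 512-byte sector indices inside one granted 4KiB page,
	 * so a segment with first_sect = 0 and last_sect = 7 covers the whole
	 * page:
	 *   nsec   = 7 - 0 + 1 = 8 sectors (4096 bytes)
	 *   offset = 0 << 9    = 0 bytes into the page
	 * A last_sect of 8 or more is rejected by the XEN_PAGE_SIZE >> 9 test.
	 */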

	if (xen_vbd_translate(&preq, ring->blkif, operation) != 0) {
		pr_debug("access denied: %s of [%llu,%llu] on dev=%04x\n",
			 operation == REQ_OP_READ ? "read" : "write",
			 preq.sector_number,
			 preq.sector_number + preq.nr_sects,
			 ring->blkif->vbd.pdevice);
		goto fail_response;
	}

	/*
	 * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
	 * is set there.
	 */
	for (i = 0; i < nseg; i++) {
		if (((int)preq.sector_number|(int)seg[i].nsec) &
		    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
			pr_debug("Misaligned I/O request from domain %d\n",
				 ring->blkif->domid);
			goto fail_response;
		}
	}
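
	/*
	 * Example of the alignment rule above: for a device with a 4096-byte
	 * logical block size, (bdev_logical_block_size() >> 9) - 1 == 7, so
	 * the starting sector and every segment length must be a multiple of
	 * 8 sectors; with a 512-byte logical block size the mask is 0 and any
	 * sector-granular request passes.
	 */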

	/*
	 * Wait for all outstanding I/O to complete and only then issue the
	 * flush.
	 */
	if (drain)
		xen_blk_drain_io(pending_req->ring);

	/*
	 * If we have failed at this point, we need to undo the M2P override,
	 * set gnttab_set_unmap_op on all of the grant references and perform
	 * the hypercall to unmap the grants - that is all done in
	 * xen_blkbk_unmap.
	 */
	if (xen_blkbk_map_seg(pending_req))
		goto fail_flush;

	/*
	 * This corresponding xen_blkif_put is done in __end_block_io_op, or
	 * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
	 */
	xen_blkif_get(ring->blkif);
	atomic_inc(&ring->inflight);

	for (i = 0; i < nseg; i++) {
		while ((bio == NULL) ||
		       (bio_add_page(bio,
				     pages[i]->page,
				     seg[i].nsec << 9,
				     seg[i].offset) == 0)) {
			int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);

			bio = bio_alloc(GFP_KERNEL, nr_iovecs);
			if (unlikely(bio == NULL))
				goto fail_put_bio;

			biolist[nbio++] = bio;
			bio_set_dev(bio, preq.bdev);
			bio->bi_private = pending_req;
			bio->bi_end_io = end_block_io_op;
			bio->bi_iter.bi_sector = preq.sector_number;
			bio_set_op_attrs(bio, operation, operation_flags);
		}

		preq.sector_number += seg[i].nsec;
	}
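
	/*
	 * bio_add_page() returns 0 once the current bio cannot take another
	 * page, at which point the loop above allocates a fresh bio sized for
	 * the remaining segments (capped at BIO_MAX_PAGES).  One blkif request
	 * may therefore be split across several bios; nbio counts them so that
	 * pendcnt below matches the number of completions __end_block_io_op()
	 * has to see.
	 */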

	/* This will be hit if the operation was a flush or discard. */
	if (!bio) {
		BUG_ON(operation_flags != REQ_PREFLUSH);

		bio = bio_alloc(GFP_KERNEL, 0);
		if (unlikely(bio == NULL))
			goto fail_put_bio;

		biolist[nbio++] = bio;
		bio_set_dev(bio, preq.bdev);
		bio->bi_private = pending_req;
		bio->bi_end_io = end_block_io_op;
		bio_set_op_attrs(bio, operation, operation_flags);
	}

	atomic_set(&pending_req->pendcnt, nbio);
	blk_start_plug(&plug);

	for (i = 0; i < nbio; i++)
		submit_bio(biolist[i]);

	/* Let the I/Os go. */
	blk_finish_plug(&plug);
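
	/*
	 * blk_start_plug()/blk_finish_plug() batch the submit_bio() calls so
	 * the block layer can merge and dispatch them together; the bios are
	 * generally held back until the plug is flushed here.
	 */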

	if (operation == REQ_OP_READ)
		ring->st_rd_sect += preq.nr_sects;
	else if (operation == REQ_OP_WRITE)
		ring->st_wr_sect += preq.nr_sects;

	return 0;

 fail_flush:
	xen_blkbk_unmap(ring, pending_req->segments,
			pending_req->nr_segs);
 fail_response:
	/* Haven't submitted any bios yet. */
	make_response(ring, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
	free_req(ring, pending_req);
	msleep(1); /* back off a bit */
	return -EIO;

 fail_put_bio:
	for (i = 0; i < nbio; i++)
		bio_put(biolist[i]);
	atomic_set(&pending_req->pendcnt, 1);
	__end_block_io_op(pending_req, BLK_STS_RESOURCE);
	msleep(1); /* back off a bit */
	return -EIO;
}
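
/*
 * Note on the failure paths above: fail_response runs before any grant has
 * been mapped or bio allocated, so it answers the frontend directly with
 * BLKIF_RSP_ERROR and frees the pending_req.  fail_flush unmaps the already
 * mapped segments and then falls through to fail_response.  fail_put_bio
 * runs after grants were mapped: it drops the allocated bios, sets pendcnt
 * to 1 and routes through __end_block_io_op() so the normal completion path
 * unmaps the grants and emits the error response.
 */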

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif_ring *ring, u64 id,
			  unsigned short op, int st)
{
	struct blkif_response *resp;
	unsigned long flags;
	union blkif_back_rings *blk_rings;
	int notify;

	spin_lock_irqsave(&ring->blk_ring_lock, flags);
	blk_rings = &ring->blk_rings;
	/* Place on the response ring for the relevant domain. */
	switch (ring->blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
		resp = RING_GET_RESPONSE(&blk_rings->native,
					 blk_rings->native.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_32:
		resp = RING_GET_RESPONSE(&blk_rings->x86_32,
					 blk_rings->x86_32.rsp_prod_pvt);
		break;
	case BLKIF_PROTOCOL_X86_64:
		resp = RING_GET_RESPONSE(&blk_rings->x86_64,
					 blk_rings->x86_64.rsp_prod_pvt);
		break;
	default:
		BUG();
	}

	resp->id = id;
	resp->operation = op;
	resp->status = st;

	blk_rings->common.rsp_prod_pvt++;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
	spin_unlock_irqrestore(&ring->blk_ring_lock, flags);
	if (notify)
		notify_remote_via_irq(ring->irq);
}
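
/*
 * The three response layouts above exist because the frontend may be a
 * 32-bit or a 64-bit guest whose struct packing differs from this backend's;
 * blk_protocol is fixed when the frontend connects, and the matching ring
 * view (native, x86_32 or x86_64) must be used for both requests and
 * responses.
 */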

static int __init xen_blkif_init(void)
{
	int rc = 0;

	if (!xen_domain())
		return -ENODEV;

	if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
		pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
			xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
		xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
	}

	if (xenblk_max_queues == 0)
		xenblk_max_queues = num_online_cpus();

	rc = xen_blkif_interface_init();
	if (rc)
		goto failed_init;

	rc = xen_blkif_xenbus_init();
	if (rc)
		goto failed_interface;

	return 0;

 failed_interface:
	xen_blkif_interface_fini();
 failed_init:
	return rc;
}
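
/*
 * Init ordering: xen_blkif_interface_init() sets up the backend's internal
 * state before xen_blkif_xenbus_init() registers with xenbus, so a device
 * probe cannot race ahead of the supporting infrastructure.  If the xenbus
 * registration fails, the interface setup is torn down again, mirroring
 * xen_blkif_fini() below.
 */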

module_init(xen_blkif_init);

static void __exit xen_blkif_fini(void)
{
	xen_blkif_xenbus_fini();
	xen_blkif_interface_fini();
}

module_exit(xen_blkif_fini);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");