/*
 * Xen SCSI frontend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bitops.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
#include <xen/interface/io/protocols.h>

#include <asm/xen/hypervisor.h>


#define GRANT_INVALID_REF	0

#define VSCSIFRONT_OP_ADD_LUN	1
#define VSCSIFRONT_OP_DEL_LUN	2
#define VSCSIFRONT_OP_READD_LUN	3

/* Tuning point. */
#define VSCSIIF_DEFAULT_CMD_PER_LUN	10
#define VSCSIIF_MAX_TARGET		64
#define VSCSIIF_MAX_LUN			255

#define VSCSIIF_RING_SIZE	__CONST_RING_SIZE(vscsiif, PAGE_SIZE)
#define VSCSIIF_MAX_REQS	VSCSIIF_RING_SIZE

#define vscsiif_grants_sg(_sg)	(PFN_UP((_sg) *		\
				sizeof(struct scsiif_request_segment)))

struct vscsifrnt_shadow {
	/* command between backend and frontend */
	unsigned char act;
	uint8_t nr_segments;
	uint16_t rqid;
	uint16_t ref_rqid;

	unsigned int nr_grants;		/* number of grants in gref[] */
	struct scsiif_request_segment *sg;	/* scatter/gather elements */
	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];

	/* Do reset or abort function. */
	wait_queue_head_t wq_reset;	/* reset work queue           */
	int wait_reset;			/* reset work queue condition */
	int32_t rslt_reset;		/* reset response status:     */
					/* SUCCESS or FAILED or:      */
#define RSLT_RESET_WAITING	0
#define RSLT_RESET_ERR		-1

	/* Requested struct scsi_cmnd is stored from kernel. */
	struct scsi_cmnd *sc;
	int gref[vscsiif_grants_sg(SG_ALL) + SG_ALL];
};

struct vscsifrnt_info {
	struct xenbus_device *dev;

	struct Scsi_Host *host;
	int host_active;

	unsigned int evtchn;
	unsigned int irq;

	grant_ref_t ring_ref;
	struct vscsiif_front_ring ring;
	struct vscsiif_response	ring_rsp;

	spinlock_t shadow_lock;
	DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
	struct vscsifrnt_shadow *shadow[VSCSIIF_MAX_REQS];

	/* Following items are protected by the host lock. */
	wait_queue_head_t wq_sync;
	wait_queue_head_t wq_pause;
	unsigned int wait_ring_available:1;
	unsigned int waiting_pause:1;
	unsigned int pause:1;
	unsigned callers;

	char dev_state_path[64];
	struct task_struct *curr;
};

static DEFINE_MUTEX(scsifront_mutex);

static void scsifront_wake_up(struct vscsifrnt_info *info)
{
	info->wait_ring_available = 0;
	wake_up(&info->wq_sync);
}

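/*
 * Request id handling: ids index the shadow[] array and are handed out
 * from shadow_free_bitmap under shadow_lock.
 */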
static int scsifront_get_rqid(struct vscsifrnt_info *info)
{
	unsigned long flags;
	int free;

	spin_lock_irqsave(&info->shadow_lock, flags);

	free = find_first_bit(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);
	__clear_bit(free, info->shadow_free_bitmap);

	spin_unlock_irqrestore(&info->shadow_lock, flags);

	return free;
}

static int _scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
	int empty = bitmap_empty(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

	__set_bit(id, info->shadow_free_bitmap);
	info->shadow[id] = NULL;

	return empty || info->wait_ring_available;
}

static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
{
	unsigned long flags;
	int kick;

	spin_lock_irqsave(&info->shadow_lock, flags);
	kick = _scsifront_put_rqid(info, id);
	spin_unlock_irqrestore(&info->shadow_lock, flags);

	if (kick)
		scsifront_wake_up(info);
}

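/*
 * Queue a request on the shared ring. The caller holds the host lock and
 * has filled in the shadow entry; on success the backend is notified via
 * the event channel if needed.
 */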
static int scsifront_do_request(struct vscsifrnt_info *info,
				struct vscsifrnt_shadow *shadow)
{
	struct vscsiif_front_ring *ring = &(info->ring);
	struct vscsiif_request *ring_req;
	struct scsi_cmnd *sc = shadow->sc;
	uint32_t id;
	int i, notify;

	if (RING_FULL(&info->ring))
		return -EBUSY;

	id = scsifront_get_rqid(info);	/* use id in response */
	if (id >= VSCSIIF_MAX_REQS)
		return -EBUSY;

	info->shadow[id] = shadow;
	shadow->rqid = id;

	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
	ring->req_prod_pvt++;

	ring_req->rqid        = id;
	ring_req->act         = shadow->act;
	ring_req->ref_rqid    = shadow->ref_rqid;
	ring_req->nr_segments = shadow->nr_segments;

	ring_req->id      = sc->device->id;
	ring_req->lun     = sc->device->lun;
	ring_req->channel = sc->device->channel;
	ring_req->cmd_len = sc->cmd_len;

	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);

	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);

	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
	ring_req->timeout_per_command = sc->request->timeout / HZ;

	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
		ring_req->seg[i] = shadow->seg[i];

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_irq(info->irq);

	return 0;
}

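/*
 * Revoke the grants of a completed request. The backend should have
 * released all grants by now; a grant still in use indicates a buggy or
 * malicious backend, so the frontend gives up rather than let the backend
 * keep writing into memory that is about to be reused.
 */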
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
				  struct vscsifrnt_shadow *shadow)
{
	int i;

	if (shadow->sc->sc_data_direction == DMA_NONE)
		return;

	for (i = 0; i < shadow->nr_grants; i++) {
		if (unlikely(!gnttab_try_end_foreign_access(shadow->gref[i]))) {
			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
				     "grant still in use by backend\n");
			BUG();
		}
	}

	kfree(shadow->sg);
}

static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
				   struct vscsiif_response *ring_rsp)
{
	struct vscsifrnt_shadow *shadow;
	struct scsi_cmnd *sc;
	uint32_t id;
	uint8_t sense_len;

	id = ring_rsp->rqid;
	shadow = info->shadow[id];
	sc = shadow->sc;

	BUG_ON(sc == NULL);

	scsifront_gnttab_done(info, shadow);
	scsifront_put_rqid(info, id);

	sc->result = ring_rsp->rslt;
	scsi_set_resid(sc, ring_rsp->residual_len);

	sense_len = min_t(uint8_t, VSCSIIF_SENSE_BUFFERSIZE,
			  ring_rsp->sense_len);

	if (sense_len)
		memcpy(sc->sense_buffer, ring_rsp->sense_buffer, sense_len);

	sc->scsi_done(sc);
}

static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
				    struct vscsiif_response *ring_rsp)
{
	uint16_t id = ring_rsp->rqid;
	unsigned long flags;
	struct vscsifrnt_shadow *shadow = info->shadow[id];
	int kick;

	spin_lock_irqsave(&info->shadow_lock, flags);
	shadow->wait_reset = 1;
	switch (shadow->rslt_reset) {
	case RSLT_RESET_WAITING:
		shadow->rslt_reset = ring_rsp->rslt;
		break;
	case RSLT_RESET_ERR:
		kick = _scsifront_put_rqid(info, id);
		spin_unlock_irqrestore(&info->shadow_lock, flags);
		kfree(shadow);
		if (kick)
			scsifront_wake_up(info);
		return;
	default:
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "bad reset state %d, possibly leaking %u\n",
			     shadow->rslt_reset, id);
		break;
	}
	spin_unlock_irqrestore(&info->shadow_lock, flags);

	wake_up(&shadow->wq_reset);
}

static void scsifront_do_response(struct vscsifrnt_info *info,
				  struct vscsiif_response *ring_rsp)
{
	if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
		 test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
		 "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
		return;

	if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
		scsifront_cdb_cmd_done(info, ring_rsp);
	else
		scsifront_sync_cmd_done(info, ring_rsp);
}

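/*
 * Consume all pending responses from the shared ring and dispatch them.
 * Returns nonzero if more responses arrived while processing.
 */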
static int scsifront_ring_drain(struct vscsifrnt_info *info)
{
	struct vscsiif_response *ring_rsp;
	RING_IDX i, rp;
	int more_to_do = 0;

	rp = info->ring.sring->rsp_prod;
	rmb();	/* ordering required respective to dom0 */
	for (i = info->ring.rsp_cons; i != rp; i++) {
		ring_rsp = RING_GET_RESPONSE(&info->ring, i);
		scsifront_do_response(info, ring_rsp);
	}

	info->ring.rsp_cons = i;

	if (i != info->ring.req_prod_pvt)
		RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
	else
		info->ring.sring->rsp_event = i + 1;

	return more_to_do;
}

static int scsifront_cmd_done(struct vscsifrnt_info *info)
{
	int more_to_do;
	unsigned long flags;

	spin_lock_irqsave(info->host->host_lock, flags);

	more_to_do = scsifront_ring_drain(info);

	info->wait_ring_available = 0;

	spin_unlock_irqrestore(info->host->host_lock, flags);

	wake_up(&info->wq_sync);

	return more_to_do;
}

static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
{
	struct vscsifrnt_info *info = dev_id;

	while (scsifront_cmd_done(info))
		/* Yield point for this unbounded loop. */
		cond_resched();

	return IRQ_HANDLED;
}

static void scsifront_finish_all(struct vscsifrnt_info *info)
{
	unsigned i;
	struct vscsiif_response resp;

	scsifront_ring_drain(info);

	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
		if (test_bit(i, info->shadow_free_bitmap))
			continue;
		resp.rqid = i;
		resp.sense_len = 0;
		resp.rslt = DID_RESET << 16;
		resp.residual_len = 0;
		scsifront_do_response(info, &resp);
	}
}

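/*
 * Build the grant references for a command's data buffers. For large
 * transfers the segment descriptors themselves are placed in granted
 * pages (flagged with VSCSIIF_SG_GRANT); otherwise they are put directly
 * into the request's embedded seg[] array.
 */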
static int map_data_for_request(struct vscsifrnt_info *info,
				struct scsi_cmnd *sc,
				struct vscsifrnt_shadow *shadow)
{
	grant_ref_t gref_head;
	struct page *page;
	int err, ref, ref_cnt = 0;
	int grant_ro = (sc->sc_data_direction == DMA_TO_DEVICE);
	unsigned int i, off, len, bytes;
	unsigned int data_len = scsi_bufflen(sc);
	unsigned int data_grants = 0, seg_grants = 0;
	struct scatterlist *sg;
	struct scsiif_request_segment *seg;

	if (sc->sc_data_direction == DMA_NONE || !data_len)
		return 0;

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i)
		data_grants += PFN_UP(sg->offset + sg->length);

	if (data_grants > VSCSIIF_SG_TABLESIZE) {
		if (data_grants > info->host->sg_tablesize) {
			shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "Unable to map request_buffer for command!\n");
			return -E2BIG;
		}
		seg_grants = vscsiif_grants_sg(data_grants);
		shadow->sg = kcalloc(data_grants,
			sizeof(struct scsiif_request_segment), GFP_ATOMIC);
		if (!shadow->sg)
			return -ENOMEM;
	}
	seg = shadow->sg ? : shadow->seg;

	err = gnttab_alloc_grant_references(seg_grants + data_grants,
					    &gref_head);
	if (err) {
		kfree(shadow->sg);
		shost_printk(KERN_ERR, info->host, KBUILD_MODNAME
			     "gnttab_alloc_grant_references() error\n");
		return -ENOMEM;
	}

	if (seg_grants) {
		page = virt_to_page(seg);
		off = offset_in_page(seg);
		len = sizeof(struct scsiif_request_segment) * data_grants;
		while (len > 0) {
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id,
				xen_page_to_gfn(page), 1);
			shadow->gref[ref_cnt] = ref;
			shadow->seg[ref_cnt].gref   = ref;
			shadow->seg[ref_cnt].offset = (uint16_t)off;
			shadow->seg[ref_cnt].length = (uint16_t)bytes;

			page++;
			len -= bytes;
			off = 0;
			ref_cnt++;
		}
		BUG_ON(seg_grants < ref_cnt);
		seg_grants = ref_cnt;
	}

	scsi_for_each_sg(sc, sg, scsi_sg_count(sc), i) {
		page = sg_page(sg);
		off = sg->offset;
		len = sg->length;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes.
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			ref = gnttab_claim_grant_reference(&gref_head);
			BUG_ON(ref == -ENOSPC);

			gnttab_grant_foreign_access_ref(ref,
				info->dev->otherend_id,
				xen_page_to_gfn(page),
				grant_ro);

			shadow->gref[ref_cnt] = ref;
			seg->gref   = ref;
			seg->offset = (uint16_t)off;
			seg->length = (uint16_t)bytes;

			page++;
			seg++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
			ref_cnt++;
		}
	}

	if (seg_grants)
		shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
	else
		shadow->nr_segments = (uint8_t)ref_cnt;
	shadow->nr_grants = ref_cnt;

	return 0;
}

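/*
 * Track active callers so that suspend can wait until all of them have
 * left the ring code. scsifront_enter() fails while a pause (suspend) is
 * in progress.
 */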
static int scsifront_enter(struct vscsifrnt_info *info)
{
	if (info->pause)
		return 1;
	info->callers++;
	return 0;
}

static void scsifront_return(struct vscsifrnt_info *info)
{
	info->callers--;
	if (info->callers)
		return;

	if (!info->waiting_pause)
		return;

	info->waiting_pause = 0;
	wake_up(&info->wq_pause);
}

static int scsifront_queuecommand(struct Scsi_Host *shost,
				  struct scsi_cmnd *sc)
{
	struct vscsifrnt_info *info = shost_priv(shost);
	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
	unsigned long flags;
	int err;

	sc->result = 0;

	shadow->sc  = sc;
	shadow->act = VSCSIIF_ACT_SCSI_CDB;

	spin_lock_irqsave(shost->host_lock, flags);
	if (scsifront_enter(info)) {
		spin_unlock_irqrestore(shost->host_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	err = map_data_for_request(info, sc, shadow);
	if (err < 0) {
		pr_debug("%s: err %d\n", __func__, err);
		scsifront_return(info);
		spin_unlock_irqrestore(shost->host_lock, flags);
		if (err == -ENOMEM)
			return SCSI_MLQUEUE_HOST_BUSY;
		sc->result = DID_ERROR << 16;
		sc->scsi_done(sc);
		return 0;
	}

	if (scsifront_do_request(info, shadow)) {
		scsifront_gnttab_done(info, shadow);
		goto busy;
	}

	scsifront_return(info);
	spin_unlock_irqrestore(shost->host_lock, flags);

	return 0;

busy:
	scsifront_return(info);
	spin_unlock_irqrestore(shost->host_lock, flags);
	pr_debug("%s: busy\n", __func__);
	return SCSI_MLQUEUE_HOST_BUSY;
}

/*
 * Any exception handling (reset or abort) must be forwarded to the backend.
 * We have to wait until an answer is returned. This answer contains the
 * result to be returned to the requestor.
 */
static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
{
	struct Scsi_Host *host = sc->device->host;
	struct vscsifrnt_info *info = shost_priv(host);
	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
	int err = 0;

	shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
	if (!shadow)
		return FAILED;

	shadow->act = act;
	shadow->rslt_reset = RSLT_RESET_WAITING;
	shadow->sc = sc;
	shadow->ref_rqid = s->rqid;
	init_waitqueue_head(&shadow->wq_reset);

	spin_lock_irq(host->host_lock);

	for (;;) {
		if (scsifront_enter(info))
			goto fail;

		if (!scsifront_do_request(info, shadow))
			break;

		scsifront_return(info);
		if (err)
			goto fail;
		info->wait_ring_available = 1;
		spin_unlock_irq(host->host_lock);
		err = wait_event_interruptible(info->wq_sync,
					       !info->wait_ring_available);
		spin_lock_irq(host->host_lock);
	}

	spin_unlock_irq(host->host_lock);
	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
	spin_lock_irq(host->host_lock);

	if (!err) {
		err = shadow->rslt_reset;
		scsifront_put_rqid(info, shadow->rqid);
		kfree(shadow);
	} else {
		spin_lock(&info->shadow_lock);
		shadow->rslt_reset = RSLT_RESET_ERR;
		spin_unlock(&info->shadow_lock);
		err = FAILED;
	}

	scsifront_return(info);
	spin_unlock_irq(host->host_lock);
	return err;

fail:
	spin_unlock_irq(host->host_lock);
	kfree(shadow);
	return FAILED;
}

static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
{
	pr_debug("%s\n", __func__);
	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_ABORT);
}

static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
{
	pr_debug("%s\n", __func__);
	return scsifront_action_handler(sc, VSCSIIF_ACT_SCSI_RESET);
}

static int scsifront_sdev_configure(struct scsi_device *sdev)
{
	struct vscsifrnt_info *info = shost_priv(sdev->host);
	int err;

	if (info && current == info->curr) {
		err = xenbus_printf(XBT_NIL, info->dev->nodename,
			      info->dev_state_path, "%d", XenbusStateConnected);
		if (err) {
			xenbus_dev_error(info->dev, err,
				"%s: writing dev_state_path", __func__);
			return err;
		}
	}

	return 0;
}

static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
	struct vscsifrnt_info *info = shost_priv(sdev->host);
	int err;

	if (info && current == info->curr) {
		err = xenbus_printf(XBT_NIL, info->dev->nodename,
			      info->dev_state_path, "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing dev_state_path", __func__);
	}
}

static struct scsi_host_template scsifront_sht = {
	.module			= THIS_MODULE,
	.name			= "Xen SCSI frontend driver",
	.queuecommand		= scsifront_queuecommand,
	.eh_abort_handler	= scsifront_eh_abort_handler,
	.eh_device_reset_handler = scsifront_dev_reset_handler,
	.slave_configure	= scsifront_sdev_configure,
	.slave_destroy		= scsifront_sdev_destroy,
	.cmd_per_lun		= VSCSIIF_DEFAULT_CMD_PER_LUN,
	.can_queue		= VSCSIIF_MAX_REQS,
	.this_id		= -1,
	.cmd_size		= sizeof(struct vscsifrnt_shadow),
	.sg_tablesize		= VSCSIIF_SG_TABLESIZE,
	.proc_name		= "scsifront",
};

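/*
 * Allocate and grant the shared ring page, then set up the event channel
 * and bind the interrupt handler for backend notifications.
 */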
static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct vscsiif_sring *sring;
	grant_ref_t gref;
	int err = -ENOMEM;

	/***** Frontend to Backend ring start *****/
	sring = (struct vscsiif_sring *)__get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, err,
			"fail to allocate shared ring (Front to Back)");
		return err;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, sring, 1, &gref);
	if (err < 0) {
		free_page((unsigned long)sring);
		xenbus_dev_fatal(dev, err,
			"fail to grant shared ring (Front to Back)");
		return err;
	}
	info->ring_ref = gref;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err) {
		xenbus_dev_fatal(dev, err, "xenbus_alloc_evtchn");
		goto free_gnttab;
	}

	err = bind_evtchn_to_irq(info->evtchn);
	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_evtchn_to_irq");
		goto free_gnttab;
	}

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, scsifront_irq_fn,
				   IRQF_ONESHOT, "scsifront", info);
	if (err) {
		xenbus_dev_fatal(dev, err, "request_threaded_irq");
		goto free_irq;
	}

	return 0;

/* free resource */
free_irq:
	unbind_from_irqhandler(info->irq, info);
free_gnttab:
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);

	return err;
}

static void scsifront_free_ring(struct vscsifrnt_info *info)
{
	unbind_from_irqhandler(info->irq, info);
	gnttab_end_foreign_access(info->ring_ref, 0,
				  (unsigned long)info->ring.sring);
}

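/*
 * Allocate the ring and publish ring-ref and event-channel to the backend
 * in a single xenbus transaction, retrying on -EAGAIN.
 */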
static int scsifront_init_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct xenbus_transaction xbt;
	int err;

	pr_debug("%s\n", __func__);

	err = scsifront_alloc_ring(info);
	if (err)
		return err;
	pr_debug("%s: %u %u\n", __func__, info->ring_ref, info->evtchn);

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		xenbus_dev_fatal(dev, err, "starting transaction");

	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
			    info->ring_ref);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
		goto fail;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
			    info->evtchn);

	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
		goto fail;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto free_sring;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);
free_sring:
	scsifront_free_ring(info);

	return err;
}


static int scsifront_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	struct vscsifrnt_info *info;
	struct Scsi_Host *host;
	int err = -ENOMEM;
	char name[TASK_COMM_LEN];

	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
	if (!host) {
		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
		return err;
	}
	info = (struct vscsifrnt_info *)host->hostdata;

	dev_set_drvdata(&dev->dev, info);
	info->dev = dev;

	bitmap_fill(info->shadow_free_bitmap, VSCSIIF_MAX_REQS);

	err = scsifront_init_ring(info);
	if (err) {
		scsi_host_put(host);
		return err;
	}

	init_waitqueue_head(&info->wq_sync);
	init_waitqueue_head(&info->wq_pause);
	spin_lock_init(&info->shadow_lock);

	snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);

	host->max_id      = VSCSIIF_MAX_TARGET;
	host->max_channel = 0;
	host->max_lun     = VSCSIIF_MAX_LUN;
	host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
	host->max_cmd_len = VSCSIIF_MAX_COMMAND_SIZE;

	err = scsi_add_host(host, &dev->dev);
	if (err) {
		dev_err(&dev->dev, "fail to add scsi host %d\n", err);
		goto free_sring;
	}
	info->host = host;
	info->host_active = 1;

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

free_sring:
	scsifront_free_ring(info);
	scsi_host_put(host);
	return err;
}

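/*
 * Resume handling (e.g. after domain save/restore or migration): complete
 * any commands that were still in flight with DID_RESET, then reconnect
 * the ring to the backend.
 */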
static int scsifront_resume(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
	struct Scsi_Host *host = info->host;
	int err;

	spin_lock_irq(host->host_lock);

	/* Finish all still pending commands. */
	scsifront_finish_all(info);

	spin_unlock_irq(host->host_lock);

	/* Reconnect to dom0. */
	scsifront_free_ring(info);
	err = scsifront_init_ring(info);
	if (err) {
		dev_err(&dev->dev, "fail to resume %d\n", err);
		scsi_host_put(host);
		return err;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;
}

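/*
 * Suspend handling: stop issuing new requests and wait until all callers
 * have left the ring code before the domain is paused.
 */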
static int scsifront_suspend(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
	struct Scsi_Host *host = info->host;
	int err = 0;

	/* No new commands for the backend. */
	spin_lock_irq(host->host_lock);
	info->pause = 1;
	while (info->callers && !err) {
		info->waiting_pause = 1;
		info->wait_ring_available = 0;
		spin_unlock_irq(host->host_lock);
		wake_up(&info->wq_sync);
		err = wait_event_interruptible(info->wq_pause,
					       !info->waiting_pause);
		spin_lock_irq(host->host_lock);
	}
	spin_unlock_irq(host->host_lock);
	return err;
}

static int scsifront_remove(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	pr_debug("%s: %s removed\n", __func__, dev->nodename);

	mutex_lock(&scsifront_mutex);
	if (info->host_active) {
		/* Scsi_host not yet removed */
		scsi_remove_host(info->host);
		info->host_active = 0;
	}
	mutex_unlock(&scsifront_mutex);

	scsifront_free_ring(info);
	scsi_host_put(info->host);

	return 0;
}

static void scsifront_disconnect(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct Scsi_Host *host = info->host;

	pr_debug("%s: %s disconnect\n", __func__, dev->nodename);

	/*
	 * When this function is executed, all devices of
	 * Frontend have been deleted.
	 * Therefore, it need not block I/O before remove_host.
	 */

	mutex_lock(&scsifront_mutex);
	if (info->host_active) {
		scsi_remove_host(host);
		info->host_active = 0;
	}
	mutex_unlock(&scsifront_mutex);

	xenbus_frontend_closed(dev);
}

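/*
 * Walk the backend's vscsi-devs directory and add, remove or re-announce
 * LUNs according to the requested operation and each device's state.
 */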
static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
	struct xenbus_device *dev = info->dev;
	int i, err = 0;
	char str[64];
	char **dir;
	unsigned int dir_n = 0;
	unsigned int device_state;
	unsigned int hst, chn, tgt, lun;
	struct scsi_device *sdev;

	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
	if (IS_ERR(dir))
		return;

	/* mark current task as the one allowed to modify device states */
	BUG_ON(info->curr);
	info->curr = current;

	for (i = 0; i < dir_n; i++) {
		/* read status */
		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
				   &device_state);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* virtual SCSI device */
		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
				   "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/*
		 * Front device state path, used in slave_configure called
		 * on successful scsi_add_device, and in slave_destroy called
		 * on remove of a device.
		 */
		snprintf(info->dev_state_path, sizeof(info->dev_state_path),
			 "vscsi-devs/%s/state", dir[i]);

		switch (op) {
		case VSCSIFRONT_OP_ADD_LUN:
			if (device_state != XenbusStateInitialised)
				break;

			if (scsi_add_device(info->host, chn, tgt, lun)) {
				dev_err(&dev->dev, "scsi_add_device\n");
				err = xenbus_printf(XBT_NIL, dev->nodename,
					      info->dev_state_path,
					      "%d", XenbusStateClosed);
				if (err)
					xenbus_dev_error(dev, err,
						"%s: writing dev_state_path", __func__);
			}
			break;
		case VSCSIFRONT_OP_DEL_LUN:
			if (device_state != XenbusStateClosing)
				break;

			sdev = scsi_device_lookup(info->host, chn, tgt, lun);
			if (sdev) {
				scsi_remove_device(sdev);
				scsi_device_put(sdev);
			}
			break;
		case VSCSIFRONT_OP_READD_LUN:
			if (device_state == XenbusStateConnected) {
				err = xenbus_printf(XBT_NIL, dev->nodename,
					      info->dev_state_path,
					      "%d", XenbusStateConnected);
				if (err)
					xenbus_dev_error(dev, err,
						"%s: writing dev_state_path", __func__);
			}
			break;
		default:
			break;
		}
	}

	info->curr = NULL;

	kfree(dir);
}

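/*
 * Negotiate the scatter/gather table size with the backend based on its
 * advertised "feature-sg-grant" value, clamped to what the frontend can
 * describe in a single request.
 */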
static void scsifront_read_backend_params(struct xenbus_device *dev,
					  struct vscsifrnt_info *info)
{
	unsigned int sg_grant, nr_segs;
	struct Scsi_Host *host = info->host;

	sg_grant = xenbus_read_unsigned(dev->otherend, "feature-sg-grant", 0);
	nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
	nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
	nr_segs = min_t(unsigned int, nr_segs,
			VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
			sizeof(struct scsiif_request_segment));

	if (!info->pause && sg_grant)
		dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
	else if (info->pause && nr_segs < host->sg_tablesize)
		dev_warn(&dev->dev,
			 "SG entries decreased from %d to %u - device may not work properly anymore\n",
			 host->sg_tablesize, nr_segs);

	host->sg_tablesize = nr_segs;
	host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
}

static void scsifront_backend_changed(struct xenbus_device *dev,
				      enum xenbus_state backend_state)
{
	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);

	pr_debug("%s: %p %u %u\n", __func__, dev, dev->state, backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		scsifront_read_backend_params(dev, info);

		if (info->pause) {
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
			xenbus_switch_state(dev, XenbusStateConnected);
			info->pause = 0;
			return;
		}

		if (xenbus_read_driver_state(dev->nodename) ==
		    XenbusStateInitialised)
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);

		if (dev->state != XenbusStateConnected)
			xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		fallthrough;	/* Missed the backend's Closing state */
	case XenbusStateClosing:
		scsifront_disconnect(info);
		break;

	case XenbusStateReconfiguring:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfiguring);
		break;

	case XenbusStateReconfigured:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	}
}

static const struct xenbus_device_id scsifront_ids[] = {
	{ "vscsi" },
	{ "" }
};

static struct xenbus_driver scsifront_driver = {
	.ids			= scsifront_ids,
	.probe			= scsifront_probe,
	.remove			= scsifront_remove,
	.resume			= scsifront_resume,
	.suspend		= scsifront_suspend,
	.otherend_changed	= scsifront_backend_changed,
};

static int __init scsifront_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	return xenbus_register_frontend(&scsifront_driver);
}
module_init(scsifront_init);

static void __exit scsifront_exit(void)
{
	xenbus_unregister_driver(&scsifront_driver);
}
module_exit(scsifront_exit);

MODULE_DESCRIPTION("Xen SCSI frontend driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");