// SPDX-License-Identifier: GPL-2.0
/*
 * USB Attached SCSI
 * Note that this is not the same as the USB Mass Storage driver
 *
 * Copyright Hans de Goede <hdegoede@redhat.com> for Red Hat, Inc. 2013 - 2016
 * Copyright Matthew Wilcox for Intel Corp, 2010
 * Copyright Sarah Sharp for Intel Corp, 2010
 */

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/usb/hcd.h>
#include <linux/usb/storage.h>
#include <linux/usb/uas.h>

#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "uas-detect.h"
#include "scsiglue.h"

#define MAX_CMNDS 256

struct uas_dev_info {
	struct usb_interface *intf;
	struct usb_device *udev;
	struct usb_anchor cmd_urbs;
	struct usb_anchor sense_urbs;
	struct usb_anchor data_urbs;
	unsigned long flags;
	int qdepth, resetting;
	unsigned cmd_pipe, status_pipe, data_in_pipe, data_out_pipe;
	unsigned use_streams:1;
	unsigned shutdown:1;
	struct scsi_cmnd *cmnd[MAX_CMNDS];
	spinlock_t lock;
	struct work_struct work;
	struct work_struct scan_work;      /* for async scanning */
};

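/*
 * Per-command state bits: which URBs still need to be allocated or
 * submitted for a command, and which are currently in flight.
 */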
enum {
	SUBMIT_STATUS_URB = BIT(1),
	ALLOC_DATA_IN_URB = BIT(2),
	SUBMIT_DATA_IN_URB = BIT(3),
	ALLOC_DATA_OUT_URB = BIT(4),
	SUBMIT_DATA_OUT_URB = BIT(5),
	ALLOC_CMD_URB = BIT(6),
	SUBMIT_CMD_URB = BIT(7),
	COMMAND_INFLIGHT = BIT(8),
	DATA_IN_URB_INFLIGHT = BIT(9),
	DATA_OUT_URB_INFLIGHT = BIT(10),
	COMMAND_ABORTED = BIT(11),
	IS_IN_WORK_LIST = BIT(12),
};

/* Overrides scsi_pointer; stored in cmnd->SCp, so it must not be larger than struct scsi_pointer */
struct uas_cmd_info {
	unsigned int state;
	unsigned int uas_tag;
	struct urb *cmd_urb;
	struct urb *data_in_urb;
	struct urb *data_out_urb;
};

/* I hate forward declarations, but I actually have a loop */
static int uas_submit_urbs(struct scsi_cmnd *cmnd,
				struct uas_dev_info *devinfo);
static void uas_do_work(struct work_struct *work);
static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller);
static void uas_free_streams(struct uas_dev_info *devinfo);
static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
				int status);

/*
 * This driver needs its own workqueue, as we need to control memory allocation.
 *
 * In the course of error handling and power management uas_wait_for_pending_cmnds()
 * needs to flush pending work items. In these contexts we cannot allocate memory
 * by doing block IO as we would deadlock. For the same reason we cannot wait
 * for anything allocating memory not heeding these constraints.
 *
 * So we have to control all work items that can be on the workqueue we flush.
 * Hence we cannot share a queue and need our own.
 */
static struct workqueue_struct *workqueue;

static void uas_do_work(struct work_struct *work)
{
	struct uas_dev_info *devinfo =
		container_of(work, struct uas_dev_info, work);
	struct uas_cmd_info *cmdinfo;
	struct scsi_cmnd *cmnd;
	unsigned long flags;
	int i, err;

	spin_lock_irqsave(&devinfo->lock, flags);

	if (devinfo->resetting)
		goto out;

	for (i = 0; i < devinfo->qdepth; i++) {
		if (!devinfo->cmnd[i])
			continue;

		cmnd = devinfo->cmnd[i];
		cmdinfo = (void *)&cmnd->SCp;

		if (!(cmdinfo->state & IS_IN_WORK_LIST))
			continue;

		err = uas_submit_urbs(cmnd, cmnd->device->hostdata);
		if (!err)
			cmdinfo->state &= ~IS_IN_WORK_LIST;
		else
			queue_work(workqueue, &devinfo->work);
	}
out:
	spin_unlock_irqrestore(&devinfo->lock, flags);
}

static void uas_scan_work(struct work_struct *work)
{
	struct uas_dev_info *devinfo =
		container_of(work, struct uas_dev_info, scan_work);
	struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);

	dev_dbg(&devinfo->intf->dev, "starting scan\n");
	scsi_scan_host(shost);
	dev_dbg(&devinfo->intf->dev, "scan complete\n");
}

static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
	struct scsi_pointer *scp = (void *)cmdinfo;
	struct scsi_cmnd *cmnd = container_of(scp, struct scsi_cmnd, SCp);
	struct uas_dev_info *devinfo = cmnd->device->hostdata;

	lockdep_assert_held(&devinfo->lock);
	cmdinfo->state |= IS_IN_WORK_LIST;
	queue_work(workqueue, &devinfo->work);
}

static void uas_zap_pending(struct uas_dev_info *devinfo, int result)
{
	struct uas_cmd_info *cmdinfo;
	struct scsi_cmnd *cmnd;
	unsigned long flags;
	int i, err;

	spin_lock_irqsave(&devinfo->lock, flags);
	for (i = 0; i < devinfo->qdepth; i++) {
		if (!devinfo->cmnd[i])
			continue;

		cmnd = devinfo->cmnd[i];
		cmdinfo = (void *)&cmnd->SCp;
		uas_log_cmd_state(cmnd, __func__, 0);
		/* Sense urbs were killed, clear COMMAND_INFLIGHT manually */
		cmdinfo->state &= ~COMMAND_INFLIGHT;
		cmnd->result = result << 16;
		err = uas_try_complete(cmnd, __func__);
		WARN_ON(err != 0);
	}
	spin_unlock_irqrestore(&devinfo->lock, flags);
}

static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
{
	struct sense_iu *sense_iu = urb->transfer_buffer;
	struct scsi_device *sdev = cmnd->device;

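	/*
	 * A sense IU is a 16-byte header followed by the sense data; the
	 * big-endian len field gives the number of sense bytes that follow.
	 */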
	if (urb->actual_length > 16) {
		unsigned len = be16_to_cpup(&sense_iu->len);
		if (len + 16 != urb->actual_length) {
			int newlen = min(len + 16, urb->actual_length) - 16;
			if (newlen < 0)
				newlen = 0;
			sdev_printk(KERN_INFO, sdev, "%s: urb length %d "
				"disagrees with IU sense data length %d, "
				"using %d bytes of sense data\n", __func__,
				urb->actual_length, len, newlen);
			len = newlen;
		}
		memcpy(cmnd->sense_buffer, sense_iu->sense, len);
	}

	cmnd->result = sense_iu->status;
}

static void uas_log_cmd_state(struct scsi_cmnd *cmnd, const char *prefix,
			      int status)
{
	struct uas_cmd_info *ci = (void *)&cmnd->SCp;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;

	if (status == -ENODEV) /* too late */
		return;

	scmd_printk(KERN_INFO, cmnd,
		    "%s %d uas-tag %d inflight:%s%s%s%s%s%s%s%s%s%s%s%s ",
		    prefix, status, cmdinfo->uas_tag,
		    (ci->state & SUBMIT_STATUS_URB) ? " s-st" : "",
		    (ci->state & ALLOC_DATA_IN_URB) ? " a-in" : "",
		    (ci->state & SUBMIT_DATA_IN_URB) ? " s-in" : "",
		    (ci->state & ALLOC_DATA_OUT_URB) ? " a-out" : "",
		    (ci->state & SUBMIT_DATA_OUT_URB) ? " s-out" : "",
		    (ci->state & ALLOC_CMD_URB) ? " a-cmd" : "",
		    (ci->state & SUBMIT_CMD_URB) ? " s-cmd" : "",
		    (ci->state & COMMAND_INFLIGHT) ? " CMD" : "",
		    (ci->state & DATA_IN_URB_INFLIGHT) ? " IN" : "",
		    (ci->state & DATA_OUT_URB_INFLIGHT) ? " OUT" : "",
		    (ci->state & COMMAND_ABORTED) ? " abort" : "",
		    (ci->state & IS_IN_WORK_LIST) ? " work" : "");
	scsi_print_command(cmnd);
}

static void uas_free_unsubmitted_urbs(struct scsi_cmnd *cmnd)
{
	struct uas_cmd_info *cmdinfo;

	if (!cmnd)
		return;

	cmdinfo = (void *)&cmnd->SCp;

	if (cmdinfo->state & SUBMIT_CMD_URB)
		usb_free_urb(cmdinfo->cmd_urb);

	/* data urbs may have never gotten their submit flag set */
	if (!(cmdinfo->state & DATA_IN_URB_INFLIGHT))
		usb_free_urb(cmdinfo->data_in_urb);
	if (!(cmdinfo->state & DATA_OUT_URB_INFLIGHT))
		usb_free_urb(cmdinfo->data_out_urb);
}

static int uas_try_complete(struct scsi_cmnd *cmnd, const char *caller)
{
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;

	lockdep_assert_held(&devinfo->lock);
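	/*
	 * Only complete the command once the command itself and both data
	 * URBs have finished, and no abort is in progress.
	 */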
	if (cmdinfo->state & (COMMAND_INFLIGHT |
			      DATA_IN_URB_INFLIGHT |
			      DATA_OUT_URB_INFLIGHT |
			      COMMAND_ABORTED))
		return -EBUSY;
	devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
	uas_free_unsubmitted_urbs(cmnd);
	cmnd->scsi_done(cmnd);
	return 0;
}

static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
			  unsigned direction)
{
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	int err;

	cmdinfo->state |= direction | SUBMIT_STATUS_URB;
	err = uas_submit_urbs(cmnd, cmnd->device->hostdata);
	if (err) {
		uas_add_work(cmdinfo);
	}
}

static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd *cmnd)
{
	u8 response_code = riu->response_code;

	switch (response_code) {
	case RC_INCORRECT_LUN:
		set_host_byte(cmnd, DID_BAD_TARGET);
		break;
	case RC_TMF_SUCCEEDED:
		set_host_byte(cmnd, DID_OK);
		break;
	case RC_TMF_NOT_SUPPORTED:
		set_host_byte(cmnd, DID_TARGET_FAILURE);
		break;
	default:
		uas_log_cmd_state(cmnd, "response iu", response_code);
		set_host_byte(cmnd, DID_ERROR);
		break;
	}

	return response_code == RC_TMF_SUCCEEDED;
}

static void uas_stat_cmplt(struct urb *urb)
{
	struct iu *iu = urb->transfer_buffer;
	struct Scsi_Host *shost = urb->context;
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
	struct urb *data_in_urb = NULL;
	struct urb *data_out_urb = NULL;
	struct scsi_cmnd *cmnd;
	struct uas_cmd_info *cmdinfo;
	unsigned long flags;
	unsigned int idx;
	int status = urb->status;
	bool success;

	spin_lock_irqsave(&devinfo->lock, flags);

	if (devinfo->resetting)
		goto out;

	if (status) {
		if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
			dev_err(&urb->dev->dev, "stat urb: status %d\n", status);
		goto out;
	}

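	/* The IU tag is the uas-tag; tags are 1-based, cmnd[] is 0-based */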
	idx = be16_to_cpup(&iu->tag) - 1;
	if (idx >= MAX_CMNDS || !devinfo->cmnd[idx]) {
		dev_err(&urb->dev->dev,
			"stat urb: no pending cmd for uas-tag %d\n", idx + 1);
		goto out;
	}

	cmnd = devinfo->cmnd[idx];
	cmdinfo = (void *)&cmnd->SCp;

	if (!(cmdinfo->state & COMMAND_INFLIGHT)) {
		uas_log_cmd_state(cmnd, "unexpected status cmplt", 0);
		goto out;
	}

	switch (iu->iu_id) {
	case IU_ID_STATUS:
		uas_sense(urb, cmnd);
		if (cmnd->result != 0) {
			/* cancel data transfers on error */
			data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
			data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
		}
		cmdinfo->state &= ~COMMAND_INFLIGHT;
		uas_try_complete(cmnd, __func__);
		break;
	case IU_ID_READ_READY:
		if (!cmdinfo->data_in_urb ||
				(cmdinfo->state & DATA_IN_URB_INFLIGHT)) {
			uas_log_cmd_state(cmnd, "unexpected read rdy", 0);
			break;
		}
		uas_xfer_data(urb, cmnd, SUBMIT_DATA_IN_URB);
		break;
	case IU_ID_WRITE_READY:
		if (!cmdinfo->data_out_urb ||
				(cmdinfo->state & DATA_OUT_URB_INFLIGHT)) {
			uas_log_cmd_state(cmnd, "unexpected write rdy", 0);
			break;
		}
		uas_xfer_data(urb, cmnd, SUBMIT_DATA_OUT_URB);
		break;
	case IU_ID_RESPONSE:
		cmdinfo->state &= ~COMMAND_INFLIGHT;
		success = uas_evaluate_response_iu((struct response_iu *)iu, cmnd);
		if (!success) {
			/* Error, cancel data transfers */
			data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
			data_out_urb = usb_get_urb(cmdinfo->data_out_urb);
		}
		uas_try_complete(cmnd, __func__);
		break;
	default:
		uas_log_cmd_state(cmnd, "bogus IU", iu->iu_id);
	}
out:
	usb_free_urb(urb);
	spin_unlock_irqrestore(&devinfo->lock, flags);

	/* Unlinking of data urbs must be done without holding the lock */
	if (data_in_urb) {
		usb_unlink_urb(data_in_urb);
		usb_put_urb(data_in_urb);
	}
	if (data_out_urb) {
		usb_unlink_urb(data_out_urb);
		usb_put_urb(data_out_urb);
	}
}

static void uas_data_cmplt(struct urb *urb)
{
	struct scsi_cmnd *cmnd = urb->context;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
	struct scsi_data_buffer *sdb = &cmnd->sdb;
	unsigned long flags;
	int status = urb->status;

	spin_lock_irqsave(&devinfo->lock, flags);

	if (cmdinfo->data_in_urb == urb) {
		cmdinfo->state &= ~DATA_IN_URB_INFLIGHT;
		cmdinfo->data_in_urb = NULL;
	} else if (cmdinfo->data_out_urb == urb) {
		cmdinfo->state &= ~DATA_OUT_URB_INFLIGHT;
		cmdinfo->data_out_urb = NULL;
	}

	if (devinfo->resetting)
		goto out;

	/* Data urbs should not complete before the cmd urb is submitted */
	if (cmdinfo->state & SUBMIT_CMD_URB) {
		uas_log_cmd_state(cmnd, "unexpected data cmplt", 0);
		goto out;
	}

	if (status) {
		if (status != -ENOENT && status != -ECONNRESET && status != -ESHUTDOWN)
			uas_log_cmd_state(cmnd, "data cmplt err", status);
		/* error: no data transferred */
		scsi_set_resid(cmnd, sdb->length);
	} else {
		scsi_set_resid(cmnd, sdb->length - urb->actual_length);
	}
	uas_try_complete(cmnd, __func__);
out:
	usb_free_urb(urb);
	spin_unlock_irqrestore(&devinfo->lock, flags);
}

static void uas_cmd_cmplt(struct urb *urb)
{
	if (urb->status)
		dev_err(&urb->dev->dev, "cmd cmplt err %d\n", urb->status);

	usb_free_urb(urb);
}

static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
				      struct scsi_cmnd *cmnd,
				      enum dma_data_direction dir)
{
	struct usb_device *udev = devinfo->udev;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct scsi_data_buffer *sdb = &cmnd->sdb;
	unsigned int pipe = (dir == DMA_FROM_DEVICE)
		? devinfo->data_in_pipe : devinfo->data_out_pipe;

	if (!urb)
		goto out;
	usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
			  uas_data_cmplt, cmnd);
	if (devinfo->use_streams)
		urb->stream_id = cmdinfo->uas_tag;
	urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
	urb->sg = sdb->table.sgl;
out:
	return urb;
}

static struct urb *uas_alloc_sense_urb(struct uas_dev_info *devinfo, gfp_t gfp,
				       struct scsi_cmnd *cmnd)
{
	struct usb_device *udev = devinfo->udev;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct sense_iu *iu;

	if (!urb)
		goto out;

	iu = kzalloc(sizeof(*iu), gfp);
	if (!iu)
		goto free;

	usb_fill_bulk_urb(urb, udev, devinfo->status_pipe, iu, sizeof(*iu),
			  uas_stat_cmplt, cmnd->device->host);
	if (devinfo->use_streams)
		urb->stream_id = cmdinfo->uas_tag;
	urb->transfer_flags |= URB_FREE_BUFFER;
out:
	return urb;
free:
	usb_free_urb(urb);
	return NULL;
}

static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
				     struct scsi_cmnd *cmnd)
{
	struct usb_device *udev = devinfo->udev;
	struct scsi_device *sdev = cmnd->device;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct urb *urb = usb_alloc_urb(0, gfp);
	struct command_iu *iu;
	int len;

	if (!urb)
		goto out;

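	/*
	 * The command IU carries a 16-byte CDB inline; anything beyond that
	 * goes in the variable-length tail, padded to a 4-byte boundary.
	 */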
	len = cmnd->cmd_len - 16;
	if (len < 0)
		len = 0;
	len = ALIGN(len, 4);
	iu = kzalloc(sizeof(*iu) + len, gfp);
	if (!iu)
		goto free;

	iu->iu_id = IU_ID_COMMAND;
	iu->tag = cpu_to_be16(cmdinfo->uas_tag);
	iu->prio_attr = UAS_SIMPLE_TAG;
	iu->len = len;
	int_to_scsilun(sdev->lun, &iu->lun);
	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);

	usb_fill_bulk_urb(urb, udev, devinfo->cmd_pipe, iu, sizeof(*iu) + len,
			  uas_cmd_cmplt, NULL);
	urb->transfer_flags |= URB_FREE_BUFFER;
out:
	return urb;
free:
	usb_free_urb(urb);
	return NULL;
}

/*
 * Why should I request the Status IU before sending the Command IU? Spec
 * says to, but also says the device may receive them in any order. Seems
 * daft to me.
 */

static struct urb *uas_submit_sense_urb(struct scsi_cmnd *cmnd, gfp_t gfp)
{
	struct uas_dev_info *devinfo = cmnd->device->hostdata;
	struct urb *urb;
	int err;

	urb = uas_alloc_sense_urb(devinfo, gfp, cmnd);
	if (!urb)
		return NULL;
	usb_anchor_urb(urb, &devinfo->sense_urbs);
	err = usb_submit_urb(urb, gfp);
	if (err) {
		usb_unanchor_urb(urb);
		uas_log_cmd_state(cmnd, "sense submit err", err);
		usb_free_urb(urb);
		return NULL;
	}
	return urb;
}

static int uas_submit_urbs(struct scsi_cmnd *cmnd,
			   struct uas_dev_info *devinfo)
{
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct urb *urb;
	int err;

	lockdep_assert_held(&devinfo->lock);
	if (cmdinfo->state & SUBMIT_STATUS_URB) {
		urb = uas_submit_sense_urb(cmnd, GFP_ATOMIC);
		if (!urb)
			return SCSI_MLQUEUE_DEVICE_BUSY;
		cmdinfo->state &= ~SUBMIT_STATUS_URB;
	}

	if (cmdinfo->state & ALLOC_DATA_IN_URB) {
		cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
						cmnd, DMA_FROM_DEVICE);
		if (!cmdinfo->data_in_urb)
			return SCSI_MLQUEUE_DEVICE_BUSY;
		cmdinfo->state &= ~ALLOC_DATA_IN_URB;
	}

	if (cmdinfo->state & SUBMIT_DATA_IN_URB) {
		usb_anchor_urb(cmdinfo->data_in_urb, &devinfo->data_urbs);
		err = usb_submit_urb(cmdinfo->data_in_urb, GFP_ATOMIC);
		if (err) {
			usb_unanchor_urb(cmdinfo->data_in_urb);
			uas_log_cmd_state(cmnd, "data in submit err", err);
			return SCSI_MLQUEUE_DEVICE_BUSY;
		}
		cmdinfo->state &= ~SUBMIT_DATA_IN_URB;
		cmdinfo->state |= DATA_IN_URB_INFLIGHT;
	}

	if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
		cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, GFP_ATOMIC,
						cmnd, DMA_TO_DEVICE);
		if (!cmdinfo->data_out_urb)
			return SCSI_MLQUEUE_DEVICE_BUSY;
		cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
	}

	if (cmdinfo->state & SUBMIT_DATA_OUT_URB) {
		usb_anchor_urb(cmdinfo->data_out_urb, &devinfo->data_urbs);
		err = usb_submit_urb(cmdinfo->data_out_urb, GFP_ATOMIC);
		if (err) {
			usb_unanchor_urb(cmdinfo->data_out_urb);
			uas_log_cmd_state(cmnd, "data out submit err", err);
			return SCSI_MLQUEUE_DEVICE_BUSY;
		}
		cmdinfo->state &= ~SUBMIT_DATA_OUT_URB;
		cmdinfo->state |= DATA_OUT_URB_INFLIGHT;
	}

	if (cmdinfo->state & ALLOC_CMD_URB) {
		cmdinfo->cmd_urb = uas_alloc_cmd_urb(devinfo, GFP_ATOMIC, cmnd);
		if (!cmdinfo->cmd_urb)
			return SCSI_MLQUEUE_DEVICE_BUSY;
		cmdinfo->state &= ~ALLOC_CMD_URB;
	}

	if (cmdinfo->state & SUBMIT_CMD_URB) {
		usb_anchor_urb(cmdinfo->cmd_urb, &devinfo->cmd_urbs);
		err = usb_submit_urb(cmdinfo->cmd_urb, GFP_ATOMIC);
		if (err) {
			usb_unanchor_urb(cmdinfo->cmd_urb);
			uas_log_cmd_state(cmnd, "cmd submit err", err);
			return SCSI_MLQUEUE_DEVICE_BUSY;
		}
		cmdinfo->cmd_urb = NULL;
		cmdinfo->state &= ~SUBMIT_CMD_URB;
		cmdinfo->state |= COMMAND_INFLIGHT;
	}

	return 0;
}

static int uas_queuecommand_lck(struct scsi_cmnd *cmnd,
				void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *sdev = cmnd->device;
	struct uas_dev_info *devinfo = sdev->hostdata;
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	unsigned long flags;
	int idx, err;

	BUILD_BUG_ON(sizeof(struct uas_cmd_info) > sizeof(struct scsi_pointer));

	/* Re-check scsi_block_requests now that we have the host-lock */
	if (cmnd->device->host->host_self_blocked)
		return SCSI_MLQUEUE_DEVICE_BUSY;

	if ((devinfo->flags & US_FL_NO_ATA_1X) &&
			(cmnd->cmnd[0] == ATA_12 || cmnd->cmnd[0] == ATA_16)) {
		memcpy(cmnd->sense_buffer, usb_stor_sense_invalidCDB,
		       sizeof(usb_stor_sense_invalidCDB));
		cmnd->result = SAM_STAT_CHECK_CONDITION;
		cmnd->scsi_done(cmnd);
		return 0;
	}

	spin_lock_irqsave(&devinfo->lock, flags);

	if (devinfo->resetting) {
		set_host_byte(cmnd, DID_ERROR);
		cmnd->scsi_done(cmnd);
		goto zombie;
	}

	/* Find a free uas-tag */
	for (idx = 0; idx < devinfo->qdepth; idx++) {
		if (!devinfo->cmnd[idx])
			break;
	}
	if (idx == devinfo->qdepth) {
		spin_unlock_irqrestore(&devinfo->lock, flags);
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	cmnd->scsi_done = done;

	memset(cmdinfo, 0, sizeof(*cmdinfo));
	cmdinfo->uas_tag = idx + 1; /* uas-tag == usb-stream-id, so 1 based */
	cmdinfo->state = SUBMIT_STATUS_URB | ALLOC_CMD_URB | SUBMIT_CMD_URB;

	switch (cmnd->sc_data_direction) {
	case DMA_FROM_DEVICE:
		cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
		break;
	case DMA_BIDIRECTIONAL:
		cmdinfo->state |= ALLOC_DATA_IN_URB | SUBMIT_DATA_IN_URB;
		fallthrough;
	case DMA_TO_DEVICE:
		cmdinfo->state |= ALLOC_DATA_OUT_URB | SUBMIT_DATA_OUT_URB;
	case DMA_NONE:
		break;
	}

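	/*
	 * Without streams (usb-2) the data URBs are not submitted here; they
	 * are only submitted once the device sends a READ/WRITE READY IU for
	 * this tag (see uas_stat_cmplt).
	 */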
	if (!devinfo->use_streams)
		cmdinfo->state &= ~(SUBMIT_DATA_IN_URB | SUBMIT_DATA_OUT_URB);

	err = uas_submit_urbs(cmnd, devinfo);
	/*
	 * In case of fatal errors the SCSI layer is peculiar:
	 * a command that has finished is a success for the purpose
	 * of queueing, no matter how fatal the error.
	 */
	if (err == -ENODEV) {
		set_host_byte(cmnd, DID_ERROR);
		cmnd->scsi_done(cmnd);
		goto zombie;
	}
	if (err) {
		/* If we did nothing, give up now */
		if (cmdinfo->state & SUBMIT_STATUS_URB) {
			spin_unlock_irqrestore(&devinfo->lock, flags);
			return SCSI_MLQUEUE_DEVICE_BUSY;
		}
		uas_add_work(cmdinfo);
	}

	devinfo->cmnd[idx] = cmnd;
zombie:
	spin_unlock_irqrestore(&devinfo->lock, flags);
	return 0;
}

static DEF_SCSI_QCMD(uas_queuecommand)

/*
 * For now we do not support actually sending an abort to the device, so
 * this eh always fails. Still we must define it to make sure that we've
 * dropped all references to the cmnd in question once this function exits.
 */
static int uas_eh_abort_handler(struct scsi_cmnd *cmnd)
{
	struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
	struct uas_dev_info *devinfo = (void *)cmnd->device->hostdata;
	struct urb *data_in_urb = NULL;
	struct urb *data_out_urb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&devinfo->lock, flags);

	uas_log_cmd_state(cmnd, __func__, 0);

	/* Ensure that try_complete does not call scsi_done */
	cmdinfo->state |= COMMAND_ABORTED;

	/* Drop all refs to this cmnd, kill data urbs to break their ref */
	devinfo->cmnd[cmdinfo->uas_tag - 1] = NULL;
	if (cmdinfo->state & DATA_IN_URB_INFLIGHT)
		data_in_urb = usb_get_urb(cmdinfo->data_in_urb);
	if (cmdinfo->state & DATA_OUT_URB_INFLIGHT)
		data_out_urb = usb_get_urb(cmdinfo->data_out_urb);

	uas_free_unsubmitted_urbs(cmnd);

	spin_unlock_irqrestore(&devinfo->lock, flags);

	if (data_in_urb) {
		usb_kill_urb(data_in_urb);
		usb_put_urb(data_in_urb);
	}
	if (data_out_urb) {
		usb_kill_urb(data_out_urb);
		usb_put_urb(data_out_urb);
	}

	return FAILED;
}

static int uas_eh_device_reset_handler(struct scsi_cmnd *cmnd)
{
	struct scsi_device *sdev = cmnd->device;
	struct uas_dev_info *devinfo = sdev->hostdata;
	struct usb_device *udev = devinfo->udev;
	unsigned long flags;
	int err;

	err = usb_lock_device_for_reset(udev, devinfo->intf);
	if (err) {
		shost_printk(KERN_ERR, sdev->host,
			     "%s FAILED to get lock err %d\n", __func__, err);
		return FAILED;
	}

	shost_printk(KERN_INFO, sdev->host, "%s start\n", __func__);

	spin_lock_irqsave(&devinfo->lock, flags);
	devinfo->resetting = 1;
	spin_unlock_irqrestore(&devinfo->lock, flags);

	usb_kill_anchored_urbs(&devinfo->cmd_urbs);
	usb_kill_anchored_urbs(&devinfo->sense_urbs);
	usb_kill_anchored_urbs(&devinfo->data_urbs);
	uas_zap_pending(devinfo, DID_RESET);

	err = usb_reset_device(udev);

	spin_lock_irqsave(&devinfo->lock, flags);
	devinfo->resetting = 0;
	spin_unlock_irqrestore(&devinfo->lock, flags);

	usb_unlock_device(udev);

	if (err) {
		shost_printk(KERN_INFO, sdev->host, "%s FAILED err %d\n",
			     __func__, err);
		return FAILED;
	}

	shost_printk(KERN_INFO, sdev->host, "%s success\n", __func__);
	return SUCCESS;
}

static int uas_target_alloc(struct scsi_target *starget)
{
	struct uas_dev_info *devinfo = (struct uas_dev_info *)
			dev_to_shost(starget->dev.parent)->hostdata;

	if (devinfo->flags & US_FL_NO_REPORT_LUNS)
		starget->no_report_luns = 1;

	return 0;
}

static int uas_slave_alloc(struct scsi_device *sdev)
{
	struct uas_dev_info *devinfo =
		(struct uas_dev_info *)sdev->host->hostdata;

	sdev->hostdata = devinfo;

	/*
	 * The protocol has no requirements on alignment in the strict sense.
	 * Controllers may or may not have alignment restrictions.
	 * As this is not exported, we use an extremely conservative guess.
	 */
	blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));

	if (devinfo->flags & US_FL_MAX_SECTORS_64)
		blk_queue_max_hw_sectors(sdev->request_queue, 64);
	else if (devinfo->flags & US_FL_MAX_SECTORS_240)
		blk_queue_max_hw_sectors(sdev->request_queue, 240);

	return 0;
}

static int uas_slave_configure(struct scsi_device *sdev)
{
	struct uas_dev_info *devinfo = sdev->hostdata;

	if (devinfo->flags & US_FL_NO_REPORT_OPCODES)
		sdev->no_report_opcodes = 1;

	/* A few buggy USB-ATA bridges don't understand FUA */
	if (devinfo->flags & US_FL_BROKEN_FUA)
		sdev->broken_fua = 1;

	/* UAS also needs to support FL_ALWAYS_SYNC */
	if (devinfo->flags & US_FL_ALWAYS_SYNC) {
		sdev->skip_ms_page_3f = 1;
		sdev->skip_ms_page_8 = 1;
		sdev->wce_default_on = 1;
	}

	/* Some disks cannot handle READ_CAPACITY_16 */
	if (devinfo->flags & US_FL_NO_READ_CAPACITY_16)
		sdev->no_read_capacity_16 = 1;

	/* Some disks cannot handle WRITE_SAME */
	if (devinfo->flags & US_FL_NO_SAME)
		sdev->no_write_same = 1;
	/*
	 * Some disks return the total number of blocks in response
	 * to READ CAPACITY rather than the highest block number.
	 * If this device makes that mistake, tell the sd driver.
	 */
	if (devinfo->flags & US_FL_FIX_CAPACITY)
		sdev->fix_capacity = 1;

	/*
	 * in some cases we have to guess
	 */
	if (devinfo->flags & US_FL_CAPACITY_HEURISTICS)
		sdev->guess_capacity = 1;

	/*
	 * Some devices don't like MODE SENSE with page=0x3f,
	 * which is the command used for checking if a device
	 * is write-protected. Now that we tell the sd driver
	 * to do a 192-byte transfer with this command the
	 * majority of devices work fine, but a few still can't
	 * handle it. The sd driver will simply assume those
	 * devices are write-enabled.
	 */
	if (devinfo->flags & US_FL_NO_WP_DETECT)
		sdev->skip_ms_page_3f = 1;

	scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
	return 0;
}

static struct scsi_host_template uas_host_template = {
	.module = THIS_MODULE,
	.name = "uas",
	.queuecommand = uas_queuecommand,
	.target_alloc = uas_target_alloc,
	.slave_alloc = uas_slave_alloc,
	.slave_configure = uas_slave_configure,
	.eh_abort_handler = uas_eh_abort_handler,
	.eh_device_reset_handler = uas_eh_device_reset_handler,
	.this_id = -1,
	.skip_settle_delay = 1,
	.dma_boundary = PAGE_SIZE - 1,
};

#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
	.driver_info = (flags) }

static struct usb_device_id uas_usb_ids[] = {
#	include "unusual_uas.h"
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_BULK) },
	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, USB_SC_SCSI, USB_PR_UAS) },
	{ }
};
MODULE_DEVICE_TABLE(usb, uas_usb_ids);

#undef UNUSUAL_DEV

static int uas_switch_interface(struct usb_device *udev,
				struct usb_interface *intf)
{
	struct usb_host_interface *alt;

	alt = uas_find_uas_alt_setting(intf);
	if (!alt)
		return -ENODEV;

	return usb_set_interface(udev, alt->desc.bInterfaceNumber,
			alt->desc.bAlternateSetting);
}

static int uas_configure_endpoints(struct uas_dev_info *devinfo)
{
	struct usb_host_endpoint *eps[4] = { };
	struct usb_device *udev = devinfo->udev;
	int r;

	r = uas_find_endpoints(devinfo->intf->cur_altsetting, eps);
	if (r)
		return r;

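	/* eps[] holds the command, status, data-in and data-out endpoints, in that order */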
	devinfo->cmd_pipe = usb_sndbulkpipe(udev,
					    usb_endpoint_num(&eps[0]->desc));
	devinfo->status_pipe = usb_rcvbulkpipe(udev,
					    usb_endpoint_num(&eps[1]->desc));
	devinfo->data_in_pipe = usb_rcvbulkpipe(udev,
					    usb_endpoint_num(&eps[2]->desc));
	devinfo->data_out_pipe = usb_sndbulkpipe(udev,
					    usb_endpoint_num(&eps[3]->desc));

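	/*
	 * Bulk streams are a USB 3.x (SuperSpeed) feature; below SuperSpeed
	 * fall back to a fixed queue depth without stream IDs.
	 */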
	if (udev->speed < USB_SPEED_SUPER) {
		devinfo->qdepth = 32;
		devinfo->use_streams = 0;
	} else {
		devinfo->qdepth = usb_alloc_streams(devinfo->intf, eps + 1,
						    3, MAX_CMNDS, GFP_NOIO);
		if (devinfo->qdepth < 0)
			return devinfo->qdepth;
		devinfo->use_streams = 1;
	}

	return 0;
}

static void uas_free_streams(struct uas_dev_info *devinfo)
{
	struct usb_device *udev = devinfo->udev;
	struct usb_host_endpoint *eps[3];

	eps[0] = usb_pipe_endpoint(udev, devinfo->status_pipe);
	eps[1] = usb_pipe_endpoint(udev, devinfo->data_in_pipe);
	eps[2] = usb_pipe_endpoint(udev, devinfo->data_out_pipe);
	usb_free_streams(devinfo->intf, eps, 3, GFP_NOIO);
}

static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
	int result = -ENOMEM;
	struct Scsi_Host *shost = NULL;
	struct uas_dev_info *devinfo;
	struct usb_device *udev = interface_to_usbdev(intf);
	unsigned long dev_flags;

	if (!uas_use_uas_driver(intf, id, &dev_flags))
		return -ENODEV;

	if (uas_switch_interface(udev, intf))
		return -ENODEV;

	shost = scsi_host_alloc(&uas_host_template,
				sizeof(struct uas_dev_info));
	if (!shost)
		goto set_alt0;

	shost->max_cmd_len = 16 + 252;
	shost->max_id = 1;
	shost->max_lun = 256;
	shost->max_channel = 0;
	shost->sg_tablesize = udev->bus->sg_tablesize;

	devinfo = (struct uas_dev_info *)shost->hostdata;
	devinfo->intf = intf;
	devinfo->udev = udev;
	devinfo->resetting = 0;
	devinfo->shutdown = 0;
	devinfo->flags = dev_flags;
	init_usb_anchor(&devinfo->cmd_urbs);
	init_usb_anchor(&devinfo->sense_urbs);
	init_usb_anchor(&devinfo->data_urbs);
	spin_lock_init(&devinfo->lock);
	INIT_WORK(&devinfo->work, uas_do_work);
	INIT_WORK(&devinfo->scan_work, uas_scan_work);

	result = uas_configure_endpoints(devinfo);
	if (result)
		goto set_alt0;

	/*
	 * 1 tag is reserved for untagged commands +
	 * 1 tag to avoid off by one errors in some bridge firmwares
	 */
	shost->can_queue = devinfo->qdepth - 2;

	usb_set_intfdata(intf, shost);
	result = scsi_add_host(shost, &intf->dev);
	if (result)
		goto free_streams;

	/* Submit the work item for async SCSI-device scanning */
	schedule_work(&devinfo->scan_work);

	return result;

free_streams:
	uas_free_streams(devinfo);
	usb_set_intfdata(intf, NULL);
set_alt0:
	usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
	if (shost)
		scsi_host_put(shost);
	return result;
}

static int uas_cmnd_list_empty(struct uas_dev_info *devinfo)
{
	unsigned long flags;
	int i, r = 1;

	spin_lock_irqsave(&devinfo->lock, flags);

	for (i = 0; i < devinfo->qdepth; i++) {
		if (devinfo->cmnd[i]) {
			r = 0;	/* Not empty */
			break;
		}
	}

	spin_unlock_irqrestore(&devinfo->lock, flags);

	return r;
}

/*
 * Wait for any pending cmnds to complete. On usb-2, sense_urbs may temporarily
 * get empty while there still is more work to do, due to sense-urbs completing
 * with a READ/WRITE_READY iu code, so keep waiting until the cmnd list gets
 * empty.
 */
static int uas_wait_for_pending_cmnds(struct uas_dev_info *devinfo)
{
	unsigned long start_time;
	int r;

	start_time = jiffies;
	do {
		flush_work(&devinfo->work);

		r = usb_wait_anchor_empty_timeout(&devinfo->sense_urbs, 5000);
		if (r == 0)
			return -ETIME;

		r = usb_wait_anchor_empty_timeout(&devinfo->data_urbs, 500);
		if (r == 0)
			return -ETIME;

		if (time_after(jiffies, start_time + 5 * HZ))
			return -ETIME;
	} while (!uas_cmnd_list_empty(devinfo));

	return 0;
}

static int uas_pre_reset(struct usb_interface *intf)
{
	struct Scsi_Host *shost = usb_get_intfdata(intf);
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
	unsigned long flags;

	if (devinfo->shutdown)
		return 0;

	/* Block new requests */
	spin_lock_irqsave(shost->host_lock, flags);
	scsi_block_requests(shost);
	spin_unlock_irqrestore(shost->host_lock, flags);

	if (uas_wait_for_pending_cmnds(devinfo) != 0) {
		shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
		scsi_unblock_requests(shost);
		return 1;
	}

	uas_free_streams(devinfo);

	return 0;
}

static int uas_post_reset(struct usb_interface *intf)
{
	struct Scsi_Host *shost = usb_get_intfdata(intf);
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
	unsigned long flags;
	int err;

	if (devinfo->shutdown)
		return 0;

	err = uas_configure_endpoints(devinfo);
	if (err && err != -ENODEV)
		shost_printk(KERN_ERR, shost,
			     "%s: alloc streams error %d after reset",
			     __func__, err);

	/* we must unblock the host in every case lest we deadlock */
	spin_lock_irqsave(shost->host_lock, flags);
	scsi_report_bus_reset(shost, 0);
	spin_unlock_irqrestore(shost->host_lock, flags);

	scsi_unblock_requests(shost);

	return err ? 1 : 0;
}

static int uas_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct Scsi_Host *shost = usb_get_intfdata(intf);
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;

	if (uas_wait_for_pending_cmnds(devinfo) != 0) {
		shost_printk(KERN_ERR, shost, "%s: timed out\n", __func__);
		return -ETIME;
	}

	return 0;
}

static int uas_resume(struct usb_interface *intf)
{
	return 0;
}

static int uas_reset_resume(struct usb_interface *intf)
{
	struct Scsi_Host *shost = usb_get_intfdata(intf);
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
	unsigned long flags;
	int err;

	err = uas_configure_endpoints(devinfo);
	if (err) {
		shost_printk(KERN_ERR, shost,
			     "%s: alloc streams error %d after reset",
			     __func__, err);
		return -EIO;
	}

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_report_bus_reset(shost, 0);
	spin_unlock_irqrestore(shost->host_lock, flags);

	return 0;
}

static void uas_disconnect(struct usb_interface *intf)
{
	struct Scsi_Host *shost = usb_get_intfdata(intf);
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;
	unsigned long flags;

	spin_lock_irqsave(&devinfo->lock, flags);
	devinfo->resetting = 1;
	spin_unlock_irqrestore(&devinfo->lock, flags);

	cancel_work_sync(&devinfo->work);
	usb_kill_anchored_urbs(&devinfo->cmd_urbs);
	usb_kill_anchored_urbs(&devinfo->sense_urbs);
	usb_kill_anchored_urbs(&devinfo->data_urbs);
	uas_zap_pending(devinfo, DID_NO_CONNECT);

	/*
	 * Prevent SCSI scanning (if it hasn't started yet)
	 * or wait for the SCSI-scanning routine to stop.
	 */
	cancel_work_sync(&devinfo->scan_work);

	scsi_remove_host(shost);
	uas_free_streams(devinfo);
	scsi_host_put(shost);
}

/*
 * Put the device back in usb-storage mode on shutdown, as some BIOS-es
 * hang on reboot when the device is still in uas mode. Note the reset is
 * necessary as some devices won't revert to usb-storage mode without it.
 */
static void uas_shutdown(struct device *dev)
{
	struct usb_interface *intf = to_usb_interface(dev);
	struct usb_device *udev = interface_to_usbdev(intf);
	struct Scsi_Host *shost = usb_get_intfdata(intf);
	struct uas_dev_info *devinfo = (struct uas_dev_info *)shost->hostdata;

	if (system_state != SYSTEM_RESTART)
		return;

	devinfo->shutdown = 1;
	uas_free_streams(devinfo);
	usb_set_interface(udev, intf->altsetting[0].desc.bInterfaceNumber, 0);
	usb_reset_device(udev);
}

static struct usb_driver uas_driver = {
	.name = "uas",
	.probe = uas_probe,
	.disconnect = uas_disconnect,
	.pre_reset = uas_pre_reset,
	.post_reset = uas_post_reset,
	.suspend = uas_suspend,
	.resume = uas_resume,
	.reset_resume = uas_reset_resume,
	.drvwrap.driver.shutdown = uas_shutdown,
	.id_table = uas_usb_ids,
};

static int __init uas_init(void)
{
	int rv;

	workqueue = alloc_workqueue("uas", WQ_MEM_RECLAIM, 0);
	if (!workqueue)
		return -ENOMEM;

	rv = usb_register(&uas_driver);
	if (rv) {
		destroy_workqueue(workqueue);
		return rv;
	}

	return 0;
}

static void __exit uas_exit(void)
{
	usb_deregister(&uas_driver);
	destroy_workqueue(workqueue);
}

module_init(uas_init);
module_exit(uas_exit);

MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(USB_STORAGE);
MODULE_AUTHOR(
	"Hans de Goede <hdegoede@redhat.com>, Matthew Wilcox and Sarah Sharp");