xref: /OK3568_Linux_fs/kernel/drivers/usb/storage/transport.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for USB Mass Storage compliant devices
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Current development and maintenance by:
6*4882a593Smuzhiyun  *   (c) 1999-2002 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Developed with the assistance of:
9*4882a593Smuzhiyun  *   (c) 2000 David L. Brown, Jr. (usb-storage@davidb.org)
10*4882a593Smuzhiyun  *   (c) 2000 Stephen J. Gowdy (SGowdy@lbl.gov)
11*4882a593Smuzhiyun  *   (c) 2002 Alan Stern <stern@rowland.org>
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * Initial work by:
14*4882a593Smuzhiyun  *   (c) 1999 Michael Gee (michael@linuxspecific.com)
15*4882a593Smuzhiyun  *
16*4882a593Smuzhiyun  * This driver is based on the 'USB Mass Storage Class' document. This
17*4882a593Smuzhiyun  * describes in detail the protocol used to communicate with such
18*4882a593Smuzhiyun  * devices.  Clearly, the designers had SCSI and ATAPI commands in
19*4882a593Smuzhiyun  * mind when they created this document.  The commands are all very
20*4882a593Smuzhiyun  * similar to commands in the SCSI-II and ATAPI specifications.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  * It is important to note that in a number of cases this class
23*4882a593Smuzhiyun  * exhibits class-specific exemptions from the USB specification.
24*4882a593Smuzhiyun  * Notably the usage of NAK, STALL and ACK differs from the norm, in
25*4882a593Smuzhiyun  * that they are used to communicate wait, failed and OK on commands.
26*4882a593Smuzhiyun  *
27*4882a593Smuzhiyun  * Also, for certain devices, the interrupt endpoint is used to convey
28*4882a593Smuzhiyun  * status of a command.
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun #include <linux/sched.h>
32*4882a593Smuzhiyun #include <linux/gfp.h>
33*4882a593Smuzhiyun #include <linux/errno.h>
34*4882a593Smuzhiyun #include <linux/export.h>
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include <linux/usb/quirks.h>
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #include <scsi/scsi.h>
39*4882a593Smuzhiyun #include <scsi/scsi_eh.h>
40*4882a593Smuzhiyun #include <scsi/scsi_device.h>
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #include "usb.h"
43*4882a593Smuzhiyun #include "transport.h"
44*4882a593Smuzhiyun #include "protocol.h"
45*4882a593Smuzhiyun #include "scsiglue.h"
46*4882a593Smuzhiyun #include "debug.h"
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #include <linux/blkdev.h>
49*4882a593Smuzhiyun #include "../../scsi/sd.h"
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun /***********************************************************************
53*4882a593Smuzhiyun  * Data transfer routines
54*4882a593Smuzhiyun  ***********************************************************************/
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun /*
57*4882a593Smuzhiyun  * This is subtle, so pay attention:
58*4882a593Smuzhiyun  * ---------------------------------
59*4882a593Smuzhiyun  * We're very concerned about races with a command abort.  Hanging this code
60*4882a593Smuzhiyun  * is a sure fire way to hang the kernel.  (Note that this discussion applies
61*4882a593Smuzhiyun  * only to transactions resulting from a scsi queued-command, since only
62*4882a593Smuzhiyun  * these transactions are subject to a scsi abort.  Other transactions, such
63*4882a593Smuzhiyun  * as those occurring during device-specific initialization, must be handled
64*4882a593Smuzhiyun  * by a separate code path.)
65*4882a593Smuzhiyun  *
66*4882a593Smuzhiyun  * The abort function (usb_storage_command_abort() in scsiglue.c) first
67*4882a593Smuzhiyun  * sets the machine state and the ABORTING bit in us->dflags to prevent
68*4882a593Smuzhiyun  * new URBs from being submitted.  It then calls usb_stor_stop_transport()
69*4882a593Smuzhiyun  * below, which atomically tests-and-clears the URB_ACTIVE bit in us->dflags
70*4882a593Smuzhiyun  * to see if the current_urb needs to be stopped.  Likewise, the SG_ACTIVE
71*4882a593Smuzhiyun  * bit is tested to see if the current_sg scatter-gather request needs to be
72*4882a593Smuzhiyun  * stopped.  The timeout callback routine does much the same thing.
73*4882a593Smuzhiyun  *
74*4882a593Smuzhiyun  * When a disconnect occurs, the DISCONNECTING bit in us->dflags is set to
75*4882a593Smuzhiyun  * prevent new URBs from being submitted, and usb_stor_stop_transport() is
76*4882a593Smuzhiyun  * called to stop any ongoing requests.
77*4882a593Smuzhiyun  *
78*4882a593Smuzhiyun  * The submit function first verifies that the submitting is allowed
79*4882a593Smuzhiyun  * (neither ABORTING nor DISCONNECTING bits are set) and that the submit
80*4882a593Smuzhiyun  * completes without errors, and only then sets the URB_ACTIVE bit.  This
81*4882a593Smuzhiyun  * prevents the stop_transport() function from trying to cancel the URB
82*4882a593Smuzhiyun  * while the submit call is underway.  Next, the submit function must test
83*4882a593Smuzhiyun  * the flags to see if an abort or disconnect occurred during the submission
84*4882a593Smuzhiyun  * or before the URB_ACTIVE bit was set.  If so, it's essential to cancel
85*4882a593Smuzhiyun  * the URB if it hasn't been cancelled already (i.e., if the URB_ACTIVE bit
86*4882a593Smuzhiyun  * is still set).  Either way, the function must then wait for the URB to
87*4882a593Smuzhiyun  * finish.  Note that the URB can still be in progress even after a call to
88*4882a593Smuzhiyun  * usb_unlink_urb() returns.
89*4882a593Smuzhiyun  *
90*4882a593Smuzhiyun  * The idea is that (1) once the ABORTING or DISCONNECTING bit is set,
91*4882a593Smuzhiyun  * either the stop_transport() function or the submitting function
92*4882a593Smuzhiyun  * is guaranteed to call usb_unlink_urb() for an active URB,
93*4882a593Smuzhiyun  * and (2) test_and_clear_bit() prevents usb_unlink_urb() from being
94*4882a593Smuzhiyun  * called more than once or from being called during usb_submit_urb().
95*4882a593Smuzhiyun  */
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun /*
98*4882a593Smuzhiyun  * This is the completion handler which will wake us up when an URB
99*4882a593Smuzhiyun  * completes.
100*4882a593Smuzhiyun  */
usb_stor_blocking_completion(struct urb * urb)101*4882a593Smuzhiyun static void usb_stor_blocking_completion(struct urb *urb)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	struct completion *urb_done_ptr = urb->context;
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun 	complete(urb_done_ptr);
106*4882a593Smuzhiyun }
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun /*
109*4882a593Smuzhiyun  * This is the common part of the URB message submission code
110*4882a593Smuzhiyun  *
111*4882a593Smuzhiyun  * All URBs from the usb-storage driver involved in handling a queued scsi
112*4882a593Smuzhiyun  * command _must_ pass through this function (or something like it) for the
113*4882a593Smuzhiyun  * abort mechanisms to work properly.
114*4882a593Smuzhiyun  */
static int usb_stor_msg_common(struct us_data *us, int timeout)
{
	struct completion urb_done;
	long timeleft;
	int status;

	/* don't submit URBs during abort processing */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags))
		return -EIO;

	/* set up data structures for the wakeup system */
	init_completion(&urb_done);

	/* fill the common fields in the URB */
	us->current_urb->context = &urb_done;
	us->current_urb->transfer_flags = 0;

	/*
	 * we assume that if transfer_buffer isn't us->iobuf then it
	 * hasn't been mapped for DMA.  Yes, this is clunky, but it's
	 * easier than always having the caller tell us whether the
	 * transfer buffer has already been mapped.
	 */
	if (us->current_urb->transfer_buffer == us->iobuf)
		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	us->current_urb->transfer_dma = us->iobuf_dma;

	/* submit the URB */
	status = usb_submit_urb(us->current_urb, GFP_NOIO);
	if (status) {
		/* something went wrong */
		return status;
	}

	/*
	 * since the URB has been submitted successfully, it's now okay
	 * to cancel it.  URB_ACTIVE must be set only AFTER a successful
	 * submit so that usb_stor_stop_transport() never tries to unlink
	 * an URB that is still inside usb_submit_urb().
	 */
	set_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	/*
	 * did an abort occur during the submission?  This re-test closes
	 * the window where ABORTING was set after the check at the top
	 * but before URB_ACTIVE was set (see the long comment above this
	 * function).
	 */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

		/*
		 * cancel the URB, if it hasn't been cancelled already;
		 * test_and_clear_bit() guarantees exactly one of us and
		 * stop_transport() performs the unlink
		 */
		if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
			usb_stor_dbg(us, "-- cancelling URB\n");
			usb_unlink_urb(us->current_urb);
		}
	}

	/*
	 * wait for the completion of the URB; a timeout of 0 means
	 * "wait forever" (MAX_SCHEDULE_TIMEOUT)
	 */
	timeleft = wait_for_completion_interruptible_timeout(
			&urb_done, timeout ? : MAX_SCHEDULE_TIMEOUT);

	clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags);

	if (timeleft <= 0) {
		/*
		 * timed out (0) or interrupted by a signal (<0): kill the
		 * URB synchronously so it is truly finished before we
		 * read its status below
		 */
		usb_stor_dbg(us, "%s -- cancelling URB\n",
			     timeleft == 0 ? "Timeout" : "Signal");
		usb_kill_urb(us->current_urb);
	}

	/* return the URB status */
	return us->current_urb->status;
}
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun /*
182*4882a593Smuzhiyun  * Transfer one control message, with timeouts, and allowing early
183*4882a593Smuzhiyun  * termination.  Return codes are usual -Exxx, *not* USB_STOR_XFER_xxx.
184*4882a593Smuzhiyun  */
/*
 * Send one control message, honoring the caller's timeout and the
 * driver's abort machinery.  On success the number of bytes actually
 * transferred is returned; on failure a negative -Exxx code.
 */
int usb_stor_control_msg(struct us_data *us, unsigned int pipe,
		 u8 request, u8 requesttype, u16 value, u16 index,
		 void *data, u16 size, int timeout)
{
	int ret;

	usb_stor_dbg(us, "rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
		     request, requesttype, value, index, size);

	/* build the SETUP packet in the pre-allocated request block */
	us->cr->bRequestType = requesttype;
	us->cr->bRequest = request;
	us->cr->wValue = cpu_to_le16(value);
	us->cr->wIndex = cpu_to_le16(index);
	us->cr->wLength = cpu_to_le16(size);

	/* hand the request to the common, abort-aware submission path */
	usb_fill_control_urb(us->current_urb, us->pusb_dev, pipe,
			     (unsigned char *) us->cr, data, size,
			     usb_stor_blocking_completion, NULL);
	ret = usb_stor_msg_common(us, timeout);

	/* success: report the number of bytes actually transferred */
	if (ret == 0)
		ret = us->current_urb->actual_length;
	return ret;
}
EXPORT_SYMBOL_GPL(usb_stor_control_msg);
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun /*
215*4882a593Smuzhiyun  * This is a version of usb_clear_halt() that allows early termination and
216*4882a593Smuzhiyun  * doesn't read the status from the device -- this is because some devices
217*4882a593Smuzhiyun  * crash their internal firmware when the status is requested after a halt.
218*4882a593Smuzhiyun  *
219*4882a593Smuzhiyun  * A definitive list of these 'bad' devices is too difficult to maintain or
220*4882a593Smuzhiyun  * make complete enough to be useful.  This problem was first observed on the
221*4882a593Smuzhiyun  * Hagiwara FlashGate DUAL unit.  However, bus traces reveal that neither
222*4882a593Smuzhiyun  * MacOS nor Windows checks the status after clearing a halt.
223*4882a593Smuzhiyun  *
224*4882a593Smuzhiyun  * Since many vendors in this space limit their testing to interoperability
225*4882a593Smuzhiyun  * with these two OSes, specification violations like this one are common.
226*4882a593Smuzhiyun  */
usb_stor_clear_halt(struct us_data * us,unsigned int pipe)227*4882a593Smuzhiyun int usb_stor_clear_halt(struct us_data *us, unsigned int pipe)
228*4882a593Smuzhiyun {
229*4882a593Smuzhiyun 	int result;
230*4882a593Smuzhiyun 	int endp = usb_pipeendpoint(pipe);
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	if (usb_pipein (pipe))
233*4882a593Smuzhiyun 		endp |= USB_DIR_IN;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
236*4882a593Smuzhiyun 		USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT,
237*4882a593Smuzhiyun 		USB_ENDPOINT_HALT, endp,
238*4882a593Smuzhiyun 		NULL, 0, 3*HZ);
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	if (result >= 0)
241*4882a593Smuzhiyun 		usb_reset_endpoint(us->pusb_dev, endp);
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	usb_stor_dbg(us, "result = %d\n", result);
244*4882a593Smuzhiyun 	return result;
245*4882a593Smuzhiyun }
246*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_stor_clear_halt);
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun /*
250*4882a593Smuzhiyun  * Interpret the results of a URB transfer
251*4882a593Smuzhiyun  *
252*4882a593Smuzhiyun  * This function prints appropriate debugging messages, clears halts on
253*4882a593Smuzhiyun  * non-control endpoints, and translates the status to the corresponding
254*4882a593Smuzhiyun  * USB_STOR_XFER_xxx return code.
255*4882a593Smuzhiyun  */
interpret_urb_result(struct us_data * us,unsigned int pipe,unsigned int length,int result,unsigned int partial)256*4882a593Smuzhiyun static int interpret_urb_result(struct us_data *us, unsigned int pipe,
257*4882a593Smuzhiyun 		unsigned int length, int result, unsigned int partial)
258*4882a593Smuzhiyun {
259*4882a593Smuzhiyun 	usb_stor_dbg(us, "Status code %d; transferred %u/%u\n",
260*4882a593Smuzhiyun 		     result, partial, length);
261*4882a593Smuzhiyun 	switch (result) {
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	/* no error code; did we send all the data? */
264*4882a593Smuzhiyun 	case 0:
265*4882a593Smuzhiyun 		if (partial != length) {
266*4882a593Smuzhiyun 			usb_stor_dbg(us, "-- short transfer\n");
267*4882a593Smuzhiyun 			return USB_STOR_XFER_SHORT;
268*4882a593Smuzhiyun 		}
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 		usb_stor_dbg(us, "-- transfer complete\n");
271*4882a593Smuzhiyun 		return USB_STOR_XFER_GOOD;
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun 	/* stalled */
274*4882a593Smuzhiyun 	case -EPIPE:
275*4882a593Smuzhiyun 		/*
276*4882a593Smuzhiyun 		 * for control endpoints, (used by CB[I]) a stall indicates
277*4882a593Smuzhiyun 		 * a failed command
278*4882a593Smuzhiyun 		 */
279*4882a593Smuzhiyun 		if (usb_pipecontrol(pipe)) {
280*4882a593Smuzhiyun 			usb_stor_dbg(us, "-- stall on control pipe\n");
281*4882a593Smuzhiyun 			return USB_STOR_XFER_STALLED;
282*4882a593Smuzhiyun 		}
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 		/* for other sorts of endpoint, clear the stall */
285*4882a593Smuzhiyun 		usb_stor_dbg(us, "clearing endpoint halt for pipe 0x%x\n",
286*4882a593Smuzhiyun 			     pipe);
287*4882a593Smuzhiyun 		if (usb_stor_clear_halt(us, pipe) < 0)
288*4882a593Smuzhiyun 			return USB_STOR_XFER_ERROR;
289*4882a593Smuzhiyun 		return USB_STOR_XFER_STALLED;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	/* babble - the device tried to send more than we wanted to read */
292*4882a593Smuzhiyun 	case -EOVERFLOW:
293*4882a593Smuzhiyun 		usb_stor_dbg(us, "-- babble\n");
294*4882a593Smuzhiyun 		return USB_STOR_XFER_LONG;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	/* the transfer was cancelled by abort, disconnect, or timeout */
297*4882a593Smuzhiyun 	case -ECONNRESET:
298*4882a593Smuzhiyun 		usb_stor_dbg(us, "-- transfer cancelled\n");
299*4882a593Smuzhiyun 		return USB_STOR_XFER_ERROR;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	/* short scatter-gather read transfer */
302*4882a593Smuzhiyun 	case -EREMOTEIO:
303*4882a593Smuzhiyun 		usb_stor_dbg(us, "-- short read transfer\n");
304*4882a593Smuzhiyun 		return USB_STOR_XFER_SHORT;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	/* abort or disconnect in progress */
307*4882a593Smuzhiyun 	case -EIO:
308*4882a593Smuzhiyun 		usb_stor_dbg(us, "-- abort or disconnect in progress\n");
309*4882a593Smuzhiyun 		return USB_STOR_XFER_ERROR;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	/* the catch-all error case */
312*4882a593Smuzhiyun 	default:
313*4882a593Smuzhiyun 		usb_stor_dbg(us, "-- unknown error\n");
314*4882a593Smuzhiyun 		return USB_STOR_XFER_ERROR;
315*4882a593Smuzhiyun 	}
316*4882a593Smuzhiyun }
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun /*
319*4882a593Smuzhiyun  * Transfer one control message, without timeouts, but allowing early
320*4882a593Smuzhiyun  * termination.  Return codes are USB_STOR_XFER_xxx.
321*4882a593Smuzhiyun  */
int usb_stor_ctrl_transfer(struct us_data *us, unsigned int pipe,
		u8 request, u8 requesttype, u16 value, u16 index,
		void *data, u16 size)
{
	int ret;

	usb_stor_dbg(us, "rq=%02x rqtype=%02x value=%04x index=%02x len=%u\n",
		     request, requesttype, value, index, size);

	/* build the SETUP packet in the pre-allocated request block */
	us->cr->bRequestType = requesttype;
	us->cr->bRequest = request;
	us->cr->wValue = cpu_to_le16(value);
	us->cr->wIndex = cpu_to_le16(index);
	us->cr->wLength = cpu_to_le16(size);

	/* submit via the common path; timeout 0 means "wait forever" */
	usb_fill_control_urb(us->current_urb, us->pusb_dev, pipe,
			     (unsigned char *) us->cr, data, size,
			     usb_stor_blocking_completion, NULL);
	ret = usb_stor_msg_common(us, 0);

	/* translate the raw URB status into a USB_STOR_XFER_xxx code */
	return interpret_urb_result(us, pipe, size, ret,
				    us->current_urb->actual_length);
}
EXPORT_SYMBOL_GPL(usb_stor_ctrl_transfer);
348*4882a593Smuzhiyun 
349*4882a593Smuzhiyun /*
350*4882a593Smuzhiyun  * Receive one interrupt buffer, without timeouts, but allowing early
351*4882a593Smuzhiyun  * termination.  Return codes are USB_STOR_XFER_xxx.
352*4882a593Smuzhiyun  *
353*4882a593Smuzhiyun  * This routine always uses us->recv_intr_pipe as the pipe and
354*4882a593Smuzhiyun  * us->ep_bInterval as the interrupt interval.
355*4882a593Smuzhiyun  */
usb_stor_intr_transfer(struct us_data * us,void * buf,unsigned int length)356*4882a593Smuzhiyun static int usb_stor_intr_transfer(struct us_data *us, void *buf,
357*4882a593Smuzhiyun 				  unsigned int length)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun 	int result;
360*4882a593Smuzhiyun 	unsigned int pipe = us->recv_intr_pipe;
361*4882a593Smuzhiyun 	unsigned int maxp;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	usb_stor_dbg(us, "xfer %u bytes\n", length);
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	/* calculate the max packet size */
366*4882a593Smuzhiyun 	maxp = usb_maxpacket(us->pusb_dev, pipe, usb_pipeout(pipe));
367*4882a593Smuzhiyun 	if (maxp > length)
368*4882a593Smuzhiyun 		maxp = length;
369*4882a593Smuzhiyun 
370*4882a593Smuzhiyun 	/* fill and submit the URB */
371*4882a593Smuzhiyun 	usb_fill_int_urb(us->current_urb, us->pusb_dev, pipe, buf,
372*4882a593Smuzhiyun 			maxp, usb_stor_blocking_completion, NULL,
373*4882a593Smuzhiyun 			us->ep_bInterval);
374*4882a593Smuzhiyun 	result = usb_stor_msg_common(us, 0);
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	return interpret_urb_result(us, pipe, length, result,
377*4882a593Smuzhiyun 			us->current_urb->actual_length);
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun /*
381*4882a593Smuzhiyun  * Transfer one buffer via bulk pipe, without timeouts, but allowing early
382*4882a593Smuzhiyun  * termination.  Return codes are USB_STOR_XFER_xxx.  If the bulk pipe
383*4882a593Smuzhiyun  * stalls during the transfer, the halt is automatically cleared.
384*4882a593Smuzhiyun  */
usb_stor_bulk_transfer_buf(struct us_data * us,unsigned int pipe,void * buf,unsigned int length,unsigned int * act_len)385*4882a593Smuzhiyun int usb_stor_bulk_transfer_buf(struct us_data *us, unsigned int pipe,
386*4882a593Smuzhiyun 	void *buf, unsigned int length, unsigned int *act_len)
387*4882a593Smuzhiyun {
388*4882a593Smuzhiyun 	int result;
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 	usb_stor_dbg(us, "xfer %u bytes\n", length);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	/* fill and submit the URB */
393*4882a593Smuzhiyun 	usb_fill_bulk_urb(us->current_urb, us->pusb_dev, pipe, buf, length,
394*4882a593Smuzhiyun 		      usb_stor_blocking_completion, NULL);
395*4882a593Smuzhiyun 	result = usb_stor_msg_common(us, 0);
396*4882a593Smuzhiyun 
397*4882a593Smuzhiyun 	/* store the actual length of the data transferred */
398*4882a593Smuzhiyun 	if (act_len)
399*4882a593Smuzhiyun 		*act_len = us->current_urb->actual_length;
400*4882a593Smuzhiyun 	return interpret_urb_result(us, pipe, length, result,
401*4882a593Smuzhiyun 			us->current_urb->actual_length);
402*4882a593Smuzhiyun }
403*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_buf);
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun /*
406*4882a593Smuzhiyun  * Transfer a scatter-gather list via bulk transfer
407*4882a593Smuzhiyun  *
408*4882a593Smuzhiyun  * This function does basically the same thing as usb_stor_bulk_transfer_buf()
409*4882a593Smuzhiyun  * above, but it uses the usbcore scatter-gather library.
410*4882a593Smuzhiyun  */
static int usb_stor_bulk_transfer_sglist(struct us_data *us, unsigned int pipe,
		struct scatterlist *sg, int num_sg, unsigned int length,
		unsigned int *act_len)
{
	int result;

	/*
	 * don't submit s-g requests during abort processing.  NOTE: on
	 * this early return (and on usb_sg_init() failure below) *act_len
	 * is NOT written; callers must not rely on it being set when the
	 * return value is USB_STOR_XFER_ERROR.
	 */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags))
		return USB_STOR_XFER_ERROR;

	/* initialize the scatter-gather request block */
	usb_stor_dbg(us, "xfer %u bytes, %d entries\n", length, num_sg);
	result = usb_sg_init(&us->current_sg, us->pusb_dev, pipe, 0,
			sg, num_sg, length, GFP_NOIO);
	if (result) {
		usb_stor_dbg(us, "usb_sg_init returned %d\n", result);
		return USB_STOR_XFER_ERROR;
	}

	/*
	 * since the block has been initialized successfully, it's now
	 * okay to cancel it.  SG_ACTIVE plays the same role for s-g
	 * requests that URB_ACTIVE plays for single URBs (see the long
	 * comment near the top of this file).
	 */
	set_bit(US_FLIDX_SG_ACTIVE, &us->dflags);

	/*
	 * did an abort occur during the submission?  This re-test closes
	 * the window between the ABORTING check above and SG_ACTIVE
	 * being set.
	 */
	if (test_bit(US_FLIDX_ABORTING, &us->dflags)) {

		/*
		 * cancel the request, if it hasn't been cancelled already;
		 * test_and_clear_bit() ensures exactly one of us and
		 * usb_stor_stop_transport() issues the cancel
		 */
		if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
			usb_stor_dbg(us, "-- cancelling sg request\n");
			usb_sg_cancel(&us->current_sg);
		}
	}

	/* wait for the completion of the transfer */
	usb_sg_wait(&us->current_sg);
	clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags);

	result = us->current_sg.status;
	if (act_len)
		*act_len = us->current_sg.bytes;
	return interpret_urb_result(us, pipe, length, result,
			us->current_sg.bytes);
}
456*4882a593Smuzhiyun 
457*4882a593Smuzhiyun /*
458*4882a593Smuzhiyun  * Common used function. Transfer a complete command
459*4882a593Smuzhiyun  * via usb_stor_bulk_transfer_sglist() above. Set cmnd resid
460*4882a593Smuzhiyun  */
int usb_stor_bulk_srb(struct us_data* us, unsigned int pipe,
		      struct scsi_cmnd* srb)
{
	/*
	 * Must be initialized: usb_stor_bulk_transfer_sglist() can return
	 * early (abort in progress, or usb_sg_init() failure) without
	 * storing anything through &partial, and the resid computation
	 * below must not read an uninitialized value.
	 */
	unsigned int partial = 0;
	int result = usb_stor_bulk_transfer_sglist(us, pipe, scsi_sglist(srb),
				      scsi_sg_count(srb), scsi_bufflen(srb),
				      &partial);

	/* whatever wasn't transferred is reported as residue */
	scsi_set_resid(srb, scsi_bufflen(srb) - partial);
	return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_srb);
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun /*
475*4882a593Smuzhiyun  * Transfer an entire SCSI command's worth of data payload over the bulk
476*4882a593Smuzhiyun  * pipe.
477*4882a593Smuzhiyun  *
478*4882a593Smuzhiyun  * Note that this uses usb_stor_bulk_transfer_buf() and
479*4882a593Smuzhiyun  * usb_stor_bulk_transfer_sglist() to achieve its goals --
480*4882a593Smuzhiyun  * this function simply determines whether we're going to use
481*4882a593Smuzhiyun  * scatter-gather or not, and acts appropriately.
482*4882a593Smuzhiyun  */
int usb_stor_bulk_transfer_sg(struct us_data* us, unsigned int pipe,
		void *buf, unsigned int length_left, int use_sg, int *residual)
{
	int result;
	/*
	 * Must be initialized: usb_stor_bulk_transfer_sglist() can return
	 * early (abort in progress, or usb_sg_init() failure) without
	 * storing anything through its act_len pointer, which would
	 * otherwise leave 'partial' uninitialized when we subtract it
	 * from length_left below.
	 */
	unsigned int partial = 0;

	/* are we scatter-gathering? */
	if (use_sg) {
		/* use the usb core scatter-gather primitives */
		result = usb_stor_bulk_transfer_sglist(us, pipe,
				(struct scatterlist *) buf, use_sg,
				length_left, &partial);
	} else {
		/* no scatter-gather, just make the request */
		result = usb_stor_bulk_transfer_buf(us, pipe, buf,
				length_left, &partial);
	}
	length_left -= partial;

	/* store the residual and return the error code */
	if (residual)
		*residual = length_left;
	return result;
}
EXPORT_SYMBOL_GPL(usb_stor_bulk_transfer_sg);
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun /***********************************************************************
511*4882a593Smuzhiyun  * Transport routines
512*4882a593Smuzhiyun  ***********************************************************************/
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun /*
515*4882a593Smuzhiyun  * There are so many devices that report the capacity incorrectly,
516*4882a593Smuzhiyun  * this routine was written to counteract some of the resulting
517*4882a593Smuzhiyun  * problems.
518*4882a593Smuzhiyun  */
static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
{
	struct gendisk *disk;
	struct scsi_disk *sdkp;
	u32 sector;

	/* Canned sense data reporting "Medium Error: Record Not Found" */
	static unsigned char record_not_found[18] = {
		[0]	= 0x70,			/* current error */
		[2]	= MEDIUM_ERROR,		/* = 0x03 */
		[7]	= 0x0a,			/* additional length */
		[12]	= 0x14			/* Record Not Found */
	};

	/*
	 * If last-sector problems can't occur, whether because the
	 * capacity was already decremented or because the device is
	 * known to report the correct capacity, then we don't need
	 * to do anything.
	 */
	if (!us->use_last_sector_hacks)
		return;

	/* Was this command a READ(10) or a WRITE(10)? */
	if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
		goto done;

	/*
	 * Did this command access the last sector?  Decode the 32-bit
	 * big-endian LBA from bytes 2-5 of the CDB.
	 */
	sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
			(srb->cmnd[4] << 8) | (srb->cmnd[5]);
	disk = srb->request->rq_disk;
	if (!disk)
		goto done;
	sdkp = scsi_disk(disk);
	if (!sdkp)
		goto done;
	/*
	 * only the final sector is suspect; sdkp->capacity is presumably
	 * in the same sector units as the CDB's LBA -- TODO confirm
	 * against the sd driver
	 */
	if (sector + 1 != sdkp->capacity)
		goto done;

	if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {

		/*
		 * The command succeeded.  We know this device doesn't
		 * have the last-sector bug, so stop checking it.
		 */
		us->use_last_sector_hacks = 0;

	} else {
		/*
		 * The command failed.  Allow up to 3 retries in case this
		 * is some normal sort of failure.  After that, assume the
		 * capacity is wrong and we're trying to access the sector
		 * beyond the end.  Replace the result code and sense data
		 * with values that will cause the SCSI core to fail the
		 * command immediately, instead of going into an infinite
		 * (or even just a very long) retry loop.
		 *
		 * Note: The early return here deliberately skips the
		 * retry-counter reset below, so the count accumulates
		 * across retries of the same access.
		 */
		if (++us->last_sector_retries < 3)
			return;
		srb->result = SAM_STAT_CHECK_CONDITION;
		memcpy(srb->sense_buffer, record_not_found,
				sizeof(record_not_found));
	}

 done:
	/*
	 * Don't reset the retry counter for TEST UNIT READY commands,
	 * because they get issued after device resets which might be
	 * caused by a failed last-sector access.
	 */
	if (srb->cmnd[0] != TEST_UNIT_READY)
		us->last_sector_retries = 0;
}
592*4882a593Smuzhiyun 
/*
 * Invoke the transport and basic error-handling/recovery methods
 *
 * This is used by the protocol layers to actually send the message to
 * the device and receive the response.
 *
 * On return, srb->result holds the outcome: a SAM status for normal
 * completion, or a DID_* host code (in the upper 16 bits) for
 * transport-level problems.  When an auto-REQUEST_SENSE was issued,
 * srb->sense_buffer holds the sense data.  Transport errors, aborts,
 * and failed auto-sense attempts fall through to Handle_Errors, which
 * resynchronizes with the device via a port reset (or, failing that,
 * a class-specific device reset) before returning.
 */
void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	int need_auto_sense;
	int result;

	/* send the command to the transport layer */
	scsi_set_resid(srb, 0);
	result = us->transport(srb, us);

	/*
	 * if the command gets aborted by the higher layers, we need to
	 * short-circuit all other processing
	 */
	if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
		usb_stor_dbg(us, "-- command was aborted\n");
		srb->result = DID_ABORT << 16;
		goto Handle_Errors;
	}

	/* if there is a transport error, reset and don't auto-sense */
	if (result == USB_STOR_TRANSPORT_ERROR) {
		usb_stor_dbg(us, "-- transport indicates error, resetting\n");
		srb->result = DID_ERROR << 16;
		goto Handle_Errors;
	}

	/* if the transport provided its own sense data, don't auto-sense */
	if (result == USB_STOR_TRANSPORT_NO_SENSE) {
		srb->result = SAM_STAT_CHECK_CONDITION;
		last_sector_hacks(us, srb);
		return;
	}

	/* Assume success for now; the auto-sense logic below may override. */
	srb->result = SAM_STAT_GOOD;

	/*
	 * Determine if we need to auto-sense
	 *
	 * I normally don't use a flag like this, but it's almost impossible
	 * to understand what's going on here if I don't.
	 */
	need_auto_sense = 0;

	/*
	 * If we're running the CB transport, which is incapable
	 * of determining status on its own, we will auto-sense
	 * unless the operation involved a data-in transfer.  Devices
	 * can signal most data-in errors by stalling the bulk-in pipe.
	 */
	if ((us->protocol == USB_PR_CB || us->protocol == USB_PR_DPCM_USB) &&
			srb->sc_data_direction != DMA_FROM_DEVICE) {
		usb_stor_dbg(us, "-- CB transport device requiring auto-sense\n");
		need_auto_sense = 1;
	}

	/* Some devices (Kindle) require another command after SYNC CACHE */
	if ((us->fflags & US_FL_SENSE_AFTER_SYNC) &&
			srb->cmnd[0] == SYNCHRONIZE_CACHE) {
		usb_stor_dbg(us, "-- sense after SYNC CACHE\n");
		need_auto_sense = 1;
	}

	/*
	 * If we have a failure, we're going to do a REQUEST_SENSE
	 * automatically.  Note that we differentiate between a command
	 * "failure" and an "error" in the transport mechanism.
	 */
	if (result == USB_STOR_TRANSPORT_FAILED) {
		usb_stor_dbg(us, "-- transport indicates command failure\n");
		need_auto_sense = 1;
	}

	/*
	 * Determine if this device is SAT by seeing if the
	 * command executed successfully.  Otherwise we'll have
	 * to wait for at least one CHECK_CONDITION to determine
	 * SANE_SENSE support
	 */
	if (unlikely((srb->cmnd[0] == ATA_16 || srb->cmnd[0] == ATA_12) &&
	    result == USB_STOR_TRANSPORT_GOOD &&
	    !(us->fflags & US_FL_SANE_SENSE) &&
	    !(us->fflags & US_FL_BAD_SENSE) &&
	    !(srb->cmnd[2] & 0x20))) {
		usb_stor_dbg(us, "-- SAT supported, increasing auto-sense\n");
		us->fflags |= US_FL_SANE_SENSE;
	}

	/*
	 * A short transfer on a command where we don't expect it
	 * is unusual, but it doesn't mean we need to auto-sense.
	 * We only log it here; the sense commands excluded below
	 * routinely return less data than requested.
	 */
	if ((scsi_get_resid(srb) > 0) &&
	    !((srb->cmnd[0] == REQUEST_SENSE) ||
	      (srb->cmnd[0] == INQUIRY) ||
	      (srb->cmnd[0] == MODE_SENSE) ||
	      (srb->cmnd[0] == LOG_SENSE) ||
	      (srb->cmnd[0] == MODE_SENSE_10))) {
		usb_stor_dbg(us, "-- unexpectedly short transfer\n");
	}

	/* Now, if we need to do the auto-sense, let's do it */
	if (need_auto_sense) {
		int temp_result;
		struct scsi_eh_save ses;
		int sense_size = US_SENSE_SIZE;
		struct scsi_sense_hdr sshdr;
		const u8 *scdd;
		u8 fm_ili;

		/* device supports and needs bigger sense buffer */
		if (us->fflags & US_FL_SANE_SENSE)
			sense_size = ~0;	/* presumably "request all available sense" -- TODO confirm against scsi_eh_prep_cmnd */
Retry_Sense:
		usb_stor_dbg(us, "Issuing auto-REQUEST_SENSE\n");

		scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sense_size);

		/* FIXME: we must do the protocol translation here */
		if (us->subclass == USB_SC_RBC || us->subclass == USB_SC_SCSI ||
				us->subclass == USB_SC_CYP_ATACB)
			srb->cmd_len = 6;
		else
			srb->cmd_len = 12;

		/* issue the auto-sense command */
		scsi_set_resid(srb, 0);
		/* NOTE(review): us->srb is assumed to be the same command as srb here -- confirm */
		temp_result = us->transport(us->srb, us);

		/* let's clean up right away */
		scsi_eh_restore_cmnd(srb, &ses);

		if (test_bit(US_FLIDX_TIMED_OUT, &us->dflags)) {
			usb_stor_dbg(us, "-- auto-sense aborted\n");
			srb->result = DID_ABORT << 16;

			/* If SANE_SENSE caused this problem, disable it */
			if (sense_size != US_SENSE_SIZE) {
				us->fflags &= ~US_FL_SANE_SENSE;
				us->fflags |= US_FL_BAD_SENSE;
			}
			goto Handle_Errors;
		}

		/*
		 * Some devices claim to support larger sense but fail when
		 * trying to request it. When a transport failure happens
		 * using US_FS_SANE_SENSE, we always retry with a standard
		 * (small) sense request. This fixes some USB GSM modems
		 */
		if (temp_result == USB_STOR_TRANSPORT_FAILED &&
				sense_size != US_SENSE_SIZE) {
			usb_stor_dbg(us, "-- auto-sense failure, retry small sense\n");
			sense_size = US_SENSE_SIZE;
			us->fflags &= ~US_FL_SANE_SENSE;
			us->fflags |= US_FL_BAD_SENSE;
			goto Retry_Sense;
		}

		/* Other failures */
		if (temp_result != USB_STOR_TRANSPORT_GOOD) {
			usb_stor_dbg(us, "-- auto-sense failure\n");

			/*
			 * we skip the reset if this happens to be a
			 * multi-target device, since failure of an
			 * auto-sense is perfectly valid
			 */
			srb->result = DID_ERROR << 16;
			if (!(us->fflags & US_FL_SCM_MULT_TARG))
				goto Handle_Errors;
			return;
		}

		/*
		 * If the sense data returned is larger than 18-bytes then we
		 * assume this device supports requesting more in the future.
		 * The response code must be 70h through 73h inclusive.
		 */
		if (srb->sense_buffer[7] > (US_SENSE_SIZE - 8) &&
		    !(us->fflags & US_FL_SANE_SENSE) &&
		    !(us->fflags & US_FL_BAD_SENSE) &&
		    (srb->sense_buffer[0] & 0x7C) == 0x70) {
			usb_stor_dbg(us, "-- SANE_SENSE support enabled\n");
			us->fflags |= US_FL_SANE_SENSE;

			/*
			 * Indicate to the user that we truncated their sense
			 * because we didn't know it supported larger sense.
			 */
			usb_stor_dbg(us, "-- Sense data truncated to %i from %i\n",
				     US_SENSE_SIZE,
				     srb->sense_buffer[7] + 8);
			srb->sense_buffer[7] = (US_SENSE_SIZE - 8);
		}

		scsi_normalize_sense(srb->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				     &sshdr);

		usb_stor_dbg(us, "-- Result from auto-sense is %d\n",
			     temp_result);
		usb_stor_dbg(us, "-- code: 0x%x, key: 0x%x, ASC: 0x%x, ASCQ: 0x%x\n",
			     sshdr.response_code, sshdr.sense_key,
			     sshdr.asc, sshdr.ascq);
#ifdef CONFIG_USB_STORAGE_DEBUG
		usb_stor_show_sense(us, sshdr.sense_key, sshdr.asc, sshdr.ascq);
#endif

		/* set the result so the higher layers expect this data */
		srb->result = SAM_STAT_CHECK_CONDITION;

		/*
		 * Pick up the FILEMARK (0x80) and ILI (0x20) bits, from the
		 * stream-commands sense descriptor (type 4) if present,
		 * otherwise from byte 2 of fixed-format sense.
		 */
		scdd = scsi_sense_desc_find(srb->sense_buffer,
					    SCSI_SENSE_BUFFERSIZE, 4);
		fm_ili = (scdd ? scdd[3] : srb->sense_buffer[2]) & 0xA0;

		/*
		 * We often get empty sense data.  This could indicate that
		 * everything worked or that there was an unspecified
		 * problem.  We have to decide which.
		 */
		if (sshdr.sense_key == 0 && sshdr.asc == 0 && sshdr.ascq == 0 &&
		    fm_ili == 0) {
			/*
			 * If things are really okay, then let's show that.
			 * Zero out the sense buffer so the higher layers
			 * won't realize we did an unsolicited auto-sense.
			 */
			if (result == USB_STOR_TRANSPORT_GOOD) {
				srb->result = SAM_STAT_GOOD;
				srb->sense_buffer[0] = 0x0;
			}

			/*
			 * ATA-passthru commands use sense data to report
			 * the command completion status, and often devices
			 * return Check Condition status when nothing is
			 * wrong.
			 */
			else if (srb->cmnd[0] == ATA_16 ||
					srb->cmnd[0] == ATA_12) {
				/* leave the data alone */
			}

			/*
			 * If there was a problem, report an unspecified
			 * hardware error to prevent the higher layers from
			 * entering an infinite retry loop.
			 */
			else {
				srb->result = DID_ERROR << 16;
				/*
				 * Descriptor-format sense (72h/73h) carries
				 * the sense key in byte 1; fixed format
				 * carries it in byte 2.
				 */
				if ((sshdr.response_code & 0x72) == 0x72)
					srb->sense_buffer[1] = HARDWARE_ERROR;
				else
					srb->sense_buffer[2] = HARDWARE_ERROR;
			}
		}
	}

	/*
	 * Some devices don't work or return incorrect data the first
	 * time they get a READ(10) command, or for the first READ(10)
	 * after a media change.  If the INITIAL_READ10 flag is set,
	 * keep track of whether READ(10) commands succeed.  If the
	 * previous one succeeded and this one failed, set the REDO_READ10
	 * flag to force a retry.
	 */
	if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
			srb->cmnd[0] == READ_10)) {
		if (srb->result == SAM_STAT_GOOD) {
			set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
		} else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
			clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
			set_bit(US_FLIDX_REDO_READ10, &us->dflags);
		}

		/*
		 * Next, if the REDO_READ10 flag is set, return a result
		 * code that will cause the SCSI core to retry the READ(10)
		 * command immediately.
		 */
		if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
			clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
			srb->result = DID_IMM_RETRY << 16;
			srb->sense_buffer[0] = 0;
		}
	}

	/* Did we transfer less than the minimum amount required? */
	if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
			scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
		srb->result = DID_ERROR << 16;

	last_sector_hacks(us, srb);
	return;

	/*
	 * Error and abort processing: try to resynchronize with the device
	 * by issuing a port reset.  If that fails, try a class-specific
	 * device reset.
	 */
  Handle_Errors:

	/*
	 * Set the RESETTING bit, and clear the ABORTING bit so that
	 * the reset may proceed.
	 */
	scsi_lock(us_to_host(us));
	set_bit(US_FLIDX_RESETTING, &us->dflags);
	clear_bit(US_FLIDX_ABORTING, &us->dflags);
	scsi_unlock(us_to_host(us));

	/*
	 * We must release the device lock because the pre_reset routine
	 * will want to acquire it.
	 */
	mutex_unlock(&us->dev_mutex);
	result = usb_stor_port_reset(us);
	mutex_lock(&us->dev_mutex);

	if (result < 0) {
		scsi_lock(us_to_host(us));
		usb_stor_report_device_reset(us);
		scsi_unlock(us_to_host(us));
		us->transport_reset(us);
	}
	clear_bit(US_FLIDX_RESETTING, &us->dflags);
	last_sector_hacks(us, srb);
}
926*4882a593Smuzhiyun 
/*
 * Stop the current URB transfer
 *
 * Cancels whichever transfer the control thread is currently blocked
 * on: first any single outstanding URB, then any scatter-gather
 * request.  Safe to call when nothing is in flight -- each cancel is
 * gated by its own ACTIVE flag.
 */
void usb_stor_stop_transport(struct us_data *us)
{
	/*
	 * If the state machine is blocked waiting for an URB,
	 * let's wake it up.  The test_and_clear_bit() call
	 * guarantees that if a URB has just been submitted,
	 * it won't be cancelled more than once.
	 */
	if (test_and_clear_bit(US_FLIDX_URB_ACTIVE, &us->dflags)) {
		usb_stor_dbg(us, "-- cancelling URB\n");
		usb_unlink_urb(us->current_urb);
	}

	/* If we are waiting for a scatter-gather operation, cancel it. */
	if (test_and_clear_bit(US_FLIDX_SG_ACTIVE, &us->dflags)) {
		usb_stor_dbg(us, "-- cancelling sg request\n");
		usb_sg_cancel(&us->current_sg);
	}
}
947*4882a593Smuzhiyun 
/*
 * Control/Bulk and Control/Bulk/Interrupt transport
 *
 * Runs one SCSI command over the CB(I) transport: command stage over
 * the control pipe, optional bulk data stage, and -- for CBI only --
 * a 2-byte interrupt-endpoint status stage.  Returns one of the
 * USB_STOR_TRANSPORT_* codes; plain CB has no status stage, so for it
 * a clean data stage is reported as GOOD and real status must be
 * determined by the caller (via auto-sense in the layer above).
 */
int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	unsigned int transfer_length = scsi_bufflen(srb);
	/*
	 * pipe stays 0 until a data stage runs; the Failed path uses
	 * that as a sentinel to decide whether a bulk pipe needs its
	 * halt cleared.
	 */
	unsigned int pipe = 0;
	int result;

	/* COMMAND STAGE */
	/* let's send the command via the control pipe */
	/*
	 * Command is sometime (f.e. after scsi_eh_prep_cmnd) on the stack.
	 * Stack may be vmallocated.  So no DMA for us.  Make a copy.
	 */
	memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
	result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
				      US_CBI_ADSC,
				      USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
				      us->ifnum, us->iobuf, srb->cmd_len);

	/* check the return code for the command */
	usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
		     result);

	/* if we stalled the command, it means command failed */
	if (result == USB_STOR_XFER_STALLED) {
		return USB_STOR_TRANSPORT_FAILED;
	}

	/* Uh oh... serious problem here */
	if (result != USB_STOR_XFER_GOOD) {
		return USB_STOR_TRANSPORT_ERROR;
	}

	/* DATA STAGE */
	/* transfer the data payload for this command, if one exists*/
	if (transfer_length) {
		pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
				us->recv_bulk_pipe : us->send_bulk_pipe;
		result = usb_stor_bulk_srb(us, pipe, srb);
		usb_stor_dbg(us, "CBI data stage result is 0x%x\n", result);

		/* if we stalled the data transfer it means command failed */
		if (result == USB_STOR_XFER_STALLED)
			return USB_STOR_TRANSPORT_FAILED;
		if (result > USB_STOR_XFER_STALLED)
			return USB_STOR_TRANSPORT_ERROR;
	}

	/* STATUS STAGE */

	/*
	 * NOTE: CB does not have a status stage.  Silly, I know.  So
	 * we have to catch this at a higher level.
	 */
	if (us->protocol != USB_PR_CBI)
		return USB_STOR_TRANSPORT_GOOD;

	result = usb_stor_intr_transfer(us, us->iobuf, 2);
	usb_stor_dbg(us, "Got interrupt data (0x%x, 0x%x)\n",
		     us->iobuf[0], us->iobuf[1]);
	if (result != USB_STOR_XFER_GOOD)
		return USB_STOR_TRANSPORT_ERROR;

	/*
	 * UFI gives us ASC and ASCQ, like a request sense
	 *
	 * REQUEST_SENSE and INQUIRY don't affect the sense data on UFI
	 * devices, so we ignore the information for those commands.  Note
	 * that this means we could be ignoring a real error on these
	 * commands, but that can't be helped.
	 */
	if (us->subclass == USB_SC_UFI) {
		if (srb->cmnd[0] == REQUEST_SENSE ||
		    srb->cmnd[0] == INQUIRY)
			return USB_STOR_TRANSPORT_GOOD;
		if (us->iobuf[0])
			goto Failed;
		return USB_STOR_TRANSPORT_GOOD;
	}

	/*
	 * If not UFI, we interpret the data as a result code
	 * The first byte should always be a 0x0.
	 *
	 * Some bogus devices don't follow that rule.  They stuff the ASC
	 * into the first byte -- so if it's non-zero, call it a failure.
	 */
	if (us->iobuf[0]) {
		usb_stor_dbg(us, "CBI IRQ data showed reserved bType 0x%x\n",
			     us->iobuf[0]);
		goto Failed;

	}

	/* The second byte & 0x0F should be 0x0 for good, otherwise error */
	switch (us->iobuf[1] & 0x0F) {
		case 0x00:
			return USB_STOR_TRANSPORT_GOOD;
		case 0x01:
			goto Failed;
	}
	return USB_STOR_TRANSPORT_ERROR;

	/*
	 * the CBI spec requires that the bulk pipe must be cleared
	 * following any data-in/out command failure (section 2.4.3.1.3)
	 */
  Failed:
	if (pipe)
		usb_stor_clear_halt(us, pipe);
	return USB_STOR_TRANSPORT_FAILED;
}
EXPORT_SYMBOL_GPL(usb_stor_CB_transport);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun /*
1066*4882a593Smuzhiyun  * Bulk only transport
1067*4882a593Smuzhiyun  */
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun /* Determine what the maximum LUN supported is */
usb_stor_Bulk_max_lun(struct us_data * us)1070*4882a593Smuzhiyun int usb_stor_Bulk_max_lun(struct us_data *us)
1071*4882a593Smuzhiyun {
1072*4882a593Smuzhiyun 	int result;
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	/* issue the command */
1075*4882a593Smuzhiyun 	us->iobuf[0] = 0;
1076*4882a593Smuzhiyun 	result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
1077*4882a593Smuzhiyun 				 US_BULK_GET_MAX_LUN,
1078*4882a593Smuzhiyun 				 USB_DIR_IN | USB_TYPE_CLASS |
1079*4882a593Smuzhiyun 				 USB_RECIP_INTERFACE,
1080*4882a593Smuzhiyun 				 0, us->ifnum, us->iobuf, 1, 10*HZ);
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun 	usb_stor_dbg(us, "GetMaxLUN command result is %d, data is %d\n",
1083*4882a593Smuzhiyun 		     result, us->iobuf[0]);
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	/*
1086*4882a593Smuzhiyun 	 * If we have a successful request, return the result if valid. The
1087*4882a593Smuzhiyun 	 * CBW LUN field is 4 bits wide, so the value reported by the device
1088*4882a593Smuzhiyun 	 * should fit into that.
1089*4882a593Smuzhiyun 	 */
1090*4882a593Smuzhiyun 	if (result > 0) {
1091*4882a593Smuzhiyun 		if (us->iobuf[0] < 16) {
1092*4882a593Smuzhiyun 			return us->iobuf[0];
1093*4882a593Smuzhiyun 		} else {
1094*4882a593Smuzhiyun 			dev_info(&us->pusb_intf->dev,
1095*4882a593Smuzhiyun 				 "Max LUN %d is not valid, using 0 instead",
1096*4882a593Smuzhiyun 				 us->iobuf[0]);
1097*4882a593Smuzhiyun 		}
1098*4882a593Smuzhiyun 	}
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	/*
1101*4882a593Smuzhiyun 	 * Some devices don't like GetMaxLUN.  They may STALL the control
1102*4882a593Smuzhiyun 	 * pipe, they may return a zero-length result, they may do nothing at
1103*4882a593Smuzhiyun 	 * all and timeout, or they may fail in even more bizarrely creative
1104*4882a593Smuzhiyun 	 * ways.  In these cases the best approach is to use the default
1105*4882a593Smuzhiyun 	 * value: only one LUN.
1106*4882a593Smuzhiyun 	 */
1107*4882a593Smuzhiyun 	return 0;
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun 
usb_stor_Bulk_transport(struct scsi_cmnd * srb,struct us_data * us)1110*4882a593Smuzhiyun int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1111*4882a593Smuzhiyun {
1112*4882a593Smuzhiyun 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
1113*4882a593Smuzhiyun 	struct bulk_cs_wrap *bcs = (struct bulk_cs_wrap *) us->iobuf;
1114*4882a593Smuzhiyun 	unsigned int transfer_length = scsi_bufflen(srb);
1115*4882a593Smuzhiyun 	unsigned int residue;
1116*4882a593Smuzhiyun 	int result;
1117*4882a593Smuzhiyun 	int fake_sense = 0;
1118*4882a593Smuzhiyun 	unsigned int cswlen;
1119*4882a593Smuzhiyun 	unsigned int cbwlen = US_BULK_CB_WRAP_LEN;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	/* Take care of BULK32 devices; set extra byte to 0 */
1122*4882a593Smuzhiyun 	if (unlikely(us->fflags & US_FL_BULK32)) {
1123*4882a593Smuzhiyun 		cbwlen = 32;
1124*4882a593Smuzhiyun 		us->iobuf[31] = 0;
1125*4882a593Smuzhiyun 	}
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 	/* set up the command wrapper */
1128*4882a593Smuzhiyun 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
1129*4882a593Smuzhiyun 	bcb->DataTransferLength = cpu_to_le32(transfer_length);
1130*4882a593Smuzhiyun 	bcb->Flags = srb->sc_data_direction == DMA_FROM_DEVICE ?
1131*4882a593Smuzhiyun 		US_BULK_FLAG_IN : 0;
1132*4882a593Smuzhiyun 	bcb->Tag = ++us->tag;
1133*4882a593Smuzhiyun 	bcb->Lun = srb->device->lun;
1134*4882a593Smuzhiyun 	if (us->fflags & US_FL_SCM_MULT_TARG)
1135*4882a593Smuzhiyun 		bcb->Lun |= srb->device->id << 4;
1136*4882a593Smuzhiyun 	bcb->Length = srb->cmd_len;
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	/* copy the command payload */
1139*4882a593Smuzhiyun 	memset(bcb->CDB, 0, sizeof(bcb->CDB));
1140*4882a593Smuzhiyun 	memcpy(bcb->CDB, srb->cmnd, bcb->Length);
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	/* send it to out endpoint */
1143*4882a593Smuzhiyun 	usb_stor_dbg(us, "Bulk Command S 0x%x T 0x%x L %d F %d Trg %d LUN %d CL %d\n",
1144*4882a593Smuzhiyun 		     le32_to_cpu(bcb->Signature), bcb->Tag,
1145*4882a593Smuzhiyun 		     le32_to_cpu(bcb->DataTransferLength), bcb->Flags,
1146*4882a593Smuzhiyun 		     (bcb->Lun >> 4), (bcb->Lun & 0x0F),
1147*4882a593Smuzhiyun 		     bcb->Length);
1148*4882a593Smuzhiyun 	result = usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
1149*4882a593Smuzhiyun 				bcb, cbwlen, NULL);
1150*4882a593Smuzhiyun 	usb_stor_dbg(us, "Bulk command transfer result=%d\n", result);
1151*4882a593Smuzhiyun 	if (result != USB_STOR_XFER_GOOD)
1152*4882a593Smuzhiyun 		return USB_STOR_TRANSPORT_ERROR;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	/* DATA STAGE */
1155*4882a593Smuzhiyun 	/* send/receive data payload, if there is any */
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	/*
1158*4882a593Smuzhiyun 	 * Some USB-IDE converter chips need a 100us delay between the
1159*4882a593Smuzhiyun 	 * command phase and the data phase.  Some devices need a little
1160*4882a593Smuzhiyun 	 * more than that, probably because of clock rate inaccuracies.
1161*4882a593Smuzhiyun 	 */
1162*4882a593Smuzhiyun 	if (unlikely(us->fflags & US_FL_GO_SLOW))
1163*4882a593Smuzhiyun 		usleep_range(125, 150);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 	if (transfer_length) {
1166*4882a593Smuzhiyun 		unsigned int pipe = srb->sc_data_direction == DMA_FROM_DEVICE ?
1167*4882a593Smuzhiyun 				us->recv_bulk_pipe : us->send_bulk_pipe;
1168*4882a593Smuzhiyun 		result = usb_stor_bulk_srb(us, pipe, srb);
1169*4882a593Smuzhiyun 		usb_stor_dbg(us, "Bulk data transfer result 0x%x\n", result);
1170*4882a593Smuzhiyun 		if (result == USB_STOR_XFER_ERROR)
1171*4882a593Smuzhiyun 			return USB_STOR_TRANSPORT_ERROR;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 		/*
1174*4882a593Smuzhiyun 		 * If the device tried to send back more data than the
1175*4882a593Smuzhiyun 		 * amount requested, the spec requires us to transfer
1176*4882a593Smuzhiyun 		 * the CSW anyway.  Since there's no point retrying the
1177*4882a593Smuzhiyun 		 * the command, we'll return fake sense data indicating
1178*4882a593Smuzhiyun 		 * Illegal Request, Invalid Field in CDB.
1179*4882a593Smuzhiyun 		 */
1180*4882a593Smuzhiyun 		if (result == USB_STOR_XFER_LONG)
1181*4882a593Smuzhiyun 			fake_sense = 1;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 		/*
1184*4882a593Smuzhiyun 		 * Sometimes a device will mistakenly skip the data phase
1185*4882a593Smuzhiyun 		 * and go directly to the status phase without sending a
1186*4882a593Smuzhiyun 		 * zero-length packet.  If we get a 13-byte response here,
1187*4882a593Smuzhiyun 		 * check whether it really is a CSW.
1188*4882a593Smuzhiyun 		 */
1189*4882a593Smuzhiyun 		if (result == USB_STOR_XFER_SHORT &&
1190*4882a593Smuzhiyun 				srb->sc_data_direction == DMA_FROM_DEVICE &&
1191*4882a593Smuzhiyun 				transfer_length - scsi_get_resid(srb) ==
1192*4882a593Smuzhiyun 					US_BULK_CS_WRAP_LEN) {
1193*4882a593Smuzhiyun 			struct scatterlist *sg = NULL;
1194*4882a593Smuzhiyun 			unsigned int offset = 0;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 			if (usb_stor_access_xfer_buf((unsigned char *) bcs,
1197*4882a593Smuzhiyun 					US_BULK_CS_WRAP_LEN, srb, &sg,
1198*4882a593Smuzhiyun 					&offset, FROM_XFER_BUF) ==
1199*4882a593Smuzhiyun 						US_BULK_CS_WRAP_LEN &&
1200*4882a593Smuzhiyun 					bcs->Signature ==
1201*4882a593Smuzhiyun 						cpu_to_le32(US_BULK_CS_SIGN)) {
1202*4882a593Smuzhiyun 				usb_stor_dbg(us, "Device skipped data phase\n");
1203*4882a593Smuzhiyun 				scsi_set_resid(srb, transfer_length);
1204*4882a593Smuzhiyun 				goto skipped_data_phase;
1205*4882a593Smuzhiyun 			}
1206*4882a593Smuzhiyun 		}
1207*4882a593Smuzhiyun 	}
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	/*
1210*4882a593Smuzhiyun 	 * See flow chart on pg 15 of the Bulk Only Transport spec for
1211*4882a593Smuzhiyun 	 * an explanation of how this code works.
1212*4882a593Smuzhiyun 	 */
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	/* get CSW for device status */
1215*4882a593Smuzhiyun 	usb_stor_dbg(us, "Attempting to get CSW...\n");
1216*4882a593Smuzhiyun 	result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
1217*4882a593Smuzhiyun 				bcs, US_BULK_CS_WRAP_LEN, &cswlen);
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/*
1220*4882a593Smuzhiyun 	 * Some broken devices add unnecessary zero-length packets to the
1221*4882a593Smuzhiyun 	 * end of their data transfers.  Such packets show up as 0-length
1222*4882a593Smuzhiyun 	 * CSWs.  If we encounter such a thing, try to read the CSW again.
1223*4882a593Smuzhiyun 	 */
1224*4882a593Smuzhiyun 	if (result == USB_STOR_XFER_SHORT && cswlen == 0) {
1225*4882a593Smuzhiyun 		usb_stor_dbg(us, "Received 0-length CSW; retrying...\n");
1226*4882a593Smuzhiyun 		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
1227*4882a593Smuzhiyun 				bcs, US_BULK_CS_WRAP_LEN, &cswlen);
1228*4882a593Smuzhiyun 	}
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	/* did the attempt to read the CSW fail? */
1231*4882a593Smuzhiyun 	if (result == USB_STOR_XFER_STALLED) {
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 		/* get the status again */
1234*4882a593Smuzhiyun 		usb_stor_dbg(us, "Attempting to get CSW (2nd try)...\n");
1235*4882a593Smuzhiyun 		result = usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
1236*4882a593Smuzhiyun 				bcs, US_BULK_CS_WRAP_LEN, NULL);
1237*4882a593Smuzhiyun 	}
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 	/* if we still have a failure at this point, we're in trouble */
1240*4882a593Smuzhiyun 	usb_stor_dbg(us, "Bulk status result = %d\n", result);
1241*4882a593Smuzhiyun 	if (result != USB_STOR_XFER_GOOD)
1242*4882a593Smuzhiyun 		return USB_STOR_TRANSPORT_ERROR;
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun  skipped_data_phase:
1245*4882a593Smuzhiyun 	/* check bulk status */
1246*4882a593Smuzhiyun 	residue = le32_to_cpu(bcs->Residue);
1247*4882a593Smuzhiyun 	usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
1248*4882a593Smuzhiyun 		     le32_to_cpu(bcs->Signature), bcs->Tag,
1249*4882a593Smuzhiyun 		     residue, bcs->Status);
1250*4882a593Smuzhiyun 	if (!(bcs->Tag == us->tag || (us->fflags & US_FL_BULK_IGNORE_TAG)) ||
1251*4882a593Smuzhiyun 		bcs->Status > US_BULK_STAT_PHASE) {
1252*4882a593Smuzhiyun 		usb_stor_dbg(us, "Bulk logical error\n");
1253*4882a593Smuzhiyun 		return USB_STOR_TRANSPORT_ERROR;
1254*4882a593Smuzhiyun 	}
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	/*
1257*4882a593Smuzhiyun 	 * Some broken devices report odd signatures, so we do not check them
1258*4882a593Smuzhiyun 	 * for validity against the spec. We store the first one we see,
1259*4882a593Smuzhiyun 	 * and check subsequent transfers for validity against this signature.
1260*4882a593Smuzhiyun 	 */
1261*4882a593Smuzhiyun 	if (!us->bcs_signature) {
1262*4882a593Smuzhiyun 		us->bcs_signature = bcs->Signature;
1263*4882a593Smuzhiyun 		if (us->bcs_signature != cpu_to_le32(US_BULK_CS_SIGN))
1264*4882a593Smuzhiyun 			usb_stor_dbg(us, "Learnt BCS signature 0x%08X\n",
1265*4882a593Smuzhiyun 				     le32_to_cpu(us->bcs_signature));
1266*4882a593Smuzhiyun 	} else if (bcs->Signature != us->bcs_signature) {
1267*4882a593Smuzhiyun 		usb_stor_dbg(us, "Signature mismatch: got %08X, expecting %08X\n",
1268*4882a593Smuzhiyun 			     le32_to_cpu(bcs->Signature),
1269*4882a593Smuzhiyun 			     le32_to_cpu(us->bcs_signature));
1270*4882a593Smuzhiyun 		return USB_STOR_TRANSPORT_ERROR;
1271*4882a593Smuzhiyun 	}
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	/*
1274*4882a593Smuzhiyun 	 * try to compute the actual residue, based on how much data
1275*4882a593Smuzhiyun 	 * was really transferred and what the device tells us
1276*4882a593Smuzhiyun 	 */
1277*4882a593Smuzhiyun 	if (residue && !(us->fflags & US_FL_IGNORE_RESIDUE)) {
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun 		/*
1280*4882a593Smuzhiyun 		 * Heuristically detect devices that generate bogus residues
1281*4882a593Smuzhiyun 		 * by seeing what happens with INQUIRY and READ CAPACITY
1282*4882a593Smuzhiyun 		 * commands.
1283*4882a593Smuzhiyun 		 */
1284*4882a593Smuzhiyun 		if (bcs->Status == US_BULK_STAT_OK &&
1285*4882a593Smuzhiyun 				scsi_get_resid(srb) == 0 &&
1286*4882a593Smuzhiyun 					((srb->cmnd[0] == INQUIRY &&
1287*4882a593Smuzhiyun 						transfer_length == 36) ||
1288*4882a593Smuzhiyun 					(srb->cmnd[0] == READ_CAPACITY &&
1289*4882a593Smuzhiyun 						transfer_length == 8))) {
1290*4882a593Smuzhiyun 			us->fflags |= US_FL_IGNORE_RESIDUE;
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun 		} else {
1293*4882a593Smuzhiyun 			residue = min(residue, transfer_length);
1294*4882a593Smuzhiyun 			scsi_set_resid(srb, max(scsi_get_resid(srb), residue));
1295*4882a593Smuzhiyun 		}
1296*4882a593Smuzhiyun 	}
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	/* based on the status code, we report good or bad */
1299*4882a593Smuzhiyun 	switch (bcs->Status) {
1300*4882a593Smuzhiyun 		case US_BULK_STAT_OK:
1301*4882a593Smuzhiyun 			/* device babbled -- return fake sense data */
1302*4882a593Smuzhiyun 			if (fake_sense) {
1303*4882a593Smuzhiyun 				memcpy(srb->sense_buffer,
1304*4882a593Smuzhiyun 				       usb_stor_sense_invalidCDB,
1305*4882a593Smuzhiyun 				       sizeof(usb_stor_sense_invalidCDB));
1306*4882a593Smuzhiyun 				return USB_STOR_TRANSPORT_NO_SENSE;
1307*4882a593Smuzhiyun 			}
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 			/* command good -- note that data could be short */
1310*4882a593Smuzhiyun 			return USB_STOR_TRANSPORT_GOOD;
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun 		case US_BULK_STAT_FAIL:
1313*4882a593Smuzhiyun 			/* command failed */
1314*4882a593Smuzhiyun 			return USB_STOR_TRANSPORT_FAILED;
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 		case US_BULK_STAT_PHASE:
1317*4882a593Smuzhiyun 			/*
1318*4882a593Smuzhiyun 			 * phase error -- note that a transport reset will be
1319*4882a593Smuzhiyun 			 * invoked by the invoke_transport() function
1320*4882a593Smuzhiyun 			 */
1321*4882a593Smuzhiyun 			return USB_STOR_TRANSPORT_ERROR;
1322*4882a593Smuzhiyun 	}
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	/* we should never get here, but if we do, we're in trouble */
1325*4882a593Smuzhiyun 	return USB_STOR_TRANSPORT_ERROR;
1326*4882a593Smuzhiyun }
1327*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_stor_Bulk_transport);
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun /***********************************************************************
1330*4882a593Smuzhiyun  * Reset routines
1331*4882a593Smuzhiyun  ***********************************************************************/
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun /*
1334*4882a593Smuzhiyun  * This is the common part of the device reset code.
1335*4882a593Smuzhiyun  *
1336*4882a593Smuzhiyun  * It's handy that every transport mechanism uses the control endpoint for
1337*4882a593Smuzhiyun  * resets.
1338*4882a593Smuzhiyun  *
1339*4882a593Smuzhiyun  * Basically, we send a reset with a 5-second timeout, so we don't get
1340*4882a593Smuzhiyun  * jammed attempting to do the reset.
1341*4882a593Smuzhiyun  */
static int usb_stor_reset_common(struct us_data *us,
		u8 request, u8 requesttype,
		u16 value, u16 index, void *data, u16 size)
{
	int rc;
	int rc_out;

	/* A reset makes no sense once the device is going away */
	if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
		usb_stor_dbg(us, "No reset during disconnect\n");
		return -EIO;
	}

	/* Issue the class-specific reset request, bounded by 5 seconds */
	rc = usb_stor_control_msg(us, us->send_ctrl_pipe,
			request, requesttype, value, index, data, size,
			5*HZ);
	if (rc < 0) {
		usb_stor_dbg(us, "Soft reset failed: %d\n", rc);
		return rc;
	}

	/*
	 * Let the device recover for up to 6 seconds, but wake early
	 * on disconnect so unplug processing isn't held up.
	 */
	wait_event_interruptible_timeout(us->delay_wait,
			test_bit(US_FLIDX_DISCONNECTING, &us->dflags),
			HZ*6);
	if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
		usb_stor_dbg(us, "Reset interrupted by disconnect\n");
		return -EIO;
	}

	/* Clear any halt condition on both bulk endpoints */
	usb_stor_dbg(us, "Soft reset: clearing bulk-in endpoint halt\n");
	rc = usb_stor_clear_halt(us, us->recv_bulk_pipe);

	usb_stor_dbg(us, "Soft reset: clearing bulk-out endpoint halt\n");
	rc_out = usb_stor_clear_halt(us, us->send_bulk_pipe);

	/* The reset succeeds only if both clear-halts did */
	if (rc >= 0)
		rc = rc_out;
	if (rc < 0)
		usb_stor_dbg(us, "Soft reset failed\n");
	else
		usb_stor_dbg(us, "Soft reset done\n");
	return rc;
}
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun /* This issues a CB[I] Reset to the device in question */
1391*4882a593Smuzhiyun #define CB_RESET_CMD_SIZE	12
1392*4882a593Smuzhiyun 
usb_stor_CB_reset(struct us_data * us)1393*4882a593Smuzhiyun int usb_stor_CB_reset(struct us_data *us)
1394*4882a593Smuzhiyun {
1395*4882a593Smuzhiyun 	memset(us->iobuf, 0xFF, CB_RESET_CMD_SIZE);
1396*4882a593Smuzhiyun 	us->iobuf[0] = SEND_DIAGNOSTIC;
1397*4882a593Smuzhiyun 	us->iobuf[1] = 4;
1398*4882a593Smuzhiyun 	return usb_stor_reset_common(us, US_CBI_ADSC,
1399*4882a593Smuzhiyun 				 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
1400*4882a593Smuzhiyun 				 0, us->ifnum, us->iobuf, CB_RESET_CMD_SIZE);
1401*4882a593Smuzhiyun }
1402*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_stor_CB_reset);
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun /*
1405*4882a593Smuzhiyun  * This issues a Bulk-only Reset to the device in question, including
1406*4882a593Smuzhiyun  * clearing the subsequent endpoint halts that may occur.
1407*4882a593Smuzhiyun  */
usb_stor_Bulk_reset(struct us_data * us)1408*4882a593Smuzhiyun int usb_stor_Bulk_reset(struct us_data *us)
1409*4882a593Smuzhiyun {
1410*4882a593Smuzhiyun 	return usb_stor_reset_common(us, US_BULK_RESET_REQUEST,
1411*4882a593Smuzhiyun 				 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
1412*4882a593Smuzhiyun 				 0, us->ifnum, NULL, 0);
1413*4882a593Smuzhiyun }
1414*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(usb_stor_Bulk_reset);
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun /*
1417*4882a593Smuzhiyun  * Issue a USB port reset to the device.  The caller must not hold
1418*4882a593Smuzhiyun  * us->dev_mutex.
1419*4882a593Smuzhiyun  */
usb_stor_port_reset(struct us_data * us)1420*4882a593Smuzhiyun int usb_stor_port_reset(struct us_data *us)
1421*4882a593Smuzhiyun {
1422*4882a593Smuzhiyun 	int result;
1423*4882a593Smuzhiyun 
1424*4882a593Smuzhiyun 	/*for these devices we must use the class specific method */
1425*4882a593Smuzhiyun 	if (us->pusb_dev->quirks & USB_QUIRK_RESET)
1426*4882a593Smuzhiyun 		return -EPERM;
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
1429*4882a593Smuzhiyun 	if (result < 0)
1430*4882a593Smuzhiyun 		usb_stor_dbg(us, "unable to lock device for reset: %d\n",
1431*4882a593Smuzhiyun 			     result);
1432*4882a593Smuzhiyun 	else {
1433*4882a593Smuzhiyun 		/* Were we disconnected while waiting for the lock? */
1434*4882a593Smuzhiyun 		if (test_bit(US_FLIDX_DISCONNECTING, &us->dflags)) {
1435*4882a593Smuzhiyun 			result = -EIO;
1436*4882a593Smuzhiyun 			usb_stor_dbg(us, "No reset during disconnect\n");
1437*4882a593Smuzhiyun 		} else {
1438*4882a593Smuzhiyun 			result = usb_reset_device(us->pusb_dev);
1439*4882a593Smuzhiyun 			usb_stor_dbg(us, "usb_reset_device returns %d\n",
1440*4882a593Smuzhiyun 				     result);
1441*4882a593Smuzhiyun 		}
1442*4882a593Smuzhiyun 		usb_unlock_device(us->pusb_dev);
1443*4882a593Smuzhiyun 	}
1444*4882a593Smuzhiyun 	return result;
1445*4882a593Smuzhiyun }
1446