xref: /OK3568_Linux_fs/u-boot/drivers/usb/gadget/f_mass_storage.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * f_mass_storage.c -- Mass Storage USB Composite Function
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Copyright (C) 2003-2008 Alan Stern
5*4882a593Smuzhiyun  * Copyright (C) 2009 Samsung Electronics
6*4882a593Smuzhiyun  *                    Author: Michal Nazarewicz <m.nazarewicz@samsung.com>
7*4882a593Smuzhiyun  * All rights reserved.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun /*
13*4882a593Smuzhiyun  * The Mass Storage Function acts as a USB Mass Storage device,
14*4882a593Smuzhiyun  * appearing to the host as a disk drive or as a CD-ROM drive.  In
15*4882a593Smuzhiyun  * addition to providing an example of a genuinely useful composite
16*4882a593Smuzhiyun  * function for a USB device, it also illustrates a technique of
17*4882a593Smuzhiyun  * double-buffering for increased throughput.
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  * Function supports multiple logical units (LUNs).  Backing storage
20*4882a593Smuzhiyun  * for each LUN is provided by a regular file or a block device.
21*4882a593Smuzhiyun  * Access for each LUN can be limited to read-only.  Moreover, the
22*4882a593Smuzhiyun  * function can indicate that a LUN is removable and/or a CD-ROM.  (The
23*4882a593Smuzhiyun  * latter implies read-only access.)
24*4882a593Smuzhiyun  *
25*4882a593Smuzhiyun  * MSF is configured by specifying a fsg_config structure.  It has the
26*4882a593Smuzhiyun  * following fields:
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  *	nluns		Number of LUNs the function has (anywhere from 1
29*4882a593Smuzhiyun  *				to FSG_MAX_LUNS, which is 8).
30*4882a593Smuzhiyun  *	luns		An array of LUN configuration values.  This
31*4882a593Smuzhiyun  *				should be filled in for each LUN that the
32*4882a593Smuzhiyun  *				function will include (i.e. for "nluns"
33*4882a593Smuzhiyun  *				LUNs).  Each element of the array has
34*4882a593Smuzhiyun  *				the following fields:
35*4882a593Smuzhiyun  *	->filename	The path to the backing file for the LUN.
36*4882a593Smuzhiyun  *				Required if LUN is not marked as
37*4882a593Smuzhiyun  *				removable.
38*4882a593Smuzhiyun  *	->ro		Flag specifying access to the LUN shall be
39*4882a593Smuzhiyun  *				read-only.  This is implied if CD-ROM
40*4882a593Smuzhiyun  *				emulation is enabled as well as when
41*4882a593Smuzhiyun  *				it was impossible to open "filename"
42*4882a593Smuzhiyun  *				in R/W mode.
43*4882a593Smuzhiyun  *	->removable	Flag specifying that LUN shall be indicated as
44*4882a593Smuzhiyun  *				being removable.
45*4882a593Smuzhiyun  *	->cdrom		Flag specifying that LUN shall be reported as
46*4882a593Smuzhiyun  *				being a CD-ROM.
47*4882a593Smuzhiyun  *
48*4882a593Smuzhiyun  *	lun_name_format	A printf-like format for names of the LUN
49*4882a593Smuzhiyun  *				devices.  This determines how the
50*4882a593Smuzhiyun  *				directory in sysfs will be named.
51*4882a593Smuzhiyun  *				Unless you are using several MSFs in
52*4882a593Smuzhiyun  *				a single gadget (as opposed to single
53*4882a593Smuzhiyun  *				MSF in many configurations) you may
54*4882a593Smuzhiyun  *				leave it as NULL (in which case
55*4882a593Smuzhiyun  *				"lun%d" will be used).  In the format
56*4882a593Smuzhiyun  *				you can use "%d" to index LUNs for
57*4882a593Smuzhiyun  *				MSFs with more than one LUN.  (Beware
58*4882a593Smuzhiyun  *				that there is only one integer given
59*4882a593Smuzhiyun  *				as an argument for the format and
60*4882a593Smuzhiyun  *				specifying invalid format may cause
61*4882a593Smuzhiyun  *				unspecified behaviour.)
62*4882a593Smuzhiyun  *	thread_name	Name of the kernel thread process used by the
63*4882a593Smuzhiyun  *				MSF.  You can safely set it to NULL
64*4882a593Smuzhiyun  *				(in which case default "file-storage"
65*4882a593Smuzhiyun  *				will be used).
66*4882a593Smuzhiyun  *
67*4882a593Smuzhiyun  *	vendor_name
68*4882a593Smuzhiyun  *	product_name
69*4882a593Smuzhiyun  *	release		Information used as a reply to INQUIRY
70*4882a593Smuzhiyun  *				request.  To use default set to NULL,
71*4882a593Smuzhiyun  *				NULL, 0xffff respectively.  The first
72*4882a593Smuzhiyun  *				field should be 8 and the second 16
73*4882a593Smuzhiyun  *				characters or less.
74*4882a593Smuzhiyun  *
75*4882a593Smuzhiyun  *	can_stall	Set to permit function to halt bulk endpoints.
76*4882a593Smuzhiyun  *				Disabled on some USB devices known not
77*4882a593Smuzhiyun  *				to work correctly.  You should set it
78*4882a593Smuzhiyun  *				to true.
79*4882a593Smuzhiyun  *
80*4882a593Smuzhiyun  * If "removable" is not set for a LUN then a backing file must be
81*4882a593Smuzhiyun  * specified.  If it is set, then a NULL filename means the LUN's medium
82*4882a593Smuzhiyun  * is not loaded (an empty string as "filename" in the fsg_config
83*4882a593Smuzhiyun  * structure causes an error).  The CD-ROM emulation includes a single
84*4882a593Smuzhiyun  * data track and no audio tracks; hence there need be only one
85*4882a593Smuzhiyun  * backing file per LUN.  Note also that the CD-ROM block length is
86*4882a593Smuzhiyun  * set to 512 rather than the more common value 2048.
87*4882a593Smuzhiyun  *
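 * As a rough illustration (a sketch only, not taken from any board
 * code; the backing file name is made up), a gadget could describe a
 * single removable read-write LUN like this:
 *
 *	struct fsg_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.nluns = 1;
 *	cfg.luns[0].filename = "backing.img";
 *	cfg.luns[0].ro = 0;
 *	cfg.luns[0].removable = 1;
 *	cfg.luns[0].cdrom = 0;
 *	cfg.vendor_name = NULL;
 *	cfg.product_name = NULL;
 *	cfg.can_stall = 1;
 *
 * Leaving vendor_name and product_name NULL selects the built-in
 * default INQUIRY strings.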
88*4882a593Smuzhiyun  *
89*4882a593Smuzhiyun  * MSF includes support for module parameters.  If the gadget using it
90*4882a593Smuzhiyun  * decides to use them, the following module parameters will be
91*4882a593Smuzhiyun  * available:
92*4882a593Smuzhiyun  *
93*4882a593Smuzhiyun  *	file=filename[,filename...]
94*4882a593Smuzhiyun  *			Names of the files or block devices used for
95*4882a593Smuzhiyun  *				backing storage.
96*4882a593Smuzhiyun  *	ro=b[,b...]	Default false, boolean for read-only access.
97*4882a593Smuzhiyun  *	removable=b[,b...]
98*4882a593Smuzhiyun  *			Default true, boolean for removable media.
99*4882a593Smuzhiyun  *	cdrom=b[,b...]	Default false, boolean for whether to emulate
100*4882a593Smuzhiyun  *				a CD-ROM drive.
101*4882a593Smuzhiyun  *	luns=N		Default N = number of filenames, number of
102*4882a593Smuzhiyun  *				LUNs to support.
103*4882a593Smuzhiyun  *	stall		Default determined according to the type of
104*4882a593Smuzhiyun  *				USB device controller (usually true),
105*4882a593Smuzhiyun  *				boolean to permit the driver to halt
106*4882a593Smuzhiyun  *				bulk endpoints.
107*4882a593Smuzhiyun  *
108*4882a593Smuzhiyun  * The module parameters may be prefixed with some string.  You need
109*4882a593Smuzhiyun  * to consult the gadget's documentation or source to verify whether it
110*4882a593Smuzhiyun  * is using those module parameters and, if it does, what the prefixes
111*4882a593Smuzhiyun  * are (look for FSG_MODULE_PARAMETERS() macro usage; what's inside it
112*4882a593Smuzhiyun  * is the prefix).
113*4882a593Smuzhiyun  *
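 * As a purely hypothetical example (the file names are invented), a
 * gadget exposing two LUNs, the second one read-only, might be handed
 * the parameters:
 *
 *	file=disk0.img,disk1.img ro=0,1 removable=1,1 stall=1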
114*4882a593Smuzhiyun  *
115*4882a593Smuzhiyun  * Requirements are modest; only a bulk-in and a bulk-out endpoint are
116*4882a593Smuzhiyun  * needed.  The memory requirement amounts to two 16K buffers, size
117*4882a593Smuzhiyun  * configurable by a parameter.  Support is included for both
118*4882a593Smuzhiyun  * full-speed and high-speed operation.
119*4882a593Smuzhiyun  *
120*4882a593Smuzhiyun  * Note that the driver is slightly non-portable in that it assumes a
121*4882a593Smuzhiyun  * single memory/DMA buffer will be usable for bulk-in, bulk-out, and
122*4882a593Smuzhiyun  * interrupt-in endpoints.  With most device controllers this isn't an
123*4882a593Smuzhiyun  * issue, but there may be some with hardware restrictions that prevent
124*4882a593Smuzhiyun  * a buffer from being used by more than one endpoint.
125*4882a593Smuzhiyun  *
126*4882a593Smuzhiyun  *
127*4882a593Smuzhiyun  * The pathnames of the backing files and the ro settings are
128*4882a593Smuzhiyun  * available in the attribute files "file" and "ro" in the lun<n> (or,
129*4882a593Smuzhiyun  * to be more precise, in a directory whose name comes from the
130*4882a593Smuzhiyun  * "lun_name_format" option) subdirectory of the gadget's sysfs
131*4882a593Smuzhiyun  * directory.  If the "removable" option is set, writing to these
132*4882a593Smuzhiyun  * files will simulate ejecting/loading the medium (writing an empty
133*4882a593Smuzhiyun  * line means eject) and adjusting a write-enable tab.  Changes to the
134*4882a593Smuzhiyun  * ro setting are not allowed when the medium is loaded or if CD-ROM
135*4882a593Smuzhiyun  * emulation is being used.
136*4882a593Smuzhiyun  *
137*4882a593Smuzhiyun  * When a LUN receives an "eject" SCSI request (Start/Stop Unit),
138*4882a593Smuzhiyun  * if the LUN is removable, the backing file is released to simulate
139*4882a593Smuzhiyun  * ejection.
140*4882a593Smuzhiyun  *
141*4882a593Smuzhiyun  *
142*4882a593Smuzhiyun  * This function is heavily based on "File-backed Storage Gadget" by
143*4882a593Smuzhiyun  * Alan Stern which in turn is heavily based on "Gadget Zero" by David
144*4882a593Smuzhiyun  * Brownell.  The driver's SCSI command interface was based on the
145*4882a593Smuzhiyun  * "Information technology - Small Computer System Interface - 2"
146*4882a593Smuzhiyun  * document from X3T9.2 Project 375D, Revision 10L, 7-SEP-93,
147*4882a593Smuzhiyun  * available at <http://www.t10.org/ftp/t10/drafts/s2/s2-r10l.pdf>.
148*4882a593Smuzhiyun  * The single exception is opcode 0x23 (READ FORMAT CAPACITIES), which
149*4882a593Smuzhiyun  * was based on the "Universal Serial Bus Mass Storage Class UFI
150*4882a593Smuzhiyun  * Command Specification" document, Revision 1.0, December 14, 1998,
151*4882a593Smuzhiyun  * available at
152*4882a593Smuzhiyun  * <http://www.usb.org/developers/devclass_docs/usbmass-ufi10.pdf>.
153*4882a593Smuzhiyun  */
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun /*
156*4882a593Smuzhiyun  *				Driver Design
157*4882a593Smuzhiyun  *
158*4882a593Smuzhiyun  * The MSF is fairly straightforward.  There is a main kernel
159*4882a593Smuzhiyun  * thread that handles most of the work.  Interrupt routines field
160*4882a593Smuzhiyun  * callbacks from the controller driver: bulk- and interrupt-request
161*4882a593Smuzhiyun  * completion notifications, endpoint-0 events, and disconnect events.
162*4882a593Smuzhiyun  * Completion events are passed to the main thread by wakeup calls.  Many
163*4882a593Smuzhiyun  * ep0 requests are handled at interrupt time, but SetInterface,
164*4882a593Smuzhiyun  * SetConfiguration, and device reset requests are forwarded to the
165*4882a593Smuzhiyun  * thread in the form of "exceptions" using SIGUSR1 signals (since they
166*4882a593Smuzhiyun  * should interrupt any ongoing file I/O operations).
167*4882a593Smuzhiyun  *
168*4882a593Smuzhiyun  * The thread's main routine implements the standard command/data/status
169*4882a593Smuzhiyun  * parts of a SCSI interaction.  It and its subroutines are full of tests
170*4882a593Smuzhiyun  * for pending signals/exceptions -- all this polling is necessary since
171*4882a593Smuzhiyun  * the kernel has no setjmp/longjmp equivalents.  (Maybe this is an
172*4882a593Smuzhiyun  * indication that the driver really wants to be running in userspace.)
173*4882a593Smuzhiyun  * An important point is that so long as the thread is alive it keeps an
174*4882a593Smuzhiyun  * open reference to the backing file.  This will prevent unmounting
175*4882a593Smuzhiyun  * the backing file's underlying filesystem and could cause problems
176*4882a593Smuzhiyun  * during system shutdown, for example.  To prevent such problems, the
177*4882a593Smuzhiyun  * thread catches INT, TERM, and KILL signals and converts them into
178*4882a593Smuzhiyun  * an EXIT exception.
179*4882a593Smuzhiyun  *
180*4882a593Smuzhiyun  * In normal operation the main thread is started during the gadget's
181*4882a593Smuzhiyun  * fsg_bind() callback and stopped during fsg_unbind().  But it can
182*4882a593Smuzhiyun  * also exit when it receives a signal, and there's no point leaving
183*4882a593Smuzhiyun  * the gadget running when the thread is dead.  As of this moment, MSF
184*4882a593Smuzhiyun  * provides no way to deregister the gadget when the thread dies -- maybe
185*4882a593Smuzhiyun  * a callback function is needed.
186*4882a593Smuzhiyun  *
187*4882a593Smuzhiyun  * To provide maximum throughput, the driver uses a circular pipeline of
188*4882a593Smuzhiyun  * buffer heads (struct fsg_buffhd).  In principle the pipeline can be
189*4882a593Smuzhiyun  * arbitrarily long; in practice the benefits don't justify having more
190*4882a593Smuzhiyun  * than 2 stages (i.e., double buffering).  But it helps to think of the
191*4882a593Smuzhiyun  * pipeline as being a long one.  Each buffer head contains a bulk-in and
192*4882a593Smuzhiyun  * a bulk-out request pointer (since the buffer can be used for both
193*4882a593Smuzhiyun  * output and input -- directions always are given from the host's
194*4882a593Smuzhiyun  * point of view) as well as a pointer to the buffer and various state
195*4882a593Smuzhiyun  * variables.
196*4882a593Smuzhiyun  *
197*4882a593Smuzhiyun  * Use of the pipeline follows a simple protocol.  There is a variable
198*4882a593Smuzhiyun  * (fsg->next_buffhd_to_fill) that points to the next buffer head to use.
199*4882a593Smuzhiyun  * At any time that buffer head may still be in use from an earlier
200*4882a593Smuzhiyun  * request, so each buffer head has a state variable indicating whether
201*4882a593Smuzhiyun  * it is EMPTY, FULL, or BUSY.  Typical use involves waiting for the
202*4882a593Smuzhiyun  * buffer head to be EMPTY, filling the buffer either by file I/O or by
203*4882a593Smuzhiyun  * USB I/O (during which the buffer head is BUSY), and marking the buffer
204*4882a593Smuzhiyun  * head FULL when the I/O is complete.  Then the buffer will be emptied
205*4882a593Smuzhiyun  * (again possibly by USB I/O, during which it is marked BUSY) and
206*4882a593Smuzhiyun  * finally marked EMPTY again (possibly by a completion routine).
207*4882a593Smuzhiyun  *
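 * In code, the producer side of that handshake looks roughly like the
 * following (condensed from do_read() below; error handling omitted):
 *
 *	bh = common->next_buffhd_to_fill;
 *	while (bh->state != BUF_STATE_EMPTY)
 *		sleep_thread(common);
 *	... fill bh->buf and set bh->inreq->length ...
 *	bh->state = BUF_STATE_FULL;
 *	common->next_buffhd_to_fill = bh->next;
 *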
208*4882a593Smuzhiyun  * A module parameter tells the driver to avoid stalling the bulk
209*4882a593Smuzhiyun  * endpoints wherever the transport specification allows.  This is
210*4882a593Smuzhiyun  * necessary for some UDCs like the SuperH, which cannot reliably clear a
211*4882a593Smuzhiyun  * halt on a bulk endpoint.  However, under certain circumstances the
212*4882a593Smuzhiyun  * Bulk-only specification requires a stall.  In such cases the driver
213*4882a593Smuzhiyun  * will halt the endpoint and set a flag indicating that it should clear
214*4882a593Smuzhiyun  * the halt in software during the next device reset.  Hopefully this
215*4882a593Smuzhiyun  * will permit everything to work correctly.  Furthermore, although the
216*4882a593Smuzhiyun  * specification allows the bulk-out endpoint to halt when the host sends
217*4882a593Smuzhiyun  * too much data, implementing this would cause an unavoidable race.
218*4882a593Smuzhiyun  * The driver will always use the "no-stall" approach for OUT transfers.
219*4882a593Smuzhiyun  *
220*4882a593Smuzhiyun  * One subtle point concerns sending status-stage responses for ep0
221*4882a593Smuzhiyun  * requests.  Some of these requests, such as device reset, can involve
222*4882a593Smuzhiyun  * interrupting an ongoing file I/O operation, which might take an
223*4882a593Smuzhiyun  * arbitrarily long time.  During that delay the host might give up on
224*4882a593Smuzhiyun  * the original ep0 request and issue a new one.  When that happens the
225*4882a593Smuzhiyun  * driver should not notify the host about completion of the original
226*4882a593Smuzhiyun  * request, as the host will no longer be waiting for it.  So the driver
227*4882a593Smuzhiyun  * assigns to each ep0 request a unique tag, and it keeps track of the
228*4882a593Smuzhiyun  * tag value of the request associated with a long-running exception
229*4882a593Smuzhiyun  * (device-reset, interface-change, or configuration-change).  When the
230*4882a593Smuzhiyun  * exception handler is finished, the status-stage response is submitted
231*4882a593Smuzhiyun  * only if the current ep0 request tag is equal to the exception request
232*4882a593Smuzhiyun  * tag.  Thus only the most recently received ep0 request will get a
233*4882a593Smuzhiyun  * status-stage response.
234*4882a593Smuzhiyun  *
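 * Schematically, the final check made by the exception-handling code
 * later in this file amounts to (a sketch of the logic, not a verbatim
 * quote):
 *
 *	if (common->ep0_req_tag == exception_req_tag)
 *		ep0_queue(common);
 *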
235*4882a593Smuzhiyun  * Warning: This driver source file is too long.  It ought to be split up
236*4882a593Smuzhiyun  * into a header file plus about 3 separate .c files, to handle the details
237*4882a593Smuzhiyun  * of the Gadget, USB Mass Storage, and SCSI protocols.
238*4882a593Smuzhiyun  */
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun /* #define VERBOSE_DEBUG */
241*4882a593Smuzhiyun /* #define DUMP_MSGS */
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun #include <config.h>
244*4882a593Smuzhiyun #include <hexdump.h>
245*4882a593Smuzhiyun #include <malloc.h>
246*4882a593Smuzhiyun #include <common.h>
247*4882a593Smuzhiyun #include <console.h>
248*4882a593Smuzhiyun #include <g_dnl.h>
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun #include <linux/err.h>
251*4882a593Smuzhiyun #include <linux/usb/ch9.h>
252*4882a593Smuzhiyun #include <linux/usb/gadget.h>
253*4882a593Smuzhiyun #include <usb_mass_storage.h>
254*4882a593Smuzhiyun #include <rockusb.h>
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun #include <asm/unaligned.h>
257*4882a593Smuzhiyun #include <linux/bitops.h>
260*4882a593Smuzhiyun #include <linux/usb/composite.h>
261*4882a593Smuzhiyun #include <linux/bitmap.h>
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun /*------------------------------------------------------------------------*/
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun #define FSG_DRIVER_DESC	"Mass Storage Function"
267*4882a593Smuzhiyun #define FSG_DRIVER_VERSION	"2012/06/5"
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun static const char fsg_string_interface[] = "Mass Storage";
270*4882a593Smuzhiyun 
272*4882a593Smuzhiyun #define FSG_NO_DEVICE_STRINGS    1
273*4882a593Smuzhiyun #define FSG_NO_OTG               1
274*4882a593Smuzhiyun #define FSG_NO_INTR_EP           1
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun #include "storage_common.c"
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun #define GFP_ATOMIC ((gfp_t) 0)
281*4882a593Smuzhiyun #define PAGE_CACHE_SHIFT	12
282*4882a593Smuzhiyun #define PAGE_CACHE_SIZE		(1 << PAGE_CACHE_SHIFT)
283*4882a593Smuzhiyun #define kthread_create(...)	__builtin_return_address(0)
284*4882a593Smuzhiyun #define wait_for_completion(...) do {} while (0)
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun struct kref {int x; };
287*4882a593Smuzhiyun struct completion {int x; };
288*4882a593Smuzhiyun 
289*4882a593Smuzhiyun struct fsg_dev;
290*4882a593Smuzhiyun struct fsg_common;
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun /* Data shared by all the FSG instances. */
293*4882a593Smuzhiyun struct fsg_common {
294*4882a593Smuzhiyun 	struct usb_gadget	*gadget;
295*4882a593Smuzhiyun 	struct fsg_dev		*fsg, *new_fsg;
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	struct usb_ep		*ep0;		/* Copy of gadget->ep0 */
298*4882a593Smuzhiyun 	struct usb_request	*ep0req;	/* Copy of cdev->req */
299*4882a593Smuzhiyun 	unsigned int		ep0_req_tag;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	struct fsg_buffhd	*next_buffhd_to_fill;
302*4882a593Smuzhiyun 	struct fsg_buffhd	*next_buffhd_to_drain;
303*4882a593Smuzhiyun 	struct fsg_buffhd	buffhds[FSG_NUM_BUFFERS];
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	int			cmnd_size;
306*4882a593Smuzhiyun 	u8			cmnd[MAX_COMMAND_SIZE];
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	unsigned int		nluns;
309*4882a593Smuzhiyun 	unsigned int		lun;
310*4882a593Smuzhiyun 	struct fsg_lun          luns[FSG_MAX_LUNS];
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	unsigned int		bulk_out_maxpacket;
313*4882a593Smuzhiyun 	enum fsg_state		state;		/* For exception handling */
314*4882a593Smuzhiyun 	unsigned int		exception_req_tag;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	enum data_direction	data_dir;
317*4882a593Smuzhiyun 	u32			data_size;
318*4882a593Smuzhiyun 	u32			data_size_from_cmnd;
319*4882a593Smuzhiyun 	u32			tag;
320*4882a593Smuzhiyun 	u32			residue;
321*4882a593Smuzhiyun 	u32			usb_amount_left;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	unsigned int		can_stall:1;
324*4882a593Smuzhiyun 	unsigned int		free_storage_on_release:1;
325*4882a593Smuzhiyun 	unsigned int		phase_error:1;
326*4882a593Smuzhiyun 	unsigned int		short_packet_received:1;
327*4882a593Smuzhiyun 	unsigned int		bad_lun_okay:1;
328*4882a593Smuzhiyun 	unsigned int		running:1;
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun 	int			thread_wakeup_needed;
331*4882a593Smuzhiyun 	struct completion	thread_notifier;
332*4882a593Smuzhiyun 	struct task_struct	*thread_task;
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	/* Callback functions. */
335*4882a593Smuzhiyun 	const struct fsg_operations	*ops;
336*4882a593Smuzhiyun 	/* Gadget's private data. */
337*4882a593Smuzhiyun 	void			*private_data;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	const char *vendor_name;		/*  8 characters or less */
340*4882a593Smuzhiyun 	const char *product_name;		/* 16 characters or less */
341*4882a593Smuzhiyun 	u16 release;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	/* Vendor (8 chars), product (16 chars), release (4
344*4882a593Smuzhiyun 	 * hexadecimal digits) and NUL byte */
345*4882a593Smuzhiyun 	char inquiry_string[8 + 16 + 4 + 1];
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	struct kref		ref;
348*4882a593Smuzhiyun };
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun struct fsg_config {
351*4882a593Smuzhiyun 	unsigned nluns;
352*4882a593Smuzhiyun 	struct fsg_lun_config {
353*4882a593Smuzhiyun 		const char *filename;
354*4882a593Smuzhiyun 		char ro;
355*4882a593Smuzhiyun 		char removable;
356*4882a593Smuzhiyun 		char cdrom;
357*4882a593Smuzhiyun 		char nofua;
358*4882a593Smuzhiyun 	} luns[FSG_MAX_LUNS];
359*4882a593Smuzhiyun 
360*4882a593Smuzhiyun 	/* Callback functions. */
361*4882a593Smuzhiyun 	const struct fsg_operations     *ops;
362*4882a593Smuzhiyun 	/* Gadget's private data. */
363*4882a593Smuzhiyun 	void			*private_data;
364*4882a593Smuzhiyun 
365*4882a593Smuzhiyun 	const char *vendor_name;		/*  8 characters or less */
366*4882a593Smuzhiyun 	const char *product_name;		/* 16 characters or less */
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	char			can_stall;
369*4882a593Smuzhiyun };
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun struct fsg_dev {
372*4882a593Smuzhiyun 	struct usb_function	function;
373*4882a593Smuzhiyun 	struct usb_gadget	*gadget;	/* Copy of cdev->gadget */
374*4882a593Smuzhiyun 	struct fsg_common	*common;
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	u16			interface_number;
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	unsigned int		bulk_in_enabled:1;
379*4882a593Smuzhiyun 	unsigned int		bulk_out_enabled:1;
380*4882a593Smuzhiyun 
381*4882a593Smuzhiyun 	unsigned long		atomic_bitflags;
382*4882a593Smuzhiyun #define IGNORE_BULK_OUT		0
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	struct usb_ep		*bulk_in;
385*4882a593Smuzhiyun 	struct usb_ep		*bulk_out;
386*4882a593Smuzhiyun };
387*4882a593Smuzhiyun 
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun static inline int __fsg_is_set(struct fsg_common *common,
390*4882a593Smuzhiyun 			       const char *func, unsigned line)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun 	if (common->fsg)
393*4882a593Smuzhiyun 		return 1;
394*4882a593Smuzhiyun 	ERROR(common, "common->fsg is NULL in %s at %u\n", func, line);
395*4882a593Smuzhiyun 	WARN_ON(1);
396*4882a593Smuzhiyun 	return 0;
397*4882a593Smuzhiyun }
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun #define fsg_is_set(common) likely(__fsg_is_set(common, __func__, __LINE__))
400*4882a593Smuzhiyun 
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun static inline struct fsg_dev *fsg_from_func(struct usb_function *f)
403*4882a593Smuzhiyun {
404*4882a593Smuzhiyun 	return container_of(f, struct fsg_dev, function);
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun typedef void (*fsg_routine_t)(struct fsg_dev *);
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun static int exception_in_progress(struct fsg_common *common)
411*4882a593Smuzhiyun {
412*4882a593Smuzhiyun 	return common->state > FSG_STATE_IDLE;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun /* Make bulk-out requests be divisible by the maxpacket size */
416*4882a593Smuzhiyun static void set_bulk_out_req_length(struct fsg_common *common,
417*4882a593Smuzhiyun 		struct fsg_buffhd *bh, unsigned int length)
418*4882a593Smuzhiyun {
419*4882a593Smuzhiyun 	unsigned int	rem;
420*4882a593Smuzhiyun 
421*4882a593Smuzhiyun 	bh->bulk_out_intended_length = length;
422*4882a593Smuzhiyun 	rem = length % common->bulk_out_maxpacket;
423*4882a593Smuzhiyun 	if (rem > 0)
424*4882a593Smuzhiyun 		length += common->bulk_out_maxpacket - rem;
425*4882a593Smuzhiyun 	bh->outreq->length = length;
426*4882a593Smuzhiyun }
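
/*
 * Worked example: with a 512-byte bulk-out maxpacket, a requested
 * length of 600 is rounded up to 1024 (600 % 512 == 88, so 424 bytes
 * of padding are added), while bh->bulk_out_intended_length keeps the
 * original 600 so the completion handler can tell how much data the
 * command really asked for.
 */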
427*4882a593Smuzhiyun 
428*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun static struct ums *ums;
431*4882a593Smuzhiyun static int ums_count;
432*4882a593Smuzhiyun static struct fsg_common *the_fsg_common;
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep)
435*4882a593Smuzhiyun {
436*4882a593Smuzhiyun 	const char	*name;
437*4882a593Smuzhiyun 
438*4882a593Smuzhiyun 	if (ep == fsg->bulk_in)
439*4882a593Smuzhiyun 		name = "bulk-in";
440*4882a593Smuzhiyun 	else if (ep == fsg->bulk_out)
441*4882a593Smuzhiyun 		name = "bulk-out";
442*4882a593Smuzhiyun 	else
443*4882a593Smuzhiyun 		name = ep->name;
444*4882a593Smuzhiyun 	DBG(fsg, "%s set halt\n", name);
445*4882a593Smuzhiyun 	return usb_ep_set_halt(ep);
446*4882a593Smuzhiyun }
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun /* These routines may be called in process context or in_irq */
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun /* Caller must hold fsg->lock */
453*4882a593Smuzhiyun static void wakeup_thread(struct fsg_common *common)
454*4882a593Smuzhiyun {
455*4882a593Smuzhiyun 	common->thread_wakeup_needed = 1;
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun 
458*4882a593Smuzhiyun static void raise_exception(struct fsg_common *common, enum fsg_state new_state)
459*4882a593Smuzhiyun {
460*4882a593Smuzhiyun 	/* Do nothing if a higher-priority exception is already in progress.
461*4882a593Smuzhiyun 	 * If a lower-or-equal priority exception is in progress, preempt it
462*4882a593Smuzhiyun 	 * and notify the main thread by sending it a signal. */
463*4882a593Smuzhiyun 	if (common->state <= new_state) {
464*4882a593Smuzhiyun 		common->exception_req_tag = common->ep0_req_tag;
465*4882a593Smuzhiyun 		common->state = new_state;
466*4882a593Smuzhiyun 		common->thread_wakeup_needed = 1;
467*4882a593Smuzhiyun 	}
468*4882a593Smuzhiyun }
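
/*
 * Typical use (see fsg_setup() below): a Bulk-Only Mass Storage Reset
 * on ep0 calls raise_exception(fsg->common, FSG_STATE_RESET), which
 * preempts whatever command is in progress and lets the main thread
 * reinitialise its state.
 */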
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun static int ep0_queue(struct fsg_common *common)
473*4882a593Smuzhiyun {
474*4882a593Smuzhiyun 	int	rc;
475*4882a593Smuzhiyun 
476*4882a593Smuzhiyun 	rc = usb_ep_queue(common->ep0, common->ep0req, GFP_ATOMIC);
477*4882a593Smuzhiyun 	common->ep0->driver_data = common;
478*4882a593Smuzhiyun 	if (rc != 0 && rc != -ESHUTDOWN) {
479*4882a593Smuzhiyun 		/* We can't do much more than wait for a reset */
480*4882a593Smuzhiyun 		WARNING(common, "error in submission: %s --> %d\n",
481*4882a593Smuzhiyun 			common->ep0->name, rc);
482*4882a593Smuzhiyun 	}
483*4882a593Smuzhiyun 	return rc;
484*4882a593Smuzhiyun }
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun /* Bulk and interrupt endpoint completion handlers.
489*4882a593Smuzhiyun  * These always run in_irq. */
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
492*4882a593Smuzhiyun {
493*4882a593Smuzhiyun 	struct fsg_common	*common = ep->driver_data;
494*4882a593Smuzhiyun 	struct fsg_buffhd	*bh = req->context;
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	if (req->status || req->actual != req->length)
497*4882a593Smuzhiyun 		DBG(common, "%s --> %d, %u/%u\n", __func__,
498*4882a593Smuzhiyun 				req->status, req->actual, req->length);
499*4882a593Smuzhiyun 	if (req->status == -ECONNRESET)		/* Request was cancelled */
500*4882a593Smuzhiyun 		usb_ep_fifo_flush(ep);
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	/* Hold the lock while we update the request and buffer states */
503*4882a593Smuzhiyun 	bh->inreq_busy = 0;
504*4882a593Smuzhiyun 	bh->state = BUF_STATE_EMPTY;
505*4882a593Smuzhiyun 	wakeup_thread(common);
506*4882a593Smuzhiyun }
507*4882a593Smuzhiyun 
508*4882a593Smuzhiyun static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
509*4882a593Smuzhiyun {
510*4882a593Smuzhiyun 	struct fsg_common	*common = ep->driver_data;
511*4882a593Smuzhiyun 	struct fsg_buffhd	*bh = req->context;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	dump_msg(common, "bulk-out", req->buf, req->actual);
514*4882a593Smuzhiyun 	if (req->status || req->actual != bh->bulk_out_intended_length)
515*4882a593Smuzhiyun 		DBG(common, "%s --> %d, %u/%u\n", __func__,
516*4882a593Smuzhiyun 				req->status, req->actual,
517*4882a593Smuzhiyun 				bh->bulk_out_intended_length);
518*4882a593Smuzhiyun 	if (req->status == -ECONNRESET)		/* Request was cancelled */
519*4882a593Smuzhiyun 		usb_ep_fifo_flush(ep);
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	/* Hold the lock while we update the request and buffer states */
522*4882a593Smuzhiyun 	bh->outreq_busy = 0;
523*4882a593Smuzhiyun 	bh->state = BUF_STATE_FULL;
524*4882a593Smuzhiyun 	wakeup_thread(common);
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
528*4882a593Smuzhiyun 
529*4882a593Smuzhiyun /* Ep0 class-specific handlers.  These always run in_irq. */
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun static int fsg_setup(struct usb_function *f,
532*4882a593Smuzhiyun 		const struct usb_ctrlrequest *ctrl)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun 	struct fsg_dev		*fsg = fsg_from_func(f);
535*4882a593Smuzhiyun 	struct usb_request	*req = fsg->common->ep0req;
536*4882a593Smuzhiyun 	u16			w_index = get_unaligned_le16(&ctrl->wIndex);
537*4882a593Smuzhiyun 	u16			w_value = get_unaligned_le16(&ctrl->wValue);
538*4882a593Smuzhiyun 	u16			w_length = get_unaligned_le16(&ctrl->wLength);
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	if (!fsg_is_set(fsg->common))
541*4882a593Smuzhiyun 		return -EOPNOTSUPP;
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	switch (ctrl->bRequest) {
544*4882a593Smuzhiyun 
545*4882a593Smuzhiyun 	case USB_BULK_RESET_REQUEST:
546*4882a593Smuzhiyun 		if (ctrl->bRequestType !=
547*4882a593Smuzhiyun 		    (USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
548*4882a593Smuzhiyun 			break;
549*4882a593Smuzhiyun 		if (w_index != fsg->interface_number || w_value != 0)
550*4882a593Smuzhiyun 			return -EDOM;
551*4882a593Smuzhiyun 
552*4882a593Smuzhiyun 		/* Raise an exception to stop the current operation
553*4882a593Smuzhiyun 		 * and reinitialize our state. */
554*4882a593Smuzhiyun 		DBG(fsg, "bulk reset request\n");
555*4882a593Smuzhiyun 		raise_exception(fsg->common, FSG_STATE_RESET);
556*4882a593Smuzhiyun 		return DELAYED_STATUS;
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	case USB_BULK_GET_MAX_LUN_REQUEST:
559*4882a593Smuzhiyun 		if (ctrl->bRequestType !=
560*4882a593Smuzhiyun 		    (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE))
561*4882a593Smuzhiyun 			break;
562*4882a593Smuzhiyun 		if (w_index != fsg->interface_number || w_value != 0)
563*4882a593Smuzhiyun 			return -EDOM;
564*4882a593Smuzhiyun 		VDBG(fsg, "get max LUN\n");
565*4882a593Smuzhiyun 		*(u8 *) req->buf = fsg->common->nluns - 1;
566*4882a593Smuzhiyun 
567*4882a593Smuzhiyun 		/* Respond with data/status */
568*4882a593Smuzhiyun 		req->length = min((u16)1, w_length);
569*4882a593Smuzhiyun 		return ep0_queue(fsg->common);
570*4882a593Smuzhiyun 	}
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	VDBG(fsg,
573*4882a593Smuzhiyun 	     "unknown class-specific control req "
574*4882a593Smuzhiyun 	     "%02x.%02x v%04x i%04x l%u\n",
575*4882a593Smuzhiyun 	     ctrl->bRequestType, ctrl->bRequest,
576*4882a593Smuzhiyun 	     get_unaligned_le16(&ctrl->wValue), w_index, w_length);
577*4882a593Smuzhiyun 	return -EOPNOTSUPP;
578*4882a593Smuzhiyun }
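
/*
 * For reference, the Bulk-Only Transport "Get Max LUN" request handled
 * above arrives as bRequestType = 0xa1 (IN | class | interface),
 * bRequest = 0xfe, wValue = 0, wIndex = interface number, wLength = 1,
 * and the single data byte returned is nluns - 1.
 */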
579*4882a593Smuzhiyun 
580*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
581*4882a593Smuzhiyun 
582*4882a593Smuzhiyun /* All the following routines run in process context */
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun /* Use this for bulk or interrupt transfers, not ep0 */
585*4882a593Smuzhiyun static void start_transfer(struct fsg_dev *fsg, struct usb_ep *ep,
586*4882a593Smuzhiyun 		struct usb_request *req, int *pbusy,
587*4882a593Smuzhiyun 		enum fsg_buffer_state *state)
588*4882a593Smuzhiyun {
589*4882a593Smuzhiyun 	int	rc;
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	if (ep == fsg->bulk_in)
592*4882a593Smuzhiyun 		dump_msg(fsg, "bulk-in", req->buf, req->length);
593*4882a593Smuzhiyun 
594*4882a593Smuzhiyun 	*pbusy = 1;
595*4882a593Smuzhiyun 	*state = BUF_STATE_BUSY;
596*4882a593Smuzhiyun 	rc = usb_ep_queue(ep, req, GFP_KERNEL);
597*4882a593Smuzhiyun 	if (rc != 0) {
598*4882a593Smuzhiyun 		*pbusy = 0;
599*4882a593Smuzhiyun 		*state = BUF_STATE_EMPTY;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 		/* We can't do much more than wait for a reset */
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 		/* Note: currently the net2280 driver fails zero-length
604*4882a593Smuzhiyun 		 * submissions if DMA is enabled. */
605*4882a593Smuzhiyun 		if (rc != -ESHUTDOWN && !(rc == -EOPNOTSUPP &&
606*4882a593Smuzhiyun 						req->length == 0))
607*4882a593Smuzhiyun 			WARNING(fsg, "error in submission: %s --> %d\n",
608*4882a593Smuzhiyun 					ep->name, rc);
609*4882a593Smuzhiyun 	}
610*4882a593Smuzhiyun }
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun #define START_TRANSFER_OR(common, ep_name, req, pbusy, state)		\
613*4882a593Smuzhiyun 	if (fsg_is_set(common))						\
614*4882a593Smuzhiyun 		start_transfer((common)->fsg, (common)->fsg->ep_name,	\
615*4882a593Smuzhiyun 			       req, pbusy, state);			\
616*4882a593Smuzhiyun 	else
617*4882a593Smuzhiyun 
618*4882a593Smuzhiyun #define START_TRANSFER(common, ep_name, req, pbusy, state)		\
619*4882a593Smuzhiyun 	START_TRANSFER_OR(common, ep_name, req, pbusy, state) (void)0
620*4882a593Smuzhiyun 
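/*
 * Usage sketch, mirroring do_read() below: the _OR variant takes a
 * statement to execute when common->fsg turns out to be NULL, while
 * the plain variant turns that case into a no-op:
 *
 *	START_TRANSFER_OR(common, bulk_in, bh->inreq,
 *			  &bh->inreq_busy, &bh->state)
 *		return -EIO;
 */
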
621*4882a593Smuzhiyun static void busy_indicator(void)
622*4882a593Smuzhiyun {
623*4882a593Smuzhiyun 	static int state;
624*4882a593Smuzhiyun 
625*4882a593Smuzhiyun 	switch (state) {
626*4882a593Smuzhiyun 	case 0:
627*4882a593Smuzhiyun 		puts("\r|"); break;
628*4882a593Smuzhiyun 	case 1:
629*4882a593Smuzhiyun 		puts("\r/"); break;
630*4882a593Smuzhiyun 	case 2:
631*4882a593Smuzhiyun 		puts("\r-"); break;
632*4882a593Smuzhiyun 	case 3:
633*4882a593Smuzhiyun 		puts("\r\\"); break;
634*4882a593Smuzhiyun 	case 4:
635*4882a593Smuzhiyun 		puts("\r|"); break;
636*4882a593Smuzhiyun 	case 5:
637*4882a593Smuzhiyun 		puts("\r/"); break;
638*4882a593Smuzhiyun 	case 6:
639*4882a593Smuzhiyun 		puts("\r-"); break;
640*4882a593Smuzhiyun 	case 7:
641*4882a593Smuzhiyun 		puts("\r\\"); break;
642*4882a593Smuzhiyun 	default:
643*4882a593Smuzhiyun 		state = 0;
644*4882a593Smuzhiyun 	}
645*4882a593Smuzhiyun 	if (state++ == 8)
646*4882a593Smuzhiyun 		state = 0;
647*4882a593Smuzhiyun }
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun static int sleep_thread(struct fsg_common *common)
650*4882a593Smuzhiyun {
651*4882a593Smuzhiyun 	int	rc = 0;
652*4882a593Smuzhiyun 	int i = 0, k = 0;
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 	/* Wait until a signal arrives or we are woken up */
655*4882a593Smuzhiyun 	for (;;) {
656*4882a593Smuzhiyun 		if (common->thread_wakeup_needed)
657*4882a593Smuzhiyun 			break;
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 		if (++i == 20000) {
660*4882a593Smuzhiyun 			busy_indicator();
661*4882a593Smuzhiyun 			i = 0;
662*4882a593Smuzhiyun 			k++;
663*4882a593Smuzhiyun 		}
664*4882a593Smuzhiyun 
665*4882a593Smuzhiyun 		if (k == 10) {
666*4882a593Smuzhiyun 			/* Handle CTRL+C */
667*4882a593Smuzhiyun 			if (ctrlc())
668*4882a593Smuzhiyun 				return -EPIPE;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 			/* Check cable connection */
671*4882a593Smuzhiyun 			if (!g_dnl_board_usb_cable_connected())
672*4882a593Smuzhiyun 				return -EIO;
673*4882a593Smuzhiyun 
674*4882a593Smuzhiyun 			k = 0;
675*4882a593Smuzhiyun 		}
676*4882a593Smuzhiyun 
677*4882a593Smuzhiyun 		usb_gadget_handle_interrupts(0);
678*4882a593Smuzhiyun 	}
679*4882a593Smuzhiyun 	common->thread_wakeup_needed = 0;
680*4882a593Smuzhiyun 	return rc;
681*4882a593Smuzhiyun }
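
/*
 * Callers typically poll a buffer's state and sleep until a completion
 * callback (or an exception) wakes the thread, as in do_read() below:
 *
 *	while (bh->state != BUF_STATE_EMPTY) {
 *		rc = sleep_thread(common);
 *		if (rc)
 *			return rc;
 *	}
 */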
682*4882a593Smuzhiyun 
683*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
684*4882a593Smuzhiyun 
685*4882a593Smuzhiyun static int do_read(struct fsg_common *common)
686*4882a593Smuzhiyun {
687*4882a593Smuzhiyun 	struct fsg_lun		*curlun = &common->luns[common->lun];
688*4882a593Smuzhiyun 	u32			lba;
689*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
690*4882a593Smuzhiyun 	int			rc;
691*4882a593Smuzhiyun 	u32			amount_left;
692*4882a593Smuzhiyun 	loff_t			file_offset;
693*4882a593Smuzhiyun 	unsigned int		amount;
694*4882a593Smuzhiyun 	unsigned int		partial_page;
695*4882a593Smuzhiyun 	ssize_t			nread;
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 	/* Get the starting Logical Block Address and check that it's
698*4882a593Smuzhiyun 	 * not too big */
699*4882a593Smuzhiyun 	if (common->cmnd[0] == SC_READ_6)
700*4882a593Smuzhiyun 		lba = get_unaligned_be24(&common->cmnd[1]);
701*4882a593Smuzhiyun 	else {
702*4882a593Smuzhiyun 		lba = get_unaligned_be32(&common->cmnd[2]);
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun 		/* We allow DPO (Disable Page Out = don't save data in the
705*4882a593Smuzhiyun 		 * cache) and FUA (Force Unit Access = don't read from the
706*4882a593Smuzhiyun 		 * cache), but we don't implement them. */
707*4882a593Smuzhiyun 		if ((common->cmnd[1] & ~0x18) != 0) {
708*4882a593Smuzhiyun 			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
709*4882a593Smuzhiyun 			return -EINVAL;
710*4882a593Smuzhiyun 		}
711*4882a593Smuzhiyun 	}
712*4882a593Smuzhiyun 	if (lba >= curlun->num_sectors) {
713*4882a593Smuzhiyun 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
714*4882a593Smuzhiyun 		return -EINVAL;
715*4882a593Smuzhiyun 	}
716*4882a593Smuzhiyun 	file_offset = ((loff_t) lba) << 9;
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	/* Carry out the file reads */
719*4882a593Smuzhiyun 	amount_left = common->data_size_from_cmnd;
720*4882a593Smuzhiyun 	if (unlikely(amount_left == 0))
721*4882a593Smuzhiyun 		return -EIO;		/* No default reply */
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	for (;;) {
724*4882a593Smuzhiyun 
725*4882a593Smuzhiyun 		/* Figure out how much we need to read:
726*4882a593Smuzhiyun 		 * Try to read the remaining amount.
727*4882a593Smuzhiyun 		 * But don't read more than the buffer size.
728*4882a593Smuzhiyun 		 * And don't try to read past the end of the file.
729*4882a593Smuzhiyun 		 * Finally, if we're not at a page boundary, don't read past
730*4882a593Smuzhiyun 		 *	the next page.
731*4882a593Smuzhiyun 		 * If this means reading 0 then we were asked to read past
732*4882a593Smuzhiyun 		 *	the end of file. */
733*4882a593Smuzhiyun 		amount = min(amount_left, FSG_BUFLEN);
734*4882a593Smuzhiyun 		partial_page = file_offset & (PAGE_CACHE_SIZE - 1);
735*4882a593Smuzhiyun 		if (partial_page > 0)
736*4882a593Smuzhiyun 			amount = min(amount, (unsigned int) PAGE_CACHE_SIZE -
737*4882a593Smuzhiyun 					partial_page);
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 		/* Wait for the next buffer to become available */
740*4882a593Smuzhiyun 		bh = common->next_buffhd_to_fill;
741*4882a593Smuzhiyun 		while (bh->state != BUF_STATE_EMPTY) {
742*4882a593Smuzhiyun 			rc = sleep_thread(common);
743*4882a593Smuzhiyun 			if (rc)
744*4882a593Smuzhiyun 				return rc;
745*4882a593Smuzhiyun 		}
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun 		/* If we were asked to read past the end of file,
748*4882a593Smuzhiyun 		 * end with an empty buffer. */
749*4882a593Smuzhiyun 		if (amount == 0) {
750*4882a593Smuzhiyun 			curlun->sense_data =
751*4882a593Smuzhiyun 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
752*4882a593Smuzhiyun 			curlun->info_valid = 1;
753*4882a593Smuzhiyun 			bh->inreq->length = 0;
754*4882a593Smuzhiyun 			bh->state = BUF_STATE_FULL;
755*4882a593Smuzhiyun 			break;
756*4882a593Smuzhiyun 		}
757*4882a593Smuzhiyun 
758*4882a593Smuzhiyun 		/* Perform the read */
759*4882a593Smuzhiyun 		rc = ums[common->lun].read_sector(&ums[common->lun],
760*4882a593Smuzhiyun 				      file_offset / SECTOR_SIZE,
761*4882a593Smuzhiyun 				      amount / SECTOR_SIZE,
762*4882a593Smuzhiyun 				      (char __user *)bh->buf);
763*4882a593Smuzhiyun 		if (!rc)
764*4882a593Smuzhiyun 			return -EIO;
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 		nread = rc * SECTOR_SIZE;
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun 		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
769*4882a593Smuzhiyun 				(unsigned long long) file_offset,
770*4882a593Smuzhiyun 				(int) nread);
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun 		if (nread < 0) {
773*4882a593Smuzhiyun 			LDBG(curlun, "error in file read: %d\n",
774*4882a593Smuzhiyun 					(int) nread);
775*4882a593Smuzhiyun 			nread = 0;
776*4882a593Smuzhiyun 		} else if (nread < amount) {
777*4882a593Smuzhiyun 			LDBG(curlun, "partial file read: %d/%u\n",
778*4882a593Smuzhiyun 					(int) nread, amount);
779*4882a593Smuzhiyun 			nread -= (nread & 511);	/* Round down to a block */
780*4882a593Smuzhiyun 		}
781*4882a593Smuzhiyun 		file_offset  += nread;
782*4882a593Smuzhiyun 		amount_left  -= nread;
783*4882a593Smuzhiyun 		common->residue -= nread;
784*4882a593Smuzhiyun 		bh->inreq->length = nread;
785*4882a593Smuzhiyun 		bh->state = BUF_STATE_FULL;
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun 		/* If an error occurred, report it and its position */
788*4882a593Smuzhiyun 		if (nread < amount) {
789*4882a593Smuzhiyun 			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
790*4882a593Smuzhiyun 			curlun->info_valid = 1;
791*4882a593Smuzhiyun 			break;
792*4882a593Smuzhiyun 		}
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 		if (amount_left == 0)
795*4882a593Smuzhiyun 			break;		/* No more left to read */
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 		/* Send this buffer and go read some more */
798*4882a593Smuzhiyun 		bh->inreq->zero = 0;
799*4882a593Smuzhiyun 		START_TRANSFER_OR(common, bulk_in, bh->inreq,
800*4882a593Smuzhiyun 			       &bh->inreq_busy, &bh->state)
801*4882a593Smuzhiyun 			/* Don't know what to do if
802*4882a593Smuzhiyun 			 * common->fsg is NULL */
803*4882a593Smuzhiyun 			return -EIO;
804*4882a593Smuzhiyun 		common->next_buffhd_to_fill = bh->next;
805*4882a593Smuzhiyun 	}
806*4882a593Smuzhiyun 
807*4882a593Smuzhiyun 	return -EIO;		/* No default reply */
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun static int do_write(struct fsg_common *common)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun 	struct fsg_lun		*curlun = &common->luns[common->lun];
815*4882a593Smuzhiyun 	u32			lba;
816*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
817*4882a593Smuzhiyun 	int			get_some_more;
818*4882a593Smuzhiyun 	u32			amount_left_to_req, amount_left_to_write;
819*4882a593Smuzhiyun 	loff_t			usb_offset, file_offset;
820*4882a593Smuzhiyun 	unsigned int		amount;
821*4882a593Smuzhiyun 	unsigned int		partial_page;
822*4882a593Smuzhiyun 	ssize_t			nwritten;
823*4882a593Smuzhiyun 	int			rc;
824*4882a593Smuzhiyun 	const char		*cdev_name __maybe_unused;
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun 	if (curlun->ro) {
827*4882a593Smuzhiyun 		curlun->sense_data = SS_WRITE_PROTECTED;
828*4882a593Smuzhiyun 		return -EINVAL;
829*4882a593Smuzhiyun 	}
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun 	/* Get the starting Logical Block Address and check that it's
832*4882a593Smuzhiyun 	 * not too big */
833*4882a593Smuzhiyun 	if (common->cmnd[0] == SC_WRITE_6)
834*4882a593Smuzhiyun 		lba = get_unaligned_be24(&common->cmnd[1]);
835*4882a593Smuzhiyun 	else {
836*4882a593Smuzhiyun 		lba = get_unaligned_be32(&common->cmnd[2]);
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 		/* We allow DPO (Disable Page Out = don't save data in the
839*4882a593Smuzhiyun 		 * cache) and FUA (Force Unit Access = write directly to the
840*4882a593Smuzhiyun 		 * medium).  We don't implement DPO; we implement FUA by
841*4882a593Smuzhiyun 		 * performing synchronous output. */
842*4882a593Smuzhiyun 		if (common->cmnd[1] & ~0x18) {
843*4882a593Smuzhiyun 			curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
844*4882a593Smuzhiyun 			return -EINVAL;
845*4882a593Smuzhiyun 		}
846*4882a593Smuzhiyun 	}
847*4882a593Smuzhiyun 	if (lba >= curlun->num_sectors) {
848*4882a593Smuzhiyun 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
849*4882a593Smuzhiyun 		return -EINVAL;
850*4882a593Smuzhiyun 	}
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	/* Carry out the file writes */
853*4882a593Smuzhiyun 	get_some_more = 1;
854*4882a593Smuzhiyun 	file_offset = usb_offset = ((loff_t) lba) << 9;
855*4882a593Smuzhiyun 	amount_left_to_req = common->data_size_from_cmnd;
856*4882a593Smuzhiyun 	amount_left_to_write = common->data_size_from_cmnd;
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun 	while (amount_left_to_write > 0) {
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 		/* Queue a request for more data from the host */
861*4882a593Smuzhiyun 		bh = common->next_buffhd_to_fill;
862*4882a593Smuzhiyun 		if (bh->state == BUF_STATE_EMPTY && get_some_more) {
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 			/* Figure out how much we want to get:
865*4882a593Smuzhiyun 			 * Try to get the remaining amount.
866*4882a593Smuzhiyun 			 * But don't get more than the buffer size.
867*4882a593Smuzhiyun 			 * And don't try to go past the end of the file.
868*4882a593Smuzhiyun 			 * If we're not at a page boundary,
869*4882a593Smuzhiyun 			 *	don't go past the next page.
870*4882a593Smuzhiyun 			 * If this means getting 0, then we were asked
871*4882a593Smuzhiyun 			 *	to write past the end of file.
872*4882a593Smuzhiyun 			 * Finally, round down to a block boundary. */
873*4882a593Smuzhiyun 			amount = min(amount_left_to_req, FSG_BUFLEN);
874*4882a593Smuzhiyun 			partial_page = usb_offset & (PAGE_CACHE_SIZE - 1);
875*4882a593Smuzhiyun 			if (partial_page > 0)
876*4882a593Smuzhiyun 				amount = min(amount,
877*4882a593Smuzhiyun 	(unsigned int) PAGE_CACHE_SIZE - partial_page);
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 			if (amount == 0) {
880*4882a593Smuzhiyun 				get_some_more = 0;
881*4882a593Smuzhiyun 				curlun->sense_data =
882*4882a593Smuzhiyun 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
883*4882a593Smuzhiyun 				curlun->info_valid = 1;
884*4882a593Smuzhiyun 				continue;
885*4882a593Smuzhiyun 			}
886*4882a593Smuzhiyun 			amount -= (amount & 511);
887*4882a593Smuzhiyun 			if (amount == 0) {
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun 				/* Why were we asked to transfer a
890*4882a593Smuzhiyun 				 * partial block? */
891*4882a593Smuzhiyun 				get_some_more = 0;
892*4882a593Smuzhiyun 				continue;
893*4882a593Smuzhiyun 			}
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 			/* Get the next buffer */
896*4882a593Smuzhiyun 			usb_offset += amount;
897*4882a593Smuzhiyun 			common->usb_amount_left -= amount;
898*4882a593Smuzhiyun 			amount_left_to_req -= amount;
899*4882a593Smuzhiyun 			if (amount_left_to_req == 0)
900*4882a593Smuzhiyun 				get_some_more = 0;
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 			/* amount is always divisible by 512, hence by
903*4882a593Smuzhiyun 			 * the bulk-out maxpacket size */
904*4882a593Smuzhiyun 			bh->outreq->length = amount;
905*4882a593Smuzhiyun 			bh->bulk_out_intended_length = amount;
906*4882a593Smuzhiyun 			bh->outreq->short_not_ok = 1;
907*4882a593Smuzhiyun 			START_TRANSFER_OR(common, bulk_out, bh->outreq,
908*4882a593Smuzhiyun 					  &bh->outreq_busy, &bh->state)
909*4882a593Smuzhiyun 				/* Don't know what to do if
910*4882a593Smuzhiyun 				 * common->fsg is NULL */
911*4882a593Smuzhiyun 				return -EIO;
912*4882a593Smuzhiyun 			common->next_buffhd_to_fill = bh->next;
913*4882a593Smuzhiyun 			continue;
914*4882a593Smuzhiyun 		}
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 		/* Write the received data to the backing file */
917*4882a593Smuzhiyun 		bh = common->next_buffhd_to_drain;
918*4882a593Smuzhiyun 		if (bh->state == BUF_STATE_EMPTY && !get_some_more)
919*4882a593Smuzhiyun 			break;			/* We stopped early */
920*4882a593Smuzhiyun 		if (bh->state == BUF_STATE_FULL) {
921*4882a593Smuzhiyun 			common->next_buffhd_to_drain = bh->next;
922*4882a593Smuzhiyun 			bh->state = BUF_STATE_EMPTY;
923*4882a593Smuzhiyun 
924*4882a593Smuzhiyun 			/* Did something go wrong with the transfer? */
925*4882a593Smuzhiyun 			if (bh->outreq->status != 0) {
926*4882a593Smuzhiyun 				curlun->sense_data = SS_COMMUNICATION_FAILURE;
927*4882a593Smuzhiyun 				curlun->info_valid = 1;
928*4882a593Smuzhiyun 				break;
929*4882a593Smuzhiyun 			}
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun 			amount = bh->outreq->actual;
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 			/* Perform the write */
934*4882a593Smuzhiyun 			rc = ums[common->lun].write_sector(&ums[common->lun],
935*4882a593Smuzhiyun 					       file_offset / SECTOR_SIZE,
936*4882a593Smuzhiyun 					       amount / SECTOR_SIZE,
937*4882a593Smuzhiyun 					       (char __user *)bh->buf);
938*4882a593Smuzhiyun 			if (!rc)
939*4882a593Smuzhiyun 				return -EIO;
940*4882a593Smuzhiyun 			nwritten = rc * SECTOR_SIZE;
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun 			VLDBG(curlun, "file write %u @ %llu -> %d\n", amount,
943*4882a593Smuzhiyun 					(unsigned long long) file_offset,
944*4882a593Smuzhiyun 					(int) nwritten);
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 			if (nwritten < 0) {
947*4882a593Smuzhiyun 				LDBG(curlun, "error in file write: %d\n",
948*4882a593Smuzhiyun 						(int) nwritten);
949*4882a593Smuzhiyun 				nwritten = 0;
950*4882a593Smuzhiyun 			} else if (nwritten < amount) {
951*4882a593Smuzhiyun 				LDBG(curlun, "partial file write: %d/%u\n",
952*4882a593Smuzhiyun 						(int) nwritten, amount);
953*4882a593Smuzhiyun 				nwritten -= (nwritten & 511);
954*4882a593Smuzhiyun 				/* Round down to a block */
955*4882a593Smuzhiyun 			}
956*4882a593Smuzhiyun 			file_offset += nwritten;
957*4882a593Smuzhiyun 			amount_left_to_write -= nwritten;
958*4882a593Smuzhiyun 			common->residue -= nwritten;
959*4882a593Smuzhiyun 
960*4882a593Smuzhiyun 			/* If an error occurred, report it and its position */
961*4882a593Smuzhiyun 			if (nwritten < amount) {
962*4882a593Smuzhiyun 				printf("nwritten:%zd amount:%u\n", nwritten,
963*4882a593Smuzhiyun 				       amount);
964*4882a593Smuzhiyun 				curlun->sense_data = SS_WRITE_ERROR;
965*4882a593Smuzhiyun 				curlun->info_valid = 1;
966*4882a593Smuzhiyun 				break;
967*4882a593Smuzhiyun 			}
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun 			/* Did the host decide to stop early? */
970*4882a593Smuzhiyun 			if (bh->outreq->actual != bh->outreq->length) {
971*4882a593Smuzhiyun 				common->short_packet_received = 1;
972*4882a593Smuzhiyun 				break;
973*4882a593Smuzhiyun 			}
974*4882a593Smuzhiyun 			continue;
975*4882a593Smuzhiyun 		}
976*4882a593Smuzhiyun 
977*4882a593Smuzhiyun 		/* Wait for something to happen */
978*4882a593Smuzhiyun 		rc = sleep_thread(common);
979*4882a593Smuzhiyun 		if (rc)
980*4882a593Smuzhiyun 			return rc;
981*4882a593Smuzhiyun 	}
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 	cdev_name = common->fsg->function.config->cdev->driver->name;
984*4882a593Smuzhiyun 	if (IS_RKUSB_UMS_DNL(cdev_name))
985*4882a593Smuzhiyun 		rkusb_do_check_parity(common);
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun 	return -EIO;		/* No default reply */
988*4882a593Smuzhiyun }
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
991*4882a593Smuzhiyun 
992*4882a593Smuzhiyun static int do_synchronize_cache(struct fsg_common *common)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun 	return 0;
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun static int do_verify(struct fsg_common *common)
1000*4882a593Smuzhiyun {
1001*4882a593Smuzhiyun 	struct fsg_lun		*curlun = &common->luns[common->lun];
1002*4882a593Smuzhiyun 	u32			lba;
1003*4882a593Smuzhiyun 	u32			verification_length;
1004*4882a593Smuzhiyun 	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
1005*4882a593Smuzhiyun 	loff_t			file_offset;
1006*4882a593Smuzhiyun 	u32			amount_left;
1007*4882a593Smuzhiyun 	unsigned int		amount;
1008*4882a593Smuzhiyun 	ssize_t			nread;
1009*4882a593Smuzhiyun 	int			rc;
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	/* Get the starting Logical Block Address and check that it's
1012*4882a593Smuzhiyun 	 * not too big */
1013*4882a593Smuzhiyun 	lba = get_unaligned_be32(&common->cmnd[2]);
1014*4882a593Smuzhiyun 	if (lba >= curlun->num_sectors) {
1015*4882a593Smuzhiyun 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1016*4882a593Smuzhiyun 		return -EINVAL;
1017*4882a593Smuzhiyun 	}
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 	/* We allow DPO (Disable Page Out = don't save data in the
1020*4882a593Smuzhiyun 	 * cache) but we don't implement it. */
1021*4882a593Smuzhiyun 	if (common->cmnd[1] & ~0x10) {
1022*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1023*4882a593Smuzhiyun 		return -EINVAL;
1024*4882a593Smuzhiyun 	}
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	verification_length = get_unaligned_be16(&common->cmnd[7]);
1027*4882a593Smuzhiyun 	if (unlikely(verification_length == 0))
1028*4882a593Smuzhiyun 		return -EIO;		/* No default reply */
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	/* Prepare to carry out the file verify */
1031*4882a593Smuzhiyun 	amount_left = verification_length << 9;
1032*4882a593Smuzhiyun 	file_offset = ((loff_t) lba) << 9;
1033*4882a593Smuzhiyun 
1034*4882a593Smuzhiyun 	/* Write out all the dirty buffers before invalidating them */
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	/* Just try to read the requested blocks */
1037*4882a593Smuzhiyun 	while (amount_left > 0) {
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 		/* Figure out how much we need to read:
1040*4882a593Smuzhiyun 		 * Try to read the remaining amount, but not more than
1041*4882a593Smuzhiyun 		 * the buffer size.
1042*4882a593Smuzhiyun 		 * And don't try to read past the end of the file.
1043*4882a593Smuzhiyun 		 * If this means reading 0 then we were asked to read
1044*4882a593Smuzhiyun 		 * past the end of file. */
1045*4882a593Smuzhiyun 		amount = min(amount_left, FSG_BUFLEN);
1046*4882a593Smuzhiyun 		if (amount == 0) {
1047*4882a593Smuzhiyun 			curlun->sense_data =
1048*4882a593Smuzhiyun 					SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1049*4882a593Smuzhiyun 			curlun->info_valid = 1;
1050*4882a593Smuzhiyun 			break;
1051*4882a593Smuzhiyun 		}
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 		/* Perform the read */
1054*4882a593Smuzhiyun 		rc = ums[common->lun].read_sector(&ums[common->lun],
1055*4882a593Smuzhiyun 				      file_offset / SECTOR_SIZE,
1056*4882a593Smuzhiyun 				      amount / SECTOR_SIZE,
1057*4882a593Smuzhiyun 				      (char __user *)bh->buf);
1058*4882a593Smuzhiyun 		if (!rc)
1059*4882a593Smuzhiyun 			return -EIO;
1060*4882a593Smuzhiyun 		nread = rc * SECTOR_SIZE;
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 		VLDBG(curlun, "file read %u @ %llu -> %d\n", amount,
1063*4882a593Smuzhiyun 				(unsigned long long) file_offset,
1064*4882a593Smuzhiyun 				(int) nread);
1065*4882a593Smuzhiyun 		if (nread < 0) {
1066*4882a593Smuzhiyun 			LDBG(curlun, "error in file verify: %d\n",
1067*4882a593Smuzhiyun 					(int) nread);
1068*4882a593Smuzhiyun 			nread = 0;
1069*4882a593Smuzhiyun 		} else if (nread < amount) {
1070*4882a593Smuzhiyun 			LDBG(curlun, "partial file verify: %d/%u\n",
1071*4882a593Smuzhiyun 					(int) nread, amount);
1072*4882a593Smuzhiyun 			nread -= (nread & 511);	/* Round down to a sector */
1073*4882a593Smuzhiyun 		}
1074*4882a593Smuzhiyun 		if (nread == 0) {
1075*4882a593Smuzhiyun 			curlun->sense_data = SS_UNRECOVERED_READ_ERROR;
1076*4882a593Smuzhiyun 			curlun->info_valid = 1;
1077*4882a593Smuzhiyun 			break;
1078*4882a593Smuzhiyun 		}
1079*4882a593Smuzhiyun 		file_offset += nread;
1080*4882a593Smuzhiyun 		amount_left -= nread;
1081*4882a593Smuzhiyun 	}
1082*4882a593Smuzhiyun 	return 0;
1083*4882a593Smuzhiyun }
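/*
 * Note: the VERIFY(10) handler above only checks that the requested
 * blocks can be read back (BytChk is assumed to be 0, so no host data
 * is compared); read failures are reported through the sense data.
 */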
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1086*4882a593Smuzhiyun 
1087*4882a593Smuzhiyun static int do_inquiry(struct fsg_common *common, struct fsg_buffhd *bh)
1088*4882a593Smuzhiyun {
1089*4882a593Smuzhiyun 	struct fsg_lun *curlun = &common->luns[common->lun];
1090*4882a593Smuzhiyun 	static const char vendor_id[] = "Linux   ";
1091*4882a593Smuzhiyun 	u8	*buf = (u8 *) bh->buf;
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	if (!curlun) {		/* Unsupported LUNs are okay */
1094*4882a593Smuzhiyun 		common->bad_lun_okay = 1;
1095*4882a593Smuzhiyun 		memset(buf, 0, 36);
1096*4882a593Smuzhiyun 		buf[0] = 0x7f;		/* Unsupported, no device-type */
1097*4882a593Smuzhiyun 		buf[4] = 31;		/* Additional length */
1098*4882a593Smuzhiyun 		return 36;
1099*4882a593Smuzhiyun 	}
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	memset(buf, 0, 8);
1102*4882a593Smuzhiyun 	buf[0] = TYPE_DISK;
1103*4882a593Smuzhiyun 	buf[1] = curlun->removable ? 0x80 : 0;
1104*4882a593Smuzhiyun 	buf[2] = 2;		/* ANSI SCSI level 2 */
1105*4882a593Smuzhiyun 	buf[3] = 2;		/* SCSI-2 INQUIRY data format */
1106*4882a593Smuzhiyun 	buf[4] = 31;		/* Additional length */
1107*4882a593Smuzhiyun 				/* No special options */
1108*4882a593Smuzhiyun 	sprintf((char *) (buf + 8), "%-8s%-16s%04x", (char *)vendor_id,
1109*4882a593Smuzhiyun 			ums[common->lun].name, (u16) 0xffff);
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	return 36;
1112*4882a593Smuzhiyun }
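/*
 * The 36-byte standard INQUIRY data built above follows the SCSI-2
 * layout: byte 0 = peripheral device type, byte 1 bit 7 = RMB
 * (removable), byte 4 = additional length (31), bytes 8-15 = vendor
 * ("Linux   "), bytes 16-31 = product (the UMS device name) and
 * bytes 32-35 = revision ("ffff" from the %04x above).
 */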
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun 
1115*4882a593Smuzhiyun static int do_request_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1116*4882a593Smuzhiyun {
1117*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1118*4882a593Smuzhiyun 	u8		*buf = (u8 *) bh->buf;
1119*4882a593Smuzhiyun 	u32		sd, sdinfo;
1120*4882a593Smuzhiyun 	int		valid;
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	/*
1123*4882a593Smuzhiyun 	 * From the SCSI-2 spec., section 7.9 (Unit attention condition):
1124*4882a593Smuzhiyun 	 *
1125*4882a593Smuzhiyun 	 * If a REQUEST SENSE command is received from an initiator
1126*4882a593Smuzhiyun 	 * with a pending unit attention condition (before the target
1127*4882a593Smuzhiyun 	 * generates the contingent allegiance condition), then the
1128*4882a593Smuzhiyun 	 * target shall either:
1129*4882a593Smuzhiyun 	 *   a) report any pending sense data and preserve the unit
1130*4882a593Smuzhiyun 	 *	attention condition on the logical unit, or,
1131*4882a593Smuzhiyun 	 *   b) report the unit attention condition, may discard any
1132*4882a593Smuzhiyun 	 *	pending sense data, and clear the unit attention
1133*4882a593Smuzhiyun 	 *	condition on the logical unit for that initiator.
1134*4882a593Smuzhiyun 	 *
1135*4882a593Smuzhiyun 	 * FSG normally uses option a); enable this code to use option b).
1136*4882a593Smuzhiyun 	 */
1137*4882a593Smuzhiyun #if 0
1138*4882a593Smuzhiyun 	if (curlun && curlun->unit_attention_data != SS_NO_SENSE) {
1139*4882a593Smuzhiyun 		curlun->sense_data = curlun->unit_attention_data;
1140*4882a593Smuzhiyun 		curlun->unit_attention_data = SS_NO_SENSE;
1141*4882a593Smuzhiyun 	}
1142*4882a593Smuzhiyun #endif
1143*4882a593Smuzhiyun 
1144*4882a593Smuzhiyun 	if (!curlun) {		/* Unsupported LUNs are okay */
1145*4882a593Smuzhiyun 		common->bad_lun_okay = 1;
1146*4882a593Smuzhiyun 		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1147*4882a593Smuzhiyun 		sdinfo = 0;
1148*4882a593Smuzhiyun 		valid = 0;
1149*4882a593Smuzhiyun 	} else {
1150*4882a593Smuzhiyun 		sd = curlun->sense_data;
1151*4882a593Smuzhiyun 		valid = curlun->info_valid << 7;
1152*4882a593Smuzhiyun 		curlun->sense_data = SS_NO_SENSE;
1153*4882a593Smuzhiyun 		curlun->info_valid = 0;
1154*4882a593Smuzhiyun 	}
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 	memset(buf, 0, 18);
1157*4882a593Smuzhiyun 	buf[0] = valid | 0x70;			/* Valid, current error */
1158*4882a593Smuzhiyun 	buf[2] = SK(sd);
1159*4882a593Smuzhiyun 	put_unaligned_be32(sdinfo, &buf[3]);	/* Sense information */
1160*4882a593Smuzhiyun 	buf[7] = 18 - 8;			/* Additional sense length */
1161*4882a593Smuzhiyun 	buf[12] = ASC(sd);
1162*4882a593Smuzhiyun 	buf[13] = ASCQ(sd);
1163*4882a593Smuzhiyun 	return 18;
1164*4882a593Smuzhiyun }
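/*
 * The 18 bytes above are fixed-format sense data: byte 0 = 0x70
 * (current error) plus the valid bit, byte 2 = sense key, bytes 3-6 =
 * information field, byte 7 = additional length (10), bytes 12/13 =
 * ASC/ASCQ.  The sense data is cleared once reported, so a second
 * REQUEST SENSE returns NO SENSE.
 */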
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
1167*4882a593Smuzhiyun {
1168*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1169*4882a593Smuzhiyun 	u32		lba = get_unaligned_be32(&common->cmnd[2]);
1170*4882a593Smuzhiyun 	int		pmi = common->cmnd[8];
1171*4882a593Smuzhiyun 	u8		*buf = (u8 *) bh->buf;
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	/* Check the PMI and LBA fields */
1174*4882a593Smuzhiyun 	if (pmi > 1 || (pmi == 0 && lba != 0)) {
1175*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1176*4882a593Smuzhiyun 		return -EINVAL;
1177*4882a593Smuzhiyun 	}
1178*4882a593Smuzhiyun 
1179*4882a593Smuzhiyun 	put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
1180*4882a593Smuzhiyun 						/* Max logical block */
1181*4882a593Smuzhiyun 	put_unaligned_be32(512, &buf[4]);	/* Block length */
1182*4882a593Smuzhiyun 	return 8;
1183*4882a593Smuzhiyun }
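/*
 * READ CAPACITY(10) returns the address of the last block and the
 * block length, both big-endian.  As an illustrative example, a LUN
 * with num_sectors == 0x00800000 (4 GiB of 512-byte blocks) would
 * report max LBA 0x007fffff and block length 0x00000200.
 */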
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1188*4882a593Smuzhiyun 	int		msf = common->cmnd[1] & 0x02;
1189*4882a593Smuzhiyun 	u32		lba = get_unaligned_be32(&common->cmnd[2]);
1190*4882a593Smuzhiyun 	u8		*buf = (u8 *) bh->buf;
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	if (common->cmnd[1] & ~0x02) {		/* Mask away MSF */
1193*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1194*4882a593Smuzhiyun 		return -EINVAL;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 	if (lba >= curlun->num_sectors) {
1197*4882a593Smuzhiyun 		curlun->sense_data = SS_LOGICAL_BLOCK_ADDRESS_OUT_OF_RANGE;
1198*4882a593Smuzhiyun 		return -EINVAL;
1199*4882a593Smuzhiyun 	}
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	memset(buf, 0, 8);
1202*4882a593Smuzhiyun 	buf[0] = 0x01;		/* 2048 bytes of user data, rest is EC */
1203*4882a593Smuzhiyun 	store_cdrom_address(&buf[4], msf, lba);
1204*4882a593Smuzhiyun 	return 8;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun static int do_read_toc(struct fsg_common *common, struct fsg_buffhd *bh)
1209*4882a593Smuzhiyun {
1210*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1211*4882a593Smuzhiyun 	int		msf = common->cmnd[1] & 0x02;
1212*4882a593Smuzhiyun 	int		start_track = common->cmnd[6];
1213*4882a593Smuzhiyun 	u8		*buf = (u8 *) bh->buf;
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	if ((common->cmnd[1] & ~0x02) != 0 ||	/* Mask away MSF */
1216*4882a593Smuzhiyun 			start_track > 1) {
1217*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1218*4882a593Smuzhiyun 		return -EINVAL;
1219*4882a593Smuzhiyun 	}
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	memset(buf, 0, 20);
1222*4882a593Smuzhiyun 	buf[1] = (20-2);		/* TOC data length */
1223*4882a593Smuzhiyun 	buf[2] = 1;			/* First track number */
1224*4882a593Smuzhiyun 	buf[3] = 1;			/* Last track number */
1225*4882a593Smuzhiyun 	buf[5] = 0x16;			/* Data track, copying allowed */
1226*4882a593Smuzhiyun 	buf[6] = 0x01;			/* Only track is number 1 */
1227*4882a593Smuzhiyun 	store_cdrom_address(&buf[8], msf, 0);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	buf[13] = 0x16;			/* Lead-out track is data */
1230*4882a593Smuzhiyun 	buf[14] = 0xAA;			/* Lead-out track number */
1231*4882a593Smuzhiyun 	store_cdrom_address(&buf[16], msf, curlun->num_sectors);
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	return 20;
1234*4882a593Smuzhiyun }
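/*
 * The TOC reply above describes a minimal single-session disc: one
 * data track (number 1) plus the lead-out entry (track 0xAA) whose
 * address equals the total number of sectors, in either LBA or MSF
 * form depending on the MSF bit in the CDB.
 */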
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun static int do_mode_sense(struct fsg_common *common, struct fsg_buffhd *bh)
1237*4882a593Smuzhiyun {
1238*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1239*4882a593Smuzhiyun 	int		mscmnd = common->cmnd[0];
1240*4882a593Smuzhiyun 	u8		*buf = (u8 *) bh->buf;
1241*4882a593Smuzhiyun 	u8		*buf0 = buf;
1242*4882a593Smuzhiyun 	int		pc, page_code;
1243*4882a593Smuzhiyun 	int		changeable_values, all_pages;
1244*4882a593Smuzhiyun 	int		valid_page = 0;
1245*4882a593Smuzhiyun 	int		len, limit;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	if ((common->cmnd[1] & ~0x08) != 0) {	/* Mask away DBD */
1248*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1249*4882a593Smuzhiyun 		return -EINVAL;
1250*4882a593Smuzhiyun 	}
1251*4882a593Smuzhiyun 	pc = common->cmnd[2] >> 6;
1252*4882a593Smuzhiyun 	page_code = common->cmnd[2] & 0x3f;
1253*4882a593Smuzhiyun 	if (pc == 3) {
1254*4882a593Smuzhiyun 		curlun->sense_data = SS_SAVING_PARAMETERS_NOT_SUPPORTED;
1255*4882a593Smuzhiyun 		return -EINVAL;
1256*4882a593Smuzhiyun 	}
1257*4882a593Smuzhiyun 	changeable_values = (pc == 1);
1258*4882a593Smuzhiyun 	all_pages = (page_code == 0x3f);
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun 	/* Write the mode parameter header.  Fixed values are: default
1261*4882a593Smuzhiyun 	 * medium type, no cache control (DPOFUA), and no block descriptors.
1262*4882a593Smuzhiyun 	 * The only variable value is the WriteProtect bit.  We will fill in
1263*4882a593Smuzhiyun 	 * the mode data length later. */
1264*4882a593Smuzhiyun 	memset(buf, 0, 8);
1265*4882a593Smuzhiyun 	if (mscmnd == SC_MODE_SENSE_6) {
1266*4882a593Smuzhiyun 		buf[2] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
1267*4882a593Smuzhiyun 		buf += 4;
1268*4882a593Smuzhiyun 		limit = 255;
1269*4882a593Smuzhiyun 	} else {			/* SC_MODE_SENSE_10 */
1270*4882a593Smuzhiyun 		buf[3] = (curlun->ro ? 0x80 : 0x00);		/* WP, DPOFUA */
1271*4882a593Smuzhiyun 		buf += 8;
1272*4882a593Smuzhiyun 		limit = 65535;		/* Should really be FSG_BUFLEN */
1273*4882a593Smuzhiyun 	}
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	/* No block descriptors */
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	/* The mode pages, in numerical order.  The only page we support
1278*4882a593Smuzhiyun 	 * is the Caching page. */
1279*4882a593Smuzhiyun 	if (page_code == 0x08 || all_pages) {
1280*4882a593Smuzhiyun 		valid_page = 1;
1281*4882a593Smuzhiyun 		buf[0] = 0x08;		/* Page code */
1282*4882a593Smuzhiyun 		buf[1] = 10;		/* Page length */
1283*4882a593Smuzhiyun 		memset(buf+2, 0, 10);	/* None of the fields are changeable */
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 		if (!changeable_values) {
1286*4882a593Smuzhiyun 			buf[2] = 0x04;	/* Write cache enable, */
1287*4882a593Smuzhiyun 					/* Read cache not disabled */
1288*4882a593Smuzhiyun 					/* No cache retention priorities */
1289*4882a593Smuzhiyun 			put_unaligned_be16(0xffff, &buf[4]);
1290*4882a593Smuzhiyun 					/* Don't disable prefetch */
1291*4882a593Smuzhiyun 					/* Minimum prefetch = 0 */
1292*4882a593Smuzhiyun 			put_unaligned_be16(0xffff, &buf[8]);
1293*4882a593Smuzhiyun 					/* Maximum prefetch */
1294*4882a593Smuzhiyun 			put_unaligned_be16(0xffff, &buf[10]);
1295*4882a593Smuzhiyun 					/* Maximum prefetch ceiling */
1296*4882a593Smuzhiyun 		}
1297*4882a593Smuzhiyun 		buf += 12;
1298*4882a593Smuzhiyun 	}
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun 	/* Check that a valid page was requested and the mode data length
1301*4882a593Smuzhiyun 	 * isn't too long. */
1302*4882a593Smuzhiyun 	len = buf - buf0;
1303*4882a593Smuzhiyun 	if (!valid_page || len > limit) {
1304*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1305*4882a593Smuzhiyun 		return -EINVAL;
1306*4882a593Smuzhiyun 	}
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 	/*  Store the mode data length */
1309*4882a593Smuzhiyun 	if (mscmnd == SC_MODE_SENSE_6)
1310*4882a593Smuzhiyun 		buf0[0] = len - 1;
1311*4882a593Smuzhiyun 	else
1312*4882a593Smuzhiyun 		put_unaligned_be16(len - 2, buf0);
1313*4882a593Smuzhiyun 	return len;
1314*4882a593Smuzhiyun }
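/*
 * MODE SENSE layout recap: the mode parameter header written above is
 * 4 bytes for MODE SENSE(6) and 8 bytes for MODE SENSE(10), with the
 * WP bit in byte 2 resp. byte 3.  The only page returned is the
 * Caching page (0x08, 12 bytes), and the mode data length stored at
 * the end excludes the length field itself (hence len - 1 / len - 2).
 */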
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun static int do_start_stop(struct fsg_common *common)
1318*4882a593Smuzhiyun {
1319*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 	if (!curlun) {
1322*4882a593Smuzhiyun 		return -EINVAL;
1323*4882a593Smuzhiyun 	} else if (!curlun->removable) {
1324*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_COMMAND;
1325*4882a593Smuzhiyun 		return -EINVAL;
1326*4882a593Smuzhiyun 	}
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	return 0;
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun 
1331*4882a593Smuzhiyun static int do_prevent_allow(struct fsg_common *common)
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1334*4882a593Smuzhiyun 	int		prevent;
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 	if (!curlun->removable) {
1337*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_COMMAND;
1338*4882a593Smuzhiyun 		return -EINVAL;
1339*4882a593Smuzhiyun 	}
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	prevent = common->cmnd[4] & 0x01;
1342*4882a593Smuzhiyun 	if ((common->cmnd[4] & ~0x01) != 0) {	/* Mask away Prevent */
1343*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1344*4882a593Smuzhiyun 		return -EINVAL;
1345*4882a593Smuzhiyun 	}
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 	if (curlun->prevent_medium_removal && !prevent)
1348*4882a593Smuzhiyun 		fsg_lun_fsync_sub(curlun);
1349*4882a593Smuzhiyun 	curlun->prevent_medium_removal = prevent;
1350*4882a593Smuzhiyun 	return 0;
1351*4882a593Smuzhiyun }
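/*
 * PREVENT ALLOW MEDIUM REMOVAL: only bit 0 of byte 4 (Prevent) is
 * honoured.  When the host re-allows removal after having prevented
 * it, the backing storage is synced first so nothing pending is lost
 * before the medium can go away.
 */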
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun static int do_read_format_capacities(struct fsg_common *common,
1355*4882a593Smuzhiyun 			struct fsg_buffhd *bh)
1356*4882a593Smuzhiyun {
1357*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1358*4882a593Smuzhiyun 	u8		*buf = (u8 *) bh->buf;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	buf[0] = buf[1] = buf[2] = 0;
1361*4882a593Smuzhiyun 	buf[3] = 8;	/* Only the Current/Maximum Capacity Descriptor */
1362*4882a593Smuzhiyun 	buf += 4;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	put_unaligned_be32(curlun->num_sectors, &buf[0]);
1365*4882a593Smuzhiyun 						/* Number of blocks */
1366*4882a593Smuzhiyun 	put_unaligned_be32(512, &buf[4]);	/* Block length */
1367*4882a593Smuzhiyun 	buf[4] = 0x02;				/* Current capacity */
1368*4882a593Smuzhiyun 	return 12;
1369*4882a593Smuzhiyun }
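/*
 * The single 8-byte descriptor above holds the block count in bytes
 * 0-3; byte 4 is then overwritten with the descriptor-type code 0x02
 * ("formatted media"), leaving the 24-bit block length of 512 in
 * bytes 5-7.
 */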
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun static int do_mode_select(struct fsg_common *common, struct fsg_buffhd *bh)
1373*4882a593Smuzhiyun {
1374*4882a593Smuzhiyun 	struct fsg_lun	*curlun = &common->luns[common->lun];
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	/* We don't support MODE SELECT */
1377*4882a593Smuzhiyun 	if (curlun)
1378*4882a593Smuzhiyun 		curlun->sense_data = SS_INVALID_COMMAND;
1379*4882a593Smuzhiyun 	return -EINVAL;
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun static int halt_bulk_in_endpoint(struct fsg_dev *fsg)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun 	int	rc;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	rc = fsg_set_halt(fsg, fsg->bulk_in);
1390*4882a593Smuzhiyun 	if (rc == -EAGAIN)
1391*4882a593Smuzhiyun 		VDBG(fsg, "delayed bulk-in endpoint halt\n");
1392*4882a593Smuzhiyun 	while (rc != 0) {
1393*4882a593Smuzhiyun 		if (rc != -EAGAIN) {
1394*4882a593Smuzhiyun 			WARNING(fsg, "usb_ep_set_halt -> %d\n", rc);
1395*4882a593Smuzhiyun 			rc = 0;
1396*4882a593Smuzhiyun 			break;
1397*4882a593Smuzhiyun 		}
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 		rc = usb_ep_set_halt(fsg->bulk_in);
1400*4882a593Smuzhiyun 	}
1401*4882a593Smuzhiyun 	return rc;
1402*4882a593Smuzhiyun }
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun static int wedge_bulk_in_endpoint(struct fsg_dev *fsg)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun 	int	rc;
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	DBG(fsg, "bulk-in set wedge\n");
1409*4882a593Smuzhiyun 	rc = 0; /* usb_ep_set_wedge(fsg->bulk_in); */
1410*4882a593Smuzhiyun 	if (rc == -EAGAIN)
1411*4882a593Smuzhiyun 		VDBG(fsg, "delayed bulk-in endpoint wedge\n");
1412*4882a593Smuzhiyun 	while (rc != 0) {
1413*4882a593Smuzhiyun 		if (rc != -EAGAIN) {
1414*4882a593Smuzhiyun 			WARNING(fsg, "usb_ep_set_wedge -> %d\n", rc);
1415*4882a593Smuzhiyun 			rc = 0;
1416*4882a593Smuzhiyun 			break;
1417*4882a593Smuzhiyun 		}
1418*4882a593Smuzhiyun 	}
1419*4882a593Smuzhiyun 	return rc;
1420*4882a593Smuzhiyun }
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun static int pad_with_zeros(struct fsg_dev *fsg)
1423*4882a593Smuzhiyun {
1424*4882a593Smuzhiyun 	struct fsg_buffhd	*bh = fsg->common->next_buffhd_to_fill;
1425*4882a593Smuzhiyun 	u32			nkeep = bh->inreq->length;
1426*4882a593Smuzhiyun 	u32			nsend;
1427*4882a593Smuzhiyun 	int			rc;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	bh->state = BUF_STATE_EMPTY;		/* For the first iteration */
1430*4882a593Smuzhiyun 	fsg->common->usb_amount_left = nkeep + fsg->common->residue;
1431*4882a593Smuzhiyun 	while (fsg->common->usb_amount_left > 0) {
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 		/* Wait for the next buffer to be free */
1434*4882a593Smuzhiyun 		while (bh->state != BUF_STATE_EMPTY) {
1435*4882a593Smuzhiyun 			rc = sleep_thread(fsg->common);
1436*4882a593Smuzhiyun 			if (rc)
1437*4882a593Smuzhiyun 				return rc;
1438*4882a593Smuzhiyun 		}
1439*4882a593Smuzhiyun 
1440*4882a593Smuzhiyun 		nsend = min(fsg->common->usb_amount_left, FSG_BUFLEN);
1441*4882a593Smuzhiyun 		memset(bh->buf + nkeep, 0, nsend - nkeep);
1442*4882a593Smuzhiyun 		bh->inreq->length = nsend;
1443*4882a593Smuzhiyun 		bh->inreq->zero = 0;
1444*4882a593Smuzhiyun 		start_transfer(fsg, fsg->bulk_in, bh->inreq,
1445*4882a593Smuzhiyun 				&bh->inreq_busy, &bh->state);
1446*4882a593Smuzhiyun 		bh = fsg->common->next_buffhd_to_fill = bh->next;
1447*4882a593Smuzhiyun 		fsg->common->usb_amount_left -= nsend;
1448*4882a593Smuzhiyun 		nkeep = 0;
1449*4882a593Smuzhiyun 	}
1450*4882a593Smuzhiyun 	return 0;
1451*4882a593Smuzhiyun }
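/*
 * pad_with_zeros() is the fallback used when the gadget cannot stall
 * the bulk-in endpoint: the final partial buffer is kept and the rest
 * of the host-expected transfer length is filled with zero bytes, one
 * FSG_BUFLEN-sized request at a time, so the host still receives
 * exactly the number of bytes announced in the CBW.
 */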
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun static int throw_away_data(struct fsg_common *common)
1454*4882a593Smuzhiyun {
1455*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
1456*4882a593Smuzhiyun 	u32			amount;
1457*4882a593Smuzhiyun 	int			rc;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	for (bh = common->next_buffhd_to_drain;
1460*4882a593Smuzhiyun 	     bh->state != BUF_STATE_EMPTY || common->usb_amount_left > 0;
1461*4882a593Smuzhiyun 	     bh = common->next_buffhd_to_drain) {
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 		/* Throw away the data in a filled buffer */
1464*4882a593Smuzhiyun 		if (bh->state == BUF_STATE_FULL) {
1465*4882a593Smuzhiyun 			bh->state = BUF_STATE_EMPTY;
1466*4882a593Smuzhiyun 			common->next_buffhd_to_drain = bh->next;
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 			/* A short packet or an error ends everything */
1469*4882a593Smuzhiyun 			if (bh->outreq->actual != bh->outreq->length ||
1470*4882a593Smuzhiyun 					bh->outreq->status != 0) {
1471*4882a593Smuzhiyun 				raise_exception(common,
1472*4882a593Smuzhiyun 						FSG_STATE_ABORT_BULK_OUT);
1473*4882a593Smuzhiyun 				return -EINTR;
1474*4882a593Smuzhiyun 			}
1475*4882a593Smuzhiyun 			continue;
1476*4882a593Smuzhiyun 		}
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 		/* Try to submit another request if we need one */
1479*4882a593Smuzhiyun 		bh = common->next_buffhd_to_fill;
1480*4882a593Smuzhiyun 		if (bh->state == BUF_STATE_EMPTY
1481*4882a593Smuzhiyun 		 && common->usb_amount_left > 0) {
1482*4882a593Smuzhiyun 			amount = min(common->usb_amount_left, FSG_BUFLEN);
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 			/* amount is always divisible by 512, hence by
1485*4882a593Smuzhiyun 			 * the bulk-out maxpacket size */
1486*4882a593Smuzhiyun 			bh->outreq->length = amount;
1487*4882a593Smuzhiyun 			bh->bulk_out_intended_length = amount;
1488*4882a593Smuzhiyun 			bh->outreq->short_not_ok = 1;
1489*4882a593Smuzhiyun 			START_TRANSFER_OR(common, bulk_out, bh->outreq,
1490*4882a593Smuzhiyun 					  &bh->outreq_busy, &bh->state)
1491*4882a593Smuzhiyun 				/* Don't know what to do if
1492*4882a593Smuzhiyun 				 * common->fsg is NULL */
1493*4882a593Smuzhiyun 				return -EIO;
1494*4882a593Smuzhiyun 			common->next_buffhd_to_fill = bh->next;
1495*4882a593Smuzhiyun 			common->usb_amount_left -= amount;
1496*4882a593Smuzhiyun 			continue;
1497*4882a593Smuzhiyun 		}
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 		/* Otherwise wait for something to happen */
1500*4882a593Smuzhiyun 		rc = sleep_thread(common);
1501*4882a593Smuzhiyun 		if (rc)
1502*4882a593Smuzhiyun 			return rc;
1503*4882a593Smuzhiyun 	}
1504*4882a593Smuzhiyun 	return 0;
1505*4882a593Smuzhiyun }
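/*
 * throw_away_data() keeps the bulk-out pipe draining when the command
 * handler wants less data than the host promised: full buffers are
 * simply marked empty again, new requests are queued while
 * usb_amount_left is non-zero, and a short packet or transfer error
 * aborts the whole bulk-out phase.
 */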
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun static int finish_reply(struct fsg_common *common)
1509*4882a593Smuzhiyun {
1510*4882a593Smuzhiyun 	struct fsg_buffhd	*bh = common->next_buffhd_to_fill;
1511*4882a593Smuzhiyun 	int			rc = 0;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	switch (common->data_dir) {
1514*4882a593Smuzhiyun 	case DATA_DIR_NONE:
1515*4882a593Smuzhiyun 		break;			/* Nothing to send */
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	/* If we don't know whether the host wants to read or write,
1518*4882a593Smuzhiyun 	 * this must be CB or CBI with an unknown command.  We mustn't
1519*4882a593Smuzhiyun 	 * try to send or receive any data.  So stall both bulk pipes
1520*4882a593Smuzhiyun 	 * if we can and wait for a reset. */
1521*4882a593Smuzhiyun 	case DATA_DIR_UNKNOWN:
1522*4882a593Smuzhiyun 		if (!common->can_stall) {
1523*4882a593Smuzhiyun 			/* Nothing */
1524*4882a593Smuzhiyun 		} else if (fsg_is_set(common)) {
1525*4882a593Smuzhiyun 			fsg_set_halt(common->fsg, common->fsg->bulk_out);
1526*4882a593Smuzhiyun 			rc = halt_bulk_in_endpoint(common->fsg);
1527*4882a593Smuzhiyun 		} else {
1528*4882a593Smuzhiyun 			/* Don't know what to do if common->fsg is NULL */
1529*4882a593Smuzhiyun 			rc = -EIO;
1530*4882a593Smuzhiyun 		}
1531*4882a593Smuzhiyun 		break;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	/* All but the last buffer of data must have already been sent */
1534*4882a593Smuzhiyun 	case DATA_DIR_TO_HOST:
1535*4882a593Smuzhiyun 		if (common->data_size == 0) {
1536*4882a593Smuzhiyun 			/* Nothing to send */
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 		/* If there's no residue, simply send the last buffer */
1539*4882a593Smuzhiyun 		} else if (common->residue == 0) {
1540*4882a593Smuzhiyun 			bh->inreq->zero = 0;
1541*4882a593Smuzhiyun 			START_TRANSFER_OR(common, bulk_in, bh->inreq,
1542*4882a593Smuzhiyun 					  &bh->inreq_busy, &bh->state)
1543*4882a593Smuzhiyun 				return -EIO;
1544*4882a593Smuzhiyun 			common->next_buffhd_to_fill = bh->next;
1545*4882a593Smuzhiyun 
1546*4882a593Smuzhiyun 		/* For Bulk-only, if we're allowed to stall then send the
1547*4882a593Smuzhiyun 		 * short packet and halt the bulk-in endpoint.  If we can't
1548*4882a593Smuzhiyun 		 * stall, pad out the remaining data with 0's. */
1549*4882a593Smuzhiyun 		} else if (common->can_stall) {
1550*4882a593Smuzhiyun 			bh->inreq->zero = 1;
1551*4882a593Smuzhiyun 			START_TRANSFER_OR(common, bulk_in, bh->inreq,
1552*4882a593Smuzhiyun 					  &bh->inreq_busy, &bh->state)
1553*4882a593Smuzhiyun 				/* Don't know what to do if
1554*4882a593Smuzhiyun 				 * common->fsg is NULL */
1555*4882a593Smuzhiyun 				rc = -EIO;
1556*4882a593Smuzhiyun 			common->next_buffhd_to_fill = bh->next;
1557*4882a593Smuzhiyun 			if (common->fsg)
1558*4882a593Smuzhiyun 				rc = halt_bulk_in_endpoint(common->fsg);
1559*4882a593Smuzhiyun 		} else if (fsg_is_set(common)) {
1560*4882a593Smuzhiyun 			rc = pad_with_zeros(common->fsg);
1561*4882a593Smuzhiyun 		} else {
1562*4882a593Smuzhiyun 			/* Don't know what to do if common->fsg is NULL */
1563*4882a593Smuzhiyun 			rc = -EIO;
1564*4882a593Smuzhiyun 		}
1565*4882a593Smuzhiyun 		break;
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 	/* We have processed all we want from the data the host has sent.
1568*4882a593Smuzhiyun 	 * There may still be outstanding bulk-out requests. */
1569*4882a593Smuzhiyun 	case DATA_DIR_FROM_HOST:
1570*4882a593Smuzhiyun 		if (common->residue == 0) {
1571*4882a593Smuzhiyun 			/* Nothing to receive */
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 		/* Did the host stop sending unexpectedly early? */
1574*4882a593Smuzhiyun 		} else if (common->short_packet_received) {
1575*4882a593Smuzhiyun 			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1576*4882a593Smuzhiyun 			rc = -EINTR;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 		/* We haven't processed all the incoming data.  Even though
1579*4882a593Smuzhiyun 		 * we may be allowed to stall, doing so would cause a race.
1580*4882a593Smuzhiyun 		 * The controller may already have ACK'ed all the remaining
1581*4882a593Smuzhiyun 		 * bulk-out packets, in which case the host wouldn't see a
1582*4882a593Smuzhiyun 		 * STALL.  Not realizing the endpoint was halted, it wouldn't
1583*4882a593Smuzhiyun 		 * clear the halt -- leading to problems later on. */
1584*4882a593Smuzhiyun #if 0
1585*4882a593Smuzhiyun 		} else if (common->can_stall) {
1586*4882a593Smuzhiyun 			if (fsg_is_set(common))
1587*4882a593Smuzhiyun 				fsg_set_halt(common->fsg,
1588*4882a593Smuzhiyun 					     common->fsg->bulk_out);
1589*4882a593Smuzhiyun 			raise_exception(common, FSG_STATE_ABORT_BULK_OUT);
1590*4882a593Smuzhiyun 			rc = -EINTR;
1591*4882a593Smuzhiyun #endif
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 		/* We can't stall.  Read in the excess data and throw it
1594*4882a593Smuzhiyun 		 * all away. */
1595*4882a593Smuzhiyun 		} else {
1596*4882a593Smuzhiyun 			rc = throw_away_data(common);
1597*4882a593Smuzhiyun 		}
1598*4882a593Smuzhiyun 		break;
1599*4882a593Smuzhiyun 	}
1600*4882a593Smuzhiyun 	return rc;
1601*4882a593Smuzhiyun }
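/*
 * finish_reply() closes out the data phase: for IN transfers it either
 * queues the last buffer, or (when there is residue) sends a short
 * packet and halts the bulk-in endpoint if stalling is allowed,
 * falling back to pad_with_zeros() otherwise; for OUT transfers any
 * excess host data is discarded via throw_away_data().
 */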
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun static int send_status(struct fsg_common *common)
1605*4882a593Smuzhiyun {
1606*4882a593Smuzhiyun 	struct fsg_lun		*curlun = &common->luns[common->lun];
1607*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
1608*4882a593Smuzhiyun 	struct bulk_cs_wrap	*csw;
1609*4882a593Smuzhiyun 	int			rc;
1610*4882a593Smuzhiyun 	u8			status = USB_STATUS_PASS;
1611*4882a593Smuzhiyun 	u32			sd, sdinfo = 0;
1612*4882a593Smuzhiyun 
1613*4882a593Smuzhiyun 	/* Wait for the next buffer to become available */
1614*4882a593Smuzhiyun 	bh = common->next_buffhd_to_fill;
1615*4882a593Smuzhiyun 	while (bh->state != BUF_STATE_EMPTY) {
1616*4882a593Smuzhiyun 		rc = sleep_thread(common);
1617*4882a593Smuzhiyun 		if (rc)
1618*4882a593Smuzhiyun 			return rc;
1619*4882a593Smuzhiyun 	}
1620*4882a593Smuzhiyun 
1621*4882a593Smuzhiyun 	if (curlun)
1622*4882a593Smuzhiyun 		sd = curlun->sense_data;
1623*4882a593Smuzhiyun 	else if (common->bad_lun_okay)
1624*4882a593Smuzhiyun 		sd = SS_NO_SENSE;
1625*4882a593Smuzhiyun 	else
1626*4882a593Smuzhiyun 		sd = SS_LOGICAL_UNIT_NOT_SUPPORTED;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	if (common->phase_error) {
1629*4882a593Smuzhiyun 		DBG(common, "sending phase-error status\n");
1630*4882a593Smuzhiyun 		status = USB_STATUS_PHASE_ERROR;
1631*4882a593Smuzhiyun 		sd = SS_INVALID_COMMAND;
1632*4882a593Smuzhiyun 	} else if (sd != SS_NO_SENSE) {
1633*4882a593Smuzhiyun 		DBG(common, "sending command-failure status\n");
1634*4882a593Smuzhiyun 		status = USB_STATUS_FAIL;
1635*4882a593Smuzhiyun 		VDBG(common, "  sense data: SK x%02x, ASC x%02x, ASCQ x%02x;"
1636*4882a593Smuzhiyun 			"  info x%x\n",
1637*4882a593Smuzhiyun 			SK(sd), ASC(sd), ASCQ(sd), sdinfo);
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	/* Store and send the Bulk-only CSW */
1641*4882a593Smuzhiyun 	csw = (void *)bh->buf;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	csw->Signature = cpu_to_le32(USB_BULK_CS_SIG);
1644*4882a593Smuzhiyun 	csw->Tag = common->tag;
1645*4882a593Smuzhiyun 	csw->Residue = cpu_to_le32(common->residue);
1646*4882a593Smuzhiyun 	csw->Status = status;
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun 	bh->inreq->length = USB_BULK_CS_WRAP_LEN;
1649*4882a593Smuzhiyun 	bh->inreq->zero = 0;
1650*4882a593Smuzhiyun 	START_TRANSFER_OR(common, bulk_in, bh->inreq,
1651*4882a593Smuzhiyun 			  &bh->inreq_busy, &bh->state)
1652*4882a593Smuzhiyun 		/* Don't know what to do if common->fsg is NULL */
1653*4882a593Smuzhiyun 		return -EIO;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	common->next_buffhd_to_fill = bh->next;
1656*4882a593Smuzhiyun 	return 0;
1657*4882a593Smuzhiyun }
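/*
 * The CSW built above is the 13-byte Bulk-Only status wrapper:
 * dCSWSignature ("USBS"), dCSWTag echoing the tag from the matching
 * CBW, dCSWDataResidue (bytes expected but not transferred) and
 * bCSWStatus (0 = pass, 1 = fail, 2 = phase error).
 */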
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
1661*4882a593Smuzhiyun #ifdef CONFIG_CMD_ROCKUSB
1662*4882a593Smuzhiyun #include "f_rockusb.c"
1663*4882a593Smuzhiyun #endif
1664*4882a593Smuzhiyun 
1665*4882a593Smuzhiyun /* Check whether the command is properly formed and whether its data size
1666*4882a593Smuzhiyun  * and direction agree with the values we already have. */
1667*4882a593Smuzhiyun static int check_command(struct fsg_common *common, int cmnd_size,
1668*4882a593Smuzhiyun 		enum data_direction data_dir, unsigned int mask,
1669*4882a593Smuzhiyun 		int needs_medium, const char *name)
1670*4882a593Smuzhiyun {
1671*4882a593Smuzhiyun 	int			i;
1672*4882a593Smuzhiyun 	int			lun = common->cmnd[1] >> 5;
1673*4882a593Smuzhiyun 	static const char	dirletter[4] = {'u', 'o', 'i', 'n'};
1674*4882a593Smuzhiyun 	char			hdlen[20];
1675*4882a593Smuzhiyun 	struct fsg_lun		*curlun;
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	hdlen[0] = 0;
1678*4882a593Smuzhiyun 	if (common->data_dir != DATA_DIR_UNKNOWN)
1679*4882a593Smuzhiyun 		sprintf(hdlen, ", H%c=%u", dirletter[(int) common->data_dir],
1680*4882a593Smuzhiyun 				common->data_size);
1681*4882a593Smuzhiyun 	VDBG(common, "SCSI command: %s;  Dc=%d, D%c=%u;  Hc=%d%s\n",
1682*4882a593Smuzhiyun 	     name, cmnd_size, dirletter[(int) data_dir],
1683*4882a593Smuzhiyun 	     common->data_size_from_cmnd, common->cmnd_size, hdlen);
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun 	/* We can't reply at all until we know the correct data direction
1686*4882a593Smuzhiyun 	 * and size. */
1687*4882a593Smuzhiyun 	if (common->data_size_from_cmnd == 0)
1688*4882a593Smuzhiyun 		data_dir = DATA_DIR_NONE;
1689*4882a593Smuzhiyun 	if (common->data_size < common->data_size_from_cmnd) {
1690*4882a593Smuzhiyun 		/* Host data size < Device data size is a phase error.
1691*4882a593Smuzhiyun 		 * Carry out the command, but only transfer as much as
1692*4882a593Smuzhiyun 		 * we are allowed. */
1693*4882a593Smuzhiyun 		common->data_size_from_cmnd = common->data_size;
1694*4882a593Smuzhiyun 		common->phase_error = 1;
1695*4882a593Smuzhiyun 	}
1696*4882a593Smuzhiyun 	common->residue = common->data_size;
1697*4882a593Smuzhiyun 	common->usb_amount_left = common->data_size;
1698*4882a593Smuzhiyun 
1699*4882a593Smuzhiyun 	/* Conflicting data directions is a phase error */
1700*4882a593Smuzhiyun 	if (common->data_dir != data_dir
1701*4882a593Smuzhiyun 	 && common->data_size_from_cmnd > 0) {
1702*4882a593Smuzhiyun 		common->phase_error = 1;
1703*4882a593Smuzhiyun 		return -EINVAL;
1704*4882a593Smuzhiyun 	}
1705*4882a593Smuzhiyun 
1706*4882a593Smuzhiyun 	/* Verify the length of the command itself */
1707*4882a593Smuzhiyun 	if (cmnd_size != common->cmnd_size) {
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 		/* Special case workaround: There are plenty of buggy SCSI
1710*4882a593Smuzhiyun 		 * implementations. Many have issues with cbw->Length
1711*4882a593Smuzhiyun 		 * field passing a wrong command size. For those cases we
1712*4882a593Smuzhiyun 		 * always try to work around the problem by using the length
1713*4882a593Smuzhiyun 		 * sent by the host side provided it is at least as large
1714*4882a593Smuzhiyun 		 * as the correct command length.
1715*4882a593Smuzhiyun 		 * Examples of such cases would be MS-Windows, which issues
1716*4882a593Smuzhiyun 		 * REQUEST SENSE with cbw->Length == 12 where it should
1717*4882a593Smuzhiyun 		 * be 6, and xbox360 issuing INQUIRY, TEST UNIT READY and
1718*4882a593Smuzhiyun 		 * REQUEST SENSE with cbw->Length == 10 where it should
1719*4882a593Smuzhiyun 		 * be 6 as well.
1720*4882a593Smuzhiyun 		 */
1721*4882a593Smuzhiyun 		if (cmnd_size <= common->cmnd_size) {
1722*4882a593Smuzhiyun 			DBG(common, "%s is buggy! Expected length %d "
1723*4882a593Smuzhiyun 			    "but we got %d\n", name,
1724*4882a593Smuzhiyun 			    cmnd_size, common->cmnd_size);
1725*4882a593Smuzhiyun 			cmnd_size = common->cmnd_size;
1726*4882a593Smuzhiyun 		} else {
1727*4882a593Smuzhiyun 			common->phase_error = 1;
1728*4882a593Smuzhiyun 			return -EINVAL;
1729*4882a593Smuzhiyun 		}
1730*4882a593Smuzhiyun 	}
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	/* Check that the LUN values are consistent */
1733*4882a593Smuzhiyun 	if (common->lun != lun)
1734*4882a593Smuzhiyun 		DBG(common, "using LUN %d from CBW, not LUN %d from CDB\n",
1735*4882a593Smuzhiyun 		    common->lun, lun);
1736*4882a593Smuzhiyun 
1737*4882a593Smuzhiyun 	/* Check the LUN */
1738*4882a593Smuzhiyun 	if (common->lun < common->nluns) {
1739*4882a593Smuzhiyun 		curlun = &common->luns[common->lun];
1740*4882a593Smuzhiyun 		if (common->cmnd[0] != SC_REQUEST_SENSE) {
1741*4882a593Smuzhiyun 			curlun->sense_data = SS_NO_SENSE;
1742*4882a593Smuzhiyun 			curlun->info_valid = 0;
1743*4882a593Smuzhiyun 		}
1744*4882a593Smuzhiyun 	} else {
1745*4882a593Smuzhiyun 		curlun = NULL;
1746*4882a593Smuzhiyun 		common->bad_lun_okay = 0;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 		/* INQUIRY and REQUEST SENSE commands are explicitly allowed
1749*4882a593Smuzhiyun 		 * to use unsupported LUNs; all others may not. */
1750*4882a593Smuzhiyun 		if (common->cmnd[0] != SC_INQUIRY &&
1751*4882a593Smuzhiyun 		    common->cmnd[0] != SC_REQUEST_SENSE) {
1752*4882a593Smuzhiyun 			DBG(common, "unsupported LUN %d\n", common->lun);
1753*4882a593Smuzhiyun 			return -EINVAL;
1754*4882a593Smuzhiyun 		}
1755*4882a593Smuzhiyun 	}
1756*4882a593Smuzhiyun #if 0
1757*4882a593Smuzhiyun 	/* If a unit attention condition exists, only INQUIRY and
1758*4882a593Smuzhiyun 	 * REQUEST SENSE commands are allowed; anything else must fail. */
1759*4882a593Smuzhiyun 	if (curlun && curlun->unit_attention_data != SS_NO_SENSE &&
1760*4882a593Smuzhiyun 			common->cmnd[0] != SC_INQUIRY &&
1761*4882a593Smuzhiyun 			common->cmnd[0] != SC_REQUEST_SENSE) {
1762*4882a593Smuzhiyun 		curlun->sense_data = curlun->unit_attention_data;
1763*4882a593Smuzhiyun 		curlun->unit_attention_data = SS_NO_SENSE;
1764*4882a593Smuzhiyun 		return -EINVAL;
1765*4882a593Smuzhiyun 	}
1766*4882a593Smuzhiyun #endif
1767*4882a593Smuzhiyun 	/* Check that only command bytes listed in the mask are non-zero */
1768*4882a593Smuzhiyun 	common->cmnd[1] &= 0x1f;			/* Mask away the LUN */
1769*4882a593Smuzhiyun 	for (i = 1; i < cmnd_size; ++i) {
1770*4882a593Smuzhiyun 		if (common->cmnd[i] && !(mask & (1 << i))) {
1771*4882a593Smuzhiyun 			if (curlun)
1772*4882a593Smuzhiyun 				curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
1773*4882a593Smuzhiyun 			return -EINVAL;
1774*4882a593Smuzhiyun 		}
1775*4882a593Smuzhiyun 	}
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	return 0;
1778*4882a593Smuzhiyun }
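/*
 * The "mask" argument above marks which CDB bytes may legally be
 * non-zero (bit i corresponds to byte i).  For example, READ(10)
 * passes (1<<1) | (0xf<<2) | (3<<7), i.e. the flags byte, the four
 * LBA bytes and the two transfer-length bytes; any other non-zero CDB
 * byte is answered with INVALID FIELD IN CDB.
 */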
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun static int do_scsi_command(struct fsg_common *common)
1782*4882a593Smuzhiyun {
1783*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
1784*4882a593Smuzhiyun 	int			rc;
1785*4882a593Smuzhiyun 	int			reply = -EINVAL;
1786*4882a593Smuzhiyun 	int			i;
1787*4882a593Smuzhiyun 	static char		unknown[16];
1788*4882a593Smuzhiyun 	struct fsg_lun		*curlun = &common->luns[common->lun];
1789*4882a593Smuzhiyun 	const char		*cdev_name __maybe_unused;
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	dump_cdb(common);
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	/* Wait for the next buffer to become available for data or status */
1794*4882a593Smuzhiyun 	bh = common->next_buffhd_to_fill;
1795*4882a593Smuzhiyun 	common->next_buffhd_to_drain = bh;
1796*4882a593Smuzhiyun 	while (bh->state != BUF_STATE_EMPTY) {
1797*4882a593Smuzhiyun 		rc = sleep_thread(common);
1798*4882a593Smuzhiyun 		if (rc)
1799*4882a593Smuzhiyun 			return rc;
1800*4882a593Smuzhiyun 	}
1801*4882a593Smuzhiyun 	common->phase_error = 0;
1802*4882a593Smuzhiyun 	common->short_packet_received = 0;
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 	down_read(&common->filesem);	/* We're using the backing file */
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	cdev_name = common->fsg->function.config->cdev->driver->name;
1807*4882a593Smuzhiyun 	if (IS_RKUSB_UMS_DNL(cdev_name)) {
1808*4882a593Smuzhiyun 		rc = rkusb_cmd_process(common, bh, &reply);
1809*4882a593Smuzhiyun 		if (rc == RKUSB_RC_FINISHED || rc == RKUSB_RC_ERROR)
1810*4882a593Smuzhiyun 			goto finish;
1811*4882a593Smuzhiyun 		else if (rc == RKUSB_RC_UNKNOWN_CMND)
1812*4882a593Smuzhiyun 			goto unknown_cmnd;
1813*4882a593Smuzhiyun 	}
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	switch (common->cmnd[0]) {
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun 	case SC_INQUIRY:
1818*4882a593Smuzhiyun 		common->data_size_from_cmnd = common->cmnd[4];
1819*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1820*4882a593Smuzhiyun 				      (1<<4), 0,
1821*4882a593Smuzhiyun 				      "INQUIRY");
1822*4882a593Smuzhiyun 		if (reply == 0)
1823*4882a593Smuzhiyun 			reply = do_inquiry(common, bh);
1824*4882a593Smuzhiyun 		break;
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun 	case SC_MODE_SELECT_6:
1827*4882a593Smuzhiyun 		common->data_size_from_cmnd = common->cmnd[4];
1828*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1829*4882a593Smuzhiyun 				      (1<<1) | (1<<4), 0,
1830*4882a593Smuzhiyun 				      "MODE SELECT(6)");
1831*4882a593Smuzhiyun 		if (reply == 0)
1832*4882a593Smuzhiyun 			reply = do_mode_select(common, bh);
1833*4882a593Smuzhiyun 		break;
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun 	case SC_MODE_SELECT_10:
1836*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1837*4882a593Smuzhiyun 			get_unaligned_be16(&common->cmnd[7]);
1838*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_FROM_HOST,
1839*4882a593Smuzhiyun 				      (1<<1) | (3<<7), 0,
1840*4882a593Smuzhiyun 				      "MODE SELECT(10)");
1841*4882a593Smuzhiyun 		if (reply == 0)
1842*4882a593Smuzhiyun 			reply = do_mode_select(common, bh);
1843*4882a593Smuzhiyun 		break;
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	case SC_MODE_SENSE_6:
1846*4882a593Smuzhiyun 		common->data_size_from_cmnd = common->cmnd[4];
1847*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1848*4882a593Smuzhiyun 				      (1<<1) | (1<<2) | (1<<4), 0,
1849*4882a593Smuzhiyun 				      "MODE SENSE(6)");
1850*4882a593Smuzhiyun 		if (reply == 0)
1851*4882a593Smuzhiyun 			reply = do_mode_sense(common, bh);
1852*4882a593Smuzhiyun 		break;
1853*4882a593Smuzhiyun 
1854*4882a593Smuzhiyun 	case SC_MODE_SENSE_10:
1855*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1856*4882a593Smuzhiyun 			get_unaligned_be16(&common->cmnd[7]);
1857*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1858*4882a593Smuzhiyun 				      (1<<1) | (1<<2) | (3<<7), 0,
1859*4882a593Smuzhiyun 				      "MODE SENSE(10)");
1860*4882a593Smuzhiyun 		if (reply == 0)
1861*4882a593Smuzhiyun 			reply = do_mode_sense(common, bh);
1862*4882a593Smuzhiyun 		break;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	case SC_PREVENT_ALLOW_MEDIUM_REMOVAL:
1865*4882a593Smuzhiyun 		common->data_size_from_cmnd = 0;
1866*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_NONE,
1867*4882a593Smuzhiyun 				      (1<<4), 0,
1868*4882a593Smuzhiyun 				      "PREVENT-ALLOW MEDIUM REMOVAL");
1869*4882a593Smuzhiyun 		if (reply == 0)
1870*4882a593Smuzhiyun 			reply = do_prevent_allow(common);
1871*4882a593Smuzhiyun 		break;
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	case SC_READ_6:
1874*4882a593Smuzhiyun 		i = common->cmnd[4];
1875*4882a593Smuzhiyun 		common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1876*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1877*4882a593Smuzhiyun 				      (7<<1) | (1<<4), 1,
1878*4882a593Smuzhiyun 				      "READ(6)");
1879*4882a593Smuzhiyun 		if (reply == 0)
1880*4882a593Smuzhiyun 			reply = do_read(common);
1881*4882a593Smuzhiyun 		break;
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	case SC_READ_10:
1884*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1885*4882a593Smuzhiyun 				get_unaligned_be16(&common->cmnd[7]) << 9;
1886*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1887*4882a593Smuzhiyun 				      (1<<1) | (0xf<<2) | (3<<7), 1,
1888*4882a593Smuzhiyun 				      "READ(10)");
1889*4882a593Smuzhiyun 		if (reply == 0)
1890*4882a593Smuzhiyun 			reply = do_read(common);
1891*4882a593Smuzhiyun 		break;
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 	case SC_READ_12:
1894*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1895*4882a593Smuzhiyun 				get_unaligned_be32(&common->cmnd[6]) << 9;
1896*4882a593Smuzhiyun 		reply = check_command(common, 12, DATA_DIR_TO_HOST,
1897*4882a593Smuzhiyun 				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
1898*4882a593Smuzhiyun 				      "READ(12)");
1899*4882a593Smuzhiyun 		if (reply == 0)
1900*4882a593Smuzhiyun 			reply = do_read(common);
1901*4882a593Smuzhiyun 		break;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 	case SC_READ_CAPACITY:
1904*4882a593Smuzhiyun 		common->data_size_from_cmnd = 8;
1905*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1906*4882a593Smuzhiyun 				      (0xf<<2) | (1<<8), 1,
1907*4882a593Smuzhiyun 				      "READ CAPACITY");
1908*4882a593Smuzhiyun 		if (reply == 0)
1909*4882a593Smuzhiyun 			reply = do_read_capacity(common, bh);
1910*4882a593Smuzhiyun 		break;
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	case SC_READ_HEADER:
1913*4882a593Smuzhiyun 		if (!common->luns[common->lun].cdrom)
1914*4882a593Smuzhiyun 			goto unknown_cmnd;
1915*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1916*4882a593Smuzhiyun 			get_unaligned_be16(&common->cmnd[7]);
1917*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1918*4882a593Smuzhiyun 				      (3<<7) | (0x1f<<1), 1,
1919*4882a593Smuzhiyun 				      "READ HEADER");
1920*4882a593Smuzhiyun 		if (reply == 0)
1921*4882a593Smuzhiyun 			reply = do_read_header(common, bh);
1922*4882a593Smuzhiyun 		break;
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 	case SC_READ_TOC:
1925*4882a593Smuzhiyun 		if (!common->luns[common->lun].cdrom)
1926*4882a593Smuzhiyun 			goto unknown_cmnd;
1927*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1928*4882a593Smuzhiyun 			get_unaligned_be16(&common->cmnd[7]);
1929*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1930*4882a593Smuzhiyun 				      (7<<6) | (1<<1), 1,
1931*4882a593Smuzhiyun 				      "READ TOC");
1932*4882a593Smuzhiyun 		if (reply == 0)
1933*4882a593Smuzhiyun 			reply = do_read_toc(common, bh);
1934*4882a593Smuzhiyun 		break;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	case SC_READ_FORMAT_CAPACITIES:
1937*4882a593Smuzhiyun 		common->data_size_from_cmnd =
1938*4882a593Smuzhiyun 			get_unaligned_be16(&common->cmnd[7]);
1939*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_TO_HOST,
1940*4882a593Smuzhiyun 				      (3<<7), 1,
1941*4882a593Smuzhiyun 				      "READ FORMAT CAPACITIES");
1942*4882a593Smuzhiyun 		if (reply == 0)
1943*4882a593Smuzhiyun 			reply = do_read_format_capacities(common, bh);
1944*4882a593Smuzhiyun 		break;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	case SC_REQUEST_SENSE:
1947*4882a593Smuzhiyun 		common->data_size_from_cmnd = common->cmnd[4];
1948*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_TO_HOST,
1949*4882a593Smuzhiyun 				      (1<<4), 0,
1950*4882a593Smuzhiyun 				      "REQUEST SENSE");
1951*4882a593Smuzhiyun 		if (reply == 0)
1952*4882a593Smuzhiyun 			reply = do_request_sense(common, bh);
1953*4882a593Smuzhiyun 		break;
1954*4882a593Smuzhiyun 
1955*4882a593Smuzhiyun 	case SC_START_STOP_UNIT:
1956*4882a593Smuzhiyun 		common->data_size_from_cmnd = 0;
1957*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_NONE,
1958*4882a593Smuzhiyun 				      (1<<1) | (1<<4), 0,
1959*4882a593Smuzhiyun 				      "START-STOP UNIT");
1960*4882a593Smuzhiyun 		if (reply == 0)
1961*4882a593Smuzhiyun 			reply = do_start_stop(common);
1962*4882a593Smuzhiyun 		break;
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun 	case SC_SYNCHRONIZE_CACHE:
1965*4882a593Smuzhiyun 		common->data_size_from_cmnd = 0;
1966*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_NONE,
1967*4882a593Smuzhiyun 				      (0xf<<2) | (3<<7), 1,
1968*4882a593Smuzhiyun 				      "SYNCHRONIZE CACHE");
1969*4882a593Smuzhiyun 		if (reply == 0)
1970*4882a593Smuzhiyun 			reply = do_synchronize_cache(common);
1971*4882a593Smuzhiyun 		break;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 	case SC_TEST_UNIT_READY:
1974*4882a593Smuzhiyun 		common->data_size_from_cmnd = 0;
1975*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_NONE,
1976*4882a593Smuzhiyun 				0, 1,
1977*4882a593Smuzhiyun 				"TEST UNIT READY");
1978*4882a593Smuzhiyun 		break;
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	/* Although optional, this command is used by MS-Windows.  We
1981*4882a593Smuzhiyun 	 * support a minimal version: BytChk must be 0. */
1982*4882a593Smuzhiyun 	case SC_VERIFY:
1983*4882a593Smuzhiyun 		common->data_size_from_cmnd = 0;
1984*4882a593Smuzhiyun 		reply = check_command(common, 10, DATA_DIR_NONE,
1985*4882a593Smuzhiyun 				      (1<<1) | (0xf<<2) | (3<<7), 1,
1986*4882a593Smuzhiyun 				      "VERIFY");
1987*4882a593Smuzhiyun 		if (reply == 0)
1988*4882a593Smuzhiyun 			reply = do_verify(common);
1989*4882a593Smuzhiyun 		break;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	case SC_WRITE_6:
1992*4882a593Smuzhiyun 		i = common->cmnd[4];
1993*4882a593Smuzhiyun 		common->data_size_from_cmnd = (i == 0 ? 256 : i) << 9;
1994*4882a593Smuzhiyun 		reply = check_command(common, 6, DATA_DIR_FROM_HOST,
1995*4882a593Smuzhiyun 				      (7<<1) | (1<<4), 1,
1996*4882a593Smuzhiyun 				      "WRITE(6)");
1997*4882a593Smuzhiyun 		if (reply == 0)
1998*4882a593Smuzhiyun 			reply = do_write(common);
1999*4882a593Smuzhiyun 		break;
2000*4882a593Smuzhiyun 
2001*4882a593Smuzhiyun 	case SC_WRITE_10:
2002*4882a593Smuzhiyun 		common->data_size_from_cmnd =
2003*4882a593Smuzhiyun 				get_unaligned_be16(&common->cmnd[7]) << 9;
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 		if (IS_RKUSB_UMS_DNL(cdev_name)) {
2006*4882a593Smuzhiyun 			reply = check_command(common, common->cmnd_size, DATA_DIR_FROM_HOST,
2007*4882a593Smuzhiyun 					      (1 << 1) | (0xf << 2) | (3 << 7) | (0xf << 9), 1,
2008*4882a593Smuzhiyun 					      "WRITE(10)");
2009*4882a593Smuzhiyun 		} else {
2010*4882a593Smuzhiyun 			reply = check_command(common, 10, DATA_DIR_FROM_HOST,
2011*4882a593Smuzhiyun 					      (1 << 1) | (0xf << 2) | (3 << 7), 1,
2012*4882a593Smuzhiyun 					      "WRITE(10)");
2013*4882a593Smuzhiyun 		}
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun 		if (reply == 0)
2016*4882a593Smuzhiyun 			reply = do_write(common);
2017*4882a593Smuzhiyun 		break;
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun 	case SC_WRITE_12:
2020*4882a593Smuzhiyun 		common->data_size_from_cmnd =
2021*4882a593Smuzhiyun 				get_unaligned_be32(&common->cmnd[6]) << 9;
2022*4882a593Smuzhiyun 		reply = check_command(common, 12, DATA_DIR_FROM_HOST,
2023*4882a593Smuzhiyun 				      (1<<1) | (0xf<<2) | (0xf<<6), 1,
2024*4882a593Smuzhiyun 				      "WRITE(12)");
2025*4882a593Smuzhiyun 		if (reply == 0)
2026*4882a593Smuzhiyun 			reply = do_write(common);
2027*4882a593Smuzhiyun 		break;
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun 	/* Some mandatory commands that we recognize but don't implement.
2030*4882a593Smuzhiyun 	 * They don't mean much in this setting.  It's left as an exercise
2031*4882a593Smuzhiyun 	 * for anyone interested to implement RESERVE and RELEASE in terms
2032*4882a593Smuzhiyun 	 * of Posix locks. */
2033*4882a593Smuzhiyun 	case SC_FORMAT_UNIT:
2034*4882a593Smuzhiyun 	case SC_RELEASE:
2035*4882a593Smuzhiyun 	case SC_RESERVE:
2036*4882a593Smuzhiyun 	case SC_SEND_DIAGNOSTIC:
2037*4882a593Smuzhiyun 		/* Fall through */
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	default:
2040*4882a593Smuzhiyun unknown_cmnd:
2041*4882a593Smuzhiyun 		common->data_size_from_cmnd = 0;
2042*4882a593Smuzhiyun 		sprintf(unknown, "Unknown x%02x", common->cmnd[0]);
2043*4882a593Smuzhiyun 		reply = check_command(common, common->cmnd_size,
2044*4882a593Smuzhiyun 				      DATA_DIR_UNKNOWN, 0xff, 0, unknown);
2045*4882a593Smuzhiyun 		if (reply == 0) {
2046*4882a593Smuzhiyun 			curlun->sense_data = SS_INVALID_COMMAND;
2047*4882a593Smuzhiyun 			reply = -EINVAL;
2048*4882a593Smuzhiyun 		}
2049*4882a593Smuzhiyun 		break;
2050*4882a593Smuzhiyun 	}
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun finish:
2053*4882a593Smuzhiyun 	up_read(&common->filesem);
2054*4882a593Smuzhiyun 
2055*4882a593Smuzhiyun 	if (reply == -EINTR)
2056*4882a593Smuzhiyun 		return -EINTR;
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	/* Set up the single reply buffer for finish_reply() */
2059*4882a593Smuzhiyun 	if (reply == -EINVAL)
2060*4882a593Smuzhiyun 		reply = 0;		/* Error reply length */
2061*4882a593Smuzhiyun 	if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) {
2062*4882a593Smuzhiyun 		reply = min((u32) reply, common->data_size_from_cmnd);
2063*4882a593Smuzhiyun 		bh->inreq->length = reply;
2064*4882a593Smuzhiyun 		bh->state = BUF_STATE_FULL;
2065*4882a593Smuzhiyun 		common->residue -= reply;
2066*4882a593Smuzhiyun 	}				/* Otherwise it's already set */
2067*4882a593Smuzhiyun 
2068*4882a593Smuzhiyun 	return 0;
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
2074*4882a593Smuzhiyun {
2075*4882a593Smuzhiyun 	struct usb_request	*req = bh->outreq;
2076*4882a593Smuzhiyun 	struct fsg_bulk_cb_wrap	*cbw = req->buf;
2077*4882a593Smuzhiyun 	struct fsg_common	*common = fsg->common;
2078*4882a593Smuzhiyun 
2079*4882a593Smuzhiyun 	/* Was this a real packet?  Should it be ignored? */
2080*4882a593Smuzhiyun 	if (req->status || test_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags))
2081*4882a593Smuzhiyun 		return -EINVAL;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	/* Is the CBW valid? */
2084*4882a593Smuzhiyun 	if (req->actual != USB_BULK_CB_WRAP_LEN ||
2085*4882a593Smuzhiyun 			cbw->Signature != cpu_to_le32(
2086*4882a593Smuzhiyun 				USB_BULK_CB_SIG)) {
2087*4882a593Smuzhiyun 		DBG(fsg, "invalid CBW: len %u sig 0x%x\n",
2088*4882a593Smuzhiyun 				req->actual,
2089*4882a593Smuzhiyun 				le32_to_cpu(cbw->Signature));
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun 		/* The Bulk-only spec says we MUST stall the IN endpoint
2092*4882a593Smuzhiyun 		 * (6.6.1), so it's unavoidable.  It also says we must
2093*4882a593Smuzhiyun 		 * retain this state until the next reset, but there's
2094*4882a593Smuzhiyun 		 * no way to tell the controller driver it should ignore
2095*4882a593Smuzhiyun 		 * Clear-Feature(HALT) requests.
2096*4882a593Smuzhiyun 		 *
2097*4882a593Smuzhiyun 		 * We aren't required to halt the OUT endpoint; instead
2098*4882a593Smuzhiyun 		 * we can simply accept and discard any data received
2099*4882a593Smuzhiyun 		 * until the next reset. */
2100*4882a593Smuzhiyun 		wedge_bulk_in_endpoint(fsg);
2101*4882a593Smuzhiyun 		generic_set_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2102*4882a593Smuzhiyun 		return -EINVAL;
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	/* Is the CBW meaningful? */
2106*4882a593Smuzhiyun 	if (cbw->Lun >= FSG_MAX_LUNS || cbw->Flags & ~USB_BULK_IN_FLAG ||
2107*4882a593Smuzhiyun 			cbw->Length <= 0 || cbw->Length > MAX_COMMAND_SIZE) {
2108*4882a593Smuzhiyun 		DBG(fsg, "non-meaningful CBW: lun = %u, flags = 0x%x, "
2109*4882a593Smuzhiyun 				"cmdlen %u\n",
2110*4882a593Smuzhiyun 				cbw->Lun, cbw->Flags, cbw->Length);
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 		/* We can do anything we want here, so let's stall the
2113*4882a593Smuzhiyun 		 * bulk pipes if we are allowed to. */
2114*4882a593Smuzhiyun 		if (common->can_stall) {
2115*4882a593Smuzhiyun 			fsg_set_halt(fsg, fsg->bulk_out);
2116*4882a593Smuzhiyun 			halt_bulk_in_endpoint(fsg);
2117*4882a593Smuzhiyun 		}
2118*4882a593Smuzhiyun 		return -EINVAL;
2119*4882a593Smuzhiyun 	}
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	/* Save the command for later */
2122*4882a593Smuzhiyun 	common->cmnd_size = cbw->Length;
2123*4882a593Smuzhiyun 	memcpy(common->cmnd, cbw->CDB, common->cmnd_size);
2124*4882a593Smuzhiyun 	if (cbw->Flags & USB_BULK_IN_FLAG)
2125*4882a593Smuzhiyun 		common->data_dir = DATA_DIR_TO_HOST;
2126*4882a593Smuzhiyun 	else
2127*4882a593Smuzhiyun 		common->data_dir = DATA_DIR_FROM_HOST;
2128*4882a593Smuzhiyun 	common->data_size = le32_to_cpu(cbw->DataTransferLength);
2129*4882a593Smuzhiyun 	if (common->data_size == 0)
2130*4882a593Smuzhiyun 		common->data_dir = DATA_DIR_NONE;
2131*4882a593Smuzhiyun 	common->lun = cbw->Lun;
2132*4882a593Smuzhiyun 	common->tag = cbw->Tag;
2133*4882a593Smuzhiyun 	return 0;
2134*4882a593Smuzhiyun }
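/*
 * CBW sanity rules applied above, per the Bulk-Only Transport spec:
 * exactly 31 bytes, signature "USBC", LUN below FSG_MAX_LUNS, only the
 * direction flag (bit 7) set in bmCBWFlags, and a CDB length in the
 * 1..MAX_COMMAND_SIZE range.  A malformed CBW wedges the IN endpoint
 * and makes us ignore bulk-out data until the next reset.
 */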
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun static int get_next_command(struct fsg_common *common)
2138*4882a593Smuzhiyun {
2139*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
2140*4882a593Smuzhiyun 	int			rc = 0;
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	/* Wait for the next buffer to become available */
2143*4882a593Smuzhiyun 	bh = common->next_buffhd_to_fill;
2144*4882a593Smuzhiyun 	while (bh->state != BUF_STATE_EMPTY) {
2145*4882a593Smuzhiyun 		rc = sleep_thread(common);
2146*4882a593Smuzhiyun 		if (rc)
2147*4882a593Smuzhiyun 			return rc;
2148*4882a593Smuzhiyun 	}
2149*4882a593Smuzhiyun 
2150*4882a593Smuzhiyun 	/* Queue a request to read a Bulk-only CBW */
2151*4882a593Smuzhiyun 	set_bulk_out_req_length(common, bh, USB_BULK_CB_WRAP_LEN);
2152*4882a593Smuzhiyun 	bh->outreq->short_not_ok = 1;
2153*4882a593Smuzhiyun 	START_TRANSFER_OR(common, bulk_out, bh->outreq,
2154*4882a593Smuzhiyun 			  &bh->outreq_busy, &bh->state)
2155*4882a593Smuzhiyun 		/* Don't know what to do if common->fsg is NULL */
2156*4882a593Smuzhiyun 		return -EIO;
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 	/* We will drain the buffer in software, which means we
2159*4882a593Smuzhiyun 	 * can reuse it for the next filling.  No need to advance
2160*4882a593Smuzhiyun 	 * next_buffhd_to_fill. */
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun 	/* Wait for the CBW to arrive */
2163*4882a593Smuzhiyun 	while (bh->state != BUF_STATE_FULL) {
2164*4882a593Smuzhiyun 		rc = sleep_thread(common);
2165*4882a593Smuzhiyun 		if (rc)
2166*4882a593Smuzhiyun 			return rc;
2167*4882a593Smuzhiyun 	}
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun 	rc = fsg_is_set(common) ? received_cbw(common->fsg, bh) : -EIO;
2170*4882a593Smuzhiyun 	bh->state = BUF_STATE_EMPTY;
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 	return rc;
2173*4882a593Smuzhiyun }
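/*
 * Buffer-head note: get_next_command() leaves the buffer it used back
 * in BUF_STATE_EMPTY, so the very same head is reused for the next CBW.
 * The transition to BUF_STATE_FULL that this function waits for is made
 * by the bulk-out completion callback (bulk_out_complete(), registered
 * on each request in do_set_interface() below); this function merely
 * polls the state, yielding via sleep_thread() in between.
 */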
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun static int enable_endpoint(struct fsg_common *common, struct usb_ep *ep,
2179*4882a593Smuzhiyun 		const struct usb_endpoint_descriptor *d)
2180*4882a593Smuzhiyun {
2181*4882a593Smuzhiyun 	int	rc;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	ep->driver_data = common;
2184*4882a593Smuzhiyun 	rc = usb_ep_enable(ep, d);
2185*4882a593Smuzhiyun 	if (rc)
2186*4882a593Smuzhiyun 		ERROR(common, "can't enable %s, result %d\n", ep->name, rc);
2187*4882a593Smuzhiyun 	return rc;
2188*4882a593Smuzhiyun }
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun static int alloc_request(struct fsg_common *common, struct usb_ep *ep,
2191*4882a593Smuzhiyun 		struct usb_request **preq)
2192*4882a593Smuzhiyun {
2193*4882a593Smuzhiyun 	*preq = usb_ep_alloc_request(ep, GFP_ATOMIC);
2194*4882a593Smuzhiyun 	if (*preq)
2195*4882a593Smuzhiyun 		return 0;
2196*4882a593Smuzhiyun 	ERROR(common, "can't allocate request for %s\n", ep->name);
2197*4882a593Smuzhiyun 	return -ENOMEM;
2198*4882a593Smuzhiyun }
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun /* Reset interface setting and re-init endpoint state (toggle etc). */
2201*4882a593Smuzhiyun static int do_set_interface(struct fsg_common *common, struct fsg_dev *new_fsg)
2202*4882a593Smuzhiyun {
2203*4882a593Smuzhiyun 	const struct usb_endpoint_descriptor *d;
2204*4882a593Smuzhiyun 	struct fsg_dev *fsg;
2205*4882a593Smuzhiyun 	int i, rc = 0;
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	if (common->running)
2208*4882a593Smuzhiyun 		DBG(common, "reset interface\n");
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun reset:
2211*4882a593Smuzhiyun 	/* Deallocate the requests */
2212*4882a593Smuzhiyun 	if (common->fsg) {
2213*4882a593Smuzhiyun 		fsg = common->fsg;
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2216*4882a593Smuzhiyun 			struct fsg_buffhd *bh = &common->buffhds[i];
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 			if (bh->inreq) {
2219*4882a593Smuzhiyun 				usb_ep_free_request(fsg->bulk_in, bh->inreq);
2220*4882a593Smuzhiyun 				bh->inreq = NULL;
2221*4882a593Smuzhiyun 			}
2222*4882a593Smuzhiyun 			if (bh->outreq) {
2223*4882a593Smuzhiyun 				usb_ep_free_request(fsg->bulk_out, bh->outreq);
2224*4882a593Smuzhiyun 				bh->outreq = NULL;
2225*4882a593Smuzhiyun 			}
2226*4882a593Smuzhiyun 		}
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun 		/* Disable the endpoints */
2229*4882a593Smuzhiyun 		if (fsg->bulk_in_enabled) {
2230*4882a593Smuzhiyun 			usb_ep_disable(fsg->bulk_in);
2231*4882a593Smuzhiyun 			fsg->bulk_in_enabled = 0;
2232*4882a593Smuzhiyun 		}
2233*4882a593Smuzhiyun 		if (fsg->bulk_out_enabled) {
2234*4882a593Smuzhiyun 			usb_ep_disable(fsg->bulk_out);
2235*4882a593Smuzhiyun 			fsg->bulk_out_enabled = 0;
2236*4882a593Smuzhiyun 		}
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 		common->fsg = NULL;
2239*4882a593Smuzhiyun 		/* wake_up(&common->fsg_wait); */
2240*4882a593Smuzhiyun 	}
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 	common->running = 0;
2243*4882a593Smuzhiyun 	if (!new_fsg || rc)
2244*4882a593Smuzhiyun 		return rc;
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	common->fsg = new_fsg;
2247*4882a593Smuzhiyun 	fsg = common->fsg;
2248*4882a593Smuzhiyun 
2249*4882a593Smuzhiyun 	/* Enable the endpoints */
2250*4882a593Smuzhiyun 	d = fsg_ep_desc(common->gadget,
2251*4882a593Smuzhiyun 			&fsg_fs_bulk_in_desc, &fsg_hs_bulk_in_desc,
2252*4882a593Smuzhiyun 			&fsg_ss_bulk_in_desc, &fsg_ss_bulk_in_comp_desc,
2253*4882a593Smuzhiyun 			fsg->bulk_in);
2254*4882a593Smuzhiyun 	rc = enable_endpoint(common, fsg->bulk_in, d);
2255*4882a593Smuzhiyun 	if (rc)
2256*4882a593Smuzhiyun 		goto reset;
2257*4882a593Smuzhiyun 	fsg->bulk_in_enabled = 1;
2258*4882a593Smuzhiyun 
2259*4882a593Smuzhiyun 	d = fsg_ep_desc(common->gadget,
2260*4882a593Smuzhiyun 			&fsg_fs_bulk_out_desc, &fsg_hs_bulk_out_desc,
2261*4882a593Smuzhiyun 			&fsg_ss_bulk_out_desc, &fsg_ss_bulk_out_comp_desc,
2262*4882a593Smuzhiyun 			fsg->bulk_out);
2263*4882a593Smuzhiyun 	rc = enable_endpoint(common, fsg->bulk_out, d);
2264*4882a593Smuzhiyun 	if (rc)
2265*4882a593Smuzhiyun 		goto reset;
2266*4882a593Smuzhiyun 	fsg->bulk_out_enabled = 1;
2267*4882a593Smuzhiyun 	common->bulk_out_maxpacket =
2268*4882a593Smuzhiyun 				le16_to_cpu(get_unaligned(&d->wMaxPacketSize));
2269*4882a593Smuzhiyun 	generic_clear_bit(IGNORE_BULK_OUT, &fsg->atomic_bitflags);
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	/* Allocate the requests */
2272*4882a593Smuzhiyun 	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2273*4882a593Smuzhiyun 		struct fsg_buffhd	*bh = &common->buffhds[i];
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun 		rc = alloc_request(common, fsg->bulk_in, &bh->inreq);
2276*4882a593Smuzhiyun 		if (rc)
2277*4882a593Smuzhiyun 			goto reset;
2278*4882a593Smuzhiyun 		rc = alloc_request(common, fsg->bulk_out, &bh->outreq);
2279*4882a593Smuzhiyun 		if (rc)
2280*4882a593Smuzhiyun 			goto reset;
2281*4882a593Smuzhiyun 		bh->inreq->buf = bh->outreq->buf = bh->buf;
2282*4882a593Smuzhiyun 		bh->inreq->context = bh->outreq->context = bh;
2283*4882a593Smuzhiyun 		bh->inreq->complete = bulk_in_complete;
2284*4882a593Smuzhiyun 		bh->outreq->complete = bulk_out_complete;
2285*4882a593Smuzhiyun 	}
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	common->running = 1;
2288*4882a593Smuzhiyun 
2289*4882a593Smuzhiyun 	return rc;
2290*4882a593Smuzhiyun }
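/*
 * The fsg_ep_desc() helper used above is not part of this excerpt; it
 * picks whichever of the full-, high- or super-speed endpoint
 * descriptors matches the speed the gadget enumerated at, presumably
 * something along these lines (illustrative sketch only):
 *
 *	switch (gadget->speed) {
 *	case USB_SPEED_SUPER:
 *		return ss;
 *	case USB_SPEED_HIGH:
 *		return hs;
 *	default:
 *		return fs;
 *	}
 *
 * The rest of do_set_interface() is symmetric: tear down requests and
 * endpoints under the "reset" label, then re-enable both bulk endpoints
 * and attach one IN and one OUT request to every buffer head.
 */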
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun /****************************** ALT CONFIGS ******************************/
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun static int fsg_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
2297*4882a593Smuzhiyun {
2298*4882a593Smuzhiyun 	struct fsg_dev *fsg = fsg_from_func(f);
2299*4882a593Smuzhiyun 	fsg->common->new_fsg = fsg;
2300*4882a593Smuzhiyun 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2301*4882a593Smuzhiyun 	return 0;
2302*4882a593Smuzhiyun }
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun static void fsg_disable(struct usb_function *f)
2305*4882a593Smuzhiyun {
2306*4882a593Smuzhiyun 	struct fsg_dev *fsg = fsg_from_func(f);
2307*4882a593Smuzhiyun 	fsg->common->new_fsg = NULL;
2308*4882a593Smuzhiyun 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2309*4882a593Smuzhiyun }
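/*
 * Both callbacks above only record the desired interface state and
 * raise FSG_STATE_CONFIG_CHANGE; the actual endpoint reconfiguration
 * happens later in handle_exception(), which calls
 * do_set_interface(common, common->new_fsg) for that state.  This keeps
 * all endpoint setup and teardown serialized in the worker context.
 */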
2310*4882a593Smuzhiyun 
2311*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun static void handle_exception(struct fsg_common *common)
2314*4882a593Smuzhiyun {
2315*4882a593Smuzhiyun 	int			i;
2316*4882a593Smuzhiyun 	struct fsg_buffhd	*bh;
2317*4882a593Smuzhiyun 	enum fsg_state		old_state;
2318*4882a593Smuzhiyun 	struct fsg_lun		*curlun;
2319*4882a593Smuzhiyun 	unsigned int		exception_req_tag;
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	/* Cancel all the pending transfers */
2322*4882a593Smuzhiyun 	if (common->fsg) {
2323*4882a593Smuzhiyun 		for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2324*4882a593Smuzhiyun 			bh = &common->buffhds[i];
2325*4882a593Smuzhiyun 			if (bh->inreq_busy)
2326*4882a593Smuzhiyun 				usb_ep_dequeue(common->fsg->bulk_in, bh->inreq);
2327*4882a593Smuzhiyun 			if (bh->outreq_busy)
2328*4882a593Smuzhiyun 				usb_ep_dequeue(common->fsg->bulk_out,
2329*4882a593Smuzhiyun 					       bh->outreq);
2330*4882a593Smuzhiyun 		}
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 		/* Wait until everything is idle */
2333*4882a593Smuzhiyun 		for (;;) {
2334*4882a593Smuzhiyun 			int num_active = 0;
2335*4882a593Smuzhiyun 			for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2336*4882a593Smuzhiyun 				bh = &common->buffhds[i];
2337*4882a593Smuzhiyun 				num_active += bh->inreq_busy + bh->outreq_busy;
2338*4882a593Smuzhiyun 			}
2339*4882a593Smuzhiyun 			if (num_active == 0)
2340*4882a593Smuzhiyun 				break;
2341*4882a593Smuzhiyun 			if (sleep_thread(common))
2342*4882a593Smuzhiyun 				return;
2343*4882a593Smuzhiyun 		}
2344*4882a593Smuzhiyun 
2345*4882a593Smuzhiyun 		/* Clear out the controller's fifos */
2346*4882a593Smuzhiyun 		if (common->fsg->bulk_in_enabled)
2347*4882a593Smuzhiyun 			usb_ep_fifo_flush(common->fsg->bulk_in);
2348*4882a593Smuzhiyun 		if (common->fsg->bulk_out_enabled)
2349*4882a593Smuzhiyun 			usb_ep_fifo_flush(common->fsg->bulk_out);
2350*4882a593Smuzhiyun 	}
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	/* Reset the I/O buffer states and pointers, the SCSI
2353*4882a593Smuzhiyun 	 * state, and the exception.  Then invoke the handler. */
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	for (i = 0; i < FSG_NUM_BUFFERS; ++i) {
2356*4882a593Smuzhiyun 		bh = &common->buffhds[i];
2357*4882a593Smuzhiyun 		bh->state = BUF_STATE_EMPTY;
2358*4882a593Smuzhiyun 	}
2359*4882a593Smuzhiyun 	common->next_buffhd_to_fill = &common->buffhds[0];
2360*4882a593Smuzhiyun 	common->next_buffhd_to_drain = &common->buffhds[0];
2361*4882a593Smuzhiyun 	exception_req_tag = common->exception_req_tag;
2362*4882a593Smuzhiyun 	old_state = common->state;
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	if (old_state == FSG_STATE_ABORT_BULK_OUT)
2365*4882a593Smuzhiyun 		common->state = FSG_STATE_STATUS_PHASE;
2366*4882a593Smuzhiyun 	else {
2367*4882a593Smuzhiyun 		for (i = 0; i < common->nluns; ++i) {
2368*4882a593Smuzhiyun 			curlun = &common->luns[i];
2369*4882a593Smuzhiyun 			curlun->sense_data = SS_NO_SENSE;
2370*4882a593Smuzhiyun 			curlun->info_valid = 0;
2371*4882a593Smuzhiyun 		}
2372*4882a593Smuzhiyun 		common->state = FSG_STATE_IDLE;
2373*4882a593Smuzhiyun 	}
2374*4882a593Smuzhiyun 
2375*4882a593Smuzhiyun 	/* Carry out any extra actions required for the exception */
2376*4882a593Smuzhiyun 	switch (old_state) {
2377*4882a593Smuzhiyun 	case FSG_STATE_ABORT_BULK_OUT:
2378*4882a593Smuzhiyun 		send_status(common);
2379*4882a593Smuzhiyun 
2380*4882a593Smuzhiyun 		if (common->state == FSG_STATE_STATUS_PHASE)
2381*4882a593Smuzhiyun 			common->state = FSG_STATE_IDLE;
2382*4882a593Smuzhiyun 		break;
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun 	case FSG_STATE_RESET:
2385*4882a593Smuzhiyun 		/* In case we were forced against our will to halt a
2386*4882a593Smuzhiyun 		 * bulk endpoint, clear the halt now.  (The SuperH UDC
2387*4882a593Smuzhiyun 		 * requires this.) */
2388*4882a593Smuzhiyun 		if (!fsg_is_set(common))
2389*4882a593Smuzhiyun 			break;
2390*4882a593Smuzhiyun 		if (test_and_clear_bit(IGNORE_BULK_OUT,
2391*4882a593Smuzhiyun 				       &common->fsg->atomic_bitflags))
2392*4882a593Smuzhiyun 			usb_ep_clear_halt(common->fsg->bulk_in);
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun 		if (common->ep0_req_tag == exception_req_tag)
2395*4882a593Smuzhiyun 			ep0_queue(common);	/* Complete the status stage */
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun 		break;
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	case FSG_STATE_CONFIG_CHANGE:
2400*4882a593Smuzhiyun 		do_set_interface(common, common->new_fsg);
2401*4882a593Smuzhiyun 		break;
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	case FSG_STATE_EXIT:
2404*4882a593Smuzhiyun 	case FSG_STATE_TERMINATED:
2405*4882a593Smuzhiyun 		do_set_interface(common, NULL);		/* Free resources */
2406*4882a593Smuzhiyun 		common->state = FSG_STATE_TERMINATED;	/* Stop the thread */
2407*4882a593Smuzhiyun 		break;
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	case FSG_STATE_INTERFACE_CHANGE:
2410*4882a593Smuzhiyun 	case FSG_STATE_DISCONNECT:
2411*4882a593Smuzhiyun 	case FSG_STATE_COMMAND_PHASE:
2412*4882a593Smuzhiyun 	case FSG_STATE_DATA_PHASE:
2413*4882a593Smuzhiyun 	case FSG_STATE_STATUS_PHASE:
2414*4882a593Smuzhiyun 	case FSG_STATE_IDLE:
2415*4882a593Smuzhiyun 		break;
2416*4882a593Smuzhiyun 	}
2417*4882a593Smuzhiyun }
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun int fsg_main_thread(void *common_)
2422*4882a593Smuzhiyun {
2423*4882a593Smuzhiyun 	int ret;
2424*4882a593Smuzhiyun 	struct fsg_common	*common = the_fsg_common;
2425*4882a593Smuzhiyun 	/* The main loop */
2426*4882a593Smuzhiyun 	do {
2427*4882a593Smuzhiyun 		if (exception_in_progress(common)) {
2428*4882a593Smuzhiyun 			handle_exception(common);
2429*4882a593Smuzhiyun 			continue;
2430*4882a593Smuzhiyun 		}
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 		if (!common->running) {
2433*4882a593Smuzhiyun 			ret = sleep_thread(common);
2434*4882a593Smuzhiyun 			if (ret)
2435*4882a593Smuzhiyun 				return ret;
2436*4882a593Smuzhiyun 
2437*4882a593Smuzhiyun 			continue;
2438*4882a593Smuzhiyun 		}
2439*4882a593Smuzhiyun 
2440*4882a593Smuzhiyun 		ret = get_next_command(common);
2441*4882a593Smuzhiyun 		if (ret)
2442*4882a593Smuzhiyun 			return ret;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 		if (!exception_in_progress(common))
2445*4882a593Smuzhiyun 			common->state = FSG_STATE_DATA_PHASE;
2446*4882a593Smuzhiyun 
2447*4882a593Smuzhiyun 		if (do_scsi_command(common) || finish_reply(common))
2448*4882a593Smuzhiyun 			continue;
2449*4882a593Smuzhiyun 
2450*4882a593Smuzhiyun 		if (!exception_in_progress(common))
2451*4882a593Smuzhiyun 			common->state = FSG_STATE_STATUS_PHASE;
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 		if (send_status(common))
2454*4882a593Smuzhiyun 			continue;
2455*4882a593Smuzhiyun 
2456*4882a593Smuzhiyun 		if (!exception_in_progress(common))
2457*4882a593Smuzhiyun 			common->state = FSG_STATE_IDLE;
2458*4882a593Smuzhiyun 	} while (0);
2459*4882a593Smuzhiyun 
2460*4882a593Smuzhiyun 	common->thread_task = NULL;
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun 	return 0;
2463*4882a593Smuzhiyun }
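/*
 * Despite its name, fsg_main_thread() is not a long-running thread in
 * this port: the do { } while (0) above means each call performs at
 * most one pass (exception handling, or a single command/data/status
 * cycle) and then returns.  The caller is expected to invoke it
 * repeatedly, roughly the way the U-Boot "ums" command loop does --
 * an illustrative sketch only, the exact loop (and the UDC index held
 * in controller_index here) differs between trees:
 *
 *	while (!ctrlc()) {
 *		usb_gadget_handle_interrupts(controller_index);
 *		rc = fsg_main_thread(NULL);
 *		if (rc)
 *			break;
 *	}
 */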
2464*4882a593Smuzhiyun 
2465*4882a593Smuzhiyun static void fsg_common_release(struct kref *ref);
2466*4882a593Smuzhiyun 
2467*4882a593Smuzhiyun static struct fsg_common *fsg_common_init(struct fsg_common *common,
2468*4882a593Smuzhiyun 					  struct usb_composite_dev *cdev)
2469*4882a593Smuzhiyun {
2470*4882a593Smuzhiyun 	struct usb_gadget *gadget = cdev->gadget;
2471*4882a593Smuzhiyun 	struct fsg_buffhd *bh;
2472*4882a593Smuzhiyun 	struct fsg_lun *curlun;
2473*4882a593Smuzhiyun 	int nluns, i, rc;
2474*4882a593Smuzhiyun 
2475*4882a593Smuzhiyun 	/* Find out how many LUNs there should be */
2476*4882a593Smuzhiyun 	nluns = ums_count;
2477*4882a593Smuzhiyun 	if (nluns < 1 || nluns > FSG_MAX_LUNS) {
2478*4882a593Smuzhiyun 		printf("invalid number of LUNs: %u\n", nluns);
2479*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
2480*4882a593Smuzhiyun 	}
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	/* Allocate the common structure, unless the caller passed one in */
2483*4882a593Smuzhiyun 	if (!common) {
2484*4882a593Smuzhiyun 		common = calloc(sizeof(*common), 1);
2485*4882a593Smuzhiyun 		if (!common)
2486*4882a593Smuzhiyun 			return ERR_PTR(-ENOMEM);
2487*4882a593Smuzhiyun 		common->free_storage_on_release = 1;
2488*4882a593Smuzhiyun 	} else {
2489*4882a593Smuzhiyun 		memset(common, 0, sizeof(*common));
2490*4882a593Smuzhiyun 		common->free_storage_on_release = 0;
2491*4882a593Smuzhiyun 	}
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	common->ops = NULL;
2494*4882a593Smuzhiyun 	common->private_data = NULL;
2495*4882a593Smuzhiyun 
2496*4882a593Smuzhiyun 	common->gadget = gadget;
2497*4882a593Smuzhiyun 	common->ep0 = gadget->ep0;
2498*4882a593Smuzhiyun 	common->ep0req = cdev->req;
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun 	/* Maybe allocate device-global string IDs, and patch descriptors */
2501*4882a593Smuzhiyun 	if (fsg_strings[FSG_STRING_INTERFACE].id == 0) {
2502*4882a593Smuzhiyun 		rc = usb_string_id(cdev);
2503*4882a593Smuzhiyun 		if (unlikely(rc < 0))
2504*4882a593Smuzhiyun 			goto error_release;
2505*4882a593Smuzhiyun 		fsg_strings[FSG_STRING_INTERFACE].id = rc;
2506*4882a593Smuzhiyun 		fsg_intf_desc.iInterface = rc;
2507*4882a593Smuzhiyun 	}
2508*4882a593Smuzhiyun 
2509*4882a593Smuzhiyun 	/* Create the LUNs and open their backing storage for each
2510*4882a593Smuzhiyun 	 * exported UMS device. */
2511*4882a593Smuzhiyun 	curlun = calloc(nluns, sizeof *curlun);
2512*4882a593Smuzhiyun 	if (!curlun) {
2513*4882a593Smuzhiyun 		rc = -ENOMEM;
2514*4882a593Smuzhiyun 		goto error_release;
2515*4882a593Smuzhiyun 	}
2516*4882a593Smuzhiyun 	common->nluns = nluns;
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun 	for (i = 0; i < nluns; i++) {
2519*4882a593Smuzhiyun 		common->luns[i].removable = 1;
2520*4882a593Smuzhiyun 
2521*4882a593Smuzhiyun 		rc = fsg_lun_open(&common->luns[i], ums[i].num_sectors, "");
2522*4882a593Smuzhiyun 		if (rc)
2523*4882a593Smuzhiyun 			goto error_luns;
2524*4882a593Smuzhiyun 	}
2525*4882a593Smuzhiyun 	common->lun = 0;
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	/* Build the cyclic list of data buffers (the loop is entered mid-body; the final ->next wraps to buffhds[0] below) */
2528*4882a593Smuzhiyun 	bh = common->buffhds;
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	i = FSG_NUM_BUFFERS;
2531*4882a593Smuzhiyun 	goto buffhds_first_it;
2532*4882a593Smuzhiyun 	do {
2533*4882a593Smuzhiyun 		bh->next = bh + 1;
2534*4882a593Smuzhiyun 		++bh;
2535*4882a593Smuzhiyun buffhds_first_it:
2536*4882a593Smuzhiyun 		bh->inreq_busy = 0;
2537*4882a593Smuzhiyun 		bh->outreq_busy = 0;
2538*4882a593Smuzhiyun 		bh->buf = memalign(CONFIG_SYS_CACHELINE_SIZE, FSG_BUFLEN);
2539*4882a593Smuzhiyun 		if (unlikely(!bh->buf)) {
2540*4882a593Smuzhiyun 			rc = -ENOMEM;
2541*4882a593Smuzhiyun 			goto error_release;
2542*4882a593Smuzhiyun 		}
2543*4882a593Smuzhiyun 	} while (--i);
2544*4882a593Smuzhiyun 	bh->next = common->buffhds;
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun 	snprintf(common->inquiry_string, sizeof common->inquiry_string,
2547*4882a593Smuzhiyun 		 "%-8s%-16s%04x",
2548*4882a593Smuzhiyun 		 "Linux   ",
2549*4882a593Smuzhiyun 		 "File-Store Gadget",
2550*4882a593Smuzhiyun 		 0xffff);
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun 	/* Some peripheral controllers are known not to be able to
2553*4882a593Smuzhiyun 	 * halt bulk endpoints correctly.  If one of them is present,
2554*4882a593Smuzhiyun 	 * disable stalls.
2555*4882a593Smuzhiyun 	 */
2556*4882a593Smuzhiyun 
2557*4882a593Smuzhiyun 	/* Tell the thread to start working */
2558*4882a593Smuzhiyun 	common->thread_task =
2559*4882a593Smuzhiyun 		kthread_create(fsg_main_thread, common,
2560*4882a593Smuzhiyun 			       OR(cfg->thread_name, "file-storage"));
2561*4882a593Smuzhiyun 	if (IS_ERR(common->thread_task)) {
2562*4882a593Smuzhiyun 		rc = PTR_ERR(common->thread_task);
2563*4882a593Smuzhiyun 		goto error_release;
2564*4882a593Smuzhiyun 	}
2565*4882a593Smuzhiyun 
2566*4882a593Smuzhiyun #undef OR
2567*4882a593Smuzhiyun 	/* Information */
2568*4882a593Smuzhiyun 	INFO(common, FSG_DRIVER_DESC ", version: " FSG_DRIVER_VERSION "\n");
2569*4882a593Smuzhiyun 	INFO(common, "Number of LUNs=%d\n", common->nluns);
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	return common;
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun error_luns:
2574*4882a593Smuzhiyun 	common->nluns = i + 1;
2575*4882a593Smuzhiyun error_release:
2576*4882a593Smuzhiyun 	common->state = FSG_STATE_TERMINATED;	/* The thread is dead */
2577*4882a593Smuzhiyun 	/* Call fsg_common_release() directly; the ref may not have
2578*4882a593Smuzhiyun 	 * been initialised yet. */
2579*4882a593Smuzhiyun 	fsg_common_release(&common->ref);
2580*4882a593Smuzhiyun 	return ERR_PTR(rc);
2581*4882a593Smuzhiyun }
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun static void fsg_common_release(struct kref *ref)
2584*4882a593Smuzhiyun {
2585*4882a593Smuzhiyun 	struct fsg_common *common = container_of(ref, struct fsg_common, ref);
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 	/* If the thread isn't already dead, tell it to exit now */
2588*4882a593Smuzhiyun 	if (common->state != FSG_STATE_TERMINATED) {
2589*4882a593Smuzhiyun 		raise_exception(common, FSG_STATE_EXIT);
2590*4882a593Smuzhiyun 		wait_for_completion(&common->thread_notifier);
2591*4882a593Smuzhiyun 	}
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun 	if (likely(common->luns)) {
2594*4882a593Smuzhiyun 		struct fsg_lun *lun = common->luns;
2595*4882a593Smuzhiyun 		unsigned i = common->nluns;
2596*4882a593Smuzhiyun 
2597*4882a593Smuzhiyun 		/* In error recovery common->nluns may be zero. */
2598*4882a593Smuzhiyun 		for (; i; --i, ++lun)
2599*4882a593Smuzhiyun 			fsg_lun_close(lun);
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun 		kfree(common->luns);
2602*4882a593Smuzhiyun 	}
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun 	{
2605*4882a593Smuzhiyun 		struct fsg_buffhd *bh = common->buffhds;
2606*4882a593Smuzhiyun 		unsigned i = FSG_NUM_BUFFERS;
2607*4882a593Smuzhiyun 		do {
2608*4882a593Smuzhiyun 			kfree(bh->buf);
2609*4882a593Smuzhiyun 		} while (++bh, --i);
2610*4882a593Smuzhiyun 	}
2611*4882a593Smuzhiyun 
2612*4882a593Smuzhiyun 	if (common->free_storage_on_release)
2613*4882a593Smuzhiyun 		kfree(common);
2614*4882a593Smuzhiyun }
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun /**
2620*4882a593Smuzhiyun  * usb_copy_descriptors - copy a vector of USB descriptors
2621*4882a593Smuzhiyun  * @src: null-terminated vector to copy
2622*4882a593Smuzhiyun  * Context: initialization code, which may sleep
2623*4882a593Smuzhiyun  *
2624*4882a593Smuzhiyun  * This makes a copy of a vector of USB descriptors.  Its primary use
2625*4882a593Smuzhiyun  * is to support usb_function objects which can have multiple copies,
2626*4882a593Smuzhiyun  * each needing different descriptors.  Functions may have static
2627*4882a593Smuzhiyun  * tables of descriptors, which are used as templates and customized
2628*4882a593Smuzhiyun  * with identifiers (for interfaces, strings, endpoints, and more)
2629*4882a593Smuzhiyun  * as needed by a given function instance.
2630*4882a593Smuzhiyun  */
2631*4882a593Smuzhiyun struct usb_descriptor_header **
2632*4882a593Smuzhiyun usb_copy_descriptors(struct usb_descriptor_header **src)
2633*4882a593Smuzhiyun {
2634*4882a593Smuzhiyun 	struct usb_descriptor_header **tmp;
2635*4882a593Smuzhiyun 	unsigned bytes;
2636*4882a593Smuzhiyun 	unsigned n_desc;
2637*4882a593Smuzhiyun 	void *mem;
2638*4882a593Smuzhiyun 	struct usb_descriptor_header **ret;
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 	/* count descriptors and their sizes; then add vector size */
2641*4882a593Smuzhiyun 	for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++)
2642*4882a593Smuzhiyun 		bytes += (*tmp)->bLength;
2643*4882a593Smuzhiyun 	bytes += (n_desc + 1) * sizeof(*tmp);
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun 	mem = memalign(CONFIG_SYS_CACHELINE_SIZE, bytes);
2646*4882a593Smuzhiyun 	if (!mem)
2647*4882a593Smuzhiyun 		return NULL;
2648*4882a593Smuzhiyun 
2649*4882a593Smuzhiyun 	/* fill in pointers starting at "tmp",
2650*4882a593Smuzhiyun 	 * to descriptors copied starting at "mem";
2651*4882a593Smuzhiyun 	 * and return "ret"
2652*4882a593Smuzhiyun 	 */
2653*4882a593Smuzhiyun 	tmp = mem;
2654*4882a593Smuzhiyun 	ret = mem;
2655*4882a593Smuzhiyun 	mem += (n_desc + 1) * sizeof(*tmp);
2656*4882a593Smuzhiyun 	while (*src) {
2657*4882a593Smuzhiyun 		memcpy(mem, *src, (*src)->bLength);
2658*4882a593Smuzhiyun 		*tmp = mem;
2659*4882a593Smuzhiyun 		tmp++;
2660*4882a593Smuzhiyun 		mem += (*src)->bLength;
2661*4882a593Smuzhiyun 		src++;
2662*4882a593Smuzhiyun 	}
2663*4882a593Smuzhiyun 	*tmp = NULL;
2664*4882a593Smuzhiyun 
2665*4882a593Smuzhiyun 	return ret;
2666*4882a593Smuzhiyun }
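/*
 * Example use (illustrative): fsg_bind() below copies one of the static
 * descriptor templates so per-instance fields can be patched without
 * touching the template.  The copy is a single allocation holding both
 * the pointer vector and the descriptors themselves, so one free()
 * releases it all, as fsg_unbind() relies on:
 *
 *	struct usb_descriptor_header **copy;
 *
 *	copy = usb_copy_descriptors(fsg_fs_function);
 *	if (!copy)
 *		return -ENOMEM;
 *	...
 *	free(copy);
 */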
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun static void fsg_unbind(struct usb_configuration *c, struct usb_function *f)
2669*4882a593Smuzhiyun {
2670*4882a593Smuzhiyun 	struct fsg_dev		*fsg = fsg_from_func(f);
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun 	DBG(fsg, "unbind\n");
2673*4882a593Smuzhiyun 	if (fsg->common->fsg == fsg) {
2674*4882a593Smuzhiyun 		fsg->common->new_fsg = NULL;
2675*4882a593Smuzhiyun 		raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
2676*4882a593Smuzhiyun 	}
2677*4882a593Smuzhiyun 
2678*4882a593Smuzhiyun 	free(fsg->function.descriptors);
2679*4882a593Smuzhiyun 	free(fsg->function.hs_descriptors);
2680*4882a593Smuzhiyun 	kfree(fsg);
2681*4882a593Smuzhiyun }
2682*4882a593Smuzhiyun 
2683*4882a593Smuzhiyun static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
2684*4882a593Smuzhiyun {
2685*4882a593Smuzhiyun 	struct fsg_dev		*fsg = fsg_from_func(f);
2686*4882a593Smuzhiyun 	struct usb_gadget	*gadget = c->cdev->gadget;
2687*4882a593Smuzhiyun 	int			i;
2688*4882a593Smuzhiyun 	struct usb_ep		*ep;
2689*4882a593Smuzhiyun 	fsg->gadget = gadget;
2690*4882a593Smuzhiyun 
2691*4882a593Smuzhiyun 	/* New interface */
2692*4882a593Smuzhiyun 	i = usb_interface_id(c, f);
2693*4882a593Smuzhiyun 	if (i < 0)
2694*4882a593Smuzhiyun 		return i;
2695*4882a593Smuzhiyun 	fsg_intf_desc.bInterfaceNumber = i;
2696*4882a593Smuzhiyun 	fsg->interface_number = i;
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 	/* Find all the endpoints we will use */
2699*4882a593Smuzhiyun 	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_in_desc);
2700*4882a593Smuzhiyun 	if (!ep)
2701*4882a593Smuzhiyun 		goto autoconf_fail;
2702*4882a593Smuzhiyun 	ep->driver_data = fsg->common;	/* claim the endpoint */
2703*4882a593Smuzhiyun 	fsg->bulk_in = ep;
2704*4882a593Smuzhiyun 
2705*4882a593Smuzhiyun 	ep = usb_ep_autoconfig(gadget, &fsg_fs_bulk_out_desc);
2706*4882a593Smuzhiyun 	if (!ep)
2707*4882a593Smuzhiyun 		goto autoconf_fail;
2708*4882a593Smuzhiyun 	ep->driver_data = fsg->common;	/* claim the endpoint */
2709*4882a593Smuzhiyun 	fsg->bulk_out = ep;
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun 	/* Copy descriptors */
2712*4882a593Smuzhiyun 	if (IS_RKUSB_UMS_DNL(c->cdev->driver->name))
2713*4882a593Smuzhiyun 		f->descriptors = usb_copy_descriptors(rkusb_fs_function);
2714*4882a593Smuzhiyun 	else
2715*4882a593Smuzhiyun 		f->descriptors = usb_copy_descriptors(fsg_fs_function);
2716*4882a593Smuzhiyun 	if (unlikely(!f->descriptors))
2717*4882a593Smuzhiyun 		return -ENOMEM;
2718*4882a593Smuzhiyun 
2719*4882a593Smuzhiyun 	if (gadget_is_dualspeed(gadget)) {
2720*4882a593Smuzhiyun 		/* Assume endpoint addresses are the same for both speeds */
2721*4882a593Smuzhiyun 		fsg_hs_bulk_in_desc.bEndpointAddress =
2722*4882a593Smuzhiyun 			fsg_fs_bulk_in_desc.bEndpointAddress;
2723*4882a593Smuzhiyun 		fsg_hs_bulk_out_desc.bEndpointAddress =
2724*4882a593Smuzhiyun 			fsg_fs_bulk_out_desc.bEndpointAddress;
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 		if (IS_RKUSB_UMS_DNL(c->cdev->driver->name))
2727*4882a593Smuzhiyun 			f->hs_descriptors =
2728*4882a593Smuzhiyun 				usb_copy_descriptors(rkusb_hs_function);
2729*4882a593Smuzhiyun 		else
2730*4882a593Smuzhiyun 			f->hs_descriptors =
2731*4882a593Smuzhiyun 				usb_copy_descriptors(fsg_hs_function);
2732*4882a593Smuzhiyun 		if (unlikely(!f->hs_descriptors)) {
2733*4882a593Smuzhiyun 			free(f->descriptors);
2734*4882a593Smuzhiyun 			return -ENOMEM;
2735*4882a593Smuzhiyun 		}
2736*4882a593Smuzhiyun 	}
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 	if (gadget_is_superspeed(gadget)) {
2739*4882a593Smuzhiyun 		/* Assume endpoint addresses are the same as full speed */
2740*4882a593Smuzhiyun 		fsg_ss_bulk_in_desc.bEndpointAddress =
2741*4882a593Smuzhiyun 			fsg_fs_bulk_in_desc.bEndpointAddress;
2742*4882a593Smuzhiyun 		fsg_ss_bulk_out_desc.bEndpointAddress =
2743*4882a593Smuzhiyun 			fsg_fs_bulk_out_desc.bEndpointAddress;
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun #ifdef CONFIG_CMD_ROCKUSB
2746*4882a593Smuzhiyun 		if (IS_RKUSB_UMS_DNL(c->cdev->driver->name))
2747*4882a593Smuzhiyun 			f->ss_descriptors =
2748*4882a593Smuzhiyun 				usb_copy_descriptors(rkusb_ss_function);
2749*4882a593Smuzhiyun #endif
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun 		if (unlikely(!f->ss_descriptors)) {
2752*4882a593Smuzhiyun 			free(f->descriptors);
2753*4882a593Smuzhiyun 			return -ENOMEM;
2754*4882a593Smuzhiyun 		}
2755*4882a593Smuzhiyun 	}
2756*4882a593Smuzhiyun 	return 0;
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun autoconf_fail:
2759*4882a593Smuzhiyun 	ERROR(fsg, "unable to autoconfigure all endpoints\n");
2760*4882a593Smuzhiyun 	return -ENOTSUPP;
2761*4882a593Smuzhiyun }
2762*4882a593Smuzhiyun 
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun /****************************** ADD FUNCTION ******************************/
2765*4882a593Smuzhiyun 
2766*4882a593Smuzhiyun static struct usb_gadget_strings *fsg_strings_array[] = {
2767*4882a593Smuzhiyun 	&fsg_stringtab,
2768*4882a593Smuzhiyun 	NULL,
2769*4882a593Smuzhiyun };
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun static int fsg_bind_config(struct usb_composite_dev *cdev,
2772*4882a593Smuzhiyun 			   struct usb_configuration *c,
2773*4882a593Smuzhiyun 			   struct fsg_common *common)
2774*4882a593Smuzhiyun {
2775*4882a593Smuzhiyun 	struct fsg_dev *fsg;
2776*4882a593Smuzhiyun 	int rc;
2777*4882a593Smuzhiyun 
2778*4882a593Smuzhiyun 	fsg = calloc(1, sizeof *fsg);
2779*4882a593Smuzhiyun 	if (!fsg)
2780*4882a593Smuzhiyun 		return -ENOMEM;
2781*4882a593Smuzhiyun 	fsg->function.name        = FSG_DRIVER_DESC;
2782*4882a593Smuzhiyun 	fsg->function.strings     = fsg_strings_array;
2783*4882a593Smuzhiyun 	fsg->function.bind        = fsg_bind;
2784*4882a593Smuzhiyun 	fsg->function.unbind      = fsg_unbind;
2785*4882a593Smuzhiyun 	fsg->function.setup       = fsg_setup;
2786*4882a593Smuzhiyun 	fsg->function.set_alt     = fsg_set_alt;
2787*4882a593Smuzhiyun 	fsg->function.disable     = fsg_disable;
2788*4882a593Smuzhiyun 
2789*4882a593Smuzhiyun 	fsg->common               = common;
2790*4882a593Smuzhiyun 	common->fsg               = fsg;
2791*4882a593Smuzhiyun 	/* Our caller holds a reference to the common structure, so we
2792*4882a593Smuzhiyun 	 * don't have to worry about it being freed until we return from
2793*4882a593Smuzhiyun 	 * this function.  Instead of incrementing the counter now and
2794*4882a593Smuzhiyun 	 * decrementing it in error recovery, we increment it only once
2795*4882a593Smuzhiyun 	 * the call to usb_add_function() has succeeded. */
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	rc = usb_add_function(c, &fsg->function);
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 	if (rc)
2800*4882a593Smuzhiyun 		kfree(fsg);
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun 	return rc;
2803*4882a593Smuzhiyun }
2804*4882a593Smuzhiyun 
2805*4882a593Smuzhiyun int fsg_add(struct usb_configuration *c)
2806*4882a593Smuzhiyun {
2807*4882a593Smuzhiyun 	struct fsg_common *fsg_common;
2808*4882a593Smuzhiyun 
2809*4882a593Smuzhiyun 	fsg_common = fsg_common_init(NULL, c->cdev);
2810*4882a593Smuzhiyun 
2811*4882a593Smuzhiyun 	fsg_common->vendor_name = 0;
2812*4882a593Smuzhiyun 	fsg_common->product_name = 0;
2813*4882a593Smuzhiyun 	fsg_common->release = 0xffff;
2814*4882a593Smuzhiyun 
2815*4882a593Smuzhiyun 	fsg_common->ops = NULL;
2816*4882a593Smuzhiyun 	fsg_common->private_data = NULL;
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	the_fsg_common = fsg_common;
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	return fsg_bind_config(c->cdev, c, fsg_common);
2821*4882a593Smuzhiyun }
2822*4882a593Smuzhiyun 
2823*4882a593Smuzhiyun int fsg_init(struct ums *ums_devs, int count)
2824*4882a593Smuzhiyun {
2825*4882a593Smuzhiyun 	ums = ums_devs;
2826*4882a593Smuzhiyun 	ums_count = count;
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	return 0;
2829*4882a593Smuzhiyun }
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun DECLARE_GADGET_BIND_CALLBACK(usb_dnl_ums, fsg_add);
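/*
 * Typical wiring, sketched from how the "ums"-style commands use this
 * file (everything not defined in this file is an assumption of the
 * sketch and may differ between trees): fill one struct ums per LUN,
 * hand the array to fsg_init(), then register the gadget under the
 * "usb_dnl_ums" name declared above so that fsg_add() gets bound into
 * the configuration:
 *
 *	static struct ums ums_dev;	(block read/write hooks, num_sectors, ...)
 *
 *	fsg_init(&ums_dev, 1);
 *	g_dnl_register("usb_dnl_ums");
 *	... poll usb_gadget_handle_interrupts() and fsg_main_thread() ...
 *	g_dnl_unregister();
 */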
2832