xref: /OK3568_Linux_fs/kernel/drivers/usb/usbip/stub_rx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2003-2008 Takahiro Hirofuchi
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <asm/byteorder.h>
7*4882a593Smuzhiyun #include <linux/kthread.h>
8*4882a593Smuzhiyun #include <linux/usb.h>
9*4882a593Smuzhiyun #include <linux/usb/hcd.h>
10*4882a593Smuzhiyun #include <linux/scatterlist.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "usbip_common.h"
13*4882a593Smuzhiyun #include "stub.h"
14*4882a593Smuzhiyun 
is_clear_halt_cmd(struct urb * urb)15*4882a593Smuzhiyun static int is_clear_halt_cmd(struct urb *urb)
16*4882a593Smuzhiyun {
17*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun 	return (req->bRequest == USB_REQ_CLEAR_FEATURE) &&
22*4882a593Smuzhiyun 	       (req->bRequestType == USB_RECIP_ENDPOINT) &&
23*4882a593Smuzhiyun 	       (req->wValue == USB_ENDPOINT_HALT);
24*4882a593Smuzhiyun }
25*4882a593Smuzhiyun 
is_set_interface_cmd(struct urb * urb)26*4882a593Smuzhiyun static int is_set_interface_cmd(struct urb *urb)
27*4882a593Smuzhiyun {
28*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun 	return (req->bRequest == USB_REQ_SET_INTERFACE) &&
33*4882a593Smuzhiyun 		(req->bRequestType == USB_RECIP_INTERFACE);
34*4882a593Smuzhiyun }
35*4882a593Smuzhiyun 
is_set_configuration_cmd(struct urb * urb)36*4882a593Smuzhiyun static int is_set_configuration_cmd(struct urb *urb)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	return (req->bRequest == USB_REQ_SET_CONFIGURATION) &&
43*4882a593Smuzhiyun 		(req->bRequestType == USB_RECIP_DEVICE);
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun 
is_reset_device_cmd(struct urb * urb)46*4882a593Smuzhiyun static int is_reset_device_cmd(struct urb *urb)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
49*4882a593Smuzhiyun 	__u16 value;
50*4882a593Smuzhiyun 	__u16 index;
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
53*4882a593Smuzhiyun 	value = le16_to_cpu(req->wValue);
54*4882a593Smuzhiyun 	index = le16_to_cpu(req->wIndex);
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 	if ((req->bRequest == USB_REQ_SET_FEATURE) &&
57*4882a593Smuzhiyun 	    (req->bRequestType == USB_RT_PORT) &&
58*4882a593Smuzhiyun 	    (value == USB_PORT_FEAT_RESET)) {
59*4882a593Smuzhiyun 		usbip_dbg_stub_rx("reset_device_cmd, port %u\n", index);
60*4882a593Smuzhiyun 		return 1;
61*4882a593Smuzhiyun 	} else
62*4882a593Smuzhiyun 		return 0;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun 
tweak_clear_halt_cmd(struct urb * urb)65*4882a593Smuzhiyun static int tweak_clear_halt_cmd(struct urb *urb)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
68*4882a593Smuzhiyun 	int target_endp;
69*4882a593Smuzhiyun 	int target_dir;
70*4882a593Smuzhiyun 	int target_pipe;
71*4882a593Smuzhiyun 	int ret;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun 	/*
76*4882a593Smuzhiyun 	 * The stalled endpoint is specified in the wIndex value. The endpoint
77*4882a593Smuzhiyun 	 * of the urb is the target of this clear_halt request (i.e., control
78*4882a593Smuzhiyun 	 * endpoint).
79*4882a593Smuzhiyun 	 */
80*4882a593Smuzhiyun 	target_endp = le16_to_cpu(req->wIndex) & 0x000f;
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun 	/* the stalled endpoint direction is IN or OUT?. USB_DIR_IN is 0x80.  */
83*4882a593Smuzhiyun 	target_dir = le16_to_cpu(req->wIndex) & 0x0080;
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun 	if (target_dir)
86*4882a593Smuzhiyun 		target_pipe = usb_rcvctrlpipe(urb->dev, target_endp);
87*4882a593Smuzhiyun 	else
88*4882a593Smuzhiyun 		target_pipe = usb_sndctrlpipe(urb->dev, target_endp);
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun 	ret = usb_clear_halt(urb->dev, target_pipe);
91*4882a593Smuzhiyun 	if (ret < 0)
92*4882a593Smuzhiyun 		dev_err(&urb->dev->dev,
93*4882a593Smuzhiyun 			"usb_clear_halt error: devnum %d endp %d ret %d\n",
94*4882a593Smuzhiyun 			urb->dev->devnum, target_endp, ret);
95*4882a593Smuzhiyun 	else
96*4882a593Smuzhiyun 		dev_info(&urb->dev->dev,
97*4882a593Smuzhiyun 			 "usb_clear_halt done: devnum %d endp %d\n",
98*4882a593Smuzhiyun 			 urb->dev->devnum, target_endp);
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	return ret;
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun 
tweak_set_interface_cmd(struct urb * urb)103*4882a593Smuzhiyun static int tweak_set_interface_cmd(struct urb *urb)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
106*4882a593Smuzhiyun 	__u16 alternate;
107*4882a593Smuzhiyun 	__u16 interface;
108*4882a593Smuzhiyun 	int ret;
109*4882a593Smuzhiyun 
110*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
111*4882a593Smuzhiyun 	alternate = le16_to_cpu(req->wValue);
112*4882a593Smuzhiyun 	interface = le16_to_cpu(req->wIndex);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	usbip_dbg_stub_rx("set_interface: inf %u alt %u\n",
115*4882a593Smuzhiyun 			  interface, alternate);
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	ret = usb_set_interface(urb->dev, interface, alternate);
118*4882a593Smuzhiyun 	if (ret < 0)
119*4882a593Smuzhiyun 		dev_err(&urb->dev->dev,
120*4882a593Smuzhiyun 			"usb_set_interface error: inf %u alt %u ret %d\n",
121*4882a593Smuzhiyun 			interface, alternate, ret);
122*4882a593Smuzhiyun 	else
123*4882a593Smuzhiyun 		dev_info(&urb->dev->dev,
124*4882a593Smuzhiyun 			"usb_set_interface done: inf %u alt %u\n",
125*4882a593Smuzhiyun 			interface, alternate);
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	return ret;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
tweak_set_configuration_cmd(struct urb * urb)130*4882a593Smuzhiyun static int tweak_set_configuration_cmd(struct urb *urb)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun 	struct stub_priv *priv = (struct stub_priv *) urb->context;
133*4882a593Smuzhiyun 	struct stub_device *sdev = priv->sdev;
134*4882a593Smuzhiyun 	struct usb_ctrlrequest *req;
135*4882a593Smuzhiyun 	__u16 config;
136*4882a593Smuzhiyun 	int err;
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	req = (struct usb_ctrlrequest *) urb->setup_packet;
139*4882a593Smuzhiyun 	config = le16_to_cpu(req->wValue);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 	usb_lock_device(sdev->udev);
142*4882a593Smuzhiyun 	err = usb_set_configuration(sdev->udev, config);
143*4882a593Smuzhiyun 	usb_unlock_device(sdev->udev);
144*4882a593Smuzhiyun 	if (err && err != -ENODEV)
145*4882a593Smuzhiyun 		dev_err(&sdev->udev->dev, "can't set config #%d, error %d\n",
146*4882a593Smuzhiyun 			config, err);
147*4882a593Smuzhiyun 	return 0;
148*4882a593Smuzhiyun }
149*4882a593Smuzhiyun 
tweak_reset_device_cmd(struct urb * urb)150*4882a593Smuzhiyun static int tweak_reset_device_cmd(struct urb *urb)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	struct stub_priv *priv = (struct stub_priv *) urb->context;
153*4882a593Smuzhiyun 	struct stub_device *sdev = priv->sdev;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	if (usb_lock_device_for_reset(sdev->udev, NULL) < 0) {
158*4882a593Smuzhiyun 		dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
159*4882a593Smuzhiyun 		return 0;
160*4882a593Smuzhiyun 	}
161*4882a593Smuzhiyun 	usb_reset_device(sdev->udev);
162*4882a593Smuzhiyun 	usb_unlock_device(sdev->udev);
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	return 0;
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun /*
168*4882a593Smuzhiyun  * clear_halt, set_interface, and set_configuration require special tricks.
169*4882a593Smuzhiyun  */
tweak_special_requests(struct urb * urb)170*4882a593Smuzhiyun static void tweak_special_requests(struct urb *urb)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun 	if (!urb || !urb->setup_packet)
173*4882a593Smuzhiyun 		return;
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	if (usb_pipetype(urb->pipe) != PIPE_CONTROL)
176*4882a593Smuzhiyun 		return;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	if (is_clear_halt_cmd(urb))
179*4882a593Smuzhiyun 		/* tweak clear_halt */
180*4882a593Smuzhiyun 		 tweak_clear_halt_cmd(urb);
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	else if (is_set_interface_cmd(urb))
183*4882a593Smuzhiyun 		/* tweak set_interface */
184*4882a593Smuzhiyun 		tweak_set_interface_cmd(urb);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	else if (is_set_configuration_cmd(urb))
187*4882a593Smuzhiyun 		/* tweak set_configuration */
188*4882a593Smuzhiyun 		tweak_set_configuration_cmd(urb);
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	else if (is_reset_device_cmd(urb))
191*4882a593Smuzhiyun 		tweak_reset_device_cmd(urb);
192*4882a593Smuzhiyun 	else
193*4882a593Smuzhiyun 		usbip_dbg_stub_rx("no need to tweak\n");
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun /*
197*4882a593Smuzhiyun  * stub_recv_unlink() unlinks the URB by a call to usb_unlink_urb().
198*4882a593Smuzhiyun  * By unlinking the urb asynchronously, stub_rx can continuously
199*4882a593Smuzhiyun  * process coming urbs.  Even if the urb is unlinked, its completion
200*4882a593Smuzhiyun  * handler will be called and stub_tx will send a return pdu.
201*4882a593Smuzhiyun  *
202*4882a593Smuzhiyun  * See also comments about unlinking strategy in vhci_hcd.c.
203*4882a593Smuzhiyun  */
stub_recv_cmd_unlink(struct stub_device * sdev,struct usbip_header * pdu)204*4882a593Smuzhiyun static int stub_recv_cmd_unlink(struct stub_device *sdev,
205*4882a593Smuzhiyun 				struct usbip_header *pdu)
206*4882a593Smuzhiyun {
207*4882a593Smuzhiyun 	int ret, i;
208*4882a593Smuzhiyun 	unsigned long flags;
209*4882a593Smuzhiyun 	struct stub_priv *priv;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	spin_lock_irqsave(&sdev->priv_lock, flags);
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	list_for_each_entry(priv, &sdev->priv_init, list) {
214*4882a593Smuzhiyun 		if (priv->seqnum != pdu->u.cmd_unlink.seqnum)
215*4882a593Smuzhiyun 			continue;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 		/*
218*4882a593Smuzhiyun 		 * This matched urb is not completed yet (i.e., be in
219*4882a593Smuzhiyun 		 * flight in usb hcd hardware/driver). Now we are
220*4882a593Smuzhiyun 		 * cancelling it. The unlinking flag means that we are
221*4882a593Smuzhiyun 		 * now not going to return the normal result pdu of a
222*4882a593Smuzhiyun 		 * submission request, but going to return a result pdu
223*4882a593Smuzhiyun 		 * of the unlink request.
224*4882a593Smuzhiyun 		 */
225*4882a593Smuzhiyun 		priv->unlinking = 1;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		/*
228*4882a593Smuzhiyun 		 * In the case that unlinking flag is on, prev->seqnum
229*4882a593Smuzhiyun 		 * is changed from the seqnum of the cancelling urb to
230*4882a593Smuzhiyun 		 * the seqnum of the unlink request. This will be used
231*4882a593Smuzhiyun 		 * to make the result pdu of the unlink request.
232*4882a593Smuzhiyun 		 */
233*4882a593Smuzhiyun 		priv->seqnum = pdu->base.seqnum;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun 		spin_unlock_irqrestore(&sdev->priv_lock, flags);
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 		/*
238*4882a593Smuzhiyun 		 * usb_unlink_urb() is now out of spinlocking to avoid
239*4882a593Smuzhiyun 		 * spinlock recursion since stub_complete() is
240*4882a593Smuzhiyun 		 * sometimes called in this context but not in the
241*4882a593Smuzhiyun 		 * interrupt context.  If stub_complete() is executed
242*4882a593Smuzhiyun 		 * before we call usb_unlink_urb(), usb_unlink_urb()
243*4882a593Smuzhiyun 		 * will return an error value. In this case, stub_tx
244*4882a593Smuzhiyun 		 * will return the result pdu of this unlink request
245*4882a593Smuzhiyun 		 * though submission is completed and actual unlinking
246*4882a593Smuzhiyun 		 * is not executed. OK?
247*4882a593Smuzhiyun 		 */
248*4882a593Smuzhiyun 		/* In the above case, urb->status is not -ECONNRESET,
249*4882a593Smuzhiyun 		 * so a driver in a client host will know the failure
250*4882a593Smuzhiyun 		 * of the unlink request ?
251*4882a593Smuzhiyun 		 */
252*4882a593Smuzhiyun 		for (i = priv->completed_urbs; i < priv->num_urbs; i++) {
253*4882a593Smuzhiyun 			ret = usb_unlink_urb(priv->urbs[i]);
254*4882a593Smuzhiyun 			if (ret != -EINPROGRESS)
255*4882a593Smuzhiyun 				dev_err(&priv->urbs[i]->dev->dev,
256*4882a593Smuzhiyun 					"failed to unlink %d/%d urb of seqnum %lu, ret %d\n",
257*4882a593Smuzhiyun 					i + 1, priv->num_urbs,
258*4882a593Smuzhiyun 					priv->seqnum, ret);
259*4882a593Smuzhiyun 		}
260*4882a593Smuzhiyun 		return 0;
261*4882a593Smuzhiyun 	}
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun 	usbip_dbg_stub_rx("seqnum %d is not pending\n",
264*4882a593Smuzhiyun 			  pdu->u.cmd_unlink.seqnum);
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	/*
267*4882a593Smuzhiyun 	 * The urb of the unlink target is not found in priv_init queue. It was
268*4882a593Smuzhiyun 	 * already completed and its results is/was going to be sent by a
269*4882a593Smuzhiyun 	 * CMD_RET pdu. In this case, usb_unlink_urb() is not needed. We only
270*4882a593Smuzhiyun 	 * return the completeness of this unlink request to vhci_hcd.
271*4882a593Smuzhiyun 	 */
272*4882a593Smuzhiyun 	stub_enqueue_ret_unlink(sdev, pdu->base.seqnum, 0);
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	spin_unlock_irqrestore(&sdev->priv_lock, flags);
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	return 0;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
valid_request(struct stub_device * sdev,struct usbip_header * pdu)279*4882a593Smuzhiyun static int valid_request(struct stub_device *sdev, struct usbip_header *pdu)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	struct usbip_device *ud = &sdev->ud;
282*4882a593Smuzhiyun 	int valid = 0;
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	if (pdu->base.devid == sdev->devid) {
285*4882a593Smuzhiyun 		spin_lock_irq(&ud->lock);
286*4882a593Smuzhiyun 		if (ud->status == SDEV_ST_USED) {
287*4882a593Smuzhiyun 			/* A request is valid. */
288*4882a593Smuzhiyun 			valid = 1;
289*4882a593Smuzhiyun 		}
290*4882a593Smuzhiyun 		spin_unlock_irq(&ud->lock);
291*4882a593Smuzhiyun 	}
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 	return valid;
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun 
stub_priv_alloc(struct stub_device * sdev,struct usbip_header * pdu)296*4882a593Smuzhiyun static struct stub_priv *stub_priv_alloc(struct stub_device *sdev,
297*4882a593Smuzhiyun 					 struct usbip_header *pdu)
298*4882a593Smuzhiyun {
299*4882a593Smuzhiyun 	struct stub_priv *priv;
300*4882a593Smuzhiyun 	struct usbip_device *ud = &sdev->ud;
301*4882a593Smuzhiyun 	unsigned long flags;
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	spin_lock_irqsave(&sdev->priv_lock, flags);
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	priv = kmem_cache_zalloc(stub_priv_cache, GFP_ATOMIC);
306*4882a593Smuzhiyun 	if (!priv) {
307*4882a593Smuzhiyun 		dev_err(&sdev->udev->dev, "alloc stub_priv\n");
308*4882a593Smuzhiyun 		spin_unlock_irqrestore(&sdev->priv_lock, flags);
309*4882a593Smuzhiyun 		usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
310*4882a593Smuzhiyun 		return NULL;
311*4882a593Smuzhiyun 	}
312*4882a593Smuzhiyun 
313*4882a593Smuzhiyun 	priv->seqnum = pdu->base.seqnum;
314*4882a593Smuzhiyun 	priv->sdev = sdev;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	/*
317*4882a593Smuzhiyun 	 * After a stub_priv is linked to a list_head,
318*4882a593Smuzhiyun 	 * our error handler can free allocated data.
319*4882a593Smuzhiyun 	 */
320*4882a593Smuzhiyun 	list_add_tail(&priv->list, &sdev->priv_init);
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 	spin_unlock_irqrestore(&sdev->priv_lock, flags);
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	return priv;
325*4882a593Smuzhiyun }
326*4882a593Smuzhiyun 
get_pipe(struct stub_device * sdev,struct usbip_header * pdu)327*4882a593Smuzhiyun static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu)
328*4882a593Smuzhiyun {
329*4882a593Smuzhiyun 	struct usb_device *udev = sdev->udev;
330*4882a593Smuzhiyun 	struct usb_host_endpoint *ep;
331*4882a593Smuzhiyun 	struct usb_endpoint_descriptor *epd = NULL;
332*4882a593Smuzhiyun 	int epnum = pdu->base.ep;
333*4882a593Smuzhiyun 	int dir = pdu->base.direction;
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	if (epnum < 0 || epnum > 15)
336*4882a593Smuzhiyun 		goto err_ret;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	if (dir == USBIP_DIR_IN)
339*4882a593Smuzhiyun 		ep = udev->ep_in[epnum & 0x7f];
340*4882a593Smuzhiyun 	else
341*4882a593Smuzhiyun 		ep = udev->ep_out[epnum & 0x7f];
342*4882a593Smuzhiyun 	if (!ep)
343*4882a593Smuzhiyun 		goto err_ret;
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 	epd = &ep->desc;
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun 	if (usb_endpoint_xfer_control(epd)) {
348*4882a593Smuzhiyun 		if (dir == USBIP_DIR_OUT)
349*4882a593Smuzhiyun 			return usb_sndctrlpipe(udev, epnum);
350*4882a593Smuzhiyun 		else
351*4882a593Smuzhiyun 			return usb_rcvctrlpipe(udev, epnum);
352*4882a593Smuzhiyun 	}
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	if (usb_endpoint_xfer_bulk(epd)) {
355*4882a593Smuzhiyun 		if (dir == USBIP_DIR_OUT)
356*4882a593Smuzhiyun 			return usb_sndbulkpipe(udev, epnum);
357*4882a593Smuzhiyun 		else
358*4882a593Smuzhiyun 			return usb_rcvbulkpipe(udev, epnum);
359*4882a593Smuzhiyun 	}
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	if (usb_endpoint_xfer_int(epd)) {
362*4882a593Smuzhiyun 		if (dir == USBIP_DIR_OUT)
363*4882a593Smuzhiyun 			return usb_sndintpipe(udev, epnum);
364*4882a593Smuzhiyun 		else
365*4882a593Smuzhiyun 			return usb_rcvintpipe(udev, epnum);
366*4882a593Smuzhiyun 	}
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	if (usb_endpoint_xfer_isoc(epd)) {
369*4882a593Smuzhiyun 		/* validate number of packets */
370*4882a593Smuzhiyun 		if (pdu->u.cmd_submit.number_of_packets < 0 ||
371*4882a593Smuzhiyun 		    pdu->u.cmd_submit.number_of_packets >
372*4882a593Smuzhiyun 		    USBIP_MAX_ISO_PACKETS) {
373*4882a593Smuzhiyun 			dev_err(&sdev->udev->dev,
374*4882a593Smuzhiyun 				"CMD_SUBMIT: isoc invalid num packets %d\n",
375*4882a593Smuzhiyun 				pdu->u.cmd_submit.number_of_packets);
376*4882a593Smuzhiyun 			return -1;
377*4882a593Smuzhiyun 		}
378*4882a593Smuzhiyun 		if (dir == USBIP_DIR_OUT)
379*4882a593Smuzhiyun 			return usb_sndisocpipe(udev, epnum);
380*4882a593Smuzhiyun 		else
381*4882a593Smuzhiyun 			return usb_rcvisocpipe(udev, epnum);
382*4882a593Smuzhiyun 	}
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun err_ret:
385*4882a593Smuzhiyun 	/* NOT REACHED */
386*4882a593Smuzhiyun 	dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum);
387*4882a593Smuzhiyun 	return -1;
388*4882a593Smuzhiyun }
389*4882a593Smuzhiyun 
masking_bogus_flags(struct urb * urb)390*4882a593Smuzhiyun static void masking_bogus_flags(struct urb *urb)
391*4882a593Smuzhiyun {
392*4882a593Smuzhiyun 	int				xfertype;
393*4882a593Smuzhiyun 	struct usb_device		*dev;
394*4882a593Smuzhiyun 	struct usb_host_endpoint	*ep;
395*4882a593Smuzhiyun 	int				is_out;
396*4882a593Smuzhiyun 	unsigned int	allowed;
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	if (!urb || urb->hcpriv || !urb->complete)
399*4882a593Smuzhiyun 		return;
400*4882a593Smuzhiyun 	dev = urb->dev;
401*4882a593Smuzhiyun 	if ((!dev) || (dev->state < USB_STATE_UNAUTHENTICATED))
402*4882a593Smuzhiyun 		return;
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	ep = (usb_pipein(urb->pipe) ? dev->ep_in : dev->ep_out)
405*4882a593Smuzhiyun 		[usb_pipeendpoint(urb->pipe)];
406*4882a593Smuzhiyun 	if (!ep)
407*4882a593Smuzhiyun 		return;
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	xfertype = usb_endpoint_type(&ep->desc);
410*4882a593Smuzhiyun 	if (xfertype == USB_ENDPOINT_XFER_CONTROL) {
411*4882a593Smuzhiyun 		struct usb_ctrlrequest *setup =
412*4882a593Smuzhiyun 			(struct usb_ctrlrequest *) urb->setup_packet;
413*4882a593Smuzhiyun 
414*4882a593Smuzhiyun 		if (!setup)
415*4882a593Smuzhiyun 			return;
416*4882a593Smuzhiyun 		is_out = !(setup->bRequestType & USB_DIR_IN) ||
417*4882a593Smuzhiyun 			!setup->wLength;
418*4882a593Smuzhiyun 	} else {
419*4882a593Smuzhiyun 		is_out = usb_endpoint_dir_out(&ep->desc);
420*4882a593Smuzhiyun 	}
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	/* enforce simple/standard policy */
423*4882a593Smuzhiyun 	allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_INTERRUPT |
424*4882a593Smuzhiyun 		   URB_DIR_MASK | URB_FREE_BUFFER);
425*4882a593Smuzhiyun 	switch (xfertype) {
426*4882a593Smuzhiyun 	case USB_ENDPOINT_XFER_BULK:
427*4882a593Smuzhiyun 		if (is_out)
428*4882a593Smuzhiyun 			allowed |= URB_ZERO_PACKET;
429*4882a593Smuzhiyun 		fallthrough;
430*4882a593Smuzhiyun 	default:			/* all non-iso endpoints */
431*4882a593Smuzhiyun 		if (!is_out)
432*4882a593Smuzhiyun 			allowed |= URB_SHORT_NOT_OK;
433*4882a593Smuzhiyun 		break;
434*4882a593Smuzhiyun 	case USB_ENDPOINT_XFER_ISOC:
435*4882a593Smuzhiyun 		allowed |= URB_ISO_ASAP;
436*4882a593Smuzhiyun 		break;
437*4882a593Smuzhiyun 	}
438*4882a593Smuzhiyun 	urb->transfer_flags &= allowed;
439*4882a593Smuzhiyun }
440*4882a593Smuzhiyun 
stub_recv_xbuff(struct usbip_device * ud,struct stub_priv * priv)441*4882a593Smuzhiyun static int stub_recv_xbuff(struct usbip_device *ud, struct stub_priv *priv)
442*4882a593Smuzhiyun {
443*4882a593Smuzhiyun 	int ret;
444*4882a593Smuzhiyun 	int i;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	for (i = 0; i < priv->num_urbs; i++) {
447*4882a593Smuzhiyun 		ret = usbip_recv_xbuff(ud, priv->urbs[i]);
448*4882a593Smuzhiyun 		if (ret < 0)
449*4882a593Smuzhiyun 			break;
450*4882a593Smuzhiyun 	}
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	return ret;
453*4882a593Smuzhiyun }
454*4882a593Smuzhiyun 
stub_recv_cmd_submit(struct stub_device * sdev,struct usbip_header * pdu)455*4882a593Smuzhiyun static void stub_recv_cmd_submit(struct stub_device *sdev,
456*4882a593Smuzhiyun 				 struct usbip_header *pdu)
457*4882a593Smuzhiyun {
458*4882a593Smuzhiyun 	struct stub_priv *priv;
459*4882a593Smuzhiyun 	struct usbip_device *ud = &sdev->ud;
460*4882a593Smuzhiyun 	struct usb_device *udev = sdev->udev;
461*4882a593Smuzhiyun 	struct scatterlist *sgl = NULL, *sg;
462*4882a593Smuzhiyun 	void *buffer = NULL;
463*4882a593Smuzhiyun 	unsigned long long buf_len;
464*4882a593Smuzhiyun 	int nents;
465*4882a593Smuzhiyun 	int num_urbs = 1;
466*4882a593Smuzhiyun 	int pipe = get_pipe(sdev, pdu);
467*4882a593Smuzhiyun 	int use_sg = pdu->u.cmd_submit.transfer_flags & URB_DMA_MAP_SG;
468*4882a593Smuzhiyun 	int support_sg = 1;
469*4882a593Smuzhiyun 	int np = 0;
470*4882a593Smuzhiyun 	int ret, i;
471*4882a593Smuzhiyun 
472*4882a593Smuzhiyun 	if (pipe == -1)
473*4882a593Smuzhiyun 		return;
474*4882a593Smuzhiyun 
475*4882a593Smuzhiyun 	/*
476*4882a593Smuzhiyun 	 * Smatch reported the error case where use_sg is true and buf_len is 0.
477*4882a593Smuzhiyun 	 * In this case, It adds SDEV_EVENT_ERROR_MALLOC and stub_priv will be
478*4882a593Smuzhiyun 	 * released by stub event handler and connection will be shut down.
479*4882a593Smuzhiyun 	 */
480*4882a593Smuzhiyun 	priv = stub_priv_alloc(sdev, pdu);
481*4882a593Smuzhiyun 	if (!priv)
482*4882a593Smuzhiyun 		return;
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 	buf_len = (unsigned long long)pdu->u.cmd_submit.transfer_buffer_length;
485*4882a593Smuzhiyun 
486*4882a593Smuzhiyun 	if (use_sg && !buf_len) {
487*4882a593Smuzhiyun 		dev_err(&udev->dev, "sg buffer with zero length\n");
488*4882a593Smuzhiyun 		goto err_malloc;
489*4882a593Smuzhiyun 	}
490*4882a593Smuzhiyun 
491*4882a593Smuzhiyun 	/* allocate urb transfer buffer, if needed */
492*4882a593Smuzhiyun 	if (buf_len) {
493*4882a593Smuzhiyun 		if (use_sg) {
494*4882a593Smuzhiyun 			sgl = sgl_alloc(buf_len, GFP_KERNEL, &nents);
495*4882a593Smuzhiyun 			if (!sgl)
496*4882a593Smuzhiyun 				goto err_malloc;
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun 			/* Check if the server's HCD supports SG */
499*4882a593Smuzhiyun 			if (!udev->bus->sg_tablesize) {
500*4882a593Smuzhiyun 				/*
501*4882a593Smuzhiyun 				 * If the server's HCD doesn't support SG, break
502*4882a593Smuzhiyun 				 * a single SG request into several URBs and map
503*4882a593Smuzhiyun 				 * each SG list entry to corresponding URB
504*4882a593Smuzhiyun 				 * buffer. The previously allocated SG list is
505*4882a593Smuzhiyun 				 * stored in priv->sgl (If the server's HCD
506*4882a593Smuzhiyun 				 * support SG, SG list is stored only in
507*4882a593Smuzhiyun 				 * urb->sg) and it is used as an indicator that
508*4882a593Smuzhiyun 				 * the server split single SG request into
509*4882a593Smuzhiyun 				 * several URBs. Later, priv->sgl is used by
510*4882a593Smuzhiyun 				 * stub_complete() and stub_send_ret_submit() to
511*4882a593Smuzhiyun 				 * reassemble the divied URBs.
512*4882a593Smuzhiyun 				 */
513*4882a593Smuzhiyun 				support_sg = 0;
514*4882a593Smuzhiyun 				num_urbs = nents;
515*4882a593Smuzhiyun 				priv->completed_urbs = 0;
516*4882a593Smuzhiyun 				pdu->u.cmd_submit.transfer_flags &=
517*4882a593Smuzhiyun 								~URB_DMA_MAP_SG;
518*4882a593Smuzhiyun 			}
519*4882a593Smuzhiyun 		} else {
520*4882a593Smuzhiyun 			buffer = kzalloc(buf_len, GFP_KERNEL);
521*4882a593Smuzhiyun 			if (!buffer)
522*4882a593Smuzhiyun 				goto err_malloc;
523*4882a593Smuzhiyun 		}
524*4882a593Smuzhiyun 	}
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	/* allocate urb array */
527*4882a593Smuzhiyun 	priv->num_urbs = num_urbs;
528*4882a593Smuzhiyun 	priv->urbs = kmalloc_array(num_urbs, sizeof(*priv->urbs), GFP_KERNEL);
529*4882a593Smuzhiyun 	if (!priv->urbs)
530*4882a593Smuzhiyun 		goto err_urbs;
531*4882a593Smuzhiyun 
532*4882a593Smuzhiyun 	/* setup a urb */
533*4882a593Smuzhiyun 	if (support_sg) {
534*4882a593Smuzhiyun 		if (usb_pipeisoc(pipe))
535*4882a593Smuzhiyun 			np = pdu->u.cmd_submit.number_of_packets;
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 		priv->urbs[0] = usb_alloc_urb(np, GFP_KERNEL);
538*4882a593Smuzhiyun 		if (!priv->urbs[0])
539*4882a593Smuzhiyun 			goto err_urb;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 		if (buf_len) {
542*4882a593Smuzhiyun 			if (use_sg) {
543*4882a593Smuzhiyun 				priv->urbs[0]->sg = sgl;
544*4882a593Smuzhiyun 				priv->urbs[0]->num_sgs = nents;
545*4882a593Smuzhiyun 				priv->urbs[0]->transfer_buffer = NULL;
546*4882a593Smuzhiyun 			} else {
547*4882a593Smuzhiyun 				priv->urbs[0]->transfer_buffer = buffer;
548*4882a593Smuzhiyun 			}
549*4882a593Smuzhiyun 		}
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 		/* copy urb setup packet */
552*4882a593Smuzhiyun 		priv->urbs[0]->setup_packet = kmemdup(&pdu->u.cmd_submit.setup,
553*4882a593Smuzhiyun 					8, GFP_KERNEL);
554*4882a593Smuzhiyun 		if (!priv->urbs[0]->setup_packet) {
555*4882a593Smuzhiyun 			usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
556*4882a593Smuzhiyun 			return;
557*4882a593Smuzhiyun 		}
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 		usbip_pack_pdu(pdu, priv->urbs[0], USBIP_CMD_SUBMIT, 0);
560*4882a593Smuzhiyun 	} else {
561*4882a593Smuzhiyun 		for_each_sg(sgl, sg, nents, i) {
562*4882a593Smuzhiyun 			priv->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
563*4882a593Smuzhiyun 			/* The URBs which is previously allocated will be freed
564*4882a593Smuzhiyun 			 * in stub_device_cleanup_urbs() if error occurs.
565*4882a593Smuzhiyun 			 */
566*4882a593Smuzhiyun 			if (!priv->urbs[i])
567*4882a593Smuzhiyun 				goto err_urb;
568*4882a593Smuzhiyun 
569*4882a593Smuzhiyun 			usbip_pack_pdu(pdu, priv->urbs[i], USBIP_CMD_SUBMIT, 0);
570*4882a593Smuzhiyun 			priv->urbs[i]->transfer_buffer = sg_virt(sg);
571*4882a593Smuzhiyun 			priv->urbs[i]->transfer_buffer_length = sg->length;
572*4882a593Smuzhiyun 		}
573*4882a593Smuzhiyun 		priv->sgl = sgl;
574*4882a593Smuzhiyun 	}
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 	for (i = 0; i < num_urbs; i++) {
577*4882a593Smuzhiyun 		/* set other members from the base header of pdu */
578*4882a593Smuzhiyun 		priv->urbs[i]->context = (void *) priv;
579*4882a593Smuzhiyun 		priv->urbs[i]->dev = udev;
580*4882a593Smuzhiyun 		priv->urbs[i]->pipe = pipe;
581*4882a593Smuzhiyun 		priv->urbs[i]->complete = stub_complete;
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 		/* no need to submit an intercepted request, but harmless? */
584*4882a593Smuzhiyun 		tweak_special_requests(priv->urbs[i]);
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun 		masking_bogus_flags(priv->urbs[i]);
587*4882a593Smuzhiyun 	}
588*4882a593Smuzhiyun 
589*4882a593Smuzhiyun 	if (stub_recv_xbuff(ud, priv) < 0)
590*4882a593Smuzhiyun 		return;
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 	if (usbip_recv_iso(ud, priv->urbs[0]) < 0)
593*4882a593Smuzhiyun 		return;
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	/* urb is now ready to submit */
596*4882a593Smuzhiyun 	for (i = 0; i < priv->num_urbs; i++) {
597*4882a593Smuzhiyun 		ret = usb_submit_urb(priv->urbs[i], GFP_KERNEL);
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 		if (ret == 0)
600*4882a593Smuzhiyun 			usbip_dbg_stub_rx("submit urb ok, seqnum %u\n",
601*4882a593Smuzhiyun 					pdu->base.seqnum);
602*4882a593Smuzhiyun 		else {
603*4882a593Smuzhiyun 			dev_err(&udev->dev, "submit_urb error, %d\n", ret);
604*4882a593Smuzhiyun 			usbip_dump_header(pdu);
605*4882a593Smuzhiyun 			usbip_dump_urb(priv->urbs[i]);
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 			/*
608*4882a593Smuzhiyun 			 * Pessimistic.
609*4882a593Smuzhiyun 			 * This connection will be discarded.
610*4882a593Smuzhiyun 			 */
611*4882a593Smuzhiyun 			usbip_event_add(ud, SDEV_EVENT_ERROR_SUBMIT);
612*4882a593Smuzhiyun 			break;
613*4882a593Smuzhiyun 		}
614*4882a593Smuzhiyun 	}
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	usbip_dbg_stub_rx("Leave\n");
617*4882a593Smuzhiyun 	return;
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun err_urb:
620*4882a593Smuzhiyun 	kfree(priv->urbs);
621*4882a593Smuzhiyun err_urbs:
622*4882a593Smuzhiyun 	kfree(buffer);
623*4882a593Smuzhiyun 	sgl_free(sgl);
624*4882a593Smuzhiyun err_malloc:
625*4882a593Smuzhiyun 	usbip_event_add(ud, SDEV_EVENT_ERROR_MALLOC);
626*4882a593Smuzhiyun }
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun /* recv a pdu */
stub_rx_pdu(struct usbip_device * ud)629*4882a593Smuzhiyun static void stub_rx_pdu(struct usbip_device *ud)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun 	int ret;
632*4882a593Smuzhiyun 	struct usbip_header pdu;
633*4882a593Smuzhiyun 	struct stub_device *sdev = container_of(ud, struct stub_device, ud);
634*4882a593Smuzhiyun 	struct device *dev = &sdev->udev->dev;
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 	usbip_dbg_stub_rx("Enter\n");
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 	memset(&pdu, 0, sizeof(pdu));
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 	/* receive a pdu header */
641*4882a593Smuzhiyun 	ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
642*4882a593Smuzhiyun 	if (ret != sizeof(pdu)) {
643*4882a593Smuzhiyun 		dev_err(dev, "recv a header, %d\n", ret);
644*4882a593Smuzhiyun 		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
645*4882a593Smuzhiyun 		return;
646*4882a593Smuzhiyun 	}
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 	usbip_header_correct_endian(&pdu, 0);
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 	if (usbip_dbg_flag_stub_rx)
651*4882a593Smuzhiyun 		usbip_dump_header(&pdu);
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 	if (!valid_request(sdev, &pdu)) {
654*4882a593Smuzhiyun 		dev_err(dev, "recv invalid request\n");
655*4882a593Smuzhiyun 		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
656*4882a593Smuzhiyun 		return;
657*4882a593Smuzhiyun 	}
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	switch (pdu.base.command) {
660*4882a593Smuzhiyun 	case USBIP_CMD_UNLINK:
661*4882a593Smuzhiyun 		stub_recv_cmd_unlink(sdev, &pdu);
662*4882a593Smuzhiyun 		break;
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	case USBIP_CMD_SUBMIT:
665*4882a593Smuzhiyun 		stub_recv_cmd_submit(sdev, &pdu);
666*4882a593Smuzhiyun 		break;
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 	default:
669*4882a593Smuzhiyun 		/* NOTREACHED */
670*4882a593Smuzhiyun 		dev_err(dev, "unknown pdu\n");
671*4882a593Smuzhiyun 		usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
672*4882a593Smuzhiyun 		break;
673*4882a593Smuzhiyun 	}
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun 
stub_rx_loop(void * data)676*4882a593Smuzhiyun int stub_rx_loop(void *data)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun 	struct usbip_device *ud = data;
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun 	while (!kthread_should_stop()) {
681*4882a593Smuzhiyun 		if (usbip_event_happened(ud))
682*4882a593Smuzhiyun 			break;
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 		stub_rx_pdu(ud);
685*4882a593Smuzhiyun 	}
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 	return 0;
688*4882a593Smuzhiyun }
689