/*
 * Intel Wireless WiMAX Connection 2400m
 * USB RX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Use skb_clone(), break up processing in chunks
 *  - Split transport/device specific
 *  - Make buffer size dynamic to exert less memory pressure
 *
 *
 * This handles the RX path on USB.
 *
 * When a notification is received that says 'there is RX data ready',
 * we call i2400mu_rx_kick(); that wakes up the RX kthread, which
 * reads a buffer from USB and passes it to i2400m_rx() in the generic
 * handling code. The RX buffer has a specific format that is
 * described in rx.c.
 *
 * We use a kernel thread in a loop because:
 *
 *  - we want to be able to call the USB power management get/put
 *    functions (blocking) before each transaction.
 *
 *  - We might get a lot of notifications and we don't want to submit
 *    a zillion reads; by serializing, we are throttling.
 *
 *  - RX data processing can get heavy enough that it is not
 *    appropriate to do it in the USB callback; thus we run it in
 *    process context.
 *
 * We provide a read buffer of an arbitrary size (short of a page); if
 * the callback reports -EOVERFLOW, it means it was too small, so we
 * just double the size and retry (being careful to append, as
 * sometimes the device provided some data). Every now and then we
 * check if the average message size is smaller than half of the
 * current buffer size and if so, we halve it. At the end, the size of
 * the preallocated buffer should be following the average received
 * transaction size, adapting dynamically to it.
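 *
 * An illustrative trace (numbers assumed, not from a real device):
 * with a 1020 byte buffer, a 2000 byte transaction returns -EOVERFLOW,
 * so the buffer grows to 2040 and the read is retried; if the next
 * hundred messages then average only 600 bytes, 600 < 2040 / 2, so
 * the buffer is halved back to 1020.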
 *
 * ROADMAP
 *
 * i2400mu_rx_kick()		Called from notif.c when we get a
 *				'data ready' notification
 * i2400mu_rxd()		Kernel RX daemon
 *   i2400mu_rx()		Receive USB data
 *   i2400m_rx()		Send data to generic i2400m RX handling
 *
 * i2400mu_rx_setup()		called from i2400mu_bus_dev_start()
 *
 * i2400mu_rx_release()		called from i2400mu_bus_dev_stop()
 */
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include "i2400m-usb.h"


#define D_SUBMODULE rx
#include "usb-debug-levels.h"

/*
 * Dynamic RX size
 *
 * We can't let the rx_size be a multiple of 512 bytes (the RX
 * endpoint's max packet size). On some USB host controllers (we
 * haven't been able to fully characterize which), if the device is
 * about to send (for example) X bytes and we only post a buffer to
 * receive n*512, it will fail to mark that as babble (so that
 * i2400mu_rx() [case -EOVERFLOW] can resize the buffer and get the
 * rest).
 *
 * So on growing or shrinking, if it is a multiple of the
 * maxpacketsize, we remove some (instead of increasing some, so in a
 * buddy allocator we try to waste less space).
 *
 * Note we also need a hook for this on i2400mu_rx() -- when we do the
 * first read, we are sure we won't hit this spot because
 * i2400mu->rx_size has been set properly. However, if we have to
 * double because of -EOVERFLOW, when we launch the read to get the
 * rest of the data, we *have* to make sure that also is not a
 * multiple of the max_pkt_size.
 */
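
/*
 * Illustrative arithmetic for the adjustment above (derived from the
 * helpers below, not from a hardware spec): doubling 1020 gives 2040,
 * which is not a multiple of 512, so it is used as-is; doubling 1024
 * would give 2048 = 4 * 512, so 8 bytes are shaved off and 2040 is
 * used instead.
 */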

static
size_t i2400mu_rx_size_grow(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;
	size_t rx_size;
	const size_t max_pkt_size = 512;

	rx_size = 2 * i2400mu->rx_size;
	if (rx_size % max_pkt_size == 0) {
		rx_size -= 8;
		d_printf(1, dev,
			 "RX: expected size grew to %zu [adjusted -8] "
			 "from %zu\n",
			 rx_size, i2400mu->rx_size);
	} else
		d_printf(1, dev,
			 "RX: expected size grew to %zu from %zu\n",
			 rx_size, i2400mu->rx_size);
	return rx_size;
}


static
void i2400mu_rx_size_maybe_shrink(struct i2400mu *i2400mu)
{
	const size_t max_pkt_size = 512;
	struct device *dev = &i2400mu->usb_iface->dev;

	if (unlikely(i2400mu->rx_size_cnt >= 100
		     && i2400mu->rx_size_auto_shrink)) {
		size_t avg_rx_size =
			i2400mu->rx_size_acc / i2400mu->rx_size_cnt;
		size_t new_rx_size = i2400mu->rx_size / 2;
		if (avg_rx_size < new_rx_size) {
			if (new_rx_size % max_pkt_size == 0) {
				new_rx_size -= 8;
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "[adjusted -8] from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			} else
				d_printf(1, dev,
					 "RX: expected size shrank to %zu "
					 "from %zu\n",
					 new_rx_size, i2400mu->rx_size);
			i2400mu->rx_size = new_rx_size;
			i2400mu->rx_size_cnt = 0;
			i2400mu->rx_size_acc = i2400mu->rx_size;
		}
	}
}

/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (skb reallocation), we just drop it and
 * hope for the next invocation to solve it.
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
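/*
 * The expected caller pattern (see i2400mu_rxd() below): check
 * IS_ERR() first for hard failures, then treat a NULL or zero-length
 * skb as an ignorable condition and just retry on the next kick.
 */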
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
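	/*
	 * Synchronous bulk read into the tail room of the skb;
	 * usb_bulk_msg() stores the number of bytes actually
	 * transferred in read_size, and 200 is the timeout in
	 * milliseconds.
	 */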
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "RX: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) skb_end_offset(new_skb));
		goto retry;
	}
		/* In most cases, this happens due to the hardware scheduling
		 * a read when there was no data - unfortunately, we have no
		 * way to tell this timeout from a USB timeout. So we just
		 * ignore it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}


/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify if
 * the last 100 messages received were smaller than half of the
 * current RX buffer size. In that case, the RX buffer size is
 * halved. This helps lower the pressure on the memory allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
	int result = 0;
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct net_device *net_dev = i2400m->wimax_dev.net_dev;
	size_t pending;
	int rx_size;
	struct sk_buff *rx_skb;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
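	/*
	 * Register ourselves under rx_lock; i2400mu_rx_release() takes
	 * the same lock to fetch and clear this pointer before calling
	 * kthread_stop(), so the two cannot race over a stale task
	 * pointer.
	 */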
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	BUG_ON(i2400mu->rx_kthread != NULL);
	i2400mu->rx_kthread = current;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	while (1) {
		d_printf(2, dev, "RX: waiting for messages\n");
		pending = 0;
		wait_event_interruptible(
			i2400mu->rx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (pending = atomic_read(&i2400mu->rx_pending_count)))
			);
		if (kthread_should_stop())
			break;
		if (pending == 0)
			continue;
		rx_size = i2400mu->rx_size;
		d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
		rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
		if (rx_skb == NULL) {
			dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
				rx_size);
			msleep(50);	/* give it some time? */
			continue;
		}

		/* Receive the message with the payloads */
		rx_skb = i2400mu_rx(i2400mu, rx_skb);
		result = PTR_ERR(rx_skb);
		if (IS_ERR(rx_skb))
			goto out;
		atomic_dec(&i2400mu->rx_pending_count);
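		/*
		 * One pending kick has been serviced; if more kicks
		 * arrived meanwhile, the counter is still non-zero and
		 * the wait above falls through immediately.
		 */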
		if (rx_skb == NULL || rx_skb->len == 0) {
			/* some "ignorable" condition */
			kfree_skb(rx_skb);
			continue;
		}

		/* Deliver the message to the generic i2400m code */
		i2400mu->rx_size_cnt++;
		i2400mu->rx_size_acc += rx_skb->len;
		result = i2400m_rx(i2400m, rx_skb);
		if (result == -EIO
		    && edc_inc(&i2400mu->urb_edc,
			       EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			goto error_reset;
		}

		/* Maybe adjust RX buffer size */
		i2400mu_rx_size_maybe_shrink(i2400mu);
	}
	result = 0;
out:
	spin_lock_irqsave(&i2400m->rx_lock, flags);
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;

error_reset:
	dev_err(dev, "RX: maximum errors in received buffer exceeded; "
		"resetting device\n");
	usb_queue_reset_device(i2400mu->usb_iface);
	goto out;
}


/*
 * Start reading from the device
 *
 * @i2400mu: device instance
 *
 * Notify the RX thread that there is data pending.
 */
void i2400mu_rx_kick(struct i2400mu *i2400mu)
{
	struct device *dev = &i2400mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400mu %p)\n", i2400mu);
	atomic_inc(&i2400mu->rx_pending_count);
	wake_up_all(&i2400mu->rx_wq);
	d_fnend(3, dev, "(i2400mu %p) = void\n", i2400mu);
}


int i2400mu_rx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
			      wimax_dev->name);
	/* the kthread function sets i2400mu->rx_kthread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "RX: cannot start thread: %d\n", result);
	}
	return result;
}


void i2400mu_rx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

	spin_lock_irqsave(&i2400m->rx_lock, flags);
	kthread = i2400mu->rx_kthread;
	i2400mu->rx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->rx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "RX: kthread had already exited\n");
}