/*
 * Intel Wireless WiMAX Connection 2400m
 * USB specific TX handling
 *
 *
 * Copyright (C) 2007-2008 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * Intel Corporation <linux-wimax@intel.com>
 * Yanir Lubetkin <yanirx.lubetkin@intel.com>
 *  - Initial implementation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *  - Split transport/device specific
 *
 *
 * Takes the TX messages in the i2400m's driver TX FIFO and sends them
 * to the device until there are no more.
 *
 * If we fail sending a message, we just drop it; there isn't much we
 * can do at that point. We could retry, but the USB stack has already
 * retried and still failed, so there is not much of a point. Besides,
 * most of the traffic is network traffic, which has its own recovery
 * mechanisms for dropped packets.
 *
 * For sending we obtain a message from the TX FIFO, push it out the
 * USB bulk-out endpoint, tell the TX FIFO code we have sent it, query
 * for another one, etc... until there are none left.
 *
 * We use a kernel thread so we can call usb_autopm_get_interface()
 * and usb_autopm_put_interface() around each transaction; this way,
 * when the device goes idle, it will autosuspend. A thread also has
 * less overhead than a dedicated workqueue, as it is used for a
 * single task.
 *
 * ROADMAP
 *
 * i2400mu_tx_setup()
 * i2400mu_tx_release()
 *
 * i2400mu_bus_tx_kick()	- Called by the tx.c code when there
 *				  is new data in the FIFO.
 * i2400mu_txd()
 *   i2400m_tx_msg_get()
 *   i2400m_tx_msg_sent()
 */
#include "i2400m-usb.h"


#define D_SUBMODULE tx
#include "usb-debug-levels.h"


/*
 * Get the next TX message in the TX FIFO and send it to the device
 *
 * Note that each call consumes one message, whether the send succeeds
 * or fails (we have no real way to retry or complain).
 *
 * Return: 0 if ok, < 0 errno code on hard error.
 */
static
int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
	       size_t tx_msg_size)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, sent_size, do_autopm;
	struct usb_endpoint_descriptor *epd;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
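	/*
	 * Hold an autopm reference across the transfer (unless
	 * autosuspend handling is disabled via do_autopm); this keeps
	 * the device resumed while we are writing to it.
	 */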
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "TX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
	usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
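	/*
	 * Synchronous bulk-out transfer with a 200 ms timeout;
	 * usb_mark_last_busy() afterwards refreshes the autosuspend
	 * idle timer, since we just talked to the device.
	 */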
	result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
			      tx_msg, tx_msg_size, &sent_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (sent_size != tx_msg_size) {	/* Too short? drop it */
			dev_err(dev, "TX: short write (%d B vs %zu "
				"expected)\n", sent_size, tx_msg_size);
			result = -EIO;
		}
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking on our
		 * requests. Clear it and give it some time. If stalls
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: too many stalls in "
				"URB; resetting device\n");
			usb_queue_reset_device(i2400mu->usb_iface);
		} else {
			usb_clear_halt(i2400mu->usb_dev, usb_pipe);
			msleep(10);	/* give the device some time */
			goto retry;
		}
		fallthrough;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:		/* and exit */
	case -ECONNRESET:
		result = -ESHUTDOWN;
		break;
	default:			/* Some error? */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "TX: maximum errors in URB "
				"exceeded; resetting device\n");
			usb_queue_reset_device(i2400mu->usb_iface);
		} else {
			dev_err(dev, "TX: cannot send URB; retrying. "
				"tx_msg @%zu %zu B [%d sent]: %d\n",
				(void *) tx_msg - i2400m->tx_buf,
				tx_msg_size, sent_size, result);
			goto retry;
		}
	}
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
	return result;
}


/*
 * TX kthread: pull TX messages off the TX FIFO and send them to the
 * device until we are told to stop
 *
 * Note that errors from i2400mu_tx() are ignored; it only fails on
 * hard error and the message has been consumed either way, so there
 * is nothing useful we could do with it here (see its doc).
 *
 * Return: 0
 */
static
int i2400mu_txd(void *_i2400mu)
{
	struct i2400mu *i2400mu = _i2400mu;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct i2400m_msg_hdr *tx_msg;
	size_t tx_msg_size;
	unsigned long flags;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);

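	/*
	 * Publish ourselves as the TX kthread; i2400mu_tx_release()
	 * reads this pointer (under tx_lock) to know whom to stop.
	 */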
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	BUG_ON(i2400mu->tx_kthread != NULL);
	i2400mu->tx_kthread = current;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	while (1) {
		d_printf(2, dev, "TX: waiting for messages\n");
		tx_msg = NULL;
		wait_event_interruptible(
			i2400mu->tx_wq,
			(kthread_should_stop()	/* check this first! */
			 || (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size)))
			);
		if (kthread_should_stop())
			break;
		WARN_ON(tx_msg == NULL);	/* should not happen... */
		d_printf(2, dev, "TX: submitting %zu bytes\n", tx_msg_size);
		d_dump(5, dev, tx_msg, tx_msg_size);
		/* Yeah, we ignore errors ... not much we can do */
		i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
		i2400m_tx_msg_sent(i2400m);	/* ack it, advance the FIFO */
	}

	spin_lock_irqsave(&i2400m->tx_lock, flags);
	i2400mu->tx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);

	d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
	return 0;
}


/*
 * i2400m TX engine notifies us that there is data in the FIFO ready
 * for TX
 *
 * Just wake up the TX kthread; if it is already busy sending, it will
 * pick up the new data when it queries the FIFO for the next message.
 */
void i2400mu_bus_tx_kick(struct i2400m *i2400m)
{
	struct i2400mu *i2400mu = container_of(i2400m, struct i2400mu, i2400m);
	struct device *dev = &i2400mu->usb_iface->dev;

	d_fnstart(3, dev, "(i2400m %p) = void\n", i2400m);
	wake_up_all(&i2400mu->tx_wq);
	d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
}


int i2400mu_tx_setup(struct i2400mu *i2400mu)
{
	int result = 0;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = &i2400mu->usb_iface->dev;
	struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
	struct task_struct *kthread;

	kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
			      wimax_dev->name);
	/* the kthread function sets i2400mu->tx_kthread */
	if (IS_ERR(kthread)) {
		result = PTR_ERR(kthread);
		dev_err(dev, "TX: cannot start thread: %d\n", result);
	}
	return result;
}

void i2400mu_tx_release(struct i2400mu *i2400mu)
{
	unsigned long flags;
	struct i2400m *i2400m = &i2400mu->i2400m;
	struct device *dev = i2400m_dev(i2400m);
	struct task_struct *kthread;

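	/*
	 * Fetch and clear the kthread pointer under tx_lock; it may be
	 * NULL if the thread was never started or has already cleared
	 * itself on exit.
	 */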
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	kthread = i2400mu->tx_kthread;
	i2400mu->tx_kthread = NULL;
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (kthread)
		kthread_stop(kthread);
	else
		d_printf(1, dev, "TX: kthread had already exited\n");
}