// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl>
 */

#include "mt7601u.h"
#include "dma.h"
#include "usb.h"
#include "trace.h"

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp);

static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len)
{
	const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data;
	unsigned int hdrlen;

	if (unlikely(len < 10))
		return 0;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	if (unlikely(hdrlen > len))
		return 0;
	return hdrlen;
}

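/* Build an skb for a single RX segment. With paged RX only the 802.11
 * header (plus a small tail) is copied into the skb head and the rest of
 * the frame is attached as a fragment referencing the receive page.
 */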
static struct sk_buff *
mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi,
			void *data, u32 seg_len, u32 truesize, struct page *p)
{
	struct sk_buff *skb;
	u32 true_len, hdr_len = 0, copy, frag;

	skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	true_len = mt76_mac_process_rx(dev, skb, data, rxwi);
	if (!true_len || true_len > seg_len)
		goto bad_frame;

	hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len);
	if (!hdr_len)
		goto bad_frame;

	if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) {
		skb_put_data(skb, data, hdr_len);

		data += hdr_len + 2;
		true_len -= hdr_len;
		hdr_len = 0;
	}

	/* If not doing paged RX, the allocated skb always has enough space */
	copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8;
	frag = true_len - copy;

	skb_put_data(skb, data, copy);
	data += copy;

	if (frag) {
		skb_add_rx_frag(skb, 0, p, data - page_address(p),
				frag, truesize);
		get_page(p);
	}

	return skb;

bad_frame:
	dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n",
			    true_len, hdr_len);
	dev_kfree_skb(skb);
	return NULL;
}

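/* Process one RX segment: read the FCE descriptor from the end of the
 * segment, skip the DMA header, parse the RXWI and pass the resulting
 * skb up to mac80211.
 */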
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	spin_lock(&dev->mac_lock);
	ieee80211_rx(dev->hw, skb);
	spin_unlock(&dev->mac_lock);
}

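/* Return the total length (DMA headers included) of the next segment in
 * an aggregated RX buffer, or 0 if the remaining data is not a valid
 * segment.
 */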
static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len)
{
	u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN +
		sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN;
	u16 dma_len = get_unaligned_le16(data);

	if (data_len < min_seg_len ||
	    WARN_ON_ONCE(!dma_len) ||
	    WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) ||
	    WARN_ON_ONCE(dma_len & 0x3))
		return 0;

	return MT_DMA_HDRS + dma_len;
}

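/* Process all segments of a completed RX URB. If the buffer holds enough
 * data to be worth zero-copy, a replacement page is allocated up front so
 * the current one can remain referenced by the skbs built from it.
 */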
static void
mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e)
{
	u32 seg_len, data_len = e->urb->actual_length;
	u8 *data = page_address(e->p);
	struct page *new_p = NULL;
	int cnt = 0;

	if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state))
		return;

	/* Copy if there is very little data in the buffer. */
	if (data_len > 512)
		new_p = dev_alloc_pages(MT_RX_ORDER);

	while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) {
		mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL);

		data_len -= seg_len;
		data += seg_len;
		cnt++;
	}

	if (cnt > 1)
		trace_mt_rx_dma_aggr(dev, cnt, !!new_p);

	if (new_p) {
		/* we have one extra ref from the allocator */
		put_page(e->p);
		e->p = new_p;
	}
}

static struct mt7601u_dma_buf_rx *
mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev)
{
	struct mt7601u_rx_queue *q = &dev->rx_q;
	struct mt7601u_dma_buf_rx *buf = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->rx_lock, flags);

	if (!q->pending)
		goto out;

	buf = &q->e[q->start];
	q->pending--;
	q->start = (q->start + 1) % q->entries;
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);

	return buf;
}

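/* RX URB completion - runs in interrupt context. Mark the queue entry as
 * pending and defer the actual processing to the RX tasklet.
 */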
static void mt7601u_complete_rx(struct urb *urb)
{
	struct mt7601u_dev *dev = urb->context;
	struct mt7601u_rx_queue *q = &dev->rx_q;
	unsigned long flags;

	/* do not schedule rx tasklet if urb has been unlinked
	 * or the device has been removed
	 */
	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "rx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->rx_lock, flags);
	if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch"))
		goto out;

	q->end = (q->end + 1) % q->entries;
	q->pending++;
	tasklet_schedule(&dev->rx_tasklet);
out:
	spin_unlock_irqrestore(&dev->rx_lock, flags);
}

static void mt7601u_rx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct mt7601u_dma_buf_rx *e;

	while ((e = mt7601u_rx_get_pending_entry(dev))) {
		if (e->urb->status)
			continue;

		mt7601u_rx_process_entry(dev, e);
		mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC);
	}
}

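/* TX URB completion. Queue the skb for status reporting from the TX
 * tasklet and wake the mac80211 queue once the ring has drained enough.
 */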
static void mt7601u_complete_tx(struct urb *urb)
{
	struct mt7601u_tx_queue *q = urb->context;
	struct mt7601u_dev *dev = q->dev;
	struct sk_buff *skb;
	unsigned long flags;

	switch (urb->status) {
	case -ECONNRESET:
	case -ESHUTDOWN:
	case -ENOENT:
		return;
	default:
		dev_err_ratelimited(dev->dev, "tx urb failed: %d\n",
				    urb->status);
		fallthrough;
	case 0:
		break;
	}

	spin_lock_irqsave(&dev->tx_lock, flags);
	if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch"))
		goto out;

	skb = q->e[q->start].skb;
	q->e[q->start].skb = NULL;
	trace_mt_tx_dma_done(dev, skb);

	__skb_queue_tail(&dev->tx_skb_done, skb);
	tasklet_schedule(&dev->tx_tasklet);

	if (q->used == q->entries - q->entries / 8)
		ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb));

	q->start = (q->start + 1) % q->entries;
	q->used--;
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}

static void mt7601u_tx_tasklet(unsigned long data)
{
	struct mt7601u_dev *dev = (struct mt7601u_dev *) data;
	struct sk_buff_head skbs;
	unsigned long flags;

	__skb_queue_head_init(&skbs);

	spin_lock_irqsave(&dev->tx_lock, flags);

	set_bit(MT7601U_STATE_MORE_STATS, &dev->state);
	if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state))
		queue_delayed_work(dev->stat_wq, &dev->stat_work,
				   msecs_to_jiffies(10));

	skb_queue_splice_init(&dev->tx_skb_done, &skbs);

	spin_unlock_irqrestore(&dev->tx_lock, flags);

	while (!skb_queue_empty(&skbs)) {
		struct sk_buff *skb = __skb_dequeue(&skbs);

		mt7601u_tx_status(dev, skb);
	}
}

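/* Submit an skb on the bulk OUT endpoint @ep and stop the corresponding
 * mac80211 queue when the TX ring becomes full.
 */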
static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev,
				 struct sk_buff *skb, u8 ep)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]);
	struct mt7601u_dma_buf_tx *e;
	struct mt7601u_tx_queue *q = &dev->tx_q[ep];
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->tx_lock, flags);

	if (WARN_ON(q->entries <= q->used)) {
		ret = -ENOSPC;
		goto out;
	}

	e = &q->e[q->end];
	usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len,
			  mt7601u_complete_tx, q);
	ret = usb_submit_urb(e->urb, GFP_ATOMIC);
	if (ret) {
		/* Special-handle ENODEV from TX urb submission because it will
		 * often be the first ENODEV we see after device is removed.
		 */
		if (ret == -ENODEV)
			set_bit(MT7601U_STATE_REMOVED, &dev->state);
		else
			dev_err(dev->dev, "Error: TX urb submit failed:%d\n",
				ret);
		goto out;
	}

	q->end = (q->end + 1) % q->entries;
	q->used++;
	e->skb = skb;

	if (q->used >= q->entries)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
out:
	spin_unlock_irqrestore(&dev->tx_lock, flags);

	return ret;
}

/* Map hardware Q to USB endpoint number */
static u8 q2ep(u8 qid)
{
	/* TODO: take management packets to queue 5 */
	return qid + 1;
}

/* Map USB endpoint number to Q id in the DMA engine */
static enum mt76_qsel ep2dmaq(u8 ep)
{
	if (ep == 5)
		return MT_QSEL_MGMT;
	return MT_QSEL_EDCA;
}

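/* Wrap an skb with the DMA TX descriptor and hand it to the USB TX path.
 * The skb is freed here if the URB submission fails.
 */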
int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb,
			   struct mt76_wcid *wcid, int hw_q)
{
	u8 ep = q2ep(hw_q);
	u32 dma_flags;
	int ret;

	dma_flags = MT_TXD_PKT_INFO_80211;
	if (wcid->hw_key_idx == 0xff)
		dma_flags |= MT_TXD_PKT_INFO_WIV;

	ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags);
	if (ret)
		return ret;

	ret = mt7601u_dma_submit_tx(dev, skb, ep);
	if (ret) {
		ieee80211_free_txskb(dev->hw, skb);
		return ret;
	}

	return 0;
}

static void mt7601u_kill_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++)
		usb_poison_urb(dev->rx_q.e[i].urb);
}

static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev,
				 struct mt7601u_dma_buf_rx *e, gfp_t gfp)
{
	struct usb_device *usb_dev = mt7601u_to_usb_dev(dev);
	u8 *buf = page_address(e->p);
	unsigned pipe;
	int ret;

	pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]);

	usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE,
			  mt7601u_complete_rx, dev);

	trace_mt_submit_urb(dev, e->urb);
	ret = usb_submit_urb(e->urb, gfp);
	if (ret)
		dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret);

	return ret;
}

static int mt7601u_submit_rx(struct mt7601u_dev *dev)
{
	int i, ret;

	for (i = 0; i < dev->rx_q.entries; i++) {
		ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static void mt7601u_free_rx(struct mt7601u_dev *dev)
{
	int i;

	for (i = 0; i < dev->rx_q.entries; i++) {
		__free_pages(dev->rx_q.e[i].p, MT_RX_ORDER);
		usb_free_urb(dev->rx_q.e[i].urb);
	}
}

static int mt7601u_alloc_rx(struct mt7601u_dev *dev)
{
	int i;

	memset(&dev->rx_q, 0, sizeof(dev->rx_q));
	dev->rx_q.dev = dev;
	dev->rx_q.entries = N_RX_ENTRIES;

	for (i = 0; i < N_RX_ENTRIES; i++) {
		dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER);

		if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p)
			return -ENOMEM;
	}

	return 0;
}

static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q)
{
	int i;

	for (i = 0; i < q->entries; i++) {
		usb_poison_urb(q->e[i].urb);
		if (q->e[i].skb)
			mt7601u_tx_status(q->dev, q->e[i].skb);
		usb_free_urb(q->e[i].urb);
	}
}

static void mt7601u_free_tx(struct mt7601u_dev *dev)
{
	int i;

	if (!dev->tx_q)
		return;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		mt7601u_free_tx_queue(&dev->tx_q[i]);
}

static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev,
				  struct mt7601u_tx_queue *q)
{
	int i;

	q->dev = dev;
	q->entries = N_TX_ENTRIES;

	for (i = 0; i < N_TX_ENTRIES; i++) {
		q->e[i].urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!q->e[i].urb)
			return -ENOMEM;
	}

	return 0;
}

static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
{
	int i;

	dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX,
				 sizeof(*dev->tx_q), GFP_KERNEL);
	if (!dev->tx_q)
		return -ENOMEM;

	for (i = 0; i < __MT_EP_OUT_MAX; i++)
		if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i]))
			return -ENOMEM;

	return 0;
}

int mt7601u_dma_init(struct mt7601u_dev *dev)
{
	int ret = -ENOMEM;

	tasklet_init(&dev->tx_tasklet, mt7601u_tx_tasklet, (unsigned long) dev);
	tasklet_init(&dev->rx_tasklet, mt7601u_rx_tasklet, (unsigned long) dev);

	ret = mt7601u_alloc_tx(dev);
	if (ret)
		goto err;
	ret = mt7601u_alloc_rx(dev);
	if (ret)
		goto err;

	ret = mt7601u_submit_rx(dev);
	if (ret)
		goto err;

	return 0;
err:
	mt7601u_dma_cleanup(dev);
	return ret;
}

void mt7601u_dma_cleanup(struct mt7601u_dev *dev)
{
	mt7601u_kill_rx(dev);

	tasklet_kill(&dev->rx_tasklet);

	mt7601u_free_rx(dev);
	mt7601u_free_tx(dev);

	tasklet_kill(&dev->tx_tasklet);
}