xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/mediatek/mt76/dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

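/*
 * Allocate a new txwi (per-packet hardware TX descriptor) buffer plus its
 * cache bookkeeping struct in one device-managed allocation, and map the
 * txwi area for DMA to the device.
 */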
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

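/* Pop a cached txwi entry off the free list, or return NULL if empty. */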
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

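/* Get a txwi entry from the cache, falling back to a fresh allocation. */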
static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

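/* Return a txwi entry to the free list for later reuse. */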
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

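/*
 * Unmap all cached txwi buffers on teardown; the backing memory itself is
 * device-managed and freed together with the device.
 */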
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
	local_bh_enable();
}

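/*
 * Allocate the coherent descriptor ring and per-descriptor bookkeeping for
 * a queue, mark all descriptors as done, and program ring base, size and
 * indices into the hardware registers.
 */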
static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int size;
	int i;

	spin_lock_init(&q->lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	writel(q->desc_dma, &q->regs->desc_base);
	writel(0, &q->regs->cpu_idx);
	writel(0, &q->regs->dma_idx);
	writel(q->ndesc, &q->regs->ring_size);

	return 0;
}

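/*
 * Fill hardware descriptors, two buffers per descriptor, and advance the
 * queue head. The skb and txwi are recorded on the last entry written so
 * completion handling can find them. Returns the last descriptor index.
 */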
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	u32 ctrl;
	int i, idx = -1;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		q->head = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;

	return idx;
}

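/*
 * Unmap the buffers of a completed TX descriptor, hand a copy of the entry
 * back to the caller and clear the slot for reuse.
 */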
static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

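/* Re-sync the software head/tail with the current hardware DMA index. */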
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	writel(q->desc_dma, &q->regs->desc_base);
	writel(q->ndesc, &q->regs->ring_size);
	q->head = readl(&q->regs->dma_idx);
	q->tail = q->head;
}

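/*
 * Ensure all descriptor writes are visible to the device before telling
 * the hardware about the new ring head.
 */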
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	writel(q->head, &q->regs->cpu_idx);
}

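/*
 * Reap completed TX descriptors up to the hardware index (or all of them
 * when flushing), recycle their txwi entries, and wake the mac80211 queue
 * if it was stopped and has drained enough.
 */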
static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake = false;
	int last;

	if (!q)
		return;

	if (flush)
		last = -1;
	else
		last = readl(&q->regs->dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
			wake = !flush;
		}

		if (!flush && q->tail == last)
			last = readl(&q->regs->dma_idx);
	}

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	wake = wake && q->stopped &&
	       qid < IEEE80211_NUM_ACS && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

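/*
 * Detach the buffer from an RX descriptor: report its length and info word
 * and unmap it so the CPU can safely access the data.
 */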
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	dma_addr_t buf_addr;
	void *buf = e->buf;
	int buf_len = SKB_WITH_OVERHEAD(q->buf_size);

	buf_addr = e->dma_addr[0];
	if (len) {
		u32 ctl = le32_to_cpu(READ_ONCE(desc->ctrl));

		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctl);
		*more = !(ctl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	dma_unmap_single(dev->dev, buf_addr, buf_len, DMA_FROM_DEVICE);
	e->buf = NULL;

	return buf;
}

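/*
 * Take the next completed buffer off the ring tail; when flushing, the
 * descriptor is force-marked as done so everything gets reclaimed.
 */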
static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more);
}

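/*
 * Queue an skb that needs no txwi (typically MCU command messages): map it,
 * post a single-buffer descriptor and kick the queue.
 */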
static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

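/*
 * Map a frame and its fragments, let the driver fill in the txwi via
 * tx_prepare_skb(), then post txwi + data buffers to the TX ring. On
 * failure, everything is unmapped and the txwi entry recycled.
 */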
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		      struct sk_buff *skb, struct mt76_wcid *wcid,
		      struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t) {
		hw = mt76_tx_status_get_hw(dev, skb);
		ieee80211_free_txskb(hw, skb);
		return -ENOMEM;
	}
	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (tx_info.skb == dev->test.tx_skb)
		dev->test.tx_done--;
#endif

	dev_kfree_skb(tx_info.skb);
	mt76_put_txwi(dev, t);
	return ret;
}

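/*
 * Refill the RX ring with page-fragment buffers until it is full, then
 * kick the queue if anything was added. Returns the number of buffers
 * added.
 */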
static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	dma_addr_t addr;
	void *buf;
	int frames = 0;
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int offset = q->buf_offset;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;

		buf = page_frag_alloc(&q->rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		mt76_dma_add_buf(dev, q, &qbuf, 1, 0, buf, NULL);
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

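/* Flush all RX buffers off the ring and drop the page-fragment cache. */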
static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

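/*
 * Reset an RX queue: mark all descriptors done, reclaim the buffers,
 * re-sync the hardware indices, refill the ring and drop any partially
 * assembled frame.
 */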
static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

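/*
 * Append an RX buffer to the frame being assembled in q->rx_head; once the
 * last fragment arrives, pass the completed skb up to the driver (or drop
 * it if the fragment list overflowed).
 */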
static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	else
		dev_kfree_skb(skb);
}

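/*
 * Main RX loop: turn completed buffers into skbs (handling multi-buffer
 * frames via q->rx_head), hand them to the driver and refill the ring.
 * Returns the number of completed frames.
 */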
static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0;
	struct sk_buff *skb;
	unsigned char *data;
	bool more;

	while (done < budget) {
		u32 info;

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more);
		if (!data)
			break;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;

			skb_free_frag(data);
			continue;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more);
			continue;
		}

		skb = build_skb(data, q->buf_size);
		if (!skb) {
			skb_free_frag(data);
			continue;
		}
		skb_reserve(skb, q->buf_offset);

		if (q == &dev->q_rx[MT_RXQ_MCU]) {
			u32 *rxfce = (u32 *)skb->cb;

			*rxfce = info;
		}

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

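/*
 * NAPI poll handler: process RX until the budget is exhausted or the ring
 * runs dry, then re-enable interrupts via the driver callback.
 */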
static int
mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	local_bh_disable();
	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();
	local_bh_enable();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}

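/* Register one NAPI context per RX queue and pre-fill the RX rings. */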
static int
mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

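/* Queue ops implementation for memory-mapped DMA rings. */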
static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

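/* Tear down all TX/RX queues and release the cached txwi buffers. */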
void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);
	for (i = 0; i < ARRAY_SIZE(dev->q_tx); i++)
		mt76_dma_tx_cleanup(dev, i, true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);
680