// SPDX-License-Identifier: ISC

#include "mt7603.h"
#include "mac.h"
#include "../dma.h"

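/* Allocate a TX ring, attach it to software queue @qid and enable its
 * TX done interrupt.
 */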
7*4882a593Smuzhiyun static int
mt7603_init_tx_queue(struct mt7603_dev * dev,int qid,int idx,int n_desc)8*4882a593Smuzhiyun mt7603_init_tx_queue(struct mt7603_dev *dev, int qid, int idx, int n_desc)
9*4882a593Smuzhiyun {
10*4882a593Smuzhiyun struct mt76_queue *hwq;
11*4882a593Smuzhiyun int err;
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
14*4882a593Smuzhiyun if (!hwq)
15*4882a593Smuzhiyun return -ENOMEM;
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
18*4882a593Smuzhiyun if (err < 0)
19*4882a593Smuzhiyun return err;
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun dev->mt76.q_tx[qid] = hwq;
22*4882a593Smuzhiyun
23*4882a593Smuzhiyun mt7603_irq_enable(dev, MT_INT_TX_DONE(idx));
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun return 0;
26*4882a593Smuzhiyun }
27*4882a593Smuzhiyun
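/* Handle a frame that the hardware looped back onto the MCU RX ring,
 * typically because the destination station entered powersave before the
 * frame could be transmitted. The original TX descriptor is still
 * attached, so the frame can be requeued for retransmission later.
 */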
28*4882a593Smuzhiyun static void
mt7603_rx_loopback_skb(struct mt7603_dev * dev,struct sk_buff * skb)29*4882a593Smuzhiyun mt7603_rx_loopback_skb(struct mt7603_dev *dev, struct sk_buff *skb)
30*4882a593Smuzhiyun {
31*4882a593Smuzhiyun static const u8 tid_to_ac[8] = {
32*4882a593Smuzhiyun IEEE80211_AC_BE,
33*4882a593Smuzhiyun IEEE80211_AC_BK,
34*4882a593Smuzhiyun IEEE80211_AC_BK,
35*4882a593Smuzhiyun IEEE80211_AC_BE,
36*4882a593Smuzhiyun IEEE80211_AC_VI,
37*4882a593Smuzhiyun IEEE80211_AC_VI,
38*4882a593Smuzhiyun IEEE80211_AC_VO,
39*4882a593Smuzhiyun IEEE80211_AC_VO
40*4882a593Smuzhiyun };
41*4882a593Smuzhiyun __le32 *txd = (__le32 *)skb->data;
42*4882a593Smuzhiyun struct ieee80211_hdr *hdr;
43*4882a593Smuzhiyun struct ieee80211_sta *sta;
44*4882a593Smuzhiyun struct mt7603_sta *msta;
45*4882a593Smuzhiyun struct mt76_wcid *wcid;
46*4882a593Smuzhiyun void *priv;
47*4882a593Smuzhiyun int idx;
48*4882a593Smuzhiyun u32 val;
49*4882a593Smuzhiyun u8 tid = 0;
50*4882a593Smuzhiyun
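	/* The looped back frame consists of the TX descriptor followed by
	 * the 802.11 header.
	 */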
	if (skb->len < MT_TXD_SIZE + sizeof(struct ieee80211_hdr))
		goto free;

	val = le32_to_cpu(txd[1]);
	idx = FIELD_GET(MT_TXD1_WLAN_IDX, val);
	skb->priority = FIELD_GET(MT_TXD1_TID, val);

	if (idx >= MT7603_WTBL_STA - 1)
		goto free;

	wcid = rcu_dereference(dev->mt76.wcid[idx]);
	if (!wcid)
		goto free;

	priv = msta = container_of(wcid, struct mt7603_sta, wcid);
	val = le32_to_cpu(txd[0]);
	val &= ~(MT_TXD0_P_IDX | MT_TXD0_Q_IDX);
	val |= FIELD_PREP(MT_TXD0_Q_IDX, MT_TX_HW_QUEUE_MGMT);
	txd[0] = cpu_to_le32(val);

	sta = container_of(priv, struct ieee80211_sta, drv_priv);
	hdr = (struct ieee80211_hdr *)&skb->data[MT_TXD_SIZE];
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) &
		      IEEE80211_QOS_CTL_TAG1D_MASK;
	skb_set_queue_mapping(skb, tid_to_ac[tid]);
	ieee80211_sta_set_buffered(sta, tid, true);

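	/* Buffer the frame on the per-station powersave queue, dropping
	 * the oldest entry once the queue exceeds 64 frames.
	 */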
	spin_lock_bh(&dev->ps_lock);
	__skb_queue_tail(&msta->psq, skb);
	if (skb_queue_len(&msta->psq) >= 64) {
		skb = __skb_dequeue(&msta->psq);
		dev_kfree_skb(skb);
	}
	spin_unlock_bh(&dev->ps_lock);
	return;

free:
	dev_kfree_skb(skb);
}

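/* Dispatch a frame received on one of the RX rings according to its
 * packet type.
 */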
void mt7603_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
			 struct sk_buff *skb)
{
	struct mt7603_dev *dev = container_of(mdev, struct mt7603_dev, mt76);
	__le32 *rxd = (__le32 *)skb->data;
	__le32 *end = (__le32 *)&skb->data[skb->len];
	enum rx_pkt_type type;

	type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));

	if (q == MT_RXQ_MCU) {
		if (type == PKT_TYPE_RX_EVENT)
			mt76_mcu_rx_event(&dev->mt76, skb);
		else
			mt7603_rx_loopback_skb(dev, skb);
		return;
	}

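	/* Main ring: TX status reports are packed as 5-dword records,
	 * several of which may share a single skb.
	 */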
	switch (type) {
	case PKT_TYPE_TXS:
		for (rxd++; rxd + 5 <= end; rxd += 5)
			mt7603_mac_add_txs(dev, rxd);
		dev_kfree_skb(skb);
		break;
	case PKT_TYPE_RX_EVENT:
		mt76_mcu_rx_event(&dev->mt76, skb);
		return;
	case PKT_TYPE_NORMAL:
		if (mt7603_mac_fill_rx(dev, skb) == 0) {
			mt76_rx(&dev->mt76, q, skb);
			return;
		}
		fallthrough;
	default:
		dev_kfree_skb(skb);
		break;
	}
}

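/* Allocate an RX ring and enable its RX done interrupt. */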
static int
mt7603_init_rx_queue(struct mt7603_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize)
{
	int err;

	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
			       MT_RX_RING_BASE);
	if (err < 0)
		return err;

	mt7603_irq_enable(dev, MT_INT_RX_DONE(idx));

	return 0;
}

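/* NAPI poll handler for TX completions. The queues are cleaned up a
 * second time after re-enabling the TX done interrupt, to pick up any
 * completions that may have raced with the interrupt enable.
 */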
static int mt7603_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt7603_dev *dev;
	int i;

	dev = container_of(napi, struct mt7603_dev, mt76.tx_napi);
	dev->tx_dma_check = 0;

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	if (napi_complete_done(napi, 0))
		mt7603_irq_enable(dev, MT_INT_TX_DONE_ALL);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt7603_mac_sta_poll(dev);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	return 0;
}

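/* Reset the DMA engine, set up all TX/RX rings and register the TX
 * completion poller.
 */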
int mt7603_dma_init(struct mt7603_dev *dev)
{
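	/* Map mac80211 access categories to hardware TX queue indices. */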
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BK] = 0,
		[IEEE80211_AC_BE] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};
	int ret;
	int i;

	mt76_dma_attach(&dev->mt76);

	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);
	mt7603_pse_client_reset(dev);

	for (i = 0; i < ARRAY_SIZE(wmm_queue_map); i++) {
		ret = mt7603_init_tx_queue(dev, i, wmm_queue_map[i],
					   MT7603_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt7603_init_tx_queue(dev, MT_TXQ_PSD,
				   MT_TX_HW_QUEUE_MGMT, MT7603_PSD_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, MT_TXQ_MCU,
				   MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, MT_TXQ_BEACON,
				   MT_TX_HW_QUEUE_BCN, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_tx_queue(dev, MT_TXQ_CAB,
				   MT_TX_HW_QUEUE_BMC, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				   MT7603_MCU_RX_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt7603_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MAIN], 0,
				   MT7603_RX_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	mt76_wr(dev, MT_DELAY_INT_CFG, 0);
	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
			  mt7603_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}

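/* Disable DMA and free all ring buffers. */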
void mt7603_dma_cleanup(struct mt7603_dev *dev)
{
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN |
		   MT_WPDMA_GLO_CFG_RX_DMA_EN |
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);

	mt76_dma_cleanup(&dev->mt76);
}