xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 * Copyright (C) 2018 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/irq.h>

#include "mt76x02.h"
#include "mt76x02_mcu.h"
#include "trace.h"

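/*
 * Runs shortly before TBTT: resync the beacon timer, rewrite the beacon
 * slots (bypassing them while they are updated, to avoid sending partial
 * data), then queue buffered broadcast/multicast frames on the PSD ring.
 */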
static void mt76x02_pre_tbtt_tasklet(unsigned long arg)
{
	struct mt76x02_dev *dev = (struct mt76x02_dev *)arg;
	struct mt76_queue *q = dev->mt76.q_tx[MT_TXQ_PSD];
	struct beacon_bc_data data = {};
	struct sk_buff *skb;
	int i;

	if (mt76_hw(dev)->conf.flags & IEEE80211_CONF_OFFCHANNEL)
		return;

	mt76x02_resync_beacon_timer(dev);

	/* Prevent corrupt transmissions during update */
	mt76_set(dev, MT_BCN_BYPASS_MASK, 0xffff);
	dev->beacon_data_count = 0;

	ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev),
		IEEE80211_IFACE_ITER_RESUME_ALL,
		mt76x02_update_beacon_iter, dev);

	mt76_wr(dev, MT_BCN_BYPASS_MASK,
		0xff00 | ~(0xff00 >> dev->beacon_data_count));

	mt76_csa_check(&dev->mt76);

	if (dev->mt76.csa_complete)
		return;

	mt76x02_enqueue_buffered_bc(dev, &data, 8);

	if (!skb_queue_len(&data.q))
		return;

	for (i = 0; i < ARRAY_SIZE(data.tail); i++) {
		if (!data.tail[i])
			continue;

		mt76_skb_set_moredata(data.tail[i], false);
	}

	spin_lock_bh(&q->lock);
	while ((skb = __skb_dequeue(&data.q)) != NULL) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		struct ieee80211_vif *vif = info->control.vif;
		struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;

		mt76_tx_queue_skb(dev, MT_TXQ_PSD, skb, &mvif->group_wcid,
				  NULL);
	}
	spin_unlock_bh(&q->lock);
}

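/* beacon_ops callback: gate the pre-TBTT tasklet */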
static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
{
	if (en)
		tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
	else
		tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
}

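/* beacon_ops callback: toggle the pre-TBTT timer and (pre-)TBTT interrupts */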
static void mt76x02e_beacon_enable(struct mt76x02_dev *dev, bool en)
{
	mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en);
	if (en)
		mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
	else
		mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT);
}

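/*
 * Register the MMIO beacon ops (8 slots of 1024 bytes each), program the
 * pre-TBTT and GP timers, then run the common beacon setup.
 */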
void mt76x02e_init_beacon_config(struct mt76x02_dev *dev)
{
	static const struct mt76x02_beacon_ops beacon_ops = {
		.nslots = 8,
		.slot_size = 1024,
		.pre_tbtt_enable = mt76x02e_pre_tbtt_enable,
		.beacon_enable = mt76x02e_beacon_enable,
	};

	dev->beacon_ops = &beacon_ops;

	/* Fire a pre-TBTT interrupt 8 ms before TBTT */
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT,
		       8 << 4);
	mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER,
		       MT_DFS_GP_INTERVAL);
	mt76_wr(dev, MT_INT_TIMER_EN, 0);

	mt76x02_init_beacon_config(dev);
}
EXPORT_SYMBOL_GPL(mt76x02e_init_beacon_config);

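/*
 * Allocate one TX ring, attach it to the given software queue and unmask
 * its completion interrupt.
 */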
static int
mt76x02_init_tx_queue(struct mt76x02_dev *dev, int qid, int idx, int n_desc)
{
	struct mt76_queue *hwq;
	int err;

	hwq = devm_kzalloc(dev->mt76.dev, sizeof(*hwq), GFP_KERNEL);
	if (!hwq)
		return -ENOMEM;

	err = mt76_queue_alloc(dev, hwq, idx, n_desc, 0, MT_TX_RING_BASE);
	if (err < 0)
		return err;

	dev->mt76.q_tx[qid] = hwq;

	mt76x02_irq_enable(dev, MT_INT_TX_DONE(idx));

	return 0;
}

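/* Set up one RX ring and unmask its done interrupt */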
127*4882a593Smuzhiyun static int
mt76x02_init_rx_queue(struct mt76x02_dev * dev,struct mt76_queue * q,int idx,int n_desc,int bufsize)128*4882a593Smuzhiyun mt76x02_init_rx_queue(struct mt76x02_dev *dev, struct mt76_queue *q,
129*4882a593Smuzhiyun 		      int idx, int n_desc, int bufsize)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	int err;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	err = mt76_queue_alloc(dev, q, idx, n_desc, bufsize,
134*4882a593Smuzhiyun 			       MT_RX_RING_BASE);
135*4882a593Smuzhiyun 	if (err < 0)
136*4882a593Smuzhiyun 		return err;
137*4882a593Smuzhiyun 
138*4882a593Smuzhiyun 	mt76x02_irq_enable(dev, MT_INT_RX_DONE(idx));
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	return 0;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun 
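/* Drain TX status reports queued up by the status polling paths */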
static void mt76x02_process_tx_status_fifo(struct mt76x02_dev *dev)
{
	struct mt76x02_tx_status stat;
	u8 update = 1;

	while (kfifo_get(&dev->txstatus_fifo, &stat))
		mt76x02_send_tx_status(dev, &stat, &update);
}

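/* TX worker: report pending TX status and reschedule the software queues */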
static void mt76x02_tx_worker(struct mt76_worker *w)
{
	struct mt76x02_dev *dev;

	dev = container_of(w, struct mt76x02_dev, mt76.tx_worker);

	mt76x02_mac_poll_tx_status(dev, false);
	mt76x02_process_tx_status_fifo(dev);

	mt76_txq_schedule_all(&dev->mphy);
}

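/*
 * TX NAPI poll: reap completed frames on all TX rings, re-enable the TX
 * done interrupt, then clean up once more to catch completions that raced
 * with the interrupt being re-enabled.
 */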
static int mt76x02_poll_tx(struct napi_struct *napi, int budget)
{
	struct mt76x02_dev *dev = container_of(napi, struct mt76x02_dev,
					       mt76.tx_napi);
	int i;

	mt76x02_mac_poll_tx_status(dev, false);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	if (napi_complete_done(napi, 0))
		mt76x02_irq_enable(dev, MT_INT_TX_DONE_ALL);

	for (i = MT_TXQ_MCU; i >= 0; i--)
		mt76_queue_tx_cleanup(dev, i, false);

	mt76_worker_schedule(&dev->mt76.tx_worker);

	return 0;
}

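/*
 * Allocate the TX status FIFO and all TX/RX rings, attach the DMA ops and
 * start the TX NAPI instance.
 */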
int mt76x02_dma_init(struct mt76x02_dev *dev)
{
	struct mt76_txwi_cache __maybe_unused *t;
	int i, ret, fifo_size;
	struct mt76_queue *q;
	void *status_fifo;

	BUILD_BUG_ON(sizeof(struct mt76x02_rxwi) > MT_RX_HEADROOM);

	fifo_size = roundup_pow_of_two(32 * sizeof(struct mt76x02_tx_status));
	status_fifo = devm_kzalloc(dev->mt76.dev, fifo_size, GFP_KERNEL);
	if (!status_fifo)
		return -ENOMEM;

	dev->mt76.tx_worker.fn = mt76x02_tx_worker;
	tasklet_init(&dev->mt76.pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet,
		     (unsigned long)dev);

	spin_lock_init(&dev->txstatus_fifo_lock);
	kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size);

	mt76_dma_attach(&dev->mt76);

	mt76_wr(dev, MT_WPDMA_RST_IDX, ~0);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		ret = mt76x02_init_tx_queue(dev, i, mt76_ac_to_hwq(i),
					    MT76x02_TX_RING_SIZE);
		if (ret)
			return ret;
	}

	ret = mt76x02_init_tx_queue(dev, MT_TXQ_PSD,
				    MT_TX_HW_QUEUE_MGMT, MT76x02_PSD_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_tx_queue(dev, MT_TXQ_MCU,
				    MT_TX_HW_QUEUE_MCU, MT_MCU_RING_SIZE);
	if (ret)
		return ret;

	ret = mt76x02_init_rx_queue(dev, &dev->mt76.q_rx[MT_RXQ_MCU], 1,
				    MT_MCU_RING_SIZE, MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	q = &dev->mt76.q_rx[MT_RXQ_MAIN];
	q->buf_offset = MT_RX_HEADROOM - sizeof(struct mt76x02_rxwi);
	ret = mt76x02_init_rx_queue(dev, q, 0, MT76X02_RX_RING_SIZE,
				    MT_RX_BUF_SIZE);
	if (ret)
		return ret;

	ret = mt76_init_queues(dev);
	if (ret)
		return ret;

	netif_tx_napi_add(&dev->mt76.napi_dev, &dev->mt76.tx_napi,
			  mt76x02_poll_tx, NAPI_POLL_WEIGHT);
	napi_enable(&dev->mt76.tx_napi);

	return 0;
}
EXPORT_SYMBOL_GPL(mt76x02_dma_init);

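/* Called when a RX NAPI poll is done: unmask the RX done interrupt */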
void mt76x02_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
	struct mt76x02_dev *dev;

	dev = container_of(mdev, struct mt76x02_dev, mt76);
	mt76x02_irq_enable(dev, MT_INT_RX_DONE(q));
}
EXPORT_SYMBOL_GPL(mt76x02_rx_poll_complete);

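/*
 * Top-level interrupt handler: acknowledge pending sources, mask the ones
 * handled in NAPI/tasklet context and schedule the matching bottom halves.
 */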
irqreturn_t mt76x02_irq_handler(int irq, void *dev_instance)
{
	struct mt76x02_dev *dev = dev_instance;
	u32 intr, mask;

	intr = mt76_rr(dev, MT_INT_SOURCE_CSR);
	intr &= dev->mt76.mmio.irqmask;
	mt76_wr(dev, MT_INT_SOURCE_CSR, intr);

	if (!test_bit(MT76_STATE_INITIALIZED, &dev->mphy.state))
		return IRQ_NONE;

	trace_dev_irq(&dev->mt76, intr, dev->mt76.mmio.irqmask);

	mask = intr & (MT_INT_RX_DONE_ALL | MT_INT_GPTIMER);
	if (intr & (MT_INT_TX_DONE_ALL | MT_INT_TX_STAT))
		mask |= MT_INT_TX_DONE_ALL;

	mt76x02_irq_disable(dev, mask);

	if (intr & MT_INT_RX_DONE(0))
		napi_schedule(&dev->mt76.napi[0]);

	if (intr & MT_INT_RX_DONE(1))
		napi_schedule(&dev->mt76.napi[1]);

	if (intr & MT_INT_PRE_TBTT)
		tasklet_schedule(&dev->mt76.pre_tbtt_tasklet);

	/* send buffered multicast frames now */
	if (intr & MT_INT_TBTT) {
		if (dev->mt76.csa_complete)
			mt76_csa_finish(&dev->mt76);
		else
			mt76_queue_kick(dev, dev->mt76.q_tx[MT_TXQ_PSD]);
	}

	if (intr & MT_INT_TX_STAT)
		mt76x02_mac_poll_tx_status(dev, true);

	if (intr & (MT_INT_TX_STAT | MT_INT_TX_DONE_ALL))
		napi_schedule(&dev->mt76.tx_napi);

	if (intr & MT_INT_GPTIMER)
		tasklet_schedule(&dev->dfs_pd.dfs_tasklet);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(mt76x02_irq_handler);

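/* Wait for WPDMA to go idle, then enable TX/RX DMA */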
static void mt76x02_dma_enable(struct mt76x02_dev *dev)
{
	u32 val;

	mt76_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX);
	mt76x02_wait_for_wpdma(&dev->mt76, 1000);
	usleep_range(50, 100);

	val = FIELD_PREP(MT_WPDMA_GLO_CFG_DMA_BURST_SIZE, 3) |
	      MT_WPDMA_GLO_CFG_TX_DMA_EN |
	      MT_WPDMA_GLO_CFG_RX_DMA_EN;
	mt76_set(dev, MT_WPDMA_GLO_CFG, val);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE);
}

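/* Stop TX/RX DMA, preserving only the static WPDMA configuration bits */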
void mt76x02_dma_disable(struct mt76x02_dev *dev)
{
	u32 val = mt76_rr(dev, MT_WPDMA_GLO_CFG);

	val &= MT_WPDMA_GLO_CFG_DMA_BURST_SIZE |
	       MT_WPDMA_GLO_CFG_BIG_ENDIAN |
	       MT_WPDMA_GLO_CFG_HDR_SEG_LEN;
	val |= MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE;
	mt76_wr(dev, MT_WPDMA_GLO_CFG, val);
}
EXPORT_SYMBOL_GPL(mt76x02_dma_disable);

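/* Reset counters, start DMA, enable MAC TX/RX and unmask data-path IRQs */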
void mt76x02_mac_start(struct mt76x02_dev *dev)
{
	mt76x02_mac_reset_counters(dev);
	mt76x02_dma_enable(dev);
	mt76_wr(dev, MT_RX_FILTR_CFG, dev->mt76.rxfilter);
	mt76_wr(dev, MT_MAC_SYS_CTRL,
		MT_MAC_SYS_CTRL_ENABLE_TX |
		MT_MAC_SYS_CTRL_ENABLE_RX);
	mt76x02_irq_enable(dev,
			   MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
			   MT_INT_TX_STAT);
}
EXPORT_SYMBOL_GPL(mt76x02_mac_start);

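/*
 * Returns true if any of the four AC rings still holds frames but its
 * hardware DMA index has not moved since the last check.
 */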
static bool mt76x02_tx_hang(struct mt76x02_dev *dev)
{
	u32 dma_idx, prev_dma_idx;
	struct mt76_queue *q;
	int i;

	for (i = 0; i < 4; i++) {
		q = dev->mt76.q_tx[i];

		if (!q->queued)
			continue;

		prev_dma_idx = dev->mt76.tx_dma_idx[i];
		dma_idx = readl(&q->regs->dma_idx);
		dev->mt76.tx_dma_idx[i] = dma_idx;

		if (prev_dma_idx == dma_idx)
			break;
	}

	return i < 4;
}

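/*
 * Key iterator used during recovery: re-sync the PN/IV state into the
 * hardware for every key that is offloaded (i.e. not using software IV).
 */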
static void mt76x02_key_sync(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key, void *data)
{
	struct mt76x02_dev *dev = hw->priv;
	struct mt76_wcid *wcid;

	if (!sta)
		return;

	wcid = (struct mt76_wcid *)sta->drv_priv;

	if (wcid->hw_key_idx != key->keyidx || wcid->sw_iv)
		return;

	mt76x02_mac_wcid_sync_pn(dev, wcid->idx, key);
}

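/*
 * Drop all software station/vif state ahead of a firmware restart; the
 * stack re-adds everything after ieee80211_restart_hw().
 */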
static void mt76x02_reset_state(struct mt76x02_dev *dev)
{
	int i;

	lockdep_assert_held(&dev->mt76.mutex);

	clear_bit(MT76_STATE_RUNNING, &dev->mphy.state);

	rcu_read_lock();
	ieee80211_iter_keys_rcu(dev->mt76.hw, NULL, mt76x02_key_sync, NULL);
	rcu_read_unlock();

	for (i = 0; i < MT76x02_N_WCIDS; i++) {
		struct ieee80211_sta *sta;
		struct ieee80211_vif *vif;
		struct mt76x02_sta *msta;
		struct mt76_wcid *wcid;
		void *priv;

		wcid = rcu_dereference_protected(dev->mt76.wcid[i],
					lockdep_is_held(&dev->mt76.mutex));
		if (!wcid)
			continue;

		rcu_assign_pointer(dev->mt76.wcid[i], NULL);

		priv = msta = container_of(wcid, struct mt76x02_sta, wcid);
		sta = container_of(priv, struct ieee80211_sta, drv_priv);

		priv = msta->vif;
		vif = container_of(priv, struct ieee80211_vif, drv_priv);

		__mt76_sta_remove(&dev->mt76, vif, sta);
		memset(msta, 0, sizeof(*msta));
	}

	dev->mphy.vif_mask = 0;
	dev->mt76.beacon_mask = 0;
}

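/*
 * Watchdog recovery: quiesce all bottom halves, reset MAC and DMA,
 * optionally restart the MCU firmware, reset the rings and bring the
 * device back up.
 */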
static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
{
	u32 mask = dev->mt76.mmio.irqmask;
	bool restart = dev->mt76.mcu_ops->mcu_restart;
	int i;

	ieee80211_stop_queues(dev->mt76.hw);
	set_bit(MT76_RESET, &dev->mphy.state);

	tasklet_disable(&dev->mt76.pre_tbtt_tasklet);
	mt76_worker_disable(&dev->mt76.tx_worker);
	napi_disable(&dev->mt76.tx_napi);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_disable(&dev->mt76.napi[i]);
	}

	mutex_lock(&dev->mt76.mutex);

	dev->mcu_timeout = 0;
	if (restart)
		mt76x02_reset_state(dev);

	if (dev->mt76.beacon_mask)
		mt76_clear(dev, MT_BEACON_TIME_CFG,
			   MT_BEACON_TIME_CFG_BEACON_TX |
			   MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_disable(dev, mask);

	/* perform device reset */
	mt76_clear(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);
	mt76_wr(dev, MT_MAC_SYS_CTRL, 0);
	mt76_clear(dev, MT_WPDMA_GLO_CFG,
		   MT_WPDMA_GLO_CFG_TX_DMA_EN | MT_WPDMA_GLO_CFG_RX_DMA_EN);
	usleep_range(5000, 10000);
	mt76_wr(dev, MT_INT_SOURCE_CSR, 0xffffffff);

	/* let fw reset DMA */
	mt76_set(dev, 0x734, 0x3);

	if (restart)
		mt76_mcu_restart(dev);

	for (i = 0; i < __MT_TXQ_MAX; i++)
		mt76_queue_tx_cleanup(dev, i, true);

	mt76_for_each_q_rx(&dev->mt76, i) {
		mt76_queue_rx_reset(dev, i);
	}

	mt76x02_mac_start(dev);

	if (dev->ed_monitor)
		mt76_set(dev, MT_TXOP_CTRL_CFG, MT_TXOP_ED_CCA_EN);

	if (dev->mt76.beacon_mask && !restart)
		mt76_set(dev, MT_BEACON_TIME_CFG,
			 MT_BEACON_TIME_CFG_BEACON_TX |
			 MT_BEACON_TIME_CFG_TBTT_EN);

	mt76x02_irq_enable(dev, mask);

	mutex_unlock(&dev->mt76.mutex);

	clear_bit(MT76_RESET, &dev->mphy.state);

	mt76_worker_enable(&dev->mt76.tx_worker);
	napi_enable(&dev->mt76.tx_napi);
	napi_schedule(&dev->mt76.tx_napi);

	tasklet_enable(&dev->mt76.pre_tbtt_tasklet);

	mt76_for_each_q_rx(&dev->mt76, i) {
		napi_enable(&dev->mt76.napi[i]);
		napi_schedule(&dev->mt76.napi[i]);
	}

	if (restart) {
		set_bit(MT76_RESTART, &dev->mphy.state);
		mt76x02_mcu_function_select(dev, Q_SELECT, 1);
		ieee80211_restart_hw(dev->mt76.hw);
	} else {
		ieee80211_wake_queues(dev->mt76.hw);
		mt76_txq_schedule_all(&dev->mphy);
	}
}

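/* Clear the restart flag once mac80211 has finished its restart sequence */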
void mt76x02_reconfig_complete(struct ieee80211_hw *hw,
			       enum ieee80211_reconfig_type reconfig_type)
{
	struct mt76x02_dev *dev = hw->priv;

	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
		return;

	clear_bit(MT76_RESTART, &dev->mphy.state);
}
EXPORT_SYMBOL_GPL(mt76x02_reconfig_complete);

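/*
 * Trigger a watchdog reset once the hang detector has fired MT_TX_HANG_TH
 * times in a row, or when the MCU has timed out.
 */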
static void mt76x02_check_tx_hang(struct mt76x02_dev *dev)
{
	if (test_bit(MT76_RESTART, &dev->mphy.state))
		return;

	if (mt76x02_tx_hang(dev)) {
		if (++dev->tx_hang_check >= MT_TX_HANG_TH)
			goto restart;
	} else {
		dev->tx_hang_check = 0;
	}

	if (dev->mcu_timeout)
		goto restart;

	return;

restart:
	mt76x02_watchdog_reset(dev);

	dev->tx_hang_reset++;
	dev->tx_hang_check = 0;
	memset(dev->mt76.tx_dma_idx, 0xff,
	       sizeof(dev->mt76.tx_dma_idx));
}

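/* Periodic watchdog work: check for TX hangs and re-arm itself */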
void mt76x02_wdt_work(struct work_struct *work)
{
	struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev,
					       wdt_work.work);

	mt76x02_check_tx_hang(dev);

	ieee80211_queue_delayed_work(mt76_hw(dev), &dev->wdt_work,
				     MT_WATCHDOG_TIME);
}