// SPDX-License-Identifier: ISC
/* Copyright (C) 2020 MediaTek Inc.
 *
 * This file is written based on mt76/usb.c.
 *
 * Author: Felix Fietkau <nbd@nbd.name>
 *	   Lorenzo Bianconi <lorenzo@kernel.org>
 *	   Sean Wang <sean.wang@mediatek.com>
 */

#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mmc/sdio_func.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "mt76.h"

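/* Allocate the entry ring for one rx queue. The entries are
 * devm-managed, so they are freed automatically on device teardown.
 */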
static int
mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];

	spin_lock_init(&q->lock);
	q->entry = devm_kcalloc(dev->dev,
				MT_NUM_RX_ENTRIES, sizeof(*q->entry),
				GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	q->ndesc = MT_NUM_RX_ENTRIES;
	q->head = q->tail = 0;
	q->queued = 0;

	return 0;
}

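/* Allocate one tx queue for every qid below MT_TXQ_MCU_WA, i.e. the
 * data queues plus the MCU queue.
 */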
static int mt76s_alloc_tx(struct mt76_dev *dev)
{
	struct mt76_queue *q;
	int i;

	for (i = 0; i < MT_TXQ_MCU_WA; i++) {
		q = devm_kzalloc(dev->dev, sizeof(*q), GFP_KERNEL);
		if (!q)
			return -ENOMEM;

		spin_lock_init(&q->lock);
		q->hw_idx = i;
		dev->q_tx[i] = q;

		q->entry = devm_kcalloc(dev->dev,
					MT_NUM_TX_ENTRIES, sizeof(*q->entry),
					GFP_KERNEL);
		if (!q->entry)
			return -ENOMEM;

		q->ndesc = MT_NUM_TX_ENTRIES;
	}

	return 0;
}

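/* Flush all pending tx/rx work and drop outstanding tx status.
 * tx.xmit_work and rx.recv_work are not initialized in this file;
 * the chip driver is expected to have set them up before this runs.
 */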
void mt76s_stop_txrx(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;

	cancel_work_sync(&sdio->tx.xmit_work);
	cancel_work_sync(&sdio->tx.status_work);
	cancel_work_sync(&sdio->rx.recv_work);
	cancel_work_sync(&sdio->rx.net_work);
	cancel_work_sync(&sdio->stat_work);
	clear_bit(MT76_READING_STATS, &dev->phy.state);

	mt76_tx_status_check(dev, NULL, true);
}
EXPORT_SYMBOL_GPL(mt76s_stop_txrx);

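/* Allocate the main rx queue and all tx queues. */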
int mt76s_alloc_queues(struct mt76_dev *dev)
{
	int err;

	err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
	if (err < 0)
		return err;

	return mt76s_alloc_tx(dev);
}
EXPORT_SYMBOL_GPL(mt76s_alloc_queues);

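/* Pop the oldest entry off an rx queue, or return NULL if the queue
 * is empty.
 */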
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
{
	struct mt76_queue_entry *e = NULL;

	spin_lock_bh(&q->lock);
	if (q->queued > 0) {
		e = &q->entry[q->tail];
		q->tail = (q->tail + 1) % q->ndesc;
		q->queued--;
	}
	spin_unlock_bh(&q->lock);

	return e;
}

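/* Hand every queued rx skb up to the driver and, for the main queue,
 * signal rx poll completion so the core can refill it.
 */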
static int
mt76s_process_rx_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	int qid = q - &dev->q_rx[MT_RXQ_MAIN];
	int nframes = 0;

	while (true) {
		struct mt76_queue_entry *e;

		if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state))
			break;

		e = mt76s_get_next_rx_entry(q);
		if (!e || !e->skb)
			break;

		dev->drv->rx_skb(dev, MT_RXQ_MAIN, e->skb);
		e->skb = NULL;
		nframes++;
	}
	if (qid == MT_RXQ_MAIN)
		mt76_rx_poll_complete(dev, MT_RXQ_MAIN, NULL);

	return nframes;
}

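/* Reap completed entries from a tx queue: free MCU skbs directly,
 * complete data frames, and wake the mac80211 queue once enough
 * room has been freed up.
 */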
static void mt76s_process_tx_queue(struct mt76_dev *dev, enum mt76_txq_id qid)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_queue_entry entry;
	bool wake;

	while (q->queued > 0) {
		if (!q->entry[q->tail].done)
			break;

		entry = q->entry[q->tail];
		q->entry[q->tail].done = false;

		if (qid == MT_TXQ_MCU) {
			dev_kfree_skb(entry.skb);
			entry.skb = NULL;
		}

		mt76_queue_tx_complete(dev, q, &entry);
	}

	wake = q->stopped && q->queued < q->ndesc - 8;
	if (wake)
		q->stopped = false;

	if (!q->queued)
		wake_up(&dev->tx_wait);

	if (qid == MT_TXQ_MCU)
		return;

	mt76_txq_schedule(&dev->phy, qid);

	if (wake)
		ieee80211_wake_queue(dev->hw, qid);
}

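/* Drain tx status reports from the device, rescheduling itself as
 * long as the device keeps producing them and the phy is running.
 */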
static void mt76s_tx_status_data(struct work_struct *work)
{
	struct mt76_sdio *sdio;
	struct mt76_dev *dev;
	u8 update = 1;
	u16 count = 0;

	sdio = container_of(work, struct mt76_sdio, stat_work);
	dev = container_of(sdio, struct mt76_dev, sdio);

	while (true) {
		if (test_bit(MT76_REMOVED, &dev->phy.state))
			break;

		if (!dev->drv->tx_status_data(dev, &update))
			break;
		count++;
	}

	if (count && test_bit(MT76_STATE_RUNNING, &dev->phy.state))
		queue_work(dev->wq, &sdio->stat_work);
	else
		clear_bit(MT76_READING_STATS, &dev->phy.state);
}

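/* Enqueue a data frame. No locking here: the mt76 core calls
 * tx_queue_skb with q->lock already held, unlike the raw variant
 * below.
 */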
static int
mt76s_tx_queue_skb(struct mt76_dev *dev, enum mt76_txq_id qid,
		   struct sk_buff *skb, struct mt76_wcid *wcid,
		   struct ieee80211_sta *sta)
{
	struct mt76_queue *q = dev->q_tx[qid];
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	int err, len = skb->len;
	u16 idx = q->head;

	if (q->queued == q->ndesc)
		return -ENOSPC;

	skb->prev = skb->next = NULL;
	err = dev->drv->tx_prepare_skb(dev, NULL, qid, wcid, sta, &tx_info);
	if (err < 0)
		return err;

	q->entry[q->head].skb = tx_info.skb;
	q->entry[q->head].buf_sz = len;

	smp_wmb();

	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

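/* Enqueue a raw (MCU) frame: pad it to a multiple of 4 bytes and
 * take q->lock ourselves. The skb is consumed even on error.
 */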
static int
mt76s_tx_queue_skb_raw(struct mt76_dev *dev, enum mt76_txq_id qid,
		       struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue *q = dev->q_tx[qid];
	int ret = -ENOSPC, len = skb->len, pad;

	if (q->queued == q->ndesc)
		goto error;

	pad = round_up(skb->len, 4) - skb->len;
	ret = mt76_skb_adjust_pad(skb, pad);
	if (ret)
		goto error;

	spin_lock_bh(&q->lock);

	q->entry[q->head].buf_sz = len;
	q->entry[q->head].skb = skb;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);

	return ret;
}

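/* Kicking a queue just schedules the chip driver's xmit work on the
 * dedicated txrx workqueue.
 */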
static void mt76s_tx_kick(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct mt76_sdio *sdio = &dev->sdio;

	queue_work(sdio->txrx_wq, &sdio->tx.xmit_work);
}

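/* Queue ops plugged into the mt76 core via dev->queue_ops in
 * mt76s_init() below.
 */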
static const struct mt76_queue_ops sdio_queue_ops = {
	.tx_queue_skb = mt76s_tx_queue_skb,
	.kick = mt76s_tx_kick,
	.tx_queue_skb_raw = mt76s_tx_queue_skb_raw,
};

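/* tx.status_work handler: reap completed entries on all tx queues
 * and kick off a tx status read if the driver provides one.
 */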
static void mt76s_tx_work(struct work_struct *work)
{
	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
					      tx.status_work);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i;

	for (i = 0; i < MT_TXQ_MCU_WA; i++)
		mt76s_process_tx_queue(dev, i);

	if (dev->drv->tx_status_data &&
	    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
		queue_work(dev->wq, &dev->sdio.stat_work);
}

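/* rx.net_work handler: process all rx queues. BHs are disabled and
 * the RCU read lock is held to match the context the rx path would
 * otherwise get from NAPI.
 */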
static void mt76s_rx_work(struct work_struct *work)
{
	struct mt76_sdio *sdio = container_of(work, struct mt76_sdio,
					      rx.net_work);
	struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
	int i;

	/* rx processing */
	local_bh_disable();
	rcu_read_lock();

	mt76_for_each_q_rx(dev, i)
		mt76s_process_rx_queue(dev, &dev->q_rx[i]);

	rcu_read_unlock();
	local_bh_enable();
}

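/* Tear down: stop all tx/rx work, destroy the txrx workqueue,
 * release the SDIO irq and free any rx skbs still sitting in the
 * queues. The queue entries themselves are devm-managed.
 */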
void mt76s_deinit(struct mt76_dev *dev)
{
	struct mt76_sdio *sdio = &dev->sdio;
	int i;

	mt76s_stop_txrx(dev);
	if (sdio->txrx_wq) {
		destroy_workqueue(sdio->txrx_wq);
		sdio->txrx_wq = NULL;
	}

	sdio_claim_host(sdio->func);
	sdio_release_irq(sdio->func);
	sdio_release_host(sdio->func);

	mt76_for_each_q_rx(dev, i) {
		struct mt76_queue *q = &dev->q_rx[i];
		int j;

		for (j = 0; j < q->ndesc; j++) {
			struct mt76_queue_entry *e = &q->entry[j];

			if (!e->skb)
				continue;

			dev_kfree_skb(e->skb);
			e->skb = NULL;
		}
	}
}
EXPORT_SYMBOL_GPL(mt76s_deinit);

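/* Common SDIO setup, typically called from the chip driver's probe
 * path: allocate the high-priority txrx workqueue, initialize the
 * work items owned by this file and hook up the queue and bus ops.
 */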
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
	       const struct mt76_bus_ops *bus_ops)
{
	struct mt76_sdio *sdio = &dev->sdio;

	sdio->txrx_wq = alloc_workqueue("mt76s_txrx_wq",
					WQ_UNBOUND | WQ_HIGHPRI,
					WQ_UNBOUND_MAX_ACTIVE);
	if (!sdio->txrx_wq)
		return -ENOMEM;

	INIT_WORK(&sdio->stat_work, mt76s_tx_status_data);
	INIT_WORK(&sdio->tx.status_work, mt76s_tx_work);
	INIT_WORK(&sdio->rx.net_work, mt76s_rx_work);

	mutex_init(&sdio->sched.lock);
	dev->queue_ops = &sdio_queue_ops;
	dev->bus = bus_ops;
	dev->sdio.func = func;

	return 0;
}
EXPORT_SYMBOL_GPL(mt76s_init);

MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");