xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852bs/phl/hci/phl_trx_sdio.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2019 - 2021 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_TRX_SDIO_C_
16 #include "../phl_headers.h"
17 
18 #ifdef SDIO_TX_THREAD
19 #define XMIT_BUFFER_RETRY_LIMIT		0x100	/* > 0xFF: No limit */
20 #endif /* SDIO_TX_THREAD */
21 
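/*
 * One SDIO TX scratch buffer: up to PHL_SDIO_TX_AGG_MAX packets (each a WD
 * descriptor plus payload) are packed into ->buffer and pushed out in a
 * single bus transfer; pkt_len[] and wp_offset[] keep per-packet metadata
 * for the HAL.
 */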
22 struct rtw_tx_buf {
23 #ifdef SDIO_TX_THREAD
24 	_os_list list;
25 
26 	enum rtw_packet_type tag;	/* return queue type */
27 	u8 mgnt_pkt;			/* used for xmit management frame */
28 	u8 retry;			/* TX retry count */
29 #endif
30 
31 	u8 *buffer;			/* DMA-able scratch buffer */
32 	u32 buf_len;			/* buffer size */
33 	u32 used_len;			/* total valid data size */
34 	u8 dma_ch;
35 	u8 agg_cnt;			/* bus aggregation number */
36 #ifndef PHL_SDIO_TX_AGG_MAX
37 #define PHL_SDIO_TX_AGG_MAX	50
38 #endif
39 	u16 pkt_len[PHL_SDIO_TX_AGG_MAX];
40 	u8 wp_offset[PHL_SDIO_TX_AGG_MAX];
41 };
42 
43 #ifdef SDIO_TX_THREAD
44 struct rtw_tx_buf_ring {
45 	struct rtw_tx_buf *txbufblock;
46 	u32 block_cnt_alloc;		/* Total number of rtw_tx_buf allocated */
47 	u32 total_blocks_size;		/* block_cnt_alloc * sizeof(rtw_tx_buf) */
48 
49 	struct phl_queue idle_list;
50 	struct phl_queue busy_list;		/* ready to send buffer list */
51 	struct phl_queue mgnt_idle_list;	/* management buffer list */
52 	struct phl_queue mgnt_busy_list;	/* ready to send management buffer list */
53 };
54 #endif /* SDIO_TX_THREAD */
55 
56 struct rtw_rx_buf_ring {
57 	struct rtw_rx_buf *rxbufblock;
58 	u32 block_cnt_alloc;	/* Total number of rtw_rx_buf allocated */
59 	u32 total_blocks_size;	/* block_cnt_alloc * sizeof(rtw_rx_buf) */
60 
61 	struct phl_queue idle_rxbuf_list;
62 	struct phl_queue busy_rxbuf_list;
63 	struct phl_queue pend_rxbuf_list;
64 };
65 
66 #ifdef SDIO_TX_THREAD
67 void phl_tx_sdio_wake_thrd(struct phl_info_t *phl_info)
68 {
69 	struct hci_info_t *hci = phl_info->hci;
70 
71 
72 	_os_sema_up(phl_to_drvpriv(phl_info), &hci->tx_thrd_sema);
73 }
74 
75 static void enqueue_txbuf(struct phl_info_t *phl_info,
76 			  struct phl_queue *pool_list, struct rtw_tx_buf *txbuf,
77 			  enum list_pos pos)
78 {
79 	void *drv = phl_to_drvpriv(phl_info);
80 
81 
82 	pq_push(drv, pool_list, &txbuf->list, pos, _ps);
83 }
84 
85 static struct rtw_tx_buf *dequeue_txbuf(struct phl_info_t *phl_info,
86 					struct phl_queue *pool_list)
87 {
88 	struct rtw_tx_buf *txbuf = NULL;
89 	void *drv = phl_to_drvpriv(phl_info);
90 	_os_list *buflist;
91 	u8 res;
92 
93 
94 	res = pq_pop(drv, pool_list, &buflist, _first, _ps);
95 	if (!res)
96 		return NULL;
97 
98 	txbuf = list_entry(buflist, struct rtw_tx_buf, list);
99 
100 	return txbuf;
101 }
102 
103 static struct rtw_tx_buf* alloc_txbuf(struct phl_info_t *phl_info,
104 				      struct rtw_tx_buf_ring *pool, u8 tid)
105 {
106 	struct rtw_tx_buf *txbuf = NULL;
107 
108 
109 	if (tid == RTW_PHL_RING_CAT_MGNT) {
110 		txbuf = dequeue_txbuf(phl_info, &pool->mgnt_idle_list);
111 		if (txbuf)
112 			return txbuf;
113 	}
114 
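	/*
	 * Management pool exhausted, or this is a data frame: fall back to
	 * the shared data pool. mgnt_pkt is marked below so a borrowed buffer
	 * is still queued on the management busy list, while free_txbuf()
	 * returns it to the data idle list (its tag stays PKT_TYPE_DATA).
	 */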
115 	txbuf = dequeue_txbuf(phl_info, &pool->idle_list);
116 	if (!txbuf)
117 		return NULL;
118 
119 	if (tid == RTW_PHL_RING_CAT_MGNT)
120 		txbuf->mgnt_pkt = true;
121 
122 	return txbuf;
123 }
124 
125 /*
126  * Enqueue a tx buffer at the queue tail and notify the TX I/O thread to send.
127  * This function is usually called outside the TX I/O thread.
128  */
129 static void enqueue_busy_txbuf(struct phl_info_t *phl_info,
130 			       struct rtw_tx_buf_ring *pool,
131 			       struct rtw_tx_buf *txbuf)
132 {
133 	if ((txbuf->tag == RTW_PHL_PKT_TYPE_MGNT) || (txbuf->mgnt_pkt))
134 		enqueue_txbuf(phl_info, &pool->mgnt_busy_list, txbuf, _tail);
135 	else
136 		enqueue_txbuf(phl_info, &pool->busy_list, txbuf, _tail);
137 
138 	phl_tx_sdio_wake_thrd(phl_info);
139 }
140 
141 /*
142  * Enqueue a tx buffer at the queue head without notifying the TX I/O thread
143  * again. This function is usually called inside the TX I/O thread.
144  */
145 static void enqueue_busy_txbuf_to_head(struct phl_info_t *phl_info,
146 				       struct rtw_tx_buf_ring *pool,
147 				       struct rtw_tx_buf *txbuf)
148 {
149 	if ((txbuf->tag == RTW_PHL_PKT_TYPE_MGNT) || (txbuf->mgnt_pkt))
150 		enqueue_txbuf(phl_info, &pool->mgnt_busy_list, txbuf, _first);
151 	else
152 		enqueue_txbuf(phl_info, &pool->busy_list, txbuf, _first);
153 }
154 
155 static struct rtw_tx_buf* dequeue_busy_txbuf(struct phl_info_t *phl_info,
156 					     struct rtw_tx_buf_ring *pool)
157 {
158 	struct rtw_tx_buf *txbuf = NULL;
159 
160 
161 	txbuf = dequeue_txbuf(phl_info, &pool->mgnt_busy_list);
162 	if (txbuf)
163 		return txbuf;
164 
165 	return dequeue_txbuf(phl_info, &pool->busy_list);
166 }
167 
168 static void free_txbuf(struct phl_info_t *phl_info,
169 		       struct rtw_tx_buf_ring *pool, struct rtw_tx_buf *txbuf)
170 {
171 	struct phl_queue *pool_list;
172 
173 
174 	txbuf->retry = 0;
175 	txbuf->used_len = 0;
176 	txbuf->agg_cnt = 0;
177 	if (txbuf->tag == RTW_PHL_PKT_TYPE_MGNT) {
178 		pool_list = &pool->mgnt_idle_list;
179 	} else {
180 		txbuf->mgnt_pkt = false;
181 		pool_list = &pool->idle_list;
182 	}
183 
184 	enqueue_txbuf(phl_info, pool_list, txbuf, _tail);
185 }
186 
187 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
188 #ifndef RTW_WKARD_SDIO_TX_USE_YIELD
189 /**
190  * txbuf_wait - wait for an idle tx buffer (with timeout)
191  * @phl_info:	pointer to struct phl_info_t
192  * @timeout:	timeout value in milliseconds
193  *
194  * This waits until a tx buffer has been returned to the idle list or until
195  * the specified timeout expires. The timeout is in milliseconds.
196  *
197  * Return: 0 if the wait timed out, 1 if the tx buffer idle list is already
198  * non-empty, or a positive value if the wait completed.
199  */
200 static int txbuf_wait(struct phl_info_t *phl_info, int timeout)
201 {
202 	void *drv = phl_to_drvpriv(phl_info);
203 	struct hci_info_t *hci = phl_info->hci;
204 	struct rtw_tx_buf_ring *tx_pool = (struct rtw_tx_buf_ring *)hci->txbuf_pool;
205 	_os_event event;
206 	_os_spinlockfg sp_flags;
207 	int ret = 1;
208 
209 
210 	_os_spinlock(drv, &hci->tx_buf_lock, _ps, &sp_flags);
211 	if (tx_pool->idle_list.cnt == 0) {
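		/*
		 * Register the event while still holding tx_buf_lock so a
		 * concurrent txbuf_set_ready() cannot fire between the
		 * idle-list check and the wait (no missed wakeup).
		 */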
212 		_os_event_init(drv, &event);
213 		hci->tx_buf_event = &event;
214 		_os_spinunlock(drv, &hci->tx_buf_lock, _ps, &sp_flags);
215 		ret = _os_event_wait(drv, &event, timeout);
216 		_os_spinlock(drv, &hci->tx_buf_lock, _ps, &sp_flags);
217 		if (hci->tx_buf_event)
218 			hci->tx_buf_event = NULL;
219 		_os_event_free(drv, &event);
220 	}
221 	_os_spinunlock(drv, &hci->tx_buf_lock, _ps, &sp_flags);
222 
223 	return ret;
224 }
225 
226 static void txbuf_set_ready(struct phl_info_t *phl_info)
227 {
228 	void *drv = phl_to_drvpriv(phl_info);
229 	struct hci_info_t *hci = phl_info->hci;
230 	_os_spinlockfg sp_flags;
231 
232 
233 	_os_spinlock(drv, &hci->tx_buf_lock, _ps, &sp_flags);
234 	if (hci->tx_buf_event) {
235 		_os_event_set(drv, hci->tx_buf_event);
236 		hci->tx_buf_event = NULL;
237 	}
238 	_os_spinunlock(drv, &hci->tx_buf_lock, _ps, &sp_flags);
239 }
240 #endif /* !RTW_WKARD_SDIO_TX_USE_YIELD */
241 #endif /* CONFIG_PHL_SDIO_TX_CB_THREAD */
242 #endif /* SDIO_TX_THREAD */
243 
244 static void enqueue_rxbuf(struct phl_info_t *phl_info,
245 			  struct phl_queue *pool_list, struct rtw_rx_buf *rxbuf)
246 {
247 	void *drv = phl_to_drvpriv(phl_info);
248 	_os_spinlockfg sp_flags;
249 
250 
251 	_os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
252 	list_add_tail(&rxbuf->list, &pool_list->queue);
253 	pool_list->cnt++;
254 	_os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
255 }
256 
257 static struct rtw_rx_buf *dequeue_rxbuf(struct phl_info_t *phl_info,
258 					struct phl_queue *pool_list)
259 {
260 	struct rtw_rx_buf *rxbuf = NULL;
261 	void *drv = phl_to_drvpriv(phl_info);
262 	_os_spinlockfg sp_flags;
263 
264 
265 	_os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
266 	if (!list_empty(&pool_list->queue)) {
267 		rxbuf = list_first_entry(&pool_list->queue, struct rtw_rx_buf,
268 					 list);
269 		list_del(&rxbuf->list);
270 		pool_list->cnt--;
271 	}
272 	_os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
273 
274 	return rxbuf;
275 }
276 
277 /*
278  * Return RTW_PHL_STATUS_RESOURCE if the space left in txbuf is not enough for
279  * tx_req, or RTW_PHL_STATUS_SUCCESS if tx_req has been handled and recycled.
280  */
281 static enum rtw_phl_status _phl_prepare_tx_sdio(struct phl_info_t *phl_info,
282 					        struct rtw_xmit_req *tx_req,
283 					        struct rtw_tx_buf *txbuf)
284 {
285 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
286 	enum rtw_hal_status hstatus;
287 	void *drv_priv = phl_to_drvpriv(phl_info);
288 	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
289 	u32 align_size = 8;
290 	u32 wd_len = 48; /* default set to max 48 bytes */
291 	u32 used_len;
292 	u32 total_len;
293 	u8 *tx_buf_data;
294 	struct rtw_pkt_buf_list *pkt_buf = NULL;
295 	u8 i = 0;
296 
297 
298 	if (txbuf->agg_cnt == PHL_SDIO_TX_AGG_MAX)
299 		return RTW_PHL_STATUS_RESOURCE;
300 
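	/*
	 * Aggregation buffer layout for one SDIO transfer:
	 *   [WD | pkt 0][pad][WD | pkt 1][pad]...
	 * Each sub-frame starts on an 8-byte aligned offset, so round the
	 * current used length up before appending the next WD + payload.
	 */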
301 	used_len = _ALIGN(txbuf->used_len, align_size);
302 	total_len = used_len + wd_len + tx_req->total_len;
303 	if (total_len > txbuf->buf_len) {
304 		if (txbuf->agg_cnt)
305 			return RTW_PHL_STATUS_RESOURCE;
306 
307 		PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
308 			  "%s: unexpected tx size(%d + %d)!!\n",
309 			  __FUNCTION__, tx_req->total_len, wd_len);
310 		/* drop, skip this packet */
311 		goto recycle;
312 	}
313 
314 	tx_buf_data = txbuf->buffer + used_len;	/* align start address */
315 
316 	hstatus = rtw_hal_fill_txdesc(phl_info->hal, tx_req, tx_buf_data, &wd_len);
317 	if (hstatus != RTW_HAL_STATUS_SUCCESS) {
318 		PHL_TRACE(COMP_PHL_DBG|COMP_PHL_XMIT, _PHL_ERR_,
319 			  "%s: Fail to fill txdesc!(0x%x)\n",
320 			  __FUNCTION__, hstatus);
321 		/* drop, skip this packet */
322 		goto recycle;
323 	}
324 
325 	tx_buf_data += wd_len;
326 
327 	pkt_buf = (struct rtw_pkt_buf_list *)tx_req->pkt_list;
328 	for (i = 0; i < tx_req->pkt_cnt; i++, pkt_buf++) {
329 		_os_mem_cpy(drv_priv, tx_buf_data,
330 			    pkt_buf->vir_addr, pkt_buf->length);
331 		tx_buf_data += pkt_buf->length;
332 	}
333 
334 	txbuf->used_len = used_len + wd_len + tx_req->total_len;
335 	txbuf->pkt_len[txbuf->agg_cnt] = tx_req->mdata.pktlen;
336 	txbuf->wp_offset[txbuf->agg_cnt] = tx_req->mdata.wp_offset;
337 	txbuf->agg_cnt++;
338 	if (txbuf->agg_cnt == 1) {
339 		txbuf->dma_ch = tx_req->mdata.dma_ch;
340 	} else {
341 		/* update the first packet's txagg_num field in its WD */
342 		txbuf->buffer[5] = txbuf->agg_cnt;
343 		/* Todo: update checksum field */
344 	}
345 
346 recycle:
347 	if (RTW_PHL_TREQ_TYPE_TEST_PATTERN == tx_req->treq_type) {
348 		if (ops->tx_test_recycle) {
349 			pstatus = ops->tx_test_recycle(phl_info, tx_req);
350 			if (pstatus != RTW_PHL_STATUS_SUCCESS) {
351 				PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
352 					  "%s: tx_test_recycle fail!! (%d)\n",
353 					  __FUNCTION__, pstatus);
354 			}
355 		}
356 	} else {
357 		/* RTW_PHL_TREQ_TYPE_NORMAL == tx_req->treq_type */
358 		if (ops->tx_recycle) {
359 			pstatus = ops->tx_recycle(drv_priv, tx_req);
360 			if (pstatus != RTW_PHL_STATUS_SUCCESS)
361 				PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
362 					  "%s: tx recycle fail!! (%d)\n",
363 					  __FUNCTION__, pstatus);
364 		}
365 	}
366 
367 	return RTW_PHL_STATUS_SUCCESS;
368 }
369 
370 static void _phl_tx_flow_ctrl_sdio(struct phl_info_t *phl_info,
371 						      _os_list *sta_list)
372 {
373 	phl_tx_flow_ctrl(phl_info, sta_list);
374 }
375 
376 static enum rtw_phl_status phl_handle_xmit_ring_sdio(
377 					struct phl_info_t *phl_info,
378 					struct phl_ring_status *ring_sts)
379 {
380 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
381 	struct rtw_phl_tx_ring *tring = ring_sts->ring_ptr;
382 	struct rtw_xmit_req *tx_req = NULL;
383 	u16 rptr = 0;
384 	void *drv_priv = phl_to_drvpriv(phl_info);
385 #ifdef SDIO_TX_THREAD
386 	struct rtw_tx_buf_ring *tx_pool = (struct rtw_tx_buf_ring *)phl_info->hci->txbuf_pool;
387 	struct rtw_tx_buf *txbuf = NULL;
388 #else
389 	struct rtw_tx_buf *txbuf = (struct rtw_tx_buf *)phl_info->hci->txbuf_pool;
390 #endif
391 
392 
393 	do {
394 		rptr = (u16)_os_atomic_read(drv_priv, &tring->phl_next_idx);
395 
396 		tx_req = (struct rtw_xmit_req *)tring->entry[rptr];
397 		if (!tx_req) {
398 			PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
399 				  "%s: tx_req is NULL!\n", __FUNCTION__);
400 			pstatus = RTW_PHL_STATUS_FAILURE;
401 			break;
402 		}
403 		tx_req->mdata.macid = ring_sts->macid;
404 		tx_req->mdata.band = ring_sts->band;
405 		tx_req->mdata.wmm = ring_sts->wmm;
406 		tx_req->mdata.hal_port = ring_sts->port;
407 		/*tx_req->mdata.mbssid = ring_sts->mbssid;*/
408 		tx_req->mdata.tid = tring->tid;
409 		tx_req->mdata.dma_ch = tring->dma_ch;
410 		tx_req->mdata.pktlen = (u16)tx_req->total_len;
411 
412 #ifdef SDIO_TX_THREAD
413 get_txbuf:
414 		if (!txbuf) {
415 			txbuf = alloc_txbuf(phl_info, tx_pool, tring->tid);
416 			if (!txbuf) {
417 				pstatus = RTW_PHL_STATUS_RESOURCE;
418 				break;
419 			}
420 		}
421 #endif /* SDIO_TX_THREAD */
422 		pstatus = _phl_prepare_tx_sdio(phl_info, tx_req, txbuf);
423 #ifdef SDIO_TX_THREAD
424 		if (pstatus == RTW_PHL_STATUS_RESOURCE) {
425 			/* enqueue txbuf */
426 			enqueue_busy_txbuf(phl_info, tx_pool, txbuf);
427 			txbuf = NULL;
428 			goto get_txbuf;
429 		}
430 		if (pstatus != RTW_PHL_STATUS_SUCCESS) {
431 			/* impossible case, never entered for now */
432 			PHL_TRACE(COMP_PHL_DBG|COMP_PHL_XMIT, _PHL_ERR_,
433 				  "%s: prepare tx fail!(%d)\n",
434 				  __FUNCTION__, pstatus);
435 			break;
436 		}
437 #else /* !SDIO_TX_THREAD */
438 		if (pstatus != RTW_PHL_STATUS_SUCCESS) {
439 			if (pstatus == RTW_PHL_STATUS_RESOURCE) {
440 				pstatus = RTW_PHL_STATUS_SUCCESS;
441 			} else {
442 				PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
443 					  "%s: prepare tx fail!(%d)\n",
444 					  __FUNCTION__, pstatus);
445 			}
446 			break;
447 		}
448 #endif /* !SDIO_TX_THREAD */
449 
450 		_os_atomic_set(drv_priv, &tring->phl_idx, rptr);
451 
452 		ring_sts->req_busy--;
453 		/* TODO: aggregate more packets! */
454 		if (!ring_sts->req_busy)
455 			break;
456 
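		/* Advance the reader index, wrapping at the ring boundary. */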
457 		if ((rptr + 1) >= MAX_PHL_TX_RING_ENTRY_NUM)
458 			_os_atomic_set(drv_priv, &tring->phl_next_idx, 0);
459 		else
460 			_os_atomic_inc(drv_priv, &tring->phl_next_idx);
461 	} while (1);
462 
463 #ifdef SDIO_TX_THREAD
464 	if (txbuf)
465 		enqueue_busy_txbuf(phl_info, tx_pool, txbuf);
466 #endif /* SDIO_TX_THREAD */
467 	phl_release_ring_sts(phl_info, ring_sts);
468 
469 	return pstatus;
470 }
471 
472 #ifdef SDIO_TX_THREAD
473 static int phl_tx_sdio_thrd_hdl(void *context)
474 {
475 	struct phl_info_t *phl = context;
476 	struct hci_info_t *hci = phl->hci;
477 	struct rtw_tx_buf_ring *tx_pool;
478 	struct rtw_tx_buf *txbuf = NULL;
479 	void *drv = phl_to_drvpriv(phl);
480 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl->hal);
481 	struct bus_cap_t *bus_cap = &hal_com->bus_cap;
482 	enum rtw_hal_status hstatus;
483 	u16 retry_time = bus_cap->tx_buf_retry_lmt ? bus_cap->tx_buf_retry_lmt : XMIT_BUFFER_RETRY_LIMIT;
484 
485 	#ifdef RTW_XMIT_THREAD_HIGH_PRIORITY
486 	#ifdef PLATFORM_LINUX
487 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
488 		sched_set_fifo_low(current);
489 	#else
490 		struct sched_param param = { .sched_priority = 1 };
491 
492 		sched_setscheduler(current, SCHED_FIFO, &param);
493 	#endif
494 	#endif /* PLATFORM_LINUX */
495 	#endif /*RTW_XMIT_THREAD_HIGH_PRIORITY*/
496 
497 
498 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "SDIO: tx thread start, retry_time=%d\n", retry_time);
499 
500 	tx_pool = (struct rtw_tx_buf_ring *)hci->txbuf_pool;
501 	while (1) {
502 		_os_sema_down(drv, &hci->tx_thrd_sema);
503 
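		/*
		 * Re-entered via goto after every send attempt so the busy
		 * lists are fully drained before blocking on the semaphore
		 * again.
		 */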
504 check_stop:
505 		if (_os_thread_check_stop(drv, &hci->tx_thrd))
506 			break;
507 
508 		txbuf = dequeue_busy_txbuf(phl, tx_pool);
509 		if (!txbuf)
510 			continue;
511 		_os_atomic_set(drv, &phl->phl_sw_tx_more, 0);
512 		hstatus = rtw_hal_sdio_tx(phl->hal, txbuf->dma_ch, txbuf->buffer,
513 					  txbuf->used_len, txbuf->agg_cnt,
514 					  txbuf->pkt_len, txbuf->wp_offset);
515 		if (hstatus != RTW_HAL_STATUS_SUCCESS) {
516 			bool overflow;
517 
518 			if ((hstatus == RTW_HAL_STATUS_RESOURCE)
519 			    && (txbuf->retry < retry_time)) {
520 				txbuf->retry++;
521 				enqueue_busy_txbuf_to_head(phl, tx_pool, txbuf);
522 				/* Todo: What to do when TX FIFO not ready? */
523 				goto check_stop;
524 			}
525 
526 			/* Keep overflow bit(bit31) */
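			/*
			 * BIT31 acts as a sticky overflow marker: preserve it
			 * across the increment so the low 31 bits keep
			 * counting while "(overflow)" is appended to the log.
			 */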
527 			overflow = (hci->tx_drop_cnt & BIT31) ? true : false;
528 			hci->tx_drop_cnt++;
529 			if (overflow)
530 				hci->tx_drop_cnt |= BIT31;
531 
532 			/* Show msg on 2^n times */
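			/*
			 * (x & (x - 1)) is zero only for powers of two, so
			 * this logs at drop counts 1, 2, 4, 8, ... and
			 * rate-limits the message exponentially.
			 */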
533 			if (!(hci->tx_drop_cnt & (hci->tx_drop_cnt - 1))) {
534 				PHL_TRACE(COMP_PHL_DBG|COMP_PHL_XMIT, _PHL_ERR_,
535 					  "%s: drop!(%d) type=%u mgnt=%u len=%u "
536 					  "agg_cnt=%u drop_cnt=%u%s\n",
537 					  __FUNCTION__, hstatus, txbuf->tag, txbuf->mgnt_pkt,
538 					  txbuf->used_len, txbuf->agg_cnt,
539 					  hci->tx_drop_cnt & ~BIT31,
540 					  (hci->tx_drop_cnt & BIT31) ? "(overflow)" : "");
541 			}
542 		}
543 		free_txbuf(phl, tx_pool, txbuf);
544 		/* if the agg thread is waiting for a tx buffer, notify it */
545 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
546 #ifndef RTW_WKARD_SDIO_TX_USE_YIELD
547 		txbuf_set_ready(phl);
548 #endif /* !RTW_WKARD_SDIO_TX_USE_YIELD */
549 #else /* CONFIG_PHL_SDIO_TX_CB_THREAD */
550 		phl_schedule_handler(phl->phl_com, &phl->phl_tx_handler);
551 #endif /* CONFIG_PHL_SDIO_TX_CB_THREAD */
552 
553 		/* check for stop and whether more txbufs are queued for tx */
554 		goto check_stop;
555 	}
556 
557 	_os_thread_wait_stop(drv, &hci->tx_thrd);
558 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "SDIO: tx thread down\n");
559 
560 	return 0;
561 }
562 #else /* !SDIO_TX_THREAD */
563 static enum rtw_phl_status _phl_tx_sdio(struct phl_info_t *phl_info)
564 {
565 	enum rtw_hal_status hstatus;
566 	struct hci_info_t *hci = phl_info->hci;
567 	struct rtw_tx_buf *txbuf = (struct rtw_tx_buf*)hci->txbuf_pool;
568 
569 
570 	if (!txbuf->buffer || !txbuf->buf_len)
571 		return RTW_PHL_STATUS_SUCCESS;
572 
573 	hstatus = rtw_hal_sdio_tx(phl_info->hal, txbuf->dma_ch, txbuf->buffer,
574 				  txbuf->used_len, txbuf->agg_cnt,
575 				  txbuf->pkt_len, txbuf->wp_offset);
576 	txbuf->used_len = 0;
577 	txbuf->agg_cnt = 0;
578 	if (hstatus == RTW_HAL_STATUS_SUCCESS)
579 		return RTW_PHL_STATUS_SUCCESS;
580 
581 	return RTW_PHL_STATUS_FAILURE;
582 }
583 #endif /* !SDIO_TX_THREAD */
584 
585 static enum rtw_phl_status phl_tx_check_status_sdio(struct phl_info_t *phl_info)
586 {
587 	void *drv = phl_to_drvpriv(phl_info);
588 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
589 
590 	if (PHL_TX_STATUS_STOP_INPROGRESS ==
591 		_os_atomic_read(phl_to_drvpriv(phl_info), &phl_info->phl_sw_tx_sts)){
592 			_os_atomic_set(drv, &phl_info->phl_sw_tx_sts, PHL_TX_STATUS_SW_PAUSE);
593 			pstatus = RTW_PHL_STATUS_FAILURE;
594 	}
595 	return pstatus;
596 }
597 
598 static void _phl_tx_callback_sdio(void *context)
599 {
600 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
601 	struct rtw_phl_handler *phl_handler;
602 	struct phl_info_t *phl_info;
603 	struct phl_ring_status *ring_sts = NULL, *t;
604 	_os_list sta_list;
605 	bool tx_pause = false;
606 #ifdef SDIO_TX_THREAD
607 	bool rsrc;
608 #endif /* SDIO_TX_THREAD */
609 
610 
611 	phl_handler = (struct rtw_phl_handler *)phl_container_of(context,
612 						     struct rtw_phl_handler,
613 						     os_handler);
614 	phl_info = (struct phl_info_t *)phl_handler->context;
615 
616 	INIT_LIST_HEAD(&sta_list);
617 
618 	pstatus = phl_tx_check_status_sdio(phl_info);
619 	if (pstatus == RTW_PHL_STATUS_FAILURE)
620 		goto end;
621 
622 	/* check datapath sw state */
623 	tx_pause = phl_datapath_chk_trx_pause(phl_info, PHL_CTRL_TX);
624 	if (true == tx_pause)
625 		goto end;
626 
627 #ifdef CONFIG_POWER_SAVE
628 	/* check ps state when tx is not paused */
629 	if (false == phl_ps_is_datapath_allowed(phl_info)) {
630 		PHL_WARN("%s(): datapath is not allowed now... may be in low power.\n", __func__);
631 		goto end;
632 	}
633 #endif
634 	do {
635 		if (!phl_check_xmit_ring_resource(phl_info, &sta_list))
636 			break;
637 
638 		/* phl_info->t_fctrl_result is filled inside phl_tx_flow_ctrl() */
639 		phl_tx_flow_ctrl(phl_info, &sta_list);
640 
641 #ifdef SDIO_TX_THREAD
642 		rsrc = false;	/* by default assume no tx resource is available */
643 #endif /* SDIO_TX_THREAD */
644 		phl_list_for_loop_safe(ring_sts, t, struct phl_ring_status,
645 				       &phl_info->t_fctrl_result, list) {
646 			list_del(&ring_sts->list);
647 
648 			/* ring_sts is released inside phl_handle_xmit_ring_sdio() */
649 			pstatus = phl_handle_xmit_ring_sdio(phl_info, ring_sts);
650 
651 #ifdef SDIO_TX_THREAD
652 			if (pstatus != RTW_PHL_STATUS_RESOURCE)
653 				rsrc = true; /* some tx data has been sent */
654 #else /* !SDIO_TX_THREAD */
655 			pstatus = _phl_tx_sdio(phl_info);
656 			if (pstatus != RTW_PHL_STATUS_SUCCESS) {
657 				u32 drop = phl_info->hci->tx_drop_cnt;
658 
659 				/* Keep overflow bit(bit31) */
660 				phl_info->hci->tx_drop_cnt = (drop & BIT31)
661 							    | (drop + 1);
662 				drop = phl_info->hci->tx_drop_cnt;
663 				/* Show msg on 2^n times */
664 				if (!(drop & (drop - 1))) {
665 					PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
666 						  "%s: phl_tx fail!(%d) drop cnt=%u%s\n",
667 						  __FUNCTION__, pstatus,
668 						  drop & ~BIT31,
669 						  drop & BIT31 ? "(overflow)" : "");
670 				}
671 			}
672 #endif /* !SDIO_TX_THREAD */
673 		}
674 
675 		pstatus = phl_tx_check_status_sdio(phl_info);
676 		if (pstatus == RTW_PHL_STATUS_FAILURE)
677 			break;
678 
679 		phl_free_deferred_tx_ring(phl_info);
680 
681 #ifdef SDIO_TX_THREAD
682 		/*
683 		 * Break the loop when no txbuf is available for tx data; this
684 		 * function is scheduled again when the I/O thread returns a txbuf.
685 		 */
686 		if (!rsrc) {
687 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
688 #ifdef RTW_WKARD_SDIO_TX_USE_YIELD
689 			_os_yield(phl_to_drvpriv(phl_info));
690 #else /* !RTW_WKARD_SDIO_TX_USE_YIELD */
691 			txbuf_wait(phl_info, 1);
692 #endif /* !RTW_WKARD_SDIO_TX_USE_YIELD */
693 #else /* !CONFIG_PHL_SDIO_TX_CB_THREAD */
694 			break;
695 #endif /* !CONFIG_PHL_SDIO_TX_CB_THREAD */
696 		}
697 #endif /* SDIO_TX_THREAD */
698 	} while (1);
699 
700 end:
701 	phl_free_deferred_tx_ring(phl_info);
702 }
703 
704 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
705 static void phl_tx_callback_sdio(void *context)
706 {
707 	struct rtw_phl_handler *phl_handler;
708 	struct phl_info_t *phl_info;
709 	void *d;
710 
711 	#ifdef RTW_XMIT_THREAD_CB_HIGH_PRIORITY
712 	#ifdef PLATFORM_LINUX
713 	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
714 	sched_set_fifo_low(current);
715 	#else
716 		struct sched_param param = { .sched_priority = 1 };
717 
718 		sched_setscheduler(current, SCHED_FIFO, &param);
719 	#endif
720 	#endif /* PLATFORM_LINUX */
721 	#endif /* RTW_XMIT_THREAD_CB_HIGH_PRIORITY */
722 
723 	phl_handler = (struct rtw_phl_handler *)phl_container_of(context,
724 							struct rtw_phl_handler,
725 							os_handler);
726 	phl_info = (struct phl_info_t *)phl_handler->context;
727 	d = phl_to_drvpriv(phl_info);
728 
729 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "SDIO: %s start\n",
730 		  phl_handler->cb_name);
731 
732 	while (1) {
733 		_os_sema_down(d, &(phl_handler->os_handler.os_sema));
734 
735 		if (_os_thread_check_stop(d, (_os_thread*)context))
736 			break;
737 
738 		_phl_tx_callback_sdio(context);
739 	}
740 
741 	_os_thread_wait_stop(d, (_os_thread*)context);
742 	_os_sema_free(d, &(phl_handler->os_handler.os_sema));
743 
744 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "SDIO: %s down\n",
745 		  phl_handler->cb_name);
746 }
747 #else /* !CONFIG_PHL_SDIO_TX_CB_THREAD */
748 #define phl_tx_callback_sdio	_phl_tx_callback_sdio
749 #endif /* !CONFIG_PHL_SDIO_TX_CB_THREAD */
750 
751 static enum rtw_phl_status phl_prepare_tx_sdio(struct phl_info_t *phl_info,
752 					       struct rtw_xmit_req *tx_req)
753 {
754 	/* not implement yet */
755 	return RTW_PHL_STATUS_FAILURE;
756 }
757 
758 static enum rtw_phl_status phl_tx_sdio(struct phl_info_t *phl_info)
759 {
760 	/* not implement yet */
761 	return RTW_PHL_STATUS_FAILURE;
762 }
763 
764 static enum rtw_phl_status phl_recycle_rx_buf_sdio(struct phl_info_t *phl,
765 						   void *r, u8 ch,
766 						   enum rtw_rx_type type)
767 {
768 	struct hci_info_t *hci_info = (struct hci_info_t *)phl->hci;
769 	struct rtw_rx_buf_ring *rx_pool = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
770 	struct rtw_rx_buf *rxbuf = r;
771 	void *drv_priv = phl_to_drvpriv(phl);
772 
773 #if 1
774 	/* Just for debugging; consider disabling this in the future */
775 	if (!_os_atomic_read(drv_priv, &rxbuf->ref)) {
776 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
777 			  "%s: ref count error! (%px)\n", __FUNCTION__, rxbuf);
778 		_os_warn_on(1);
779 		return RTW_PHL_STATUS_SUCCESS;
780 	}
781 #endif
782 
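	/* Drop this owner's reference; the last put returns the buffer to the idle pool. */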
783 	if (!_os_atomic_dec_return(drv_priv, &rxbuf->ref))
784 		enqueue_rxbuf(phl, &rx_pool->idle_rxbuf_list, rxbuf);
785 
786 	return RTW_PHL_STATUS_SUCCESS;
787 }
788 void phl_rx_handle_normal(struct phl_info_t *phl,
789 					 struct rtw_phl_rx_pkt *phl_rx)
790 {
791 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
792 	_os_list frames;
793 	FUNCIN_WSTS(pstatus);
794 
795 	do {
796 		INIT_LIST_HEAD(&frames);
797 
798 		if (phl_rx->r.mdata.rx_rate <= RTW_DATA_RATE_HE_NSS4_MCS11)
799 			phl->phl_com->phl_stats.rx_rate_nmr[phl_rx->r.mdata.rx_rate]++;
800 
801 		pstatus = phl_rx_reorder(phl, phl_rx, &frames);
802 
803 		if (pstatus != RTW_PHL_STATUS_SUCCESS) {
804 			PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
805 					"%s: phl_rx_reorder"
806 					" FAIL! (%d)\n",
807 					__FUNCTION__, pstatus);
808 			break;
809 		}
810 		phl_handle_rx_frame_list(phl, &frames);
811 	} while (0);
812 
813 	FUNCOUT_WSTS(pstatus);
814 
815 }
816 
817 static enum rtw_phl_status phl_rx_sdio(struct phl_info_t *phl)
818 {
819 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
820 	enum rtw_hal_status hstatus;
821 	struct hci_info_t *hci_info = (struct hci_info_t *)phl->hci;
822 	struct rtw_phl_rx_pkt *phl_rx = NULL;
823 	struct rtw_rx_buf_ring *rx_pool = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
824 	struct rtw_rx_buf *rxbuf = NULL;
825 	void *drv_priv = phl_to_drvpriv(phl);
826 #ifndef CONFIG_PHL_RX_PSTS_PER_PKT
827 	_os_list frames;
828 #endif
829 	u32 len;
830 	bool flag = true;
831 	u8 i;
832 	u8 mfrag = 0, frag_num = 0;
833 	u16 netbuf_len = 0;
834 
835 
836 	do {
837 		if (rxbuf) {
838 			len = rtw_hal_sdio_parse_rx(phl->hal, rxbuf);
839 			if (!len) {
840 				if (!_os_atomic_dec_return(drv_priv, &rxbuf->ref))
841 					enqueue_rxbuf(phl, &rx_pool->idle_rxbuf_list, rxbuf);
842 				rxbuf = NULL;
843 				continue;
844 			}
845 		} else {
846 			rxbuf = dequeue_rxbuf(phl, &rx_pool->pend_rxbuf_list);
847 			if (!rxbuf) {
848 #ifdef CONFIG_PHL_SDIO_READ_RXFF_IN_INT
849 				rxbuf = dequeue_rxbuf(phl, &rx_pool->busy_rxbuf_list);
850 				if (!rxbuf)
851 					break;
852 
853 				len = rtw_hal_sdio_parse_rx(phl->hal, rxbuf);
854 				if (!len) {
855 					enqueue_rxbuf(phl, &rx_pool->idle_rxbuf_list, rxbuf);
856 					break;
857 				}
858 #else
859 				rxbuf = dequeue_rxbuf(phl, &rx_pool->idle_rxbuf_list);
860 				if (!rxbuf) {
861 					pstatus = RTW_PHL_STATUS_RESOURCE;
862 					break;
863 				}
864 
865 				len = rtw_hal_sdio_rx(phl->hal, rxbuf);
866 				if (!len) {
867 					enqueue_rxbuf(phl, &rx_pool->idle_rxbuf_list, rxbuf);
868 					break;
869 				}
870 #endif
871 				_os_atomic_set(drv_priv, &rxbuf->ref, 1);
872 			}
873 		}
874 
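		/*
		 * One rxbuf may carry several bus-aggregated packets. Walk
		 * them from agg_start (non-zero when a previous pass was
		 * interrupted) and take one rxbuf reference per phl_rx handed
		 * out, so the buffer is recycled only after every consumer
		 * releases it.
		 */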
875 		for (i = rxbuf->agg_start; i < rxbuf->agg_cnt; i++) {
876 			phl_rx = rtw_phl_query_phl_rx(phl);
877 			if (!phl_rx) {
878 				pstatus = RTW_PHL_STATUS_RESOURCE;
879 				/* Not enough resources to handle rx data, */
880 				/* so take a break for now */
881 				rxbuf->agg_start = i;
882 				enqueue_rxbuf(phl, &rx_pool->pend_rxbuf_list, rxbuf);
883 				rxbuf = NULL;
884 				flag = false;
885 
886 #ifdef PHL_RX_BATCH_IND
887 				_phl_indic_new_rxpkt(phl);
888 #endif
889 
890 				break;
891 			}
892 
893 			phl_rx->type = rxbuf->pkt[i].meta.rpkt_type;
894 			phl_rx->rxbuf_ptr = (u8*)rxbuf;
895 			_os_atomic_inc(drv_priv, &rxbuf->ref);
896 			phl_rx->r.os_priv = NULL;
897 			_os_mem_cpy(phl->phl_com->drv_priv,
898 				    &phl_rx->r.mdata, &rxbuf->pkt[i].meta,
899 				    sizeof(struct rtw_r_meta_data));
900 			/*phl_rx->r.shortcut_id;*/
901 			phl_rx->r.pkt_cnt = 1;
902 			phl_rx->r.pkt_list->vir_addr = rxbuf->pkt[i].pkt;
903 			phl_rx->r.pkt_list->length = rxbuf->pkt[i].pkt_len;
904 
905 			/* length includes WD and packet size */
906 			len = ((u32)(rxbuf->pkt[i].pkt - rxbuf->pkt[i].wd))
907 			      + rxbuf->pkt[i].pkt_len;
908 			hstatus = rtw_hal_handle_rx_buffer(phl->phl_com,
909 							   phl->hal,
910 							   rxbuf->pkt[i].wd,
911 							   len, phl_rx);
912 			if (hstatus != RTW_HAL_STATUS_SUCCESS) {
913 				PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
914 					  "%s: hal_handle_rx FAIL! (%d)"
915 					  " type=0x%X len=%u\n",
916 					  __FUNCTION__, hstatus,
917 					  phl_rx->type, len);
918 			}
919 
920 			switch (phl_rx->type) {
921 			case RTW_RX_TYPE_WIFI:
922 
923 #ifdef CONFIG_PHL_SDIO_RX_NETBUF_ALLOC_IN_PHL
924 				{
925 					u8 *netbuf = NULL;
926 					void *drv = phl_to_drvpriv(phl);
927 
928 					/* Pre-alloc netbuf and replace pkt_list[0].vir_addr */
929 
930 					/* For the first fragment, the driver needs to allocate a full-size netbuf (RTW_MAX_ETH_PKT_LEN, 1536 bytes) to defragment the packet. */
931 					mfrag = PHL_GET_80211_HDR_MORE_FRAG(phl_rx->r.pkt_list[0].vir_addr);
932 					frag_num = PHL_GET_80211_HDR_FRAG_NUM(phl_rx->r.pkt_list[0].vir_addr);
933 
934 					if (mfrag == 1 && frag_num == 0) {
935 						if (phl_rx->r.pkt_list[0].length < RTW_MAX_ETH_PKT_LEN)
936 							netbuf_len = RTW_MAX_ETH_PKT_LEN;
937 						else
938 							netbuf_len = phl_rx->r.pkt_list[0].length;
939 					} else {
940 						netbuf_len = phl_rx->r.pkt_list[0].length;
941 					}
942 
943 					netbuf = _os_alloc_netbuf(drv,
944 								netbuf_len,
945 								&(phl_rx->r.os_priv));
946 
947 					if (netbuf) {
948 						_os_mem_cpy(drv, netbuf,
949 							phl_rx->r.pkt_list[0].vir_addr, phl_rx->r.pkt_list[0].length);
950 						phl_rx->r.pkt_list[0].vir_addr = netbuf;
951 
952 						phl_rx->r.os_netbuf_len = netbuf_len;
953 						phl_rx->rxbuf_ptr = NULL;
954 						_os_atomic_dec(drv_priv, &rxbuf->ref);
955 					}
956 				}
957 #endif
958 
959 #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
960 				if (false == phl_rx_proc_wait_phy_sts(phl, phl_rx)) {
961 					PHL_TRACE(COMP_PHL_PSTS, _PHL_DEBUG_,
962 						  "phl_rx_proc_wait_phy_sts() return false \n");
963 					phl_rx_handle_normal(phl, phl_rx);
964 				} else {
965 					pstatus = RTW_PHL_STATUS_SUCCESS;
966 				}
967 				/*
968 				 * phl_rx has already been taken over by
969 				 * phl_rx_reorder(), so clear it here.
970 				 */
971 				phl_rx = NULL;
972 #else
973 				INIT_LIST_HEAD(&frames);
974 				pstatus = phl_rx_reorder(phl, phl_rx, &frames);
975 				/*
976 				 * phl_rx has already been taken over by
977 				 * phl_rx_reorder(), so clear it here.
978 				 */
979 				phl_rx = NULL;
980 				if (pstatus != RTW_PHL_STATUS_SUCCESS) {
981 					PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
982 						  "%s: phl_rx_reorder"
983 						  " FAIL! (%d)\n",
984 						  __FUNCTION__, pstatus);
985 					break;
986 				}
987 				phl_handle_rx_frame_list(phl, &frames);
988 
989 #endif
990 				break;
991 
992 			case RTW_RX_TYPE_PPDU_STATUS:
993 				phl_rx_proc_ppdu_sts(phl, phl_rx);
994 #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
995 				phl_rx_proc_phy_sts(phl, phl_rx);
996 #endif
997 				break;
998 
999 			default:
1000 				break;
1001 			}
1002 
1003 			if (phl_rx) {
1004 				_os_atomic_dec(drv_priv, &rxbuf->ref);
1005 				phl_release_phl_rx(phl, phl_rx);
1006 				phl_rx = NULL;
1007 			}
1008 		}
1009 
1010 		if (rxbuf) {
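			/*
			 * next_ptr is set when another aggregation block
			 * follows in the same buffer: advance ptr/len and
			 * parse the remainder on the next loop iteration.
			 */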
1011 			if (rxbuf->next_ptr) {
1012 				rxbuf->len -= (u32)(rxbuf->next_ptr - rxbuf->ptr);
1013 				rxbuf->ptr = rxbuf->next_ptr;
1014 				rxbuf->next_ptr = NULL;
1015 				continue;
1016 			}
1017 			if (!_os_atomic_dec_return(drv_priv, &rxbuf->ref))
1018 				enqueue_rxbuf(phl, &rx_pool->idle_rxbuf_list, rxbuf);
1019 			rxbuf = NULL;
1020 
1021 #ifdef PHL_RX_BATCH_IND
1022 			if (phl->rx_new_pending)
1023 				_phl_indic_new_rxpkt(phl);
1024 #endif
1025 		}
1026 	} while (flag);
1027 
1028 	return pstatus;
1029 }
1030 
1031 static void phl_rx_stop_sdio(struct phl_info_t *phl)
1032 {
1033 	void *drv = phl_to_drvpriv(phl);
1034 
1035 	_os_atomic_set(drv, &phl->phl_sw_rx_sts, PHL_RX_STATUS_SW_PAUSE);
1036 }
1037 
1038 static void _phl_rx_callback_sdio(void *context)
1039 {
1040 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1041 	struct rtw_phl_handler *phl_handler;
1042 	struct phl_info_t *phl_info;
1043 	bool rx_pause = false;
1044 
1045 	phl_handler = (struct rtw_phl_handler *)phl_container_of(context,
1046 						     struct rtw_phl_handler,
1047 						     os_handler);
1048 	phl_info = (struct phl_info_t *)phl_handler->context;
1049 
1050 	/* check datapath sw state */
1051 	rx_pause = phl_datapath_chk_trx_pause(phl_info, PHL_CTRL_RX);
1052 	if (true == rx_pause)
1053 		goto end;
1054 
1055 	if (false == phl_check_recv_ring_resource(phl_info))
1056 		goto chk_stop;
1057 
1058 	pstatus = phl_rx_sdio(phl_info);
1059 	if (pstatus == RTW_PHL_STATUS_RESOURCE) {
1060 		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
1061 			  "%s: resource starvation!\n", __FUNCTION__);
1062 	} else if (pstatus != RTW_PHL_STATUS_SUCCESS) {
1063 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1064 			  "%s: phl_rx fail!(%d)\n", __FUNCTION__, pstatus);
1065 	}
1066 
1067 chk_stop:
1068 	if (PHL_RX_STATUS_STOP_INPROGRESS ==
1069 	    _os_atomic_read(phl_to_drvpriv(phl_info), &phl_info->phl_sw_rx_sts))
1070 		phl_rx_stop_sdio(phl_info);
1071 end:
1072 	/* enable rx interrupt */
1073 	rtw_hal_config_interrupt(phl_info->hal, RTW_PHL_RESUME_RX_INT);
1074 }
1075 
1076 static void phl_rx_callback_sdio(void *context)
1077 {
1078 #ifdef CONFIG_PHL_SDIO_RX_CB_THREAD
1079 #ifdef RTW_RECV_THREAD_HIGH_PRIORITY
1080 #ifdef PLATFORM_LINUX
1081 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 9, 0))
1082 	sched_set_fifo_low(current);
1083 #else
1084 	struct sched_param param = { .sched_priority = 1 };
1085 
1086 	sched_setscheduler(current, SCHED_FIFO, &param);
1087 #endif
1088 #endif /* PLATFORM_LINUX */
1089 #endif /*RTW_RECV_THREAD_HIGH_PRIORITY*/
1090 	struct rtw_phl_handler *phl_handler;
1091 	struct phl_info_t *phl_info;
1092 	void *d;
1093 
1094 	phl_handler = (struct rtw_phl_handler *)phl_container_of(context,
1095 						     struct rtw_phl_handler,
1096 						     os_handler);
1097 	phl_info = (struct phl_info_t *)phl_handler->context;
1098 	d = phl_to_drvpriv(phl_info);
1099 
1100 	while (1) {
1101 		_os_sema_down(d, &(phl_handler->os_handler.os_sema));
1102 
1103 		if (_os_thread_check_stop(d, (_os_thread*)context))
1104 			break;
1105 
1106 		_phl_rx_callback_sdio(context);
1107 	}
1108 
1109 	_os_thread_wait_stop(d, (_os_thread*)context);
1110 	_os_sema_free(d, &(phl_handler->os_handler.os_sema));
1111 	return;
1112 #else
1113 	_phl_rx_callback_sdio(context);
1114 #endif
1115 }
1116 
1117 static enum rtw_phl_status phl_register_trx_hdlr_sdio(struct phl_info_t *phl)
1118 {
1119 	struct rtw_phl_handler *tx_handler = &phl->phl_tx_handler;
1120 	struct rtw_phl_handler *rx_handler = &phl->phl_rx_handler;
1121 	void *drv = phl_to_drvpriv(phl);
1122 	enum rtw_phl_status pstatus;
1123 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
1124 	const char *tx_hdl_cb_name = "RTW_TX_CB_THREAD";
1125 #endif
1126 
1127 #ifdef CONFIG_PHL_SDIO_RX_CB_THREAD
1128 	const char *rx_hdl_cb_name = "RTW_RX_CB_THREAD";
1129 #endif
1130 
1131 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
1132 	tx_handler->type = RTW_PHL_HANDLER_PRIO_NORMAL;
1133 	_os_strncpy(tx_handler->cb_name, tx_hdl_cb_name,
1134 		(strlen(tx_hdl_cb_name) > RTW_PHL_HANDLER_CB_NAME_LEN) ?
1135 		RTW_PHL_HANDLER_CB_NAME_LEN : strlen(tx_hdl_cb_name));
1136 #else
1137 	tx_handler->type = RTW_PHL_HANDLER_PRIO_LOW;
1138 #endif
1139 	tx_handler->callback = phl_tx_callback_sdio;
1140 	tx_handler->context = phl;
1141 	tx_handler->drv_priv = drv;
1142 	pstatus = phl_register_handler(phl->phl_com, tx_handler);
1143 	if (pstatus != RTW_PHL_STATUS_SUCCESS)
1144 		return pstatus;
1145 
1146 #ifdef CONFIG_PHL_SDIO_RX_CB_THREAD
1147 	rx_handler->type = RTW_PHL_HANDLER_PRIO_NORMAL;
1148 	_os_strncpy(rx_handler->cb_name, rx_hdl_cb_name,
1149 		(strlen(rx_hdl_cb_name) > RTW_PHL_HANDLER_CB_NAME_LEN) ?
1150 		RTW_PHL_HANDLER_CB_NAME_LEN : strlen(rx_hdl_cb_name));
1151 #else
1152 	rx_handler->type = RTW_PHL_HANDLER_PRIO_LOW;
1153 #endif
1154 	rx_handler->callback = phl_rx_callback_sdio;
1155 	rx_handler->context = phl;
1156 	rx_handler->drv_priv = drv;
1157 	pstatus = phl_register_handler(phl->phl_com, rx_handler);
1158 	if (pstatus != RTW_PHL_STATUS_SUCCESS)
1159 		return pstatus;
1160 
1161 	return RTW_PHL_STATUS_SUCCESS;
1162 }
1163 
1164 #ifdef CONFIG_PHL_SDIO_READ_RXFF_IN_INT
1165 static enum rtw_phl_status phl_recv_rxfifo_sdio(struct phl_info_t *phl)
1166 {
1167 	struct hci_info_t *hci_info = (struct hci_info_t *)phl->hci;
1168 	struct rtw_rx_buf_ring *rx_pool = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
1169 	struct rtw_rx_buf *rxbuf = NULL;
1170 	u32 len;
1171 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
1172 
1173 	do {
1174 		rxbuf = dequeue_rxbuf(phl, &rx_pool->idle_rxbuf_list);
1175 		if (!rxbuf) {
1176 			pstatus = RTW_PHL_STATUS_RESOURCE;
1177 			break;
1178 		}
1179 
1180 		len = rtw_hal_sdio_rx(phl->hal, rxbuf);
1181 		if (!len) {
1182 			enqueue_rxbuf(phl, &rx_pool->idle_rxbuf_list, rxbuf);
1183 			break;
1184 		}
1185 
1186 		enqueue_rxbuf(phl, &rx_pool->busy_rxbuf_list, rxbuf);
1187 		rtw_phl_start_rx_process(phl);
1188 	} while (1);
1189 
1190 	return pstatus;
1191 }
1192 #endif
1193 
1194 static void phl_trx_deinit_sdio(struct phl_info_t *phl_info)
1195 {
1196 	struct hci_info_t *hci = phl_info->hci;
1197 	void *drv = phl_to_drvpriv(phl_info);
1198 	struct rtw_rx_buf_ring *rx_pool;
1199 	struct rtw_rx_buf *rxbuf;
1200 	struct rtw_tx_buf *txbuf;
1201 #ifdef SDIO_TX_THREAD
1202 	struct rtw_tx_buf_ring *tx_pool;
1203 	u32 i;
1204 #endif
1205 
1206 
1207 	FUNCIN();
1208 
1209 	/* TODO: stop RX callback */
1210 	/* TODO: stop TX callback */
1211 
1212 	/* free RX resources */
1213 	if (hci->rxbuf_pool) {
1214 		rx_pool = (struct rtw_rx_buf_ring *)hci->rxbuf_pool;
1215 
1216 		while (rx_pool->idle_rxbuf_list.cnt) {
1217 			rxbuf = dequeue_rxbuf(phl_info, &rx_pool->idle_rxbuf_list);
1218 			if (!rxbuf)
1219 				break;
1220 			_os_kmem_free(drv, rxbuf->buffer, rxbuf->buf_len);
1221 		}
1222 
1223 		while (rx_pool->pend_rxbuf_list.cnt) {
1224 			rxbuf = dequeue_rxbuf(phl_info, &rx_pool->pend_rxbuf_list);
1225 			if (!rxbuf)
1226 				break;
1227 			_os_kmem_free(drv, rxbuf->buffer, rxbuf->buf_len);
1228 		}
1229 
1230 		while (rx_pool->busy_rxbuf_list.cnt) {
1231 			rxbuf = dequeue_rxbuf(phl_info, &rx_pool->busy_rxbuf_list);
1232 			if (!rxbuf)
1233 				break;
1234 			_os_kmem_free(drv, rxbuf->buffer, rxbuf->buf_len);
1235 		}
1236 
1237 		pq_deinit(drv, &rx_pool->idle_rxbuf_list);
1238 		pq_deinit(drv, &rx_pool->busy_rxbuf_list);
1239 		pq_deinit(drv, &rx_pool->pend_rxbuf_list);
1240 
1241 		_os_mem_free(drv, rx_pool->rxbufblock,
1242 			     rx_pool->total_blocks_size);
1243 		_os_mem_free(drv, hci->rxbuf_pool,
1244 			     sizeof(struct rtw_rx_buf_ring));
1245 		hci->rxbuf_pool = NULL;
1246 	}
1247 
1248 	/* free TX resources */
1249 #ifdef SDIO_TX_THREAD
1250 	_os_thread_stop(drv, &hci->tx_thrd);
1251 	phl_tx_sdio_wake_thrd(phl_info);
1252 	_os_thread_deinit(drv, &hci->tx_thrd);
1253 
1254 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
1255 #ifndef RTW_WKARD_SDIO_TX_USE_YIELD
1256 	_os_spinlock_free(drv, &hci->tx_buf_lock);
1257 #endif /* !RTW_WKARD_SDIO_TX_USE_YIELD */
1258 #endif /* CONFIG_PHL_SDIO_TX_CB_THREAD */
1259 
1260 	if (hci->txbuf_pool) {
1261 		tx_pool = (struct rtw_tx_buf_ring *)hci->txbuf_pool;
1262 		hci->txbuf_pool = NULL;
1263 
1264 		txbuf = (struct rtw_tx_buf *)tx_pool->txbufblock;
1265 		for (i = 0; i < tx_pool->block_cnt_alloc; i++) {
1266 			list_del(&txbuf->list);
1267 			_os_kmem_free(drv, txbuf->buffer, txbuf->buf_len);
1268 			txbuf->buffer = NULL;
1269 			txbuf->buf_len = 0;
1270 			txbuf++;
1271 		}
1272 
1273 		pq_deinit(drv, &tx_pool->idle_list);
1274 		pq_deinit(drv, &tx_pool->busy_list);
1275 		pq_deinit(drv, &tx_pool->mgnt_idle_list);
1276 		pq_deinit(drv, &tx_pool->mgnt_busy_list);
1277 
1278 		_os_mem_free(drv, tx_pool->txbufblock,
1279 			     tx_pool->total_blocks_size);
1280 		_os_mem_free(drv, tx_pool,
1281 			     sizeof(struct rtw_tx_buf_ring));
1282 	}
1283 #else /* !SDIO_TX_THREAD */
1284 	if (hci->txbuf_pool) {
1285 		txbuf = (struct rtw_tx_buf*)hci->txbuf_pool;
1286 		hci->txbuf_pool = NULL;
1287 		if (txbuf->buffer)
1288 			_os_kmem_free(drv, txbuf->buffer, txbuf->buf_len);
1289 		_os_mem_free(drv, txbuf, sizeof(struct rtw_tx_buf));
1290 	}
1291 #endif /* !SDIO_TX_THREAD */
1292 	if (hci->tx_drop_cnt) {
1293 		PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_,
1294 			  "%s: tx_drop_cnt=%u%s\n", __FUNCTION__,
1295 			  hci->tx_drop_cnt & ~BIT31,
1296 			  hci->tx_drop_cnt & BIT31 ? "(overflow)" : "");
1297 		hci->tx_drop_cnt = 0;
1298 	}
1299 
1300 	FUNCOUT();
1301 }
1302 
1303 static enum rtw_phl_status phl_trx_init_sdio(struct phl_info_t *phl_info)
1304 {
1305 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1306 	struct hci_info_t *hci = phl_info->hci;
1307 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1308 	struct bus_cap_t *bus_cap = &hal_com->bus_cap;
1309 	void *drv = phl_to_drvpriv(phl_info);
1310 #ifdef SDIO_TX_THREAD
1311 	struct rtw_tx_buf_ring *tx_pool;
1312 #endif
1313 	struct rtw_tx_buf *txbuf;
1314 	struct rtw_rx_buf_ring *rx_pool;
1315 	struct rtw_rx_buf *rxbuf;
1316 	u32 i;
1317 
1318 
1319 	FUNCIN_WSTS(pstatus);
1320 	PHL_TRACE(COMP_PHL_XMIT|COMP_PHL_DBG, _PHL_DEBUG_,
1321 		  "%s: tx_buf_num(%u)\n", __FUNCTION__, bus_cap->tx_buf_num);
1322 	PHL_TRACE(COMP_PHL_XMIT|COMP_PHL_DBG, _PHL_DEBUG_,
1323 		  "%s: tx_buf_size(%u)\n", __FUNCTION__, bus_cap->tx_buf_size);
1324 	PHL_TRACE(COMP_PHL_XMIT|COMP_PHL_DBG, _PHL_DEBUG_,
1325 		  "%s: mgnt_buf_num(%u)\n", __FUNCTION__, bus_cap->tx_mgnt_buf_num);
1326 	PHL_TRACE(COMP_PHL_XMIT|COMP_PHL_DBG, _PHL_DEBUG_,
1327 		  "%s: mgnt_buf_size(%u)\n", __FUNCTION__, bus_cap->tx_mgnt_buf_size);
1328 	PHL_TRACE(COMP_PHL_RECV|COMP_PHL_DBG, _PHL_DEBUG_,
1329 		  "%s: rx_buf_num(%u)\n", __FUNCTION__, bus_cap->rx_buf_num);
1330 	PHL_TRACE(COMP_PHL_RECV|COMP_PHL_DBG, _PHL_DEBUG_,
1331 		  "%s: rx_buf_size(%u)\n", __FUNCTION__, bus_cap->rx_buf_size);
1332 
1333 	do {
1334 #ifdef SDIO_TX_THREAD
1335 		hci->txbuf_pool = _os_mem_alloc(drv,
1336 						sizeof(struct rtw_tx_buf_ring));
1337 		if (!hci->txbuf_pool)
1338 			break;
1339 		tx_pool = (struct rtw_tx_buf_ring*)hci->txbuf_pool;
1340 		tx_pool->block_cnt_alloc = bus_cap->tx_mgnt_buf_num
1341 					   + bus_cap->tx_buf_num;
1342 		tx_pool->total_blocks_size = tx_pool->block_cnt_alloc
1343 					     * sizeof(struct rtw_tx_buf);
1344 		tx_pool->txbufblock = _os_mem_alloc(drv,
1345 						tx_pool->total_blocks_size);
1346 		if (!tx_pool->txbufblock)
1347 			break;
1348 
1349 		pq_init(drv, &tx_pool->idle_list);
1350 		pq_init(drv, &tx_pool->busy_list);
1351 		pq_init(drv, &tx_pool->mgnt_idle_list);
1352 		pq_init(drv, &tx_pool->mgnt_busy_list);
1353 
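		/*
		 * Carve the block array in two: the first tx_mgnt_buf_num
		 * entries get (smaller) management buffers, the remaining
		 * entries get data buffers; both live in the same allocation.
		 */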
1354 		txbuf = (struct rtw_tx_buf *)tx_pool->txbufblock;
1355 		for (i = 0; i < bus_cap->tx_mgnt_buf_num; i++, txbuf++) {
1356 			txbuf->tag = RTW_PHL_PKT_TYPE_MGNT;
1357 			txbuf->buf_len = bus_cap->tx_mgnt_buf_size;
1358 			txbuf->buffer = _os_kmem_alloc(drv, txbuf->buf_len);
1359 			if (!txbuf->buffer)
1360 				break;
1361 			INIT_LIST_HEAD(&txbuf->list);
1362 			enqueue_txbuf(phl_info, &tx_pool->mgnt_idle_list, txbuf, _tail);
1363 		}
1364 		if (i != bus_cap->tx_mgnt_buf_num)
1365 			break;
1366 		for (; i < tx_pool->block_cnt_alloc; i++, txbuf++) {
1367 			txbuf->tag = RTW_PHL_PKT_TYPE_DATA;
1368 			txbuf->buf_len = bus_cap->tx_buf_size;
1369 			txbuf->buffer = _os_kmem_alloc(drv, txbuf->buf_len);
1370 			if (!txbuf->buffer)
1371 				break;
1372 			INIT_LIST_HEAD(&txbuf->list);
1373 			enqueue_txbuf(phl_info, &tx_pool->idle_list, txbuf, _tail);
1374 		}
1375 		if (i != tx_pool->block_cnt_alloc)
1376 			break;
1377 
1378 #ifdef CONFIG_PHL_SDIO_TX_CB_THREAD
1379 #ifndef RTW_WKARD_SDIO_TX_USE_YIELD
1380 		_os_spinlock_init(drv, &hci->tx_buf_lock);
1381 #endif /* !RTW_WKARD_SDIO_TX_USE_YIELD */
1382 #endif /* CONFIG_PHL_SDIO_TX_CB_THREAD */
1383 #else /* !SDIO_TX_THREAD */
1384 		hci->txbuf_pool = _os_mem_alloc(drv, sizeof(struct rtw_tx_buf));
1385 		if (!hci->txbuf_pool)
1386 			break;
1387 		txbuf = (struct rtw_tx_buf*)hci->txbuf_pool;
1388 		txbuf->buf_len = bus_cap->tx_buf_size;
1389 		txbuf->buffer = _os_kmem_alloc(drv, txbuf->buf_len);
		if (!txbuf->buffer)
			break;
1390 #endif /* !SDIO_TX_THREAD */
1391 
1392 		hci->rxbuf_pool = _os_mem_alloc(drv,
1393 						sizeof(struct rtw_rx_buf_ring));
1394 		if (!hci->rxbuf_pool)
1395 			break;
1396 		rx_pool = (struct rtw_rx_buf_ring*)hci->rxbuf_pool;
1397 		rx_pool->block_cnt_alloc = bus_cap->rx_buf_num;
1398 		rx_pool->total_blocks_size = rx_pool->block_cnt_alloc
1399 					     * sizeof(struct rtw_rx_buf);
1400 		rx_pool->rxbufblock = _os_mem_alloc(drv,
1401 						rx_pool->total_blocks_size);
1402 		if (!rx_pool->rxbufblock)
1403 			break;
1404 
1405 		pq_init(drv, &rx_pool->idle_rxbuf_list);
1406 		pq_init(drv, &rx_pool->busy_rxbuf_list);
1407 		pq_init(drv, &rx_pool->pend_rxbuf_list);
1408 
1409 		rxbuf = (struct rtw_rx_buf *)rx_pool->rxbufblock;
1410 		for (i = 0; i < rx_pool->block_cnt_alloc; i++) {
1411 			rxbuf->buf_len = bus_cap->rx_buf_size;
1412 			rxbuf->buffer = _os_kmem_alloc(drv, rxbuf->buf_len);
1413 			if (!rxbuf->buffer)
1414 				break;
1415 
1416 			INIT_LIST_HEAD(&rxbuf->list);
1417 			enqueue_rxbuf(phl_info, &rx_pool->idle_rxbuf_list,
1418 				      rxbuf);
1419 
1420 			rxbuf++;
1421 		}
1422 		if (i != rx_pool->block_cnt_alloc)
1423 			break;
1424 
1425 		pstatus = phl_register_trx_hdlr_sdio(phl_info);
1426 
1427 #ifdef SDIO_TX_THREAD
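		/* Start the TX I/O thread only after all pools and handlers are ready. */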
1428 		_os_sema_init(drv, &hci->tx_thrd_sema, 0);
1429 		_os_thread_init(drv, &hci->tx_thrd, phl_tx_sdio_thrd_hdl,
1430 				phl_info, "rtw_sdio_tx");
1431 		_os_thread_schedule(drv, &hci->tx_thrd);
1432 #endif
1433 	} while (false);
1434 
1435 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
1436 		phl_trx_deinit_sdio(phl_info);
1437 
1438 	FUNCOUT_WSTS(pstatus);
1439 
1440 	return pstatus;
1441 }
1442 
1443 static u8 _rx_buf_size_kb(struct phl_info_t *phl)
1444 {
1445 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl->hal);
1446 	struct bus_cap_t *bus_cap = &hal_com->bus_cap;
1447 	u32 size;
1448 
1449 
1450 	size = bus_cap->rx_buf_size >> 10; /* change unit to KB */
1451 	return (u8)((size > 0xFF) ? 0xFF : size);
1452 }
1453 
1454 static enum rtw_phl_status phl_trx_cfg_sdio(struct phl_info_t *phl)
1455 {
1456 	/*
1457 	 * Default TX setting.
1458 	 * It includes the following:
1459 	 * a. Release the tx size limitation of (32K-4) bytes.
1460 	 */
1461 	rtw_hal_sdio_tx_cfg(phl->hal);
1462 
1463 	/*
1464 	 * Default RX agg settings from the MAC team:
1465 	 * timeout threshold 32us; the size threshold depends on the rx buffer size.
1466 	 * RX agg settings still need to be tuned per IC and real use case.
1467 	 */
1468 	rtw_hal_sdio_rx_agg_cfg(phl->hal, true, 1, 32, _rx_buf_size_kb(phl), 0);
1469 
1470 	return RTW_PHL_STATUS_SUCCESS;
1471 }
1472 
1473 static void phl_trx_stop_sdio(struct phl_info_t *phl)
1474 {
1475 }
1476 
1477 #define _PLTFM_TX_TIMEOUT	50	/* unit: ms */
1478 static enum rtw_phl_status phl_pltfm_tx_sdio(struct phl_info_t *phl, void *pkt)
1479 {
1480 	struct rtw_h2c_pkt *h2c_pkt = (struct rtw_h2c_pkt *)pkt;
1481 	u8 dma_ch;
1482 	u32 start;
1483 	enum rtw_hal_status res;
1484 
1485 
1486 	dma_ch = rtw_hal_get_fwcmd_queue_idx(phl->hal);
1487 
1488 	start = _os_get_cur_time_ms();
1489 	do {
1490 		res = rtw_hal_sdio_tx(phl->hal, dma_ch, h2c_pkt->vir_head,
1491 				      h2c_pkt->data_len, 1, NULL, NULL);
1492 		if (res == RTW_HAL_STATUS_RESOURCE) {
1493 			if (phl_get_passing_time_ms(start) < _PLTFM_TX_TIMEOUT)
1494 				continue;
1495 
1496 			PHL_TRACE(COMP_PHL_XMIT|COMP_PHL_DBG, _PHL_ERR_,
1497 				  "%s: pltfm_tx timeout(> %u ms)!\n",
1498 				  __FUNCTION__, _PLTFM_TX_TIMEOUT);
1499 		}
1500 		break;
1501 	} while (1);
1502 
1503 	phl_enqueue_idle_h2c_pkt(phl, h2c_pkt);
1504 	if (res == RTW_HAL_STATUS_SUCCESS)
1505 		return RTW_PHL_STATUS_SUCCESS;
1506 
1507 	PHL_TRACE(COMP_PHL_XMIT|COMP_PHL_DBG, _PHL_ERR_,
1508 		  "%s: pltfm_tx fail!(0x%x)\n", __FUNCTION__, res);
1509 	return RTW_PHL_STATUS_FAILURE;
1510 }
1511 
1512 static void phl_free_h2c_pkt_buf_sdio(struct phl_info_t *phl_info,
1513 				      struct rtw_h2c_pkt *h2c_pkt)
1514 {
1515 	if (!h2c_pkt->vir_head || !h2c_pkt->buf_len)
1516 		return;
1517 
1518 	_os_kmem_free(phl_to_drvpriv(phl_info),
1519 		      h2c_pkt->vir_head, h2c_pkt->buf_len);
1520 	h2c_pkt->vir_head = NULL;
1521 	h2c_pkt->buf_len = 0;
1522 }
1523 
1524 static enum rtw_phl_status phl_alloc_h2c_pkt_buf_sdio(
1525 				struct phl_info_t *phl_info,
1526 				struct rtw_h2c_pkt *h2c_pkt, u32 buf_len)
1527 {
1528 	void *buf = NULL;
1529 
1530 
1531 	buf = _os_kmem_alloc(phl_to_drvpriv(phl_info), buf_len);
1532 	if (!buf)
1533 		return RTW_PHL_STATUS_FAILURE;
1534 
1535 	h2c_pkt->vir_head = buf;
1536 	h2c_pkt->buf_len = buf_len;
1537 
1538 	return RTW_PHL_STATUS_SUCCESS;
1539 }
1540 
1541 static void phl_trx_reset_sdio(struct phl_info_t *phl, u8 type)
1542 {
1543 	struct rtw_phl_com_t *phl_com = phl->phl_com;
1544 	struct rtw_stats *phl_stats = &phl_com->phl_stats;
1545 
1546 	if (PHL_CTRL_TX & type) {
1547 		phl_reset_tx_stats(phl_stats);
1548 	}
1549 
1550 	if (PHL_CTRL_RX & type) {
1551 		phl_reset_rx_stats(phl_stats);
1552 	}
1553 }
1554 
1555 static void phl_tx_resume_sdio(struct phl_info_t *phl_info)
1556 {
1557 	void *drv = phl_to_drvpriv(phl_info);
1558 
1559 	_os_atomic_set(drv, &phl_info->phl_sw_tx_sts, PHL_TX_STATUS_RUNNING);
1560 }
1561 
1562 static void phl_rx_resume_sdio(struct phl_info_t *phl_info)
1563 {
1564 	void *drv = phl_to_drvpriv(phl_info);
1565 
1566 	_os_atomic_set(drv, &phl_info->phl_sw_rx_sts, PHL_RX_STATUS_RUNNING);
1567 }
1568 
1569 static void phl_trx_resume_sdio(struct phl_info_t *phl, u8 type)
1570 {
1571 	if (PHL_CTRL_TX & type)
1572 		phl_tx_resume_sdio(phl);
1573 
1574 	if (PHL_CTRL_RX & type)
1575 		phl_rx_resume_sdio(phl);
1576 }
1577 
1578 static void phl_req_tx_stop_sdio(struct phl_info_t *phl)
1579 {
1580 	void *drv = phl_to_drvpriv(phl);
1581 
1582 	_os_atomic_set(drv, &phl->phl_sw_tx_sts,
1583 		PHL_TX_STATUS_STOP_INPROGRESS);
1584 }
1585 
1586 static void phl_req_rx_stop_sdio(struct phl_info_t *phl)
1587 {
1588 	void *drv = phl_to_drvpriv(phl);
1589 
1590 	_os_atomic_set(drv, &phl->phl_sw_rx_sts,
1591 		PHL_RX_STATUS_STOP_INPROGRESS);
1592 }
1593 
1594 static bool phl_is_tx_pause_sdio(struct phl_info_t *phl)
1595 {
1596 	void *drvpriv = phl_to_drvpriv(phl);
1597 
1598 	if (PHL_TX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv,
1599 		&phl->phl_sw_tx_sts))
1600 		return true;
1601 	else
1602 		return false;
1603 }
1604 
1605 static bool phl_is_rx_pause_sdio(struct phl_info_t *phl)
1606 {
1607 	void *drvpriv = phl_to_drvpriv(phl);
1608 
1609 	if (PHL_RX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv,
1610 		&phl->phl_sw_rx_sts)) {
1611 		if (true == rtw_phl_is_phl_rx_idle(phl))
1612 			return true;
1613 		else
1614 			return false;
1615 	} else {
1616 		return false;
1617 	}
1618 }
1619 
1620 static void *phl_get_txbd_buf_sdio(struct phl_info_t *phl)
1621 {
1622 	return NULL;
1623 }
1624 
1625 static void *phl_get_rxbd_buf_sdio(struct phl_info_t *phl)
1626 {
1627 	return NULL;
1628 }
1629 
1630 void phl_recycle_rx_pkt_sdio(struct phl_info_t *phl_info,
1631 				struct rtw_phl_rx_pkt *phl_rx)
1632 {
1633 
1634 	if (phl_rx->r.os_priv)
1635 		_os_free_netbuf(phl_to_drvpriv(phl_info),
1636 			phl_rx->r.pkt_list[0].vir_addr,
1637 			phl_rx->r.os_netbuf_len,
1638 			phl_rx->r.os_priv);
1639 
1640 	phl_recycle_rx_buf(phl_info, phl_rx);
1641 }
1642 
1643 
1644 void phl_tx_watchdog_sdio(struct phl_info_t *phl_info)
1645 {
1646 
1647 }
1648 
1649 static struct phl_hci_trx_ops ops_sdio = {
1650 	.hci_trx_init = phl_trx_init_sdio,
1651 	.hci_trx_deinit = phl_trx_deinit_sdio,
1652 
1653 	.prepare_tx = phl_prepare_tx_sdio,
1654 	.recycle_rx_buf = phl_recycle_rx_buf_sdio,
1655 	.tx = phl_tx_sdio,
1656 	.rx = phl_rx_sdio,
1657 	.trx_cfg = phl_trx_cfg_sdio,
1658 	.trx_stop = phl_trx_stop_sdio,
1659 	.pltfm_tx = phl_pltfm_tx_sdio,
1660 	.free_h2c_pkt_buf = phl_free_h2c_pkt_buf_sdio,
1661 	.alloc_h2c_pkt_buf = phl_alloc_h2c_pkt_buf_sdio,
1662 	.trx_reset = phl_trx_reset_sdio,
1663 	.trx_resume = phl_trx_resume_sdio,
1664 	.req_tx_stop = phl_req_tx_stop_sdio,
1665 	.req_rx_stop = phl_req_rx_stop_sdio,
1666 	.is_tx_pause = phl_is_tx_pause_sdio,
1667 	.is_rx_pause = phl_is_rx_pause_sdio,
1668 	.get_txbd_buf = phl_get_txbd_buf_sdio,
1669 	.get_rxbd_buf = phl_get_rxbd_buf_sdio,
1670 	.recycle_rx_pkt = phl_recycle_rx_pkt_sdio,
1671 	.register_trx_hdlr = phl_register_trx_hdlr_sdio,
1672 	.rx_handle_normal = phl_rx_handle_normal,
1673 	.tx_watchdog = phl_tx_watchdog_sdio,
1674 #ifdef CONFIG_PHL_SDIO_READ_RXFF_IN_INT
1675 	.recv_rxfifo = phl_recv_rxfifo_sdio,
1676 #endif
1677 };
1678 
1679 enum rtw_phl_status phl_hook_trx_ops_sdio(struct phl_info_t *phl_info)
1680 {
1681 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1682 
1683 	if (NULL != phl_info) {
1684 		phl_info->hci_trx_ops = &ops_sdio;
1685 		pstatus = RTW_PHL_STATUS_SUCCESS;
1686 	}
1687 
1688 	return pstatus;
1689 }
1690