xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852be/phl/hci/phl_trx_pcie.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2019 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_TRX_PCIE_C_
16 #include "../phl_headers.h"
17 #include "phl_trx_pcie.h"
18 
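/* Inclusive range check: true when start <= target <= end. */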
19 #define target_in_area(target, start, end) \
20 	(((target) < (start) || (target) > (end)) ? false : true)
21 void phl_recycle_payload(struct phl_info_t *phl_info, u8 dma_ch, u16 wp_seq,
22 			 u8 txsts);
23 
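/*
 * Debug helper: walks the given linked list and dumps its entries.
 * The interpretation of list_head depends on 'type' (WD pages, PHL TX
 * rings, ring status entries or H2C packets).
 */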
24 void phl_dump_link_list(void *phl, _os_list *list_head, u8 type)
25 {
26 	struct rtw_wd_page *wd_page = NULL, *t = NULL;
27 	struct rtw_h2c_pkt *h2c_pkt = NULL, *h2c_t = NULL;
28 	struct rtw_phl_tring_list *phl_tring_list = NULL, *phl_t = NULL;
29 	struct phl_ring_status *ring_sts = NULL, *rsts_t = NULL;
30 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
31 	void *drv_priv = phl_to_drvpriv(phl_info);
32 	u8 *vir_addr = NULL;
33 	u32 i = 0, j = 0;
34 	u16 phl_idx = 0, phl_next_idx = 0;
35 
36 	switch (type) {
37 	case TYPE_WD_PAGE:
38 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "===Dump WD Page===\n");
39 		phl_list_for_loop_safe(wd_page, t, struct rtw_wd_page,
40 						list_head, list) {
41 			vir_addr = (u8 *)wd_page->vir_addr;
42 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "vir_addr = %p, %x; phy_addr_l = %x; phy_addr_h = %x\n",
43 					vir_addr, *vir_addr,
44 					wd_page->phy_addr_l,
45 					wd_page->phy_addr_h);
46 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "cache = %d; buf_len = %d, wp_seq = %d\n",
47 					wd_page->cache, wd_page->buf_len,
48 					wd_page->wp_seq);
49 		}
50 		break;
51 	case TYPE_PHL_RING:
52 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "===Dump PHL Ring===\n");
53 		phl_list_for_loop_safe(phl_tring_list, phl_t,
54 					struct rtw_phl_tring_list,
55 					list_head, list) {
56 
57 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
58 				"-- macid = %d, band = %d, wmm = %d --\n",
59 					phl_tring_list->macid,
60 					phl_tring_list->band,
61 					phl_tring_list->wmm);
62 
63 			for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
64 				phl_idx = (u16)_os_atomic_read(drv_priv,
65 						&phl_tring_list->phl_ring[i].phl_idx);
66 				phl_next_idx = (u16)_os_atomic_read(drv_priv,
67 						&phl_tring_list->phl_ring[i].phl_next_idx);
68 
69 				PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
70 						"tid = %d\n"
71 						"dma_ch = %d\n"
72 						"tx_thres = %d\n"
73 						"core_idx = %d\n"
74 						"phl_idx = %d\n"
75 						"phl_next_idx = %d\n",
76 						phl_tring_list->phl_ring[i].tid,
77 						phl_tring_list->phl_ring[i].dma_ch,
78 						phl_tring_list->phl_ring[i].tx_thres,
79 						phl_tring_list->phl_ring[i].core_idx,
80 						phl_idx,
81 						phl_next_idx);
82 
83 				for (j = 0; j < 5; j++) {
84 					PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
85 							"entry[%d] = %p\n",
86 							j,
87 					phl_tring_list->phl_ring[i].entry[j]);
88 				}
89 			}
90 		}
91 		break;
92 	case TYPE_RING_STS:
93 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "===Dump PHL Ring status===\n");
94 		phl_list_for_loop_safe(ring_sts, rsts_t, struct phl_ring_status,
95 					list_head, list) {
96 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
97 					"macid = %d\n"
98 					"req_busy = %d\n"
99 					"ring_ptr = %p\n",
100 					ring_sts->macid,
101 					ring_sts->req_busy,
102 					ring_sts->ring_ptr);
103 		}
104 		break;
105 	case TYPE_H2C_PKT:
106 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "===Dump H2C PKT===\n");
107 		phl_list_for_loop_safe(h2c_pkt, h2c_t, struct rtw_h2c_pkt,
108 					list_head, list) {
109 			vir_addr = (u8 *)h2c_pkt->vir_head;
110 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "vir_addr = %p, %x; phy_addr_l = %x; phy_addr_h = %x\n",
111 					vir_addr, *vir_addr,
112 					h2c_pkt->phy_addr_l,
113 					h2c_pkt->phy_addr_h);
114 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "type = %d; cache = %d; buf_len = %d\n",
115 					h2c_pkt->type, h2c_pkt->cache, h2c_pkt->buf_len);
116 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "vir_head = %p; vir_data = %p; vir_tail = %p; vir_end = %p\n",
117 					(u8 *)h2c_pkt->vir_head,
118 					(u8 *)h2c_pkt->vir_data,
119 					(u8 *)h2c_pkt->vir_tail,
120 					(u8 *)h2c_pkt->vir_end);
121 		}
122 		break;
123 	default:
124 		break;
125 	}
126 }
127 
128 void _phl_dump_wp_stats(struct phl_info_t *phl_info)
129 {
130 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
131 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
132 	struct rtw_wp_rpt_stats *rpt_stats = NULL;
133 	u8 ch = 0;
134 
135 	rpt_stats = (struct rtw_wp_rpt_stats *)hal_com->trx_stat.wp_rpt_stats;
136 
137 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
138 		  "\n== wp report statistics == \n");
139 	for (ch = 0; ch < hci_info->total_txch_num; ch++) {
140 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
141 			  "ch			: %u\n", (int)ch);
142 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
143 			  "busy count		: %u\n",
144 			  (int)rpt_stats[ch].busy_cnt);
145 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
146 			  "ok count		: %u\n",
147 			  (int)rpt_stats[ch].tx_ok_cnt);
148 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
149 			  "retry fail count	: %u\n",
150 			  (int)rpt_stats[ch].rty_fail_cnt);
151 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
152 			  "lifetime drop count	: %u\n",
153 			  (int)rpt_stats[ch].lifetime_drop_cnt);
154 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
155 			  "macid drop count	: %u\n",
156 			  (int)rpt_stats[ch].macid_drop_cnt);
157 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
158 			  "sw drop count		: %u\n",
159 			  (int)rpt_stats[ch].sw_drop_cnt);
160 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
161 			  "recycle fail count	: %u\n",
162 			  (int)rpt_stats[ch].recycle_fail_cnt);
163 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
164 			  "delay ok count			: %u\n",
165 			  (int)rpt_stats[ch].delay_tx_ok_cnt);
166 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
167 			  "delay retry fail count		: %u\n",
168 			  (int)rpt_stats[ch].delay_rty_fail_cnt);
169 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
170 			  "delay lifetime drop count	: %u\n",
171 			  (int)rpt_stats[ch].delay_lifetime_drop_cnt);
172 		PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
173 			  "delay macid drop count		: %u\n",
174 			  (int)rpt_stats[ch].delay_macid_drop_cnt);
175 
176 	}
177 }
178 
179 void _phl_dump_busy_wp(struct phl_info_t *phl_info)
180 {
181 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
182 	struct rtw_wd_page_ring *wd_ring = NULL;
183 	struct rtw_xmit_req *treq = NULL;
184 	void *ptr = NULL;
185 	u16 wp_seq = 0;
186 	u8 ch = 0;
187 
188 	wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
189 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
190 		  "\n== dump busy wp == \n");
191 	for (ch = 0; ch < hci_info->total_txch_num; ch++) {
192 
193 		for (wp_seq = 0; wp_seq < WP_MAX_SEQ_NUMBER; wp_seq++) {
194 			if (NULL != wd_ring[ch].wp_tag[wp_seq].ptr) {
195 				ptr = wd_ring[ch].wp_tag[wp_seq].ptr;
196 				treq = (struct rtw_xmit_req *)ptr;
197 				PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
198 					  "dma_ch = %d, wp_seq = 0x%x, ptr = %p!\n",
199 					  ch, wp_seq, ptr);
200 				PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
201 					  "wifi seq = %d\n",
202 					  treq->mdata.sw_seq);
203 			}
204 		}
205 	}
206 
207 
208 }
209 
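/*
 * Decide whether descriptor slot 'target' lies in the window that hardware
 * has already consumed and may therefore be recycled.  The ring wraps at
 * 'bndy', so the recyclable window is:
 *   - wptr > rptr : [wptr, bndy-1] plus [0, rptr]
 *   - rptr > wptr : [wptr, rptr]
 *   - rptr == wptr: the whole ring
 * Example (bndy = 256, rptr = 10, wptr = 200): targets 200..255 and 0..10
 * are recyclable, 11..199 are still owned by hardware.
 */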
210 u8 _phl_check_recycle(u16 target, u16 rptr, u16 wptr, u16 bndy)
211 {
212 	u8 recycle = false;
213 	u8 init = 0;	/* starting point */
214 
215 	if (wptr > rptr) {
216 		if (true == target_in_area(target, wptr, (bndy-1)))
217 			recycle = true;
218 		else if (true == target_in_area(target, init, rptr))
219 			recycle = true;
220 		else
221 			recycle = false;
222 
223 	} else if (rptr > wptr) {
224 		if (true == target_in_area(target, wptr, rptr))
225 			recycle = true;
226 		else
227 			recycle = false;
228 	} else {
229 		recycle = true;
230 	}
231 
232 	return recycle;
233 }
234 
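/*
 * SW TX/RX state helpers: the PCIe path tracks its software state in the
 * phl_sw_tx_sts / phl_sw_rx_sts atomics (RUNNING, STOP_INPROGRESS,
 * SW_PAUSE).  The start/resume/stop helpers below only flip these flags;
 * phl_is_tx_sw_pause_pcie() / phl_is_rx_sw_pause_pcie() report whether the
 * requested pause has taken effect.
 */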
235 void phl_tx_start_pcie(struct phl_info_t *phl_info)
236 {
237 	void *drv = phl_to_drvpriv(phl_info);
238 	_os_atomic_set(drv, &phl_info->phl_sw_tx_sts, PHL_TX_STATUS_RUNNING);
239 }
240 
241 void phl_tx_resume_pcie(struct phl_info_t *phl_info)
242 {
243 	void *drv = phl_to_drvpriv(phl_info);
244 	_os_atomic_set(drv, &phl_info->phl_sw_tx_sts, PHL_TX_STATUS_RUNNING);
245 }
246 
247 void phl_req_tx_stop_pcie(struct phl_info_t *phl_info)
248 {
249 	void *drv = phl_to_drvpriv(phl_info);
250 	_os_atomic_set(drv, &phl_info->phl_sw_tx_sts,
251 				PHL_TX_STATUS_STOP_INPROGRESS);
252 }
253 
254 void phl_tx_stop_pcie(struct phl_info_t *phl_info)
255 {
256 	void *drv = phl_to_drvpriv(phl_info);
257 	_os_atomic_set(drv, &phl_info->phl_sw_tx_sts, PHL_TX_STATUS_SW_PAUSE);
258 }
259 
260 bool phl_is_tx_sw_pause_pcie(struct phl_info_t *phl_info)
261 {
262 	void *drvpriv = phl_to_drvpriv(phl_info);
263 
264 	if (PHL_TX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv,
265 								&phl_info->phl_sw_tx_sts))
266 		return true;
267 	else
268 		return false;
269 
270 }
271 
272 void phl_rx_start_pcie(struct phl_info_t *phl_info)
273 {
274 	void *drv = phl_to_drvpriv(phl_info);
275 	_os_atomic_set(drv, &phl_info->phl_sw_rx_sts, PHL_RX_STATUS_RUNNING);
276 }
277 
278 void phl_rx_resume_pcie(struct phl_info_t *phl_info)
279 {
280 	void *drv = phl_to_drvpriv(phl_info);
281 	_os_atomic_set(drv, &phl_info->phl_sw_rx_sts, PHL_RX_STATUS_RUNNING);
282 }
283 
284 void phl_req_rx_stop_pcie(struct phl_info_t *phl_info)
285 {
286 	void *drv = phl_to_drvpriv(phl_info);
287 	_os_atomic_set(drv, &phl_info->phl_sw_rx_sts,
288 				PHL_RX_STATUS_STOP_INPROGRESS);
289 }
290 
291 void phl_rx_stop_pcie(struct phl_info_t *phl_info)
292 {
293 	void *drv = phl_to_drvpriv(phl_info);
294 	_os_atomic_set(drv, &phl_info->phl_sw_rx_sts, PHL_RX_STATUS_SW_PAUSE);
295 }
296 
297 bool phl_is_rx_sw_pause_pcie(struct phl_info_t *phl_info)
298 {
299 	void *drvpriv = phl_to_drvpriv(phl_info);
300 
301 	if (PHL_RX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv,
302 								&phl_info->phl_sw_rx_sts)) {
303 		if (true == rtw_phl_is_phl_rx_idle(phl_info))
304 			return true;
305 		else
306 			return false;
307 	} else {
308 		return false;
309 	}
310 }
311 
312 #ifdef RTW_WKARD_DYNAMIC_LTR
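/*
 * RTW_WKARD_DYNAMIC_LTR: software-controlled PCIe LTR (Latency Tolerance
 * Reporting).  The helpers below switch between the ACT and IDLE LTR
 * states based on traffic level and TX-channel occupancy.  The threshold
 * ltr_sw_ctrl_thre packs the TX level in the high byte and the RX level in
 * the low byte, and state switches are rate-limited to at most one per
 * 500us since the last trigger.
 */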
313 static bool _phl_judge_idle_ltr_switching_conditions(
314 	struct phl_info_t *phl_info, u16 macid)
315 {
316 	struct rtw_phl_stainfo_t *sta_info = NULL;
317 	struct rtw_stats *stats = &phl_info->phl_com->phl_stats;
318 	u16 ltr_thre = phl_info->phl_com->bus_sw_cap.ltr_sw_ctrl_thre;
319 	u8 tx_thre = 0, rx_thre = 0;
320 	u32 last_time = phl_ltr_get_last_trigger_time(phl_info->phl_com);
321 
322 	tx_thre = ltr_thre >> 8;
323 	rx_thre = (u8)(ltr_thre & 0xFF);
324 
325 	sta_info = rtw_phl_get_stainfo_by_macid(phl_info, macid);
326 
327 	if (!rtw_hal_ltr_is_sw_ctrl(phl_info->phl_com, phl_info->hal))
328 		return false;
329 
330 	if (sta_info == NULL)
331 		return false;
332 
333 	if (sta_info->wrole == NULL)
334 		return false;
335 
336 	if (stats->tx_traffic.lvl > tx_thre)
337 		return false;
338 
339 	if (stats->rx_traffic.lvl > rx_thre)
340 		return false;
341 
342 	if (RTW_PCIE_LTR_SW_IDLE == phl_ltr_get_cur_state(phl_info->phl_com))
343 		return false;
344 
345 	if (phl_get_passing_time_us(last_time) < 500)
346 		return false;
347 
348 	return true;
349 
350 }
351 static bool _phl_judge_act_ltr_switching_conditions(
352 	struct phl_info_t *phl_info, u8 ch)
353 {
354 	u32 last_time = phl_ltr_get_last_trigger_time(phl_info->phl_com);
355 	u8 fwcmd_queue_idx = 0;
356 
357 	fwcmd_queue_idx = rtw_hal_get_fwcmd_queue_idx(phl_info->hal);
358 
359 	if (!rtw_hal_ltr_is_sw_ctrl(phl_info->phl_com, phl_info->hal))
360 		return true;
361 
362 	if (ch == fwcmd_queue_idx)
363 		return true;
364 
365 	if (RTW_PCIE_LTR_SW_ACT == phl_ltr_get_cur_state(phl_info->phl_com))
366 		return true;
367 
368 	if (phl_get_passing_time_us(last_time) < 500)
369 		return false;
370 
371 	return true;
372 }
373 
374 static void _phl_act_ltr_update_stats(struct phl_info_t *phl_info,
375 		bool success, u8 ch, u16 pending_wd_page_cnt)
376 {
377 	static bool bdly = false;
378 	static u32 dly_start_time = 0;
379 	struct phl_msg msg = {0};
380 	struct phl_msg_attribute attr = {0};
381 
382 	if (!rtw_hal_ltr_is_sw_ctrl(phl_info->phl_com, phl_info->hal))
383 		return;
384 
385 	if (success) {
386 		/* only those have been delayed last time*/
387 		if (bdly) {
388 			PHL_INFO("%s() ch(%u), %u packets be transmitted after defering %uus\n"
389 				, __func__, ch,	pending_wd_page_cnt,
390 				phl_get_passing_time_us(dly_start_time));
391 			rtw_hal_ltr_update_stats(phl_info->hal, true);
392 		}
393 		bdly = false;
394 	} else {
395 
396 		/* the first packet that is going to be deferred */
397 		if (false == bdly) {
398 			dly_start_time = _os_get_cur_time_us();
399 			/* send messages to trigger tx */
400 			msg.band_idx = HW_BAND_0;
401 			SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_TX);
402 			SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_LTR_TX_DLY);
403 			phl_msg_hub_send(phl_info, &attr, &msg);
404 		}
405 
406 		PHL_DBG("%s() ch(%u), %u packets were delayed\n", __func__,
407 							ch, pending_wd_page_cnt);
408 
409 		rtw_hal_ltr_update_stats(phl_info->hal, false);
410 		bdly = true;
411 	}
412 }
413 
414 static void _phl_switch_act_ltr(struct phl_info_t *phl_info, u8 tx_dma_ch)
415 {
416 	u8 fwcmd_queue_idx = 0;
417 
418 	if (!rtw_hal_ltr_is_sw_ctrl(phl_info->phl_com, phl_info->hal))
419 		return;
420 
421 	if (RTW_PCIE_LTR_SW_ACT == phl_ltr_get_cur_state(phl_info->phl_com))
422 		return;
423 
424 	fwcmd_queue_idx = rtw_hal_get_fwcmd_queue_idx(phl_info->hal);
425 
426 	if (tx_dma_ch != fwcmd_queue_idx)
427 		phl_ltr_sw_trigger(phl_info->phl_com, phl_info->hal,
428 			RTW_PCIE_LTR_SW_ACT);
429 
430 }
431 
432 static void _phl_switch_idle_ltr(struct phl_info_t *phl_info,
433 					struct rtw_wp_rpt_stats *rpt_stats)
434 {
435 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
436 	u8 ch = 0;
437 	bool bempty = 1;
438 	u8 fwcmd_queue_idx = 0;
439 
440 	fwcmd_queue_idx = rtw_hal_get_fwcmd_queue_idx(phl_info->hal);
441 
442 	for (ch = 0; ch < hci_info->total_txch_num; ch++) {
443 		if (ch == fwcmd_queue_idx)
444 			continue;
445 		if (rpt_stats[ch].busy_cnt != 0)
446 			bempty = 0;
447 	}
448 
449 	if (bempty)
450 		phl_ltr_sw_trigger(phl_info->phl_com, phl_info->hal,
451 			RTW_PCIE_LTR_SW_IDLE);
452 
453 }
454 #endif
455 
456 #ifdef RTW_WKARD_TXBD_UPD_LMT
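/*
 * RTW_WKARD_TXBD_UPD_LMT: instead of returning a completed WD page or H2C
 * buffer straight to the idle pool, keep it parked in a fixed-size "work
 * ring" of hal_spec->txbd_multi_tag entries (see enqueue_wd_work_ring()
 * and enqueue_h2c_work_ring()).  A buffer is only handed back to the idle
 * pool once it has been displaced from the work ring, i.e. after
 * txbd_multi_tag further completions, which keeps recently posted buffers
 * alive a little longer on ICs with TXBD update limitations.
 */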
457 static void
458 _phl_free_h2c_work_ring(struct phl_info_t *phl_info,
459 			struct rtw_wd_page_ring *wd_page_ring)
460 {
461 	void *drv_priv = phl_to_drvpriv(phl_info);
462 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
463 	struct rtw_h2c_work *h2c_work = &wd_page_ring->h2c_work;
464 	struct rtw_h2c_pkt *cmd = h2c_work->cmd;
465 	struct rtw_h2c_pkt *data = h2c_work->data;
466 	struct rtw_h2c_pkt *ldata = h2c_work->ldata;
467 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
468 	u16 i = 0, buf_num = 0;
469 
470 	buf_num = hal_spec->txbd_multi_tag;
471 
472 	if (NULL != cmd) {
473 		for (i = 0; i < buf_num; i++) {
474 			if (NULL == cmd->vir_head)
475 				continue;
476 			hci_trx_ops->free_h2c_pkt_buf(phl_info, cmd);
477 			cmd->vir_head = NULL;
478 			cmd->cache = false;
479 			cmd++;
480 		}
481 		_os_mem_free(drv_priv, h2c_work->cmd,
482 			     buf_num * sizeof(*h2c_work->cmd));
483 	}
484 	if (NULL != data) {
485 		for (i = 0; i < buf_num; i++) {
486 			if (NULL == data->vir_head)
487 				continue;
488 			hci_trx_ops->free_h2c_pkt_buf(phl_info, data);
489 			data->vir_head = NULL;
490 			data->cache = false;
491 			data++;
492 		}
493 		_os_mem_free(drv_priv, h2c_work->data,
494 			     buf_num * sizeof(*h2c_work->data));
495 	}
496 	if (NULL != ldata) {
497 		for (i = 0; i < buf_num; i++) {
498 			if (NULL == ldata->vir_head)
499 				continue;
500 			hci_trx_ops->free_h2c_pkt_buf(phl_info, ldata);
501 			ldata->vir_head = NULL;
502 			ldata->cache = false;
503 			ldata++;
504 		}
505 		_os_mem_free(drv_priv, h2c_work->ldata,
506 			     buf_num * sizeof(*h2c_work->ldata));
507 	}
508 
509 	if (NULL != h2c_work->cmd_ring) {
510 		_os_mem_free(drv_priv, h2c_work->cmd_ring,
511 			     buf_num * sizeof(struct rtw_h2c_pkt *));
512 	}
513 	if (NULL != h2c_work->data_ring) {
514 		_os_mem_free(drv_priv, h2c_work->data_ring,
515 			     buf_num * sizeof(struct rtw_h2c_pkt *));
516 	}
517 	if (NULL != h2c_work->ldata_ring) {
518 		_os_mem_free(drv_priv, h2c_work->ldata_ring,
519 			     buf_num * sizeof(struct rtw_h2c_pkt *));
520 	}
521 	h2c_work->cmd_cnt = 0;
522 	h2c_work->cmd_idx = 0;
523 	h2c_work->data_cnt = 0;
524 	h2c_work->data_idx = 0;
525 	h2c_work->ldata_cnt = 0;
526 	h2c_work->ldata_idx = 0;
527 	_os_spinlock_free(drv_priv,	&h2c_work->lock);
528 }
529 
530 
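/*
 * Pre-allocate the three classes of H2C work buffers (CMD, DATA and
 * LONG_DATA).  Each buffer reserves FWCMD_HDR_LEN + _WD_BODY_LEN of
 * headroom in front of the payload area, and vir_data/vir_tail/vir_end
 * are set up so callers can append payload up to the class-specific
 * maximum (H2C_CMD_LEN / H2C_DATA_LEN / H2C_LONG_DATA_LEN).
 */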
531 static enum rtw_phl_status
532 _phl_alloc_h2c_work_ring(struct phl_info_t *phl_info,
533 			 struct rtw_wd_page_ring *wd_page_ring)
534 {
535 	enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
536 	void *drv_priv = phl_to_drvpriv(phl_info);
537 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
538 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
539 	struct rtw_h2c_work *h2c_work = &wd_page_ring->h2c_work;
540 	struct rtw_h2c_pkt *cmd = NULL, *data =  NULL, *ldata = NULL;
541 	u16 buf_num = 0, i = 0;
542 
543 	buf_num = hal_spec->txbd_multi_tag;
544 	_os_spinlock_init(drv_priv, &h2c_work->lock);
545 
546 	h2c_work->cmd = _os_mem_alloc(drv_priv, buf_num * sizeof(*cmd));
547 	h2c_work->data = _os_mem_alloc(drv_priv, buf_num * sizeof(*data));
548 	h2c_work->ldata = _os_mem_alloc(drv_priv, buf_num * sizeof(*ldata));
549 
550 	if (!h2c_work->cmd || !h2c_work->data || !h2c_work->ldata) {
551 		psts = RTW_PHL_STATUS_RESOURCE;
552 		goto out;
553 	}
554 	cmd = h2c_work->cmd;
555 	data = h2c_work->data;
556 	ldata = h2c_work->ldata;
557 
558 	_os_mem_set(drv_priv, cmd, 0, buf_num * sizeof(*cmd));
559 	_os_mem_set(drv_priv, data, 0, buf_num * sizeof(*data));
560 	_os_mem_set(drv_priv, ldata, 0, buf_num * sizeof(*ldata));
561 
562 	h2c_work->cmd_ring =
563 		_os_mem_alloc(drv_priv,
564 			       buf_num * sizeof(struct rtw_h2c_pkt *));
565 	h2c_work->data_ring =
566 		_os_mem_alloc(drv_priv,
567 			       buf_num * sizeof(struct rtw_h2c_pkt *));
568 	h2c_work->ldata_ring =
569 		_os_mem_alloc(drv_priv,
570 			       buf_num * sizeof(struct rtw_h2c_pkt *));
571 
572 	if (!h2c_work->cmd_ring || !h2c_work->data_ring ||
573 	    !h2c_work->ldata_ring) {
574 		psts = RTW_PHL_STATUS_RESOURCE;
575 		goto out;
576 	}
577 	_os_mem_set(drv_priv, h2c_work->cmd_ring, 0,
578 		    buf_num * sizeof(struct rtw_h2c_pkt *));
579 	_os_mem_set(drv_priv, h2c_work->data_ring, 0,
580 		    buf_num * sizeof(struct rtw_h2c_pkt *));
581 	_os_mem_set(drv_priv, h2c_work->ldata_ring, 0,
582 		    buf_num * sizeof(struct rtw_h2c_pkt *));
583 
584 	for (i = 0; i < buf_num; i++) {
585 		cmd->type = H2CB_TYPE_CMD;
586 		cmd->cache = false;
587 		cmd->buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_CMD_LEN;
588 		hci_trx_ops->alloc_h2c_pkt_buf(phl_info, cmd, cmd->buf_len);
589 		if (NULL == cmd->vir_head) {
590 			psts = RTW_PHL_STATUS_RESOURCE;
591 			goto out;
592 		}
593 		cmd->vir_data = cmd->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
594 		cmd->vir_tail = cmd->vir_data;
595 		cmd->vir_end = cmd->vir_data + H2C_CMD_LEN;
596 		INIT_LIST_HEAD(&cmd->list);
597 		h2c_work->cmd_ring[i] = cmd;
598 		h2c_work->cmd_cnt++;
599 		cmd++;
600 	}
601 	for (i = 0; i < buf_num; i++) {
602 		data->type = H2CB_TYPE_DATA;
603 		data->cache = false;
604 		data->buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_DATA_LEN;
605 		hci_trx_ops->alloc_h2c_pkt_buf(phl_info, data, data->buf_len);
606 		if (NULL == data->vir_head) {
607 			psts = RTW_PHL_STATUS_RESOURCE;
608 			goto out;
609 		}
610 		data->vir_data = data->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
611 		data->vir_tail = data->vir_data;
612 		data->vir_end = data->vir_data + H2C_DATA_LEN;
613 		INIT_LIST_HEAD(&data->list);
614 		h2c_work->data_ring[i] = data;
615 		h2c_work->data_cnt++;
616 		data++;
617 	}
618 	for (i = 0; i < buf_num; i++) {
619 		ldata->type = H2CB_TYPE_LONG_DATA;
620 		ldata->cache = false;
621 		ldata->buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN +
622 				 H2C_LONG_DATA_LEN;
623 		hci_trx_ops->alloc_h2c_pkt_buf(phl_info, ldata, ldata->buf_len);
624 		if (NULL == ldata->vir_head) {
625 			psts = RTW_PHL_STATUS_RESOURCE;
626 			goto out;
627 		}
628 		ldata->vir_data = ldata->vir_head + FWCMD_HDR_LEN +
629 				 _WD_BODY_LEN;
630 		ldata->vir_tail = ldata->vir_data;
631 		ldata->vir_end = ldata->vir_data + H2C_LONG_DATA_LEN;
632 		INIT_LIST_HEAD(&ldata->list);
633 		h2c_work->ldata_ring[i] = ldata;
634 		h2c_work->ldata_cnt++;
635 		ldata++;
636 	}
637 
638 	h2c_work->cmd_idx = 0;
639 	h2c_work->data_idx = 0;
640 	h2c_work->ldata_idx = 0;
641 	psts = RTW_PHL_STATUS_SUCCESS;
642 
643 out:
644 	if (RTW_PHL_STATUS_SUCCESS != psts) {
645 		_phl_free_h2c_work_ring(phl_info, wd_page_ring);
646 		h2c_work->cmd = NULL;
647 		h2c_work->data = NULL;
648 		h2c_work->ldata = NULL;
649 		h2c_work->cmd_ring = NULL;
650 		h2c_work->data_ring = NULL;
651 		h2c_work->ldata_ring = NULL;
652 	}
653 
654 	return psts;
655 }
656 
657 
658 static void
659 _phl_free_wd_work_ring(struct phl_info_t *phl_info,
660 		       struct rtw_wd_page_ring *wd_page_ring)
661 {
662 	void *drv_priv = phl_to_drvpriv(phl_info);
663 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
664 	u16 i = 0, buf_num = 0;
665 
666 	buf_num = hal_spec->txbd_multi_tag;
667 
668 	if (NULL != wd_page_ring->wd_work) {
669 		for (i = 0; i < buf_num; i++) {
670 
671 			if (NULL == wd_page_ring->wd_work[i].vir_addr)
672 				continue;
673 
674 			wd_page_ring->wd_work[i].wp_seq = WP_RESERVED_SEQ;
675 			_os_shmem_free(drv_priv, phl_info->hci->wd_dma_pool,
676 			       	wd_page_ring->wd_work[i].vir_addr,
677 			       	(_dma *)&wd_page_ring->wd_work[i].phy_addr_l,
678 			       	(_dma *)&wd_page_ring->wd_work[i].phy_addr_h,
679 			       	wd_page_ring->wd_work[i].buf_len,
680 				wd_page_ring->wd_work[i].cache,
681 				PCI_DMA_FROMDEVICE,
682 				wd_page_ring->wd_work[i].os_rsvd[0]);
683 			wd_page_ring->wd_work[i].vir_addr = NULL;
684 			wd_page_ring->wd_work[i].cache = 0;
685 		}
686 
687 		_os_mem_free(drv_priv, wd_page_ring->wd_work,
688 			      buf_num * sizeof(*wd_page_ring->wd_work));
689 		wd_page_ring->wd_work = NULL;
690 	}
691 
692 	if (NULL != wd_page_ring->wd_work_ring) {
693 		_os_mem_free(drv_priv, wd_page_ring->wd_work_ring,
694 			      buf_num * sizeof(struct rtw_wd_page *));
695 		wd_page_ring->wd_work_ring = NULL;
696 	}
697 	wd_page_ring->wd_work_cnt = 0;
698 	wd_page_ring->wd_work_idx = 0;
699 
700 }
701 
702 static enum rtw_phl_status
703 _phl_alloc_wd_work_ring(struct phl_info_t *phl_info,
704 			struct rtw_wd_page_ring *wd_page_ring)
705 {
706 	enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
707 	void *drv_priv = phl_to_drvpriv(phl_info);
708 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
709 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
710 	struct rtw_wd_page *wd_work = NULL;
711 	void *dma_pool = NULL;
712 	u32 buf_len = 0;
713 	u16 buf_num = 0, i = 0;
714 
715 	buf_num = hal_spec->txbd_multi_tag;
716 
717 	wd_page_ring->wd_work = _os_mem_alloc(drv_priv,
718 					       buf_num * sizeof(*wd_work));
719 	if (!wd_page_ring->wd_work) {
720 		psts = RTW_PHL_STATUS_RESOURCE;
721 		goto out;
722 	}
723 	wd_work = wd_page_ring->wd_work;
724 	_os_mem_set(drv_priv, wd_work, 0, buf_num * sizeof(*wd_work));
725 
726 	wd_page_ring->wd_work_ring =
727 		_os_mem_alloc(drv_priv,
728 			       buf_num * sizeof(struct rtw_wd_page *));
729 	if (!wd_page_ring->wd_work_ring) {
730 		psts = RTW_PHL_STATUS_RESOURCE;
731 		goto out;
732 	}
733 	_os_mem_set(drv_priv, wd_page_ring->wd_work_ring, 0,
734 		    buf_num * sizeof(struct rtw_wd_page *));
735 
736 	for (i = 0; i < buf_num; i++) {
737 #ifdef CONFIG_DMA_TX_USE_COHERENT_MEM
738 		wd_work[i].cache = POOL_ADDR;
739 		dma_pool = phl_info->hci->wd_dma_pool;
740 #else
741 		wd_work[i].cache = VIRTUAL_ADDR;
742 #endif
743 		buf_len = WD_PAGE_SIZE;
744 		wd_work[i].vir_addr = _os_shmem_alloc(drv_priv, dma_pool,
745 					(_dma *)&wd_work[i].phy_addr_l,
746 					(_dma *)&wd_work[i].phy_addr_h,
747 					buf_len,
748 					wd_work[i].cache,
749 					PCI_DMA_TODEVICE,
750 					&wd_work[i].os_rsvd[0]);
751  		if (NULL == wd_work[i].vir_addr) {
752 			psts = RTW_PHL_STATUS_RESOURCE;
753 			goto out;
754 		}
755 		wd_work[i].buf_len = buf_len;
756 		wd_work[i].wp_seq = WP_RESERVED_SEQ;
757 		INIT_LIST_HEAD(&wd_work[i].list);
758 
759 		wd_page_ring->wd_work_ring[i] = &wd_work[i];
760 		wd_page_ring->wd_work_cnt++;
761 		/* hana_todo now check 4 byte align only */
762 		/* if ((unsigned long)wd_page_buf & 0xF) { */
763 		/* 	res = _FAIL; */
764 		/* 	break; */
765 		/* } */
766 	}
767 
768 	wd_page_ring->wd_work_idx = 0;
769 	psts = RTW_PHL_STATUS_SUCCESS;
770 
771 out:
772 	if (RTW_PHL_STATUS_SUCCESS != psts) {
773 		_phl_free_wd_work_ring(phl_info, wd_page_ring);
774 		wd_page_ring->wd_work = NULL;
775 		wd_page_ring->wd_work_ring = NULL;
776 	}
777 
778 	return psts;
779 }
780 #else
781 #define _phl_free_h2c_work_ring(_phl, _ring)
782 #define _phl_alloc_h2c_work_ring(_phl, _ring) RTW_PHL_STATUS_SUCCESS
783 #define _phl_free_wd_work_ring(_phl, _ring)
784 #define _phl_alloc_wd_work_ring(_phl, _ring) RTW_PHL_STATUS_SUCCESS
785 #endif
786 
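/*
 * WD page bookkeeping: pages move between spinlock-protected lists,
 * idle -> pending -> busy.  Idle pages are free for new TX requests,
 * the pending list holds pages queued for posting to the TX ring, and
 * busy pages have been posted and wait to be recycled by the write
 * pointer report.
 */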
787 static enum rtw_phl_status enqueue_pending_wd_page(struct phl_info_t *phl_info,
788 				struct rtw_wd_page_ring *wd_page_ring,
789 				struct rtw_wd_page *wd_page, u8 pos)
790 {
791 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
792 	void *drv_priv = phl_to_drvpriv(phl_info);
793 	_os_list *list = &wd_page_ring->pending_wd_page_list;
794 
795 	if (wd_page != NULL) {
796 		_os_spinlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
797 
798 		if (_tail == pos)
799 			list_add_tail(&wd_page->list, list);
800 		else if (_first == pos)
801 			list_add(&wd_page->list, list);
802 
803 		wd_page_ring->pending_wd_page_cnt++;
804 
805 		_os_spinunlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
806 
807 		pstatus = RTW_PHL_STATUS_SUCCESS;
808 	}
809 
810 	return pstatus;
811 }
812 
813 static enum rtw_phl_status enqueue_busy_wd_page(struct phl_info_t *phl_info,
814 				struct rtw_wd_page_ring *wd_page_ring,
815 				struct rtw_wd_page *wd_page, u8 pos)
816 {
817 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
818 	void *drv_priv = phl_to_drvpriv(phl_info);
819 	_os_list *list = &wd_page_ring->busy_wd_page_list;
820 
821 	if (wd_page != NULL) {
822 		_os_spinlock(drv_priv, &wd_page_ring->busy_lock, _bh, NULL);
823 
824 		if (_tail == pos)
825 			list_add_tail(&wd_page->list, list);
826 		else if (_first == pos)
827 			list_add(&wd_page->list, list);
828 
829 		wd_page_ring->busy_wd_page_cnt++;
830 
831 		_os_spinunlock(drv_priv, &wd_page_ring->busy_lock, _bh, NULL);
832 
833 		pstatus = RTW_PHL_STATUS_SUCCESS;
834 	}
835 
836 	return pstatus;
837 }
838 
839 
840 static enum rtw_phl_status enqueue_idle_wd_page(
841 				struct phl_info_t *phl_info,
842 				struct rtw_wd_page_ring *wd_page_ring,
843 				struct rtw_wd_page *wd_page)
844 {
845 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
846 	void *drv_priv = phl_to_drvpriv(phl_info);
847 	_os_list *list = &wd_page_ring->idle_wd_page_list;
848 
849 	if (wd_page != NULL) {
850 		_os_mem_set(phl_to_drvpriv(phl_info), wd_page->vir_addr, 0,
851 					WD_PAGE_SIZE);
852 		wd_page->buf_len = WD_PAGE_SIZE;
853 		wd_page->wp_seq = WP_RESERVED_SEQ;
854 		wd_page->host_idx = 0;
855 		INIT_LIST_HEAD(&wd_page->list);
856 
857 		_os_spinlock(drv_priv, &wd_page_ring->idle_lock, _bh, NULL);
858 
859 		list_add_tail(&wd_page->list, list);
860 		wd_page_ring->idle_wd_page_cnt++;
861 
862 		_os_spinunlock(drv_priv, &wd_page_ring->idle_lock, _bh, NULL);
863 
864 		pstatus = RTW_PHL_STATUS_SUCCESS;
865 	}
866 
867 	return pstatus;
868 }
869 
870 #ifdef RTW_WKARD_TXBD_UPD_LMT
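/*
 * Work-ring recycling (RTW_WKARD_TXBD_UPD_LMT): the new buffer overwrites
 * the slot at the current index and the buffer that used to occupy that
 * slot is returned to the idle pool, so each buffer rests in the ring for
 * exactly ring-size completions.  E.g. with cnt = 4 and idx = 1, the old
 * ring[1] goes back to the idle list, the new entry is stored in ring[1]
 * and idx advances to 2 (wrapping at cnt).
 */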
871 static enum rtw_phl_status enqueue_h2c_work_ring(
872 				struct phl_info_t *phl_info,
873 				struct rtw_h2c_pkt *h2c)
874 {
875 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
876 	void *drv_priv = phl_to_drvpriv(phl_info);
877 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
878 	struct rtw_wd_page_ring *wd_ring = NULL;
879 	struct rtw_h2c_work *h2c_work = NULL;
880 	struct rtw_h2c_pkt *work_done_h2c = NULL;
881 	struct rtw_h2c_pkt **ring = NULL;
882 	u16 *idx = NULL, *cnt = NULL;
883 	u8 fwcmd_qidx = 0;
884 
885 	fwcmd_qidx = rtw_hal_get_fwcmd_queue_idx(phl_info->hal);
886 	wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
887 	h2c_work = &wd_ring[fwcmd_qidx].h2c_work;
888 
889 	if (h2c == NULL)
890 		goto out;
891 
892 	_os_spinlock(drv_priv, &h2c_work->lock, _bh, NULL);
893 
894 	if (H2CB_TYPE_CMD == h2c->type) {
895 		ring = h2c_work->cmd_ring;
896 		idx = &h2c_work->cmd_idx;
897 		cnt = &h2c_work->cmd_cnt;
898 	} else if (H2CB_TYPE_DATA == h2c->type) {
899 		ring = h2c_work->data_ring;
900 		idx = &h2c_work->data_idx;
901 		cnt = &h2c_work->data_cnt;
902 	} else if (H2CB_TYPE_LONG_DATA == h2c->type) {
903 		ring = h2c_work->ldata_ring;
904 		idx = &h2c_work->ldata_idx;
905 		cnt = &h2c_work->ldata_cnt;
906 	} else {
907 		_os_spinunlock(drv_priv, &h2c_work->lock, _bh, NULL);
908 		goto out;
909 	}
910 
911 	work_done_h2c = ring[*idx];
912 	ring[*idx] = h2c;
913 	*idx = (*idx + 1) % *cnt;
914 
915 	_os_spinunlock(drv_priv, &h2c_work->lock, _bh, NULL);
916 
917 	pstatus = phl_enqueue_idle_h2c_pkt(phl_info, work_done_h2c);
918 
919 out:
920 	return pstatus;
921 }
922 
923 static enum rtw_phl_status enqueue_wd_work_ring(
924 				struct phl_info_t *phl_info,
925 				struct rtw_wd_page_ring *wd_page_ring,
926 				struct rtw_wd_page *wd_page)
927 {
928 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
929 	void *drv_priv = phl_to_drvpriv(phl_info);
930 	struct rtw_wd_page *work_done_wd = NULL;
931 	struct rtw_wd_page **ring = wd_page_ring->wd_work_ring;
932 
933 	if (wd_page != NULL) {
934 
935 		_os_spinlock(drv_priv, &wd_page_ring->work_lock, _bh, NULL);
936 
937 		work_done_wd = ring[wd_page_ring->wd_work_idx];
938 		ring[wd_page_ring->wd_work_idx] = wd_page;
939 		wd_page_ring->wd_work_idx =
940 		    (wd_page_ring->wd_work_idx + 1) % wd_page_ring->wd_work_cnt;
941 
942 		_os_spinunlock(drv_priv, &wd_page_ring->work_lock, _bh, NULL);
943 
944 		enqueue_idle_wd_page(phl_info, wd_page_ring, work_done_wd);
945 
946 		pstatus = RTW_PHL_STATUS_SUCCESS;
947 	}
948 
949 	return pstatus;
950 }
951 #else
952 #define enqueue_h2c_work_ring(_phl, _h2c) RTW_PHL_STATUS_FAILURE
953 #define enqueue_wd_work_ring(_phl, _ring, _wd) RTW_PHL_STATUS_FAILURE
954 #endif
955 
956 
957 static struct rtw_wd_page *query_pending_wd_page(struct phl_info_t *phl_info,
958 				struct rtw_wd_page_ring *wd_page_ring)
959 {
960 	void *drv_priv = phl_to_drvpriv(phl_info);
961 	_os_list *pending_list = &wd_page_ring->pending_wd_page_list;
962 	struct rtw_wd_page *wd_page = NULL;
963 
964 	_os_spinlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
965 
966 	if (true == list_empty(pending_list)) {
967 		wd_page = NULL;
968 	} else {
969 		wd_page = list_first_entry(pending_list, struct rtw_wd_page,
970 						list);
971 		wd_page_ring->pending_wd_page_cnt--;
972 		list_del(&wd_page->list);
973 	}
974 
975 	_os_spinunlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
976 
977 	return wd_page;
978 }
979 
980 
981 static struct rtw_wd_page *query_idle_wd_page(struct phl_info_t *phl_info,
982 				struct rtw_wd_page_ring *wd_page_ring)
983 {
984 	void *drv_priv = phl_to_drvpriv(phl_info);
985 	_os_list *idle_list = &wd_page_ring->idle_wd_page_list;
986 	struct rtw_wd_page *wd_page = NULL;
987 
988 	_os_spinlock(drv_priv, &wd_page_ring->idle_lock, _bh, NULL);
989 
990 	if (true == list_empty(idle_list)) {
991 		wd_page = NULL;
992 	} else {
993 		wd_page = list_first_entry(idle_list, struct rtw_wd_page, list);
994 		wd_page_ring->idle_wd_page_cnt--;
995 		list_del(&wd_page->list);
996 	}
997 
998 	_os_spinunlock(drv_priv, &wd_page_ring->idle_lock, _bh, NULL);
999 
1000 	return wd_page;
1001 }
1002 
1003 static enum rtw_phl_status rtw_release_target_wd_page(
1004 					struct phl_info_t *phl_info,
1005 					struct rtw_wd_page_ring *wd_page_ring,
1006 					struct rtw_wd_page *wd_page)
1007 {
1008 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1009 
1010 	if (wd_page_ring != NULL && wd_page != NULL) {
1011 		enqueue_idle_wd_page(phl_info, wd_page_ring, wd_page);
1012 		pstatus = RTW_PHL_STATUS_SUCCESS;
1013 	}
1014 
1015 	return pstatus;
1016 }
1017 
1018 static enum rtw_phl_status rtw_release_pending_wd_page(
1019 				struct phl_info_t *phl_info,
1020 				struct rtw_wd_page_ring *wd_page_ring,
1021 				u16 release_num)
1022 {
1023 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1024 	_os_list *list = &wd_page_ring->pending_wd_page_list;
1025 	struct rtw_wd_page *wd_page = NULL;
1026 
1027 	if (wd_page_ring != NULL) {
1028 		while (release_num > 0 && true != list_empty(list)) {
1029 
1030 			wd_page = query_pending_wd_page(phl_info, wd_page_ring);
1031 
1032 			enqueue_idle_wd_page(phl_info, wd_page_ring, wd_page);
1033 
1034 			release_num--;
1035 		}
1036 		pstatus = RTW_PHL_STATUS_SUCCESS;
1037 	}
1038 	return pstatus;
1039 }
1040 
1041 static enum rtw_phl_status rtw_release_busy_wd_page(
1042 				struct phl_info_t *phl_info,
1043 				struct rtw_wd_page_ring *wd_page_ring,
1044 				u16 release_num)
1045 {
1046 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1047 	_os_list *list = &wd_page_ring->busy_wd_page_list;
1048 	struct rtw_wd_page *wd_page = NULL;
1049 	void *drv_priv = phl_to_drvpriv(phl_info);
1050 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
1051 
1052 	if (wd_page_ring != NULL) {
1053 		_os_spinlock(drv_priv, &wd_page_ring->busy_lock, _bh, NULL);
1054 
1055 		while (release_num > 0 && true != list_empty(list)) {
1056 
1057 			wd_page = list_first_entry(list, struct rtw_wd_page,
1058 							list);
1059 			wd_page_ring->busy_wd_page_cnt--;
1060 			list_del(&wd_page->list);
1061 
1062 			_os_spinunlock(drv_priv, &wd_page_ring->busy_lock, _bh, NULL);
1063 			if (true == hal_spec->txbd_upd_lmt) {
1064 				pstatus = enqueue_wd_work_ring(phl_info,
1065 							       wd_page_ring,
1066 							       wd_page);
1067 			} else {
1068 				pstatus = enqueue_idle_wd_page(phl_info,
1069 							       wd_page_ring,
1070 							       wd_page);
1071 			}
1072 			_os_spinlock(drv_priv, &wd_page_ring->busy_lock, _bh, NULL);
1073 
1074 			if (RTW_PHL_STATUS_SUCCESS != pstatus)
1075 				break;
1076 			release_num--;
1077 		}
1078 		_os_spinunlock(drv_priv, &wd_page_ring->busy_lock, _bh, NULL);
1079 
1080 	}
1081 	return pstatus;
1082 }
1083 
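/*
 * Reset helpers used by phl_tx_reset_pcie(): _phl_reset_txbd() clears a
 * channel's TX base descriptor ring and restores the available descriptor
 * count from bus_cap, while _phl_reset_wp_tag() force-recycles every
 * outstanding payload of the channel as TX_STATUS_TX_FAIL_SW_DROP.
 */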
1084 static void _phl_reset_txbd(struct phl_info_t *phl_info,
1085 				struct tx_base_desc *txbd)
1086 {
1087 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1088 	_os_mem_set(phl_to_drvpriv(phl_info), txbd->vir_addr, 0, txbd->buf_len);
1089 	txbd->host_idx = 0;
1090 	txbd->avail_num = (u16)hal_com->bus_cap.txbd_num;
1091 }
1092 static void _phl_reset_wp_tag(struct phl_info_t *phl_info,
1093 			struct rtw_wd_page_ring *wd_page_ring, u8 dma_ch)
1094 {
1095 	u16 wp_seq = 0;
1096 
1097 	for (wp_seq = 0; wp_seq < WP_MAX_SEQ_NUMBER; wp_seq++) {
1098 		if (NULL != wd_page_ring->wp_tag[wp_seq].ptr)
1099 			phl_recycle_payload(phl_info, dma_ch, wp_seq,
1100 					    TX_STATUS_TX_FAIL_SW_DROP);
1101 	}
1102 }
1103 
1104 
1105 static enum rtw_phl_status enqueue_pending_h2c_pkt(struct phl_info_t *phl_info,
1106 				struct phl_h2c_pkt_pool *h2c_pkt_pool,
1107 				struct rtw_h2c_pkt *h2c_pkt, u8 pos)
1108 {
1109 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1110 #if 0
1111 	void *drv_priv = phl_to_drvpriv(phl_info);
1112 	_os_list *list = &wd_page_ring->pending_wd_page_list;
1113 
1114 	if (wd_page != NULL) {
1115 		_os_spinlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
1116 
1117 		if (_tail == pos)
1118 			list_add_tail(&wd_page->list, list);
1119 		else if (_first == pos)
1120 			list_add(&wd_page->list, list);
1121 
1122 		wd_page_ring->pending_wd_page_cnt++;
1123 
1124 		_os_spinunlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
1125 
1126 		pstatus = RTW_PHL_STATUS_SUCCESS;
1127 	}
1128 #endif
1129 	return pstatus;
1130 }
1131 
1132 static struct rtw_h2c_pkt *query_pending_h2c_pkt(struct phl_info_t *phl_info,
1133 				struct phl_h2c_pkt_pool *h2c_pkt_pool)
1134 {
1135 	//void *drv_priv = phl_to_drvpriv(phl_info);
1136 	//_os_list *pending_list = &wd_page_ring->pending_wd_page_list;
1137 	struct rtw_h2c_pkt *h2c_pkt = NULL;
1138 #if 0
1139 	_os_spinlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
1140 
1141 	if (true == list_empty(pending_list)) {
1142 		wd_page = NULL;
1143 	} else {
1144 		wd_page = list_first_entry(pending_list, struct rtw_wd_page,
1145 						list);
1146 		wd_page_ring->pending_wd_page_cnt--;
1147 		list_del(&wd_page->list);
1148 	}
1149 
1150 	_os_spinunlock(drv_priv, &wd_page_ring->pending_lock, _bh, NULL);
1151 #endif
1152 	return h2c_pkt;
1153 }
1154 
1155 static enum rtw_phl_status phl_release_busy_h2c_pkt(
1156 				struct phl_info_t *phl_info,
1157 				struct phl_h2c_pkt_pool *h2c_pkt_pool,
1158 				u16 release_num)
1159 {
1160 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1161 	_os_list *list = &h2c_pkt_pool->busy_h2c_pkt_list.queue;
1162 	struct rtw_h2c_pkt *h2c_pkt = NULL;
1163 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
1164 
1165 	if (h2c_pkt_pool != NULL) {
1166 
1167 		while (release_num > 0 && true != list_empty(list)) {
1168 			h2c_pkt = phl_query_busy_h2c_pkt(phl_info);
1169 
1170 			if (!h2c_pkt)
1171 				break;
1172 
1173 			if (true == hal_spec->txbd_upd_lmt) {
1174 				pstatus = enqueue_h2c_work_ring(phl_info,
1175 								h2c_pkt);
1176 			} else {
1177 				pstatus = phl_enqueue_idle_h2c_pkt(phl_info,
1178 								   h2c_pkt);
1179 			}
1180 
1181 			if (RTW_PHL_STATUS_SUCCESS != pstatus)
1182 				break;
1183 			release_num--;
1184 		}
1185 	}
1186 	return pstatus;
1187 }
1188 
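/*
 * Full PCIe TX reset: per channel, clear the TXBD ring, push all busy and
 * pending WD pages back to the idle/work pools, clear the cached HW
 * resource count and drop any payloads still tagged in wp_tag; finally
 * release every busy H2C packet and dump the H2C pool statistics.
 */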
1189 static void phl_tx_reset_pcie(struct phl_info_t *phl_info)
1190 {
1191 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
1192 	struct rtw_wd_page_ring *wd_ring = NULL;
1193 	struct tx_base_desc *txbd = NULL;
1194 	struct phl_h2c_pkt_pool *h2c_pool = NULL;
1195 	u8 ch = 0;
1196 
1197 	txbd = (struct tx_base_desc *)hci_info->txbd_buf;
1198 	wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
1199 	h2c_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
1200 
1201 	for (ch = 0; ch < hci_info->total_txch_num; ch++) {
1202 		_phl_reset_txbd(phl_info, &txbd[ch]);
1203 		rtw_release_busy_wd_page(phl_info, &wd_ring[ch],
1204 					 wd_ring[ch].busy_wd_page_cnt);
1205 		rtw_release_pending_wd_page(phl_info, &wd_ring[ch],
1206 					 wd_ring[ch].pending_wd_page_cnt);
1207 		wd_ring[ch].cur_hw_res = 0;
1208 		_phl_reset_wp_tag(phl_info, &wd_ring[ch], ch);
1209 	}
1210 
1211 	phl_release_busy_h2c_pkt(phl_info, h2c_pool,
1212 				 (u16)h2c_pool->busy_h2c_pkt_list.cnt);
1213 
1214 	phl_dump_h2c_pool_stats(phl_info->h2c_pool);
1215 }
1216 
1217 
1218 
1219 #ifdef CONFIG_DYNAMIC_RX_BUF
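/*
 * CONFIG_DYNAMIC_RX_BUF: refill the backing storage of an RX buffer after
 * its previous packet has been handed up.  A buffer marked 'reuse' only
 * needs to be re-mapped for DMA; otherwise a fresh RX packet buffer of
 * RX_BUF_SIZE is allocated and the descriptor is marked dynamic.
 */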
1220 void _phl_alloc_dynamic_rxbuf_pcie(struct rtw_rx_buf *rx_buf,
1221 					struct phl_info_t *phl_info)
1222 {
1223 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1224 	void *drv_priv = phl_to_drvpriv(phl_info);
1225 	u32 buf_len = RX_BUF_SIZE;
1226 
1227 	if (rx_buf != NULL && rx_buf->reuse) {
1228 		rx_buf->reuse = false;
1229 		_os_pkt_buf_map_rx(drv_priv,
1230 				(_dma *)&rx_buf->phy_addr_l,
1231 				(_dma *)&rx_buf->phy_addr_h,
1232 				RX_BUF_SIZE + PHL_RX_HEADROOM,
1233 				rx_buf->os_priv);
1234 		return;
1235 	}
1236 
1237 	if (rx_buf != NULL) {
1238 #ifdef CONFIG_DMA_RX_USE_COHERENT_MEM
1239 		rx_buf->cache = false;
1240 #else
1241 		rx_buf->cache = VIRTUAL_ADDR;
1242 #endif
1243 		rx_buf->vir_addr = _os_pkt_buf_alloc_rx(
1244 				drv_priv,
1245 				(_dma *)&rx_buf->phy_addr_l,
1246 				(_dma *)&rx_buf->phy_addr_h,
1247 				buf_len,
1248 				rx_buf->cache,
1249 				&rx_buf->os_priv);
1250 		if (NULL == rx_buf->vir_addr) {
1251 			sts = RTW_PHL_STATUS_RESOURCE;
1252 		} else {
1253 			rx_buf->buf_len = buf_len;
1254 			rx_buf->dynamic = 1;
1255 			rx_buf->reuse = false;
1256 			/* enqueue_idle_rx_buf(phl_info, rx_buf_ring, rx_buf); */
1257 			sts = RTW_PHL_STATUS_SUCCESS;
1258 		}
1259 	}
1260 }
1261 #endif
1262 
1263 
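/*
 * RX buffer bookkeeping mirrors the WD page scheme: each RX channel owns
 * an idle list (buffers ready to be posted to the RXBD ring) and a busy
 * list (buffers currently owned by hardware), each protected by its own
 * spinlock.
 */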
1264 static enum rtw_phl_status enqueue_busy_rx_buf(
1265 				struct phl_info_t *phl_info,
1266 				struct rtw_rx_buf_ring *rx_buf_ring,
1267 				struct rtw_rx_buf *rx_buf, u8 pos)
1268 {
1269 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1270 	_os_list *list = &rx_buf_ring->busy_rxbuf_list;
1271 
1272 	if (rx_buf != NULL) {
1273 		_os_spinlock(phl_to_drvpriv(phl_info),
1274 				&rx_buf_ring->busy_rxbuf_lock, _bh, NULL);
1275 		if (_tail == pos)
1276 			list_add_tail(&rx_buf->list, list);
1277 		else if (_first == pos)
1278 			list_add(&rx_buf->list, list);
1279 
1280 		rx_buf_ring->busy_rxbuf_cnt++;
1281 		_os_spinunlock(phl_to_drvpriv(phl_info),
1282 				&rx_buf_ring->busy_rxbuf_lock, _bh, NULL);
1283 		pstatus = RTW_PHL_STATUS_SUCCESS;
1284 	}
1285 
1286 	return pstatus;
1287 }
1288 
1289 
1290 static enum rtw_phl_status enqueue_idle_rx_buf(
1291 				struct phl_info_t *phl_info,
1292 				struct rtw_rx_buf_ring *rx_buf_ring,
1293 				struct rtw_rx_buf *rx_buf)
1294 {
1295 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1296 	_os_list *list = &rx_buf_ring->idle_rxbuf_list;
1297 
1298 	if (rx_buf != NULL) {
1299 		_os_mem_set(phl_to_drvpriv(phl_info), rx_buf->vir_addr, 0,
1300 					RX_BUF_SIZE);
1301 		rx_buf->buf_len = RX_BUF_SIZE;
1302 		INIT_LIST_HEAD(&rx_buf->list);
1303 
1304 		_os_spinlock(phl_to_drvpriv(phl_info),
1305 					&rx_buf_ring->idle_rxbuf_lock, _bh, NULL);
1306 		list_add_tail(&rx_buf->list, list);
1307 		rx_buf_ring->idle_rxbuf_cnt++;
1308 		_os_spinunlock(phl_to_drvpriv(phl_info),
1309 					&rx_buf_ring->idle_rxbuf_lock, _bh, NULL);
1310 
1311 		pstatus = RTW_PHL_STATUS_SUCCESS;
1312 	}
1313 
1314 	return pstatus;
1315 }
1316 
1317 static struct rtw_rx_buf *query_busy_rx_buf(struct phl_info_t *phl_info,
1318 					struct rtw_rx_buf_ring *rx_buf_ring)
1319 {
1320 	_os_list *busy_list = &rx_buf_ring->busy_rxbuf_list;
1321 	struct rtw_rx_buf *rx_buf = NULL;
1322 
1323 	_os_spinlock(phl_to_drvpriv(phl_info),
1324 			&rx_buf_ring->busy_rxbuf_lock, _bh, NULL);
1325 	if (true == list_empty(busy_list)) {
1326 		rx_buf = NULL;
1327 	} else {
1328 		rx_buf = list_first_entry(busy_list, struct rtw_rx_buf, list);
1329 		rx_buf_ring->busy_rxbuf_cnt--;
1330 		list_del(&rx_buf->list);
1331 	}
1332 	_os_spinunlock(phl_to_drvpriv(phl_info),
1333 			&rx_buf_ring->busy_rxbuf_lock, _bh, NULL);
1334 	return rx_buf;
1335 }
1336 
1337 static struct rtw_rx_buf *query_idle_rx_buf(struct phl_info_t *phl_info,
1338 					struct rtw_rx_buf_ring *rx_buf_ring)
1339 {
1340 	_os_list *idle_list = &rx_buf_ring->idle_rxbuf_list;
1341 	struct rtw_rx_buf *rx_buf = NULL;
1342 
1343 	_os_spinlock(phl_to_drvpriv(phl_info),
1344 			&rx_buf_ring->idle_rxbuf_lock, _bh, NULL);
1345 	if (true == list_empty(idle_list)) {
1346 		rx_buf = NULL;
1347 	} else {
1348 		rx_buf = list_first_entry(idle_list, struct rtw_rx_buf, list);
1349 		rx_buf_ring->idle_rxbuf_cnt--;
1350 		list_del(&rx_buf->list);
1351 	}
1352 	_os_spinunlock(phl_to_drvpriv(phl_info),
1353 			&rx_buf_ring->idle_rxbuf_lock, _bh, NULL);
1354 
1355 	return rx_buf;
1356 }
1357 
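/*
 * Return a consumed RX buffer 'r' of channel 'ch' to the idle list.  With
 * CONFIG_DYNAMIC_RX_BUF, WiFi-type buffers first get fresh (or re-mapped)
 * backing storage via _phl_alloc_dynamic_rxbuf_pcie().
 */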
1358 enum rtw_phl_status
1359 phl_release_target_rx_buf(struct phl_info_t *phl_info, void *r, u8 ch,
1360 				enum rtw_rx_type type)
1361 {
1362 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
1363 	struct rtw_rx_buf_ring *rx_buf_ring = NULL;
1364 	struct rtw_rx_buf *rx_buf = (struct rtw_rx_buf *)r;
1365 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1366 
1367 	rx_buf_ring = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
1368 
1369 #ifdef CONFIG_DYNAMIC_RX_BUF
1370 	if(rx_buf && (type == RTW_RX_TYPE_WIFI))
1371 		_phl_alloc_dynamic_rxbuf_pcie(rx_buf, phl_info);
1372 #endif
1373 	if (rx_buf_ring != NULL && rx_buf != NULL) {
1374 		enqueue_idle_rx_buf(phl_info, &rx_buf_ring[ch], rx_buf);
1375 		pstatus = RTW_PHL_STATUS_SUCCESS;
1376 	}
1377 
1378 	return pstatus;
1379 }
1380 
1381 static enum rtw_phl_status phl_release_busy_rx_buf(
1382 				struct phl_info_t *phl_info,
1383 				struct rtw_rx_buf_ring *rx_buf_ring,
1384 				u16 release_num)
1385 {
1386 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1387 	struct rtw_rx_buf *rx_buf = NULL;
1388 
1389 	if (rx_buf_ring != NULL) {
1390 
1391 		while (release_num > 0) {
1392 			rx_buf = query_busy_rx_buf(phl_info, rx_buf_ring);
1393 			if (NULL == rx_buf)
1394 				break;
1395 			enqueue_idle_rx_buf(phl_info, rx_buf_ring, rx_buf);
1396 			release_num--;
1397 		}
1398 		pstatus = RTW_PHL_STATUS_SUCCESS;
1399 	}
1400 	return pstatus;
1401 }
1402 
1403 
1404 
1405 /* static void rtl8852ae_free_wd_page_buf(_adapter *adapter, void *vir_addr, */
1406 /* 				dma_addr_t *bus_addr, size_t size) */
1407 /* { */
1408 /* 	struct platform_ops *ops = &adapter->platform_func; */
1409 /* 	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter); */
1410 /* 	struct pci_dev *pdev = dvobj->ppcidev; */
1411 
1412 /* 	FUNCIN(); */
1413 /* 	ops->free_cache_mem(pdev,vir_addr, bus_addr, size, PCI_DMA_TODEVICE); */
1414 
1415 /* 	/\* NONCACHE hana_todo */
1416 /* 	 * ops->alloc_noncache_mem(pdev, vir_addr, bus_addr, size); */
1417 /* 	 *\/ */
1418 /* 	FUNCOUT(); */
1419 /* } */
1420 
1421 static void _phl_free_rxbuf_pcie(struct phl_info_t *phl_info,
1422 				 struct rtw_rx_buf *rx_buf)
1423 {
1424 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1425 	u16 rxbuf_num = (u16)hal_com->bus_cap.rxbuf_num;
1426 	u16 i = 0;
1427 
1428 	if (NULL != rx_buf) {
1429 		for (i = 0; i < rxbuf_num; i++) {
1430 
1431 			if (NULL == rx_buf[i].vir_addr)
1432 				continue;
1433 			_os_pkt_buf_free_rx(phl_to_drvpriv(phl_info),
1434 								rx_buf[i].vir_addr,
1435 								(_dma)rx_buf[i].phy_addr_l,
1436 								(_dma)rx_buf[i].phy_addr_h,
1437 								rx_buf[i].buf_len,
1438 								rx_buf[i].cache,
1439 								rx_buf[i].os_priv);
1440 			rx_buf[i].vir_addr = NULL;
1441 			rx_buf[i].cache = 0;
1442 		}
1443 
1444 		_os_mem_free(phl_to_drvpriv(phl_info), rx_buf,
1445 					sizeof(struct rtw_rx_buf) * rxbuf_num);
1446 	}
1447 }
1448 
1449 static void _phl_free_rxbuf_pool_pcie(struct phl_info_t *phl_info,
1450 						u8 *rxbuf_pool, u8 ch_num)
1451 {
1452 	struct rtw_rx_buf_ring *ring = NULL;
1453 	u8 i = 0;
1454 
1455 	FUNCIN();
1456 	ring = (struct rtw_rx_buf_ring *)rxbuf_pool;
1457 	if (NULL != ring) {
1458 		for (i = 0; i < ch_num; i++) {
1459 
1460 			ring[i].idle_rxbuf_cnt = 0;
1461 
1462 			if (NULL == ring[i].rx_buf)
1463 				continue;
1464 
1465 			_phl_free_rxbuf_pcie(phl_info, ring[i].rx_buf);
1466 			ring[i].rx_buf = NULL;
1467 			_os_spinlock_free(phl_to_drvpriv(phl_info),
1468 					&ring->idle_rxbuf_lock);
1469 			_os_spinlock_free(phl_to_drvpriv(phl_info),
1470 					&ring->busy_rxbuf_lock);
1471 		}
1472 		_os_mem_free(phl_to_drvpriv(phl_info), ring,
1473 					sizeof(struct rtw_rx_buf_ring) * ch_num);
1474 	}
1475 
1476 	FUNCOUT();
1477 }
1478 
1479 static void _phl_destory_dma_pool_pcie(struct phl_info_t *phl_info, struct dma_pool *pool)
1480 {
1481 #ifdef CONFIG_DMA_TX_USE_COHERENT_MEM
1482 	_os_dma_pool_destory(phl_to_drvpriv(phl_info), pool);
1483 #endif
1484 }
1485 
1486 /* static void *rtl8852ae_alloc_wd_page_buf(_adapter *adapter, */
1487 /* 					 dma_addr_t *bus_addr, size_t size) */
1488 /* { */
1489 /* 	struct platform_ops *ops = &adapter->platform_func; */
1490 /* 	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter); */
1491 /* 	struct pci_dev *pdev = dvobj->ppcidev; */
1492 /* 	void *vir_addr = NULL; */
1493 
1494 /* 	FUNCIN(); */
1495 /* 	vir_addr = ops->alloc_cache_mem(pdev, bus_addr, size, PCI_DMA_TODEVICE); */
1496 
1497 /* 	/\* NONCACHE hana_todo */
1498 /* 	 * vir_addr = ops->alloc_noncache_mem(pdev, bus_addr, size); */
1499 /* 	 *\/ */
1500 
1501 /* 	FUNCOUT(); */
1502 /* 	return vir_addr; */
1503 /* } */
1504 
1505 static struct rtw_rx_buf *
1506 _phl_alloc_rxbuf_pcie(struct phl_info_t *phl_info,
1507 				struct rtw_rx_buf_ring *rx_buf_ring)
1508 {
1509 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1510 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1511 	struct rtw_rx_buf *rx_buf = NULL;
1512 	u32 buf_len = 0;
1513 	u16 rxbuf_num = (u16)hal_com->bus_cap.rxbuf_num;
1514 	void *drv_priv = phl_to_drvpriv(phl_info);
1515 	int i;
1516 
1517 	buf_len = sizeof(*rx_buf) * rxbuf_num;
1518 	rx_buf = _os_mem_alloc(drv_priv, buf_len);
1519 	if (rx_buf != NULL) {
1520 		for (i = 0; i < rxbuf_num; i++) {
1521 #ifdef CONFIG_DMA_RX_USE_COHERENT_MEM
1522 			rx_buf[i].cache = false;
1523 #else
1524 			rx_buf[i].cache = VIRTUAL_ADDR;
1525 #endif
1526 			buf_len = RX_BUF_SIZE;
1527 			rx_buf[i].vir_addr = _os_pkt_buf_alloc_rx(
1528 					phl_to_drvpriv(phl_info),
1529 					(_dma *)&rx_buf[i].phy_addr_l,
1530 					(_dma *)&rx_buf[i].phy_addr_h,
1531 					buf_len,
1532 					rx_buf[i].cache,
1533 					&rx_buf[i].os_priv);
1534 			if (NULL == rx_buf[i].vir_addr) {
1535 				pstatus = RTW_PHL_STATUS_RESOURCE;
1536 				break;
1537 			}
1538 			rx_buf[i].buf_len = buf_len;
1539 			rx_buf[i].dynamic = 0;
1540 #ifdef CONFIG_DYNAMIC_RX_BUF
1541 			rx_buf[i].reuse = false;
1542 #endif
1543 
1544 			INIT_LIST_HEAD(&rx_buf[i].list);
1545 			enqueue_idle_rx_buf(phl_info, rx_buf_ring, &rx_buf[i]);
1546 			pstatus = RTW_PHL_STATUS_SUCCESS;
1547 				/* hana_todo now check 4 byte align only */
1548 			/* if ((unsigned long)wd_page_buf & 0xF) { */
1549 			/* 	res = _FAIL; */
1550 			/* 	break; */
1551 			/* } */
1552 		}
1553 	}
1554 
1555 	if (RTW_PHL_STATUS_SUCCESS != pstatus) {
1556 		_phl_free_rxbuf_pcie(phl_info, rx_buf);
1557 		rx_buf = NULL;
1558 	}
1559 	return rx_buf;
1560 }
1561 
1562 
1563 
1564 static enum rtw_phl_status
1565 _phl_alloc_rxbuf_pool_pcie(struct phl_info_t *phl_info, u8 ch_num)
1566 {
1567 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1568 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1569 	struct rtw_rx_buf_ring *rx_buf_ring = NULL;
1570 	struct rtw_rx_buf *rx_buf = NULL;
1571 	u32 buf_len = 0;
1572 	u16 rxbuf_num = (u16)hal_com->bus_cap.rxbuf_num;
1573 	int i;
1574 	FUNCIN_WSTS(pstatus);
1575 
1576 	buf_len = sizeof(*rx_buf_ring) * ch_num;
1577 	rx_buf_ring = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
1578 
1579 	if (NULL != rx_buf_ring) {
1580 		for (i = 0; i < ch_num; i++) {
1581 			_os_spinlock_init(phl_to_drvpriv(phl_info),
1582 					&rx_buf_ring[i].idle_rxbuf_lock);
1583 			_os_spinlock_init(phl_to_drvpriv(phl_info),
1584 					&rx_buf_ring[i].busy_rxbuf_lock);
1585 			INIT_LIST_HEAD(&rx_buf_ring[i].idle_rxbuf_list);
1586 			INIT_LIST_HEAD(&rx_buf_ring[i].busy_rxbuf_list);
1587 			rx_buf = _phl_alloc_rxbuf_pcie(phl_info,
1588 							&rx_buf_ring[i]);
1589 			if (NULL == rx_buf) {
1590 				pstatus = RTW_PHL_STATUS_RESOURCE;
1591 				break;
1592 			}
1593 			rx_buf_ring[i].rx_buf = rx_buf;
1594 			rx_buf_ring[i].idle_rxbuf_cnt = rxbuf_num;
1595 			rx_buf_ring[i].busy_rxbuf_cnt = 0;
1596 			pstatus = RTW_PHL_STATUS_SUCCESS;
1597 		}
1598 	}
1599 
1600 	if (RTW_PHL_STATUS_SUCCESS == pstatus) {
1601 		phl_info->hci->rxbuf_pool = (u8 *)rx_buf_ring;
1602 	} else
1603 		_phl_free_rxbuf_pool_pcie(phl_info, (u8 *)rx_buf_ring, ch_num);
1604 	FUNCOUT_WSTS(pstatus);
1605 
1606 	return pstatus;
1607 }
1608 
1609 
1610 
1611 
1612 /* static void rtl8852ae_free_wd_page_buf(_adapter *adapter, void *vir_addr, */
1613 /* 				dma_addr_t *bus_addr, size_t size) */
1614 /* { */
1615 /* 	struct platform_ops *ops = &adapter->platform_func; */
1616 /* 	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter); */
1617 /* 	struct pci_dev *pdev = dvobj->ppcidev; */
1618 
1619 /* 	FUNCIN(); */
1620 /* 	ops->free_cache_mem(pdev,vir_addr, bus_addr, size, PCI_DMA_TODEVICE); */
1621 
1622 /* 	/\* NONCACHE hana_todo */
1623 /* 	 * ops->alloc_noncache_mem(pdev, vir_addr, bus_addr, size); */
1624 /* 	 *\/ */
1625 /* 	FUNCOUT(); */
1626 /* } */
1627 
1628 static void _phl_free_wd_page_pcie(struct phl_info_t *phl_info,
1629 					struct rtw_wd_page *wd_page)
1630 {
1631 	u16 i = 0;
1632 
1633 	if (NULL != wd_page) {
1634 		for (i = 0; i < MAX_WD_PAGE_NUM; i++) {
1635 
1636 			if (NULL == wd_page[i].vir_addr)
1637 				continue;
1638 
1639 			wd_page[i].wp_seq = WP_RESERVED_SEQ;
1640 			_os_shmem_free(phl_to_drvpriv(phl_info),
1641 						phl_info->hci->wd_dma_pool,
1642 						wd_page[i].vir_addr,
1643 						(_dma *)&wd_page[i].phy_addr_l,
1644 						(_dma *)&wd_page[i].phy_addr_h,
1645 						wd_page[i].buf_len,
1646 						wd_page[i].cache,
1647 						PCI_DMA_FROMDEVICE,
1648 						wd_page[i].os_rsvd[0]);
1649 			wd_page[i].vir_addr = NULL;
1650 			wd_page[i].cache = 0;
1651 		}
1652 
1653 		_os_mem_free(phl_to_drvpriv(phl_info), wd_page,
1654 					sizeof(struct rtw_wd_page) * MAX_WD_PAGE_NUM);
1655 	}
1656 }
1657 
1658 static void _phl_free_wd_ring_pcie(struct phl_info_t *phl_info, u8 *wd_page_buf,
1659 					u8 ch_num)
1660 {
1661 	struct rtw_wd_page_ring *wd_page_ring = NULL;
1662 	void *drv_priv = phl_to_drvpriv(phl_info);
1663 	u8 i = 0;
1664 	FUNCIN();
1665 
1666 	wd_page_ring = (struct rtw_wd_page_ring *)wd_page_buf;
1667 	if (NULL != wd_page_ring) {
1668 		for (i = 0; i < ch_num; i++) {
1669 
1670 			wd_page_ring[i].idle_wd_page_cnt = 0;
1671 
1672 			if (NULL == wd_page_ring[i].wd_page)
1673 				continue;
1674 
1675 			if (i == rtw_hal_get_fwcmd_queue_idx(phl_info->hal)) {
1676 				_phl_free_h2c_work_ring(phl_info,
1677 							&wd_page_ring[i]);
1678 			}
1679 			_phl_free_wd_work_ring(phl_info, &wd_page_ring[i]);
1680 			_phl_free_wd_page_pcie(phl_info,
1681 						wd_page_ring[i].wd_page);
1682 			wd_page_ring[i].wd_page = NULL;
1683 			_os_spinlock_free(drv_priv,
1684 						&wd_page_ring[i].idle_lock);
1685 			_os_spinlock_free(drv_priv,
1686 						&wd_page_ring[i].busy_lock);
1687 			_os_spinlock_free(drv_priv,
1688 						&wd_page_ring[i].pending_lock);
1689 			_os_spinlock_free(drv_priv,
1690 						&wd_page_ring[i].work_lock);
1691 			_os_spinlock_free(drv_priv,
1692 						&wd_page_ring[i].wp_tag_lock);
1693 		}
1694 		_os_mem_free(phl_to_drvpriv(phl_info), wd_page_ring,
1695 						sizeof(struct rtw_wd_page_ring) * ch_num);
1696 	}
1697 	FUNCOUT();
1698 }
1699 
1700 /* static void *rtl8852ae_alloc_wd_page_buf(_adapter *adapter, */
1701 /* 					 dma_addr_t *bus_addr, size_t size) */
1702 /* { */
1703 /* 	struct platform_ops *ops = &adapter->platform_func; */
1704 /* 	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter); */
1705 /* 	struct pci_dev *pdev = dvobj->ppcidev; */
1706 /* 	void *vir_addr = NULL; */
1707 
1708 /* 	FUNCIN(); */
1709 /* 	vir_addr = ops->alloc_cache_mem(pdev, bus_addr, size, PCI_DMA_TODEVICE); */
1710 
1711 /* 	/\* NONCACHE hana_todo */
1712 /* 	 * vir_addr = ops->alloc_noncache_mem(pdev, bus_addr, size); */
1713 /* 	 *\/ */
1714 
1715 /* 	FUNCOUT(); */
1716 /* 	return vir_addr; */
1717 /* } */
1718 
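/*
 * Allocate MAX_WD_PAGE_NUM WD (WiFi descriptor) pages for one TX channel and
 * chain them onto the caller's idle list. With CONFIG_DMA_TX_USE_COHERENT_MEM
 * the pages come from the wd_dma_pool coherent pool (POOL_ADDR); otherwise
 * they are cached VIRTUAL_ADDR buffers that get written back explicitly
 * before DMA (see phl_prepare_tx_pcie()).
 */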
1719 static struct rtw_wd_page *_phl_alloc_wd_page_pcie(
1720 			struct phl_info_t *phl_info, _os_list *list)
1721 {
1722 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1723 	struct rtw_wd_page *wd_page = NULL;
1724 	void *dma_pool = NULL;
1725 	u32 buf_len = 0;
1726 	int i;
1727 
1728 	buf_len = sizeof(*wd_page) * MAX_WD_PAGE_NUM;
1729 	wd_page = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
1730 	if (wd_page != NULL) {
1731 		for (i = 0; i < MAX_WD_PAGE_NUM; i++) {
1732 #ifdef CONFIG_DMA_TX_USE_COHERENT_MEM
1733 			wd_page[i].cache = POOL_ADDR;
1734 			dma_pool = phl_info->hci->wd_dma_pool;
1735 #else
1736 			wd_page[i].cache = VIRTUAL_ADDR;
1737 #endif
1738 			buf_len = WD_PAGE_SIZE;
1739 			wd_page[i].vir_addr = _os_shmem_alloc(
1740 						phl_to_drvpriv(phl_info), dma_pool,
1741 						(_dma *)&wd_page[i].phy_addr_l,
1742 						(_dma *)&wd_page[i].phy_addr_h,
1743 						buf_len,
1744 						wd_page[i].cache,
1745 						PCI_DMA_TODEVICE,
1746 						&wd_page[i].os_rsvd[0]);
1747 			if (NULL == wd_page[i].vir_addr) {
1748 				pstatus = RTW_PHL_STATUS_RESOURCE;
1749 				break;
1750 			}
1751 			wd_page[i].buf_len = buf_len;
1752 			wd_page[i].wp_seq = WP_RESERVED_SEQ;
1753 			INIT_LIST_HEAD(&wd_page[i].list);
1754 
1755 			list_add_tail(&wd_page[i].list, list);
1756 
1757 			pstatus = RTW_PHL_STATUS_SUCCESS;
1758 				/* hana_todo now check 4 byte align only */
1759 			/* if ((unsigned long)wd_page_buf & 0xF) { */
1760 			/* 	res = _FAIL; */
1761 			/* 	break; */
1762 			/* } */
1763 		}
1764 	}
1765 
1766 	if (RTW_PHL_STATUS_SUCCESS != pstatus) {
1767 		_phl_free_wd_page_pcie(phl_info, wd_page);
1768 		wd_page = NULL;
1769 	}
1770 
1771 	return wd_page;
1772 }
1773 
1774 
1775 
1776 static enum rtw_phl_status
1777 _phl_alloc_wd_ring_pcie(struct phl_info_t *phl_info, u8 ch_num)
1778 {
1779 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1780 	struct rtw_wd_page_ring *wd_page_ring = NULL;
1781 	struct rtw_wd_page *wd_page = NULL;
1782 	void *drv_priv = NULL;
1783 	u32 buf_len = 0;
1784 	int i;
1785 
1786 	FUNCIN_WSTS(pstatus);
1787 	drv_priv = phl_to_drvpriv(phl_info);
1788 	buf_len = sizeof(struct rtw_wd_page_ring) * ch_num;
1789 	wd_page_ring = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
1790 	if (NULL != wd_page_ring) {
1791 		for (i = 0; i < ch_num; i++) {
1792 			INIT_LIST_HEAD(&wd_page_ring[i].idle_wd_page_list);
1793 			INIT_LIST_HEAD(&wd_page_ring[i].busy_wd_page_list);
1794 			INIT_LIST_HEAD(&wd_page_ring[i].pending_wd_page_list);
1795 			_os_spinlock_init(drv_priv,
1796 						&wd_page_ring[i].idle_lock);
1797 			_os_spinlock_init(drv_priv,
1798 						&wd_page_ring[i].busy_lock);
1799 			_os_spinlock_init(drv_priv,
1800 						&wd_page_ring[i].pending_lock);
1801 			_os_spinlock_init(drv_priv,
1802 						&wd_page_ring[i].work_lock);
1803 			_os_spinlock_init(drv_priv,
1804 						&wd_page_ring[i].wp_tag_lock);
1805 
1806 			wd_page = _phl_alloc_wd_page_pcie(phl_info,
1807 					&wd_page_ring[i].idle_wd_page_list);
1808 			if (NULL == wd_page) {
1809 				pstatus = RTW_PHL_STATUS_RESOURCE;
1810 				break;
1811 			}
1812 
1813 			pstatus = _phl_alloc_wd_work_ring(phl_info,
1814 							  &wd_page_ring[i]);
1815 			if (RTW_PHL_STATUS_SUCCESS != pstatus)
1816 				break;
1817 
1818 			if (i == rtw_hal_get_fwcmd_queue_idx(phl_info->hal)) {
1819 				pstatus = _phl_alloc_h2c_work_ring(phl_info,
1820 							     &wd_page_ring[i]);
1821 				if (RTW_PHL_STATUS_SUCCESS != pstatus)
1822 					break;
1823 			}
1824 			wd_page_ring[i].wd_page = wd_page;
1825 			wd_page_ring[i].idle_wd_page_cnt = MAX_WD_PAGE_NUM;
1826 			wd_page_ring[i].busy_wd_page_cnt = 0;
1827 			wd_page_ring[i].pending_wd_page_cnt = 0;
1828 			wd_page_ring[i].wp_seq = 1;
1829 			pstatus = RTW_PHL_STATUS_SUCCESS;
1830 		}
1831 	}
1832 
1833 	if (RTW_PHL_STATUS_SUCCESS == pstatus) {
1834 		phl_info->hci->wd_ring = (u8 *)wd_page_ring;
1835 	} else
1836 		_phl_free_wd_ring_pcie(phl_info, (u8 *)wd_page_ring, ch_num);
1837 	FUNCOUT_WSTS(pstatus);
1838 
1839 	return pstatus;
1840 }
1841 
1842 static void _phl_free_h2c_pkt_buf_pcie(struct phl_info_t *phl_info,
1843 				struct rtw_h2c_pkt *_h2c_pkt)
1844 {
1845 	struct rtw_h2c_pkt *h2c_pkt = _h2c_pkt;
1846 
1847 	_os_shmem_free(phl_to_drvpriv(phl_info), NULL,
1848 				h2c_pkt->vir_head,
1849 				(_dma *)&h2c_pkt->phy_addr_l,
1850 				(_dma *)&h2c_pkt->phy_addr_h,
1851 				h2c_pkt->buf_len,
1852 				h2c_pkt->cache,
1853 				PCI_DMA_FROMDEVICE,
1854 				h2c_pkt->os_rsvd[0]);
1855 }
1856 
1857 enum rtw_phl_status _phl_alloc_h2c_pkt_buf_pcie(struct phl_info_t *phl_info,
1858 	struct rtw_h2c_pkt *_h2c_pkt, u32 buf_len)
1859 {
1860 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1861 	struct rtw_h2c_pkt *h2c_pkt = _h2c_pkt;
1862 
1863 	h2c_pkt->vir_head = _os_shmem_alloc(
1864 				phl_to_drvpriv(phl_info), NULL,
1865 				(_dma *)&h2c_pkt->phy_addr_l,
1866 				(_dma *)&h2c_pkt->phy_addr_h,
1867 				buf_len,
1868 				h2c_pkt->cache,
1869 				PCI_DMA_TODEVICE,
1870 				&h2c_pkt->os_rsvd[0]);
1871 
1872 	if (h2c_pkt->vir_head)
1873 		pstatus = RTW_PHL_STATUS_SUCCESS;
1874 
1875 	return pstatus;
1876 }
1877 
1878 static void _phl_free_rxbd_pcie(struct phl_info_t *phl_info,
1879 						u8 *rxbd_buf, u8 ch_num)
1880 {
1881 	struct rx_base_desc *rxbd = (struct rx_base_desc *)rxbd_buf;
1882 	u8 i = 0;
1883 
1884 	FUNCIN();
1885 
1886 	if (NULL != rxbd) {
1887 		for (i = 0; i < ch_num; i++) {
1888 
1889 			if (NULL == rxbd[i].vir_addr)
1890 				continue;
1891 			_os_shmem_free(phl_to_drvpriv(phl_info), NULL,
1892 						rxbd[i].vir_addr,
1893 						(_dma *)&rxbd[i].phy_addr_l,
1894 						(_dma *)&rxbd[i].phy_addr_h,
1895 						rxbd[i].buf_len,
1896 						rxbd[i].cache,
1897 						PCI_DMA_FROMDEVICE,
1898 						rxbd[i].os_rsvd[0]);
1899 			rxbd[i].vir_addr = NULL;
1900 			rxbd[i].cache = 0;
1901 		}
1902 
1903 		_os_mem_free(phl_to_drvpriv(phl_info), rxbd,
1904 					sizeof(struct rx_base_desc) * ch_num);
1905 	}
1906 	FUNCOUT();
1907 }
1908 
1909 
1910 static enum rtw_phl_status
1911 _phl_alloc_rxbd_pcie(struct phl_info_t *phl_info, u8 ch_num)
1912 {
1913 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1914 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1915 	struct rx_base_desc *rxbd = NULL;
1916 	u32 buf_len = 0;
1917 	u16 rxbd_num = (u16)hal_com->bus_cap.rxbd_num;
1918 	u8 addr_info_size = hal_com->bus_hw_cap.rxbd_len;
1919 	u8 i = 0;
1920 	FUNCIN_WSTS(pstatus);
1921 
1922 	buf_len = sizeof(struct rx_base_desc) * ch_num;
1923 	rxbd = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
1924 	if (NULL != rxbd) {
1925 		for (i = 0; i < ch_num; i++) {
1926 			rxbd[i].cache = DMA_ADDR;
1927 			buf_len = addr_info_size * rxbd_num;
1928 			rxbd[i].vir_addr = _os_shmem_alloc(
1929 						phl_to_drvpriv(phl_info), NULL,
1930 						(_dma *)&rxbd[i].phy_addr_l,
1931 						(_dma *)&rxbd[i].phy_addr_h,
1932 						buf_len,
1933 						rxbd[i].cache,
1934 						PCI_DMA_TODEVICE,
1935 						&rxbd[i].os_rsvd[0]);
1936 			if (NULL == rxbd[i].vir_addr) {
1937 				pstatus = RTW_PHL_STATUS_RESOURCE;
1938 				break;
1939 			}
1940 			rxbd[i].buf_len = buf_len;
1941 			rxbd[i].host_idx = 0;
1942 			rxbd[i].hw_idx = 0;
1943 			pstatus = RTW_PHL_STATUS_SUCCESS;
1944 		}
1945 	}
1946 
1947 	if (RTW_PHL_STATUS_SUCCESS == pstatus)
1948 		phl_info->hci->rxbd_buf = (u8 *)rxbd;
1949 	else
1950 		_phl_free_rxbd_pcie(phl_info, (u8 *)rxbd, ch_num);
1951 	FUNCOUT_WSTS(pstatus);
1952 
1953 	return pstatus;
1954 }
1955 
1956 
1957 static void _phl_free_txbd_pcie(struct phl_info_t *phl_info, u8 *txbd_buf,
1958 				u8 ch_num)
1959 {
1960 	struct tx_base_desc *txbd = (struct tx_base_desc *)txbd_buf;
1961 	u8 i = 0;
1962 	FUNCIN();
1963 
1964 	if (NULL != txbd) {
1965 		for (i = 0; i < ch_num; i++) {
1966 
1967 			if (NULL == txbd[i].vir_addr)
1968 				continue;
1969 			_os_shmem_free(phl_to_drvpriv(phl_info), NULL,
1970 						txbd[i].vir_addr,
1971 						(_dma *)&txbd[i].phy_addr_l,
1972 						(_dma *)&txbd[i].phy_addr_h,
1973 						txbd[i].buf_len,
1974 						txbd[i].cache,
1975 						PCI_DMA_FROMDEVICE,
1976 						txbd[i].os_rsvd[0]);
1977 			txbd[i].vir_addr = NULL;
1978 			txbd[i].cache = 0;
1979 			_os_spinlock_free(phl_to_drvpriv(phl_info),
1980 						&txbd[i].txbd_lock);
1981 		}
1982 
1983 		_os_mem_free(phl_to_drvpriv(phl_info), txbd,
1984 						sizeof(struct tx_base_desc) * ch_num);
1985 	}
1986 
1987 	FUNCOUT();
1988 }
1989 
1990 
1991 
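/*
 * Allocate one TX BD ring per TX DMA channel: bus_cap.txbd_num descriptors of
 * bus_hw_cap.txbd_len bytes each in DMA-addressable (DMA_ADDR) memory, plus a
 * per-ring spinlock. The resulting array is stored in hci->txbd_buf.
 */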
1992 static enum rtw_phl_status
1993 _phl_alloc_txbd_pcie(struct phl_info_t *phl_info, u8 ch_num)
1994 {
1995 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1996 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
1997 	struct tx_base_desc *txbd = NULL;
1998 	u32 buf_len = 0;
1999 	u16 txbd_num = (u16)hal_com->bus_cap.txbd_num;
2000 	u8 addr_info_size = hal_com->bus_hw_cap.txbd_len;
2001 	u8 i = 0;
2002 	FUNCIN_WSTS(pstatus);
2003 
2004 	buf_len = sizeof(struct tx_base_desc) * ch_num;
2005 	txbd = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
2006 	if (NULL != txbd) {
2007 		for (i = 0; i < ch_num; i++) {
2008 			txbd[i].cache = DMA_ADDR;
2009 			buf_len = addr_info_size * txbd_num;
2010 			txbd[i].vir_addr = _os_shmem_alloc(
2011 						phl_to_drvpriv(phl_info), NULL,
2012 						(_dma *)&txbd[i].phy_addr_l,
2013 						(_dma *)&txbd[i].phy_addr_h,
2014 						buf_len,
2015 						txbd[i].cache,
2016 						PCI_DMA_TODEVICE,
2017 						&txbd[i].os_rsvd[0]);
2018 			if (NULL == txbd[i].vir_addr) {
2019 				pstatus = RTW_PHL_STATUS_RESOURCE;
2020 				break;
2021 			}
2022 			txbd[i].buf_len = buf_len;
2023 			txbd[i].avail_num = txbd_num;
2024 			_os_spinlock_init(phl_to_drvpriv(phl_info),
2025 						&txbd[i].txbd_lock);
2026 			pstatus = RTW_PHL_STATUS_SUCCESS;
2027 		}
2028 	}
2029 
2030 	if (RTW_PHL_STATUS_SUCCESS == pstatus)
2031 		phl_info->hci->txbd_buf = (u8 *)txbd;
2032 	else
2033 		_phl_free_txbd_pcie(phl_info, (u8 *)txbd, ch_num);
2034 	FUNCOUT_WSTS(pstatus);
2035 
2036 	return pstatus;
2037 }
2038 
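/*
 * Pre-fill every RX BD with an idle RX buffer so the HW has receive buffers
 * from the start: for each RX channel, dequeue idle buffers, program them
 * into the BD ring via rtw_hal_update_rxbd() and move them to the busy list.
 */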
2039 enum rtw_phl_status _phl_update_default_rx_bd(struct phl_info_t *phl_info)
2040 {
2041 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2042 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
2043 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
2044 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
2045 	struct rx_base_desc *rxbd = NULL;
2046 	struct rtw_rx_buf_ring *ring = NULL;
2047 	struct rtw_rx_buf *rxbuf = NULL;
2048 	u32 i = 0, j = 0;
2049 
2050 	rxbd = (struct rx_base_desc *)hci_info->rxbd_buf;
2051 	ring = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
2052 	for (i = 0; i < hci_info->total_rxch_num; i++) {
2053 		for (j = 0; j < hal_com->bus_cap.rxbd_num; j++) {
2054 			rxbuf = query_idle_rx_buf(phl_info, &ring[i]);
2055 			if (NULL == rxbuf) {
2056 				PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
2057 					"[WARNING] there is no resource for rx bd default setting\n");
2058 				pstatus = RTW_PHL_STATUS_RESOURCE;
2059 				break;
2060 			}
2061 
2062 			hstatus = rtw_hal_update_rxbd(phl_info->hal, &rxbd[i],
2063 								rxbuf);
2064 			if (RTW_HAL_STATUS_SUCCESS == hstatus) {
2065 				enqueue_busy_rx_buf(phl_info, &ring[i], rxbuf, _tail);
2066 				pstatus = RTW_PHL_STATUS_SUCCESS;
2067 			} else {
2068 				enqueue_idle_rx_buf(phl_info, &ring[i], rxbuf);
2069 				PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] update rx bd fail\n");
2070 				pstatus = RTW_PHL_STATUS_FAILURE;
2071 				break;
2072 			}
2073 		}
2074 
2075 	}
2076 
2077 	return pstatus;
2078 }
2079 
2080 static void _phl_reset_rxbd(struct phl_info_t *phl_info,
2081 					struct rx_base_desc *rxbd)
2082 {
2083 	_os_mem_set(phl_to_drvpriv(phl_info), rxbd->vir_addr, 0, rxbd->buf_len);
2084 	rxbd->host_idx = 0;
2085 	rxbd->hw_idx = 0;
2086 }
2087 
2088 
2089 static void phl_rx_reset_pcie(struct phl_info_t *phl_info)
2090 {
2091 	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
2092 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
2093 	struct hal_spec_t *hal_spec = &phl_com->hal_spec;
2094 	struct rx_base_desc *rxbd = NULL;
2095 	struct rtw_rx_buf_ring *ring = NULL;
2096 	u8 ch = 0;
2097 
2098 	rxbd = (struct rx_base_desc *)hci_info->rxbd_buf;
2099 	ring = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
2100 
2101 	for (ch = 0; ch < hci_info->total_rxch_num; ch++) {
2102 		_phl_reset_rxbd(phl_info, &rxbd[ch]);
2103 		phl_release_busy_rx_buf(phl_info, &ring[ch],
2104 					ring[ch].busy_rxbuf_cnt);
2105 	}
2106 	hal_spec->rx_tag[0] = 0;
2107 	hal_spec->rx_tag[1] = 0;
2108 	_phl_update_default_rx_bd(phl_info);
2109 
2110 }
2111 
2112 
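/*
 * Reorder the flow-control result list by HW resource availability: rings
 * whose DMA channel currently has no free TX BD entries are moved to the
 * tail so channels with room get serviced first. Per-channel results are
 * cached in the 'avail'/'no_res' u32 bitmaps to avoid querying the same
 * channel twice. (The call site in _phl_tx_flow_ctrl_pcie() below is
 * currently commented out.)
 */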
2113 void _phl_sort_ring_by_hw_res(struct phl_info_t *phl_info)
2114 {
2115 	_os_list *t_fctrl_result = &phl_info->t_fctrl_result;
2116 	struct phl_ring_status *ring_sts, *t;
2117 	u16 hw_res = 0, host_idx = 0, hw_idx = 0;
2118 	u32 avail = 0, no_res = 0;
2119 	_os_list *no_res_first = NULL;
2120 
2121 	phl_list_for_loop_safe(ring_sts, t, struct phl_ring_status,
2122 					t_fctrl_result, list) {
2123 
2124 		if (ring_sts->ring_ptr->dma_ch >= 32)
2125 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
2126 			"[WARNING] dma channel number larger than record map\n");
2127 
2128 		if (no_res & (BIT0 << ring_sts->ring_ptr->dma_ch)) {
2129 			if (&ring_sts->list == no_res_first)
2130 				break;
2131 			list_del(&ring_sts->list);
2132 			list_add_tail(&ring_sts->list, t_fctrl_result);
2133 			continue;
2134 		} else if (avail & (BIT0 << ring_sts->ring_ptr->dma_ch)) {
2135 			continue;
2136 		}
2137 
2138 		hw_res = rtw_hal_tx_res_query(phl_info->hal,
2139 						ring_sts->ring_ptr->dma_ch,
2140 						&host_idx, &hw_idx);
2141 		if (0 == hw_res) {
2142 			if (no_res_first == NULL)
2143 				no_res_first = &ring_sts->list;
2144 			list_del(&ring_sts->list);
2145 			list_add_tail(&ring_sts->list, t_fctrl_result);
2146 			no_res = no_res | (BIT0 << ring_sts->ring_ptr->dma_ch);
2147 		} else {
2148 			avail = avail | (BIT0 << ring_sts->ring_ptr->dma_ch);
2149 		}
2150 	}
2151 }
2152 
2153 void _phl_tx_flow_ctrl_pcie(struct phl_info_t *phl_info, _os_list *sta_list)
2154 {
2155 	/* _phl_sort_ring_by_hw_res(phl_info); */
2156 	phl_tx_flow_ctrl(phl_info, sta_list);
2157 }
2158 
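/*
 * Drain one core TX ring: for each busy entry, stamp the PHL metadata
 * (macid/band/wmm/port/tid/dma_ch) into the xmit request, hand it to
 * hci_trx_ops->prepare_tx(), and advance phl_idx/phl_next_idx with
 * wrap-around at MAX_PHL_RING_ENTRY_NUM. Stops on the first prepare_tx
 * failure.
 */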
2159 static enum rtw_phl_status _phl_handle_xmit_ring_pcie
2160 						(struct phl_info_t *phl_info,
2161 						struct phl_ring_status *ring_sts)
2162 {
2163 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2164 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
2165 	struct rtw_phl_tx_ring *tring = ring_sts->ring_ptr;
2166 	struct rtw_xmit_req *tx_req = NULL;
2167 	u16 rptr = 0, next_idx = 0;
2168 	void *drv_priv = phl_to_drvpriv(phl_info);
2169 
2170 	while (0 != ring_sts->req_busy) {
2171 		rptr = (u16)_os_atomic_read(drv_priv, &tring->phl_next_idx);
2172 
2173 		tx_req = (struct rtw_xmit_req *)tring->entry[rptr];
2174 		if (NULL == tx_req)  {
2175 			PHL_ERR("tx_req is NULL!\n");
2176 			break;
2177 		}
2178 		tx_req->mdata.macid = ring_sts->macid;
2179 		tx_req->mdata.band = ring_sts->band;
2180 		tx_req->mdata.wmm = ring_sts->wmm;
2181 		tx_req->mdata.hal_port = ring_sts->port;
2182 		/*tx_req->mdata.mbssid = ring_sts->mbssid;*/
2183 		tx_req->mdata.tid = tring->tid;
2184 		tx_req->mdata.dma_ch = tring->dma_ch;
2185 		pstatus = hci_trx_ops->prepare_tx(phl_info, tx_req);
2186 
2187 		if (RTW_PHL_STATUS_SUCCESS == pstatus) {
2188 			ring_sts->req_busy--;
2189 
2190 			/* hana_todo, workaround here to update phl_index */
2191 			_os_atomic_set(drv_priv, &tring->phl_idx, rptr);
2192 
2193 			if (0 != ring_sts->req_busy) {
2194 				next_idx = rptr + 1;
2195 
2196 				if (next_idx >= MAX_PHL_RING_ENTRY_NUM) {
2197 					_os_atomic_set(drv_priv,
2198 						       &tring->phl_next_idx, 0);
2199 
2200 				} else {
2201 					_os_atomic_inc(drv_priv,
2202 						       &tring->phl_next_idx);
2203 				}
2204 			}
2205 		} else {
2206 			PHL_INFO("HCI prepare tx fail\n");
2207 			break;
2208 		}
2209 	}
2210 
2211 	return pstatus;
2212 }
2213 
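/*
 * TX bottom-half (tasklet/workitem) entry: skip work while TX is paused or
 * power save forbids the datapath; otherwise run flow control over the rings
 * that have pending requests, turn the entries into WD pages via
 * _phl_handle_xmit_ring_pcie(), flush them to the TX BDs through
 * hci_trx_ops->tx(), and finally honor a pending SW TX stop request.
 */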
2214 static void _phl_tx_callback_pcie(void *context)
2215 {
2216 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2217 	struct rtw_phl_handler *phl_handler
2218 		= (struct rtw_phl_handler *)phl_container_of(context,
2219 							struct rtw_phl_handler,
2220 							os_handler);
2221 	struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
2222 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
2223 	struct phl_ring_status *ring_sts = NULL, *t;
2224 	void *drvpriv = phl_to_drvpriv(phl_info);
2225 	_os_list sta_list;
2226 	bool tx_pause = false;
2227 
2228 	FUNCIN_WSTS(pstatus);
2229 	INIT_LIST_HEAD(&sta_list);
2230 
2231 	/* check datapath sw state */
2232 	tx_pause = phl_datapath_chk_trx_pause(phl_info, PHL_CTRL_TX);
2233 	if (true == tx_pause)
2234 		goto end;
2235 
2236 #ifdef CONFIG_POWER_SAVE
2237 	/* check ps state when tx is not paused */
2238 	if (false == phl_ps_is_datapath_allowed(phl_info)) {
2239 		PHL_WARN("%s(): datapath is not allowed now... may in low power.\n", __func__);
2240 		goto chk_stop;
2241 	}
2242 #endif
2243 
2244 	if (true == phl_check_xmit_ring_resource(phl_info, &sta_list)) {
2245 		_phl_tx_flow_ctrl_pcie(phl_info, &sta_list);
2246 
2247 		phl_list_for_loop_safe(ring_sts, t, struct phl_ring_status,
2248 		                       &phl_info->t_fctrl_result, list) {
2249 			list_del(&ring_sts->list);
2250 			_phl_handle_xmit_ring_pcie(phl_info, ring_sts);
2251 			phl_release_ring_sts(phl_info, ring_sts);
2252 		}
2253 	}
2254 
2255 	pstatus = hci_trx_ops->tx(phl_info);
2256 	if (RTW_PHL_STATUS_FAILURE == pstatus) {
2257 		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] phl_tx fail!\n");
2258 	}
2259 
2260 #ifdef CONFIG_POWER_SAVE
2261 chk_stop:
2262 #endif
2263 	if (PHL_TX_STATUS_STOP_INPROGRESS ==
2264 	    _os_atomic_read(drvpriv, &phl_info->phl_sw_tx_sts)) {
2265 		PHL_WARN("PHL_TX_STATUS_STOP_INPROGRESS, going to stop sw tx.\n");
2266 		phl_tx_stop_pcie(phl_info);
2267 	}
2268 
2269 end:
2270 	phl_free_deferred_tx_ring(phl_info);
2271 
2272 	FUNCOUT_WSTS(pstatus);
2273 }
2274 
2275 
2276 static u8 _phl_check_rx_hw_resource(struct phl_info_t *phl_info)
2277 {
2278 	struct hci_info_t *hci_info = phl_info->hci;
2279 	u16 hw_res = 0, host_idx = 0, hw_idx = 0;
2280 	u8 i = 0;
2281 	u8 avail = 0;
2282 
2283 	for (i = 0; i < hci_info->total_rxch_num; i++) {
2284 		hw_res = rtw_hal_rx_res_query(phl_info->hal,
2285 							i,
2286 							&host_idx, &hw_idx);
2287 
2288 		if (0 != hw_res) {
2289 			avail = true;
2290 			break;
2291 		} else {
2292 			avail = false;
2293 		}
2294 	}
2295 
2296 	return avail;
2297 }
2298 
2299 static void _phl_rx_callback_pcie(void *context)
2300 {
2301 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2302 	struct rtw_phl_handler *phl_handler
2303 		= (struct rtw_phl_handler *)phl_container_of(context,
2304 							struct rtw_phl_handler,
2305 							os_handler);
2306 	struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
2307 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
2308 	void *drvpriv = phl_to_drvpriv(phl_info);
2309 	bool rx_pause = false;
2310 #ifdef CONFIG_SYNC_INTERRUPT
2311 	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
2312 #endif /* CONFIG_SYNC_INTERRUPT */
2313 
2314 	FUNCIN_WSTS(pstatus);
2315 
2316 	/* check datapath sw state */
2317 	rx_pause = phl_datapath_chk_trx_pause(phl_info, PHL_CTRL_RX);
2318 	if (true == rx_pause)
2319 		goto end;
2320 
2321 	do {
2322 		if (false == phl_check_recv_ring_resource(phl_info))
2323 			break;
2324 
2325 		pstatus = hci_trx_ops->rx(phl_info);
2326 
2327 		if (RTW_PHL_STATUS_FAILURE == pstatus) {
2328 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] phl_rx fail!\n");
2329 		}
2330 	} while (false);
2331 
2332 	if (PHL_RX_STATUS_STOP_INPROGRESS ==
2333 	    _os_atomic_read(drvpriv, &phl_info->phl_sw_rx_sts)) {
2334 		phl_rx_stop_pcie(phl_info);
2335 	}
2336 
2337 end:
2338 	/* restore int mask of rx */
2339 	rtw_hal_restore_rx_interrupt(phl_info->hal);
2340 #ifdef CONFIG_SYNC_INTERRUPT
2341 	ops->interrupt_restore(phl_to_drvpriv(phl_info), true);
2342 #endif /* CONFIG_SYNC_INTERRUPT */
2343 
2344 	FUNCOUT_WSTS(pstatus);
2345 
2346 }
2347 void _phl_fill_tx_meta_data(struct rtw_xmit_req *tx_req,
2348                             u16 packet_len)
2349 {
2350 	tx_req->mdata.wp_offset = 56;
2351 	tx_req->mdata.wd_page_size = 1;
2352 	tx_req->mdata.addr_info_num = tx_req->pkt_cnt;
2353 	tx_req->mdata.pktlen = packet_len;
2354 }
2355 
2356 
2357 
2358 void phl_trx_resume_pcie(struct phl_info_t *phl_info, u8 type)
2359 {
2360 	if (PHL_CTRL_TX & type)
2361 		phl_tx_resume_pcie(phl_info);
2362 
2363 	if (PHL_CTRL_RX & type)
2364 		phl_rx_resume_pcie(phl_info);
2365 }
2366 
2367 void phl_trx_reset_pcie(struct phl_info_t *phl_info, u8 type)
2368 {
2369 	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
2370 	struct rtw_stats *phl_stats = &phl_com->phl_stats;
2371 
2372 	PHL_INFO("%s\n", __func__);
2373 
2374 	if (PHL_CTRL_TX & type) {
2375 		phl_tx_reset_pcie(phl_info);
2376 		phl_reset_tx_stats(phl_stats);
2377 	}
2378 
2379 	if (PHL_CTRL_RX & type) {
2380 		phl_rx_reset_pcie(phl_info);
2381 		phl_reset_rx_stats(phl_stats);
2382 	}
2383 }
2384 
2385 void phl_trx_stop_pcie(struct phl_info_t *phl_info)
2386 {
2387 	phl_trx_reset_pcie(phl_info, PHL_CTRL_TX|PHL_CTRL_RX);
2388 }
2389 
2390 void phl_trx_deinit_pcie(struct phl_info_t *phl_info)
2391 {
2392 	struct hci_info_t *hci_info = phl_info->hci;
2393 	FUNCIN();
2394 	_phl_free_rxbuf_pool_pcie(phl_info, hci_info->rxbuf_pool,
2395 					hci_info->total_rxch_num);
2396 	hci_info->rxbuf_pool = NULL;
2397 
2398 	_phl_free_rxbd_pcie(phl_info, hci_info->rxbd_buf,
2399 					hci_info->total_rxch_num);
2400 	hci_info->rxbd_buf = NULL;
2401 
2402 	_phl_free_wd_ring_pcie(phl_info, hci_info->wd_ring,
2403 					hci_info->total_txch_num);
2404 	hci_info->wd_ring = NULL;
2405 
2406 	_phl_free_txbd_pcie(phl_info, hci_info->txbd_buf,
2407 					hci_info->total_txch_num);
2408 	hci_info->txbd_buf = NULL;
2409 
2410 	_phl_destory_dma_pool_pcie(phl_info, hci_info->wd_dma_pool);
2411 
2412 	FUNCOUT();
2413 }
2414 
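/*
 * PCIe TRX init: create the WD DMA pool when CONFIG_DMA_TX_USE_COHERENT_MEM
 * is set, register the TX/SER/RX handlers, then allocate the TX side (TX BD
 * rings plus WD page rings per TX channel) and the RX side (RX BD rings plus
 * RX buffer pools per RX channel), and finally pre-fill the RX BDs. Any
 * failure unwinds through phl_trx_deinit_pcie().
 */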
2415 enum rtw_phl_status phl_trx_init_pcie(struct phl_info_t *phl_info)
2416 {
2417 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2418 	struct hci_info_t *hci_info = phl_info->hci;
2419 	struct rtw_phl_handler *tx_handler = &phl_info->phl_tx_handler;
2420 	struct rtw_phl_handler *rx_handler = &phl_info->phl_rx_handler;
2421 	struct rtw_phl_handler *ser_handler = &phl_info->phl_ser_handler;
2422 	void *drv_priv = phl_to_drvpriv(phl_info);
2423 	u8 txch_num = 0, rxch_num = 0;
2424 	u16 i = 0;
2425 
2426 	FUNCIN_WSTS(pstatus);
2427 
2428 #ifdef CONFIG_DMA_TX_USE_COHERENT_MEM
2429 	/* init DMA pool */
2430 	hci_info->wd_dma_pool = _os_dma_pool_create(drv_priv, "wd_page_pool", WD_PAGE_SIZE);
2431 
2432 	if (hci_info->wd_dma_pool == NULL)
2433 		return pstatus;
2434 #endif
2435 	do {
2436 #ifdef CONFIG_PHL_CPU_BALANCE_TX
2437 		_os_workitem *workitem = &tx_handler->os_handler.u.workitem;
2438 		_os_workitem_config_cpu(drv_priv, workitem, "TX_PHL_0", CPU_ID_TX_PHL_0);
2439 		tx_handler->type = RTW_PHL_HANDLER_PRIO_LOW;
2440 #else
2441 		tx_handler->type = RTW_PHL_HANDLER_PRIO_HIGH; /* tasklet */
2442 #endif
2443 		tx_handler->callback = _phl_tx_callback_pcie;
2444 		tx_handler->context = phl_info;
2445 		tx_handler->drv_priv = drv_priv;
2446 		pstatus = phl_register_handler(phl_info->phl_com, tx_handler);
2447 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2448 			break;
2449 
2450 		ser_handler->type = RTW_PHL_HANDLER_PRIO_HIGH; /* tasklet */
2451 		ser_handler->callback = phl_ser_send_check;
2452 		ser_handler->context = phl_info;
2453 		ser_handler->drv_priv = drv_priv;
2454 		pstatus = phl_register_handler(phl_info->phl_com, ser_handler);
2455 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2456 			break;
2457 
2458 #ifdef CONFIG_DMA_TX_USE_COHERENT_MEM
2459 		/* avoid dma_free_coherent() being called in atomic context */
2460 		rx_handler->type = RTW_PHL_HANDLER_PRIO_LOW;
2461 #else
2462 		rx_handler->type = RTW_PHL_HANDLER_PRIO_HIGH;
2463 #endif
2464 		rx_handler->callback = _phl_rx_callback_pcie;
2465 		rx_handler->context = phl_info;
2466 		rx_handler->drv_priv = drv_priv;
2467 		pstatus = phl_register_handler(phl_info->phl_com, rx_handler);
2468 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2469 			break;
2470 		/* pcie tx sw resource */
2471 		txch_num = rtw_hal_query_txch_num(phl_info->hal);
2472 		hci_info->total_txch_num = txch_num;
2473 		/* allocate tx bd */
2474 		pstatus = _phl_alloc_txbd_pcie(phl_info, txch_num);
2475 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2476 			break;
2477 		/* allocate wd page */
2478 		pstatus = _phl_alloc_wd_ring_pcie(phl_info, txch_num);
2479 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2480 			break;
2481 
2482 		for (i = 0; i < PHL_MACID_MAX_NUM; i++)
2483 			hci_info->wp_seq[i] = WP_RESERVED_SEQ;
2484 
2485 		/* pcie rx sw resource */
2486 		rxch_num = rtw_hal_query_rxch_num(phl_info->hal);
2487 		hci_info->total_rxch_num = rxch_num;
2488 
2489 		pstatus = _phl_alloc_rxbd_pcie(phl_info, rxch_num);
2490 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2491 			break;
2492 		/* allocate wd page */
2493 		pstatus = _phl_alloc_rxbuf_pool_pcie(phl_info, rxch_num);
2494 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2495 			break;
2496 
2497 	} while (false);
2498 
2499 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
2500 		phl_trx_deinit_pcie(phl_info);
2501 	else
2502 		pstatus = _phl_update_default_rx_bd(phl_info);
2503 
2504 	FUNCOUT_WSTS(pstatus);
2505 	return pstatus;
2506 }
2507 
2508 
2509 enum rtw_phl_status phl_trx_config_pcie(struct phl_info_t *phl_info)
2510 {
2511 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2512 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
2513 	struct hci_info_t *hci_info = phl_info->hci;
2514 
2515 	do {
2516 		hstatus = rtw_hal_trx_init(phl_info->hal, hci_info->txbd_buf,
2517 						hci_info->rxbd_buf);
2518 		if (RTW_HAL_STATUS_SUCCESS != hstatus) {
2519 			PHL_ERR("rtw_hal_trx_init fail with status 0x%08X\n",
2520 				hstatus);
2521 			pstatus = RTW_PHL_STATUS_FAILURE;
2522 			break;
2523 		}
2524 		else {
2525 			pstatus = RTW_PHL_STATUS_SUCCESS;
2526 		}
2527 
2528 		phl_tx_start_pcie(phl_info);
2529 		phl_rx_start_pcie(phl_info);
2530 
2531 	} while (false);
2532 
2533 	return pstatus;
2534 }
2535 
2536 #ifdef CONFIG_PHL_TXSC
2537 u8 *_phl_txsc_apply_shortcut(struct phl_info_t *phl_info, struct rtw_xmit_req *tx_req,
2538 							struct rtw_phl_stainfo_t *phl_sta, struct rtw_phl_pkt_req *phl_pkt_req)
2539 {
2540 	struct phl_txsc_entry *ptxsc = NULL;
2541 
2542 	if (phl_sta == NULL)
2543 		return (u8 *)ptxsc;
2544 
2545 	if (tx_req->shortcut_id >= PHL_TXSC_ENTRY_NUM) {
2546 		PHL_ERR("[PHL][TXSC] wrong shortcut_id:%d, please check!\n", tx_req->shortcut_id);
2547 		return (u8 *)ptxsc;
2548 	}
2549 
2550 	ptxsc = &phl_sta->phl_txsc[tx_req->shortcut_id];
2551 
2552 	if ((tx_req->treq_type == RTW_PHL_TREQ_TYPE_CORE_TXSC)) {
2553 
2554 		if (ptxsc == NULL) {
2555 			PHL_ERR("[txsc][phl] fatal err: ptxsc = NULL, please check.\n");
2556 			return (u8 *)ptxsc;
2557 		}
2558 
2559 		if (!ptxsc->txsc_wd_cached) {
2560 			PHL_ERR("[txsc][phl] fatal err: txsc_wd_cached = 0, please check.\n");
2561 			return (u8 *)ptxsc;
2562 		}
2563 
2564 		_os_mem_cpy(phl_info, phl_pkt_req->wd_page, ptxsc->txsc_wd_cache, ptxsc->txsc_wd_len);
2565 		phl_pkt_req->wd_len = ptxsc->txsc_wd_len;
2566 
2567 		/* update pktlen in wd_page, wd_body[8:15] = pktsize */
2568 		#if 0
2569 		packet_len = cpu_to_le16(tx_req->mdata.pktlen);
2570 		_os_mem_cpy(phl_info, phl_pkt_req.wd_page+8, &packet_len, sizeof(u16));
2571 		#endif
2572 
2573 		ptxsc->txsc_cache_hit++;
2574 	}
2575 
2576 	return (u8 *)ptxsc;
2577 }
2578 
2579 enum rtw_phl_status
2580 _phl_txsc_add_shortcut(struct phl_info_t *phl_info, struct rtw_xmit_req *tx_req,
2581 								struct rtw_phl_pkt_req *phl_pkt_req, struct phl_txsc_entry *ptxsc)
2582 {
2583 
2584 	if (tx_req->shortcut_id >= PHL_TXSC_ENTRY_NUM) {
2585 		PHL_ERR("[PHL][TXSC] wrong shortcut_id:%d, please check.\n", tx_req->shortcut_id);
2586 		return RTW_PHL_STATUS_FAILURE;
2587 	}
2588 
2589 	if (ptxsc == NULL) {
2590 		PHL_ERR("[txsc][phl] fatal err: ptxsc = NULL, shortcut_id = %d, please check.\n", tx_req->shortcut_id);
2591 		return RTW_PHL_STATUS_FAILURE;
2592 	}
2593 
2594 	if (tx_req->treq_type & RTW_PHL_TREQ_TYPE_PHL_UPDATE_TXSC) {
2595 
2596 		_os_mem_set(phl_info, ptxsc, 0x0, sizeof(struct phl_txsc_entry));
2597 		_os_mem_cpy(phl_info, ptxsc->txsc_wd_cache, phl_pkt_req->wd_page, phl_pkt_req->wd_len);
2598 
2599 		ptxsc->txsc_wd_len = phl_pkt_req->wd_len;
2600 		ptxsc->txsc_wd_cached = true;
2601 
2602 		#if 0
2603 		PHL_PRINT("\n[txsc][phl] shortcut_id:%d, wd_page cached, len:%d. SMH: %u (%u)\n\n",
2604 			tx_req->shortcut_id, ptxsc->txsc_wd_len, tx_req->mdata.smh_en,
2605 			tx_req->treq_type);
2606 		#endif
2607 
2608 		tx_req->treq_type &= ~RTW_PHL_TREQ_TYPE_PHL_UPDATE_TXSC;
2609 		if (tx_req->treq_type != RTW_PHL_TREQ_TYPE_PHL_ADD_TXSC)
2610 			PHL_PRINT("Updated WD for request type %u\n", tx_req->treq_type);
2611 	}
2612 
2613 	return RTW_PHL_STATUS_SUCCESS;
2614 }
2615 #endif
2616 
2617 enum rtw_phl_status
2618 phl_prepare_tx_pcie(struct phl_info_t *phl_info, struct rtw_xmit_req *tx_req)
2619 {
2620 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2621 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
2622 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
2623 	struct rtw_wp_rpt_stats *rpt_stats =
2624 		(struct rtw_wp_rpt_stats *)hal_com->trx_stat.wp_rpt_stats;
2625 	struct hci_info_t *hci_info = NULL;
2626 	struct rtw_pkt_buf_list *pkt_buf = NULL;
2627 	struct rtw_wd_page_ring *wd_ring = NULL;
2628 	struct rtw_wd_page *wd_page = NULL;
2629 	struct rtw_phl_pkt_req phl_pkt_req;
2630 	void *ptr = NULL;
2631 	u16 packet_len = 0, wp_seq = 0;
2632 	u8 dma_ch = 0, i = 0;
2633 	u16 mid = 0;
2634 #ifdef CONFIG_PHL_TXSC
2635 	struct phl_txsc_entry *ptxsc = NULL;
2636 	struct rtw_phl_stainfo_t *phl_sta = rtw_phl_get_stainfo_by_macid(phl_info, tx_req->mdata.macid);
2637 #endif
2638 	FUNCIN_WSTS(pstatus);
2639 	do {
2640 		if (NULL == phl_info->hci) {
2641 			PHL_ERR("phl_info->hci is NULL!\n");
2642 			break;
2643 		}
2644 		hci_info = (struct hci_info_t *)phl_info->hci;
2645 		wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
2646 
2647 		if (NULL == tx_req)  {
2648 			PHL_ERR("tx_req is NULL!\n");
2649 			break;
2650 		}
2651 		mid = tx_req->mdata.macid;
2652 		dma_ch = tx_req->mdata.dma_ch;
2653 		wp_seq = wd_ring[dma_ch].wp_seq;
2654 
2655 		if (NULL != wd_ring[dma_ch].wp_tag[wp_seq].ptr) {
2656 			ptr = wd_ring[dma_ch].wp_tag[wp_seq].ptr;
2657 			PHL_ERR("wp_tag out of resource!\n");
2658 			PHL_ERR("stuck wp info:\n");
2659 			PHL_ERR("dma_ch = %d, wp_seq = 0x%x, ptr = %p!\n",
2660 				dma_ch, wp_seq, ptr);
2661 			PHL_ERR("wifi seq = %d\n",
2662 				((struct rtw_xmit_req *)ptr)->mdata.sw_seq);
2663 			_phl_dump_busy_wp(phl_info);
2664 			break;
2665 		}
2666 
2667 		pkt_buf = (struct rtw_pkt_buf_list *)&tx_req->pkt_list[0];
2668 		for (i = 0; i < tx_req->pkt_cnt; i++) {
2669 			packet_len += pkt_buf->length;
2670 			pkt_buf++;
2671 		}
2672 
2673 		tx_req->total_len = packet_len;
2674 
2675 		wd_page = query_idle_wd_page(phl_info, &wd_ring[dma_ch]);
2676 		if (NULL == wd_page) {
2677 			PHL_ERR("query idle wd page fail!\n");
2678 			PHL_ERR("dma_ch = %d, idle wd num = %d, "
2679 				"busy wd num = %d, pending wd num = %d\n",
2680 				dma_ch,
2681 				wd_ring[dma_ch].idle_wd_page_cnt,
2682 				wd_ring[dma_ch].busy_wd_page_cnt,
2683 				wd_ring[dma_ch].pending_wd_page_cnt);
2684 			if (wd_ring[dma_ch].busy_wd_page_cnt > MAX_WD_PAGE_NUM * 4 / 5)
2685 				rtw_hal_tx_dbg_status_dump(phl_info->hal);
2686 			break;
2687 		}
2688 		/* hana_todo */
2689 		_phl_fill_tx_meta_data(tx_req, packet_len);
2690 
2691 		phl_pkt_req.wd_page = wd_page->vir_addr;
2692 
2693 		phl_pkt_req.wp_seq = wp_seq;
2694 		phl_pkt_req.tx_req = tx_req;
2695 
2696 #ifdef CONFIG_PHL_TXSC
2697 		phl_pkt_req.wd_len = 0;
2698 		ptxsc = (struct phl_txsc_entry *)_phl_txsc_apply_shortcut(phl_info, tx_req, phl_sta, &phl_pkt_req);
2699 #endif
2700 
2701 		hstatus = rtw_hal_update_wd_page(phl_info->hal, &phl_pkt_req);
2702 		wd_page->buf_len = phl_pkt_req.wd_len;
2703 
2704 		if (RTW_HAL_STATUS_SUCCESS == hstatus) {
2705 			hci_info->wp_seq[mid] = phl_pkt_req.wp_seq;
2706 			enqueue_pending_wd_page(phl_info, &wd_ring[dma_ch],
2707 						wd_page, _tail);
2708 			tx_req->tx_time = _os_get_cur_time_ms();
2709 #ifdef CONFIG_PHL_TX_DBG
2710 			if (tx_req->tx_dbg.en_dbg) {
2711 				tx_req->tx_dbg.enq_pending_wd_t =
2712 						_os_get_cur_time_us();
2713 			}
2714 #endif /* CONFIG_PHL_TX_DBG */
2715 			_os_spinlock(phl_to_drvpriv(phl_info),
2716 				     &wd_ring[dma_ch].wp_tag_lock,
2717 				     _bh, NULL);
2718 			PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
2719 				  "update tx req(%p) in ch(%d) with wp_seq(0x%x) and wifi seq(%d)!\n",
2720 				  tx_req, dma_ch, wp_seq, tx_req->mdata.sw_seq);
2721 			wd_ring[dma_ch].wp_tag[wp_seq].ptr = (u8 *)tx_req;
2722 			rpt_stats[dma_ch].busy_cnt++;
2723 			_os_spinunlock(phl_to_drvpriv(phl_info),
2724 				       &wd_ring[dma_ch].wp_tag_lock,
2725 				       _bh, NULL);
2726 
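			/*
			 * Advance the per-channel wp_seq with wrap-around;
			 * 0 is skipped and appears to be kept as the
			 * reserved/idle marker (see WP_RESERVED_SEQ).
			 */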
2727 			wp_seq = (wp_seq + 1) % WP_MAX_SEQ_NUMBER;
2728 			if (0 == wp_seq)
2729 				wp_seq = 1;
2730 
2731 			wd_ring[dma_ch].wp_seq = wp_seq;
2732 
2733 			pstatus = RTW_PHL_STATUS_SUCCESS;
2734 
2735 			/* write back the cached WD page to memory before DMA */
2736 			if (wd_page->cache == VIRTUAL_ADDR) {
2737 				PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "[%s] wd page cache wback \n",
2738 					__FUNCTION__);
2739 				_os_cache_wback(phl_to_drvpriv(phl_info),
2740 					(_dma *)&wd_page->phy_addr_l,
2741 					(_dma *)&wd_page->phy_addr_h,
2742 					wd_page->buf_len, PCI_DMA_TODEVICE);
2743 			}
2744 
2745 #ifdef CONFIG_PHL_TXSC
2746 			_phl_txsc_add_shortcut(phl_info, tx_req, &phl_pkt_req, ptxsc);
2747 #endif
2748 
2749 			break;
2750 		} else {
2751 			rtw_release_target_wd_page(phl_info, &wd_ring[dma_ch],
2752 						wd_page);
2753 			pstatus = RTW_PHL_STATUS_FAILURE;
2754 			break;
2755 		}
2756 	} while(false);
2757 	FUNCOUT_WSTS(pstatus);
2758 	return pstatus;
2759 }
2760 
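/*
 * Move up to 'txcnt' WD pages from the pending list into the TX BD ring of
 * channel 'ch' (rtw_hal_update_txbd()), then kick the HW once with
 * rtw_hal_trigger_txstart() and decrement the cached cur_hw_res budget.
 * A page that cannot be programmed is pushed back to the head of the
 * pending list.
 */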
2761 static enum rtw_phl_status
2762 phl_handle_pending_wd(struct phl_info_t *phl_info,
2763 				struct rtw_wd_page_ring *wd_ring,
2764 				u16 txcnt, u8 ch)
2765 {
2766 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2767 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
2768 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
2769 	struct tx_base_desc *txbd = NULL;
2770 	struct rtw_wd_page *wd = NULL;
2771 	u16 cnt = 0;
2772 
2773 #ifdef RTW_WKARD_DYNAMIC_LTR
2774 	if (true != _phl_judge_act_ltr_switching_conditions(phl_info, ch)) {
2775 		_phl_act_ltr_update_stats(phl_info, false, ch,
2776 		                          wd_ring->pending_wd_page_cnt);
2777 		return RTW_PHL_STATUS_FAILURE;
2778 	} else {
2779 		_phl_act_ltr_update_stats(phl_info, true, ch,
2780 		                          wd_ring->pending_wd_page_cnt);
2781 	}
2782 #endif
2783 
2784 	txbd = (struct tx_base_desc *)hci_info->txbd_buf;
2785 	while (txcnt > cnt) {
2786 		wd = query_pending_wd_page(phl_info, wd_ring);
2787 
2788 		if (NULL == wd) {
2789 			pstatus = RTW_PHL_STATUS_RESOURCE;
2790 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "query Tx pending WD fail!\n");
2791 			break;
2792 		}
2793 
2794 		wd->ls = 1; /* tmp: set LS = 1 */
2795 		hstatus = rtw_hal_update_txbd(phl_info->hal, txbd, wd, ch, 1);
2796 		if (RTW_HAL_STATUS_SUCCESS == hstatus) {
2797 			enqueue_busy_wd_page(phl_info, wd_ring, wd, _tail);
2798 			pstatus = RTW_PHL_STATUS_SUCCESS;
2799 		} else {
2800 			enqueue_pending_wd_page(phl_info, wd_ring, wd, _first);
2801 			pstatus = RTW_PHL_STATUS_RESOURCE;
2802 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "update Tx BD fail!\n");
2803 			break;
2804 		}
2805 
2806 		cnt++;
2807 	}
2808 
2809 	if (RTW_PHL_STATUS_SUCCESS == pstatus) {
2810 #ifdef RTW_WKARD_DYNAMIC_LTR
2811 		_phl_switch_act_ltr(phl_info, ch);
2812 #endif
2813 		hstatus = rtw_hal_trigger_txstart(phl_info->hal, txbd, ch);
2814 		if (RTW_HAL_STATUS_SUCCESS != hstatus) {
2815 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "update txbd idx fail!\n");
2816 			pstatus = RTW_PHL_STATUS_FAILURE;
2817 		} else {
2818 			#ifdef CONFIG_POWER_SAVE
2819 			phl_ps_tx_pkt_ntfy(phl_info);
2820 			#endif
2821 			if (wd_ring->cur_hw_res > cnt)
2822 				wd_ring->cur_hw_res -= cnt;
2823 			else
2824 				wd_ring->cur_hw_res = 0;
2825 		}
2826 	}
2827 
2828 	return pstatus;
2829 }
2830 
2831 
2832 static enum rtw_phl_status
2833 phl_handle_busy_wd(struct phl_info_t *phl_info,
2834                    struct rtw_wd_page_ring *wd_ring, u16 hw_idx)
2835 {
2836 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2837 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
2838 	void *drv_priv = phl_to_drvpriv(phl_info);
2839 	_os_list *list = &wd_ring->busy_wd_page_list;
2840 	struct rtw_wd_page *wd = NULL;
2841 	u16 bndy = (u16)hal_com->bus_cap.txbd_num;
2842 	u16 target = 0;
2843 	u16 release_num = 0;
2844 
2845 	do {
2846 		_os_spinlock(drv_priv, &wd_ring->busy_lock, _bh, NULL);
2847 
2848 		if (list_empty(list)) {
2849 			pstatus = RTW_PHL_STATUS_SUCCESS;
2850 			_os_spinunlock(drv_priv, &wd_ring->busy_lock, _bh, NULL);
2851 			break;
2852 		}
2853 
2854 		if (wd_ring->busy_wd_page_cnt > (bndy - 1)) {
2855 			release_num = wd_ring->busy_wd_page_cnt - (bndy - 1);
2856 			_os_spinunlock(drv_priv, &wd_ring->busy_lock, _bh, NULL);
2857 			pstatus = rtw_release_busy_wd_page(phl_info, wd_ring,
2858 								release_num);
2859 
2860 			if (RTW_PHL_STATUS_SUCCESS != pstatus)
2861 				break;
2862 			else
2863 				_os_spinlock(drv_priv, &wd_ring->busy_lock, _bh, NULL);
2864 		}
2865 
2866 		wd = list_first_entry(list, struct rtw_wd_page, list);
2867 		target = wd->host_idx;
2868 
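		/*
		 * Ring distance (inclusive) from the oldest busy WD's
		 * host_idx to the HW read index, modulo the BD ring size;
		 * e.g. with bndy = 256, target = 250 and hw_idx = 3 this
		 * releases 10 entries.
		 */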
2869 		if (hw_idx >= target)
2870 			release_num = ((hw_idx - target) + 1) % bndy;
2871 		else
2872 			release_num = ((bndy - target) + (hw_idx + 1)) % bndy;
2873 
2874 		_os_spinunlock(drv_priv, &wd_ring->busy_lock, _bh, NULL);
2875 
2876 		pstatus = rtw_release_busy_wd_page(phl_info, wd_ring,
2877 							release_num);
2878 
2879 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2880 			break;
2881 	} while (false);
2882 
2883 	return pstatus;
2884 }
2885 
2886 enum rtw_phl_status phl_recycle_busy_wd(struct phl_info_t *phl_info)
2887 {
2888 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2889 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
2890 	struct rtw_wd_page_ring *wd_ring = NULL;
2891 	u16 hw_res = 0, host_idx = 0, hw_idx = 0;
2892 	u8 ch = 0;
2893 	FUNCIN_WSTS(pstatus);
2894 	wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
2895 
2896 	for (ch = 0; ch < hci_info->total_txch_num; ch++) {
2897 		hw_res = rtw_hal_tx_res_query(phl_info->hal, ch, &host_idx,
2898 							&hw_idx);
2899 		pstatus = phl_handle_busy_wd(phl_info, &wd_ring[ch], hw_idx);
2900 	}
2901 
2902 	FUNCOUT_WSTS(pstatus);
2903 	return pstatus;
2904 }
2905 
2906 static enum rtw_phl_status
2907 phl_handle_busy_h2c(struct phl_info_t *phl_info,
2908 			struct phl_h2c_pkt_pool *h2c_pool, u16 hw_idx)
2909 {
2910 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2911 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
2912 	void *drv_priv = phl_to_drvpriv(phl_info);
2913 	struct phl_queue *queue = &h2c_pool->busy_h2c_pkt_list;
2914 	_os_list *list = &h2c_pool->busy_h2c_pkt_list.queue;
2915 	struct rtw_h2c_pkt *h2c = NULL;
2916 	u16 bndy = (u16)hal_com->bus_cap.txbd_num;
2917 	u16 target = 0;
2918 	u16 release_num = 0;
2919 	u16 tmp_cnt = 0;
2920 
2921 	do {
2922 		_os_spinlock(drv_priv, &queue->lock, _bh, NULL);
2923 
2924 		if (list_empty(list)) {
2925 			pstatus = RTW_PHL_STATUS_SUCCESS;
2926 
2927 			_os_spinunlock(drv_priv, &queue->lock, _bh, NULL);
2928 			break;
2929 		}
2930 
2931 		tmp_cnt = (u16)queue->cnt;
2932 		if (tmp_cnt > (bndy - 1)) {
2933 			release_num = tmp_cnt - (bndy - 1);
2934 			_os_spinunlock(drv_priv, &queue->lock, _bh, NULL);
2935 			pstatus = phl_release_busy_h2c_pkt(phl_info, h2c_pool,
2936 							release_num);
2937 
2938 			if (RTW_PHL_STATUS_SUCCESS != pstatus)
2939 				break;
2940 			else
2941 				_os_spinlock(drv_priv, &queue->lock, _bh, NULL);
2942 		}
2943 
2944 		h2c = list_first_entry(list, struct rtw_h2c_pkt, list);
2945 		target = h2c->host_idx;
2946 
2947 		if (hw_idx >= target)
2948 			release_num = ((hw_idx - target) + 1) % bndy;
2949 		else
2950 			release_num = ((bndy - target) + (hw_idx + 1)) % bndy;
2951 
2952 		_os_spinunlock(drv_priv, &queue->lock, _bh, NULL);
2953 
2954 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : release_num %d.\n", __func__, release_num);
2955 
2956 		pstatus = phl_release_busy_h2c_pkt(phl_info, h2c_pool,
2957 							release_num);
2958 
2959 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
2960 			break;
2961 	} while (false);
2962 
2963 	return pstatus;
2964 }
2965 
2966 enum rtw_phl_status phl_recycle_busy_h2c(struct phl_info_t *phl_info)
2967 {
2968 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2969 	struct phl_h2c_pkt_pool *h2c_pool = NULL;
2970 	u16 hw_res = 0, host_idx = 0, hw_idx = 0;
2971 	u8 fwcmd_queue_idx = 0;
2972 
2973 	FUNCIN_WSTS(pstatus);
2974 	h2c_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
2975 	_os_spinlock(phl_to_drvpriv(phl_info), &h2c_pool->recycle_lock, _bh, NULL);
2976 	fwcmd_queue_idx = rtw_hal_get_fwcmd_queue_idx(phl_info->hal);
2977 
2978 	hw_res = rtw_hal_tx_res_query(phl_info->hal, fwcmd_queue_idx, &host_idx,
2979 						&hw_idx);
2980 	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : host idx %d, hw_idx %d.\n",
2981 			  __func__, host_idx, hw_idx);
2982 	pstatus = phl_handle_busy_h2c(phl_info, h2c_pool, hw_idx);
2983 	_os_spinunlock(phl_to_drvpriv(phl_info), &h2c_pool->recycle_lock, _bh, NULL);
2984 	FUNCOUT_WSTS(pstatus);
2985 	return pstatus;
2986 }
2987 
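/*
 * Per-channel TX pump: refresh the free TX BD count (cur_hw_res) and recycle
 * completed busy WD pages when the cached budget runs low, then push as many
 * pending WD pages as the HW currently has room for via
 * phl_handle_pending_wd().
 */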
2988 static enum rtw_phl_status phl_tx_pcie(struct phl_info_t *phl_info)
2989 {
2990 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
2991 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
2992 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
2993 	struct rtw_wd_page_ring *wd_ring = NULL;
2994 	u16 hw_res = 0, host_idx = 0, hw_idx = 0, txcnt = 0;
2995 	u8 ch = 0;
2996 	FUNCIN_WSTS(pstatus);
2997 	wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
2998 
2999 	for (ch = 0; ch < hci_info->total_txch_num; ch++) {
3000 #ifndef RTW_WKARD_WIN_TRX_BALANCE
3001 		/* if wd_ring is empty, do not read hw_idx for saving cpu cycle */
3002 		if (wd_ring[ch].pending_wd_page_cnt == 0 && wd_ring[ch].busy_wd_page_cnt == 0){
3003 			pstatus = RTW_PHL_STATUS_SUCCESS;
3004 			continue;
3005 		}
3006 #endif
3007 		/* hana_todo skip fwcmd queue */
3008 		if (wd_ring[ch].cur_hw_res < hal_com->bus_cap.read_txbd_th ||
3009 		    wd_ring[ch].pending_wd_page_cnt > wd_ring[ch].cur_hw_res) {
3010 			hw_res = rtw_hal_tx_res_query(phl_info->hal, ch, &host_idx,
3011 			                              &hw_idx);
3012 			wd_ring[ch].cur_hw_res = hw_res;
3013 			pstatus = phl_handle_busy_wd(phl_info, &wd_ring[ch], hw_idx);
3014 
3015 			if (RTW_PHL_STATUS_FAILURE == pstatus)
3016 				continue;
3017 		} else {
3018 			hw_res = wd_ring[ch].cur_hw_res;
3019 		}
3020 
3021 		if (list_empty(&wd_ring[ch].pending_wd_page_list))
3022 			continue;
3023 
3024 		if (0 == hw_res) {
3025 			continue;
3026 
3027 		} else {
3028 			txcnt = (hw_res < wd_ring[ch].pending_wd_page_cnt) ?
3029 				hw_res : wd_ring[ch].pending_wd_page_cnt;
3030 
3031 			pstatus = phl_handle_pending_wd(phl_info, &wd_ring[ch],
3032 							txcnt, ch);
3033 
3034 			if (RTW_PHL_STATUS_SUCCESS != pstatus)
3035 				continue;
3036 		}
3037 	}
3038 	FUNCOUT_WSTS(pstatus);
3039 	return pstatus;
3040 }
3041 
3042 
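/*
 * Re-arm 'refill_cnt' RX BDs on channel 'ch' with idle RX buffers and, if at
 * least one BD was refilled, advance the RX BD index through
 * rtw_hal_notify_rxdone() so the HW can reuse them.
 */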
3043 enum rtw_phl_status _phl_refill_rxbd(struct phl_info_t *phl_info,
3044 					void* rx_buf_ring,
3045 					struct rx_base_desc *rxbd,
3046 					u8 ch, u16 refill_cnt)
3047 {
3048 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3049 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
3050 	struct rtw_rx_buf *rxbuf = NULL;
3051 	u16 cnt = 0;
3052 
3053 	for (cnt = 0; cnt < refill_cnt; cnt++) {
3054 		rxbuf = query_idle_rx_buf(phl_info, rx_buf_ring);
3055 		if (NULL == rxbuf) {
3056 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3057 				"[WARNING] there is no resource for rx bd refill setting\n");
3058 			pstatus = RTW_PHL_STATUS_RESOURCE;
3059 			break;
3060 		}
3061 		hstatus = rtw_hal_update_rxbd(phl_info->hal, rxbd, rxbuf);
3062 		if (RTW_HAL_STATUS_SUCCESS != hstatus) {
3063 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3064 				"[WARNING] update rxbd fail\n");
3065 			pstatus = RTW_PHL_STATUS_FAILURE;
3066 			break;
3067 		}
3068 		enqueue_busy_rx_buf(phl_info, rx_buf_ring, rxbuf, _tail);
3069 		pstatus = RTW_PHL_STATUS_SUCCESS;
3070 	}
3071 
3072 	/* hana_todo */
3073 	/* wmb(); */
3074 
3075 	if (cnt) {
3076 		hstatus = rtw_hal_notify_rxdone(phl_info->hal, rxbd, ch, cnt);
3077 		if (RTW_HAL_STATUS_SUCCESS != hstatus) {
3078 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3079 				"[WARNING] notify rxdone fail\n");
3080 			pstatus = RTW_PHL_STATUS_FAILURE;
3081 		}
3082 	}
3083 	return pstatus;
3084 }
3085 
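/*
 * Fetch one received buffer from an RX channel's busy list: invalidate the
 * cached RX BD info area, check the HW ready flag, parse the RX BD to get the
 * written length, invalidate that payload range, then let the HAL fill the
 * rtw_phl_rx_pkt. A buffer that fails parsing is dropped and recycled; a
 * not-yet-ready buffer is normally re-queued to the head of the busy list.
 */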
3086 enum rtw_phl_status phl_get_single_rx(struct phl_info_t *phl_info,
3087 					 struct rtw_rx_buf_ring *rx_buf_ring,
3088 					 u8 ch,
3089 					 struct rtw_phl_rx_pkt **pphl_rx)
3090 {
3091 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3092 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
3093 	struct rtw_phl_rx_pkt *phl_rx = NULL;
3094 	struct rtw_rx_buf *rxbuf = NULL;
3095 	struct hal_spec_t *hal_spec = phl_get_ic_spec(phl_info->phl_com);
3096 	u16 buf_size = 0;
3097 
3098 	phl_rx = rtw_phl_query_phl_rx(phl_info);
3099 	rxbuf = query_busy_rx_buf(phl_info, rx_buf_ring);
3100 
3101 	do {
3102 		if (NULL == phl_rx) {
3103 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "%s(%d) phl_rx out of resource\n",
3104 				__func__, __LINE__);
3105 			pstatus = RTW_PHL_STATUS_FAILURE;
3106 			break;
3107 		}
3108 		if (NULL == rxbuf) {
3109 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "%s(%d) [WARNING] queried NULL rxbuf\n",
3110 				__func__, __LINE__);
3111 			pstatus = RTW_PHL_STATUS_FAILURE;
3112 			break;
3113 		}
3114 
3115 		phl_rx->rxbuf_ptr = (u8 *)rxbuf;
3116 
3117 		if (rxbuf->cache == VIRTUAL_ADDR) {
3118 			_os_cache_inv(phl_to_drvpriv(phl_info),
3119 				      (_dma *)&rxbuf->phy_addr_l,
3120 				      (_dma *)&rxbuf->phy_addr_h,
3121 				      hal_spec->rx_bd_info_sz,
3122 				      PCI_DMA_FROMDEVICE);
3123 		}
3124 		if (true != rtw_hal_check_rxrdy(phl_info->phl_com,
3125 						phl_info->hal,
3126 						rxbuf->vir_addr,
3127 						ch)) {
3128 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "RX:%s(%d) packet not ready\n",
3129 					__func__, __LINE__);
3130 			pstatus = RTW_PHL_STATUS_FAILURE;
3131 #ifdef RTW_WKARD_98D_RXTAG
3132 			phl_rx->rxbuf_ptr = (u8 *)rxbuf;
3133 			goto drop;
3134 #endif
3135 			break;
3136 		}
3137 
3138 		if (true != rtw_hal_handle_rxbd_info(phl_info->hal,
3139 								rxbuf->vir_addr,
3140 								&buf_size))
3141 			goto drop;
3142 
3143 #ifdef CONFIG_DYNAMIC_RX_BUF
3144 		phl_rx->r.os_priv = rxbuf->os_priv;
3145 #endif
3146 
3147 		/* hana_todo handle rx amsdu */
3148 		if (rxbuf->cache == VIRTUAL_ADDR) {
3149 			_os_cache_inv(phl_to_drvpriv(phl_info),
3150 				      (_dma *)&rxbuf->phy_addr_l,
3151 				      (_dma *)&rxbuf->phy_addr_h,
3152 				      buf_size,
3153 				      PCI_DMA_FROMDEVICE);
3154 		}
3155 
3156 		rxbuf->hw_write_size = buf_size;
3157 		hstatus = rtw_hal_handle_rx_buffer(phl_info->phl_com,
3158 							phl_info->hal,
3159 							rxbuf->vir_addr,
3160 							buf_size, phl_rx);
3161 
3162 		if (RTW_HAL_STATUS_SUCCESS != hstatus)
3163 			goto drop;
3164 
3165 		pstatus = RTW_PHL_STATUS_SUCCESS;
3166 
3167 	} while (false);
3168 
3169 	if (RTW_PHL_STATUS_SUCCESS != pstatus) {
3170 		/* hana_todo cache validate api */
3171 		if (NULL != rxbuf) {
3172 			enqueue_busy_rx_buf(phl_info, rx_buf_ring, rxbuf, _first);
3173 		}
3174 
3175 		if (NULL != phl_rx) {
3176 			phl_release_phl_rx(phl_info, phl_rx);
3177 			phl_rx = NULL;
3178 		}
3179 	}
3180 	*pphl_rx = phl_rx;
3181 
3182 	return pstatus;
3183 
3184 drop:
3185 #ifdef CONFIG_DYNAMIC_RX_BUF
3186 	/* avoid re-allocating buffer carried on rxbuf */
3187 	phl_rx->type = RTW_RX_TYPE_MAX;
3188 #endif
3189 	phl_rx->r.mdata.dma_ch = ch;
3190 	phl_recycle_rx_buf(phl_info, phl_rx);
3191 
3192 	return RTW_PHL_STATUS_FRAME_DROP;
3193 }
3194 
3195 #define PHL_RX_HEADROOM 0
3196 void phl_rx_handle_normal(struct phl_info_t *phl_info,
3197 						 struct rtw_phl_rx_pkt *phl_rx)
3198 {
3199 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3200 	_os_list frames;
3201 #ifdef CONFIG_DYNAMIC_RX_BUF
3202 	struct rtw_rx_buf *rxbuf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
3203 #endif
3204 	FUNCIN_WSTS(pstatus);
3205 	INIT_LIST_HEAD(&frames);
3206 
3207 	/* unmap rx buffer */
3208 #ifdef CONFIG_DYNAMIC_RX_BUF
3209 	_os_pkt_buf_unmap_rx(phl_to_drvpriv(phl_info), rxbuf->phy_addr_l,
3210 		rxbuf->phy_addr_h, RX_BUF_SIZE+PHL_RX_HEADROOM);
3211 #endif
3212 
3213 	/* stat : rx rate counter */
3214 	if (phl_rx->r.mdata.rx_rate <= RTW_DATA_RATE_HE_NSS4_MCS11)
3215 		phl_info->phl_com->phl_stats.rx_rate_nmr[phl_rx->r.mdata.rx_rate]++;
3216 
3217 	pstatus = phl_rx_reorder(phl_info, phl_rx, &frames);
3218 	if (pstatus == RTW_PHL_STATUS_SUCCESS)
3219 		phl_handle_rx_frame_list(phl_info, &frames);
3220 	else
3221 		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]handle normal rx error (0x%08X)!\n", pstatus);
3222 
3223 	FUNCOUT_WSTS(pstatus);
3224 }
3225 
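/*
 * Per-WP-report accounting: bucket each recycled TX request by its completion
 * status (done / retry-limit / lifetime-drop / macid-drop) and, when the
 * turnaround time exceeds WP_DELAY_THRES_MS, into the corresponding delay_*
 * counters instead.
 */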
3226 void _phl_wp_rpt_statistics(struct phl_info_t *phl_info, u8 ch, u16 wp_seq,
3227 			    u8 txsts, struct rtw_xmit_req *treq)
3228 {
3229 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
3230 	struct rtw_wp_rpt_stats *rpt_stats = NULL;
3231 	u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
3232 
3233 	rpt_stats = (struct rtw_wp_rpt_stats *)hal_com->trx_stat.wp_rpt_stats;
3234 
3235 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3236 		  "recycle tx req(%p) in ch(%d) with wp_seq(0x%x) and wifi seq(%d)!\n",
3237 		  treq, ch, wp_seq, treq->mdata.sw_seq);
3238 
3239 #ifdef CONFIG_PHL_TX_DBG
3240 	if (treq->tx_dbg.en_dbg) {
3241 		treq->tx_dbg.recycle_wd_t = _os_get_cur_time_us();
3242 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "tx dbg rpt: macid(%02d), tx_dbg_pkt_type(%02d), txsts(%d), sw_seq(%04d), total tx time(%08d) us\n",
3243 			treq->mdata.macid, treq->tx_dbg.tx_dbg_pkt_type, txsts,
3244 			treq->mdata.sw_seq, phl_get_passing_time_us(
3245 			treq->tx_dbg.core_add_tx_t));
3246 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "tx dbg rpt: core_add_tx_t(0x%08x), enq_pending_wd_t(0x%08x), recycle_wd_t(0x%08x)\n",
3247 			treq->tx_dbg.core_add_tx_t,
3248 			treq->tx_dbg.enq_pending_wd_t,
3249 			treq->tx_dbg.recycle_wd_t);
3250 
3251 		if(TX_STATUS_TX_DONE != txsts) {
3252 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "tx dbg rpt: tx fail(%d)\n", txsts);
3253 			if(NULL != treq->tx_dbg.statecb) {
3254 				treq->tx_dbg.statecb(phl_to_drvpriv(phl_info), treq->tx_dbg.pctx, false);
3255 			}
3256 		} else {
3257 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "tx dbg rpt: tx done(%d)\n", txsts);
3258 			if(NULL != treq->tx_dbg.statecb) {
3259 				treq->tx_dbg.statecb(phl_to_drvpriv(phl_info), treq->tx_dbg.pctx, true);
3260 			}
3261 		}
3262 	}
3263 #endif /* CONFIG_PHL_TX_DBG */
3264 
3265 	if (cur_time >= treq->tx_time)
3266 		diff_t = cur_time - treq->tx_time;
3267 	else
3268 		diff_t = RTW_U32_MAX - treq->tx_time + cur_time;
3269 
3270 	if (diff_t > WP_DELAY_THRES_MS) {
3271 		if (TX_STATUS_TX_DONE == txsts)
3272 			rpt_stats[ch].delay_tx_ok_cnt++;
3273 		else if (TX_STATUS_TX_FAIL_REACH_RTY_LMT == txsts)
3274 			rpt_stats[ch].delay_rty_fail_cnt++;
3275 		else if (TX_STATUS_TX_FAIL_LIFETIME_DROP == txsts)
3276 			rpt_stats[ch].delay_lifetime_drop_cnt++;
3277 		else if (TX_STATUS_TX_FAIL_MACID_DROP == txsts)
3278 			rpt_stats[ch].delay_macid_drop_cnt++;
3279 	} else {
3280 		if (TX_STATUS_TX_DONE == txsts)
3281 			rpt_stats[ch].tx_ok_cnt++;
3282 		else if (TX_STATUS_TX_FAIL_REACH_RTY_LMT == txsts)
3283 			rpt_stats[ch].rty_fail_cnt++;
3284 		else if (TX_STATUS_TX_FAIL_LIFETIME_DROP == txsts)
3285 			rpt_stats[ch].lifetime_drop_cnt++;
3286 		else if (TX_STATUS_TX_FAIL_MACID_DROP == txsts)
3287 			rpt_stats[ch].macid_drop_cnt++;
3288 	}
3289 }
3290 
3291 
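/*
 * _phl_wp_rpt_chk_txsts() - log the details of a failed wp report (retry
 * limit, lifetime drop, macid drop, sw drop or unknown), dump the first
 * MAC_HDR_LEN bytes of the frame for debugging, and invoke the optional
 * txfb callback with the final tx status.
 */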
3292 void _phl_wp_rpt_chk_txsts(struct phl_info_t *phl_info, u8 ch, u16 wp_seq,
3293 			    u8 txsts, struct rtw_xmit_req *treq)
3294 {
3295 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
3296 	struct rtw_wp_rpt_stats *rpt_stats = NULL;
3297 	struct rtw_pkt_buf_list *pkt_buf = (struct rtw_pkt_buf_list *)treq->pkt_list;
3298 	int i;
3299 
3300 	rpt_stats = (struct rtw_wp_rpt_stats *)hal_com->trx_stat.wp_rpt_stats;
3301 
3302 	if(TX_STATUS_TX_DONE != txsts) {
3303 		if (TX_STATUS_TX_FAIL_REACH_RTY_LMT == txsts) {
3304 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3305 				"this wp is tx fail (REACH_RTY_LMT): wp(%p), ch(%d), wp_seq(0x%x), macid(%d), Sw SN(%d), tid(%d), Rty_lmt_en/cnt(%d/%d)!\n",
3306 				treq, ch, wp_seq, treq->mdata.macid,
3307 				treq->mdata.sw_seq, treq->mdata.tid,
3308 				treq->mdata.data_tx_cnt_lmt_en,
3309 				treq->mdata.data_tx_cnt_lmt);
3310 		} else if (TX_STATUS_TX_FAIL_LIFETIME_DROP == txsts) {
3311 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3312 				"this wp is tx fail (LIFETIME_DROP): wp(%p), ch(%d), wp_seq(0x%x), macid(%d), Sw SN(%d), tid(%d), Rty_lmt_en/cnt(%d/%d)!\n",
3313 				treq, ch, wp_seq, treq->mdata.macid,
3314 				treq->mdata.sw_seq, treq->mdata.tid,
3315 				treq->mdata.data_tx_cnt_lmt_en,
3316 				treq->mdata.data_tx_cnt_lmt);
3317 		} else if (TX_STATUS_TX_FAIL_MACID_DROP == txsts) {
3318 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3319 				"this wp is tx fail (MACID_DROP): wp(%p), ch(%d), wp_seq(0x%x), macid(%d), Sw SN(%d), tid(%d), Rty_lmt_en/cnt(%d/%d)!\n",
3320 				treq, ch, wp_seq, treq->mdata.macid,
3321 				treq->mdata.sw_seq, treq->mdata.tid,
3322 				treq->mdata.data_tx_cnt_lmt_en,
3323 				treq->mdata.data_tx_cnt_lmt);
3324 		} else if(TX_STATUS_TX_FAIL_SW_DROP == txsts) {
3325 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3326 				"this wp is tx fail (SW_DROP): wp(%p), ch(%d), wp_seq(0x%x), macid(%d), Sw SN(%d), tid(%d), Rty_lmt_en/cnt(%d/%d)!\n",
3327 				treq, ch, wp_seq, treq->mdata.macid,
3328 				treq->mdata.sw_seq, treq->mdata.tid,
3329 				treq->mdata.data_tx_cnt_lmt_en,
3330 				treq->mdata.data_tx_cnt_lmt);
3331 		} else {
3332 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
3333 				"this wp is tx fail (UNKNOWN(%d)): wp(%p), ch(%d), wp_seq(0x%x), macid(%d), Sw SN(%d), tid(%d), Rty_lmt_en/cnt(%d/%d)!\n",
3334 				txsts, treq, ch, wp_seq, treq->mdata.macid,
3335 				treq->mdata.sw_seq, treq->mdata.tid,
3336 				treq->mdata.data_tx_cnt_lmt_en,
3337 				treq->mdata.data_tx_cnt_lmt);
3338 		}
3339 
3340 		/* dump tx fail mac hdr */
3341 		if(MAC_HDR_LEN <= pkt_buf[0].length) {
3342 			PHL_DATA(COMP_PHL_XMIT, _PHL_INFO_, "=== Dump Tx MAC HDR ===");
3343 			for (i = 0; i < MAC_HDR_LEN; i++) {
3344 				if (!(i % 8))
3345 					PHL_DATA(COMP_PHL_XMIT, _PHL_INFO_, "\n");
3346 				PHL_DATA(COMP_PHL_XMIT, _PHL_INFO_, "%02X ", pkt_buf[0].vir_addr[i]);
3347 			}
3348 			PHL_DATA(COMP_PHL_XMIT, _PHL_INFO_, "\n");
3349 		}
3350 	}
3351 
3352 	if (treq->txfb) {
3353 		treq->txfb->txsts = txsts;
3354 		if (treq->txfb->txfb_cb)
3355 			treq->txfb->txfb_cb(treq->txfb);
3356 	}
3357 }
3358 
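/*
 * phl_recycle_payload() - recycle the tx payload referenced by a wp report.
 * The rtw_xmit_req previously stashed in wd_ring[dma_ch].wp_tag[wp_seq] is
 * looked up, statistics and tx-status checks are run, and the core layer is
 * asked to recycle it via evt_ops->tx_recycle() (or tx_test_recycle() for
 * test-pattern requests).  On success the wp_tag slot is cleared and the
 * busy count decremented under wp_tag_lock.
 */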
3359 void phl_recycle_payload(struct phl_info_t *phl_info, u8 dma_ch, u16 wp_seq,
3360 			 u8 txsts)
3361 {
3362 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
3363 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
3364 	struct rtw_wp_rpt_stats *rpt_stats =
3365 		(struct rtw_wp_rpt_stats *)hal_com->trx_stat.wp_rpt_stats;
3366 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
3367 	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
3368 	struct rtw_wd_page_ring *wd_ring = NULL;
3369 	struct rtw_xmit_req *treq = NULL;
3370 	u16 macid = 0;
3371 
3372 	wd_ring = (struct rtw_wd_page_ring *)hci_info->wd_ring;
3373 	treq = (struct rtw_xmit_req *)wd_ring[dma_ch].wp_tag[wp_seq].ptr;
3374 
3375 	if (NULL == treq)
3376 		goto end;
3377 
3378 	macid = treq->mdata.macid;
3379 
3380 	_phl_wp_rpt_statistics(phl_info, dma_ch, wp_seq, txsts, treq);
3381 	_phl_wp_rpt_chk_txsts(phl_info, dma_ch, wp_seq, txsts, treq);
3382 
3383 	if (RTW_PHL_TREQ_TYPE_TEST_PATTERN == treq->treq_type) {
3384 		if (NULL == ops->tx_test_recycle)
3385 			goto end;
3386 		PHL_INFO("call tx_test_recycle\n");
3387 		sts = ops->tx_test_recycle(phl_info, treq);
3388 	} else if (RTW_PHL_TREQ_TYPE_NORMAL == treq->treq_type
3389 #if defined(CONFIG_CORE_TXSC) || defined(CONFIG_PHL_TXSC)
3390 		   || RTW_PHL_TREQ_TYPE_CORE_TXSC == treq->treq_type
3391 		   || RTW_PHL_TREQ_TYPE_PHL_ADD_TXSC == treq->treq_type
3392 #endif
3393 	) {
3394 		if (NULL == ops->tx_recycle)
3395 			goto end;
3396 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "call tx_recycle\n");
3397 		sts = ops->tx_recycle(phl_to_drvpriv(phl_info), treq);
3398 	}
3399 
3400 end:
3401 	if (RTW_PHL_STATUS_SUCCESS != sts) {
3402 		PHL_WARN("tx recycle fail\n");
3403 		rpt_stats[dma_ch].recycle_fail_cnt++;
3404 	} else {
3405 		_os_spinlock(phl_to_drvpriv(phl_info),
3406 			     &wd_ring[dma_ch].wp_tag_lock,
3407 			     _bh, NULL);
3408 		wd_ring[dma_ch].wp_tag[wp_seq].ptr = NULL;
3409 		rpt_stats[dma_ch].busy_cnt--;
3410 
3411 #ifdef RTW_WKARD_DYNAMIC_LTR
3412 		if (true ==
3413 		    _phl_judge_idle_ltr_switching_conditions(phl_info, macid))
3414 			_phl_switch_idle_ltr(phl_info, rpt_stats);
3415 #endif
3416 		_os_spinunlock(phl_to_drvpriv(phl_info),
3417 			       &wd_ring[dma_ch].wp_tag_lock,
3418 			       _bh, NULL);
3419 	}
3420 	/* phl_indic_pkt_complete(phl_info); */
3421 }
3422 
3423 /**
3424  * Use this function to check whether a recycle report is valid
3425  * @wp_seq: the wp sequence parsed from the target report
3426  * @phl_rx: rx structure holding the starting vir_addr of the whole rxbuf
3427  * return true if this recycle report is valid;
3428  * otherwise, dump the rx buffer for debugging and return false
3429  */
3430 bool _phl_chk_wp_report(u16 wp_seq, struct rtw_phl_rx_pkt *phl_rx)
3431 {
3432 	bool ret = false;
3433 	u32 i = 0, len = 0;
3434 	struct rtw_rx_buf *rxbuf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
3435 
3436 	if (wp_seq < WP_MAX_SEQ_NUMBER) {
3437 		ret = true;
3438 	} else {
3439 		ret = false;
3440 
3441 		PHL_WARN("ERROR Recycle Report!! got recycle report with invalid wp_seq(%d), skip this offset and get the next one\n",
3442 		         wp_seq);
3443 		PHL_DATA(COMP_PHL_DBG, _PHL_INFO_, "=== Dump ERROR Recycle Report ===");
3444 		len = (rxbuf->hw_write_size > 0x200) ?
3445 			0x200 : rxbuf->hw_write_size;
3446 		for (i = 0; i < len; i++) {
3447 			if (!(i % 8))
3448 				PHL_DATA(COMP_PHL_DBG, _PHL_INFO_, "\n");
3449 			PHL_DATA(COMP_PHL_DBG, _PHL_INFO_, "%02X ",
3450 			         rxbuf->vir_addr[i]);
3451 		}
3452 		PHL_DATA(COMP_PHL_DBG, _PHL_INFO_, "\n");
3453 	}
3454 
3455 	return ret;
3456 }
3457 
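/*
 * _phl_rx_handle_wp_report() - walk a wp release report buffer.  Each entry
 * is parsed by rtw_hal_handle_wp_rpt(), which returns the entry size; the
 * wp_seq is validated by _phl_chk_wp_report() and the matching payload is
 * recycled.  The sw retry case currently takes the same recycle path
 * (see the hana_todo note below).
 */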
3458 void _phl_rx_handle_wp_report(struct phl_info_t *phl_info,
3459                               struct rtw_phl_rx_pkt *phl_rx)
3460 {
3461 	struct rtw_recv_pkt *r = &phl_rx->r;
3462 	u8 *pkt = NULL;
3463 	u16 pkt_len = 0;
3464 	u16 wp_seq = 0, rsize = 0;
3465 	u8 sw_retry = 0, dma_ch = 0, txsts = 0;
3466 	u8 macid = 0, ac_queue = 0;
3467 
3468 	pkt = r->pkt_list[0].vir_addr;
3469 	pkt_len = r->pkt_list[0].length;
3470 
3471 	while (pkt_len > 0) {
3472 		rsize = rtw_hal_handle_wp_rpt(phl_info->hal, pkt, pkt_len,
3473 					      &sw_retry, &dma_ch, &wp_seq,
3474 					      &macid, &ac_queue, &txsts);
3475 
3476 		if (0 == rsize)
3477 			break;
3478 
3479 		if (false == _phl_chk_wp_report(wp_seq, phl_rx)) {
3480 			pkt += rsize;
3481 			pkt_len -= rsize;
3482 			continue;
3483 		}
3484 
3485 		#ifdef CONFIG_PHL_RELEASE_RPT_ENABLE
3486 		phl_rx_wp_report_record_sts(phl_info, macid, ac_queue, txsts);
3487 		#endif
3488 
3489 		if (false == sw_retry) {
3490 			phl_recycle_payload(phl_info, dma_ch, wp_seq, txsts);
3491 		} else {
3492 			/* hana_todo handle sw retry */
3493 			phl_recycle_payload(phl_info, dma_ch, wp_seq, txsts);
3494 		}
3495 		pkt += rsize;
3496 		pkt_len -= rsize;
3497 	}
3498 }
3499 
3500 
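/*
 * phl_rx_process_pcie() - dispatch one completed rx descriptor by type:
 * wifi packets go through the normal rx path (optionally waiting for the
 * per-packet phy status), wp release reports are handled and their rx buffer
 * recycled, PPDU status is processed, and all other types simply recycle
 * the rx buffer.
 */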
3501 static void phl_rx_process_pcie(struct phl_info_t *phl_info,
3502 							struct rtw_phl_rx_pkt *phl_rx)
3503 {
3504 	switch (phl_rx->type) {
3505 	case RTW_RX_TYPE_WIFI:
3506 #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
3507 		if (false == phl_rx_proc_wait_phy_sts(phl_info, phl_rx)) {
3508 			PHL_TRACE(COMP_PHL_PSTS, _PHL_DEBUG_,
3509 				  "phl_rx_proc_wait_phy_sts() return false \n");
3510 			phl_rx_handle_normal(phl_info, phl_rx);
3511 		}
3512 #else
3513 		phl_rx_handle_normal(phl_info, phl_rx);
3514 #endif
3515 		break;
3516 	case RTW_RX_TYPE_TX_WP_RELEASE_HOST:
3517 		_phl_rx_handle_wp_report(phl_info, phl_rx);
3518 		phl_recycle_rx_buf(phl_info, phl_rx);
3519 		break;
3520 	case RTW_RX_TYPE_PPDU_STATUS:
3521 		phl_rx_proc_ppdu_sts(phl_info, phl_rx);
3522 #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
3523 		phl_rx_proc_phy_sts(phl_info, phl_rx);
3524 #endif
3525 		phl_recycle_rx_buf(phl_info, phl_rx);
3526 		break;
3527 	case RTW_RX_TYPE_C2H:
3528 		phl_recycle_rx_buf(phl_info, phl_rx);
3529 		break;
3530 	case RTW_RX_TYPE_CHANNEL_INFO:
3531 	case RTW_RX_TYPE_TX_RPT:
3532 	case RTW_RX_TYPE_DFS_RPT:
3533 	case RTW_RX_TYPE_MAX:
3534 		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "phl_rx_process_pcie(): Unsupported case:%d, please check it\n",
3535 				phl_rx->type);
3536 		phl_recycle_rx_buf(phl_info, phl_rx);
3537 		break;
3538 	default:
3539 		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING] unrecognized rx type\n");
3540 		phl_recycle_rx_buf(phl_info, phl_rx);
3541 		break;
3542 	}
3543 }
3544 
3545 static u16 _phl_get_idle_rxbuf_cnt(struct phl_info_t *phl_info,
3546 					struct rtw_rx_buf_ring *rx_buf_ring)
3547 {
3548 	return rx_buf_ring->idle_rxbuf_cnt;
3549 }
3550 
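/*
 * phl_rx_pcie() - per-channel rx polling loop.  For each rx channel the
 * number of pending rxbd entries is derived from host_idx/hw_idx with
 * phl_calc_avail_rptr() (presumably the ring distance between the two
 * indices), capped by the number of idle rx buffers; each packet is fetched
 * with phl_get_single_rx() and processed, and the consumed rxbd entries are
 * refilled afterwards.
 */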
3551 static enum rtw_phl_status phl_rx_pcie(struct phl_info_t *phl_info)
3552 {
3553 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3554 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
3555 	struct rtw_hal_com_t *hal_com = rtw_hal_get_halcom(phl_info->hal);
3556 	struct rtw_rx_buf_ring *rx_buf_ring = NULL;
3557 	struct rx_base_desc *rxbd = NULL;
3558 	struct rtw_phl_rx_pkt *phl_rx = NULL;
3559 	u16 i = 0, rxcnt = 0, idle_rxbuf_cnt = 0;
3560 	u8 ch = 0;
3561 
3562 	FUNCIN_WSTS(pstatus);
3563 
3564 	rx_buf_ring = (struct rtw_rx_buf_ring *)hci_info->rxbuf_pool;
3565 	rxbd = (struct rx_base_desc *)hci_info->rxbd_buf;
3566 
3567 	for (ch = 0; ch < hci_info->total_rxch_num; ch++) {
3568 		rxcnt = phl_calc_avail_rptr(rxbd[ch].host_idx, rxbd[ch].hw_idx,
3569 		                            (u16)hal_com->bus_cap.rxbd_num);
3570 		if (rxcnt == 0) {
3571 			PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,
3572 				"no avail hw rx\n");
3573 			pstatus = RTW_PHL_STATUS_SUCCESS;
3574 			continue;
3575 		}
3576 
3577 		idle_rxbuf_cnt = _phl_get_idle_rxbuf_cnt(phl_info,
3578 							 &rx_buf_ring[ch]);
3579 
3580 		if (idle_rxbuf_cnt == 0) {
3581 			PHL_WARN("%s, idle rxbuf is empty. (ch = %d)\n",
3582 				 __func__, ch);
3583 			phl_dump_all_sta_rx_info(phl_info);
3584 			PHL_INFO("phl_rx_ring stored rx number = %d\n",
3585 				 rtw_phl_query_new_rx_num(phl_info));
3586 #ifdef PHL_RX_BATCH_IND
3587 			if (ch == 0)
3588 				_phl_indic_new_rxpkt(phl_info);
3589 #endif
3590 			pstatus = RTW_PHL_STATUS_SUCCESS;
3591 			continue;
3592 		}
3593 
3594 		/* only handle as many rx packets as there are idle rx buffers */
3595 		if (rxcnt > idle_rxbuf_cnt) {
3596 			PHL_WARN("rxcnt %d is larger than idle rxbuf cnt %d.\n", rxcnt, idle_rxbuf_cnt);
3597 			rxcnt = idle_rxbuf_cnt;
3598 		}
3599 
3600 		for (i = 0; i < rxcnt; i++) {
3601 			pstatus = phl_get_single_rx(phl_info, &rx_buf_ring[ch],
3602 							ch, &phl_rx);
3603 			if (RTW_PHL_STATUS_FRAME_DROP == pstatus)
3604 				continue;
3605 			if (NULL == phl_rx) {
3606 				rxcnt = i;
3607 				break;
3608 			}
3609 
3610 			/* hana_todo */
3611 			phl_rx->r.mdata.dma_ch = ch;
3612 			phl_rx_process_pcie(phl_info, phl_rx);
3613 		}
3614 
3615 #ifdef PHL_RX_BATCH_IND
3616 		if (ch == 0 && phl_info->rx_new_pending)
3617 			_phl_indic_new_rxpkt(phl_info);
3618 #endif
3619 
3620 		pstatus = _phl_refill_rxbd(phl_info, &rx_buf_ring[ch],
3621 							&rxbd[ch], ch, rxcnt);
3622 
3623 		if (RTW_PHL_STATUS_RESOURCE == pstatus)
3624 			PHL_WARN("%s, rxcnt %d is not refilled.\n", __func__, rxcnt);
3625 
3626 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
3627 			continue;
3628 	}
3629 
3630 	FUNCOUT_WSTS(pstatus);
3631 
3632 	return pstatus;
3633 }
3634 
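/*
 * phl_pltfm_tx_pcie() - send an H2C (host-to-card FW command) packet.  The
 * h2c buffer is wrapped into a temporary wd page descriptor, the fwcmd TXBD
 * is updated under txbd_lock, the h2c is moved to the busy queue and the
 * hardware read/write pointer is kicked via rtw_hal_trigger_txstart().
 */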
3635 enum rtw_phl_status phl_pltfm_tx_pcie(struct phl_info_t *phl_info, void *pkt)
3636 {
3637 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3638 	enum rtw_hal_status hstatus = RTW_HAL_STATUS_FAILURE;
3639 	struct rtw_h2c_pkt *h2c_pkt = (struct rtw_h2c_pkt *)pkt;
3640 	struct tx_base_desc *txbd = NULL;
3641 	struct phl_h2c_pkt_pool *h2c_pool = NULL;
3642 	struct rtw_wd_page wd;
3643 	u8 fwcmd_queue_idx = 0;
3644 
3645 	txbd = (struct tx_base_desc *)phl_info->hci->txbd_buf;
3646 	h2c_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
3647 
3648 	_os_mem_set(phl_to_drvpriv(phl_info), &wd, 0, sizeof(wd));
3649 	/* forward h2c pkt information into the wd page format */
3650 	wd.phy_addr_l = h2c_pkt->phy_addr_l;
3651 	wd.phy_addr_h = h2c_pkt->phy_addr_h;
3652 	wd.buf_len = h2c_pkt->data_len;
3653 	wd.ls = 1;
3654 
3655 	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : wd.phy_addr_l %x, wd.phy_addr_h %x\n", __func__, wd.phy_addr_l, wd.phy_addr_h);
3656 	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : buf_len %x.\n", __func__, wd.buf_len);
3657 
3658 	fwcmd_queue_idx = rtw_hal_get_fwcmd_queue_idx(phl_info->hal);
3659 
3660 	_os_spinlock(phl_to_drvpriv(phl_info), &txbd[fwcmd_queue_idx].txbd_lock, _bh, NULL);
3661 	hstatus = rtw_hal_update_txbd(phl_info->hal, txbd, &wd, fwcmd_queue_idx, 1);
3662 	_os_spinunlock(phl_to_drvpriv(phl_info), &txbd[fwcmd_queue_idx].txbd_lock, _bh, NULL);
3663 
3664 	h2c_pkt->host_idx = wd.host_idx;
3665 
3666 	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : h2c_pkt->host_idx %d.\n", __func__, h2c_pkt->host_idx);
3667 
3668 	if (RTW_HAL_STATUS_SUCCESS == hstatus)
3669 			pstatus = phl_enqueue_busy_h2c_pkt(phl_info, h2c_pkt, _tail);
3670 
3671 	if (RTW_PHL_STATUS_SUCCESS == pstatus) {
3672 		_os_spinlock(phl_to_drvpriv(phl_info), &txbd[fwcmd_queue_idx].txbd_lock, _bh, NULL);
3673 		hstatus = rtw_hal_trigger_txstart(phl_info->hal, txbd, fwcmd_queue_idx);
3674 		_os_spinunlock(phl_to_drvpriv(phl_info), &txbd[fwcmd_queue_idx].txbd_lock, _bh, NULL);
3675 		if (RTW_HAL_STATUS_SUCCESS != hstatus) {
3676 			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] update Tx RW ptr fail!\n");
3677 			pstatus = RTW_PHL_STATUS_FAILURE;
3678 		}
3679 	}
3680 
3681 	return pstatus;
3682 }
3683 
3684 void *phl_get_txbd_buf_pcie(struct phl_info_t *phl_info)
3685 {
3686 	struct hci_info_t *hci_info = phl_info->hci;
3687 
3688 	return hci_info->txbd_buf;
3689 }
3690 
3691 void *phl_get_rxbd_buf_pcie(struct phl_info_t *phl_info)
3692 {
3693 	struct hci_info_t *hci_info = phl_info->hci;
3694 
3695 	return hci_info->rxbd_buf;
3696 }
3697 
3698 void phl_recycle_rx_pkt_pcie(struct phl_info_t *phl_info,
3699 				struct rtw_phl_rx_pkt *phl_rx)
3700 {
3701 #ifdef CONFIG_DYNAMIC_RX_BUF
3702 	struct rtw_rx_buf *rx_buf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
3703 
3704 	rx_buf->reuse = true;
3705 #endif
3706 
3707 	phl_recycle_rx_buf(phl_info, phl_rx);
3708 }
3709 
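/*
 * phl_register_trx_hdlr_pcie() - register the PCIe tx/rx handlers with the
 * phl handler framework; both use RTW_PHL_HANDLER_PRIO_HIGH (tasklet
 * context) and call back into _phl_tx_callback_pcie()/_phl_rx_callback_pcie().
 */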
3710 enum rtw_phl_status phl_register_trx_hdlr_pcie(struct phl_info_t *phl_info)
3711 {
3712 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3713 	struct rtw_phl_handler *tx_handler = &phl_info->phl_tx_handler;
3714 	struct rtw_phl_handler *rx_handler = &phl_info->phl_rx_handler;
3715 	void *drv_priv = phl_to_drvpriv(phl_info);
3716 
3717 	tx_handler->type = RTW_PHL_HANDLER_PRIO_HIGH; /* tasklet */
3718 	tx_handler->callback = _phl_tx_callback_pcie;
3719 	tx_handler->context = phl_info;
3720 	tx_handler->drv_priv = drv_priv;
3721 	pstatus = phl_register_handler(phl_info->phl_com, tx_handler);
3722 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
3723 		PHL_ERR("%s : register tx_handler fail.\n", __FUNCTION__);
3724 
3725 	rx_handler->type = RTW_PHL_HANDLER_PRIO_HIGH;
3726 	rx_handler->callback = _phl_rx_callback_pcie;
3727 	rx_handler->context = phl_info;
3728 	rx_handler->drv_priv = drv_priv;
3729 	pstatus = phl_register_handler(phl_info->phl_com, rx_handler);
3730 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
3731 		PHL_ERR("%s : register rx_handler fail.\n", __FUNCTION__);
3732 
3733 	return pstatus;
3734 }
3735 
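/*
 * phl_tx_watchdog_pcie() - periodic tx bookkeeping: dump the tx byte and
 * throughput statistics, the dynamic LTR state (when RTW_WKARD_DYNAMIC_LTR
 * is enabled) and the wp/busy-wp statistics.
 */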
3736 void
3737 phl_tx_watchdog_pcie(struct phl_info_t *phl_info)
3738 {
3739 	struct rtw_stats *phl_stats = NULL;
3740 
3741 	phl_stats = &phl_info->phl_com->phl_stats;
3742 
3743 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3744 		  "\n=== Tx statistics === \n");
3745 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3746 		"\nunicast tx bytes	: %llu\n", phl_stats->tx_byte_uni);
3747 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3748 		"total tx bytes		: %llu\n", phl_stats->tx_byte_total);
3749 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3750 		 "tx throughput		: %d(kbps)\n",
3751 			 (int)phl_stats->tx_tp_kbits);
3752 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3753 		"last tx time		: %d(ms)\n",
3754 			 (int)phl_stats->last_tx_time_ms);
3755 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3756 		"tx request num to phl	: %d\n",
3757 			 (int)phl_stats->txreq_num);
3758 
3759 	#ifdef RTW_WKARD_DYNAMIC_LTR
3760 	if (rtw_hal_ltr_is_sw_ctrl(phl_info->phl_com, phl_info->hal)) {
3761 		PHL_INFO(
3762 			"ltr sw ctrl 			: %u\n",
3763 			rtw_hal_ltr_is_sw_ctrl(phl_info->phl_com, phl_info->hal) ? 1 : 0);
3764 		PHL_INFO(
3765 			"ltr current state 		: %u\n",
3766 			phl_ltr_get_cur_state(phl_info->phl_com));
3767 		PHL_INFO(
3768 			"ltr active trigger cnt : %lu\n",
3769 			phl_ltr_get_tri_cnt(phl_info->phl_com, RTW_PCIE_LTR_SW_ACT));
3770 		PHL_INFO(
3771 			"ltr idle trigger cnt   : %lu\n",
3772 			phl_ltr_get_tri_cnt(phl_info->phl_com, RTW_PCIE_LTR_SW_IDLE));
3773 		PHL_INFO(
3774 			"ltr last trigger time  : %lu\n",
3775 			phl_ltr_get_last_trigger_time(phl_info->phl_com));
3776 	}
3777 	#endif
3778 
3779 	_phl_dump_wp_stats(phl_info);
3780 
3781 	_phl_dump_busy_wp(phl_info);
3782 
3783 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
3784 		  "\n===================== \n");
3785 
3786 }
3787 
3788 void
3789 phl_read_hw_rx(struct phl_info_t *phl_info, enum rx_channel_type rx_ch)
3790 {
3791 	struct hci_info_t *hci_info = (struct hci_info_t *)phl_info->hci;
3792 	struct rx_base_desc *rxbd = NULL;
3793 	u16 host_idx = 0;
3794 
3795 	if (rx_ch >= hci_info->total_rxch_num)
3796 		return;
3797 
3798 	rxbd = (struct rx_base_desc *)hci_info->rxbd_buf;
3799 	rtw_hal_rx_res_query(phl_info->hal, rx_ch, &host_idx,
3800 	                     &rxbd[rx_ch].hw_idx);
3801 }
3802 
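/*
 * PCIe HCI trx ops table.  phl_hci_trx_ops_init() fills it with the PCIe
 * implementations and phl_hook_trx_ops_pci() installs it as
 * phl_info->hci_trx_ops.
 */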
3803 static struct phl_hci_trx_ops ops = {0};
3804 void phl_hci_trx_ops_init(void)
3805 {
3806 	ops.hci_trx_init = phl_trx_init_pcie;
3807 	ops.hci_trx_deinit = phl_trx_deinit_pcie;
3808 	ops.prepare_tx = phl_prepare_tx_pcie;
3809 	ops.recycle_rx_buf = phl_release_target_rx_buf;
3810 	ops.tx = phl_tx_pcie;
3811 	ops.rx = phl_rx_pcie;
3812 	ops.trx_cfg = phl_trx_config_pcie;
3813 	ops.trx_stop = phl_trx_stop_pcie;
3814 	ops.recycle_busy_wd = phl_recycle_busy_wd;
3815 	ops.recycle_busy_h2c = phl_recycle_busy_h2c;
3816 	ops.read_hw_rx = phl_read_hw_rx;
3817 	ops.pltfm_tx = phl_pltfm_tx_pcie;
3818 	ops.alloc_h2c_pkt_buf = _phl_alloc_h2c_pkt_buf_pcie;
3819 	ops.free_h2c_pkt_buf = _phl_free_h2c_pkt_buf_pcie;
3820 	ops.trx_reset = phl_trx_reset_pcie;
3821 	ops.trx_resume = phl_trx_resume_pcie;
3822 	ops.req_tx_stop = phl_req_tx_stop_pcie;
3823 	ops.req_rx_stop = phl_req_rx_stop_pcie;
3824 	ops.is_tx_pause = phl_is_tx_sw_pause_pcie;
3825 	ops.is_rx_pause = phl_is_rx_sw_pause_pcie;
3826 	ops.get_txbd_buf = phl_get_txbd_buf_pcie;
3827 	ops.get_rxbd_buf = phl_get_rxbd_buf_pcie;
3828 	ops.recycle_rx_pkt = phl_recycle_rx_pkt_pcie;
3829 	ops.register_trx_hdlr = phl_register_trx_hdlr_pcie;
3830 	ops.rx_handle_normal = phl_rx_handle_normal;
3831 	ops.tx_watchdog = phl_tx_watchdog_pcie;
3832 }
3833 
3834 
3835 enum rtw_phl_status phl_hook_trx_ops_pci(struct phl_info_t *phl_info)
3836 {
3837 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3838 
3839 	if (NULL != phl_info) {
3840 		phl_hci_trx_ops_init();
3841 		phl_info->hci_trx_ops = &ops;
3842 		pstatus = RTW_PHL_STATUS_SUCCESS;
3843 	}
3844 
3845 	return pstatus;
3846 }
3847 
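/*
 * phl_cmd_set_l2_leave() - request L2 leave.  With CONFIG_CMD_DISP the
 * MSG_EVT_HAL_SET_L2_LEAVE command is enqueued on HW_BAND_0 and waited for
 * (PHL_CMD_WAIT); otherwise rtw_hal_set_l2_leave() is called directly.
 */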
3848 enum rtw_phl_status phl_cmd_set_l2_leave(struct phl_info_t *phl_info)
3849 {
3850 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
3851 
3852 #ifdef CONFIG_CMD_DISP
3853 	pstatus = phl_cmd_enqueue(phl_info, HW_BAND_0, MSG_EVT_HAL_SET_L2_LEAVE, NULL, 0, NULL, PHL_CMD_WAIT, 0);
3854 
3855 	if (is_cmd_failure(pstatus)) {
3856 		/* Send cmd success, but wait cmd fail*/
3857 		pstatus = RTW_PHL_STATUS_FAILURE;
3858 	} else if (pstatus != RTW_PHL_STATUS_SUCCESS) {
3859 		/* Send cmd fail */
3860 		pstatus = RTW_PHL_STATUS_FAILURE;
3861 	}
3862 #else
3863 	if (rtw_hal_set_l2_leave(phl_info->hal) == RTW_HAL_STATUS_SUCCESS)
3864 		pstatus = RTW_PHL_STATUS_SUCCESS;
3865 #endif
3866 	return pstatus;
3867 }
3868