xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852be/phl/phl_tx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2019 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_TX_C_
16 #include "phl_headers.h"
17 
18 /**
19  * Used by the read / write pointer mechanism; returns the number of
20  * entries currently available to be read.
21  * @rptr: input, the read pointer
22  * @wptr: input, the write pointer
23  * @bndy: input, the boundary of read / write pointer mechanism
24  */
25 u16 phl_calc_avail_rptr(u16 rptr, u16 wptr, u16 bndy)
26 {
27 	u16 avail_rptr = 0;
28 
29 	if (wptr >= rptr)
30 		avail_rptr = wptr - rptr;
31 	else if (rptr > wptr)
32 		avail_rptr = wptr + (bndy - rptr);
33 
34 	return avail_rptr;
35 }
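/*
 * Worked example (illustrative values): with bndy = 256, rptr = 250 and
 * wptr = 3 the writer has wrapped, so the readable count is
 * wptr + (bndy - rptr) = 3 + 6 = 9 entries; with rptr = 3 and wptr = 250
 * it is simply wptr - rptr = 247.
 */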
36 
37 
38 /**
39  * Used by the read / write pointer mechanism; returns the number of
40  * entries currently available to be written.
41  * @rptr: input, the read pointer
42  * @wptr: input, the write pointer
43  * @bndy: input, the boundary of read / write pointer mechanism
44  */
45 u16 phl_calc_avail_wptr(u16 rptr, u16 wptr, u16 bndy)
46 {
47 	u16 avail_wptr = 0;
48 
49 	if (rptr > wptr)
50 		avail_wptr = rptr - wptr - 1;
51 	else if (wptr >= rptr)
52 		avail_wptr = rptr + (bndy - wptr) - 1;
53 
54 	return avail_wptr;
55 }
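/*
 * Worked example (illustrative values): one slot is always kept empty so a
 * full ring is never mistaken for an empty one. With bndy = 256, rptr = 250
 * and wptr = 3: rptr > wptr, so 250 - 3 - 1 = 246 entries are writable;
 * with rptr = 3 and wptr = 250: 3 + (256 - 250) - 1 = 8 entries.
 */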
56 
57 void phl_dump_sorted_ring(_os_list *sorted_ring)
58 {
59 	struct phl_ring_status *ring_sts;
60 	u16 i = 0;
61 
62 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump sorted ring==\n");
63 
64 	phl_list_for_loop(ring_sts, struct phl_ring_status, sorted_ring,
65 			       list) {
66 		i++;
67 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==ring %d==\n", i);
68 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->macid = %d\n",
69 			  ring_sts->macid);
70 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->req_busy = %d\n",
71 			  ring_sts->req_busy);
72 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->ring_ptr->tid = %d\n",
73 			  ring_sts->ring_ptr->tid);
74 	}
75 }
76 
77 void phl_dump_tx_plan(_os_list *sta_list)
78 {
79 	struct phl_tx_plan *tx_plan;
80 	u16 i = 0;
81 
82 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump tx plan==\n");
83 
84 	phl_list_for_loop(tx_plan, struct phl_tx_plan, sta_list,
85 			       list) {
86 		i++;
87 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==tx plan %d==\n", i);
88 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->sleep = %d\n",
89 			  tx_plan->sleep);
90 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->has_mgnt = %d\n",
91 			  tx_plan->has_mgnt);
92 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->has_hiq = %d\n",
93 			  tx_plan->has_hiq);
94 		phl_dump_sorted_ring(&tx_plan->sorted_ring);
95 	}
96 }
97 
98 void phl_dump_t_fctrl_result(_os_list *t_fctrl_result)
99 {
100 	struct phl_ring_status *ring_sts;
101 	u16 i = 0;
102 
103 	PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump tx flow control result==\n");
104 
105 	phl_list_for_loop(ring_sts, struct phl_ring_status, t_fctrl_result,
106 						   list) {
107 		i++;
108 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==ring %d==\n", i);
109 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->macid = %d\n",
110 			  ring_sts->macid);
111 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->req_busy = %d\n",
112 			  ring_sts->req_busy);
113 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->ring_ptr->tid = %d\n",
114 			  ring_sts->ring_ptr->tid);
115 	}
116 }
117 
118 void phl_dump_tx_stats(struct rtw_stats *stats)
119 {
120 	PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
121 		  "Dump Tx statistics\n"
122 		  "tx_byte_uni = %lld\n"
123 		  "tx_byte_total = %lld\n"
124 		  "tx_tp_kbits = %d\n"
125 		  "last_tx_time_ms = %d\n",
126 		  stats->tx_byte_uni,
127 		  stats->tx_byte_total,
128 		  stats->tx_tp_kbits,
129 		  stats->last_tx_time_ms);
130 }
131 
132 void phl_dump_h2c_pool_stats(struct phl_h2c_pkt_pool *h2c_pkt_pool)
133 {
134 	PHL_INFO("[h2c_stats] idle cmd %d, idle data %d, idle ldata %d, busy h2c %d.\n",
135 				h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt,
136 				h2c_pkt_pool->idle_h2c_pkt_data_list.cnt,
137 				h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt,
138 				h2c_pkt_pool->busy_h2c_pkt_list.cnt);
139 }
140 
141 void phl_reset_tx_stats(struct rtw_stats *stats)
142 {
143 	stats->tx_byte_uni = 0;
144 	stats->tx_byte_total = 0;
145 	stats->tx_tp_kbits = 0;
146 	stats->last_tx_time_ms = 0;
147 	stats->txtp.last_calc_time_ms = 0;
148 	stats->txtp.last_calc_bits = 0;
149 	stats->tx_traffic.lvl = RTW_TFC_IDLE;
150 	stats->tx_traffic.sts = 0;
151 }
152 
153 const char *phl_tfc_lvl_to_str(u8 lvl)
154 {
155 	switch (lvl) {
156 	case RTW_TFC_IDLE:
157 		return "IDLE";
158 	case RTW_TFC_ULTRA_LOW:
159 		return "ULTRA_LOW";
160 	case RTW_TFC_LOW:
161 		return "LOW";
162 	case RTW_TFC_MID:
163 		return "MID";
164 	case RTW_TFC_HIGH:
165 		return "HIGH";
166 	default:
167 		return "-";
168 	}
169 }
170 
171 void
172 phl_tx_traffic_upd(struct rtw_stats *sts)
173 {
174 	u32 tp_k = 0, tp_m = 0;
175 	enum rtw_tfc_lvl tx_tfc_lvl = RTW_TFC_IDLE;
176 	tp_k = sts->tx_tp_kbits;
177 	tp_m = sts->tx_tp_kbits >> 10;
178 
179 	if (tp_m >= TX_HIGH_TP_THRES_MBPS)
180 		tx_tfc_lvl = RTW_TFC_HIGH;
181 	else if (tp_m >= TX_MID_TP_THRES_MBPS)
182 		tx_tfc_lvl = RTW_TFC_MID;
183 	else if (tp_m >= TX_LOW_TP_THRES_MBPS)
184 		tx_tfc_lvl = RTW_TFC_LOW;
185 	else if (tp_k >= TX_ULTRA_LOW_TP_THRES_KBPS)
186 		tx_tfc_lvl = RTW_TFC_ULTRA_LOW;
187 	else
188 		tx_tfc_lvl = RTW_TFC_IDLE;
189 
190 	if (sts->tx_traffic.lvl > tx_tfc_lvl) {
191 		sts->tx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
192 		sts->tx_traffic.lvl = tx_tfc_lvl;
193 	} else if (sts->tx_traffic.lvl < tx_tfc_lvl) {
194 		sts->tx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
195 		sts->tx_traffic.lvl = tx_tfc_lvl;
196 	} else if (sts->tx_traffic.sts &
197 		(TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
198 		sts->tx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
199 					 TRAFFIC_DECREASE);
200 	}
201 }
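/*
 * Example of the level mapping above (threshold values below are purely
 * illustrative): tp_m is tx_tp_kbits >> 10, i.e. roughly Mbps. If
 * TX_HIGH/MID/LOW_TP_THRES_MBPS were 50/10/2 and TX_ULTRA_LOW_TP_THRES_KBPS
 * were 100, then 12 Mbps would map to RTW_TFC_MID and 300 kbps to
 * RTW_TFC_ULTRA_LOW. The sts bits advertise the change only until the next
 * update at an unchanged level, when they are cleared again.
 */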
202 
203 void phl_update_tx_stats(struct rtw_stats *stats, struct rtw_xmit_req *tx_req)
204 {
205 	u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
206 	u64 diff_bits = 0;
207 
208 	stats->last_tx_time_ms = cur_time;
209 	stats->tx_byte_total += tx_req->total_len;
210 
211 	stats->txreq_num++;
212 	if (tx_req->mdata.bc == 0 && tx_req->mdata.mc == 0)
213 		stats->tx_byte_uni += tx_req->total_len;
214 
215 	if (0 == stats->txtp.last_calc_time_ms ||
216 	    0 == stats->txtp.last_calc_bits) {
217 		stats->txtp.last_calc_time_ms = stats->last_tx_time_ms;
218 		stats->txtp.last_calc_bits = stats->tx_byte_uni * 8;
219 	} else {
220 		if (cur_time >= stats->txtp.last_calc_time_ms) {
221 			diff_t = cur_time - stats->txtp.last_calc_time_ms;
222 		} else {
223 			diff_t = RTW_U32_MAX - stats->txtp.last_calc_time_ms +
224 				cur_time + 1;
225 		}
226 		if (diff_t > TXTP_CALC_DIFF_MS && stats->tx_byte_uni != 0) {
227 			diff_bits = (stats->tx_byte_uni * 8) -
228 				stats->txtp.last_calc_bits;
229 			stats->tx_tp_kbits = (u32)_os_division64(diff_bits,
230 								 diff_t);
231 			stats->txtp.last_calc_bits = stats->tx_byte_uni * 8;
232 			stats->txtp.last_calc_time_ms = cur_time;
233 		}
234 	}
235 }
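/*
 * Note on the throughput math above: diff_bits is the unicast bit count
 * accumulated since the last calculation and diff_t is the elapsed time in
 * ms, so diff_bits / diff_t is bits-per-millisecond, which is numerically
 * kbits-per-second. For example, 1,000,000 bits sent over 500 ms yields
 * tx_tp_kbits = 2000, i.e. about 2 Mbps.
 */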
236 
237 void phl_tx_statistics(struct phl_info_t *phl_info, struct rtw_xmit_req *tx_req)
238 {
239 	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
240 	struct rtw_stats *phl_stats = &phl_com->phl_stats;
241 	struct rtw_stats *sta_stats = NULL;
242 	struct rtw_phl_stainfo_t *sta = NULL;
243 	u16 macid = tx_req->mdata.macid;
244 
245 	if (!phl_macid_is_valid(phl_info, macid))
246 		goto dev_stat;
247 
248 	sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
249 
250 	if (NULL == sta)
251 		goto dev_stat;
252 	sta_stats = &sta->stats;
253 
254 	phl_update_tx_stats(sta_stats, tx_req);
255 dev_stat:
256 	phl_update_tx_stats(phl_stats, tx_req);
257 }
258 
259 
260 static void _phl_free_phl_tring_list(void *phl,
261 				struct rtw_phl_tring_list *ring_list)
262 {
263 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
264 	void *drv_priv = phl_to_drvpriv(phl_info);
265 	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
266 	struct rtw_phl_tx_ring *ring;
267 	struct rtw_xmit_req *tx_req;
268 	u16 rptr = 0;
269 	u8 i = 0;
270 
271 	for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
272 		ring = &ring_list->phl_ring[i];
273 		rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
274 
275 		while (rptr != ring->core_idx) {
276 			rptr += 1;
277 			if (rptr >= MAX_PHL_RING_ENTRY_NUM)
278 				rptr = 0;
279 			tx_req = (struct rtw_xmit_req *)ring->entry[rptr];
280 			if (NULL == tx_req)
281 				break;
282 			ops->tx_recycle(drv_priv, tx_req);
283 		}
284 	}
285 	_os_kmem_free(drv_priv, ring_list, sizeof(*ring_list));
286 }
287 
288 
289 void _phl_init_tx_plan(struct phl_tx_plan *tx_plan)
290 {
291 	INIT_LIST_HEAD(&tx_plan->list);
292 	tx_plan->sleep = false;
293 	tx_plan->has_mgnt = false;
294 	tx_plan->has_hiq = false;
295 	INIT_LIST_HEAD(&tx_plan->sorted_ring);
296 }
297 
298 
299 static struct rtw_phl_tring_list *
300 _phl_allocate_phl_tring_list(void *phl, u16 macid,
301 			u8 hw_band, u8 hw_wmm, u8 hw_port)
302 {
303 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
304 	struct rtw_phl_tring_list *phl_tring_list = NULL;
305 	void *drv_priv = NULL;
306 	u32 buf_len = 0;
307 	u8 i = 0, dma_ch = 0;
308 
309 	drv_priv = phl_to_drvpriv(phl_info);
310 
311 	buf_len = sizeof(struct rtw_phl_tring_list);
312 	phl_tring_list = (struct rtw_phl_tring_list *)_os_kmem_alloc(drv_priv,
313 								buf_len);
314 
315 	if (NULL != phl_tring_list) {
316 		_os_mem_set(drv_priv, phl_tring_list, 0, buf_len);
317 		INIT_LIST_HEAD(&phl_tring_list->list);
318 		phl_tring_list->macid = macid;
319 		phl_tring_list->band = hw_band;
320 		phl_tring_list->wmm = hw_wmm;
321 		phl_tring_list->port = hw_port;
322 		/*phl_tring_list->mbssid = hw_mbssid*/
323 
324 		for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
325 			phl_tring_list->phl_ring[i].tid = i;
326 			dma_ch = rtw_hal_tx_chnl_mapping(phl_info->hal, macid,
327 							 i, hw_band);
328 			phl_tring_list->phl_ring[i].dma_ch = dma_ch;
329 		}
330 		_phl_init_tx_plan(&phl_tring_list->tx_plan);
331 	}
332 
333 	return phl_tring_list;
334 }
335 
336 enum rtw_phl_status
337 phl_register_tx_ring(void *phl, u16 macid, u8 hw_band, u8 hw_wmm, u8 hw_port)
338 {
339 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
340 	void *drv_priv = phl_to_drvpriv(phl_info);
341 	struct rtw_phl_tring_list *phl_tring_list = NULL;
342 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
343 	_os_list *ring_list = NULL;
344 
345 	phl_tring_list = _phl_allocate_phl_tring_list(phl, macid, hw_band, hw_wmm, hw_port);
346 
347 	if (NULL != phl_tring_list) {
348 		ring_list = &phl_info->t_ring_list;
349 		_os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
350 		list_add_tail(&phl_tring_list->list, ring_list);
351 		_os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
352 
353 		phl_status = RTW_PHL_STATUS_SUCCESS;
354 	}
355 
356 	return phl_status;
357 }
358 
359 
360 
361 enum rtw_phl_status phl_deregister_tx_ring(void *phl, u16 macid)
362 {
363 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
364 	void *drv_priv = phl_to_drvpriv(phl_info);
365 	struct rtw_phl_tring_list *phl_tring_list = NULL, *t;
366 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
367 	_os_list *ring_list = NULL;
368 
369 	ring_list = &phl_info->t_ring_list;
370 
371 	_os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
372 
373 	phl_list_for_loop_safe(phl_tring_list, t, struct rtw_phl_tring_list,
374 					ring_list, list) {
375 		if (macid == phl_tring_list->macid) {
376 			list_del(&phl_tring_list->list);
377 			phl_status = RTW_PHL_STATUS_SUCCESS;
378 			break;
379 		}
380 	}
381 
382 	_os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
383 
384 	if (RTW_PHL_STATUS_SUCCESS == phl_status) {
385 		/* defer the free operation to avoid racing with _phl_tx_callback_xxx */
386 		_os_spinlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
387 		list_add_tail(&phl_tring_list->list, &phl_info->t_ring_free_list);
388 		_os_spinunlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
389 	}
390 
391 	return phl_status;
392 }
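/*
 * Note on the deferred free above: the deregistered ring list is only moved
 * to t_ring_free_list here. Its pending tx requests are recycled and the
 * memory is released later by phl_free_deferred_tx_ring(), which
 * phl_datapath_stop() calls after the tx path has been stopped.
 */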
393 
394 void phl_free_deferred_tx_ring(struct phl_info_t *phl_info)
395 {
396 	void *drv_priv = phl_to_drvpriv(phl_info);
397 	struct rtw_phl_tring_list *phl_tring_list = NULL, *t;
398 	_os_list *ring_list = NULL;
399 
400 	ring_list = &phl_info->t_ring_free_list;
401 
402 	_os_spinlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
403 	if (list_empty(ring_list) == false) {
404 		phl_list_for_loop_safe(phl_tring_list, t, struct rtw_phl_tring_list,
405 						ring_list, list) {
406 			list_del(&phl_tring_list->list);
407 			_phl_free_phl_tring_list(phl_info, phl_tring_list);
408 		}
409 	}
410 	_os_spinunlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
411 }
412 
413 
414 struct phl_ring_status *phl_alloc_ring_sts(struct phl_info_t *phl_info)
415 {
416 	struct phl_ring_sts_pool *ring_sts_pool = phl_info->ring_sts_pool;
417 	struct phl_ring_status *ring_sts = NULL;
418 
419 	_os_spinlock(phl_to_drvpriv(phl_info), &ring_sts_pool->idle_lock, _bh, NULL);
420 
421 	if (false == list_empty(&ring_sts_pool->idle)) {
422 		ring_sts = list_first_entry(&ring_sts_pool->idle,
423 					struct phl_ring_status, list);
424 		list_del(&ring_sts->list);
425 	}
426 
427 	_os_spinunlock(phl_to_drvpriv(phl_info), &ring_sts_pool->idle_lock, _bh, NULL);
428 
429 	return ring_sts;
430 }
431 
432 void phl_release_ring_sts(struct phl_info_t *phl_info,
433 				struct phl_ring_status *ring_sts)
434 {
435 	struct phl_ring_sts_pool *ring_sts_pool = phl_info->ring_sts_pool;
436 	void *drv_priv = NULL;
437 
438 	drv_priv = phl_to_drvpriv(phl_info);
439 
440 	_os_spinlock(drv_priv, &ring_sts_pool->idle_lock, _bh, NULL);
441 	_os_mem_set(drv_priv, ring_sts, 0, sizeof(*ring_sts));
442 	INIT_LIST_HEAD(&ring_sts->list);
443 	list_add_tail(&ring_sts->list, &ring_sts_pool->idle);
444 	_os_spinunlock(drv_priv, &ring_sts_pool->idle_lock, _bh, NULL);
445 }
446 
447 
448 void _phl_ring_status_deinit(struct phl_info_t *phl_info)
449 {
450 	struct phl_ring_sts_pool *ring_sts_pool = NULL;
451 	u16 buf_len = 0;
452 	void *drv_priv = NULL;
453 	FUNCIN();
454 	drv_priv = phl_to_drvpriv(phl_info);
455 	ring_sts_pool = (struct phl_ring_sts_pool *)phl_info->ring_sts_pool;
456 	if (NULL != ring_sts_pool) {
457 		buf_len = sizeof(struct phl_ring_sts_pool);
458 		_os_spinlock_free(drv_priv, &ring_sts_pool->idle_lock);
459 		_os_spinlock_free(drv_priv, &ring_sts_pool->busy_lock);
460 		_os_mem_free(drv_priv, ring_sts_pool, buf_len);
461 	}
462 	FUNCOUT();
463 }
464 
465 
466 enum rtw_phl_status _phl_ring_status_init(struct phl_info_t *phl_info)
467 {
468 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
469 	struct phl_ring_sts_pool *ring_sts_pool = NULL;
470 	struct phl_ring_status *ring_sts = NULL;
471 	void *drv_priv = NULL;
472 	u16 buf_len = 0;
473 	u8 i = 0;
474 	FUNCIN_WSTS(pstatus);
475 
476 	drv_priv = phl_to_drvpriv(phl_info);
477 	buf_len = sizeof(struct phl_ring_sts_pool);
478 	ring_sts_pool =
479 			(struct phl_ring_sts_pool *)_os_mem_alloc(drv_priv, buf_len);
480 
481 	if (NULL != ring_sts_pool) {
482 		_os_mem_set(drv_priv, ring_sts_pool, 0, buf_len);
483 		INIT_LIST_HEAD(&ring_sts_pool->idle);
484 		INIT_LIST_HEAD(&ring_sts_pool->busy);
485 		_os_spinlock_init(drv_priv, &ring_sts_pool->idle_lock);
486 		_os_spinlock_init(drv_priv, &ring_sts_pool->busy_lock);
487 
488 		for (i = 0; i < MAX_PHL_RING_STATUS_NUMBER; i++) {
489 			ring_sts = &ring_sts_pool->ring_sts[i];
490 			INIT_LIST_HEAD(&ring_sts->list);
491 			_os_spinlock(drv_priv,
492 					(void *)&ring_sts_pool->idle_lock, _bh, NULL);
493 			list_add_tail(&ring_sts->list, &ring_sts_pool->idle);
494 			_os_spinunlock(drv_priv,
495 					(void *)&ring_sts_pool->idle_lock, _bh, NULL);
496 		}
497 
498 		phl_info->ring_sts_pool = ring_sts_pool;
499 
500 		pstatus = RTW_PHL_STATUS_SUCCESS;
501 	}
502 
503 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
504 		_phl_ring_status_deinit(phl_info);
505 	FUNCOUT_WSTS(pstatus);
506 
507 	return pstatus;
508 }
509 
510 struct phl_ring_status *
511 _phl_check_ring_status(struct phl_info_t *phl_info,
512 					struct rtw_phl_tx_ring *ring,
513 					struct rtw_phl_tring_list *tring_list)
514 {
515 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
516 	struct phl_ring_status *ring_sts = NULL;
517 	u16 avail = 0, rptr = 0;
518 	void *drv_priv = phl_to_drvpriv(phl_info);
519 
520 	do {
521 		rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
522 
523 		avail = phl_calc_avail_rptr(rptr, ring->core_idx,
524 					MAX_PHL_RING_ENTRY_NUM);
525 		if (0 == avail) {
526 			ring_sts = NULL;
527 			pstatus = RTW_PHL_STATUS_SUCCESS;
528 			break;
529 		} else {
530 			ring_sts = phl_alloc_ring_sts(phl_info);
531 			if (NULL == ring_sts) {
532 				PHL_ERR("query ring status fail!\n");
533 				pstatus = RTW_PHL_STATUS_RESOURCE;
534 				break;
535 			}
536 			ring_sts->macid = tring_list->macid;
537 			ring_sts->band = tring_list->band;
538 			ring_sts->wmm = tring_list->wmm;
539 			ring_sts->port = tring_list->port;
540 			/*ring_sts->mbssid = tring_list->mbssid;*/
541 			ring_sts->req_busy = avail;
542 			ring_sts->ring_ptr = ring;
543 
544 			rptr += 1;
545 
546 			if (rptr >= MAX_PHL_RING_ENTRY_NUM)
547 				_os_atomic_set(drv_priv, &ring->phl_next_idx, 0);
548 			else
549 				_os_atomic_set(drv_priv, &ring->phl_next_idx, rptr);
550 
551 			pstatus = RTW_PHL_STATUS_SUCCESS;
552 			break;
553 		}
554 	} while (false);
555 
556 	return ring_sts;
557 }
558 
559 void _phl_reset_tx_plan(struct phl_info_t *phl_info,
560 			 struct phl_tx_plan *tx_plan)
561 {
562 	struct phl_ring_status *ring_sts, *t;
563 
564 	INIT_LIST_HEAD(&tx_plan->list);
565 	tx_plan->sleep = false;
566 	tx_plan->has_mgnt = false;
567 	tx_plan->has_hiq = false;
568 	phl_list_for_loop_safe(ring_sts, t, struct phl_ring_status,
569 			       &tx_plan->sorted_ring, list) {
570 		list_del(&ring_sts->list);
571 		phl_release_ring_sts(phl_info, ring_sts);
572 	}
573 	INIT_LIST_HEAD(&tx_plan->sorted_ring);
574 }
575 
576 
577 void _phl_sort_ring_by_tid(struct phl_ring_status *ring_sts,
578 			   struct phl_tx_plan *tx_plan,
579 			   enum rtw_phl_ring_cat cat)
580 {
581 	struct phl_ring_status *last_sts = NULL;
582 
583 	if (ring_sts->ring_ptr->tid == 1) {
584 		list_add_tail(&ring_sts->list,
585 			      &tx_plan->sorted_ring);
586 	} else if (ring_sts->ring_ptr->tid == 2) {
587 		if (list_empty(&tx_plan->sorted_ring)) {
588 			list_add_tail(&ring_sts->list,
589 				      &tx_plan->sorted_ring);
590 		} else {
591 			last_sts = list_last_entry(&tx_plan->sorted_ring,
592 					struct phl_ring_status, list);
593 			if (1 == last_sts->ring_ptr->tid) {
594 				__list_add(&ring_sts->list,
595 					   _get_prev(&last_sts->list),
596 					   &last_sts->list);
597 			} else {
598 				list_add_tail(&ring_sts->list,
599 					      &tx_plan->sorted_ring);
600 			}
601 		}
602 	} else {
603 		list_add(&ring_sts->list,
604 			 &tx_plan->sorted_ring);
605 		if (RTW_PHL_RING_CAT_MGNT == cat)
606 			tx_plan->has_mgnt = true;
607 		else if (RTW_PHL_RING_CAT_HIQ == cat)
608 			tx_plan->has_hiq = true;
609 	}
610 
611 }
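/*
 * Illustrative outcome of the sorting above: when every data ring has
 * traffic and rings are checked in tid order 0..7, tid 1 stays at the tail,
 * tid 2 is kept just in front of it, and all other tids are pushed to the
 * head, yielding 7, 6, 5, 4, 3, 0, 2, 1 (voice first, background last).
 * MGNT/HIQ rings are likewise added at the head so they are serviced before
 * any data tid.
 */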
612 
613 void _phl_check_tring_list(struct phl_info_t *phl_info,
614 			   struct rtw_phl_tring_list *tring_list,
615 			   _os_list *sta_list)
616 {
617 	struct phl_ring_status *ring_sts = NULL;
618 	struct rtw_phl_tx_ring *ring = NULL;
619 	struct phl_tx_plan *tx_plan = &tring_list->tx_plan;
620 	u8 i = 0;
621 
622 	for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
623 
624 		ring = &tring_list->phl_ring[i];
625 
626 		ring_sts = _phl_check_ring_status(phl_info, ring, tring_list);
627 
628 		if (NULL != ring_sts) {
629 			_phl_sort_ring_by_tid(ring_sts, tx_plan, i);
630 		} else {
631 			continue;
632 		}
633 	}
634 	/* hana_todo: check this macid is sleep or not */
635 	if (!list_empty(&tx_plan->sorted_ring)) {
636 		list_add_tail(&tx_plan->list, sta_list);
637 	}
638 }
639 
640 u8 phl_check_xmit_ring_resource(struct phl_info_t *phl_info, _os_list *sta_list)
641 {
642 	void *drvpriv = phl_to_drvpriv(phl_info);
643 	_os_list *tring_list_head = &phl_info->t_ring_list;
644 	struct rtw_phl_tring_list *tring_list, *t;
645 
646 	_os_spinlock(drvpriv, &phl_info->t_ring_list_lock, _bh, NULL);
647 	phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
648 				tring_list_head, list) {
649 		_phl_check_tring_list(phl_info, tring_list, sta_list);
650 	}
651 #ifdef SDIO_TX_THREAD
652 	/**
653 	* When SDIO_TX_THREAD is enabled, the "phl_sw_tx_more" flag is cleared
654 	* in phl_tx_sdio_thrd_hdl() instead of here.
655 	*/
656 #else
657 	_os_atomic_set(drvpriv, &phl_info->phl_sw_tx_more, 0);
658 #endif
659 	_os_spinunlock(drvpriv, &phl_info->t_ring_list_lock, _bh, NULL);
660 
661 	if (true == list_empty(sta_list))
662 		return false;
663 	else
664 		return true;
665 }
666 
667 void phl_tx_flow_ctrl(struct phl_info_t *phl_info, _os_list *sta_list)
668 {
669 	_os_list *t_fctrl_result = &phl_info->t_fctrl_result;
670 	_os_list *tid_entry[MAX_PHL_RING_CAT_NUM] = {0};
671 	struct phl_tx_plan *tx_plan, *tp;
672 	struct phl_ring_status *ring_sts = NULL, *ts;
673 	u8 tid = 0;
674 
675 	_os_mem_set(phl_to_drvpriv(phl_info), tid_entry, 0,
676 		    sizeof(_os_list *) * MAX_PHL_RING_CAT_NUM);
677 
678 	phl_list_for_loop_safe(tx_plan, tp, struct phl_tx_plan, sta_list,
679 			       list) {
680 		/* drop power saving station */
681 		if (true == tx_plan->sleep) {
682 			list_del(&tx_plan->list);
683 			_phl_reset_tx_plan(phl_info, tx_plan);
684 			continue;
685 		}
686 
687 		if (true == tx_plan->has_hiq) {
688 			ring_sts = list_first_entry(&tx_plan->sorted_ring,
689 						struct phl_ring_status, list);
690 			list_del(&ring_sts->list);
691 			list_add(&ring_sts->list, t_fctrl_result);
692 		}
693 
694 		if (true == tx_plan->has_mgnt) {
695 			ring_sts = list_first_entry(&tx_plan->sorted_ring,
696 						struct phl_ring_status, list);
697 			list_del(&ring_sts->list);
698 			list_add(&ring_sts->list, t_fctrl_result);
699 		}
700 
701 		/* todo: drop station which has reached tx limit */
702 
703 		phl_list_for_loop_safe(ring_sts, ts, struct phl_ring_status,
704 				       &tx_plan->sorted_ring, list) {
705 			list_del(&ring_sts->list);
706 			tid = ring_sts->ring_ptr->tid;
707 			/* todo: drop tid which has reached tx limit */
708 			/* sw tx cnt limit */
709 			if (NULL == tid_entry[tid]) {
710 				list_add_tail(&ring_sts->list, t_fctrl_result);
711 			} else {
712 				__list_add(&ring_sts->list, tid_entry[tid],
713 					   _get_next(tid_entry[tid]));
714 			}
715 			tid_entry[tid] = &ring_sts->list;
716 		}
717 
718 		/* clear tx plan */
719 		list_del(&tx_plan->list);
720 		_phl_reset_tx_plan(phl_info, tx_plan);
721 	}
722 }
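/*
 * Grouping behaviour of tid_entry[] above: tid_entry[tid] remembers the
 * last node queued for that tid across stations, so if station A and
 * station B both have tid-5 traffic, B's tid-5 ring is linked right behind
 * A's entry instead of at the list tail, keeping rings of the same tid
 * adjacent in t_fctrl_result.
 */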
723 
724 enum rtw_phl_status phl_register_handler(struct rtw_phl_com_t *phl_com,
725 					 struct rtw_phl_handler *handler)
726 {
727 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
728 	_os_tasklet *tasklet = NULL;
729 	_os_workitem *workitem = NULL;
730 	void *drv_priv = phlcom_to_drvpriv(phl_com);
731 
732 	FUNCIN_WSTS(phl_status);
733 
734 	if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
735 		tasklet = &handler->os_handler.u.tasklet;
736 		phl_status = _os_tasklet_init(drv_priv, tasklet,
737 										handler->callback, handler);
738 	} else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
739 		workitem = &handler->os_handler.u.workitem;
740 		phl_status = _os_workitem_init(drv_priv, workitem,
741 										handler->callback, workitem);
742 	} else {
743 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handle type(%d)\n",
744 				handler->type);
745 	}
746 
747 	if (RTW_PHL_STATUS_SUCCESS != phl_status)
748 		phl_deregister_handler(phl_com, handler);
749 
750 	FUNCOUT_WSTS(phl_status);
751 	return phl_status;
752 }
753 
754 enum rtw_phl_status phl_deregister_handler(
755 	struct rtw_phl_com_t *phl_com, struct rtw_phl_handler *handler)
756 {
757 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
758 	_os_tasklet *tasklet = NULL;
759 	_os_workitem *workitem = NULL;
760 	void *drv_priv = phlcom_to_drvpriv(phl_com);
761 
762 	FUNCIN_WSTS(phl_status);
763 
764 	if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
765 		tasklet = &handler->os_handler.u.tasklet;
766 		phl_status = _os_tasklet_deinit(drv_priv, tasklet);
767 	} else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
768 		workitem = &handler->os_handler.u.workitem;
769 		phl_status = _os_workitem_deinit(drv_priv, workitem);
770 	} else {
771 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handle type(%d)\n",
772 				handler->type);
773 	}
774 
775 	if (RTW_PHL_STATUS_SUCCESS != phl_status) {
776 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
777 			"[WARNING] deregister handler fail (status = 0x%08X)\n",
778 			phl_status);
779 	}
780 
781 	FUNCOUT_WSTS(phl_status);
782 	return phl_status;
783 }
784 
785 enum rtw_phl_status phl_schedule_handler(
786 	struct rtw_phl_com_t *phl_com, struct rtw_phl_handler *handler)
787 {
788 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
789 	_os_tasklet *tasklet = NULL;
790 	_os_workitem *workitem = NULL;
791 	void *drv_priv = phlcom_to_drvpriv(phl_com);
792 
793 	FUNCIN_WSTS(phl_status);
794 
795 	if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
796 		tasklet = &handler->os_handler.u.tasklet;
797 		phl_status = _os_tasklet_schedule(drv_priv, tasklet);
798 	} else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
799 		workitem = &handler->os_handler.u.workitem;
800 		phl_status = _os_workitem_schedule(drv_priv, workitem);
801 	} else {
802 		PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handle type(%d)\n",
803 				handler->type);
804 	}
805 
806 	FUNCOUT_WSTS(phl_status);
807 	return phl_status;
808 }
809 
810 static enum rtw_phl_status enqueue_h2c_pkt(
811 					struct phl_info_t *phl_info,
812 					struct phl_queue	*pool_list,
813 					struct rtw_h2c_pkt *h2c_pkt, u8 pos)
814 {
815 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
816 	void *drv = phl_to_drvpriv(phl_info);
817 	_os_spinlockfg sp_flags;
818 
819 
820 	if (h2c_pkt != NULL) {
821 		_os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
822 		if (_tail == pos)
823 			list_add_tail(&h2c_pkt->list, &pool_list->queue);
824 		else if (_first == pos)
825 			list_add(&h2c_pkt->list, &pool_list->queue);
826 		pool_list->cnt++;
827 		_os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
828 
829 		pstatus = RTW_PHL_STATUS_SUCCESS;
830 	}
831 
832 	return pstatus;
833 }
834 
835 static struct rtw_h2c_pkt *dequeue_h2c_pkt(struct phl_info_t *phl_info,
836 	struct phl_queue *pool_list)
837 {
838 	struct rtw_h2c_pkt *h2c_pkt = NULL;
839 	void *drv = phl_to_drvpriv(phl_info);
840 	_os_spinlockfg sp_flags;
841 
842 	_os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
843 	if (list_empty(&pool_list->queue)) {
844 		h2c_pkt = NULL;
845 	} else {
846 		h2c_pkt = list_first_entry(&pool_list->queue, struct rtw_h2c_pkt, list);
847 
848 		list_del(&h2c_pkt->list);
849 		pool_list->cnt--;
850 	}
851 	_os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
852 	return h2c_pkt;
853 }
854 
855 static void _phl_reset_h2c_pkt(struct phl_info_t *phl_info,
856 							struct rtw_h2c_pkt *h2c_pkt,
857 							u32 buf_len)
858 {
859 	enum rtw_h2c_pkt_type type = h2c_pkt->type;
860 
861 	_os_mem_set(phl_to_drvpriv(phl_info), h2c_pkt->vir_head, 0, buf_len);
862 	h2c_pkt->buf_len = buf_len;
863 	h2c_pkt->id = 0;
864 	h2c_pkt->host_idx = 0;
865 	h2c_pkt->data_len = 0;
866 	h2c_pkt->h2c_seq = 0;
867 
868 	switch (type) {
869 	case H2CB_TYPE_CMD:
870 		h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
871 		h2c_pkt->vir_tail = h2c_pkt->vir_data;
872 		h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_CMD_LEN;
873 		break;
874 	case H2CB_TYPE_DATA:
875 		h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
876 		h2c_pkt->vir_tail = h2c_pkt->vir_data;
877 		h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_DATA_LEN;
878 		break;
879 	case H2CB_TYPE_LONG_DATA:
880 		h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
881 		h2c_pkt->vir_tail = h2c_pkt->vir_data;
882 		h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_LONG_DATA_LEN;
883 		break;
884 	case H2CB_TYPE_MAX:
885 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_reset_h2c_pkt(): Unsupported case:%d, please check it\n",
886 				type);
887 		break;
888 	default:
889 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_reset_h2c_pkt(): Unrecognize case:%d, please check it\n",
890 				type);
891 		break;
892 	}
893 
894 }
895 
896 enum rtw_phl_status phl_enqueue_busy_h2c_pkt(struct phl_info_t *phl_info,
897 				struct rtw_h2c_pkt *h2c_pkt, u8 pos)
898 {
899 	struct phl_h2c_pkt_pool *h2c_pkt_pool =
900 		(struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
901 	struct phl_queue *queue = &h2c_pkt_pool->busy_h2c_pkt_list;
902 
903 	return enqueue_h2c_pkt(phl_info, queue, h2c_pkt, pos);
904 }
905 
906 enum rtw_phl_status phl_enqueue_idle_h2c_pkt(
907 				struct phl_info_t *phl_info,
908 				struct rtw_h2c_pkt *h2c_pkt)
909 {
910 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
911 	struct phl_h2c_pkt_pool *h2c_pkt_pool =
912 		(struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
913 	struct phl_queue *queue = NULL;
914 	int *idle_cnt = NULL;
915 	u32 buf_len = 0;
916 
917 	if (!h2c_pkt)
918 		return pstatus;
919 
920 	switch (h2c_pkt->type) {
921 	case H2CB_TYPE_CMD:
922 		buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_CMD_LEN;
923 		queue = &h2c_pkt_pool->idle_h2c_pkt_cmd_list;
924 		idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt;
925 		break;
926 	case H2CB_TYPE_DATA:
927 		buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_DATA_LEN;
928 		queue = &h2c_pkt_pool->idle_h2c_pkt_data_list;
929 		idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_data_list.cnt;
930 		break;
931 	case H2CB_TYPE_LONG_DATA:
932 		buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_LONG_DATA_LEN;
933 		queue = &h2c_pkt_pool->idle_h2c_pkt_ldata_list;
934 		idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt;
935 		break;
936 	case H2CB_TYPE_MAX:
937 		PHL_ERR("%s : cannot find the matching case(%d).\n",
938 				__func__, h2c_pkt->type);
939 		break;
940 	default:
941 		PHL_ERR("%s : cannot find the matching cases(%d).\n",
942 				__func__, h2c_pkt->type);
943 		break;
944 	}
945 
946 	_phl_reset_h2c_pkt(phl_info, h2c_pkt, buf_len);
947 
948 	pstatus = enqueue_h2c_pkt(phl_info, queue, h2c_pkt, _tail);
949 
950 	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : remaining %d (type %d).\n",
951 			  __func__, *idle_cnt, h2c_pkt->type);
952 
953 	return pstatus;
954 }
955 
956 struct rtw_h2c_pkt *phl_query_busy_h2c_pkt(struct phl_info_t *phl_info)
957 {
958 	struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
959 	struct rtw_h2c_pkt *h2c_pkt = NULL;
960 	struct phl_queue *queue = NULL;
961 
962 	h2c_pkt_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
963 	queue = &h2c_pkt_pool->busy_h2c_pkt_list;
964 
965 	h2c_pkt = dequeue_h2c_pkt(phl_info, queue);
966 
967 	return h2c_pkt;
968 }
969 
970 struct rtw_h2c_pkt *phl_query_idle_h2c_pkt(struct phl_info_t *phl_info, u8 type)
971 {
972 	struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
973 	struct rtw_h2c_pkt *h2c_pkt = NULL;
974 	enum rtw_h2c_pkt_type h2c_type = (enum rtw_h2c_pkt_type)type;
975 	struct phl_queue *queue = NULL;
976 	int *idle_cnt = NULL;
977 
978 	h2c_pkt_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
979 
980 	switch (h2c_type) {
981 	case H2CB_TYPE_CMD:
982 		queue = &h2c_pkt_pool->idle_h2c_pkt_cmd_list;
983 		idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt;
984 		break;
985 	case H2CB_TYPE_DATA:
986 		queue = &h2c_pkt_pool->idle_h2c_pkt_data_list;
987 		idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_data_list.cnt;
988 		break;
989 	case H2CB_TYPE_LONG_DATA:
990 		queue = &h2c_pkt_pool->idle_h2c_pkt_ldata_list;
991 		idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt;
992 		break;
993 	case H2CB_TYPE_MAX:
994 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_query_idle_h2c_pkt(): Unsupported case:%d, please check it\n",
995 				h2c_type);
996 		break;
997 	default:
998 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_query_idle_h2c_pkt(): Unrecognize case:%d, please check it\n",
999 				h2c_type);
1000 		break;
1001 	}
1002 	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,
1003 		  "phl_query_idle_h2c_pkt => remaining %d (type %d).\n",
1004 		  *idle_cnt, h2c_type);
1005 
1006 	h2c_pkt = dequeue_h2c_pkt(phl_info, queue);
1007 
1008 	return h2c_pkt;
1009 }
1010 
1011 #if 0
1012 static enum rtw_phl_status phl_release_target_h2c_pkt(
1013 					struct phl_info_t *phl_info,
1014 					struct phl_h2c_pkt_pool *h2c_pkt_pool,
1015 					struct rtw_h2c_pkt *h2c_pkt)
1016 {
1017 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1018 
1019 	if (h2c_pkt_pool != NULL && h2c_pkt != NULL) {
1020 		phl_enqueue_idle_h2c_pkt(phl_info, h2c_pkt);
1021 		pstatus = RTW_PHL_STATUS_SUCCESS;
1022 	}
1023 
1024 	return pstatus;
1025 }
1026 #endif
1027 
1028 static void _phl_free_h2c_pkt(struct phl_info_t *phl_info,
1029 					struct rtw_h2c_pkt *h2c_pkt_buf)
1030 {
1031 	u16 i = 0;
1032 	struct rtw_h2c_pkt *h2c_pkt = h2c_pkt_buf;
1033 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1034 
1035 	if (NULL != h2c_pkt) {
1036 		for (i = 0; i < MAX_H2C_PKT_NUM; i++) {
1037 			if (NULL == h2c_pkt->vir_head)
1038 				continue;
1039 			hci_trx_ops->free_h2c_pkt_buf(phl_info, h2c_pkt);
1040 			h2c_pkt->vir_head = NULL;
1041 			h2c_pkt->cache = false;
1042 			h2c_pkt++;
1043 		}
1044 
1045 		_os_mem_free(phl_to_drvpriv(phl_info), h2c_pkt_buf,
1046 					sizeof(struct rtw_h2c_pkt) * MAX_H2C_PKT_NUM);
1047 		h2c_pkt_buf = NULL;
1048 	}
1049 }
1050 
1051 struct rtw_h2c_pkt *_phl_alloc_h2c_pkt(struct phl_info_t *phl_info,
1052 	struct phl_h2c_pkt_pool *h2c_pool)
1053 {
1054 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1055 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1056 	struct rtw_h2c_pkt *h2c_pkt = NULL;
1057 	struct rtw_h2c_pkt *h2c_pkt_root = NULL;
1058 	struct phl_h2c_pkt_pool *h2c_pkt_pool = h2c_pool;
1059 	u32 buf_len = 0;
1060 	int i;
1061 
1062 	buf_len = sizeof(struct rtw_h2c_pkt) * MAX_H2C_PKT_NUM;
1063 	h2c_pkt_root = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
1064 	h2c_pkt = h2c_pkt_root;
1065 	if (h2c_pkt != NULL) {
1066 		for (i = 0; i < MAX_H2C_PKT_NUM; i++) {
1067 			h2c_pkt->cache = false;
1068 			buf_len = get_h2c_size_by_range(i);
1069 			hci_trx_ops->alloc_h2c_pkt_buf(phl_info, h2c_pkt, buf_len);
1070 
1071 			if (NULL == h2c_pkt->vir_head) {
1072 				pstatus = RTW_PHL_STATUS_RESOURCE;
1073 				break;
1074 			}
1075 
1076 			h2c_pkt->buf_len = buf_len;
1077 			h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
1078 			h2c_pkt->vir_tail = h2c_pkt->vir_data;
1079 			INIT_LIST_HEAD(&h2c_pkt->list);
1080 			if (i < _H2CB_CMD_QLEN) {
1081 				h2c_pkt->type = H2CB_TYPE_CMD;
1082 				h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_CMD_LEN;
1083 				enqueue_h2c_pkt(phl_info,
1084 					&h2c_pkt_pool->idle_h2c_pkt_cmd_list, h2c_pkt, _tail);
1085 			} else if (i < _H2CB_CMD_QLEN + _H2CB_DATA_QLEN) {
1086 				h2c_pkt->type = H2CB_TYPE_DATA;
1087 				h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_DATA_LEN;
1088 				enqueue_h2c_pkt(phl_info,
1089 					&h2c_pkt_pool->idle_h2c_pkt_data_list, h2c_pkt, _tail);
1090 			} else {
1091 				h2c_pkt->type = H2CB_TYPE_LONG_DATA;
1092 				h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_LONG_DATA_LEN;
1093 				enqueue_h2c_pkt(phl_info,
1094 					&h2c_pkt_pool->idle_h2c_pkt_ldata_list, h2c_pkt, _tail);
1095 			}
1096 			h2c_pkt++;
1097 			pstatus = RTW_PHL_STATUS_SUCCESS;
1098 		}
1099 	}
1100 
1101 	if (RTW_PHL_STATUS_SUCCESS != pstatus) {
1102 		_phl_free_h2c_pkt(phl_info, h2c_pkt_root);
1103 		h2c_pkt_root = NULL;
1104 	}
1105 
1106 	return h2c_pkt_root;
1107 }
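/*
 * Pool layout produced above: entries [0, _H2CB_CMD_QLEN) of h2c_pkt_buf go
 * to the idle CMD queue, the next _H2CB_DATA_QLEN entries to the idle DATA
 * queue, and the remainder (up to MAX_H2C_PKT_NUM) to the idle LONG_DATA
 * queue. In every case vir_data is reserved FWCMD_HDR_LEN + _WD_BODY_LEN
 * bytes past vir_head for the fwcmd header and WD body.
 */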
1108 
1109 static void _phl_free_h2c_pool(struct phl_info_t *phl_info)
1110 {
1111 	struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
1112 	void *drv_priv = phl_to_drvpriv(phl_info);
1113 
1114 	FUNCIN();
1115 
1116 	h2c_pkt_pool = phl_info->h2c_pool;
1117 	if (NULL != h2c_pkt_pool) {
1118 		h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt = 0;
1119 		h2c_pkt_pool->idle_h2c_pkt_data_list.cnt = 0;
1120 		h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt = 0;
1121 
1122 		_phl_free_h2c_pkt(phl_info, h2c_pkt_pool->h2c_pkt_buf);
1123 		h2c_pkt_pool->h2c_pkt_buf = NULL;
1124 		_os_spinlock_free(drv_priv,
1125 					&h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
1126 		_os_spinlock_free(drv_priv,
1127 					&h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
1128 		_os_spinlock_free(drv_priv,
1129 					&h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
1130 		_os_spinlock_free(drv_priv,
1131 					&h2c_pkt_pool->busy_h2c_pkt_list.lock);
1132 		_os_spinlock_free(drv_priv,
1133 					&h2c_pkt_pool->recycle_lock);
1134 		_os_mem_free(phl_to_drvpriv(phl_info), h2c_pkt_pool,
1135 					sizeof(struct phl_h2c_pkt_pool));
1136 	}
1137 	FUNCOUT();
1138 }
1139 
1140 enum rtw_phl_status
1141 _phl_alloc_h2c_pool(struct phl_info_t *phl_info)
1142 {
1143 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1144 	struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
1145 	struct rtw_h2c_pkt *h2c_pkt_buf = NULL;
1146 	void *drv_priv = NULL;
1147 
1148 	FUNCIN_WSTS(pstatus);
1149 	drv_priv = phl_to_drvpriv(phl_info);
1150 
1151 	h2c_pkt_pool = _os_mem_alloc(drv_priv, sizeof(struct phl_h2c_pkt_pool));
1152 	if (NULL != h2c_pkt_pool) {
1153 
1154 		INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_cmd_list.queue);
1155 		h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt = 0;
1156 
1157 		INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_data_list.queue);
1158 		h2c_pkt_pool->idle_h2c_pkt_data_list.cnt = 0;
1159 
1160 		INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_ldata_list.queue);
1161 		h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt = 0;
1162 
1163 		INIT_LIST_HEAD(&h2c_pkt_pool->busy_h2c_pkt_list.queue);
1164 		h2c_pkt_pool->busy_h2c_pkt_list.cnt = 0;
1165 
1166 		_os_spinlock_init(drv_priv,
1167 					&h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
1168 		_os_spinlock_init(drv_priv,
1169 					&h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
1170 		_os_spinlock_init(drv_priv,
1171 					&h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
1172 		_os_spinlock_init(drv_priv,
1173 					&h2c_pkt_pool->busy_h2c_pkt_list.lock);
1174 		_os_spinlock_init(drv_priv,
1175 					&h2c_pkt_pool->recycle_lock);
1176 
1177 		h2c_pkt_buf = _phl_alloc_h2c_pkt(phl_info, h2c_pkt_pool);
1178 
1179 		if (NULL == h2c_pkt_buf) {
1180 			_os_spinlock_free(drv_priv,
1181 					&h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
1182 			_os_spinlock_free(drv_priv,
1183 					&h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
1184 			_os_spinlock_free(drv_priv,
1185 					&h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
1186 			_os_spinlock_free(drv_priv,
1187 					&h2c_pkt_pool->busy_h2c_pkt_list.lock);
1188 			_os_spinlock_free(drv_priv,
1189 					&h2c_pkt_pool->recycle_lock);
1190 			_os_mem_free(drv_priv, h2c_pkt_pool, sizeof(struct phl_h2c_pkt_pool));
1191 			h2c_pkt_pool = NULL;
1192 			pstatus = RTW_PHL_STATUS_RESOURCE;
1193 		} else {
1194 			h2c_pkt_pool->h2c_pkt_buf = h2c_pkt_buf;
1195 			pstatus = RTW_PHL_STATUS_SUCCESS;
1196 		}
1197 	}
1198 
1199 	if (RTW_PHL_STATUS_SUCCESS == pstatus)
1200 		phl_info->h2c_pool = h2c_pkt_pool;
1201 
1202 	FUNCOUT_WSTS(pstatus);
1203 
1204 	return pstatus;
1205 }
1206 
1207 void
1208 phl_trx_free_handler(void *phl)
1209 {
1210 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1211 	struct rtw_phl_handler *tx_handler = &phl_info->phl_tx_handler;
1212 	struct rtw_phl_handler *rx_handler = &phl_info->phl_rx_handler;
1213 	struct rtw_phl_handler *event_handler = &phl_info->phl_event_handler;
1214 
1215 	FUNCIN();
1216 
1217 	phl_deregister_handler(phl_info->phl_com, event_handler);
1218 	phl_deregister_handler(phl_info->phl_com, rx_handler);
1219 	phl_deregister_handler(phl_info->phl_com, tx_handler);
1220 
1221 	FUNCOUT();
1222 }
1223 
1224 void
1225 phl_trx_free_sw_rsc(void *phl)
1226 {
1227 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1228 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1229 	void *drv_priv = NULL;
1230 
1231 	FUNCIN();
1232 
1233 	drv_priv = phl_to_drvpriv(phl_info);
1234 
1235 	_phl_free_h2c_pool(phl_info);
1236 
1237 	hci_trx_ops->hci_trx_deinit(phl_info);
1238 
1239 	phl_rx_deinit(phl_info);
1240 
1241 	_phl_ring_status_deinit(phl_info);
1242 
1243 	_os_spinlock_free(drv_priv, &phl_info->t_ring_list_lock);
1244 	_os_spinlock_free(drv_priv, &phl_info->rx_ring_lock);
1245 	_os_spinlock_free(drv_priv, &phl_info->t_fctrl_result_lock);
1246 	_os_spinlock_free(drv_priv, &phl_info->t_ring_free_list_lock);
1247 
1248 	FUNCOUT();
1249 }
1250 
1251 enum rtw_phl_status phl_datapath_start(struct phl_info_t *phl_info)
1252 {
1253 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1254 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1255 
1256 	do {
1257 		pstatus = hci_trx_ops->trx_cfg(phl_info);
1258 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
1259 			break;
1260 		rtw_hal_notification(phl_info->hal, MSG_EVT_DATA_PATH_START, HW_BAND_MAX);
1261 	} while (false);
1262 
1263 	return pstatus;
1264 }
1265 
1266 void phl_datapath_stop(struct phl_info_t *phl_info)
1267 {
1268 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1269 
1270 	hci_trx_ops->trx_stop(phl_info);
1271 	rtw_hal_notification(phl_info->hal, MSG_EVT_DATA_PATH_STOP, HW_BAND_MAX);
1272 	phl_free_deferred_tx_ring(phl_info);
1273 }
1274 
1275 void phl_datapath_deinit(struct phl_info_t *phl_info)
1276 {
1277 	phl_trx_free_handler(phl_info);
1278 	phl_trx_free_sw_rsc(phl_info);
1279 }
1280 
1281 enum rtw_phl_status phl_datapath_init(struct phl_info_t *phl_info)
1282 {
1283 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1284 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1285 	struct rtw_phl_handler *event_handler = &phl_info->phl_event_handler;
1286 	void *drv_priv = NULL;
1287 	FUNCIN_WSTS(pstatus);
1288 	drv_priv = phl_to_drvpriv(phl_info);
1289 
1290 	do {
1291 #ifdef CONFIG_PHL_CPU_BALANCE_RX
1292 		_os_workitem *workitem = &event_handler->os_handler.u.workitem;
1293 #endif
1294 		INIT_LIST_HEAD(&phl_info->t_ring_list);
1295 		INIT_LIST_HEAD(&phl_info->t_fctrl_result);
1296 		INIT_LIST_HEAD(&phl_info->t_ring_free_list);
1297 		_os_spinlock_init(drv_priv, &phl_info->t_ring_list_lock);
1298 		_os_spinlock_init(drv_priv, &phl_info->rx_ring_lock);
1299 		_os_spinlock_init(drv_priv, &phl_info->t_fctrl_result_lock);
1300 		_os_spinlock_init(drv_priv, &phl_info->t_ring_free_list_lock);
1301 
1302 #ifdef CONFIG_PHL_CPU_BALANCE_RX
1303 		event_handler->type = RTW_PHL_HANDLER_PRIO_LOW;
1304 		_os_workitem_config_cpu(drv_priv, workitem, "RX_PHL_0", CPU_ID_RX_CORE_0);
1305 #else
1306 		event_handler->type = RTW_PHL_HANDLER_PRIO_HIGH;
1307 #endif
1308 		event_handler->callback = phl_event_indicator;
1309 		event_handler->context = phl_info;
1310 		event_handler->drv_priv = drv_priv;
1311 		event_handler->status = 0;
1312 		pstatus = phl_register_handler(phl_info->phl_com, event_handler);
1313 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
1314 			break;
1315 
1316 		pstatus = _phl_ring_status_init(phl_info);
1317 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
1318 			break;
1319 
1320 		pstatus = phl_rx_init(phl_info);
1321 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
1322 			break;
1323 
1324 		pstatus = hci_trx_ops->hci_trx_init(phl_info);
1325 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
1326 			break;
1327 
1328 		/* allocate h2c pkt */
1329 		pstatus = _phl_alloc_h2c_pool(phl_info);
1330 		if (RTW_PHL_STATUS_SUCCESS != pstatus)
1331 			break;
1332 
1333 	} while (false);
1334 
1335 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
1336 		phl_datapath_deinit(phl_info);
1337 
1338 	FUNCOUT_WSTS(pstatus);
1339 
1340 	return pstatus;
1341 }
1342 
1343 static enum rtw_phl_status
1344 _phl_tx_pwr_notify(void *phl)
1345 {
1346 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
1347 
1348 #ifdef SDIO_TX_THREAD
1349 	phl_tx_sdio_wake_thrd((struct phl_info_t *)phl);
1350 #else
1351 	pstatus = rtw_phl_tx_req_notify(phl);
1352 #endif
1353 
1354 	return pstatus;
1355 }
1356 #ifdef CONFIG_POWER_SAVE
1357 static void _phl_req_pwr_cb(void *priv, struct phl_msg *msg)
1358 {
1359 	struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1360 
1361 	if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_TX)
1362 		_os_atomic_set(phl_to_drvpriv(phl_info),
1363 		               &phl_info->phl_sw_tx_req_pwr,
1364 		               0);
1365 	else
1366 		_os_atomic_set(phl_to_drvpriv(phl_info),
1367 		               &phl_info->phl_sw_rx_req_pwr,
1368 		               0);
1369 
1370 	if (IS_MSG_FAIL(msg->msg_id) || IS_MSG_CANCEL(msg->msg_id)) {
1371 		PHL_WARN("%s(): request power failure.\n", __func__);
1372 		return;
1373 	}
1374 
1375 	if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_TX)
1376 		_phl_tx_pwr_notify(priv);
1377 	else if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_RX)
1378 		rtw_phl_start_rx_process(priv);
1379 }
1380 
1381 static void _phl_datapath_req_pwr(struct phl_info_t *phl_info, u8 type)
1382 {
1383 	enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
1384 	struct phl_msg msg = {0};
1385 	struct phl_msg_attribute attr = {0};
1386 
1387 	PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
1388 	          "%s(): [DATA_CTRL] SW datapath paused by ps module and request power\n",
1389 	          __func__);
1390 
1391 	SET_MSG_MDL_ID_FIELD(msg.msg_id, ((type == PHL_CTRL_TX) ? PHL_MDL_TX : PHL_MDL_RX));
1392 	SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_TRX_PWR_REQ);
1393 
1394 	attr.completion.completion = _phl_req_pwr_cb;
1395 	attr.completion.priv = phl_info;
1396 
1397 	/* shall set req_pwr flag first before sending req_pwr msg */
1398 	if (PHL_CTRL_TX == type)
1399 		_os_atomic_set(phl_to_drvpriv(phl_info),
1400 		               &phl_info->phl_sw_tx_req_pwr,
1401 		               1);
1402 	else
1403 		_os_atomic_set(phl_to_drvpriv(phl_info),
1404 		               &phl_info->phl_sw_rx_req_pwr,
1405 		               1);
1406 
1407 	psts = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
1408 	if (RTW_PHL_STATUS_SUCCESS != psts) {
1409 		PHL_WARN("%s(): CANNOT send msg to request power.\n", __func__);
1410 		if (PHL_CTRL_TX == type)
1411 			_os_atomic_set(phl_to_drvpriv(phl_info),
1412 			               &phl_info->phl_sw_tx_req_pwr,
1413 			               0);
1414 		else
1415 			_os_atomic_set(phl_to_drvpriv(phl_info),
1416 			               &phl_info->phl_sw_rx_req_pwr,
1417 			               0);
1418 	}
1419 }
1420 
1421 static bool _phl_datapath_chk_pwr(struct phl_info_t *phl_info, u8 type)
1422 {
1423 	void *drvpriv = phl_to_drvpriv(phl_info);
1424 	enum data_ctrl_mdl pause_id = 0;
1425 	_os_atomic *trx_more;
1426 	_os_atomic *req_pwr;
1427 
1428 	if (type == PHL_CTRL_TX) {
1429 		pause_id = phl_info->pause_tx_id;
1430 		trx_more = &phl_info->phl_sw_tx_more;
1431 		req_pwr = &phl_info->phl_sw_tx_req_pwr;
1432 	} else {
1433 		pause_id = phl_info->pause_rx_id;
1434 		trx_more = &phl_info->phl_sw_rx_more;
1435 		req_pwr = &phl_info->phl_sw_rx_req_pwr;
1436 	}
1437 
1438 	if (pause_id & ~(DATA_CTRL_MDL_PS)) {
1439 		PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
1440 		          "%s(): [DATA_CTRL] SW datapath paused by module(0x%x)\n",
1441 		          __func__,
1442 		          pause_id);
1443 		return false;
1444 	}
1445 
1446 	if (true == _os_atomic_read(drvpriv, trx_more) &&
1447 	    false == _os_atomic_read(drvpriv, req_pwr))
1448 		_phl_datapath_req_pwr(phl_info, type);
1449 
1450 	return true;
1451 }
1452 #endif
1453 bool phl_datapath_chk_trx_pause(struct phl_info_t *phl_info, u8 type)
1454 {
1455 	void *drvpriv = phl_to_drvpriv(phl_info);
1456 	_os_atomic *sw_sts;
1457 
1458 	if (type == PHL_CTRL_TX)
1459 		sw_sts = &phl_info->phl_sw_tx_sts;
1460 	else
1461 		sw_sts = &phl_info->phl_sw_rx_sts;
1462 
1463 	if (PHL_TX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv, sw_sts)) {
1464 #ifdef CONFIG_POWER_SAVE
1465 		_phl_datapath_chk_pwr(phl_info, type);
1466 #endif
1467 		return true;
1468 	}
1469 
1470 	return false;
1471 }
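/*
 * Flow sketch for the power-save case above (CONFIG_POWER_SAVE): when the
 * SW path is paused by the PS module and more traffic is pending,
 * _phl_datapath_req_pwr() sends MSG_EVT_TRX_PWR_REQ; its completion
 * callback _phl_req_pwr_cb() then re-kicks the corresponding path via
 * _phl_tx_pwr_notify() or rtw_phl_start_rx_process().
 */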
1472 
1473 void rtw_phl_tx_stop(void *phl)
1474 {
1475 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1476 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1477 
1478 	/* Pause SW Tx */
1479 	hci_trx_ops->req_tx_stop(phl_info);
1480 }
1481 
1482 void rtw_phl_tx_resume(void *phl)
1483 {
1484 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1485 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1486 
1487 	/* Resume SW Tx */
1488 	hci_trx_ops->trx_resume(phl_info, PHL_CTRL_TX);
1489 }
1490 
1491 
1492 enum rtw_phl_status rtw_phl_tx_req_notify(void *phl)
1493 {
1494 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1495 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1496 
1497 	pstatus = phl_schedule_handler(phl_info->phl_com,
1498 					&phl_info->phl_tx_handler);
1499 
1500 	return pstatus;
1501 }
1502 
1503 enum rtw_phl_status rtw_phl_add_tx_req(void *phl,
1504 				struct rtw_xmit_req *tx_req)
1505 {
1506 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1507 	struct rtw_phl_tring_list *tring_list, *t;
1508 	struct rtw_phl_tx_ring *ring = NULL;
1509 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1510 	void *drv_priv = NULL;
1511 	_os_list *list_head = &phl_info->t_ring_list;
1512 	u16 macid = tx_req->mdata.macid;
1513 	u8 tid = tx_req->mdata.tid;
1514 	u16 ring_res = 0, rptr = 0;
1515 
1516 	drv_priv = phl_to_drvpriv(phl_info);
1517 
1518 	_os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
1519 
1520 	phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
1521 				list_head, list) {
1522 		if (macid != tring_list->macid) {
1523 			continue;
1524 		} else {
1525 			/* hana_todo check mgnt frame case */
1526 			ring = &tring_list->phl_ring[tid];
1527 			break;
1528 		}
1529 	}
1530 
1531 	if (NULL != ring) {
1532 		rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1533 
1534 		ring_res = phl_calc_avail_wptr(rptr, ring->core_idx,
1535 						MAX_PHL_RING_ENTRY_NUM);
1536 		if (ring_res > 0) {
1537 			ring->core_idx =
1538 				(ring->core_idx + 1) % MAX_PHL_RING_ENTRY_NUM;
1539 			ring->entry[ring->core_idx] = (u8 *)tx_req;
1540 			phl_tx_statistics(phl_info, tx_req);
1541 #ifdef CONFIG_PHL_TX_DBG
1542 			if (tx_req->tx_dbg.en_dbg) {
1543 				tx_req->tx_dbg.core_add_tx_t =
1544 						_os_get_cur_time_us();
1545 			}
1546 #endif /* CONFIG_PHL_TX_DBG */
1547 			_os_atomic_set(drv_priv, &phl_info->phl_sw_tx_more, 1);
1548 			pstatus = RTW_PHL_STATUS_SUCCESS;
1549 		} else {
1550 			PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "no ring resource to add new tx request!\n");
1551 			pstatus = RTW_PHL_STATUS_RESOURCE;
1552 		}
1553 	}
1554 
1555 	_os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
1556 
1557 	return pstatus;
1558 }
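/*
 * Typical core-layer usage (a sketch; the actual call site lives in the
 * core driver, not in this file):
 *
 *	if (rtw_phl_add_tx_req(phl, tx_req) == RTW_PHL_STATUS_SUCCESS)
 *		rtw_phl_tx_req_notify(phl);
 *
 * rtw_phl_tx_req_notify() schedules phl_info->phl_tx_handler through
 * phl_schedule_handler(), so the queued request is picked up by the
 * registered tx handler callback.
 */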
1559 
1560 u16 rtw_phl_tring_rsc(void *phl, u16 macid, u8 tid)
1561 {
1562 	struct rtw_phl_tring_list *tring_list, *t;
1563 	struct rtw_phl_tx_ring *ring = NULL;
1564 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1565 	void *drv_priv = NULL;
1566 	_os_list *list_head = &phl_info->t_ring_list;
1567 	u16 ring_res = 0, rptr = 0;
1568 
1569 	drv_priv = phl_to_drvpriv(phl_info);
1570 
1571 	phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
1572 				list_head, list) {
1573 		if (macid != tring_list->macid) {
1574 			continue;
1575 		} else {
1576 			/* hana_todo check mgnt frame case */
1577 			ring = &tring_list->phl_ring[tid];
1578 			break;
1579 		}
1580 	}
1581 
1582 	if (NULL != ring) {
1583 		rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1584 
1585 		ring_res = phl_calc_avail_rptr(rptr, ring->core_idx,
1586 						MAX_PHL_RING_ENTRY_NUM);
1587 
1588 	}
1589 
1590 	return ring_res;
1591 }
1592 
1593 
1594 enum rtw_phl_status phl_indic_pkt_complete(void *phl)
1595 {
1596 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1597 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1598 	struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
1599 	void *drv_priv = phl_to_drvpriv(phl_info);
1600 
1601 	do {
1602 		_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1603 		evt_info->evt_bitmap |= RTW_PHL_EVT_TX_RECYCLE;
1604 		_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1605 
1606 		pstatus = phl_schedule_handler(phl_info->phl_com,
1607 							&phl_info->phl_event_handler);
1608 	} while (false);
1609 
1610 	return pstatus;
1611 }
1612 
1613 enum rtw_phl_status rtw_phl_recycle_tx_buf(void *phl, u8 *tx_buf_ptr)
1614 {
1615 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1616 #ifdef CONFIG_USB_HCI
1617 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1618 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1619 
1620 	pstatus = hci_trx_ops->recycle_tx_buf(phl, tx_buf_ptr);
1621 
1622 #endif
1623 	return pstatus;
1624 }
1625 
1626 
1627 static enum rtw_phl_status
1628 _phl_cfg_tx_ampdu(void *phl, struct rtw_phl_stainfo_t *sta)
1629 {
1630 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1631 	enum rtw_hal_status hsts = RTW_HAL_STATUS_FAILURE;
1632 
1633 	hsts = rtw_hal_cfg_tx_ampdu(phl_info->hal, sta);
1634 	if (RTW_HAL_STATUS_SUCCESS != hsts)
1635 		goto fail;
1636 
1637 	return RTW_PHL_STATUS_SUCCESS;
1638 
1639 fail:
1640 	return RTW_PHL_STATUS_FAILURE;
1641 }
1642 
1643 #ifdef CONFIG_CMD_DISP
1644 enum rtw_phl_status
1645 phl_cmd_cfg_ampdu_hdl(struct phl_info_t *phl_info, u8 *param)
1646 {
1647 	struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)param;
1648 
1649 	PHL_INFO(" %s(), sta = %p !\n", __func__, sta);
1650 
1651 	return _phl_cfg_tx_ampdu(phl_info, sta);
1652 }
1653 #endif
1654 
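/**
 * enqueue MSG_EVT_CFG_AMPDU to the command dispatcher to configure tx ampdu
 * for the given sta; requires CONFIG_CMD_DISP
 * @phl: see struct phl_info_t
 * @wrole: input, the wifi role whose hw_band the command is issued on
 * @sta: input, the target sta info
 * @cmd_type: input, how the command is dispatched
 * @cmd_timeout: input, timeout passed to phl_cmd_enqueue
 */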
1655 enum rtw_phl_status
1656 rtw_phl_cmd_cfg_ampdu(void *phl,
1657 			struct rtw_wifi_role_t *wrole,
1658 			struct rtw_phl_stainfo_t *sta,
1659 			enum phl_cmd_type cmd_type,
1660 			u32 cmd_timeout)
1661 {
1662 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1663 #ifdef CONFIG_CMD_DISP
1664 	sts = phl_cmd_enqueue(phl,
1665 			wrole->hw_band,
1666 			MSG_EVT_CFG_AMPDU,
1667 			(u8 *)sta, 0,
1668 			NULL,
1669 			cmd_type, cmd_timeout);
1670 
1671 	if (is_cmd_failure(sts)) {
1672 		/* cmd was sent successfully, but waiting for completion failed */
1673 		sts = RTW_PHL_STATUS_FAILURE;
1674 	} else if (sts != RTW_PHL_STATUS_SUCCESS) {
1675 		/* sending the cmd failed */
1676 		sts = RTW_PHL_STATUS_FAILURE;
1677 	}
1678 
1679 	return sts;
1680 #else
1681 	PHL_ERR("%s : CONFIG_CMD_DISP needs to be enabled for MSG_EVT_CFG_AMPDU !! \n", __func__);
1682 
1683 	return sts;
1684 #endif
1685 }
1686 
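/**
 * periodic tx watchdog: update tx traffic statistics and run the
 * hci-specific tx watchdog ops
 * @phl_info: see struct phl_info_t
 */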
1687 void
1688 phl_tx_watchdog(struct phl_info_t *phl_info)
1689 {
1690 	struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
1691 	struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
1692 
1693 	phl_tx_traffic_upd(phl_stats);
1694 	trx_ops->tx_watchdog(phl_info);
1695 }
1696 
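/**
 * map a phl module id to its data control module flag; unknown modules are
 * mapped to DATA_CTRL_MDL_NONE and a warning is printed
 * @id: input, the phl module id requesting datapath control
 */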
1697 enum data_ctrl_mdl _phl_get_ctrl_mdl(enum phl_module_id id)
1698 {
1699 	enum data_ctrl_mdl ctrl_mdl = DATA_CTRL_MDL_NONE;
1700 
1701 	switch (id) {
1702 	case PHL_MDL_PHY_MGNT:
1703 		ctrl_mdl = DATA_CTRL_MDL_CMD_CTRLER;
1704 		break;
1705 	case PHL_MDL_SER:
1706 		ctrl_mdl = DATA_CTRL_MDL_SER;
1707 		break;
1708 	case PHL_MDL_POWER_MGNT:
1709 		ctrl_mdl = DATA_CTRL_MDL_PS;
1710 		break;
1711 	default:
1712 		PHL_WARN("Unknown PHL module(%d) tried to control the datapath; request skipped!\n",
1713 			 id);
1714 		ctrl_mdl = DATA_CTRL_MDL_NONE;
1715 		break;
1716 	}
1717 
1718 	return ctrl_mdl;
1719 }
1720 
1721 
1722 enum rtw_phl_status
1723 _phl_poll_hw_tx_done(void)
1724 {
1725 	PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Polling hw tx done is not supported now\n");
1726 
1727 	return RTW_PHL_STATUS_FAILURE;
1728 }
1729 
1730 enum rtw_phl_status
1731 _phl_hw_tx_resume(void)
1732 {
1733 	PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Resume hw tx is not supported now\n");
1734 
1735 	return RTW_PHL_STATUS_FAILURE;
1736 }
1737 
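/**
 * clear the requester's sw tx pause flag; sw tx is actually resumed and
 * notified only when no other module still holds a pause flag
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request, ctl->id identifies the requester
 */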
1738 enum rtw_phl_status
1739 _phl_sw_tx_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
1740 {
1741 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1742 	struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1743 	enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1744 
1745 	if (!TEST_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl)) {
1746 		PHL_WARN("[DATA_CTRL] module %d failed to resume sw tx, sw tx is paused by module 0x%x\n",
1747 		         ctl->id, phl_info->pause_tx_id);
1748 		return sts;
1749 	}
1750 
1751 	CLEAR_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1752 
1753 	if (DATA_CTRL_MDL_NONE != phl_info->pause_tx_id) {
1754 		PHL_WARN("[DATA_CTRL] sw tx is still paused by tx pause id = 0x%x\n",
1755 		         phl_info->pause_tx_id);
1756 
1757 		sts = RTW_PHL_STATUS_SUCCESS;
1758 	} else {
1759 		ops->trx_resume(phl_info, PHL_CTRL_TX);
1760 
1761 		sts = rtw_phl_tx_req_notify(phl_info);
1762 	}
1763 
1764 	return sts;
1765 }
1766 
1767 void
1768 _phl_sw_tx_rst(struct phl_info_t *phl_info)
1769 {
1770 	struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1771 
1772 	ops->trx_reset(phl_info, PHL_CTRL_TX);
1773 }
1774 
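/**
 * request sw tx to stop and poll until it is paused; if sw tx is already
 * paused or a stop is in progress, only the requester's pause flag is added
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request, ctl->id identifies the requester
 * @rst_sw: input, pause with sw reset (not supported yet)
 */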
1775 enum rtw_phl_status
1776 _phl_sw_tx_pause(struct phl_info_t *phl_info,
1777                  struct phl_data_ctl_t *ctl,
1778                  bool rst_sw)
1779 {
1780 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1781 	struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1782 	void *drv = phl_to_drvpriv(phl_info);
1783 	u32 i = 0;
1784 	enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1785 
1786 	if (PHL_TX_STATUS_SW_PAUSE ==
1787 	    _os_atomic_read(drv, &phl_info->phl_sw_tx_sts)) {
1788 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1789 		          "[DATA_CTRL] SW tx has been paused by module(0x%x)\n",
1790 		          phl_info->pause_tx_id);
1791 
1792 		SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1793 
1794 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1795 		          "[DATA_CTRL] Update pause sw tx id(0x%x) by module(%d)\n",
1796 		          phl_info->pause_tx_id, ctl->id);
1797 
1798 		sts = RTW_PHL_STATUS_SUCCESS;
1799 		return sts;
1800 	}
1801 
1802 	if (PHL_TX_STATUS_STOP_INPROGRESS ==
1803 	    _os_atomic_read(drv, &phl_info->phl_sw_tx_sts)) {
1804 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1805 		          "[DATA_CTRL] SW tx has been requested to pause by module(0x%x)\n",
1806 		          phl_info->pause_tx_id);
1807 
1808 		SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1809 
1810 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1811 		          "[DATA_CTRL] Update pause sw tx id(0x%x) by module(%d)\n",
1812 		          phl_info->pause_tx_id, ctl->id);
1813 
1814 		sts = RTW_PHL_STATUS_SUCCESS;
1815 		return sts;
1816 	}
1817 
1818 	/* request sw tx to stop */
1819 	ops->req_tx_stop(phl_info);
1820 
1821 	/*
1822 	 * notify sw tx one last time
1823 	 * and poll whether it received the stop request and paused itself
1824 	 */
1825 	if (RTW_PHL_STATUS_SUCCESS == rtw_phl_tx_req_notify(phl_info)) {
1826 		for (i = 0; i < POLL_SW_TX_PAUSE_CNT; i++) {
1827 			if (true == ops->is_tx_pause(phl_info)) {
1828 				SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1829 				sts = RTW_PHL_STATUS_SUCCESS;
1830 				break;
1831 			}
1832 			_os_sleep_ms(drv, POLL_SW_TX_PAUSE_MS);
1833 		}
1834 
1835 		if (RTW_PHL_STATUS_SUCCESS != sts) {
1836 			SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1837 			sts = RTW_PHL_STATUS_CMD_TIMEOUT;
1838 			PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
1839 			          "[DATA_CTRL] Module(%d) polling sw tx pause timeout (%d ms)!\n",
1840 			          ctl->id,
1841 			          (POLL_SW_TX_PAUSE_MS * POLL_SW_TX_PAUSE_CNT));
1842 		} else {
1843 			if (true == rst_sw) {
1844 				PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_,
1845 				          "[DATA_CTRL] Pause Tx with reset is not supported now! requested by module(%d)\n",
1846 				          ctl->id);
1847 			}
1848 		}
1849 	} else {
1850 		PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Schedule sw tx process fail!\n");
1851 	}
1852 
1853 	return sts;
1854 }
1855 
1856 enum rtw_phl_status
1857 _phl_poll_hw_rx_done(void)
1858 {
1859 	PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "[DATA_CTRL] Polling hw rx done is not supported now\n");
1860 
1861 	return RTW_PHL_STATUS_FAILURE;
1862 }
1863 
1864 enum rtw_phl_status
1865 _phl_hw_rx_resume(void)
1866 {
1867 	PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "[DATA_CTRL] Resume hw rx is not supported now\n");
1868 
1869 	return RTW_PHL_STATUS_FAILURE;
1870 }
1871 
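/**
 * rx counterpart of _phl_sw_tx_resume(): clear the requester's sw rx pause
 * flag and resume sw rx once no other module keeps it paused
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request, ctl->id identifies the requester
 */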
1872 enum rtw_phl_status
1873 _phl_sw_rx_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
1874 {
1875 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1876 	struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1877 	enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1878 
1879 	if (!TEST_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl)) {
1880 		PHL_WARN("[DATA_CTRL] module %d failed to resume sw rx, sw rx is paused by module 0x%x\n",
1881 		         ctl->id, phl_info->pause_rx_id);
1882 		return sts;
1883 	}
1884 
1885 	CLEAR_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1886 
1887 	if (DATA_CTRL_MDL_NONE != phl_info->pause_rx_id) {
1888 		PHL_WARN("[DATA_CTRL] sw rx is still paused by rx pause id = 0x%x\n",
1889 		         phl_info->pause_rx_id);
1890 
1891 		sts = RTW_PHL_STATUS_SUCCESS;
1892 	} else {
1893 		ops->trx_resume(phl_info, PHL_CTRL_RX);
1894 
1895 		sts = rtw_phl_start_rx_process(phl_info);
1896 	}
1897 
1898 	return sts;
1899 }
1900 
1901 void
1902 _phl_sw_rx_rst(struct phl_info_t *phl_info)
1903 {
1904 	struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1905 
1906 	ops->trx_reset(phl_info, PHL_CTRL_RX);
1907 }
1908 
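/**
 * rx counterpart of _phl_sw_tx_pause(): request sw rx to stop and poll
 * until it is paused, or only update the pause flag if rx is already
 * paused or stopping
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request, ctl->id identifies the requester
 * @rst_sw: input, pause with sw reset (not supported yet)
 */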
1909 enum rtw_phl_status
1910 _phl_sw_rx_pause(struct phl_info_t *phl_info,
1911                  struct phl_data_ctl_t *ctl,
1912                  bool rst_sw)
1913 {
1914 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1915 	struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1916 	void *drv = phl_to_drvpriv(phl_info);
1917 	u32 i = 0;
1918 	enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1919 
1920 	if (PHL_RX_STATUS_SW_PAUSE ==
1921 	    _os_atomic_read(drv, &phl_info->phl_sw_rx_sts)) {
1922 		PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1923 		          "[DATA_CTRL] SW rx has been paused by module(0x%x)\n",
1924 		          phl_info->pause_rx_id);
1925 
1926 		SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1927 
1928 		PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1929 		          "[DATA_CTRL] Update pause sw rx id(0x%x) by module(%d)\n",
1930 		          phl_info->pause_rx_id, ctl->id);
1931 		sts = RTW_PHL_STATUS_SUCCESS;
1932 		return sts;
1933 	}
1934 
1935 	if (PHL_RX_STATUS_STOP_INPROGRESS ==
1936 	    _os_atomic_read(drv, &phl_info->phl_sw_rx_sts)) {
1937 		PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1938 		          "[DATA_CTRL] SW rx has been requested to pause by module(0x%x)\n",
1939 		          phl_info->pause_rx_id);
1940 
1941 		SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1942 
1943 		PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1944 		          "[DATA_CTRL] Update pause sw rx id(0x%x) by module(%d)\n",
1945 		          phl_info->pause_rx_id, ctl->id);
1946 		sts = RTW_PHL_STATUS_SUCCESS;
1947 		return sts;
1948 	}
1949 
1950 	/* request sw rx to stop */
1951 	ops->req_rx_stop(phl_info);
1952 
1953 	/*
1954 	 * notify sw rx one last time
1955 	 * and poll whether it received the stop request and paused itself
1956 	 */
1957 	if (RTW_PHL_STATUS_SUCCESS == rtw_phl_start_rx_process(phl_info)) {
1958 		for (i = 0; i < POLL_SW_RX_PAUSE_CNT; i++) {
1959 			if (true == ops->is_rx_pause(phl_info)) {
1960 				SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1961 				sts = RTW_PHL_STATUS_SUCCESS;
1962 				break;
1963 			}
1964 			_os_sleep_ms(drv, POLL_SW_RX_PAUSE_MS);
1965 		}
1966 
1967 		if (RTW_PHL_STATUS_SUCCESS != sts) {
1968 			SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1969 			sts = RTW_PHL_STATUS_CMD_TIMEOUT;
1970 			PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1971 			          "[DATA_CTRL] Module(%d) polling sw rx pause timeout (%d ms)!\n",
1972 			          ctl->id,
1973 			          (POLL_SW_RX_PAUSE_MS * POLL_SW_RX_PAUSE_CNT));
1974 		} else {
1975 			if (true == rst_sw) {
1976 				PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
1977 				          "[DATA_CTRL] Pause Rx with reset is not supported now! requested by module(%d)\n",
1978 				          ctl->id);
1979 			}
1980 		}
1981 	} else {
1982 		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Schedule sw rx process fail!\n");
1983 	}
1984 
1985 	return sts;
1986 }
1987 
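/**
 * reset and resume hw t/rx through lv1 recovery step 2 and clear
 * is_hw_trx_pause on success
 * @phl_info: see struct phl_info_t
 */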
1988 enum rtw_phl_status
1989 _phl_hw_trx_rst_resume(struct phl_info_t *phl_info)
1990 {
1991 	void *drv = phl_to_drvpriv(phl_info);
1992 
1993 	if (false == _os_atomic_read(drv, &phl_info->is_hw_trx_pause)) {
1994 		PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] HW T/Rx is not paused\n");
1995 	}
1996 
1997 	if (rtw_hal_lv1_rcvy(phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2) !=
1998 	    RTW_HAL_STATUS_SUCCESS) {
1999 		PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Reset and Resume HW T/Rx fail\n");
2000 		return RTW_PHL_STATUS_FAILURE;
2001 	} else {
2002 		_os_atomic_set(drv, &phl_info->is_hw_trx_pause, false);
2003 		return RTW_PHL_STATUS_SUCCESS;
2004 	}
2005 }
2006 
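/**
 * pause hw t/rx through lv1 recovery step 1 and set is_hw_trx_pause on
 * success
 * @phl_info: see struct phl_info_t
 */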
2007 enum rtw_phl_status
2008 _phl_hw_trx_pause(struct phl_info_t *phl_info)
2009 {
2010 	void *drv = phl_to_drvpriv(phl_info);
2011 
2012 	if (true == _os_atomic_read(drv, &phl_info->is_hw_trx_pause)) {
2013 		PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] HW T/Rx is already paused\n");
2014 	}
2015 
2016 	if (rtw_hal_lv1_rcvy(phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1) !=
2017 	    RTW_HAL_STATUS_SUCCESS) {
2018 		PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Pause HW T/Rx fail\n");
2019 		return RTW_PHL_STATUS_FAILURE;
2020 	} else {
2021 		_os_atomic_set(drv, &phl_info->is_hw_trx_pause, true);
2022 		return RTW_PHL_STATUS_SUCCESS;
2023 	}
2024 }
2025 
2026 enum rtw_phl_status
2027 _phl_trx_sw_pause(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
2028 {
2029 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2030 
2031 	do {
2032 		sts = _phl_sw_tx_pause(phl_info, ctl, false);
2033 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2034 			PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause SW Tx fail in PHL_DATA_CTL_TRX_SW_PAUSE!\n");
2035 			break;
2036 		}
2037 
2038 		sts = _phl_sw_rx_pause(phl_info, ctl, false);
2039 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2040 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Pause SW Rx fail in PHL_DATA_CTL_TRX_SW_PAUSE!\n");
2041 			break;
2042 		}
2043 	} while (false);
2044 
2045 	return sts;
2046 }
2047 
2048 enum rtw_phl_status
2049 _phl_trx_sw_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
2050 {
2051 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2052 
2053 	do {
2054 		sts = _phl_sw_tx_resume(phl_info, ctl);
2055 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2056 			PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume SW Tx fail in PHL_DATA_CTL_TRX_SW_RESUME!\n");
2057 			break;
2058 		}
2059 
2060 		sts = _phl_sw_rx_resume(phl_info, ctl);
2061 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2062 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Resume SW Rx fail in PHL_DATA_CTL_TRX_SW_RESUME!\n");
2063 			break;
2064 		}
2065 	} while (false);
2066 
2067 	return sts;
2068 }
2069 
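/**
 * pause sw tx, hw t/rx and sw rx in order, then reset both sw paths;
 * a detailed reason is written to msg->outbuf when it holds an
 * enum data_ctrl_err_code
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request
 * @msg: input, the phl message carrying the optional error code buffer
 */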
2070 enum rtw_phl_status
2071 _phl_trx_pause_w_rst(struct phl_info_t *phl_info,
2072                      struct phl_data_ctl_t *ctl,
2073                      struct phl_msg *msg)
2074 {
2075 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2076 	enum data_ctrl_err_code *err_sts = NULL;
2077 
2078 	if (msg->outbuf && msg->outlen == sizeof(*err_sts))
2079 		err_sts = (enum data_ctrl_err_code *)msg->outbuf;
2080 
2081 	do {
2082 		sts = _phl_sw_tx_pause(phl_info, ctl, false);
2083 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2084 			if (err_sts) {
2085 				if (RTW_PHL_STATUS_CMD_TIMEOUT == sts)
2086 					*err_sts = CTRL_ERR_SW_TX_PAUSE_POLLTO;
2087 				else
2088 					*err_sts = CTRL_ERR_SW_TX_PAUSE_FAIL;
2089 			}
2090 			PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause SW Tx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
2091 			break;
2092 		}
2093 
2094 		sts = _phl_hw_trx_pause(phl_info);
2095 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2096 			if (err_sts)
2097 				*err_sts = CTRL_ERR_HW_TRX_PAUSE_FAIL;
2098 			PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause HW T/Rx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
2099 			break;
2100 		}
2101 
2102 		sts = _phl_sw_rx_pause(phl_info, ctl, false);
2103 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2104 			if (err_sts) {
2105 				if (RTW_PHL_STATUS_CMD_TIMEOUT == sts)
2106 					*err_sts = CTRL_ERR_SW_RX_PAUSE_POLLTO;
2107 				else
2108 					*err_sts = CTRL_ERR_SW_RX_PAUSE_FAIL;
2109 			}
2110 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Pause SW Rx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
2111 			break;
2112 		}
2113 
2114 		_phl_sw_tx_rst(phl_info);
2115 		_phl_sw_rx_rst(phl_info);
2116 	} while (false);
2117 
2118 	return sts;
2119 }
2120 
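/**
 * counterpart of _phl_trx_pause_w_rst(): resume sw rx, reset/resume hw t/rx
 * and resume sw tx in order, reporting failures through msg->outbuf
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request
 * @msg: input, the phl message carrying the optional error code buffer
 */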
2121 enum rtw_phl_status
2122 _phl_trx_resume_w_rst(struct phl_info_t *phl_info,
2123                       struct phl_data_ctl_t *ctl,
2124                       struct phl_msg *msg)
2125 {
2126 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2127 	enum data_ctrl_err_code *err_sts = NULL;
2128 
2129 	if (msg->outbuf && msg->outlen == sizeof(*err_sts))
2130 		err_sts = (enum data_ctrl_err_code *)msg->outbuf;
2131 
2132 	do {
2133 		sts = _phl_sw_rx_resume(phl_info, ctl);
2134 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2135 			if (err_sts)
2136 				*err_sts = CTRL_ERR_SW_RX_RESUME_FAIL;
2137 
2138 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Resume SW Rx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
2139 			break;
2140 		}
2141 
2142 		sts = _phl_hw_trx_rst_resume(phl_info);
2143 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2144 			if (err_sts)
2145 				*err_sts = CTRL_ERR_HW_TRX_RESUME_FAIL;
2146 
2147 			PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume HW T/Rx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
2148 			break;
2149 		}
2150 
2151 		sts = _phl_sw_tx_resume(phl_info, ctl);
2152 		if (RTW_PHL_STATUS_SUCCESS != sts) {
2153 			if (err_sts)
2154 				*err_sts = CTRL_ERR_SW_TX_RESUME_FAIL;
2155 
2156 			PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume SW Tx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
2157 			break;
2158 		}
2159 	} while (false);
2160 
2161 	return sts;
2162 }
2163 
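/**
 * dispatch a datapath control command (pause / resume / reset of sw or hw
 * t/rx) to the corresponding handler
 * @phl_info: see struct phl_info_t
 * @ctl: input, the data control request, must not be NULL
 * @msg: input, the phl message carrying an optional error code buffer
 */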
2164 enum rtw_phl_status
2165 phl_data_ctrler(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl,
2166 		struct phl_msg *msg)
2167 {
2168 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2169 
2170 	if (NULL == ctl) {
2171 		PHL_WARN("%s(): input ctl is NULL\n", __func__);
2172 		return RTW_PHL_STATUS_FAILURE;
2173 	}
2174 
2175 	switch (ctl->cmd) {
2176 	case PHL_DATA_CTL_HW_TRX_RST_RESUME:
2177 		sts = _phl_hw_trx_rst_resume(phl_info);
2178 		break;
2179 	case PHL_DATA_CTL_HW_TRX_PAUSE:
2180 		sts = _phl_hw_trx_pause(phl_info);
2181 		break;
2182 	case PHL_DATA_CTL_SW_TX_RESUME:
2183 		sts = _phl_sw_tx_resume(phl_info, ctl);
2184 		break;
2185 	case PHL_DATA_CTL_SW_RX_RESUME:
2186 		sts = _phl_sw_rx_resume(phl_info, ctl);
2187 		break;
2188 	case PHL_DATA_CTL_SW_TX_PAUSE:
2189 		sts = _phl_sw_tx_pause(phl_info, ctl, false);
2190 		break;
2191 	case PHL_DATA_CTL_SW_RX_PAUSE:
2192 		sts = _phl_sw_rx_pause(phl_info, ctl, false);
2193 		break;
2194 	case PHL_DATA_CTL_SW_TX_RESET:
2195 		_phl_sw_tx_rst(phl_info);
2196 		sts = RTW_PHL_STATUS_SUCCESS;
2197 		break;
2198 	case PHL_DATA_CTL_SW_RX_RESET:
2199 		_phl_sw_rx_rst(phl_info);
2200 		sts = RTW_PHL_STATUS_SUCCESS;
2201 		break;
2202 	case PHL_DATA_CTL_TRX_SW_PAUSE:
2203 		sts = _phl_trx_sw_pause(phl_info, ctl);
2204 		break;
2205 	case PHL_DATA_CTL_TRX_SW_RESUME:
2206 		sts = _phl_trx_sw_resume(phl_info, ctl);
2207 		break;
2208 	case PHL_DATA_CTL_TRX_PAUSE_W_RST:
2209 		sts = _phl_trx_pause_w_rst(phl_info, ctl, msg);
2210 		break;
2211 	case PHL_DATA_CTL_TRX_RESUME_W_RST:
2212 		sts = _phl_trx_resume_w_rst(phl_info, ctl, msg);
2213 		break;
2214 	default:
2215 		PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
2216 		          "Unknown data control command(%d)!\n", ctl->cmd);
2217 		break;
2218 	}
2219 	return sts;
2220 }
2221