1 /******************************************************************************
2 *
3 * Copyright(c) 2019 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15 #define _PHL_TX_C_
16 #include "phl_headers.h"
17
18 /**
19 * Used by the read / write pointer mechanism; returns the number of
20 * entries available to be read.
21 * @rptr: input, the read pointer
22 * @wptr: input, the write pointer
23 * @bndy: input, the boundary of read / write pointer mechanism
24 */
25 u16 phl_calc_avail_rptr(u16 rptr, u16 wptr, u16 bndy)
26 {
27 u16 avail_rptr = 0;
28
29 if (wptr >= rptr)
30 avail_rptr = wptr - rptr;
31 else if (rptr > wptr)
32 avail_rptr = wptr + (bndy - rptr);
33
34 return avail_rptr;
35 }
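/*
 * Worked example (illustration only): with bndy = 8, rptr = 6 and
 * wptr = 2, the write pointer has wrapped, so the number of entries
 * ready to be read is wptr + (bndy - rptr) = 2 + (8 - 6) = 4.
 */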
36
37
38 /**
39 * Used by the read / write pointer mechanism; returns the number of
40 * entries available to be written.
41 * @rptr: input, the read pointer
42 * @wptr: input, the write pointer
43 * @bndy: input, the boundary of read / write pointer mechanism
44 */
45 u16 phl_calc_avail_wptr(u16 rptr, u16 wptr, u16 bndy)
46 {
47 u16 avail_wptr = 0;
48
49 if (rptr > wptr)
50 avail_wptr = rptr - wptr - 1;
51 else if (wptr >= rptr)
52 avail_wptr = rptr + (bndy - wptr) - 1;
53
54 return avail_wptr;
55 }
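/*
 * Worked example (illustration only): with bndy = 8 and rptr == wptr
 * (empty ring), rptr + (bndy - wptr) - 1 = 7 entries may still be
 * written; one slot is always kept free so a full ring can be told
 * apart from an empty one.
 */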
56
57 void phl_dump_sorted_ring(_os_list *sorted_ring)
58 {
59 struct phl_ring_status *ring_sts;
60 u16 i = 0;
61
62 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump sorted ring==\n");
63
64 phl_list_for_loop(ring_sts, struct phl_ring_status, sorted_ring,
65 list) {
66 i++;
67 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==ring %d==\n", i);
68 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->macid = %d\n",
69 ring_sts->macid);
70 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->req_busy = %d\n",
71 ring_sts->req_busy);
72 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->ring_ptr->tid = %d\n",
73 ring_sts->ring_ptr->tid);
74 }
75 }
76
77 void phl_dump_tx_plan(_os_list *sta_list)
78 {
79 struct phl_tx_plan *tx_plan;
80 u16 i = 0;
81
82 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump tx plan==\n");
83
84 phl_list_for_loop(tx_plan, struct phl_tx_plan, sta_list,
85 list) {
86 i++;
87 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==tx plan %d==\n", i);
88 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->sleep = %d\n",
89 tx_plan->sleep);
90 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->has_mgnt = %d\n",
91 tx_plan->has_mgnt);
92 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "tx_plan->has_hiq = %d\n",
93 tx_plan->has_hiq);
94 phl_dump_sorted_ring(&tx_plan->sorted_ring);
95 }
96 }
97
98 void phl_dump_t_fctrl_result(_os_list *t_fctrl_result)
99 {
100 struct phl_ring_status *ring_sts;
101 u16 i = 0;
102
103 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==dump tx flow control result==\n");
104
105 phl_list_for_loop(ring_sts, struct phl_ring_status, t_fctrl_result,
106 list) {
107 i++;
108 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "==ring %d==\n", i);
109 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->macid = %d\n",
110 ring_sts->macid);
111 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->req_busy = %d\n",
112 ring_sts->req_busy);
113 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_, "ring_sts->ring_ptr->tid = %d\n",
114 ring_sts->ring_ptr->tid);
115 }
116 }
117
118 void phl_dump_tx_stats(struct rtw_stats *stats)
119 {
120 PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
121 "Dump Tx statistics\n"
122 "tx_byte_uni = %lld\n"
123 "tx_byte_total = %lld\n"
124 "tx_tp_kbits = %d\n"
125 "last_tx_time_ms = %d\n",
126 stats->tx_byte_uni,
127 stats->tx_byte_total,
128 stats->tx_tp_kbits,
129 stats->last_tx_time_ms);
130 }
131
132 void phl_dump_h2c_pool_stats(struct phl_h2c_pkt_pool *h2c_pkt_pool)
133 {
134 PHL_INFO("[h2c_stats] idle cmd %d, idle data %d, idle ldata %d, busy h2c %d.\n",
135 h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt,
136 h2c_pkt_pool->idle_h2c_pkt_data_list.cnt,
137 h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt,
138 h2c_pkt_pool->busy_h2c_pkt_list.cnt);
139 }
140
141 void phl_reset_tx_stats(struct rtw_stats *stats)
142 {
143 stats->tx_byte_uni = 0;
144 stats->tx_byte_total = 0;
145 stats->tx_tp_kbits = 0;
146 stats->last_tx_time_ms = 0;
147 stats->txtp.last_calc_time_ms = 0;
148 stats->txtp.last_calc_bits = 0;
149 stats->tx_traffic.lvl = RTW_TFC_IDLE;
150 stats->tx_traffic.sts = 0;
151 }
152
153 const char *phl_tfc_lvl_to_str(u8 lvl)
154 {
155 switch (lvl) {
156 case RTW_TFC_IDLE:
157 return "IDLE";
158 case RTW_TFC_ULTRA_LOW:
159 return "ULTRA_LOW";
160 case RTW_TFC_LOW:
161 return "LOW";
162 case RTW_TFC_MID:
163 return "MID";
164 case RTW_TFC_HIGH:
165 return "HIGH";
166 default:
167 return "-";
168 }
169 }
170
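/**
 * Map the latest tx throughput onto a coarse traffic level and flag any
 * level change in tx_traffic.sts: the level is chosen by comparing the
 * throughput against the *_TP_THRES_* thresholds (Mbps for HIGH/MID/LOW,
 * kbps for ULTRA_LOW), and TRAFFIC_CHANGED together with
 * TRAFFIC_INCREASE / TRAFFIC_DECREASE is set when the level moves.
 */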
171 void
172 phl_tx_traffic_upd(struct rtw_stats *sts)
173 {
174 u32 tp_k = 0, tp_m = 0;
175 enum rtw_tfc_lvl tx_tfc_lvl = RTW_TFC_IDLE;
176 tp_k = sts->tx_tp_kbits;
177 tp_m = sts->tx_tp_kbits >> 10;
178
179 if (tp_m >= TX_HIGH_TP_THRES_MBPS)
180 tx_tfc_lvl = RTW_TFC_HIGH;
181 else if (tp_m >= TX_MID_TP_THRES_MBPS)
182 tx_tfc_lvl = RTW_TFC_MID;
183 else if (tp_m >= TX_LOW_TP_THRES_MBPS)
184 tx_tfc_lvl = RTW_TFC_LOW;
185 else if (tp_k >= TX_ULTRA_LOW_TP_THRES_KBPS)
186 tx_tfc_lvl = RTW_TFC_ULTRA_LOW;
187 else
188 tx_tfc_lvl = RTW_TFC_IDLE;
189
190 if (sts->tx_traffic.lvl > tx_tfc_lvl) {
191 sts->tx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
192 sts->tx_traffic.lvl = tx_tfc_lvl;
193 } else if (sts->tx_traffic.lvl < tx_tfc_lvl) {
194 sts->tx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
195 sts->tx_traffic.lvl = tx_tfc_lvl;
196 } else if (sts->tx_traffic.sts &
197 (TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
198 sts->tx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
199 TRAFFIC_DECREASE);
200 }
201 }
202
203 void phl_update_tx_stats(struct rtw_stats *stats, struct rtw_xmit_req *tx_req)
204 {
205 u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
206 u64 diff_bits = 0;
207
208 stats->last_tx_time_ms = cur_time;
209 stats->tx_byte_total += tx_req->total_len;
210
211 stats->txreq_num++;
212 if (tx_req->mdata.bc == 0 && tx_req->mdata.mc == 0)
213 stats->tx_byte_uni += tx_req->total_len;
214
215 if (0 == stats->txtp.last_calc_time_ms ||
216 0 == stats->txtp.last_calc_bits) {
217 stats->txtp.last_calc_time_ms = stats->last_tx_time_ms;
218 stats->txtp.last_calc_bits = stats->tx_byte_uni * 8;
219 } else {
220 if (cur_time >= stats->txtp.last_calc_time_ms) {
221 diff_t = cur_time - stats->txtp.last_calc_time_ms;
222 } else {
223 diff_t = RTW_U32_MAX - stats->txtp.last_calc_time_ms +
224 cur_time + 1;
225 }
226 if (diff_t > TXTP_CALC_DIFF_MS && stats->tx_byte_uni != 0) {
227 diff_bits = (stats->tx_byte_uni * 8) -
228 stats->txtp.last_calc_bits;
229 stats->tx_tp_kbits = (u32)_os_division64(diff_bits,
230 diff_t);
231 stats->txtp.last_calc_bits = stats->tx_byte_uni * 8;
232 stats->txtp.last_calc_time_ms = cur_time;
233 }
234 }
235 }
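/*
 * Note on the unit above: diff_bits is the number of bits sent during
 * diff_t milliseconds, so diff_bits / diff_t is bits-per-millisecond,
 * which is numerically the same as the kbit/s value stored in
 * tx_tp_kbits.
 */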
236
237 void phl_tx_statistics(struct phl_info_t *phl_info, struct rtw_xmit_req *tx_req)
238 {
239 struct rtw_phl_com_t *phl_com = phl_info->phl_com;
240 struct rtw_stats *phl_stats = &phl_com->phl_stats;
241 struct rtw_stats *sta_stats = NULL;
242 struct rtw_phl_stainfo_t *sta = NULL;
243 u16 macid = tx_req->mdata.macid;
244
245 if (!phl_macid_is_valid(phl_info, macid))
246 goto dev_stat;
247
248 sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
249
250 if (NULL == sta)
251 goto dev_stat;
252 sta_stats = &sta->stats;
253
254 phl_update_tx_stats(sta_stats, tx_req);
255 dev_stat:
256 phl_update_tx_stats(phl_stats, tx_req);
257 }
258
259
260 static void _phl_free_phl_tring_list(void *phl,
261 struct rtw_phl_tring_list *ring_list)
262 {
263 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
264 void *drv_priv = phl_to_drvpriv(phl_info);
265 struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
266 struct rtw_phl_tx_ring *ring;
267 struct rtw_xmit_req *tx_req;
268 u16 rptr = 0;
269 u8 i = 0;
270
271 for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
272 ring = &ring_list->phl_ring[i];
273 rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
274
275 while (rptr != ring->core_idx) {
276 rptr += 1;
277 if (rptr >= MAX_PHL_TX_RING_ENTRY_NUM)
278 rptr = 0;
279 tx_req = (struct rtw_xmit_req *)ring->entry[rptr];
280 if (NULL == tx_req)
281 break;
282 ops->tx_recycle(drv_priv, tx_req);
283 }
284 }
285 _os_kmem_free(drv_priv, ring_list, sizeof(*ring_list));
286 }
287
288
289 void _phl_init_tx_plan(struct phl_tx_plan *tx_plan)
290 {
291 INIT_LIST_HEAD(&tx_plan->list);
292 tx_plan->sleep = false;
293 tx_plan->has_mgnt = false;
294 tx_plan->has_hiq = false;
295 INIT_LIST_HEAD(&tx_plan->sorted_ring);
296 }
297
298
299 static struct rtw_phl_tring_list *
300 _phl_allocate_phl_tring_list(void *phl, u16 macid,
301 u8 hw_band, u8 hw_wmm, u8 hw_port)
302 {
303 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
304 struct rtw_phl_tring_list *phl_tring_list = NULL;
305 void *drv_priv = NULL;
306 u32 buf_len = 0;
307 u8 i = 0, dma_ch = 0;
308
309 drv_priv = phl_to_drvpriv(phl_info);
310
311 buf_len = sizeof(struct rtw_phl_tring_list);
312 phl_tring_list = (struct rtw_phl_tring_list *)_os_kmem_alloc(drv_priv,
313 buf_len);
314
315 if (NULL != phl_tring_list) {
316 _os_mem_set(drv_priv, phl_tring_list, 0, buf_len);
317 INIT_LIST_HEAD(&phl_tring_list->list);
318 phl_tring_list->macid = macid;
319 phl_tring_list->band = hw_band;
320 phl_tring_list->wmm = hw_wmm;
321 phl_tring_list->port = hw_port;
322 /*phl_tring_list->mbssid = hw_mbssid*/
323
324 for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
325 phl_tring_list->phl_ring[i].tid = i;
326 dma_ch = rtw_hal_tx_chnl_mapping(phl_info->hal, macid,
327 i, hw_band);
328 phl_tring_list->phl_ring[i].dma_ch = dma_ch;
329 }
330 _phl_init_tx_plan(&phl_tring_list->tx_plan);
331 }
332
333 return phl_tring_list;
334 }
335
336 enum rtw_phl_status
337 phl_register_tx_ring(void *phl, u16 macid, u8 hw_band, u8 hw_wmm, u8 hw_port)
338 {
339 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
340 void *drv_priv = phl_to_drvpriv(phl_info);
341 struct rtw_phl_tring_list *phl_tring_list = NULL;
342 enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
343 _os_list *ring_list = NULL;
344
345 phl_tring_list = _phl_allocate_phl_tring_list(phl, macid, hw_band, hw_wmm, hw_port);
346
347 if (NULL != phl_tring_list) {
348 ring_list = &phl_info->t_ring_list;
349 _os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
350 list_add_tail(&phl_tring_list->list, ring_list);
351 _os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
352
353 phl_status = RTW_PHL_STATUS_SUCCESS;
354 }
355
356 return phl_status;
357 }
358
359
360
361 enum rtw_phl_status phl_deregister_tx_ring(void *phl, u16 macid)
362 {
363 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
364 void *drv_priv = phl_to_drvpriv(phl_info);
365 struct rtw_phl_tring_list *phl_tring_list = NULL, *t;
366 enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
367 _os_list *ring_list = NULL;
368
369 ring_list = &phl_info->t_ring_list;
370
371 _os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
372
373 phl_list_for_loop_safe(phl_tring_list, t, struct rtw_phl_tring_list,
374 ring_list, list) {
375 if (macid == phl_tring_list->macid) {
376 list_del(&phl_tring_list->list);
377 phl_status = RTW_PHL_STATUS_SUCCESS;
378 break;
379 }
380 }
381
382 _os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
383
384 if (RTW_PHL_STATUS_SUCCESS == phl_status) {
385 /* defer the free operation to avoid racing with _phl_tx_callback_xxx */
386 _os_spinlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
387 list_add_tail(&phl_tring_list->list, &phl_info->t_ring_free_list);
388 _os_spinunlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
389 }
390
391 return phl_status;
392 }
393
394 void phl_free_deferred_tx_ring(struct phl_info_t *phl_info)
395 {
396 void *drv_priv = phl_to_drvpriv(phl_info);
397 struct rtw_phl_tring_list *phl_tring_list = NULL, *t;
398 _os_list *ring_list = NULL;
399
400 ring_list = &phl_info->t_ring_free_list;
401
402 _os_spinlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
403 if (list_empty(ring_list) == false) {
404 phl_list_for_loop_safe(phl_tring_list, t, struct rtw_phl_tring_list,
405 ring_list, list) {
406 list_del(&phl_tring_list->list);
407 _phl_free_phl_tring_list(phl_info, phl_tring_list);
408 }
409 }
410 _os_spinunlock(drv_priv, &phl_info->t_ring_free_list_lock, _bh, NULL);
411 }
412
413
414 struct phl_ring_status *phl_alloc_ring_sts(struct phl_info_t *phl_info)
415 {
416 struct phl_ring_sts_pool *ring_sts_pool = phl_info->ring_sts_pool;
417 struct phl_ring_status *ring_sts = NULL;
418
419 _os_spinlock(phl_to_drvpriv(phl_info), &ring_sts_pool->idle_lock, _bh, NULL);
420
421 if (false == list_empty(&ring_sts_pool->idle)) {
422 ring_sts = list_first_entry(&ring_sts_pool->idle,
423 struct phl_ring_status, list);
424 list_del(&ring_sts->list);
425 }
426
427 _os_spinunlock(phl_to_drvpriv(phl_info), &ring_sts_pool->idle_lock, _bh, NULL);
428
429 return ring_sts;
430 }
431
432 void phl_release_ring_sts(struct phl_info_t *phl_info,
433 struct phl_ring_status *ring_sts)
434 {
435 struct phl_ring_sts_pool *ring_sts_pool = phl_info->ring_sts_pool;
436 void *drv_priv = NULL;
437
438 drv_priv = phl_to_drvpriv(phl_info);
439
440 _os_spinlock(drv_priv, &ring_sts_pool->idle_lock, _bh, NULL);
441 _os_mem_set(drv_priv, ring_sts, 0, sizeof(*ring_sts));
442 INIT_LIST_HEAD(&ring_sts->list);
443 list_add_tail(&ring_sts->list, &ring_sts_pool->idle);
444 _os_spinunlock(drv_priv, &ring_sts_pool->idle_lock, _bh, NULL);
445 }
446
447
448 void _phl_ring_status_deinit(struct phl_info_t *phl_info)
449 {
450 struct phl_ring_sts_pool *ring_sts_pool = NULL;
451 u16 buf_len = 0;
452 void *drv_priv = NULL;
453 FUNCIN();
454 drv_priv = phl_to_drvpriv(phl_info);
455 ring_sts_pool = (struct phl_ring_sts_pool *)phl_info->ring_sts_pool;
456 if (NULL != ring_sts_pool) {
457 buf_len = sizeof(struct phl_ring_sts_pool);
458 _os_spinlock_free(drv_priv, &ring_sts_pool->idle_lock);
459 _os_spinlock_free(drv_priv, &ring_sts_pool->busy_lock);
460 _os_mem_free(drv_priv, ring_sts_pool, buf_len);
461 }
462 FUNCOUT();
463 }
464
465
466 enum rtw_phl_status _phl_ring_status_init(struct phl_info_t *phl_info)
467 {
468 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
469 struct phl_ring_sts_pool *ring_sts_pool = NULL;
470 struct phl_ring_status *ring_sts = NULL;
471 void *drv_priv = NULL;
472 u16 buf_len = 0;
473 u8 i = 0;
474 FUNCIN_WSTS(pstatus);
475
476 drv_priv = phl_to_drvpriv(phl_info);
477 buf_len = sizeof(struct phl_ring_sts_pool);
478 ring_sts_pool =
479 (struct phl_ring_sts_pool *)_os_mem_alloc(drv_priv, buf_len);
480
481 if (NULL != ring_sts_pool) {
482 _os_mem_set(drv_priv, ring_sts_pool, 0, buf_len);
483 INIT_LIST_HEAD(&ring_sts_pool->idle);
484 INIT_LIST_HEAD(&ring_sts_pool->busy);
485 _os_spinlock_init(drv_priv, &ring_sts_pool->idle_lock);
486 _os_spinlock_init(drv_priv, &ring_sts_pool->busy_lock);
487
488 for (i = 0; i < MAX_PHL_RING_STATUS_NUMBER; i++) {
489 ring_sts = &ring_sts_pool->ring_sts[i];
490 INIT_LIST_HEAD(&ring_sts->list);
491 _os_spinlock(drv_priv,
492 (void *)&ring_sts_pool->idle_lock, _bh, NULL);
493 list_add_tail(&ring_sts->list, &ring_sts_pool->idle);
494 _os_spinunlock(drv_priv,
495 (void *)&ring_sts_pool->idle_lock, _bh, NULL);
496 }
497
498 phl_info->ring_sts_pool = ring_sts_pool;
499
500 pstatus = RTW_PHL_STATUS_SUCCESS;
501 }
502
503 if (RTW_PHL_STATUS_SUCCESS != pstatus)
504 _phl_ring_status_deinit(phl_info);
505 FUNCOUT_WSTS(pstatus);
506
507 return pstatus;
508 }
509
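/**
 * Take a snapshot of one tx ring: returns a phl_ring_status describing
 * the pending requests (macid/band/wmm/port, busy count and the ring
 * itself), or NULL when the ring is empty or no status entry can be
 * allocated; phl_next_idx is advanced to the next entry to service.
 */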
510 struct phl_ring_status *
511 _phl_check_ring_status(struct phl_info_t *phl_info,
512 struct rtw_phl_tx_ring *ring,
513 struct rtw_phl_tring_list *tring_list)
514 {
515 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
516 struct phl_ring_status *ring_sts = NULL;
517 u16 avail = 0, rptr = 0;
518 void *drv_priv = phl_to_drvpriv(phl_info);
519
520 do {
521 rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
522
523 avail = phl_calc_avail_rptr(rptr, ring->core_idx,
524 MAX_PHL_TX_RING_ENTRY_NUM);
525 if (0 == avail) {
526 ring_sts = NULL;
527 pstatus = RTW_PHL_STATUS_SUCCESS;
528 break;
529 } else {
530 ring_sts = phl_alloc_ring_sts(phl_info);
531 if (NULL == ring_sts) {
532 PHL_ERR("query ring status fail!\n");
533 pstatus = RTW_PHL_STATUS_RESOURCE;
534 break;
535 }
536 ring_sts->macid = tring_list->macid;
537 ring_sts->band = tring_list->band;
538 ring_sts->wmm = tring_list->wmm;
539 ring_sts->port = tring_list->port;
540 /*ring_sts->mbssid = tring_list->mbssid;*/
541 ring_sts->req_busy = avail;
542 ring_sts->ring_ptr = ring;
543
544 rptr += 1;
545
546 if (rptr >= MAX_PHL_TX_RING_ENTRY_NUM)
547 _os_atomic_set(drv_priv, &ring->phl_next_idx, 0);
548 else
549 _os_atomic_set(drv_priv, &ring->phl_next_idx, rptr);
550
551 pstatus = RTW_PHL_STATUS_SUCCESS;
552 break;
553 }
554 } while (false);
555
556 return ring_sts;
557 }
558
559 void _phl_reset_tx_plan(struct phl_info_t *phl_info,
560 struct phl_tx_plan *tx_plan)
561 {
562 struct phl_ring_status *ring_sts, *t;
563
564 INIT_LIST_HEAD(&tx_plan->list);
565 tx_plan->sleep = false;
566 tx_plan->has_mgnt = false;
567 tx_plan->has_hiq = false;
568 phl_list_for_loop_safe(ring_sts, t, struct phl_ring_status,
569 &tx_plan->sorted_ring, list) {
570 list_del(&ring_sts->list);
571 phl_release_ring_sts(phl_info, ring_sts);
572 }
573 INIT_LIST_HEAD(&tx_plan->sorted_ring);
574 }
575
576
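/**
 * Insert one ring status into the tx plan so that sorted_ring ends up
 * ordered by service priority: rings other than TID 1/2 are pushed to
 * the head (management and high-queue rings also set has_mgnt /
 * has_hiq), while TID 2 and then TID 1 stay at the tail.
 */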
577 void _phl_sort_ring_by_tid(struct phl_ring_status *ring_sts,
578 struct phl_tx_plan *tx_plan,
579 enum rtw_phl_ring_cat cat)
580 {
581 struct phl_ring_status *last_sts = NULL;
582
583 if (ring_sts->ring_ptr->tid == 1) {
584 list_add_tail(&ring_sts->list,
585 &tx_plan->sorted_ring);
586 } else if (ring_sts->ring_ptr->tid == 2) {
587 if (list_empty(&tx_plan->sorted_ring)) {
588 list_add_tail(&ring_sts->list,
589 &tx_plan->sorted_ring);
590 } else {
591 last_sts = list_last_entry(&tx_plan->sorted_ring,
592 struct phl_ring_status, list);
593 if (1 == last_sts->ring_ptr->tid) {
594 __list_add(&ring_sts->list,
595 _get_prev(&last_sts->list),
596 &last_sts->list);
597 } else {
598 list_add_tail(&ring_sts->list,
599 &tx_plan->sorted_ring);
600 }
601 }
602 } else {
603 list_add(&ring_sts->list,
604 &tx_plan->sorted_ring);
605 if (RTW_PHL_RING_CAT_MGNT == cat)
606 tx_plan->has_mgnt = true;
607 else if (RTW_PHL_RING_CAT_HIQ == cat)
608 tx_plan->has_hiq = true;
609 }
610
611 }
612
613 void _phl_check_tring_list(struct phl_info_t *phl_info,
614 struct rtw_phl_tring_list *tring_list,
615 _os_list *sta_list)
616 {
617 struct phl_ring_status *ring_sts = NULL;
618 struct rtw_phl_tx_ring *ring = NULL;
619 struct phl_tx_plan *tx_plan = &tring_list->tx_plan;
620 u8 i = 0;
621
622 for (i = 0; i < MAX_PHL_RING_CAT_NUM; i++) {
623
624 ring = &tring_list->phl_ring[i];
625
626 ring_sts = _phl_check_ring_status(phl_info, ring, tring_list);
627
628 if (NULL != ring_sts) {
629 _phl_sort_ring_by_tid(ring_sts, tx_plan, i);
630 } else {
631 continue;
632 }
633 }
634 /* hana_todo: check whether this macid is asleep or not */
635 if (!list_empty(&tx_plan->sorted_ring)) {
636 list_add_tail(&tx_plan->list, sta_list);
637 }
638 }
639
640 u8 phl_check_xmit_ring_resource(struct phl_info_t *phl_info, _os_list *sta_list)
641 {
642 void *drvpriv = phl_to_drvpriv(phl_info);
643 _os_list *tring_list_head = &phl_info->t_ring_list;
644 struct rtw_phl_tring_list *tring_list, *t;
645
646 _os_spinlock(drvpriv, &phl_info->t_ring_list_lock, _bh, NULL);
647 phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
648 tring_list_head, list) {
649 _phl_check_tring_list(phl_info, tring_list, sta_list);
650 }
651 #ifdef SDIO_TX_THREAD
652 /**
653 * When SDIO_TX_THREAD is enabled, the "phl_sw_tx_more" flag is
654 * cleared in phl_tx_sdio_thrd_hdl() instead.
655 */
656 #else
657 _os_atomic_set(drvpriv, &phl_info->phl_sw_tx_more, 0);
658 #endif
659 _os_spinunlock(drvpriv, &phl_info->t_ring_list_lock, _bh, NULL);
660
661 if (true == list_empty(sta_list))
662 return false;
663 else
664 return true;
665 }
666
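/**
 * Flatten the per-station tx plans in @sta_list into @t_fctrl_result:
 * sleeping stations are dropped, high-queue and management rings are
 * moved to the front, and the remaining rings are grouped per TID so
 * rings with the same TID from different stations sit next to each
 * other; every tx plan is reset once it has been consumed.
 */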
667 void phl_tx_flow_ctrl(struct phl_info_t *phl_info, _os_list *sta_list)
668 {
669 _os_list *t_fctrl_result = &phl_info->t_fctrl_result;
670 _os_list *tid_entry[MAX_PHL_RING_CAT_NUM] = {0};
671 struct phl_tx_plan *tx_plan, *tp;
672 struct phl_ring_status *ring_sts = NULL, *ts;
673 u8 tid = 0;
674
675 _os_mem_set(phl_to_drvpriv(phl_info), tid_entry, 0,
676 sizeof(_os_list *) * MAX_PHL_RING_CAT_NUM);
677
678 phl_list_for_loop_safe(tx_plan, tp, struct phl_tx_plan, sta_list,
679 list) {
680 /* drop power saving station */
681 if (true == tx_plan->sleep) {
682 list_del(&tx_plan->list);
683 _phl_reset_tx_plan(phl_info, tx_plan);
684 continue;
685 }
686
687 if (true == tx_plan->has_hiq) {
688 ring_sts = list_first_entry(&tx_plan->sorted_ring,
689 struct phl_ring_status, list);
690 list_del(&ring_sts->list);
691 list_add(&ring_sts->list, t_fctrl_result);
692 }
693
694 if (true == tx_plan->has_mgnt) {
695 ring_sts = list_first_entry(&tx_plan->sorted_ring,
696 struct phl_ring_status, list);
697 list_del(&ring_sts->list);
698 list_add(&ring_sts->list, t_fctrl_result);
699 }
700
701 /* todo: drop station which has reached tx limit */
702
703 phl_list_for_loop_safe(ring_sts, ts, struct phl_ring_status,
704 &tx_plan->sorted_ring, list) {
705 list_del(&ring_sts->list);
706 tid = ring_sts->ring_ptr->tid;
707 /* todo: drop tid which has reached tx limit */
708 /* sw tx cnt limit */
709 if (NULL == tid_entry[tid]) {
710 list_add_tail(&ring_sts->list, t_fctrl_result);
711 } else {
712 __list_add(&ring_sts->list, tid_entry[tid],
713 _get_next(tid_entry[tid]));
714 }
715 tid_entry[tid] = &ring_sts->list;
716 }
717
718 /* clear tx plan */
719 list_del(&tx_plan->list);
720 _phl_reset_tx_plan(phl_info, tx_plan);
721 }
722 }
723
724 enum rtw_phl_status phl_register_handler(struct rtw_phl_com_t *phl_com,
725 struct rtw_phl_handler *handler)
726 {
727 enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
728 _os_tasklet *tasklet = NULL;
729 _os_workitem *workitem = NULL;
730 void *drv_priv = phlcom_to_drvpriv(phl_com);
731
732 FUNCIN_WSTS(phl_status);
733
734 if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
735 tasklet = &handler->os_handler.u.tasklet;
736 phl_status = _os_tasklet_init(drv_priv, tasklet,
737 handler->callback, handler);
738 } else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
739 workitem = &handler->os_handler.u.workitem;
740 phl_status = _os_workitem_init(drv_priv, workitem,
741 handler->callback, workitem);
742 } else if (handler->type == RTW_PHL_HANDLER_PRIO_NORMAL) {
743 _os_sema_init(drv_priv, &(handler->os_handler.os_sema), 0);
744 handler->os_handler.created = 0;
745 phl_status = RTW_PHL_STATUS_SUCCESS;
746 } else {
747 PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handler type(%d)\n",
748 handler->type);
749 }
750
751 if (RTW_PHL_STATUS_SUCCESS != phl_status)
752 phl_deregister_handler(phl_com, handler);
753
754 FUNCOUT_WSTS(phl_status);
755 return phl_status;
756 }
757
758 enum rtw_phl_status phl_deregister_handler(
759 struct rtw_phl_com_t *phl_com, struct rtw_phl_handler *handler)
760 {
761 enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
762 _os_tasklet *tasklet = NULL;
763 _os_workitem *workitem = NULL;
764 _os_thread *thread = NULL;
765 void *drv_priv = phlcom_to_drvpriv(phl_com);
766
767 FUNCIN_WSTS(phl_status);
768
769 if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
770 tasklet = &handler->os_handler.u.tasklet;
771 phl_status = _os_tasklet_deinit(drv_priv, tasklet);
772 } else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
773 workitem = &handler->os_handler.u.workitem;
774 phl_status = _os_workitem_deinit(drv_priv, workitem);
775 } else if (handler->type == RTW_PHL_HANDLER_PRIO_NORMAL) {
776 thread = &handler->os_handler.u.thread;
777 if (handler->os_handler.created == 1) {
778 _os_thread_stop(drv_priv, thread);
779 _os_sema_up(drv_priv, &(handler->os_handler.os_sema));
780 _os_thread_deinit(drv_priv, thread);
781 }
782 phl_status = RTW_PHL_STATUS_SUCCESS;
783 } else {
784 PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handler type(%d)\n",
785 handler->type);
786 }
787
788 if (RTW_PHL_STATUS_SUCCESS != phl_status) {
789 PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_,
790 "[WARNING] deregister handler fail (status = 0x%08X)\n",
791 phl_status);
792 }
793
794 FUNCOUT_WSTS(phl_status);
795 return phl_status;
796 }
797
798 enum rtw_phl_status phl_schedule_handler(
799 struct rtw_phl_com_t *phl_com, struct rtw_phl_handler *handler)
800 {
801 enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
802 _os_tasklet *tasklet = NULL;
803 _os_workitem *workitem = NULL;
804 _os_thread *thread = NULL;
805 void *drv_priv = phlcom_to_drvpriv(phl_com);
806
807 FUNCIN_WSTS(phl_status);
808
809 if (handler->type == RTW_PHL_HANDLER_PRIO_HIGH) {
810 tasklet = &handler->os_handler.u.tasklet;
811 phl_status = _os_tasklet_schedule(drv_priv, tasklet);
812 } else if (handler->type == RTW_PHL_HANDLER_PRIO_LOW) {
813 workitem = &handler->os_handler.u.workitem;
814 phl_status = _os_workitem_schedule(drv_priv, workitem);
815 } else if (handler->type == RTW_PHL_HANDLER_PRIO_NORMAL) {
816 thread = &handler->os_handler.u.thread;
817 if (handler->os_handler.created == 0) {
818 phl_status = _os_thread_init(drv_priv, thread, (int(*)(void*))handler->callback,
819 thread, handler->cb_name);
820 if (phl_status == RTW_PHL_STATUS_SUCCESS) {
821 handler->os_handler.created = 1;
822 _os_sema_up(drv_priv, &(handler->os_handler.os_sema));
823 }
824 } else {
825 _os_sema_up(drv_priv, &(handler->os_handler.os_sema));
826 phl_status = RTW_PHL_STATUS_SUCCESS;
827 }
828 } else {
829 PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "[WARNING] unknown handler type(%d)\n",
830 handler->type);
831 }
832
833 FUNCOUT_WSTS(phl_status);
834 return phl_status;
835 }
836
837 static enum rtw_phl_status enqueue_h2c_pkt(
838 struct phl_info_t *phl_info,
839 struct phl_queue *pool_list,
840 struct rtw_h2c_pkt *h2c_pkt, u8 pos)
841 {
842 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
843 void *drv = phl_to_drvpriv(phl_info);
844 _os_spinlockfg sp_flags;
845
846
847 if (h2c_pkt != NULL) {
848 _os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
849 if (_tail == pos)
850 list_add_tail(&h2c_pkt->list, &pool_list->queue);
851 else if (_first == pos)
852 list_add(&h2c_pkt->list, &pool_list->queue);
853 pool_list->cnt++;
854 _os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
855
856 pstatus = RTW_PHL_STATUS_SUCCESS;
857 }
858
859 return pstatus;
860 }
861
862 static struct rtw_h2c_pkt *dequeue_h2c_pkt(struct phl_info_t *phl_info,
863 struct phl_queue *pool_list)
864 {
865 struct rtw_h2c_pkt *h2c_pkt = NULL;
866 void *drv = phl_to_drvpriv(phl_info);
867 _os_spinlockfg sp_flags;
868
869 _os_spinlock(drv, &pool_list->lock, _irq, &sp_flags);
870 if (list_empty(&pool_list->queue)) {
871 h2c_pkt = NULL;
872 } else {
873 h2c_pkt = list_first_entry(&pool_list->queue, struct rtw_h2c_pkt, list);
874
875 list_del(&h2c_pkt->list);
876 pool_list->cnt--;
877 }
878 _os_spinunlock(drv, &pool_list->lock, _irq, &sp_flags);
879 return h2c_pkt;
880 }
881
882 static void _phl_reset_h2c_pkt(struct phl_info_t *phl_info,
883 struct rtw_h2c_pkt *h2c_pkt,
884 u32 buf_len)
885 {
886 enum rtw_h2c_pkt_type type = h2c_pkt->type;
887
888 _os_mem_set(phl_to_drvpriv(phl_info), h2c_pkt->vir_head, 0, buf_len);
889 h2c_pkt->buf_len = buf_len;
890 h2c_pkt->id = 0;
891 h2c_pkt->host_idx = 0;
892 h2c_pkt->data_len = 0;
893 h2c_pkt->h2c_seq = 0;
894
895 switch (type) {
896 case H2CB_TYPE_CMD:
897 h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
898 h2c_pkt->vir_tail = h2c_pkt->vir_data;
899 h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_CMD_LEN;
900 break;
901 case H2CB_TYPE_DATA:
902 h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
903 h2c_pkt->vir_tail = h2c_pkt->vir_data;
904 h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_DATA_LEN;
905 break;
906 case H2CB_TYPE_LONG_DATA:
907 h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
908 h2c_pkt->vir_tail = h2c_pkt->vir_data;
909 h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_LONG_DATA_LEN;
910 break;
911 case H2CB_TYPE_MAX:
912 PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_reset_h2c_pkt(): Unsupported case:%d, please check it\n",
913 type);
914 break;
915 default:
916 PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "_phl_reset_h2c_pkt(): Unrecognized case:%d, please check it\n",
917 type);
918 break;
919 }
920
921 }
922
923 enum rtw_phl_status phl_enqueue_busy_h2c_pkt(struct phl_info_t *phl_info,
924 struct rtw_h2c_pkt *h2c_pkt, u8 pos)
925 {
926 struct phl_h2c_pkt_pool *h2c_pkt_pool =
927 (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
928 struct phl_queue *queue = &h2c_pkt_pool->busy_h2c_pkt_list;
929
930 return enqueue_h2c_pkt(phl_info, queue, h2c_pkt, pos);
931 }
932
933 enum rtw_phl_status phl_enqueue_idle_h2c_pkt(
934 struct phl_info_t *phl_info,
935 struct rtw_h2c_pkt *h2c_pkt)
936 {
937 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
938 struct phl_h2c_pkt_pool *h2c_pkt_pool =
939 (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
940 struct phl_queue *queue = NULL;
941 int *idle_cnt = NULL;
942 u32 buf_len = 0;
943
944 if (!h2c_pkt)
945 return pstatus;
946
947 switch (h2c_pkt->type) {
948 case H2CB_TYPE_CMD:
949 buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_CMD_LEN;
950 queue = &h2c_pkt_pool->idle_h2c_pkt_cmd_list;
951 idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt;
952 break;
953 case H2CB_TYPE_DATA:
954 buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_DATA_LEN;
955 queue = &h2c_pkt_pool->idle_h2c_pkt_data_list;
956 idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_data_list.cnt;
957 break;
958 case H2CB_TYPE_LONG_DATA:
959 buf_len = FWCMD_HDR_LEN + _WD_BODY_LEN + H2C_LONG_DATA_LEN;
960 queue = &h2c_pkt_pool->idle_h2c_pkt_ldata_list;
961 idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt;
962 break;
963 case H2CB_TYPE_MAX:
964 PHL_ERR("%s : cannot find the matching case(%d).\n",
965 __func__, h2c_pkt->type);
966 break;
967 default:
968 PHL_ERR("%s : cannot find the matching cases(%d).\n",
969 __func__, h2c_pkt->type);
970 break;
971 }
972
973 _phl_reset_h2c_pkt(phl_info, h2c_pkt, buf_len);
974
975 pstatus = enqueue_h2c_pkt(phl_info, queue, h2c_pkt, _tail);
976
977 PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s : remaining %d (type %d).\n",
978 __func__, *idle_cnt, h2c_pkt->type);
979
980 return pstatus;
981 }
982
983 struct rtw_h2c_pkt *phl_query_busy_h2c_pkt(struct phl_info_t *phl_info)
984 {
985 struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
986 struct rtw_h2c_pkt *h2c_pkt = NULL;
987 struct phl_queue *queue = NULL;
988
989 h2c_pkt_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
990 queue = &h2c_pkt_pool->busy_h2c_pkt_list;
991
992 h2c_pkt = dequeue_h2c_pkt(phl_info, queue);
993
994 return h2c_pkt;
995 }
996
997 struct rtw_h2c_pkt *phl_query_idle_h2c_pkt(struct phl_info_t *phl_info, u8 type)
998 {
999 struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
1000 struct rtw_h2c_pkt *h2c_pkt = NULL;
1001 enum rtw_h2c_pkt_type h2c_type = (enum rtw_h2c_pkt_type)type;
1002 struct phl_queue *queue = NULL;
1003 int *idle_cnt = NULL;
1004
1005 h2c_pkt_pool = (struct phl_h2c_pkt_pool *)phl_info->h2c_pool;
1006
1007 switch (h2c_type) {
1008 case H2CB_TYPE_CMD:
1009 queue = &h2c_pkt_pool->idle_h2c_pkt_cmd_list;
1010 idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt;
1011 break;
1012 case H2CB_TYPE_DATA:
1013 queue = &h2c_pkt_pool->idle_h2c_pkt_data_list;
1014 idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_data_list.cnt;
1015 break;
1016 case H2CB_TYPE_LONG_DATA:
1017 queue = &h2c_pkt_pool->idle_h2c_pkt_ldata_list;
1018 idle_cnt = &h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt;
1019 break;
1020 case H2CB_TYPE_MAX:
1021 PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_query_idle_h2c_pkt(): Unsupported case:%d, please check it\n",
1022 h2c_type);
1023 break;
1024 default:
1025 PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "phl_query_idle_h2c_pkt(): Unrecognized case:%d, please check it\n",
1026 h2c_type);
1027 break;
1028 }
1029 PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,
1030 "phl_query_idle_h2c_pkt => remaining %d (type %d).\n",
1031 *idle_cnt, h2c_type);
1032
1033 h2c_pkt = dequeue_h2c_pkt(phl_info, queue);
1034
1035 return h2c_pkt;
1036 }
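/*
 * Typical h2c buffer life cycle implied by the helpers above (a sketch,
 * not an exhaustive contract): a caller grabs a buffer of the wanted
 * size class with phl_query_idle_h2c_pkt(), fills it, queues it with
 * phl_enqueue_busy_h2c_pkt(), the HCI layer drains the busy queue via
 * phl_query_busy_h2c_pkt(), and the buffer finally returns to its idle
 * pool through phl_enqueue_idle_h2c_pkt(), which also resets it.
 */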
1037
1038 #if 0
1039 static enum rtw_phl_status phl_release_target_h2c_pkt(
1040 struct phl_info_t *phl_info,
1041 struct phl_h2c_pkt_pool *h2c_pkt_pool,
1042 struct rtw_h2c_pkt *h2c_pkt)
1043 {
1044 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1045
1046 if (h2c_pkt_pool != NULL && h2c_pkt != NULL) {
1047 phl_enqueue_idle_h2c_pkt(phl_info, h2c_pkt);
1048 pstatus = RTW_PHL_STATUS_SUCCESS;
1049 }
1050
1051 return pstatus;
1052 }
1053 #endif
1054
1055 static void _phl_free_h2c_pkt(struct phl_info_t *phl_info,
1056 struct rtw_h2c_pkt *h2c_pkt_buf)
1057 {
1058 u16 i = 0;
1059 struct rtw_h2c_pkt *h2c_pkt = h2c_pkt_buf;
1060 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1061
1062 if (NULL != h2c_pkt) {
1063 for (i = 0; i < MAX_H2C_PKT_NUM; i++) {
1064 if (NULL == h2c_pkt->vir_head)
1065 continue;
1066 hci_trx_ops->free_h2c_pkt_buf(phl_info, h2c_pkt);
1067 h2c_pkt->vir_head = NULL;
1068 h2c_pkt->cache = false;
1069 h2c_pkt++;
1070 }
1071
1072 _os_mem_free(phl_to_drvpriv(phl_info), h2c_pkt_buf,
1073 sizeof(struct rtw_h2c_pkt) * MAX_H2C_PKT_NUM);
1074 h2c_pkt_buf = NULL;
1075 }
1076 }
1077
1078 struct rtw_h2c_pkt *_phl_alloc_h2c_pkt(struct phl_info_t *phl_info,
1079 struct phl_h2c_pkt_pool *h2c_pool)
1080 {
1081 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1082 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1083 struct rtw_h2c_pkt *h2c_pkt = NULL;
1084 struct rtw_h2c_pkt *h2c_pkt_root = NULL;
1085 struct phl_h2c_pkt_pool *h2c_pkt_pool = h2c_pool;
1086 u32 buf_len = 0;
1087 int i;
1088
1089 buf_len = sizeof(struct rtw_h2c_pkt) * MAX_H2C_PKT_NUM;
1090 h2c_pkt_root = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
1091 h2c_pkt = h2c_pkt_root;
1092 if (h2c_pkt != NULL) {
1093 for (i = 0; i < MAX_H2C_PKT_NUM; i++) {
1094 h2c_pkt->cache = false;
1095 buf_len = get_h2c_size_by_range(i);
1096 hci_trx_ops->alloc_h2c_pkt_buf(phl_info, h2c_pkt, buf_len);
1097
1098 if (NULL == h2c_pkt->vir_head) {
1099 pstatus = RTW_PHL_STATUS_RESOURCE;
1100 break;
1101 }
1102
1103 h2c_pkt->buf_len = buf_len;
1104 h2c_pkt->vir_data = h2c_pkt->vir_head + FWCMD_HDR_LEN + _WD_BODY_LEN;
1105 h2c_pkt->vir_tail = h2c_pkt->vir_data;
1106 INIT_LIST_HEAD(&h2c_pkt->list);
1107 if (i < _H2CB_CMD_QLEN) {
1108 h2c_pkt->type = H2CB_TYPE_CMD;
1109 h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_CMD_LEN;
1110 enqueue_h2c_pkt(phl_info,
1111 &h2c_pkt_pool->idle_h2c_pkt_cmd_list, h2c_pkt, _tail);
1112 } else if (i < _H2CB_CMD_QLEN + _H2CB_DATA_QLEN) {
1113 h2c_pkt->type = H2CB_TYPE_DATA;
1114 h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_DATA_LEN;
1115 enqueue_h2c_pkt(phl_info,
1116 &h2c_pkt_pool->idle_h2c_pkt_data_list, h2c_pkt, _tail);
1117 } else {
1118 h2c_pkt->type = H2CB_TYPE_LONG_DATA;
1119 h2c_pkt->vir_end = h2c_pkt->vir_data + H2C_LONG_DATA_LEN;
1120 enqueue_h2c_pkt(phl_info,
1121 &h2c_pkt_pool->idle_h2c_pkt_ldata_list, h2c_pkt, _tail);
1122 }
1123 h2c_pkt++;
1124 pstatus = RTW_PHL_STATUS_SUCCESS;
1125 }
1126 }
1127
1128 if (RTW_PHL_STATUS_SUCCESS != pstatus) {
1129 _phl_free_h2c_pkt(phl_info, h2c_pkt_root);
1130 h2c_pkt_root = NULL;
1131 }
1132
1133 return h2c_pkt_root;
1134 }
1135
1136 static void _phl_free_h2c_pool(struct phl_info_t *phl_info)
1137 {
1138 struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
1139 void *drv_priv = phl_to_drvpriv(phl_info);
1140
1141 FUNCIN();
1142
1143 h2c_pkt_pool = phl_info->h2c_pool;
1144 if (NULL != h2c_pkt_pool) {
1145 h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt = 0;
1146 h2c_pkt_pool->idle_h2c_pkt_data_list.cnt = 0;
1147 h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt = 0;
1148
1149 _phl_free_h2c_pkt(phl_info, h2c_pkt_pool->h2c_pkt_buf);
1150 h2c_pkt_pool->h2c_pkt_buf = NULL;
1151 _os_spinlock_free(drv_priv,
1152 &h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
1153 _os_spinlock_free(drv_priv,
1154 &h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
1155 _os_spinlock_free(drv_priv,
1156 &h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
1157 _os_spinlock_free(drv_priv,
1158 &h2c_pkt_pool->busy_h2c_pkt_list.lock);
1159 _os_spinlock_free(drv_priv,
1160 &h2c_pkt_pool->recycle_lock);
1161 _os_mem_free(phl_to_drvpriv(phl_info), h2c_pkt_pool,
1162 sizeof(struct phl_h2c_pkt_pool));
1163 }
1164 FUNCOUT();
1165 }
1166
1167 enum rtw_phl_status
1168 _phl_alloc_h2c_pool(struct phl_info_t *phl_info)
1169 {
1170 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1171 struct phl_h2c_pkt_pool *h2c_pkt_pool = NULL;
1172 struct rtw_h2c_pkt *h2c_pkt_buf = NULL;
1173 void *drv_priv = NULL;
1174
1175 FUNCIN_WSTS(pstatus);
1176 drv_priv = phl_to_drvpriv(phl_info);
1177
1178 h2c_pkt_pool = _os_mem_alloc(drv_priv, sizeof(struct phl_h2c_pkt_pool));
1179 if (NULL != h2c_pkt_pool) {
1180
1181 INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_cmd_list.queue);
1182 h2c_pkt_pool->idle_h2c_pkt_cmd_list.cnt = 0;
1183
1184 INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_data_list.queue);
1185 h2c_pkt_pool->idle_h2c_pkt_data_list.cnt = 0;
1186
1187 INIT_LIST_HEAD(&h2c_pkt_pool->idle_h2c_pkt_ldata_list.queue);
1188 h2c_pkt_pool->idle_h2c_pkt_ldata_list.cnt = 0;
1189
1190 INIT_LIST_HEAD(&h2c_pkt_pool->busy_h2c_pkt_list.queue);
1191 h2c_pkt_pool->busy_h2c_pkt_list.cnt = 0;
1192
1193 _os_spinlock_init(drv_priv,
1194 &h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
1195 _os_spinlock_init(drv_priv,
1196 &h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
1197 _os_spinlock_init(drv_priv,
1198 &h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
1199 _os_spinlock_init(drv_priv,
1200 &h2c_pkt_pool->busy_h2c_pkt_list.lock);
1201 _os_spinlock_init(drv_priv,
1202 &h2c_pkt_pool->recycle_lock);
1203
1204 h2c_pkt_buf = _phl_alloc_h2c_pkt(phl_info, h2c_pkt_pool);
1205
1206 if (NULL == h2c_pkt_buf) {
1207 _os_spinlock_free(drv_priv,
1208 &h2c_pkt_pool->idle_h2c_pkt_cmd_list.lock);
1209 _os_spinlock_free(drv_priv,
1210 &h2c_pkt_pool->idle_h2c_pkt_data_list.lock);
1211 _os_spinlock_free(drv_priv,
1212 &h2c_pkt_pool->idle_h2c_pkt_ldata_list.lock);
1213 _os_spinlock_free(drv_priv,
1214 &h2c_pkt_pool->busy_h2c_pkt_list.lock);
1215 _os_spinlock_free(drv_priv,
1216 &h2c_pkt_pool->recycle_lock);
1217 _os_mem_free(drv_priv, h2c_pkt_pool, sizeof(struct phl_h2c_pkt_pool));
1218 h2c_pkt_pool = NULL;
1219 pstatus = RTW_PHL_STATUS_RESOURCE;
1220 } else {
1221 h2c_pkt_pool->h2c_pkt_buf = h2c_pkt_buf;
1222 pstatus = RTW_PHL_STATUS_SUCCESS;
1223 }
1224 }
1225
1226 if (RTW_PHL_STATUS_SUCCESS == pstatus)
1227 phl_info->h2c_pool = h2c_pkt_pool;
1228
1229 FUNCOUT_WSTS(pstatus);
1230
1231 return pstatus;
1232 }
1233
1234 void
1235 phl_trx_free_handler(void *phl)
1236 {
1237 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1238 struct rtw_phl_handler *tx_handler = &phl_info->phl_tx_handler;
1239 struct rtw_phl_handler *rx_handler = &phl_info->phl_rx_handler;
1240 struct rtw_phl_handler *event_handler = &phl_info->phl_event_handler;
1241
1242 FUNCIN();
1243
1244 phl_deregister_handler(phl_info->phl_com, event_handler);
1245 phl_deregister_handler(phl_info->phl_com, rx_handler);
1246 phl_deregister_handler(phl_info->phl_com, tx_handler);
1247
1248 FUNCOUT();
1249 }
1250
1251 void
1252 phl_trx_free_sw_rsc(void *phl)
1253 {
1254 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1255 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1256 void *drv_priv = NULL;
1257
1258 FUNCIN();
1259
1260 drv_priv = phl_to_drvpriv(phl_info);
1261
1262 _phl_free_h2c_pool(phl_info);
1263
1264 hci_trx_ops->hci_trx_deinit(phl_info);
1265
1266 phl_rx_deinit(phl_info);
1267
1268 _phl_ring_status_deinit(phl_info);
1269
1270 _os_spinlock_free(drv_priv, &phl_info->t_ring_list_lock);
1271 _os_spinlock_free(drv_priv, &phl_info->rx_ring_lock);
1272 _os_spinlock_free(drv_priv, &phl_info->t_fctrl_result_lock);
1273 _os_spinlock_free(drv_priv, &phl_info->t_ring_free_list_lock);
1274
1275 FUNCOUT();
1276 }
1277
1278 enum rtw_phl_status phl_datapath_start(struct phl_info_t *phl_info)
1279 {
1280 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1281 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1282
1283 do {
1284 pstatus = hci_trx_ops->trx_cfg(phl_info);
1285 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1286 break;
1287 rtw_hal_notification(phl_info->hal, MSG_EVT_DATA_PATH_START, HW_BAND_MAX);
1288 }while (false);
1289
1290 return pstatus;
1291 }
1292
1293 void phl_datapath_stop(struct phl_info_t *phl_info)
1294 {
1295 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1296
1297 hci_trx_ops->trx_stop(phl_info);
1298 rtw_hal_notification(phl_info->hal, MSG_EVT_DATA_PATH_STOP, HW_BAND_MAX);
1299 phl_free_deferred_tx_ring(phl_info);
1300 }
1301
1302 void phl_datapath_deinit(struct phl_info_t *phl_info)
1303 {
1304 phl_trx_free_handler(phl_info);
1305 phl_trx_free_sw_rsc(phl_info);
1306 }
1307
1308 enum rtw_phl_status phl_datapath_init(struct phl_info_t *phl_info)
1309 {
1310 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1311 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1312 struct rtw_phl_handler *event_handler = &phl_info->phl_event_handler;
1313 void *drv_priv = NULL;
1314 FUNCIN_WSTS(pstatus);
1315 drv_priv = phl_to_drvpriv(phl_info);
1316
1317 do {
1318 INIT_LIST_HEAD(&phl_info->t_ring_list);
1319 INIT_LIST_HEAD(&phl_info->t_fctrl_result);
1320 INIT_LIST_HEAD(&phl_info->t_ring_free_list);
1321 _os_spinlock_init(drv_priv, &phl_info->t_ring_list_lock);
1322 _os_spinlock_init(drv_priv, &phl_info->rx_ring_lock);
1323 _os_spinlock_init(drv_priv, &phl_info->t_fctrl_result_lock);
1324 _os_spinlock_init(drv_priv, &phl_info->t_ring_free_list_lock);
1325
1326 event_handler->type = RTW_PHL_HANDLER_PRIO_HIGH;
1327 event_handler->callback = phl_event_indicator;
1328 event_handler->context = phl_info;
1329 event_handler->drv_priv = drv_priv;
1330 event_handler->status = 0;
1331 pstatus = phl_register_handler(phl_info->phl_com, event_handler);
1332 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1333 break;
1334
1335 pstatus = _phl_ring_status_init(phl_info);
1336 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1337 break;
1338
1339 pstatus = phl_rx_init(phl_info);
1340 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1341 break;
1342
1343 pstatus = hci_trx_ops->hci_trx_init(phl_info);
1344 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1345 break;
1346
1347 /* allocate h2c pkt */
1348 pstatus = _phl_alloc_h2c_pool(phl_info);
1349 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1350 break;
1351
1352 }while (false);
1353
1354 if (RTW_PHL_STATUS_SUCCESS != pstatus)
1355 phl_datapath_deinit(phl_info);
1356
1357 FUNCOUT_WSTS(pstatus);
1358
1359 return pstatus;
1360 }
1361
1362 static enum rtw_phl_status
1363 _phl_tx_pwr_notify(void *phl)
1364 {
1365 enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
1366
1367 #ifdef SDIO_TX_THREAD
1368 phl_tx_sdio_wake_thrd((struct phl_info_t *)phl);
1369 #else
1370 pstatus = rtw_phl_tx_req_notify(phl);
1371 #endif
1372
1373 return pstatus;
1374 }
1375 #ifdef CONFIG_POWER_SAVE
1376 static void _phl_req_pwr_cb(void *priv, struct phl_msg *msg)
1377 {
1378 struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1379
1380 if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_TX)
1381 _os_atomic_set(phl_to_drvpriv(phl_info),
1382 &phl_info->phl_sw_tx_req_pwr,
1383 0);
1384 else
1385 _os_atomic_set(phl_to_drvpriv(phl_info),
1386 &phl_info->phl_sw_rx_req_pwr,
1387 0);
1388
1389 if (IS_MSG_FAIL(msg->msg_id) || IS_MSG_CANCEL(msg->msg_id)) {
1390 PHL_WARN("%s(): request power failure.\n", __func__);
1391 return;
1392 }
1393
1394 if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_TX)
1395 _phl_tx_pwr_notify(priv);
1396 else if (MSG_MDL_ID_FIELD(msg->msg_id) == PHL_MDL_RX)
1397 rtw_phl_start_rx_process(priv);
1398 }
1399
1400 static void _phl_datapath_req_pwr(struct phl_info_t *phl_info, u8 type)
1401 {
1402 enum rtw_phl_status psts = RTW_PHL_STATUS_FAILURE;
1403 struct phl_msg msg = {0};
1404 struct phl_msg_attribute attr = {0};
1405
1406 PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
1407 "%s(): [DATA_CTRL] SW datapath paused by ps module and request power\n",
1408 __func__);
1409
1410 SET_MSG_MDL_ID_FIELD(msg.msg_id, ((type == PHL_CTRL_TX) ? PHL_MDL_TX : PHL_MDL_RX));
1411 SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_TRX_PWR_REQ);
1412
1413 attr.completion.completion = _phl_req_pwr_cb;
1414 attr.completion.priv = phl_info;
1415
1416 /* shall set req_pwr flag first before sending req_pwr msg */
1417 if (PHL_CTRL_TX == type)
1418 _os_atomic_set(phl_to_drvpriv(phl_info),
1419 &phl_info->phl_sw_tx_req_pwr,
1420 1);
1421 else
1422 _os_atomic_set(phl_to_drvpriv(phl_info),
1423 &phl_info->phl_sw_rx_req_pwr,
1424 1);
1425
1426 psts = phl_disp_eng_send_msg(phl_info, &msg, &attr, NULL);
1427 if (RTW_PHL_STATUS_SUCCESS != psts) {
1428 PHL_WARN("%s(): CANNOT send msg to request power.\n", __func__);
1429 if (PHL_CTRL_TX == type)
1430 _os_atomic_set(phl_to_drvpriv(phl_info),
1431 &phl_info->phl_sw_tx_req_pwr,
1432 0);
1433 else
1434 _os_atomic_set(phl_to_drvpriv(phl_info),
1435 &phl_info->phl_sw_rx_req_pwr,
1436 0);
1437 }
1438 }
1439
1440 static bool _phl_datapath_chk_pwr(struct phl_info_t *phl_info, u8 type)
1441 {
1442 void *drvpriv = phl_to_drvpriv(phl_info);
1443 enum data_ctrl_mdl pause_id = 0;
1444 _os_atomic *trx_more;
1445 _os_atomic *req_pwr;
1446
1447 if (type == PHL_CTRL_TX) {
1448 pause_id = phl_info->pause_tx_id;
1449 trx_more = &phl_info->phl_sw_tx_more;
1450 req_pwr = &phl_info->phl_sw_tx_req_pwr;
1451 } else {
1452 pause_id = phl_info->pause_rx_id;
1453 trx_more = &phl_info->phl_sw_rx_more;
1454 req_pwr = &phl_info->phl_sw_rx_req_pwr;
1455 }
1456
1457 if (pause_id & ~(DATA_CTRL_MDL_PS)) {
1458 PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_,
1459 "%s(): [DATA_CTRL] SW datapath paused by module(0x%x)\n",
1460 __func__,
1461 pause_id);
1462 return false;
1463 }
1464
1465 if (true == _os_atomic_read(drvpriv, trx_more) &&
1466 false == _os_atomic_read(drvpriv, req_pwr))
1467 _phl_datapath_req_pwr(phl_info, type);
1468
1469 return true;
1470 }
1471 #endif
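/**
 * Return true when the SW tx / rx path (selected by @type) is currently
 * paused; with CONFIG_POWER_SAVE, a pause owned only by the PS module
 * additionally triggers a power request so pending traffic can resume.
 */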
1472 bool phl_datapath_chk_trx_pause(struct phl_info_t *phl_info, u8 type)
1473 {
1474 void *drvpriv = phl_to_drvpriv(phl_info);
1475 _os_atomic *sw_sts;
1476
1477 if (type == PHL_CTRL_TX)
1478 sw_sts = &phl_info->phl_sw_tx_sts;
1479 else
1480 sw_sts = &phl_info->phl_sw_rx_sts;
1481
1482 if (PHL_TX_STATUS_SW_PAUSE == _os_atomic_read(drvpriv, sw_sts)) {
1483 #ifdef CONFIG_POWER_SAVE
1484 _phl_datapath_chk_pwr(phl_info, type);
1485 #endif
1486 return true;
1487 }
1488
1489 return false;
1490 }
1491
1492 void rtw_phl_tx_stop(void *phl)
1493 {
1494 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1495 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1496
1497 /* Pause SW Tx */
1498 hci_trx_ops->req_tx_stop(phl_info);
1499 }
1500
1501 void rtw_phl_tx_resume(void *phl)
1502 {
1503 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1504 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1505
1506 /* Resume SW Tx */
1507 hci_trx_ops->trx_resume(phl_info, PHL_CTRL_TX);
1508 }
1509
1510
1511 enum rtw_phl_status rtw_phl_tx_req_notify(void *phl)
1512 {
1513 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1514 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1515
1516 pstatus = phl_schedule_handler(phl_info->phl_com,
1517 &phl_info->phl_tx_handler);
1518
1519 return pstatus;
1520 }
1521
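/**
 * Entry point for the core layer to hand one xmit request to PHL: the
 * request is placed into the per-macid ring matching its TID and the
 * "tx more" flag is set; the caller is expected to kick SW tx afterwards
 * (e.g. via rtw_phl_tx_req_notify()), as that is not done here.
 */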
1522 enum rtw_phl_status rtw_phl_add_tx_req(void *phl,
1523 struct rtw_xmit_req *tx_req)
1524 {
1525 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1526 struct rtw_phl_tring_list *tring_list, *t;
1527 struct rtw_phl_tx_ring *ring = NULL;
1528 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1529 void *drv_priv = NULL;
1530 _os_list *list_head = &phl_info->t_ring_list;
1531 u16 macid = tx_req->mdata.macid;
1532 u8 tid = tx_req->mdata.tid;
1533 u16 ring_res = 0, rptr = 0;
1534
1535 drv_priv = phl_to_drvpriv(phl_info);
1536
1537 _os_spinlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
1538
1539 phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
1540 list_head, list) {
1541 if (macid != tring_list->macid) {
1542 continue;
1543 } else {
1544 /* hana_todo check mgnt frame case */
1545 ring = &tring_list->phl_ring[tid];
1546 break;
1547 }
1548 }
1549
1550 if (NULL != ring) {
1551 rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1552
1553 ring_res = phl_calc_avail_wptr(rptr, ring->core_idx,
1554 MAX_PHL_TX_RING_ENTRY_NUM);
1555 if (ring_res > 0) {
1556 ring->core_idx =
1557 (ring->core_idx + 1) % MAX_PHL_TX_RING_ENTRY_NUM;
1558 ring->entry[ring->core_idx] = (u8 *)tx_req;
1559 phl_tx_statistics(phl_info, tx_req);
1560 #ifdef CONFIG_PHL_TX_DBG
1561 if (tx_req->tx_dbg.en_dbg) {
1562 tx_req->tx_dbg.core_add_tx_t =
1563 _os_get_cur_time_us();
1564 }
1565 #endif /* CONFIG_PHL_TX_DBG */
1566 _os_atomic_set(drv_priv, &phl_info->phl_sw_tx_more, 1);
1567 pstatus = RTW_PHL_STATUS_SUCCESS;
1568 } else {
1569 PHL_TRACE(COMP_PHL_DBG, _PHL_INFO_, "no ring resource to add new tx request!\n");
1570 pstatus = RTW_PHL_STATUS_RESOURCE;
1571 }
1572 }
1573
1574 _os_spinunlock(drv_priv, &phl_info->t_ring_list_lock, _bh, NULL);
1575
1576 return pstatus;
1577 }
1578
1579 u16 rtw_phl_tring_rsc(void *phl, u16 macid, u8 tid)
1580 {
1581 struct rtw_phl_tring_list *tring_list, *t;
1582 struct rtw_phl_tx_ring *ring = NULL;
1583 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1584 void *drv_priv = NULL;
1585 _os_list *list_head = &phl_info->t_ring_list;
1586 u16 ring_res = 0, rptr = 0;
1587
1588 drv_priv = phl_to_drvpriv(phl_info);
1589
1590 phl_list_for_loop_safe(tring_list, t, struct rtw_phl_tring_list,
1591 list_head, list) {
1592 if (macid != tring_list->macid) {
1593 continue;
1594 } else {
1595 /* hana_todo check mgnt frame case */
1596 ring = &tring_list->phl_ring[tid];
1597 break;
1598 }
1599 }
1600
1601 if (NULL != ring) {
1602 rptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1603
1604 ring_res = phl_calc_avail_rptr(rptr, ring->core_idx,
1605 MAX_PHL_TX_RING_ENTRY_NUM);
1606
1607 }
1608
1609 return ring_res;
1610 }
1611
1612
1613 enum rtw_phl_status phl_indic_pkt_complete(void *phl)
1614 {
1615 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1616 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1617 struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
1618 void *drv_priv = phl_to_drvpriv(phl_info);
1619
1620 do {
1621 _os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1622 evt_info->evt_bitmap |= RTW_PHL_EVT_TX_RECYCLE;
1623 _os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1624
1625 pstatus = phl_schedule_handler(phl_info->phl_com,
1626 &phl_info->phl_event_handler);
1627 } while (false);
1628
1629 return pstatus;
1630 }
1631
1632 enum rtw_phl_status rtw_phl_recycle_tx_buf(void *phl, u8 *tx_buf_ptr)
1633 {
1634 enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1635 #ifdef CONFIG_USB_HCI
1636 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1637 struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
1638
1639 pstatus = hci_trx_ops->recycle_tx_buf(phl, tx_buf_ptr);
1640
1641 #endif
1642 return pstatus;
1643 }
1644
1645
1646 static enum rtw_phl_status
1647 _phl_cfg_tx_ampdu(void *phl, struct rtw_phl_stainfo_t *sta)
1648 {
1649 struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1650 enum rtw_hal_status hsts = RTW_HAL_STATUS_FAILURE;
1651
1652 hsts = rtw_hal_cfg_tx_ampdu(phl_info->hal, sta);
1653 if (RTW_HAL_STATUS_SUCCESS != hsts)
1654 goto fail;
1655
1656 return RTW_PHL_STATUS_SUCCESS;
1657
1658 fail:
1659 return RTW_PHL_STATUS_FAILURE;
1660 }
1661
1662 #ifdef CONFIG_CMD_DISP
1663 enum rtw_phl_status
1664 phl_cmd_cfg_ampdu_hdl(struct phl_info_t *phl_info, u8 *param)
1665 {
1666 struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)param;
1667
1668 PHL_INFO(" %s(), sta = %p !\n", __func__, sta);
1669
1670 return _phl_cfg_tx_ampdu(phl_info, sta);
1671 }
1672 #endif
1673
1674 enum rtw_phl_status
1675 rtw_phl_cmd_cfg_ampdu(void *phl,
1676 struct rtw_wifi_role_t *wrole,
1677 struct rtw_phl_stainfo_t *sta,
1678 enum phl_cmd_type cmd_type,
1679 u32 cmd_timeout)
1680 {
1681 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1682 #ifdef CONFIG_CMD_DISP
1683 sts = phl_cmd_enqueue(phl,
1684 wrole->hw_band,
1685 MSG_EVT_CFG_AMPDU,
1686 (u8 *)sta, 0,
1687 NULL,
1688 cmd_type, cmd_timeout);
1689
1690 if (is_cmd_failure(sts)) {
1691 /* Send cmd success, but wait cmd fail*/
1692 sts = RTW_PHL_STATUS_FAILURE;
1693 } else if (sts != RTW_PHL_STATUS_SUCCESS) {
1694 /* Send cmd fail */
1695 sts = RTW_PHL_STATUS_FAILURE;
1696 }
1697
1698 return sts;
1699 #else
1700 PHL_ERR("%s : CONFIG_CMD_DISP need to be enabled for MSG_EVT_CFG_AMPDU !! \n", __func__);
1701
1702 return sts;
1703 #endif
1704 }
1705
1706 void
1707 phl_tx_watchdog(struct phl_info_t *phl_info)
1708 {
1709 struct phl_hci_trx_ops *trx_ops = phl_info->hci_trx_ops;
1710 struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
1711
1712 phl_tx_traffic_upd(phl_stats);
1713 trx_ops->tx_watchdog(phl_info);
1714 }
1715
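/**
 * _phl_get_ctrl_mdl - map a PHL module id to its datapath control bit;
 * unknown modules map to DATA_CTRL_MDL_NONE and are skipped with a warning
 * @id: input, the requesting PHL module id
 */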
1716 enum data_ctrl_mdl _phl_get_ctrl_mdl(enum phl_module_id id)
1717 {
1718 enum data_ctrl_mdl ctrl_mdl = DATA_CTRL_MDL_NONE;
1719
1720 switch (id) {
1721 case PHL_MDL_PHY_MGNT:
1722 ctrl_mdl = DATA_CTRL_MDL_CMD_CTRLER;
1723 break;
1724 case PHL_MDL_SER:
1725 ctrl_mdl = DATA_CTRL_MDL_SER;
1726 break;
1727 case PHL_MDL_POWER_MGNT:
1728 ctrl_mdl = DATA_CTRL_MDL_PS;
1729 break;
1730 default:
1731 PHL_WARN("Unknown PHL module(%d) try to control datapath and is skipped!\n",
1732 id);
1733 ctrl_mdl = DATA_CTRL_MDL_NONE;
1734 break;
1735 }
1736
1737 return ctrl_mdl;
1738 }
1739
1740
1741 enum rtw_phl_status
1742 _phl_poll_hw_tx_done(void)
1743 {
1744 PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Polling hw tx done is not supported now\n");
1745
1746 return RTW_PHL_STATUS_FAILURE;
1747 }
1748
1749 enum rtw_phl_status
1750 _phl_hw_tx_resume(void)
1751 {
1752 PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Resume hw tx is not supported now\n");
1753
1754 return RTW_PHL_STATUS_FAILURE;
1755 }
1756
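/**
 * _phl_sw_tx_resume - clear the requester's bit in pause_tx_id; when no
 * module still holds a pause, resume HCI sw tx and kick the tx process.
 * Fails if the requesting module did not pause sw tx in the first place.
 * @phl_info: input, the phl info
 * @ctl: input, the data control request (carries the requester id)
 */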
1757 enum rtw_phl_status
1758 _phl_sw_tx_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
1759 {
1760 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1761 struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1762 enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1763
1764 if (!TEST_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl)) {
1765 PHL_WARN("[DATA_CTRL] module %d cannot resume sw tx: it did not pause it (sw tx is paused by module(s) 0x%x)\n",
1766 ctl->id, phl_info->pause_tx_id);
1767 return sts;
1768 }
1769
1770 CLEAR_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1771
1772 if (DATA_CTRL_MDL_NONE != phl_info->pause_tx_id) {
1773 PHL_WARN("[DATA_CTRL] sw tx is still paused by tx pause id = 0x%x\n",
1774 phl_info->pause_tx_id);
1775
1776 sts = RTW_PHL_STATUS_SUCCESS;
1777 } else {
1778 ops->trx_resume(phl_info, PHL_CTRL_TX);
1779
1780 sts = rtw_phl_tx_req_notify(phl_info);
1781 }
1782
1783 return sts;
1784 }
1785
1786 void
1787 _phl_sw_tx_rst(struct phl_info_t *phl_info)
1788 {
1789 struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1790
1791 ops->trx_reset(phl_info, PHL_CTRL_TX);
1792 }
1793
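/**
 * _phl_sw_tx_pause - pause the sw tx path for the requesting module: if sw
 * tx is already paused (or a stop is in progress) only the requester's bit
 * is added to pause_tx_id; otherwise request a stop, notify tx one last
 * time, and poll up to POLL_SW_TX_PAUSE_CNT * POLL_SW_TX_PAUSE_MS ms for
 * the pause to take effect
 * @phl_info: input, the phl info
 * @ctl: input, the data control request (carries the requester id)
 * @rst_sw: input, pause with sw reset (currently not supported)
 */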
1794 enum rtw_phl_status
1795 _phl_sw_tx_pause(struct phl_info_t *phl_info,
1796 struct phl_data_ctl_t *ctl,
1797 bool rst_sw)
1798 {
1799 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1800 struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1801 void *drv = phl_to_drvpriv(phl_info);
1802 u32 i = 0;
1803 enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1804
1805 if (PHL_TX_STATUS_SW_PAUSE ==
1806 _os_atomic_read(drv, &phl_info->phl_sw_tx_sts)) {
1807 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1808 "[DATA_CTRL] SW tx has been paused by module(0x%x)\n",
1809 phl_info->pause_tx_id);
1810
1811 SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1812
1813 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1814 "[DATA_CTRL] Update pause sw tx id(0x%x) by module(%d)\n",
1815 phl_info->pause_tx_id, ctl->id);
1816
1817 sts = RTW_PHL_STATUS_SUCCESS;
1818 return sts;
1819 }
1820
1821 if (PHL_TX_STATUS_STOP_INPROGRESS ==
1822 _os_atomic_read(drv, &phl_info->phl_sw_tx_sts)) {
1823 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1824 "[DATA_CTRL] SW tx has been requested to pause by module(0x%x)\n",
1825 phl_info->pause_tx_id);
1826
1827 SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1828
1829 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
1830 "[DATA_CTRL] Update pause sw tx id(0x%x) by module(%d)\n",
1831 phl_info->pause_tx_id, ctl->id);
1832
1833 sts = RTW_PHL_STATUS_SUCCESS;
1834 return sts;
1835 }
1836
1837 /* request sw tx to stop */
1838 ops->req_tx_stop(phl_info);
1839
1840 /*
1841 * notify sw tx one last time
1842 * and poll whether it received the stop request and paused itself
1843 */
1844 if (RTW_PHL_STATUS_SUCCESS == rtw_phl_tx_req_notify(phl_info)) {
1845 for (i = 0; i < POLL_SW_TX_PAUSE_CNT; i++) {
1846 if (true == ops->is_tx_pause(phl_info)) {
1847 SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1848 sts = RTW_PHL_STATUS_SUCCESS;
1849 break;
1850 }
1851 _os_sleep_ms(drv, POLL_SW_TX_PAUSE_MS);
1852 }
1853
1854 if (RTW_PHL_STATUS_SUCCESS != sts) {
1855 SET_STATUS_FLAG(phl_info->pause_tx_id, ctrl_mdl);
1856 sts = RTW_PHL_STATUS_CMD_TIMEOUT;
1857 PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_,
1858 "[DATA_CTRL] Module(%d) polling sw tx pause timeout (%d ms)!\n",
1859 ctl->id,
1860 (POLL_SW_TX_PAUSE_MS * POLL_SW_TX_PAUSE_CNT));
1861 } else {
1862 if (true == rst_sw) {
1863 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_,
1864 "[DATA_CTRL] Pause Tx with reset is not supported now! requested by module(%d)\n",
1865 ctl->id);
1866 }
1867 }
1868 } else {
1869 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Schedule sw tx process fail!\n");
1870 }
1871
1872 return sts;
1873 }
1874
1875 enum rtw_phl_status
1876 _phl_poll_hw_rx_done(void)
1877 {
1878 PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "[DATA_CTRL] Polling hw rx done is not supported now\n");
1879
1880 return RTW_PHL_STATUS_FAILURE;
1881 }
1882
1883 enum rtw_phl_status
1884 _phl_hw_rx_resume(void)
1885 {
1886 PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "[DATA_CTRL] Resume hw rx is not supported now\n");
1887
1888 return RTW_PHL_STATUS_FAILURE;
1889 }
1890
1891 enum rtw_phl_status
1892 _phl_sw_rx_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
1893 {
1894 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1895 struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1896 enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1897
1898 if (!TEST_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl)) {
1899 PHL_WARN("[DATA_CTRL] module %d cannot resume sw rx: it did not pause it (sw rx is paused by module(s) 0x%x)\n",
1900 ctl->id, phl_info->pause_rx_id);
1901 return sts;
1902 }
1903
1904 CLEAR_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1905
1906 if (DATA_CTRL_MDL_NONE != phl_info->pause_rx_id) {
1907 PHL_WARN("[DATA_CTRL] sw rx is still paused by rx pause id = 0x%x\n",
1908 phl_info->pause_rx_id);
1909
1910 sts = RTW_PHL_STATUS_SUCCESS;
1911 } else {
1912 ops->trx_resume(phl_info, PHL_CTRL_RX);
1913
1914 sts = rtw_phl_start_rx_process(phl_info);
1915 }
1916
1917 return sts;
1918 }
1919
1920 void
1921 _phl_sw_rx_rst(struct phl_info_t *phl_info)
1922 {
1923 struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1924
1925 ops->trx_reset(phl_info, PHL_CTRL_RX);
1926 }
1927
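/**
 * _phl_sw_rx_pause - rx counterpart of _phl_sw_tx_pause(): tracks pause
 * ownership in pause_rx_id and, after requesting a stop, polls
 * is_rx_pause() until the sw rx path reports it has paused or the poll
 * budget is exhausted
 * @phl_info: input, the phl info
 * @ctl: input, the data control request (carries the requester id)
 * @rst_sw: input, pause with sw reset (currently not supported)
 */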
1928 enum rtw_phl_status
1929 _phl_sw_rx_pause(struct phl_info_t *phl_info,
1930 struct phl_data_ctl_t *ctl,
1931 bool rst_sw)
1932 {
1933 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1934 struct phl_hci_trx_ops *ops = phl_info->hci_trx_ops;
1935 void *drv = phl_to_drvpriv(phl_info);
1936 u32 i = 0;
1937 enum data_ctrl_mdl ctrl_mdl = _phl_get_ctrl_mdl(ctl->id);
1938
1939 if (PHL_RX_STATUS_SW_PAUSE ==
1940 _os_atomic_read(drv, &phl_info->phl_sw_rx_sts)) {
1941 PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1942 "[DATA_CTRL] SW rx has been paused by module(0x%x)\n",
1943 phl_info->pause_rx_id);
1944
1945 SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1946
1947 PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1948 "[DATA_CTRL] Update pause sw rx id(0x%x) by module(%d)\n",
1949 phl_info->pause_rx_id, ctl->id);
1950 sts = RTW_PHL_STATUS_SUCCESS;
1951 return sts;
1952 }
1953
1954 if (PHL_RX_STATUS_STOP_INPROGRESS ==
1955 _os_atomic_read(drv, &phl_info->phl_sw_rx_sts)) {
1956 PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1957 "[DATA_CTRL] SW rx has been requested to pause by module(0x%x)\n",
1958 phl_info->pause_rx_id);
1959
1960 SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1961
1962 PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_,
1963 "[DATA_CTRL] Update pause sw rx id(0x%x) by module(%d)\n",
1964 phl_info->pause_rx_id, ctl->id);
1965 sts = RTW_PHL_STATUS_SUCCESS;
1966 return sts;
1967 }
1968
1969 /* request sw rx to stop */
1970 ops->req_rx_stop(phl_info);
1971
1972 /*
1973 * notify sw rx one last time
1974 * and poll whether it received the stop request and paused itself
1975 */
1976 if (RTW_PHL_STATUS_SUCCESS == rtw_phl_start_rx_process(phl_info)) {
1977 for (i = 0; i < POLL_SW_RX_PAUSE_CNT; i++) {
1978 if (true == ops->is_rx_pause(phl_info)) {
1979 SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1980 sts = RTW_PHL_STATUS_SUCCESS;
1981 break;
1982 }
1983 _os_sleep_ms(drv, POLL_SW_RX_PAUSE_MS);
1984 }
1985
1986 if (RTW_PHL_STATUS_SUCCESS != sts) {
1987 SET_STATUS_FLAG(phl_info->pause_rx_id, ctrl_mdl);
1988 sts = RTW_PHL_STATUS_CMD_TIMEOUT;
1989 PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1990 "[DATA_CTRL] Module(%d) polling sw rx pause timeout (%d ms)!\n",
1991 ctl->id,
1992 (POLL_SW_RX_PAUSE_MS * POLL_SW_RX_PAUSE_CNT));
1993 } else {
1994 if (true == rst_sw) {
1995 PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
1996 "[DATA_CTRL] Pause Rx with reset is not supported now! requested by module(%d)\n",
1997 ctl->id);
1998 }
1999 }
2000 } else {
2001 PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Schedule sw rx process fail!\n");
2002 }
2003
2004 return sts;
2005 }
2006
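/**
 * _phl_hw_trx_rst_resume - reset and resume hw t/rx by running SER level-1
 * recovery step 2 in HAL; clears is_hw_trx_pause on success
 * @phl_info: input, the phl info
 */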
2007 enum rtw_phl_status
2008 _phl_hw_trx_rst_resume(struct phl_info_t *phl_info)
2009 {
2010 void *drv = phl_to_drvpriv(phl_info);
2011
2012 if (false == _os_atomic_read(drv, &phl_info->is_hw_trx_pause)) {
2013 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] HW T/Rx is not paused\n");
2014 }
2015
2016 if (rtw_hal_lv1_rcvy(phl_info->hal, RTW_PHL_SER_LV1_SER_RCVY_STEP_2) !=
2017 RTW_HAL_STATUS_SUCCESS) {
2018 PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Reset and Resume HW T/Rx fail\n");
2019 return RTW_PHL_STATUS_FAILURE;
2020 } else {
2021 _os_atomic_set(drv, &phl_info->is_hw_trx_pause, false);
2022 return RTW_PHL_STATUS_SUCCESS;
2023 }
2024 }
2025
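/**
 * _phl_hw_trx_pause - pause hw t/rx by running SER level-1 recovery step 1
 * in HAL; sets is_hw_trx_pause on success
 * @phl_info: input, the phl info
 */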
2026 enum rtw_phl_status
2027 _phl_hw_trx_pause(struct phl_info_t *phl_info)
2028 {
2029 void *drv = phl_to_drvpriv(phl_info);
2030
2031 if (true == _os_atomic_read(drv, &phl_info->is_hw_trx_pause)) {
2032 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] HW T/Rx is already paused\n");
2033 }
2034
2035 if (rtw_hal_lv1_rcvy(phl_info->hal, RTW_PHL_SER_LV1_RCVY_STEP_1) !=
2036 RTW_HAL_STATUS_SUCCESS) {
2037 PHL_TRACE(COMP_PHL_XMIT, _PHL_ERR_, "[DATA_CTRL] Pause HW T/Rx fail\n");
2038 return RTW_PHL_STATUS_FAILURE;
2039 } else {
2040 _os_atomic_set(drv, &phl_info->is_hw_trx_pause, true);
2041 return RTW_PHL_STATUS_SUCCESS;
2042 }
2043 }
2044
2045 enum rtw_phl_status
2046 _phl_trx_sw_pause(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
2047 {
2048 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2049
2050 do {
2051 sts = _phl_sw_tx_pause(phl_info, ctl, false);
2052 if (RTW_PHL_STATUS_SUCCESS != sts) {
2053 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause SW Tx fail in PHL_DATA_CTL_TRX_SW_PAUSE!\n");
2054 break;
2055 }
2056
2057 sts = _phl_sw_rx_pause(phl_info, ctl, false);
2058 if (RTW_PHL_STATUS_SUCCESS != sts) {
2059 PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Pause SW Rx fail in PHL_DATA_CTL_TRX_SW_PAUSE!\n");
2060 break;
2061 }
2062 } while (false);
2063
2064 return sts;
2065 }
2066
2067 enum rtw_phl_status
2068 _phl_trx_sw_resume(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl)
2069 {
2070 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2071
2072 do {
2073 sts = _phl_sw_tx_resume(phl_info, ctl);
2074 if (RTW_PHL_STATUS_SUCCESS != sts) {
2075 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume SW Tx fail in PHL_DATA_CTL_TRX_SW_RESUME!\n");
2076 break;
2077 }
2078
2079 sts = _phl_sw_rx_resume(phl_info, ctl);
2080 if (RTW_PHL_STATUS_SUCCESS != sts) {
2081 PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Resume SW Rx fail in PHL_DATA_CTL_TRX_SW_RESUME!\n");
2082 break;
2083 }
2084 } while (false);
2085
2086 return sts;
2087 }
2088
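/**
 * _phl_trx_pause_w_rst - pause sw tx, hw t/rx and sw rx in that order, then
 * reset both sw paths; on failure an error code is reported through
 * msg->outbuf when the caller provides a buffer of
 * sizeof(enum data_ctrl_err_code)
 * @phl_info: input, the phl info
 * @ctl: input, the data control request
 * @msg: input/output, message carrying the optional error-code buffer
 */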
2089 enum rtw_phl_status
2090 _phl_trx_pause_w_rst(struct phl_info_t *phl_info,
2091 struct phl_data_ctl_t *ctl,
2092 struct phl_msg *msg)
2093 {
2094 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2095 enum data_ctrl_err_code *err_sts = NULL;
2096
2097 if (msg->outbuf && msg->outlen == sizeof(*err_sts))
2098 err_sts = (enum data_ctrl_err_code *)msg->outbuf;
2099
2100 do {
2101 sts = _phl_sw_tx_pause(phl_info, ctl, false);
2102 if (RTW_PHL_STATUS_SUCCESS != sts) {
2103 if (err_sts) {
2104 if (RTW_PHL_STATUS_CMD_TIMEOUT == sts)
2105 *err_sts = CTRL_ERR_SW_TX_PAUSE_POLLTO;
2106 else
2107 *err_sts = CTRL_ERR_SW_TX_PAUSE_FAIL;
2108 }
2109 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause SW Tx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
2110 break;
2111 }
2112
2113 sts = _phl_hw_trx_pause(phl_info);
2114 if (RTW_PHL_STATUS_SUCCESS != sts) {
2115 if (err_sts)
2116 *err_sts = CTRL_ERR_HW_TRX_PAUSE_FAIL;
2117 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Pause HW T/Rx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
2118 break;
2119 }
2120
2121 sts = _phl_sw_rx_pause(phl_info, ctl, false);
2122 if (RTW_PHL_STATUS_SUCCESS != sts) {
2123 if (err_sts) {
2124 if (RTW_PHL_STATUS_CMD_TIMEOUT == sts)
2125 *err_sts = CTRL_ERR_SW_RX_PAUSE_POLLTO;
2126 else
2127 *err_sts = CTRL_ERR_SW_RX_PAUSE_FAIL;
2128 }
2129 PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Pause SW Rx fail in PHL_DATA_CTL_TRX_PAUSE_W_RST!\n");
2130 break;
2131 }
2132
2133 _phl_sw_tx_rst(phl_info);
2134 _phl_sw_rx_rst(phl_info);
2135 } while (false);
2136
2137 return sts;
2138 }
2139
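/**
 * _phl_trx_resume_w_rst - resume sw rx, reset/resume hw t/rx, then resume
 * sw tx; on failure an error code is reported through msg->outbuf when the
 * caller provides a buffer of sizeof(enum data_ctrl_err_code)
 * @phl_info: input, the phl info
 * @ctl: input, the data control request
 * @msg: input/output, message carrying the optional error-code buffer
 */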
2140 enum rtw_phl_status
2141 _phl_trx_resume_w_rst(struct phl_info_t *phl_info,
2142 struct phl_data_ctl_t *ctl,
2143 struct phl_msg *msg)
2144 {
2145 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2146 enum data_ctrl_err_code *err_sts = NULL;
2147
2148 if (msg->outbuf && msg->outlen == sizeof(*err_sts))
2149 err_sts = (enum data_ctrl_err_code *)msg->outbuf;
2150
2151 do {
2152 sts = _phl_sw_rx_resume(phl_info, ctl);
2153 if (RTW_PHL_STATUS_SUCCESS != sts) {
2154 if (err_sts)
2155 *err_sts = CTRL_ERR_SW_RX_RESUME_FAIL;
2156
2157 PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[DATA_CTRL] Resume SW Rx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
2158 break;
2159 }
2160
2161 sts = _phl_hw_trx_rst_resume(phl_info);
2162 if (RTW_PHL_STATUS_SUCCESS != sts) {
2163 if (err_sts)
2164 *err_sts = CTRL_ERR_HW_TRX_RESUME_FAIL;
2165
2166 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume HW T/Rx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
2167 break;
2168 }
2169
2170 sts = _phl_sw_tx_resume(phl_info, ctl);
2171 if (RTW_PHL_STATUS_SUCCESS != sts) {
2172 if (err_sts)
2173 *err_sts = CTRL_ERR_SW_TX_RESUME_FAIL;
2174
2175 PHL_TRACE(COMP_PHL_XMIT, _PHL_WARNING_, "[DATA_CTRL] Resume SW Tx fail in PHL_DATA_CTL_TRX_RESUME_W_RST!\n");
2176 break;
2177 }
2178 } while (false);
2179
2180 return sts;
2181 }
2182
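/**
 * phl_data_ctrler - entry point for datapath control: dispatches a
 * PHL_DATA_CTL_* command from @ctl to the matching pause/resume/reset helper
 * @phl_info: input, the phl info
 * @ctl: input, the data control request (cmd and requester id)
 * @msg: input, message forwarded to the pause/resume-with-reset handlers
 *
 * Illustrative sketch only (id and cmd values are taken from the cases
 * handled below; real callers build the request to suit their module):
 *	struct phl_data_ctl_t ctl = {0};
 *	ctl.id = PHL_MDL_SER;
 *	ctl.cmd = PHL_DATA_CTL_SW_TX_PAUSE;
 *	sts = phl_data_ctrler(phl_info, &ctl, msg);
 */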
2183 enum rtw_phl_status
2184 phl_data_ctrler(struct phl_info_t *phl_info, struct phl_data_ctl_t *ctl,
2185 struct phl_msg *msg)
2186 {
2187 enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
2188
2189 if (NULL == ctl) {
2190 PHL_WARN("phl_data_ctrler(): input ctl is NULL\n");
2191 return RTW_PHL_STATUS_FAILURE;
2192 }
2193
2194 switch (ctl->cmd) {
2195 case PHL_DATA_CTL_HW_TRX_RST_RESUME:
2196 sts = _phl_hw_trx_rst_resume(phl_info);
2197 break;
2198 case PHL_DATA_CTL_HW_TRX_PAUSE:
2199 sts = _phl_hw_trx_pause(phl_info);
2200 break;
2201 case PHL_DATA_CTL_SW_TX_RESUME:
2202 sts = _phl_sw_tx_resume(phl_info, ctl);
2203 break;
2204 case PHL_DATA_CTL_SW_RX_RESUME:
2205 sts = _phl_sw_rx_resume(phl_info, ctl);
2206 break;
2207 case PHL_DATA_CTL_SW_TX_PAUSE:
2208 sts = _phl_sw_tx_pause(phl_info, ctl, false);
2209 break;
2210 case PHL_DATA_CTL_SW_RX_PAUSE:
2211 sts = _phl_sw_rx_pause(phl_info, ctl, false);
2212 break;
2213 case PHL_DATA_CTL_SW_TX_RESET:
2214 _phl_sw_tx_rst(phl_info);
2215 sts = RTW_PHL_STATUS_SUCCESS;
2216 break;
2217 case PHL_DATA_CTL_SW_RX_RESET:
2218 _phl_sw_rx_rst(phl_info);
2219 sts = RTW_PHL_STATUS_SUCCESS;
2220 break;
2221 case PHL_DATA_CTL_TRX_SW_PAUSE:
2222 sts = _phl_trx_sw_pause(phl_info, ctl);
2223 break;
2224 case PHL_DATA_CTL_TRX_SW_RESUME:
2225 sts = _phl_trx_sw_resume(phl_info, ctl);
2226 break;
2227 case PHL_DATA_CTL_TRX_PAUSE_W_RST:
2228 sts = _phl_trx_pause_w_rst(phl_info, ctl, msg);
2229 break;
2230 case PHL_DATA_CTL_TRX_RESUME_W_RST:
2231 sts = _phl_trx_resume_w_rst(phl_info, ctl, msg);
2232 break;
2233 default:
2234 PHL_TRACE(COMP_PHL_XMIT, _PHL_INFO_,
2235 "Unknown data control command(%d)!\n", ctl->cmd);
2236 break;
2237 }
2238 return sts;
2239 }
2240