1*4882a593Smuzhiyun /******************************************************************************
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright(c) 2019 Realtek Corporation.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify it
6*4882a593Smuzhiyun * under the terms of version 2 of the GNU General Public License as
7*4882a593Smuzhiyun * published by the Free Software Foundation.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful, but WITHOUT
10*4882a593Smuzhiyun * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12*4882a593Smuzhiyun * more details.
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun *****************************************************************************/
15*4882a593Smuzhiyun #define _PHL_RX_C_
16*4882a593Smuzhiyun #include "phl_headers.h"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun
rtw_phl_query_phl_rx(void * phl)19*4882a593Smuzhiyun struct rtw_phl_rx_pkt *rtw_phl_query_phl_rx(void *phl)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
22*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
23*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
24*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx = NULL;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun _os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun if (false == list_empty(&rx_pkt_pool->idle)) {
31*4882a593Smuzhiyun phl_rx = list_first_entry(&rx_pkt_pool->idle,
32*4882a593Smuzhiyun struct rtw_phl_rx_pkt, list);
33*4882a593Smuzhiyun list_del(&phl_rx->list);
34*4882a593Smuzhiyun rx_pkt_pool->idle_cnt--;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun _os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun return phl_rx;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
rtw_phl_is_phl_rx_idle(struct phl_info_t * phl_info)42*4882a593Smuzhiyun u8 rtw_phl_is_phl_rx_idle(struct phl_info_t *phl_info)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
45*4882a593Smuzhiyun u8 res = false;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun _os_spinlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun if (MAX_PHL_RING_RX_PKT_NUM == rx_pkt_pool->idle_cnt)
52*4882a593Smuzhiyun res = true;
53*4882a593Smuzhiyun else
54*4882a593Smuzhiyun res = false;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun _os_spinunlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun return res;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
phl_dump_rx_stats(struct rtw_stats * stats)61*4882a593Smuzhiyun void phl_dump_rx_stats(struct rtw_stats *stats)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
64*4882a593Smuzhiyun "Dump Rx statistics\n"
65*4882a593Smuzhiyun "rx_byte_uni = %lld\n"
66*4882a593Smuzhiyun "rx_byte_total = %lld\n"
67*4882a593Smuzhiyun "rx_tp_kbits = %d\n"
68*4882a593Smuzhiyun "last_rx_time_ms = %d\n",
69*4882a593Smuzhiyun stats->rx_byte_uni,
70*4882a593Smuzhiyun stats->rx_byte_total,
71*4882a593Smuzhiyun stats->rx_tp_kbits,
72*4882a593Smuzhiyun stats->last_rx_time_ms);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
phl_reset_rx_stats(struct rtw_stats * stats)75*4882a593Smuzhiyun void phl_reset_rx_stats(struct rtw_stats *stats)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun stats->rx_byte_uni = 0;
78*4882a593Smuzhiyun stats->rx_byte_total = 0;
79*4882a593Smuzhiyun stats->rx_tp_kbits = 0;
80*4882a593Smuzhiyun stats->last_rx_time_ms = 0;
81*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = 0;
82*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = 0;
83*4882a593Smuzhiyun stats->rx_traffic.lvl = RTW_TFC_IDLE;
84*4882a593Smuzhiyun stats->rx_traffic.sts = 0;
85*4882a593Smuzhiyun stats->rx_tf_cnt = 0;
86*4882a593Smuzhiyun stats->pre_rx_tf_cnt = 0;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun void
phl_rx_traffic_upd(struct rtw_stats * sts)90*4882a593Smuzhiyun phl_rx_traffic_upd(struct rtw_stats *sts)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun u32 tp_k = 0, tp_m = 0;
93*4882a593Smuzhiyun enum rtw_tfc_lvl rx_tfc_lvl = RTW_TFC_IDLE;
94*4882a593Smuzhiyun tp_k = sts->rx_tp_kbits;
95*4882a593Smuzhiyun tp_m = sts->rx_tp_kbits >> 10;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun if (tp_m >= RX_HIGH_TP_THRES_MBPS)
98*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_HIGH;
99*4882a593Smuzhiyun else if (tp_m >= RX_MID_TP_THRES_MBPS)
100*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_MID;
101*4882a593Smuzhiyun else if (tp_m >= RX_LOW_TP_THRES_MBPS)
102*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_LOW;
103*4882a593Smuzhiyun else if (tp_k >= RX_ULTRA_LOW_TP_THRES_KBPS)
104*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_ULTRA_LOW;
105*4882a593Smuzhiyun else
106*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_IDLE;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun if (sts->rx_traffic.lvl > rx_tfc_lvl) {
109*4882a593Smuzhiyun sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
110*4882a593Smuzhiyun sts->rx_traffic.lvl = rx_tfc_lvl;
111*4882a593Smuzhiyun } else if (sts->rx_traffic.lvl < rx_tfc_lvl) {
112*4882a593Smuzhiyun sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
113*4882a593Smuzhiyun sts->rx_traffic.lvl = rx_tfc_lvl;
114*4882a593Smuzhiyun } else if (sts->rx_traffic.sts &
115*4882a593Smuzhiyun (TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
116*4882a593Smuzhiyun sts->rx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
117*4882a593Smuzhiyun TRAFFIC_DECREASE);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
phl_update_rx_stats(struct rtw_stats * stats,struct rtw_recv_pkt * rx_pkt)121*4882a593Smuzhiyun void phl_update_rx_stats(struct rtw_stats *stats, struct rtw_recv_pkt *rx_pkt)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
124*4882a593Smuzhiyun u64 diff_bits = 0;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun stats->last_rx_time_ms = cur_time;
127*4882a593Smuzhiyun stats->rx_byte_total += rx_pkt->mdata.pktlen;
128*4882a593Smuzhiyun if (rx_pkt->mdata.bc == 0 && rx_pkt->mdata.mc == 0)
129*4882a593Smuzhiyun stats->rx_byte_uni += rx_pkt->mdata.pktlen;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun if (0 == stats->rxtp.last_calc_time_ms ||
132*4882a593Smuzhiyun 0 == stats->rxtp.last_calc_bits) {
133*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = stats->last_rx_time_ms;
134*4882a593Smuzhiyun stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
135*4882a593Smuzhiyun } else {
136*4882a593Smuzhiyun if (cur_time >= stats->rxtp.last_calc_time_ms) {
137*4882a593Smuzhiyun diff_t = cur_time - stats->rxtp.last_calc_time_ms;
138*4882a593Smuzhiyun } else {
139*4882a593Smuzhiyun diff_t = RTW_U32_MAX - stats->rxtp.last_calc_time_ms +
140*4882a593Smuzhiyun cur_time + 1;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun if (diff_t > RXTP_CALC_DIFF_MS && stats->rx_byte_uni != 0) {
143*4882a593Smuzhiyun diff_bits = (stats->rx_byte_uni * 8) -
144*4882a593Smuzhiyun stats->rxtp.last_calc_bits;
145*4882a593Smuzhiyun stats->rx_tp_kbits = (u32)_os_division64(diff_bits,
146*4882a593Smuzhiyun diff_t);
147*4882a593Smuzhiyun stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
148*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = cur_time;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun
phl_rx_statistics(struct phl_info_t * phl_info,struct rtw_recv_pkt * rx_pkt)153*4882a593Smuzhiyun void phl_rx_statistics(struct phl_info_t *phl_info, struct rtw_recv_pkt *rx_pkt)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = phl_info->phl_com;
156*4882a593Smuzhiyun struct rtw_stats *phl_stats = &phl_com->phl_stats;
157*4882a593Smuzhiyun struct rtw_stats *sta_stats = NULL;
158*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
159*4882a593Smuzhiyun u16 macid = rx_pkt->mdata.macid;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun if (!phl_macid_is_valid(phl_info, macid))
162*4882a593Smuzhiyun goto dev_stat;
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun if (NULL == sta)
167*4882a593Smuzhiyun goto dev_stat;
168*4882a593Smuzhiyun sta_stats = &sta->stats;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun phl_update_rx_stats(sta_stats, rx_pkt);
171*4882a593Smuzhiyun dev_stat:
172*4882a593Smuzhiyun phl_update_rx_stats(phl_stats, rx_pkt);
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun
phl_release_phl_rx(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)175*4882a593Smuzhiyun void phl_release_phl_rx(struct phl_info_t *phl_info,
176*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
179*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun _os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
184*4882a593Smuzhiyun _os_mem_set(phl_to_drvpriv(phl_info), &phl_rx->r, 0, sizeof(phl_rx->r));
185*4882a593Smuzhiyun phl_rx->type = RTW_RX_TYPE_MAX;
186*4882a593Smuzhiyun phl_rx->rxbuf_ptr = NULL;
187*4882a593Smuzhiyun INIT_LIST_HEAD(&phl_rx->list);
188*4882a593Smuzhiyun list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
189*4882a593Smuzhiyun rx_pkt_pool->idle_cnt++;
190*4882a593Smuzhiyun _os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun
phl_free_recv_pkt_pool(struct phl_info_t * phl_info)193*4882a593Smuzhiyun static void phl_free_recv_pkt_pool(struct phl_info_t *phl_info)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
196*4882a593Smuzhiyun u32 buf_len = 0;
197*4882a593Smuzhiyun FUNCIN();
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
200*4882a593Smuzhiyun if (NULL != rx_pkt_pool) {
201*4882a593Smuzhiyun _os_spinlock_free(phl_to_drvpriv(phl_info),
202*4882a593Smuzhiyun &rx_pkt_pool->idle_lock);
203*4882a593Smuzhiyun _os_spinlock_free(phl_to_drvpriv(phl_info),
204*4882a593Smuzhiyun &rx_pkt_pool->busy_lock);
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun buf_len = sizeof(*rx_pkt_pool);
207*4882a593Smuzhiyun _os_mem_free(phl_to_drvpriv(phl_info), rx_pkt_pool, buf_len);
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun FUNCOUT();
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
/**
 * phl_rx_deinit - release rx-path resources
 * @phl_info: phl context
 *
 * Currently only the rx packet descriptor pool is freed; reorder and
 * peer-info teardown are still TODO.
 */
void phl_rx_deinit(struct phl_info_t *phl_info)
{
	/* TODO: rx reorder deinit */
	/* TODO: peer info deinit */

	phl_free_recv_pkt_pool(phl_info);
}
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun
phl_alloc_recv_pkt_pool(struct phl_info_t * phl_info)223*4882a593Smuzhiyun static enum rtw_phl_status phl_alloc_recv_pkt_pool(struct phl_info_t *phl_info)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
226*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
227*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx = NULL;
228*4882a593Smuzhiyun u32 buf_len = 0, i = 0;
229*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun buf_len = sizeof(*rx_pkt_pool);
232*4882a593Smuzhiyun rx_pkt_pool = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun if (NULL != rx_pkt_pool) {
235*4882a593Smuzhiyun _os_mem_set(phl_to_drvpriv(phl_info), rx_pkt_pool, 0, buf_len);
236*4882a593Smuzhiyun INIT_LIST_HEAD(&rx_pkt_pool->idle);
237*4882a593Smuzhiyun INIT_LIST_HEAD(&rx_pkt_pool->busy);
238*4882a593Smuzhiyun _os_spinlock_init(phl_to_drvpriv(phl_info),
239*4882a593Smuzhiyun &rx_pkt_pool->idle_lock);
240*4882a593Smuzhiyun _os_spinlock_init(phl_to_drvpriv(phl_info),
241*4882a593Smuzhiyun &rx_pkt_pool->busy_lock);
242*4882a593Smuzhiyun rx_pkt_pool->idle_cnt = 0;
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun for (i = 0; i < MAX_PHL_RING_RX_PKT_NUM; i++) {
245*4882a593Smuzhiyun phl_rx = &rx_pkt_pool->phl_rx[i];
246*4882a593Smuzhiyun INIT_LIST_HEAD(&phl_rx->list);
247*4882a593Smuzhiyun list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
248*4882a593Smuzhiyun rx_pkt_pool->idle_cnt++;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun phl_info->rx_pkt_pool = rx_pkt_pool;
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun pstatus = RTW_PHL_STATUS_SUCCESS;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun if (RTW_PHL_STATUS_SUCCESS != pstatus)
257*4882a593Smuzhiyun phl_free_recv_pkt_pool(phl_info);
258*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun return pstatus;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
phl_rx_init(struct phl_info_t * phl_info)263*4882a593Smuzhiyun enum rtw_phl_status phl_rx_init(struct phl_info_t *phl_info)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun enum rtw_phl_status status;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun /* Allocate rx packet pool */
268*4882a593Smuzhiyun status = phl_alloc_recv_pkt_pool(phl_info);
269*4882a593Smuzhiyun if (status != RTW_PHL_STATUS_SUCCESS)
270*4882a593Smuzhiyun return status;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun /* TODO: Peer info init */
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /* TODO: Rx reorder init */
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun return RTW_PHL_STATUS_SUCCESS;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
phl_recycle_rx_buf(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)280*4882a593Smuzhiyun void phl_recycle_rx_buf(struct phl_info_t *phl_info,
281*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
284*4882a593Smuzhiyun struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
285*4882a593Smuzhiyun struct rtw_rx_buf *rx_buf = NULL;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun do {
288*4882a593Smuzhiyun if (NULL == phl_rx) {
289*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]phl_rx is NULL!\n");
290*4882a593Smuzhiyun break;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun rx_buf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "[4] %s:: [%p]\n",
296*4882a593Smuzhiyun __FUNCTION__, rx_buf);
297*4882a593Smuzhiyun if (phl_rx->rxbuf_ptr) {
298*4882a593Smuzhiyun pstatus = hci_trx_ops->recycle_rx_buf(phl_info, rx_buf,
299*4882a593Smuzhiyun phl_rx->r.mdata.dma_ch,
300*4882a593Smuzhiyun phl_rx->type);
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun if (RTW_PHL_STATUS_SUCCESS != pstatus && phl_rx->rxbuf_ptr)
303*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]recycle hci rx buf error!\n");
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun phl_release_phl_rx(phl_info, phl_rx);
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun } while (false);
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun
_phl_indic_new_rxpkt(struct phl_info_t * phl_info)311*4882a593Smuzhiyun void _phl_indic_new_rxpkt(struct phl_info_t *phl_info)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
314*4882a593Smuzhiyun struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
315*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
316*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
317*4882a593Smuzhiyun
318*4882a593Smuzhiyun do {
319*4882a593Smuzhiyun _os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
320*4882a593Smuzhiyun evt_info->evt_bitmap |= RTW_PHL_EVT_RX;
321*4882a593Smuzhiyun _os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun pstatus = phl_schedule_handler(phl_info->phl_com,
324*4882a593Smuzhiyun &phl_info->phl_event_handler);
325*4882a593Smuzhiyun } while (false);
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun if (RTW_PHL_STATUS_SUCCESS != pstatus)
328*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING] Trigger rx indic event fail!\n");
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
333*4882a593Smuzhiyun phl_info->rx_new_pending = 0;
334*4882a593Smuzhiyun #endif
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun
_phl_record_rx_stats(struct rtw_recv_pkt * recvpkt)337*4882a593Smuzhiyun void _phl_record_rx_stats(struct rtw_recv_pkt *recvpkt)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun if(NULL == recvpkt)
340*4882a593Smuzhiyun return;
341*4882a593Smuzhiyun if (recvpkt->tx_sta)
342*4882a593Smuzhiyun recvpkt->tx_sta->stats.rx_rate = recvpkt->mdata.rx_rate;
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun
_phl_add_rx_pkt(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)345*4882a593Smuzhiyun enum rtw_phl_status _phl_add_rx_pkt(struct phl_info_t *phl_info,
346*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx)
347*4882a593Smuzhiyun {
348*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
349*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
350*4882a593Smuzhiyun struct rtw_recv_pkt *recvpkt = &phl_rx->r;
351*4882a593Smuzhiyun u16 ring_res = 0, wptr = 0, rptr = 0;
352*4882a593Smuzhiyun void *drv = phl_to_drvpriv(phl_info);
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
355*4882a593Smuzhiyun _os_spinlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
356*4882a593Smuzhiyun
357*4882a593Smuzhiyun if (!ring)
358*4882a593Smuzhiyun goto out;
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(drv, &ring->phl_idx);
361*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(drv, &ring->core_idx);
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun ring_res = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RING_ENTRY_NUM);
364*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
365*4882a593Smuzhiyun "[3] _phl_add_rx_pkt::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
366*4882a593Smuzhiyun _os_atomic_read(drv, &ring->phl_idx),
367*4882a593Smuzhiyun _os_atomic_read(drv, &ring->core_idx),
368*4882a593Smuzhiyun ring_res);
369*4882a593Smuzhiyun if (ring_res <= 0) {
370*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no ring resource to add new rx pkt!\n");
371*4882a593Smuzhiyun pstatus = RTW_PHL_STATUS_RESOURCE;
372*4882a593Smuzhiyun goto out;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun wptr = wptr + 1;
376*4882a593Smuzhiyun if (wptr >= MAX_PHL_RING_ENTRY_NUM)
377*4882a593Smuzhiyun wptr = 0;
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun ring->entry[wptr] = recvpkt;
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun if (wptr)
382*4882a593Smuzhiyun _os_atomic_inc(drv, &ring->phl_idx);
383*4882a593Smuzhiyun else
384*4882a593Smuzhiyun _os_atomic_set(drv, &ring->phl_idx, 0);
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
387*4882a593Smuzhiyun phl_info->rx_new_pending = 1;
388*4882a593Smuzhiyun pstatus = RTW_PHL_STATUS_SUCCESS;
389*4882a593Smuzhiyun #endif
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun out:
392*4882a593Smuzhiyun _os_spinunlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun if(pstatus == RTW_PHL_STATUS_SUCCESS)
395*4882a593Smuzhiyun _phl_record_rx_stats(recvpkt);
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return pstatus;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun void
phl_sta_ps_enter(struct phl_info_t * phl_info,struct rtw_phl_stainfo_t * sta,struct rtw_wifi_role_t * role)403*4882a593Smuzhiyun phl_sta_ps_enter(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
404*4882a593Smuzhiyun struct rtw_wifi_role_t *role)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
407*4882a593Smuzhiyun enum rtw_hal_status hal_status;
408*4882a593Smuzhiyun struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun _os_atomic_set(d, &sta->ps_sta, 1);
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
413*4882a593Smuzhiyun "STA %02X:%02X:%02X:%02X:%02X:%02X enters PS mode, AID=%u, macid=%u, sta=0x%p\n",
414*4882a593Smuzhiyun sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
415*4882a593Smuzhiyun sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
416*4882a593Smuzhiyun sta->aid, sta->macid, sta);
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun hal_status = rtw_hal_set_macid_pause(phl_info->hal,
419*4882a593Smuzhiyun sta->macid, true);
420*4882a593Smuzhiyun if (RTW_HAL_STATUS_SUCCESS != hal_status) {
421*4882a593Smuzhiyun PHL_WARN("%s(): failed to pause macid tx, macid=%u\n",
422*4882a593Smuzhiyun __FUNCTION__, sta->macid);
423*4882a593Smuzhiyun }
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun if (ops->ap_ps_sta_ps_change)
426*4882a593Smuzhiyun ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, true);
427*4882a593Smuzhiyun }
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun void
phl_sta_ps_exit(struct phl_info_t * phl_info,struct rtw_phl_stainfo_t * sta,struct rtw_wifi_role_t * role)430*4882a593Smuzhiyun phl_sta_ps_exit(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
431*4882a593Smuzhiyun struct rtw_wifi_role_t *role)
432*4882a593Smuzhiyun {
433*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
434*4882a593Smuzhiyun enum rtw_hal_status hal_status;
435*4882a593Smuzhiyun struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
438*4882a593Smuzhiyun "STA %02X:%02X:%02X:%02X:%02X:%02X leaves PS mode, AID=%u, macid=%u, sta=0x%p\n",
439*4882a593Smuzhiyun sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
440*4882a593Smuzhiyun sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
441*4882a593Smuzhiyun sta->aid, sta->macid, sta);
442*4882a593Smuzhiyun
443*4882a593Smuzhiyun _os_atomic_set(d, &sta->ps_sta, 0);
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun hal_status = rtw_hal_set_macid_pause(phl_info->hal,
446*4882a593Smuzhiyun sta->macid, false);
447*4882a593Smuzhiyun if (RTW_HAL_STATUS_SUCCESS != hal_status) {
448*4882a593Smuzhiyun PHL_WARN("%s(): failed to resume macid tx, macid=%u\n",
449*4882a593Smuzhiyun __FUNCTION__, sta->macid);
450*4882a593Smuzhiyun }
451*4882a593Smuzhiyun
452*4882a593Smuzhiyun if (ops->ap_ps_sta_ps_change)
453*4882a593Smuzhiyun ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, false);
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun
456*4882a593Smuzhiyun void
phl_rx_handle_sta_process(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * rx)457*4882a593Smuzhiyun phl_rx_handle_sta_process(struct phl_info_t *phl_info,
458*4882a593Smuzhiyun struct rtw_phl_rx_pkt *rx)
459*4882a593Smuzhiyun {
460*4882a593Smuzhiyun struct rtw_r_meta_data *m = &rx->r.mdata;
461*4882a593Smuzhiyun struct rtw_wifi_role_t *role = NULL;
462*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
463*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
464*4882a593Smuzhiyun
465*4882a593Smuzhiyun if (!phl_info->phl_com->dev_sw_cap.ap_ps)
466*4882a593Smuzhiyun return;
467*4882a593Smuzhiyun
468*4882a593Smuzhiyun if (m->addr_cam_vld) {
469*4882a593Smuzhiyun sta = rtw_phl_get_stainfo_by_macid(phl_info, m->macid);
470*4882a593Smuzhiyun if (sta && sta->wrole)
471*4882a593Smuzhiyun role = sta->wrole;
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun
474*4882a593Smuzhiyun if (!sta) {
475*4882a593Smuzhiyun role = phl_get_wrole_by_addr(phl_info, m->mac_addr);
476*4882a593Smuzhiyun if (role)
477*4882a593Smuzhiyun sta = rtw_phl_get_stainfo_by_addr(phl_info,
478*4882a593Smuzhiyun role, m->ta);
479*4882a593Smuzhiyun }
480*4882a593Smuzhiyun
481*4882a593Smuzhiyun if (!role || !sta)
482*4882a593Smuzhiyun return;
483*4882a593Smuzhiyun
484*4882a593Smuzhiyun rx->r.tx_sta = sta;
485*4882a593Smuzhiyun rx->r.rx_role = role;
486*4882a593Smuzhiyun
487*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_,
488*4882a593Smuzhiyun "ap-ps: more_frag=%u, frame_type=%u, role_type=%d, pwr_bit=%u, seq=%u\n",
489*4882a593Smuzhiyun m->more_frag, m->frame_type, role->type, m->pwr_bit, m->seq);
490*4882a593Smuzhiyun
491*4882a593Smuzhiyun /*
492*4882a593Smuzhiyun * Change STA PS state based on the PM bit in frame control
493*4882a593Smuzhiyun */
494*4882a593Smuzhiyun if (!m->more_frag &&
495*4882a593Smuzhiyun (m->frame_type == RTW_FRAME_TYPE_DATA ||
496*4882a593Smuzhiyun m->frame_type == RTW_FRAME_TYPE_MGNT) &&
497*4882a593Smuzhiyun (role->type == PHL_RTYPE_AP ||
498*4882a593Smuzhiyun role->type == PHL_RTYPE_P2P_GO)) {
499*4882a593Smuzhiyun /* May get a @rx with macid set to our self macid, check if that
500*4882a593Smuzhiyun * happens here to avoid pausing self macid. This is put here so
501*4882a593Smuzhiyun * we wouldn't do it on our normal rx path, which degrades rx
502*4882a593Smuzhiyun * throughput significantly. */
503*4882a593Smuzhiyun if (phl_self_stainfo_chk(phl_info, role, sta))
504*4882a593Smuzhiyun return;
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun if (_os_atomic_read(d, &sta->ps_sta)) {
507*4882a593Smuzhiyun if (!m->pwr_bit)
508*4882a593Smuzhiyun phl_sta_ps_exit(phl_info, sta, role);
509*4882a593Smuzhiyun } else {
510*4882a593Smuzhiyun if (m->pwr_bit)
511*4882a593Smuzhiyun phl_sta_ps_enter(phl_info, sta, role);
512*4882a593Smuzhiyun }
513*4882a593Smuzhiyun }
514*4882a593Smuzhiyun }
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun void
phl_handle_rx_frame_list(struct phl_info_t * phl_info,_os_list * frames)517*4882a593Smuzhiyun phl_handle_rx_frame_list(struct phl_info_t *phl_info,
518*4882a593Smuzhiyun _os_list *frames)
519*4882a593Smuzhiyun {
520*4882a593Smuzhiyun struct rtw_phl_rx_pkt *pos, *n;
521*4882a593Smuzhiyun enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
522*4882a593Smuzhiyun struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
523*4882a593Smuzhiyun
524*4882a593Smuzhiyun phl_list_for_loop_safe(pos, n, struct rtw_phl_rx_pkt, frames, list) {
525*4882a593Smuzhiyun list_del(&pos->list);
526*4882a593Smuzhiyun phl_rx_handle_sta_process(phl_info, pos);
527*4882a593Smuzhiyun status = _phl_add_rx_pkt(phl_info, pos);
528*4882a593Smuzhiyun if (RTW_PHL_STATUS_RESOURCE == status) {
529*4882a593Smuzhiyun hci_trx_ops->recycle_rx_pkt(phl_info, pos);
530*4882a593Smuzhiyun }
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun #ifndef PHL_RX_BATCH_IND
533*4882a593Smuzhiyun _phl_indic_new_rxpkt(phl_info);
534*4882a593Smuzhiyun #endif
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun }
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun #define SEQ_MODULO 0x1000
540*4882a593Smuzhiyun #define SEQ_MASK 0xfff
541*4882a593Smuzhiyun
/* True when sq1 precedes sq2 in 12-bit modular sequence space. */
static inline int seq_less(u16 sq1, u16 sq2)
{
	u16 diff = (sq1 - sq2) & SEQ_MASK;

	return diff > (SEQ_MODULO >> 1);
}
546*4882a593Smuzhiyun
/* Next sequence number, wrapping at 12 bits. */
static inline u16 seq_inc(u16 sq)
{
	return (u16)((sq + 1) & SEQ_MASK);
}
551*4882a593Smuzhiyun
/* Modular 12-bit distance from sq2 forward to sq1. */
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (u16)((sq1 - sq2) & SEQ_MASK);
}
556*4882a593Smuzhiyun
/* Slot in the reorder ring buffer for this sequence number. */
static inline u16 reorder_index(struct phl_tid_ampdu_rx *r, u16 seq)
{
	return seq_sub(seq, r->ssn) % r->buf_size;
}
561*4882a593Smuzhiyun
/**
 * phl_release_reorder_frame - release one slot of the reorder buffer
 * @phl_info: phl context (unused here, kept for call-site symmetry)
 * @r:        per-TID reorder state
 * @index:    slot to release
 * @frames:   output list the released frame is appended to
 *
 * head_seq_num is advanced even when the slot is empty; callers rely on
 * this to step past holes in the sequence space.
 */
static void phl_release_reorder_frame(struct phl_info_t *phl_info,
				      struct phl_tid_ampdu_rx *r,
				      int index, _os_list *frames)
{
	struct rtw_phl_rx_pkt *pkt = r->reorder_buf[index];

	if (pkt) {
		/* hand the frame off and free the ring slot */
		r->reorder_buf[index] = NULL;
		r->stored_mpdu_num--;
		list_add_tail(&pkt->list, frames);
	}

	r->head_seq_num = seq_inc(r->head_seq_num);
}
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun #define HT_RX_REORDER_BUF_TIMEOUT_MS 100
581*4882a593Smuzhiyun
582*4882a593Smuzhiyun /*
583*4882a593Smuzhiyun * If the MPDU at head_seq_num is ready,
584*4882a593Smuzhiyun * 1. release all subsequent MPDUs with consecutive SN and
585*4882a593Smuzhiyun * 2. if there's MPDU that is ready but left in the reordering
586*4882a593Smuzhiyun * buffer, find it and set reorder timer according to its reorder
587*4882a593Smuzhiyun * time
588*4882a593Smuzhiyun *
589*4882a593Smuzhiyun * If the MPDU at head_seq_num is not ready and there is no MPDU ready
590*4882a593Smuzhiyun * in the buffer at all, return.
591*4882a593Smuzhiyun *
592*4882a593Smuzhiyun * If the MPDU at head_seq_num is not ready but there is some MPDU in
593*4882a593Smuzhiyun * the buffer that is ready, check whether any frames in the reorder
594*4882a593Smuzhiyun * buffer have timed out in the following way.
595*4882a593Smuzhiyun *
596*4882a593Smuzhiyun * Basically, MPDUs that are not ready are purged and MPDUs that are
597*4882a593Smuzhiyun * ready are released.
598*4882a593Smuzhiyun *
599*4882a593Smuzhiyun * The process goes through all the buffer but the one at head_seq_num
600*4882a593Smuzhiyun * unless
601*4882a593Smuzhiyun * - there's a MPDU that is ready AND
602*4882a593Smuzhiyun * - there are one or more buffers that are not ready.
603*4882a593Smuzhiyun * In this case, the process is stopped, the head_seq_num becomes the
604*4882a593Smuzhiyun * first buffer that is not ready and the reorder_timer is reset based
605*4882a593Smuzhiyun * on the reorder_time of that ready MPDU.
606*4882a593Smuzhiyun */
/*
 * Release in-order MPDUs from the TID reorder buffer onto @frames and
 * (re)arm or cancel the per-STA reorder timer.
 * Caller must hold sta->tid_rx_lock.
 */
static void phl_reorder_release(struct phl_info_t *phl_info,
				struct phl_tid_ampdu_rx *r, _os_list *frames)
{
	/* ref ieee80211_sta_reorder_release() and wil_reorder_release() */

	int index, i, j;
	u32 cur_time = _os_get_cur_time_ms();

	/* release the buffer until next missing frame */
	index = reorder_index(r, r->head_seq_num);
	if (!r->reorder_buf[index] && r->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		/* scan the whole window starting just after the (empty) head slot */
		for (j = (index + 1) % r->buf_size; j != index;
		     j = (j + 1) % r->buf_size) {
			if (!r->reorder_buf[j]) {
				/* hole before the next stored MPDU */
				skipped++;
				continue;
			}
			/*
			 * Stored MPDU behind a hole that has not yet timed
			 * out: keep waiting, just re-arm the timer below.
			 */
			if (skipped && cur_time < r->reorder_time[j] +
					HT_RX_REORDER_BUF_TIMEOUT_MS)
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % r->buf_size; i != j;
			     i = (i + 1) % r->buf_size)
				phl_recycle_rx_buf(phl_info, r->reorder_buf[i]);

			PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "release an RX reorder frame due to timeout on earlier frames\n");

			phl_release_reorder_frame(phl_info, r, j, frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			r->head_seq_num =
				(r->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (r->reorder_buf[index]) {
		/* head slot filled: release consecutive in-order MPDUs */
		phl_release_reorder_frame(phl_info, r, index, frames);
		index = reorder_index(r, r->head_seq_num);
	}

	if (r->stored_mpdu_num) {
		/* find the first still-buffered MPDU after the new head */
		j = index = r->head_seq_num % r->buf_size;

		for (; j != (index - 1) % r->buf_size;
		     j = (j + 1) % r->buf_size) {
			if (r->reorder_buf[j])
				break;
		}

 set_release_timer:
		/* note: also jumped to from the timeout scan above */

		if (!r->removed)
			_os_set_timer(r->drv_priv, &r->sta->reorder_timer,
				      HT_RX_REORDER_BUF_TIMEOUT_MS);
	} else {
		/* TODO: implementation of cancel timer on Linux is
		   del_timer_sync(), it can't be called with same spinlock
		   held with the expiration callback, that causes a potential
		   deadlock. */
		_os_cancel_timer_async(r->drv_priv, &r->sta->reorder_timer);
	}
}
676*4882a593Smuzhiyun
phl_sta_rx_reorder_timer_expired(void * t)677*4882a593Smuzhiyun void phl_sta_rx_reorder_timer_expired(void *t)
678*4882a593Smuzhiyun {
679*4882a593Smuzhiyun /* ref sta_rx_agg_reorder_timer_expired() */
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)t;
682*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = sta->wrole->phl_com;
683*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
684*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
685*4882a593Smuzhiyun u8 i = 0;
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun PHL_INFO("Rx reorder timer expired, sta=0x%p\n", sta);
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
690*4882a593Smuzhiyun _os_list frames;
691*4882a593Smuzhiyun
692*4882a593Smuzhiyun INIT_LIST_HEAD(&frames);
693*4882a593Smuzhiyun
694*4882a593Smuzhiyun _os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
695*4882a593Smuzhiyun if (sta->tid_rx[i])
696*4882a593Smuzhiyun phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
697*4882a593Smuzhiyun _os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
698*4882a593Smuzhiyun
699*4882a593Smuzhiyun phl_handle_rx_frame_list(phl_info, &frames);
700*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
701*4882a593Smuzhiyun _phl_indic_new_rxpkt(phl_info);
702*4882a593Smuzhiyun #endif
703*4882a593Smuzhiyun }
704*4882a593Smuzhiyun
705*4882a593Smuzhiyun _os_event_set(drv_priv, &sta->comp_sync);
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun
/*
 * Release every buffered MPDU with sequence number strictly before
 * @head_seq_num, then move the window head there.
 * Caller must hold sta->tid_rx_lock.
 */
static void phl_release_reorder_frames(struct phl_info_t *phl_info,
				       struct phl_tid_ampdu_rx *r,
				       u16 head_seq_num, _os_list *frames)
{
	/* ref ieee80211_release_reorder_frames() and
	   wil_release_reorder_frames() */

	int idx;

	/*
	 * This function is never called with head_seq_num preceding
	 * r->head_seq_num, so on loop exit r->head_seq_num == head_seq_num
	 * (phl_release_reorder_frame advances r->head_seq_num each pass).
	 * The stored_mpdu_num test is a belt-and-braces guard.
	 */
	while (r->stored_mpdu_num &&
	       seq_less(r->head_seq_num, head_seq_num)) {
		idx = reorder_index(r, r->head_seq_num);
		phl_release_reorder_frame(phl_info, r, idx, frames);
	}

	r->head_seq_num = head_seq_num;
}
730*4882a593Smuzhiyun
rtw_phl_flush_reorder_buf(void * phl,struct rtw_phl_stainfo_t * sta)731*4882a593Smuzhiyun void rtw_phl_flush_reorder_buf(void *phl, struct rtw_phl_stainfo_t *sta)
732*4882a593Smuzhiyun {
733*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
734*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
735*4882a593Smuzhiyun _os_list frames;
736*4882a593Smuzhiyun u8 i = 0;
737*4882a593Smuzhiyun
738*4882a593Smuzhiyun PHL_INFO("%s: sta=0x%p\n", __FUNCTION__, sta);
739*4882a593Smuzhiyun
740*4882a593Smuzhiyun INIT_LIST_HEAD(&frames);
741*4882a593Smuzhiyun
742*4882a593Smuzhiyun _os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
743*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
744*4882a593Smuzhiyun if (sta->tid_rx[i])
745*4882a593Smuzhiyun phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun _os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun phl_handle_rx_frame_list(phl_info, &frames);
750*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
751*4882a593Smuzhiyun _phl_indic_new_rxpkt(phl_info);
752*4882a593Smuzhiyun #endif
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun }
755*4882a593Smuzhiyun
/*
 * Insert @pkt into the TID reorder buffer, releasing frames onto @frames
 * as the window moves. Returns false when the caller should deliver the
 * packet itself (not buffered), true when the packet was consumed
 * (buffered, released, or dropped/recycled as duplicate/old).
 * Caller must hold sta->tid_rx_lock.
 */
static bool phl_manage_sta_reorder_buf(struct phl_info_t *phl_info,
				       struct rtw_phl_rx_pkt *pkt,
				       struct phl_tid_ampdu_rx *r,
				       _os_list *frames)
{
	/* ref ieee80211_sta_manage_reorder_buf() and wil_rx_reorder() */

	struct rtw_r_meta_data *meta = &pkt->r.mdata;
	u16 mpdu_seq_num = meta->seq;
	u16 head_seq_num, buf_size;
	int index;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	buf_size = r->buf_size;
	head_seq_num = r->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (!r->started) {
		if (seq_less(mpdu_seq_num, head_seq_num))
			return false;
		r->started = true;
	}

	/* after wake-up, resync the window head to the first received seq */
	if (r->sleep) {
		PHL_INFO("tid = %d reorder buffer handling after wake up\n",
			 r->tid);
		PHL_INFO("Update head seq(0x%03x) to the first rx seq(0x%03x) after wake up\n",
			 r->head_seq_num, mpdu_seq_num);
		r->head_seq_num = mpdu_seq_num;
		head_seq_num = r->head_seq_num;
		r->sleep = false;
	}

	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
			  meta->seq, r->head_seq_num);
		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
		return true;
	}

	/*
	 * If frame the sequence number exceeds our buffering window
	 * size release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		phl_release_reorder_frames(phl_info, r, head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, mpdu_seq_num);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
			  meta->seq, r->head_seq_num);
		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
		return true;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == r->head_seq_num &&
	    r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		return false;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = pkt;
	r->reorder_time[index] = _os_get_cur_time_ms();
	r->stored_mpdu_num++;
	/* try to release any now-contiguous run and manage the timer */
	phl_reorder_release(phl_info, r, frames);

	return true;

}
843*4882a593Smuzhiyun
/*
 * Entry point of rx reordering: strip FCS if appended, decide whether the
 * packet is eligible for the block-ack reorder buffer, and either queue it
 * through phl_manage_sta_reorder_buf() or append it to @frames directly.
 *
 * Returns RTW_PHL_STATUS_SUCCESS when the packet was consumed (reordered
 * or appended to @frames), RTW_PHL_STATUS_FAILURE when it was dropped and
 * recycled.
 *
 * Fix vs. original: the out-of-range TID trace printed a hard-coded "8"
 * instead of the real bound ARRAY_SIZE(sta->tid_rx).
 */
enum rtw_phl_status phl_rx_reorder(struct phl_info_t *phl_info,
				   struct rtw_phl_rx_pkt *phl_rx,
				   _os_list *frames)
{
	/* ref wil_rx_reorder() and ieee80211_rx_reorder_ampdu() */

	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_r_meta_data *meta = &phl_rx->r.mdata;
	u16 tid = meta->tid;
	struct rtw_phl_stainfo_t *sta = NULL;
	struct phl_tid_ampdu_rx *r;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	/*
	 * Remove FCS if is is appended
	 * TODO: handle more than one in pkt_list
	 */
	if (phl_info->phl_com->append_fcs) {
		/*
		 * Only last MSDU of A-MSDU includes FCS.
		 * TODO: If A-MSDU cut processing is in HAL, should only deduct
		 * FCS from length of last one of pkt_list. For such case,
		 * phl_rx->r should have pkt_list length.
		 */
		if (!(meta->amsdu_cut && !meta->last_msdu)) {
			/* guard: length must exceed the 4-byte FCS */
			if (phl_rx->r.pkt_list[0].length <= 4) {
				PHL_ERR("%s, pkt_list[0].length(%d) too short\n",
					__func__, phl_rx->r.pkt_list[0].length);
				goto drop_frame;
			}
			phl_rx->r.pkt_list[0].length -= 4;
		}
	}

	/* MP mode, broadcast/multicast, non-QoS and QoS-null frames are
	 * never subject to BA reordering */
	if (phl_is_mp_mode(phl_info->phl_com))
		goto dont_reorder;

	if (meta->bc || meta->mc)
		goto dont_reorder;

	if (!meta->qos)
		goto dont_reorder;

	if (meta->q_null)
		goto dont_reorder;

	/* TODO: check ba policy is either ba or normal */

	/* if the mpdu is fragmented, don't reorder */
	if (meta->more_frag || meta->frag_num) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
			  "Receive QoS Data with more_frag=%u, frag_num=%u\n",
			  meta->more_frag, meta->frag_num);
		goto dont_reorder;
	}

	/* Use MAC ID from address CAM if this packet is address CAM matched */
	if (meta->addr_cam_vld)
		sta = rtw_phl_get_stainfo_by_macid(phl_info, meta->macid);

	/* Otherwise, search STA by TA */
	if (!sta || !sta->wrole) {
		struct rtw_wifi_role_t *wrole;
		wrole = phl_get_wrole_by_addr(phl_info, meta->mac_addr);
		if (wrole)
			sta = rtw_phl_get_stainfo_by_addr(phl_info,
							  wrole, meta->ta);
		if (!wrole || !sta) {
			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
				  "%s(): stainfo or wrole not found, cam=%u, macid=%u\n",
				  __FUNCTION__, meta->addr_cam, meta->macid);
			goto dont_reorder;
		}
	}

	phl_rx->r.tx_sta = sta;
	phl_rx->r.rx_role = sta->wrole;

	/* update per-sta rx statistics from this MPDU's meta data */
	rtw_hal_set_sta_rx_sts(sta, false, meta);

	if (tid >= ARRAY_SIZE(sta->tid_rx)) {
		/* report the actual array bound instead of a magic number */
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "Fail: tid (%u) index out of range (%u)\n",
			  tid, (unsigned int)ARRAY_SIZE(sta->tid_rx));
		goto drop_frame;
	}

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	r = sta->tid_rx[tid];
	if (!r) {
		/* no BA session on this TID: deliver in arrival order */
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		goto dont_reorder;
	}

	if (!phl_manage_sta_reorder_buf(phl_info, phl_rx, r, frames)) {
		/* not buffered: caller delivers it directly */
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		goto dont_reorder;
	}

	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	return RTW_PHL_STATUS_SUCCESS;

drop_frame:
	hci_trx_ops->recycle_rx_pkt(phl_info, phl_rx);
	return RTW_PHL_STATUS_FAILURE;

dont_reorder:
	list_add_tail(&phl_rx->list, frames);
	return RTW_PHL_STATUS_SUCCESS;
}
954*4882a593Smuzhiyun
955*4882a593Smuzhiyun
phl_check_recv_ring_resource(struct phl_info_t * phl_info)956*4882a593Smuzhiyun u8 phl_check_recv_ring_resource(struct phl_info_t *phl_info)
957*4882a593Smuzhiyun {
958*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
959*4882a593Smuzhiyun u16 avail = 0, wptr = 0, rptr = 0;
960*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
961*4882a593Smuzhiyun
962*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
963*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
964*4882a593Smuzhiyun avail = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RING_ENTRY_NUM);
965*4882a593Smuzhiyun
966*4882a593Smuzhiyun if (0 == avail)
967*4882a593Smuzhiyun return false;
968*4882a593Smuzhiyun else
969*4882a593Smuzhiyun return true;
970*4882a593Smuzhiyun }
971*4882a593Smuzhiyun
dump_phl_rx_ring(void * phl)972*4882a593Smuzhiyun void dump_phl_rx_ring(void *phl)
973*4882a593Smuzhiyun {
974*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
975*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
976*4882a593Smuzhiyun s16 diff = 0;
977*4882a593Smuzhiyun u16 idx = 0, endidx = 0;
978*4882a593Smuzhiyun u16 phl_idx = 0, core_idx = 0;
979*4882a593Smuzhiyun
980*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "===Dump PHL RX Ring===\n");
981*4882a593Smuzhiyun phl_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.phl_idx);
982*4882a593Smuzhiyun core_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.core_idx);
983*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
984*4882a593Smuzhiyun "core_idx = %d\n"
985*4882a593Smuzhiyun "phl_idx = %d\n",
986*4882a593Smuzhiyun core_idx,
987*4882a593Smuzhiyun phl_idx);
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun diff= phl_idx-core_idx;
990*4882a593Smuzhiyun if(diff < 0)
991*4882a593Smuzhiyun diff= 4096+diff;
992*4882a593Smuzhiyun
993*4882a593Smuzhiyun endidx = diff > 5 ? (core_idx+6): phl_idx;
994*4882a593Smuzhiyun for (idx = core_idx+1; idx < endidx; idx++) {
995*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "entry[%d] = %p\n", idx,
996*4882a593Smuzhiyun phl_info->phl_rx_ring.entry[idx%4096]);
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun }
999*4882a593Smuzhiyun
1000*4882a593Smuzhiyun
phl_event_indicator(void * context)1001*4882a593Smuzhiyun void phl_event_indicator(void *context)
1002*4882a593Smuzhiyun {
1003*4882a593Smuzhiyun enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1004*4882a593Smuzhiyun struct rtw_phl_handler *phl_handler
1005*4882a593Smuzhiyun = (struct rtw_phl_handler *)phl_container_of(context,
1006*4882a593Smuzhiyun struct rtw_phl_handler,
1007*4882a593Smuzhiyun os_handler);
1008*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
1009*4882a593Smuzhiyun struct rtw_phl_evt_ops *ops = NULL;
1010*4882a593Smuzhiyun struct rtw_evt_info_t *evt_info = NULL;
1011*4882a593Smuzhiyun void *drv_priv = NULL;
1012*4882a593Smuzhiyun enum rtw_phl_evt evt_bitmap = 0;
1013*4882a593Smuzhiyun FUNCIN_WSTS(sts);
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun if (NULL != phl_info) {
1016*4882a593Smuzhiyun ops = &phl_info->phl_com->evt_ops;
1017*4882a593Smuzhiyun evt_info = &phl_info->phl_com->evt_info;
1018*4882a593Smuzhiyun drv_priv = phl_to_drvpriv(phl_info);
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun _os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1021*4882a593Smuzhiyun evt_bitmap = evt_info->evt_bitmap;
1022*4882a593Smuzhiyun evt_info->evt_bitmap = 0;
1023*4882a593Smuzhiyun _os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun if (RTW_PHL_EVT_RX & evt_bitmap) {
1026*4882a593Smuzhiyun if (NULL != ops->rx_process) {
1027*4882a593Smuzhiyun sts = ops->rx_process(drv_priv);
1028*4882a593Smuzhiyun }
1029*4882a593Smuzhiyun dump_phl_rx_ring(phl_info);
1030*4882a593Smuzhiyun }
1031*4882a593Smuzhiyun }
1032*4882a593Smuzhiyun FUNCOUT_WSTS(sts);
1033*4882a593Smuzhiyun
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
_phl_rx_statistics_reset(struct phl_info_t * phl_info)1036*4882a593Smuzhiyun void _phl_rx_statistics_reset(struct phl_info_t *phl_info)
1037*4882a593Smuzhiyun {
1038*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = phl_info->phl_com;
1039*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
1040*4882a593Smuzhiyun struct rtw_wifi_role_t *role = NULL;
1041*4882a593Smuzhiyun void *drv = phl_to_drvpriv(phl_info);
1042*4882a593Smuzhiyun struct phl_queue *sta_queue;
1043*4882a593Smuzhiyun u8 i;
1044*4882a593Smuzhiyun
1045*4882a593Smuzhiyun for (i = 0; i< MAX_WIFI_ROLE_NUMBER; i++) {
1046*4882a593Smuzhiyun role = &phl_com->wifi_roles[i];
1047*4882a593Smuzhiyun if (role->active && (role->mstate == MLME_LINKED)) {
1048*4882a593Smuzhiyun sta_queue = &role->assoc_sta_queue;
1049*4882a593Smuzhiyun _os_spinlock(drv, &sta_queue->lock, _bh, NULL);
1050*4882a593Smuzhiyun phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
1051*4882a593Smuzhiyun &sta_queue->queue, list) {
1052*4882a593Smuzhiyun if (sta)
1053*4882a593Smuzhiyun rtw_hal_set_sta_rx_sts(sta, true, NULL);
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun _os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun }
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun void
phl_rx_watchdog(struct phl_info_t * phl_info)1061*4882a593Smuzhiyun phl_rx_watchdog(struct phl_info_t *phl_info)
1062*4882a593Smuzhiyun {
1063*4882a593Smuzhiyun struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
1064*4882a593Smuzhiyun
1065*4882a593Smuzhiyun phl_rx_traffic_upd(phl_stats);
1066*4882a593Smuzhiyun phl_dump_rx_stats(phl_stats);
1067*4882a593Smuzhiyun _phl_rx_statistics_reset(phl_info);
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
rtw_phl_query_new_rx_num(void * phl)1070*4882a593Smuzhiyun u16 rtw_phl_query_new_rx_num(void *phl)
1071*4882a593Smuzhiyun {
1072*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1073*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = NULL;
1074*4882a593Smuzhiyun u16 new_rx = 0, wptr = 0, rptr = 0;
1075*4882a593Smuzhiyun
1076*4882a593Smuzhiyun if (NULL != phl_info) {
1077*4882a593Smuzhiyun ring = &phl_info->phl_rx_ring;
1078*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
1079*4882a593Smuzhiyun &ring->phl_idx);
1080*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
1081*4882a593Smuzhiyun &ring->core_idx);
1082*4882a593Smuzhiyun new_rx = phl_calc_avail_rptr(rptr, wptr,
1083*4882a593Smuzhiyun MAX_PHL_RING_ENTRY_NUM);
1084*4882a593Smuzhiyun }
1085*4882a593Smuzhiyun
1086*4882a593Smuzhiyun return new_rx;
1087*4882a593Smuzhiyun }
1088*4882a593Smuzhiyun
rtw_phl_query_rx_pkt(void * phl)1089*4882a593Smuzhiyun struct rtw_recv_pkt *rtw_phl_query_rx_pkt(void *phl)
1090*4882a593Smuzhiyun {
1091*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1092*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = NULL;
1093*4882a593Smuzhiyun struct rtw_recv_pkt *recvpkt = NULL;
1094*4882a593Smuzhiyun void *drv_priv = NULL;
1095*4882a593Smuzhiyun u16 ring_res = 0, wptr = 0, rptr = 0;
1096*4882a593Smuzhiyun
1097*4882a593Smuzhiyun if (NULL != phl_info) {
1098*4882a593Smuzhiyun ring = &phl_info->phl_rx_ring;
1099*4882a593Smuzhiyun drv_priv = phl_to_drvpriv(phl_info);
1100*4882a593Smuzhiyun
1101*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1102*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun ring_res = phl_calc_avail_rptr(rptr, wptr,
1105*4882a593Smuzhiyun MAX_PHL_RING_ENTRY_NUM);
1106*4882a593Smuzhiyun
1107*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
1108*4882a593Smuzhiyun "[4] %s::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
1109*4882a593Smuzhiyun __FUNCTION__,
1110*4882a593Smuzhiyun _os_atomic_read(drv_priv, &ring->phl_idx),
1111*4882a593Smuzhiyun _os_atomic_read(drv_priv, &ring->core_idx),
1112*4882a593Smuzhiyun ring_res);
1113*4882a593Smuzhiyun
1114*4882a593Smuzhiyun if (ring_res > 0) {
1115*4882a593Smuzhiyun rptr = rptr + 1;
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun if (rptr >= MAX_PHL_RING_ENTRY_NUM) {
1118*4882a593Smuzhiyun rptr=0;
1119*4882a593Smuzhiyun recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
1120*4882a593Smuzhiyun ring->entry[rptr]=NULL;
1121*4882a593Smuzhiyun _os_atomic_set(drv_priv, &ring->core_idx, 0);
1122*4882a593Smuzhiyun } else {
1123*4882a593Smuzhiyun recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
1124*4882a593Smuzhiyun ring->entry[rptr]=NULL;
1125*4882a593Smuzhiyun _os_atomic_inc(drv_priv, &ring->core_idx);
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun if (NULL == recvpkt)
1128*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "recvpkt is NULL!\n");
1129*4882a593Smuzhiyun else
1130*4882a593Smuzhiyun phl_rx_statistics(phl_info, recvpkt);
1131*4882a593Smuzhiyun } else {
1132*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no available rx packet to query!\n");
1133*4882a593Smuzhiyun }
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun return recvpkt;
1137*4882a593Smuzhiyun }
1138*4882a593Smuzhiyun
/*
 * Give a previously queried rx packet back to the PHL rx buffer pool.
 * Returns RTW_PHL_STATUS_FAILURE when @recvpkt is NULL.
 */
enum rtw_phl_status rtw_phl_return_rxbuf(void *phl, u8 *recvpkt)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_recv_pkt *r = (struct rtw_recv_pkt *)recvpkt;
	struct rtw_phl_rx_pkt *phl_rx = NULL;

	if (recvpkt == NULL)
		return RTW_PHL_STATUS_FAILURE;

	/* recover the owning phl_rx from its embedded rtw_recv_pkt */
	phl_rx = phl_container_of(r, struct rtw_phl_rx_pkt, r);
	phl_recycle_rx_buf(phl_info, phl_rx);

	return RTW_PHL_STATUS_SUCCESS;
}
1157*4882a593Smuzhiyun
1158*4882a593Smuzhiyun
rtw_phl_start_rx_process(void * phl)1159*4882a593Smuzhiyun enum rtw_phl_status rtw_phl_start_rx_process(void *phl)
1160*4882a593Smuzhiyun {
1161*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1162*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyun pstatus = phl_schedule_handler(phl_info->phl_com,
1167*4882a593Smuzhiyun &phl_info->phl_rx_handler);
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun return pstatus;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
/*
 * Handle a received Block Ack Request: advance the TID reorder window
 * head to @seq, releasing every buffered MPDU before it.
 *
 * Bug fix: the original tid-range check jumped to the "out" label, which
 * unlocks sta->tid_rx_lock even though the lock had not been taken yet.
 * Return directly instead, since nothing is acquired at that point.
 */
void rtw_phl_rx_bar(void *phl, struct rtw_phl_stainfo_t *sta, u8 tid, u16 seq)
{
	/* ref ieee80211_rx_h_ctrl() and wil_rx_bar() */

	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct phl_tid_ampdu_rx *r;
	_os_list frames;

	INIT_LIST_HEAD(&frames);

	/* lock not held yet: must not go through the unlock path */
	if (tid >= RTW_MAX_TID_NUM)
		return;

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	r = sta->tid_rx[tid];
	if (!r) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR for non-existing TID %d\n", tid);
		goto out;
	}

	/* a BAR never moves the window backwards */
	if (seq_less(seq, r->head_seq_num)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR Seq 0x%03x preceding head 0x%03x\n",
			  seq, r->head_seq_num);
		goto out;
	}

	PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "BAR: TID %d Seq 0x%03x head 0x%03x\n",
		  tid, seq, r->head_seq_num);

	phl_release_reorder_frames(phl_info, r, seq, &frames);
	phl_handle_rx_frame_list(phl_info, &frames);

out:
	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
}
1211*4882a593Smuzhiyun
rtw_phl_get_rx_status(void * phl)1212*4882a593Smuzhiyun enum rtw_rx_status rtw_phl_get_rx_status(void *phl)
1213*4882a593Smuzhiyun {
1214*4882a593Smuzhiyun #ifdef CONFIG_USB_HCI
1215*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1216*4882a593Smuzhiyun enum rtw_hci_type hci_type = phl_info->phl_com->hci_type;
1217*4882a593Smuzhiyun
1218*4882a593Smuzhiyun if (hci_type & RTW_HCI_USB)
1219*4882a593Smuzhiyun return rtw_hal_get_usb_status(phl_info->hal);
1220*4882a593Smuzhiyun #endif
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun return RTW_STATUS_RX_OK;
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun enum rtw_phl_status
rtw_phl_enter_mon_mode(void * phl,struct rtw_wifi_role_t * wrole)1226*4882a593Smuzhiyun rtw_phl_enter_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
1227*4882a593Smuzhiyun {
1228*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1229*4882a593Smuzhiyun enum rtw_hal_status status;
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun status = rtw_hal_enter_mon_mode(phl_info->hal, wrole->hw_band);
1232*4882a593Smuzhiyun if (status != RTW_HAL_STATUS_SUCCESS) {
1233*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1234*4882a593Smuzhiyun "%s(): rtw_hal_enter_mon_mode() failed, status=%d",
1235*4882a593Smuzhiyun __FUNCTION__, status);
1236*4882a593Smuzhiyun return RTW_PHL_STATUS_FAILURE;
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun return RTW_PHL_STATUS_SUCCESS;
1240*4882a593Smuzhiyun }
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun enum rtw_phl_status
rtw_phl_leave_mon_mode(void * phl,struct rtw_wifi_role_t * wrole)1243*4882a593Smuzhiyun rtw_phl_leave_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
1244*4882a593Smuzhiyun {
1245*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1246*4882a593Smuzhiyun enum rtw_hal_status status;
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun status = rtw_hal_leave_mon_mode(phl_info->hal, wrole->hw_band);
1249*4882a593Smuzhiyun if (status != RTW_HAL_STATUS_SUCCESS) {
1250*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1251*4882a593Smuzhiyun "%s(): rtw_hal_leave_mon_mode() failed, status=%d",
1252*4882a593Smuzhiyun __FUNCTION__, status);
1253*4882a593Smuzhiyun return RTW_PHL_STATUS_FAILURE;
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun return RTW_PHL_STATUS_SUCCESS;
1257*4882a593Smuzhiyun }
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
1260*4882a593Smuzhiyun void
_phl_rx_proc_frame_list(struct phl_info_t * phl_info,struct phl_queue * pq)1261*4882a593Smuzhiyun _phl_rx_proc_frame_list(struct phl_info_t *phl_info, struct phl_queue *pq)
1262*4882a593Smuzhiyun {
1263*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
1264*4882a593Smuzhiyun _os_list *pkt_list = NULL;
1265*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx = NULL;
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun if (NULL == pq)
1268*4882a593Smuzhiyun return;
1269*4882a593Smuzhiyun if (0 == pq->cnt)
1270*4882a593Smuzhiyun return;
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1273*4882a593Smuzhiyun "_phl_rx_proc_frame_list : queue ele cnt = %d\n",
1274*4882a593Smuzhiyun pq->cnt);
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun while (true == pq_pop(d, pq, &pkt_list, _first, _bh)) {
1277*4882a593Smuzhiyun phl_rx = (struct rtw_phl_rx_pkt *)pkt_list;
1278*4882a593Smuzhiyun phl_info->hci_trx_ops->rx_handle_normal(phl_info, phl_rx);
1279*4882a593Smuzhiyun }
1280*4882a593Smuzhiyun }
1281*4882a593Smuzhiyun
/*
 * Attach the PHY status of a received PPDU-status packet to the rx frames
 * that were buffered for the same PPDU (per hw band, per ppdu_cnt slot),
 * then indicate those frames up the normal rx path and reset the slot.
 *
 * @phl_info: PHL context.
 * @ppdu_sts: rx packet carrying the PPDU status (phy_info) to propagate.
 *
 * Returns RTW_PHL_STATUS_SUCCESS only in the "slot empty, nothing to do"
 * case; otherwise RTW_PHL_STATUS_FAILURE (including the normal path where
 * frames are indicated — callers appear to use the return loosely).
 */
enum rtw_phl_status
phl_rx_proc_phy_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *ppdu_sts)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
	struct rtw_phl_rx_pkt *phl_rx = NULL;
	void *d = phl_to_drvpriv(phl_info);
	struct rtw_phl_rssi_stat *rssi_stat = &phl_info->phl_com->rssi_stat;
	_os_list *frame = NULL;
	/* assume the status matches the buffered frames until proven otherwise */
	bool upt_psts = true;
	u8 i = 0;
	enum phl_band_idx band = HW_BAND_0;

	if (NULL == ppdu_sts)
		return pstatus;

	/* Feature gate: per-packet phy status propagation disabled. */
	if (false == psts_info->en_psts_per_pkt) {
		return pstatus;
	}

	/* ppdu_cnt indexes sts_ent[][]; reject out-of-range values. */
	if (ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			  "ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!\n");
		return pstatus;
	}

	/* bb_sel selects the hw band this status belongs to. */
	band = (ppdu_sts->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;

	if (false == psts_info->en_ppdu_sts[band])
		return pstatus;

	/*
	 * The status' ppdu_cnt should match the slot we are currently
	 * buffering; a mismatch means a status was lost, so don't copy
	 * this status into the buffered frames (but still flush them).
	 */
	if (ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt[band]) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			  "ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt!\n");
		upt_psts = false;
	}

	/* NOTE: indexed by cur_ppdu_cnt, not the status' own ppdu_cnt. */
	sts_entry = &psts_info->sts_ent[band][psts_info->cur_ppdu_cnt[band]];
	/* check list empty */
	if (0 == sts_entry->frames.cnt) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			  "cur_ppdu_cnt %d --> sts_entry->frames.cnt = 0\n",
			  psts_info->cur_ppdu_cnt[band]);
		pstatus = RTW_PHL_STATUS_SUCCESS;
		return pstatus;
	}

	/* start update phy info to per pkt*/
	if (false == pq_get_front(d, &sts_entry->frames, &frame, _bh)) {
		PHL_ERR(" %s list empty\n", __FUNCTION__);
		return pstatus;
	}
	/**
	 * TODO : How to filter the case :
	 * pkt(ppdu_cnt = 0) --> missing :psts(ppdu_cnt = 0) --> (all of the pkt, psts dropped/missing)
	 * --> ppdu_sts(ppdu_cnt = 0)(not for the current buffered pkt.)
	 * workaround : check rate/bw/ppdu_type/... etc
	 **/
	phl_rx = (struct rtw_phl_rx_pkt *)frame;
	/*
	 * Sanity cross-check: if rate/bw/gi-ltf/ppdu-type of the first
	 * buffered frame disagree with the status, the status belongs to
	 * a different PPDU — skip the phy_info copy.
	 */
	if (upt_psts &&
	    ((phl_rx->r.mdata.rx_rate != ppdu_sts->r.mdata.rx_rate) ||
	     (phl_rx->r.mdata.bw != ppdu_sts->r.mdata.bw) ||
	     (phl_rx->r.mdata.rx_gi_ltf != ppdu_sts->r.mdata.rx_gi_ltf) ||
	     (phl_rx->r.mdata.ppdu_type != ppdu_sts->r.mdata.ppdu_type))) {
		/**
		 * ppdu status is not for the buffered pkt,
		 * skip update phy status to phl_rx
		 **/
		upt_psts = false;
	}
	/* Get Frame Type */
	ppdu_sts->r.phy_info.frame_type =
		PHL_GET_80211_HDR_TYPE(phl_rx->r.pkt_list[0].vir_addr);

	/*
	 * If the hardware did not deliver a valid phy status, optionally
	 * fabricate one from the moving-average RSSI of the matching
	 * frame-type category plus the current channel.
	 */
	if ((false == ppdu_sts->r.phy_info.is_valid) &&
	    (true == psts_info->en_fake_psts)) {
		if (RTW_FRAME_TYPE_MGNT == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_MGNT_ACAM_A1M];
		} else if (RTW_FRAME_TYPE_DATA == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_DATA_ACAM_A1M];
		} else if (RTW_FRAME_TYPE_CTRL == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_CTRL_ACAM_A1M];
		} else {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_UNKNOWN];
		}
		/* replicate the averaged rssi to every rf path */
		for(i = 0; i< RTW_PHL_MAX_RF_PATH ; i++) {
			ppdu_sts->r.phy_info.rssi_path[i] =
				ppdu_sts->r.phy_info.rssi;
		}
		ppdu_sts->r.phy_info.ch_idx = rtw_hal_get_cur_ch(phl_info->hal,
						phl_rx->r.mdata.bb_sel);
		ppdu_sts->r.phy_info.is_valid = true;
	}

	/*
	 * Copy phy_info into the first buffered frame; when psts_ampdu is
	 * set, continue through the rest of the list (all MPDUs of the
	 * A-MPDU share one PPDU status).
	 */
	do {
		if (false == upt_psts)
			break;
		phl_rx = (struct rtw_phl_rx_pkt *)frame;
		_os_mem_cpy(d, &(phl_rx->r.phy_info), &(ppdu_sts->r.phy_info),
			    sizeof(struct rtw_phl_ppdu_phy_info));
	} while ((true == psts_info->psts_ampdu) &&
		 (pq_get_next(d, &sts_entry->frames, frame, &frame, _bh)));

	/*2. indicate the frame list*/
	_phl_rx_proc_frame_list(phl_info, &sts_entry->frames);
	/*3. reset the queue */
	pq_reset(d, &(sts_entry->frames), _bh);

	return pstatus;
}
1397*4882a593Smuzhiyun
/*
 * Decide whether a received frame must be buffered to wait for its PPDU
 * status (which arrives in a separate rx packet). Frames are queued per
 * hw band into sts_ent[band][ppdu_cnt]; a later phl_rx_proc_phy_sts()
 * call stamps them with phy info and indicates them.
 *
 * @phl_info: PHL context.
 * @phl_rx:   the received frame under classification.
 *
 * Returns true when the frame was queued (caller must NOT indicate it
 * now), false when the frame should go up the normal rx path directly.
 */
bool
phl_rx_proc_wait_phy_sts(struct phl_info_t *phl_info,
			 struct rtw_phl_rx_pkt *phl_rx)
{
	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
	void *d = phl_to_drvpriv(phl_info);
	u8 i = 0;
	bool ret = false;
	enum phl_band_idx band = HW_BAND_0;

	/* Feature gate: per-packet phy status propagation disabled. */
	if (false == psts_info->en_psts_per_pkt) {
		return ret;
	}

	/* ppdu_cnt indexes sts_ent[][]; reject out-of-range values. */
	if (phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
		PHL_ASSERT("phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!");
		return ret;
	}

	/* bb_sel selects the hw band this frame belongs to. */
	band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;

	if (false == psts_info->en_ppdu_sts[band])
		return ret;

	if (psts_info->cur_ppdu_cnt[band] != phl_rx->r.mdata.ppdu_cnt) {
		/* start of PPDU */
		/* 1. Check all of the buffer list is empty */
		/* only check the target rx pkt band */
		for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
			sts_entry = &psts_info->sts_ent[band][i];
			if (0 != sts_entry->frames.cnt) {
				/* need indicate first */
				/* stale frames whose PPDU status never
				 * arrived: flush them without phy info */
				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
					  "band %d ; ppdu_cnt %d queue is not empty \n",
					  band, i);
				_phl_rx_proc_frame_list(phl_info,
							&sts_entry->frames);
				pq_reset(d, &(sts_entry->frames), _bh);
			}
		}

		/* 2. check ppdu status filter condition */
		/* Filter function is supportted only if rxd = long_rxd */
		if ((1 == phl_rx->r.mdata.long_rxd) &&
		    (0 != (psts_info->ppdu_sts_filter &
			   BIT(phl_rx->r.mdata.frame_type)))) {
			/* 3. add new rx pkt to the tail of the queue */
			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
			pq_reset(d, &(sts_entry->frames), _bh);
			pq_push(d, &(sts_entry->frames), &phl_rx->list,
				_tail, _bh);
			ret = true;
		}
		/* advance the tracked PPDU even if the frame was filtered out */
		psts_info->cur_ppdu_cnt[band] = phl_rx->r.mdata.ppdu_cnt;
	} else {
		/* same PPDU as the previous frame (A-MPDU continuation) */
		/* 1. check ppdu status filter condition */
		/* Filter function is supportted only if rxd = long_rxd */
		if ((1 == phl_rx->r.mdata.long_rxd) &&
		    (0 != (psts_info->ppdu_sts_filter &
			   BIT(phl_rx->r.mdata.frame_type)))) {
			/* 2. add to frame list */
			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
			if (0 == sts_entry->frames.cnt) {
				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
					  "MPDU is not the start of PPDU, but the queue is empty!!!\n");
			}
			pq_push(d, &(sts_entry->frames), &phl_rx->list,
				_tail, _bh);
			ret = true;
		}
	}

	return ret;
}
1473*4882a593Smuzhiyun #endif
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun #ifdef CONFIG_PHY_INFO_NTFY
_phl_rx_post_proc_ppdu_sts(void * priv,struct phl_msg * msg)1476*4882a593Smuzhiyun void _phl_rx_post_proc_ppdu_sts(void* priv, struct phl_msg* msg)
1477*4882a593Smuzhiyun {
1478*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1479*4882a593Smuzhiyun if (msg->inbuf && msg->inlen){
1480*4882a593Smuzhiyun _os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun bool
_phl_rx_proc_aggr_psts_ntfy(struct phl_info_t * phl_info,struct rtw_phl_ppdu_sts_ent * ppdu_sts_ent)1485*4882a593Smuzhiyun _phl_rx_proc_aggr_psts_ntfy(struct phl_info_t *phl_info,
1486*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent)
1487*4882a593Smuzhiyun {
1488*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_info *ppdu_info =
1489*4882a593Smuzhiyun &phl_info->phl_com->ppdu_sts_info;
1490*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ntfy *psts_ntfy = NULL;
1491*4882a593Smuzhiyun u8 i = 0;
1492*4882a593Smuzhiyun bool ret = false;
1493*4882a593Smuzhiyun
1494*4882a593Smuzhiyun if (ppdu_info->msg_aggr_cnt == 0) {
1495*4882a593Smuzhiyun /* reset entry valid status */
1496*4882a593Smuzhiyun for (i = 0; i < MAX_PSTS_MSG_AGGR_NUM; i++) {
1497*4882a593Smuzhiyun ppdu_info->msg_aggr_buf[i].vld = false;
1498*4882a593Smuzhiyun }
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun /* copy to the buf */
1501*4882a593Smuzhiyun psts_ntfy = &ppdu_info->msg_aggr_buf[ppdu_info->msg_aggr_cnt];
1502*4882a593Smuzhiyun psts_ntfy->frame_type = ppdu_sts_ent->frame_type;
1503*4882a593Smuzhiyun _os_mem_cpy(phl_info->phl_com->drv_priv,
1504*4882a593Smuzhiyun &psts_ntfy->phy_info,
1505*4882a593Smuzhiyun &ppdu_sts_ent->phy_info,
1506*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_phy_info));
1507*4882a593Smuzhiyun _os_mem_cpy(phl_info->phl_com->drv_priv,
1508*4882a593Smuzhiyun psts_ntfy->src_mac_addr,
1509*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr,
1510*4882a593Smuzhiyun MAC_ADDRESS_LENGTH);
1511*4882a593Smuzhiyun psts_ntfy->vld = true;
1512*4882a593Smuzhiyun
1513*4882a593Smuzhiyun /* update counter */
1514*4882a593Smuzhiyun ppdu_info->msg_aggr_cnt++;
1515*4882a593Smuzhiyun if (ppdu_info->msg_aggr_cnt >= MAX_PSTS_MSG_AGGR_NUM) {
1516*4882a593Smuzhiyun ppdu_info->msg_aggr_cnt = 0;
1517*4882a593Smuzhiyun ret = true;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun return ret;
1521*4882a593Smuzhiyun }
1522*4882a593Smuzhiyun #endif
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun void
phl_rx_proc_ppdu_sts(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)1525*4882a593Smuzhiyun phl_rx_proc_ppdu_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *phl_rx)
1526*4882a593Smuzhiyun {
1527*4882a593Smuzhiyun u8 i = 0;
1528*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_info *ppdu_info = NULL;
1529*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent = NULL;
1530*4882a593Smuzhiyun struct rtw_phl_stainfo_t *psta = NULL;
1531*4882a593Smuzhiyun #ifdef CONFIG_PHY_INFO_NTFY
1532*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ntfy *psts_ntfy;
1533*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
1534*4882a593Smuzhiyun #endif
1535*4882a593Smuzhiyun enum phl_band_idx band = HW_BAND_0;
1536*4882a593Smuzhiyun struct rtw_rssi_info *rssi_sts;
1537*4882a593Smuzhiyun
1538*4882a593Smuzhiyun if ((NULL == phl_info) || (NULL == phl_rx))
1539*4882a593Smuzhiyun return;
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
1542*4882a593Smuzhiyun ppdu_info = &phl_info->phl_com->ppdu_sts_info;
1543*4882a593Smuzhiyun ppdu_sts_ent = &ppdu_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun if (false == ppdu_sts_ent->valid)
1546*4882a593Smuzhiyun return;
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun if (true == ppdu_sts_ent->phl_done)
1549*4882a593Smuzhiyun return;
1550*4882a593Smuzhiyun
1551*4882a593Smuzhiyun ppdu_sts_ent->phl_done = true;
1552*4882a593Smuzhiyun
1553*4882a593Smuzhiyun /* update phl self varibles */
1554*4882a593Smuzhiyun for(i = 0 ; i < ppdu_sts_ent->usr_num; i++) {
1555*4882a593Smuzhiyun if (ppdu_sts_ent->sta[i].vld) {
1556*4882a593Smuzhiyun psta = rtw_phl_get_stainfo_by_macid(phl_info,
1557*4882a593Smuzhiyun ppdu_sts_ent->sta[i].macid);
1558*4882a593Smuzhiyun if (psta == NULL)
1559*4882a593Smuzhiyun continue;
1560*4882a593Smuzhiyun rssi_sts = &psta->hal_sta->rssi_stat;
1561*4882a593Smuzhiyun STA_UPDATE_MA_RSSI_FAST(rssi_sts->ma_rssi, ppdu_sts_ent->phy_info.rssi);
1562*4882a593Smuzhiyun /* update (re)associate req/resp pkt rssi */
1563*4882a593Smuzhiyun if (RTW_IS_ASOC_PKT(ppdu_sts_ent->frame_type)) {
1564*4882a593Smuzhiyun rssi_sts->assoc_rssi =
1565*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi;
1566*4882a593Smuzhiyun }
1567*4882a593Smuzhiyun
1568*4882a593Smuzhiyun if (RTW_IS_BEACON_OR_PROBE_RESP_PKT(
1569*4882a593Smuzhiyun ppdu_sts_ent->frame_type)) {
1570*4882a593Smuzhiyun if (0 == rssi_sts->ma_rssi_mgnt) {
1571*4882a593Smuzhiyun rssi_sts->ma_rssi_mgnt =
1572*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi;
1573*4882a593Smuzhiyun } else {
1574*4882a593Smuzhiyun STA_UPDATE_MA_RSSI_FAST(
1575*4882a593Smuzhiyun rssi_sts->ma_rssi_mgnt,
1576*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi);
1577*4882a593Smuzhiyun }
1578*4882a593Smuzhiyun }
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun else {
1581*4882a593Smuzhiyun if (RTW_IS_ASOC_REQ_PKT(ppdu_sts_ent->frame_type) &&
1582*4882a593Smuzhiyun (ppdu_sts_ent->usr_num == 1)) {
1583*4882a593Smuzhiyun psta = rtw_phl_get_stainfo_by_addr_ex(phl_info,
1584*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr);
1585*4882a593Smuzhiyun if (psta) {
1586*4882a593Smuzhiyun psta->hal_sta->rssi_stat.assoc_rssi =
1587*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi;
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun #ifdef DBG_AP_CLIENT_ASSOC_RSSI
1590*4882a593Smuzhiyun PHL_INFO("%s [Rx-ASOC_REQ] - macid:%d, MAC-Addr:%02x-%02x-%02x-%02x-%02x-%02x, assoc_rssi:%d\n",
1591*4882a593Smuzhiyun __func__,
1592*4882a593Smuzhiyun psta->macid,
1593*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[0],
1594*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[1],
1595*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[2],
1596*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[3],
1597*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[4],
1598*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[5],
1599*4882a593Smuzhiyun psta->hal_sta->rssi_stat.assoc_rssi);
1600*4882a593Smuzhiyun #endif
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun }
1603*4882a593Smuzhiyun }
1604*4882a593Smuzhiyun }
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun #ifdef CONFIG_PHY_INFO_NTFY
1607*4882a593Smuzhiyun /*2. prepare and send psts notify to core */
1608*4882a593Smuzhiyun if((RTW_FRAME_TYPE_BEACON == ppdu_sts_ent->frame_type) ||
1609*4882a593Smuzhiyun (RTW_FRAME_TYPE_PROBE_RESP == ppdu_sts_ent->frame_type)) {
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun if (false == _phl_rx_proc_aggr_psts_ntfy(phl_info,
1612*4882a593Smuzhiyun ppdu_sts_ent)) {
1613*4882a593Smuzhiyun return;
1614*4882a593Smuzhiyun }
1615*4882a593Smuzhiyun
1616*4882a593Smuzhiyun /* send aggr psts ntfy*/
1617*4882a593Smuzhiyun psts_ntfy = (struct rtw_phl_ppdu_sts_ntfy *)_os_kmem_alloc(d,
1618*4882a593Smuzhiyun MAX_PSTS_MSG_AGGR_NUM * sizeof(struct rtw_phl_ppdu_sts_ntfy));
1619*4882a593Smuzhiyun if (psts_ntfy == NULL) {
1620*4882a593Smuzhiyun PHL_ERR("%s: alloc ppdu sts for ntfy fail.\n", __func__);
1621*4882a593Smuzhiyun return;
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun _os_mem_cpy(phl_info->phl_com->drv_priv,
1625*4882a593Smuzhiyun psts_ntfy,
1626*4882a593Smuzhiyun &ppdu_info->msg_aggr_buf,
1627*4882a593Smuzhiyun (MAX_PSTS_MSG_AGGR_NUM *
1628*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_sts_ntfy)));
1629*4882a593Smuzhiyun
1630*4882a593Smuzhiyun msg.inbuf = (u8 *)psts_ntfy;
1631*4882a593Smuzhiyun msg.inlen = (MAX_PSTS_MSG_AGGR_NUM *
1632*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_sts_ntfy));
1633*4882a593Smuzhiyun SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PSTS);
1634*4882a593Smuzhiyun SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_RX_PSTS);
1635*4882a593Smuzhiyun attr.completion.completion = _phl_rx_post_proc_ppdu_sts;
1636*4882a593Smuzhiyun attr.completion.priv = phl_info;
1637*4882a593Smuzhiyun if (phl_msg_hub_send(phl_info, &attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
1638*4882a593Smuzhiyun PHL_ERR("%s: send msg_hub failed\n", __func__);
1639*4882a593Smuzhiyun _os_kmem_free(d, psts_ntfy,
1640*4882a593Smuzhiyun (MAX_PSTS_MSG_AGGR_NUM *
1641*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_sts_ntfy)));
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun }
1644*4882a593Smuzhiyun #endif
1645*4882a593Smuzhiyun }
1646*4882a593Smuzhiyun
/*
 * Account one wp-report tx status for @macid on @ac_queue: bump the
 * station's total ok/fail counters and the per-AC-queue breakdown
 * (retry-limit / lifetime-drop / macid-drop), under the tx_sts_lock.
 */
void phl_rx_wp_report_record_sts(struct phl_info_t *phl_info,
				 u8 macid, u16 ac_queue, u8 txsts)
{
	struct rtw_phl_stainfo_t *phl_sta = NULL;
	struct rtw_hal_stainfo_t *hal_sta = NULL;
	struct rtw_wp_rpt_stats *rpt = NULL;
	void *drv = phl_to_drvpriv(phl_info);

	phl_sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
	if (NULL == phl_sta) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s: PHL_STA not found\n",
			  __FUNCTION__);
		return;
	}

	hal_sta = phl_sta->hal_sta;
	if (hal_sta->trx_stat.wp_rpt_stats == NULL) {
		PHL_ERR("rtp_stats NULL\n");
		return;
	}
	/* Record Per ac queue statistics */
	rpt = &hal_sta->trx_stat.wp_rpt_stats[ac_queue];

	_os_spinlock(drv, &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
	if (TX_STATUS_TX_DONE == txsts) {
		/* total + per-queue tx ok */
		hal_sta->trx_stat.tx_ok_cnt++;
		rpt->tx_ok_cnt++;
	} else {
		/* total tx fail, then classify the per-queue reason */
		hal_sta->trx_stat.tx_fail_cnt++;
		switch (txsts) {
		case TX_STATUS_TX_FAIL_REACH_RTY_LMT:
			rpt->rty_fail_cnt++;
			break;
		case TX_STATUS_TX_FAIL_LIFETIME_DROP:
			rpt->lifetime_drop_cnt++;
			break;
		case TX_STATUS_TX_FAIL_MACID_DROP:
			rpt->macid_drop_cnt++;
			break;
		default:
			break;
		}
	}
	_os_spinunlock(drv, &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);

	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,"macid: %u, ac_queue: %u, tx_ok_cnt: %u, rty_fail_cnt: %u, "
		  "lifetime_drop_cnt: %u, macid_drop_cnt: %u\n"
		  , macid, ac_queue, rpt->tx_ok_cnt, rpt->rty_fail_cnt
		  , rpt->lifetime_drop_cnt, rpt->macid_drop_cnt);
	PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,"totoal tx ok: %u \n totoal tx fail: %u\n"
		  , hal_sta->trx_stat.tx_ok_cnt, hal_sta->trx_stat.tx_fail_cnt);
}
1696*4882a593Smuzhiyun
_dump_rx_reorder_info(struct phl_info_t * phl_info,struct rtw_phl_stainfo_t * sta)1697*4882a593Smuzhiyun static void _dump_rx_reorder_info(struct phl_info_t *phl_info,
1698*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
1701*4882a593Smuzhiyun _os_spinlockfg sp_flags;
1702*4882a593Smuzhiyun u8 i;
1703*4882a593Smuzhiyun
1704*4882a593Smuzhiyun PHL_INFO("dump rx reorder buffer info:\n");
1705*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
1706*4882a593Smuzhiyun
1707*4882a593Smuzhiyun _os_spinlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
1708*4882a593Smuzhiyun if (sta->tid_rx[i]) {
1709*4882a593Smuzhiyun PHL_INFO("== tid = %d ==\n", sta->tid_rx[i]->tid);
1710*4882a593Smuzhiyun PHL_INFO("head_seq_num = %d\n",
1711*4882a593Smuzhiyun sta->tid_rx[i]->head_seq_num);
1712*4882a593Smuzhiyun PHL_INFO("stored_mpdu_num = %d\n",
1713*4882a593Smuzhiyun sta->tid_rx[i]->stored_mpdu_num);
1714*4882a593Smuzhiyun PHL_INFO("ssn = %d\n", sta->tid_rx[i]->ssn);
1715*4882a593Smuzhiyun PHL_INFO("buf_size = %d\n", sta->tid_rx[i]->buf_size);
1716*4882a593Smuzhiyun PHL_INFO("started = %d\n", sta->tid_rx[i]->started);
1717*4882a593Smuzhiyun PHL_INFO("removed = %d\n", sta->tid_rx[i]->removed);
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun _os_spinunlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
1720*4882a593Smuzhiyun }
1721*4882a593Smuzhiyun }
1722*4882a593Smuzhiyun
phl_dump_all_sta_rx_info(struct phl_info_t * phl_info)1723*4882a593Smuzhiyun void phl_dump_all_sta_rx_info(struct phl_info_t *phl_info)
1724*4882a593Smuzhiyun {
1725*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = phl_info->phl_com;
1726*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
1727*4882a593Smuzhiyun struct rtw_wifi_role_t *role = NULL;
1728*4882a593Smuzhiyun void *drv = phl_to_drvpriv(phl_info);
1729*4882a593Smuzhiyun struct phl_queue *sta_queue;
1730*4882a593Smuzhiyun _os_spinlockfg sp_flags;
1731*4882a593Smuzhiyun u8 i;
1732*4882a593Smuzhiyun
1733*4882a593Smuzhiyun PHL_INFO("dump all sta rx info:\n");
1734*4882a593Smuzhiyun for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
1735*4882a593Smuzhiyun role = &phl_com->wifi_roles[i];
1736*4882a593Smuzhiyun if (role->active) {
1737*4882a593Smuzhiyun PHL_INFO("wrole idx = %d\n", i);
1738*4882a593Smuzhiyun PHL_INFO("wrole type = %d\n", role->type);
1739*4882a593Smuzhiyun PHL_INFO("wrole mstate = %d\n", role->mstate);
1740*4882a593Smuzhiyun
1741*4882a593Smuzhiyun sta_queue = &role->assoc_sta_queue;
1742*4882a593Smuzhiyun _os_spinlock(drv, &sta_queue->lock, _irq, &sp_flags);
1743*4882a593Smuzhiyun phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
1744*4882a593Smuzhiyun &sta_queue->queue, list) {
1745*4882a593Smuzhiyun PHL_INFO("%s MACID:%d %02x:%02x:%02x:%02x:%02x:%02x \n",
1746*4882a593Smuzhiyun __func__, sta->macid,
1747*4882a593Smuzhiyun sta->mac_addr[0],
1748*4882a593Smuzhiyun sta->mac_addr[1],
1749*4882a593Smuzhiyun sta->mac_addr[2],
1750*4882a593Smuzhiyun sta->mac_addr[3],
1751*4882a593Smuzhiyun sta->mac_addr[4],
1752*4882a593Smuzhiyun sta->mac_addr[5]);
1753*4882a593Smuzhiyun _dump_rx_reorder_info(phl_info, sta);
1754*4882a593Smuzhiyun }
1755*4882a593Smuzhiyun _os_spinunlock(drv, &sta_queue->lock, _irq, &sp_flags);
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun }
1758*4882a593Smuzhiyun }
1759*4882a593Smuzhiyun
/*
 * Request an rx debug dump on @band_idx by enqueuing a MSG_EVT_DBG_RX_DUMP
 * command; fire-and-forget (PHL_CMD_NO_WAIT), failure is only logged.
 */
void phl_rx_dbg_dump(struct phl_info_t *phl_info, u8 band_idx)
{
	enum rtw_phl_status sts;

	sts = phl_cmd_enqueue(phl_info,
			      band_idx,
			      MSG_EVT_DBG_RX_DUMP,
			      NULL,
			      0,
			      NULL,
			      PHL_CMD_NO_WAIT,
			      0);
	if (RTW_PHL_STATUS_SUCCESS != sts)
		PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: cmd enqueue fail!\n",
			  __func__);
}
1777