1*4882a593Smuzhiyun /******************************************************************************
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright(c) 2019 Realtek Corporation.
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify it
6*4882a593Smuzhiyun * under the terms of version 2 of the GNU General Public License as
7*4882a593Smuzhiyun * published by the Free Software Foundation.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful, but WITHOUT
10*4882a593Smuzhiyun * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12*4882a593Smuzhiyun * more details.
13*4882a593Smuzhiyun *
14*4882a593Smuzhiyun *****************************************************************************/
15*4882a593Smuzhiyun #define _PHL_RX_C_
16*4882a593Smuzhiyun #include "phl_headers.h"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun
rtw_phl_query_phl_rx(void * phl)19*4882a593Smuzhiyun struct rtw_phl_rx_pkt *rtw_phl_query_phl_rx(void *phl)
20*4882a593Smuzhiyun {
21*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
22*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
23*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
24*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx = NULL;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun _os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun if (false == list_empty(&rx_pkt_pool->idle)) {
31*4882a593Smuzhiyun phl_rx = list_first_entry(&rx_pkt_pool->idle,
32*4882a593Smuzhiyun struct rtw_phl_rx_pkt, list);
33*4882a593Smuzhiyun list_del(&phl_rx->list);
34*4882a593Smuzhiyun rx_pkt_pool->idle_cnt--;
35*4882a593Smuzhiyun }
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun _os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
38*4882a593Smuzhiyun
39*4882a593Smuzhiyun return phl_rx;
40*4882a593Smuzhiyun }
41*4882a593Smuzhiyun
rtw_phl_is_phl_rx_idle(struct phl_info_t * phl_info)42*4882a593Smuzhiyun u8 rtw_phl_is_phl_rx_idle(struct phl_info_t *phl_info)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
45*4882a593Smuzhiyun u8 res = false;
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun _os_spinlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun if (MAX_PHL_RING_RX_PKT_NUM == rx_pkt_pool->idle_cnt)
52*4882a593Smuzhiyun res = true;
53*4882a593Smuzhiyun else
54*4882a593Smuzhiyun res = false;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun _os_spinunlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun return res;
59*4882a593Smuzhiyun }
60*4882a593Smuzhiyun
phl_dump_rx_stats(struct rtw_stats * stats)61*4882a593Smuzhiyun void phl_dump_rx_stats(struct rtw_stats *stats)
62*4882a593Smuzhiyun {
63*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_XMIT, _PHL_DEBUG_,
64*4882a593Smuzhiyun "Dump Rx statistics\n"
65*4882a593Smuzhiyun "rx_byte_uni = %lld\n"
66*4882a593Smuzhiyun "rx_byte_total = %lld\n"
67*4882a593Smuzhiyun "rx_tp_kbits = %d\n"
68*4882a593Smuzhiyun "last_rx_time_ms = %d\n",
69*4882a593Smuzhiyun stats->rx_byte_uni,
70*4882a593Smuzhiyun stats->rx_byte_total,
71*4882a593Smuzhiyun stats->rx_tp_kbits,
72*4882a593Smuzhiyun stats->last_rx_time_ms);
73*4882a593Smuzhiyun }
74*4882a593Smuzhiyun
phl_reset_rx_stats(struct rtw_stats * stats)75*4882a593Smuzhiyun void phl_reset_rx_stats(struct rtw_stats *stats)
76*4882a593Smuzhiyun {
77*4882a593Smuzhiyun stats->rx_byte_uni = 0;
78*4882a593Smuzhiyun stats->rx_byte_total = 0;
79*4882a593Smuzhiyun stats->rx_tp_kbits = 0;
80*4882a593Smuzhiyun stats->last_rx_time_ms = 0;
81*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = 0;
82*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = 0;
83*4882a593Smuzhiyun stats->rx_traffic.lvl = RTW_TFC_IDLE;
84*4882a593Smuzhiyun stats->rx_traffic.sts = 0;
85*4882a593Smuzhiyun stats->rx_tf_cnt = 0;
86*4882a593Smuzhiyun stats->pre_rx_tf_cnt = 0;
87*4882a593Smuzhiyun }
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun void
phl_rx_traffic_upd(struct rtw_stats * sts)90*4882a593Smuzhiyun phl_rx_traffic_upd(struct rtw_stats *sts)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun u32 tp_k = 0, tp_m = 0;
93*4882a593Smuzhiyun enum rtw_tfc_lvl rx_tfc_lvl = RTW_TFC_IDLE;
94*4882a593Smuzhiyun tp_k = sts->rx_tp_kbits;
95*4882a593Smuzhiyun tp_m = sts->rx_tp_kbits >> 10;
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun if (tp_m >= RX_HIGH_TP_THRES_MBPS)
98*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_HIGH;
99*4882a593Smuzhiyun else if (tp_m >= RX_MID_TP_THRES_MBPS)
100*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_MID;
101*4882a593Smuzhiyun else if (tp_m >= RX_LOW_TP_THRES_MBPS)
102*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_LOW;
103*4882a593Smuzhiyun else if (tp_k >= RX_ULTRA_LOW_TP_THRES_KBPS)
104*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_ULTRA_LOW;
105*4882a593Smuzhiyun else
106*4882a593Smuzhiyun rx_tfc_lvl = RTW_TFC_IDLE;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun if (sts->rx_traffic.lvl > rx_tfc_lvl) {
109*4882a593Smuzhiyun sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
110*4882a593Smuzhiyun sts->rx_traffic.lvl = rx_tfc_lvl;
111*4882a593Smuzhiyun } else if (sts->rx_traffic.lvl < rx_tfc_lvl) {
112*4882a593Smuzhiyun sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
113*4882a593Smuzhiyun sts->rx_traffic.lvl = rx_tfc_lvl;
114*4882a593Smuzhiyun } else if (sts->rx_traffic.sts &
115*4882a593Smuzhiyun (TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
116*4882a593Smuzhiyun sts->rx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
117*4882a593Smuzhiyun TRAFFIC_DECREASE);
118*4882a593Smuzhiyun }
119*4882a593Smuzhiyun }
120*4882a593Smuzhiyun
phl_update_rx_stats(struct rtw_stats * stats,struct rtw_recv_pkt * rx_pkt)121*4882a593Smuzhiyun void phl_update_rx_stats(struct rtw_stats *stats, struct rtw_recv_pkt *rx_pkt)
122*4882a593Smuzhiyun {
123*4882a593Smuzhiyun u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
124*4882a593Smuzhiyun u64 diff_bits = 0;
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun stats->last_rx_time_ms = cur_time;
127*4882a593Smuzhiyun stats->rx_byte_total += rx_pkt->mdata.pktlen;
128*4882a593Smuzhiyun if (rx_pkt->mdata.bc == 0 && rx_pkt->mdata.mc == 0)
129*4882a593Smuzhiyun stats->rx_byte_uni += rx_pkt->mdata.pktlen;
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun if (0 == stats->rxtp.last_calc_time_ms ||
132*4882a593Smuzhiyun 0 == stats->rxtp.last_calc_bits) {
133*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = stats->last_rx_time_ms;
134*4882a593Smuzhiyun stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
135*4882a593Smuzhiyun } else {
136*4882a593Smuzhiyun if (cur_time >= stats->rxtp.last_calc_time_ms) {
137*4882a593Smuzhiyun diff_t = cur_time - stats->rxtp.last_calc_time_ms;
138*4882a593Smuzhiyun } else {
139*4882a593Smuzhiyun diff_t = RTW_U32_MAX - stats->rxtp.last_calc_time_ms +
140*4882a593Smuzhiyun cur_time + 1;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun if (diff_t > RXTP_CALC_DIFF_MS && stats->rx_byte_uni != 0) {
143*4882a593Smuzhiyun diff_bits = (stats->rx_byte_uni * 8) -
144*4882a593Smuzhiyun stats->rxtp.last_calc_bits;
145*4882a593Smuzhiyun stats->rx_tp_kbits = (u32)_os_division64(diff_bits,
146*4882a593Smuzhiyun diff_t);
147*4882a593Smuzhiyun stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
148*4882a593Smuzhiyun stats->rxtp.last_calc_time_ms = cur_time;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun }
152*4882a593Smuzhiyun
phl_rx_statistics(struct phl_info_t * phl_info,struct rtw_recv_pkt * rx_pkt)153*4882a593Smuzhiyun void phl_rx_statistics(struct phl_info_t *phl_info, struct rtw_recv_pkt *rx_pkt)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = phl_info->phl_com;
156*4882a593Smuzhiyun struct rtw_stats *phl_stats = &phl_com->phl_stats;
157*4882a593Smuzhiyun struct rtw_stats *sta_stats = NULL;
158*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
159*4882a593Smuzhiyun u16 macid = rx_pkt->mdata.macid;
160*4882a593Smuzhiyun
161*4882a593Smuzhiyun if (!phl_macid_is_valid(phl_info, macid))
162*4882a593Smuzhiyun goto dev_stat;
163*4882a593Smuzhiyun
164*4882a593Smuzhiyun sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun if (NULL == sta)
167*4882a593Smuzhiyun goto dev_stat;
168*4882a593Smuzhiyun sta_stats = &sta->stats;
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun phl_update_rx_stats(sta_stats, rx_pkt);
171*4882a593Smuzhiyun dev_stat:
172*4882a593Smuzhiyun phl_update_rx_stats(phl_stats, rx_pkt);
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun
phl_release_phl_rx(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)175*4882a593Smuzhiyun void phl_release_phl_rx(struct phl_info_t *phl_info,
176*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
179*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun _os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
184*4882a593Smuzhiyun _os_mem_set(phl_to_drvpriv(phl_info), &phl_rx->r, 0, sizeof(phl_rx->r));
185*4882a593Smuzhiyun phl_rx->type = RTW_RX_TYPE_MAX;
186*4882a593Smuzhiyun phl_rx->rxbuf_ptr = NULL;
187*4882a593Smuzhiyun INIT_LIST_HEAD(&phl_rx->list);
188*4882a593Smuzhiyun list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
189*4882a593Smuzhiyun rx_pkt_pool->idle_cnt++;
190*4882a593Smuzhiyun _os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun
phl_free_recv_pkt_pool(struct phl_info_t * phl_info)193*4882a593Smuzhiyun static void phl_free_recv_pkt_pool(struct phl_info_t *phl_info)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
196*4882a593Smuzhiyun u32 buf_len = 0;
197*4882a593Smuzhiyun FUNCIN();
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
200*4882a593Smuzhiyun if (NULL != rx_pkt_pool) {
201*4882a593Smuzhiyun _os_spinlock_free(phl_to_drvpriv(phl_info),
202*4882a593Smuzhiyun &rx_pkt_pool->idle_lock);
203*4882a593Smuzhiyun _os_spinlock_free(phl_to_drvpriv(phl_info),
204*4882a593Smuzhiyun &rx_pkt_pool->busy_lock);
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun buf_len = sizeof(*rx_pkt_pool);
207*4882a593Smuzhiyun _os_mem_free(phl_to_drvpriv(phl_info), rx_pkt_pool, buf_len);
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun FUNCOUT();
211*4882a593Smuzhiyun }
212*4882a593Smuzhiyun
/* Tear down the rx path. Counterpart of phl_rx_init(): currently only
 * the rx packet pool is allocated, so only that is released here.
 * TODO: rx reorder deinit
 * TODO: peer info deinit */
void phl_rx_deinit(struct phl_info_t *phl_info)
{
	phl_free_recv_pkt_pool(phl_info);
}
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun
phl_alloc_recv_pkt_pool(struct phl_info_t * phl_info)223*4882a593Smuzhiyun static enum rtw_phl_status phl_alloc_recv_pkt_pool(struct phl_info_t *phl_info)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
226*4882a593Smuzhiyun struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
227*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx = NULL;
228*4882a593Smuzhiyun u32 buf_len = 0, i = 0;
229*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun buf_len = sizeof(*rx_pkt_pool);
232*4882a593Smuzhiyun rx_pkt_pool = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun if (NULL != rx_pkt_pool) {
235*4882a593Smuzhiyun _os_mem_set(phl_to_drvpriv(phl_info), rx_pkt_pool, 0, buf_len);
236*4882a593Smuzhiyun INIT_LIST_HEAD(&rx_pkt_pool->idle);
237*4882a593Smuzhiyun INIT_LIST_HEAD(&rx_pkt_pool->busy);
238*4882a593Smuzhiyun _os_spinlock_init(phl_to_drvpriv(phl_info),
239*4882a593Smuzhiyun &rx_pkt_pool->idle_lock);
240*4882a593Smuzhiyun _os_spinlock_init(phl_to_drvpriv(phl_info),
241*4882a593Smuzhiyun &rx_pkt_pool->busy_lock);
242*4882a593Smuzhiyun rx_pkt_pool->idle_cnt = 0;
243*4882a593Smuzhiyun
244*4882a593Smuzhiyun for (i = 0; i < MAX_PHL_RING_RX_PKT_NUM; i++) {
245*4882a593Smuzhiyun phl_rx = &rx_pkt_pool->phl_rx[i];
246*4882a593Smuzhiyun INIT_LIST_HEAD(&phl_rx->list);
247*4882a593Smuzhiyun list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
248*4882a593Smuzhiyun rx_pkt_pool->idle_cnt++;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun phl_info->rx_pkt_pool = rx_pkt_pool;
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun pstatus = RTW_PHL_STATUS_SUCCESS;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun if (RTW_PHL_STATUS_SUCCESS != pstatus)
257*4882a593Smuzhiyun phl_free_recv_pkt_pool(phl_info);
258*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun return pstatus;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
phl_rx_init(struct phl_info_t * phl_info)263*4882a593Smuzhiyun enum rtw_phl_status phl_rx_init(struct phl_info_t *phl_info)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun enum rtw_phl_status status;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun /* Allocate rx packet pool */
268*4882a593Smuzhiyun status = phl_alloc_recv_pkt_pool(phl_info);
269*4882a593Smuzhiyun if (status != RTW_PHL_STATUS_SUCCESS)
270*4882a593Smuzhiyun return status;
271*4882a593Smuzhiyun
272*4882a593Smuzhiyun /* TODO: Peer info init */
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /* TODO: Rx reorder init */
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun return RTW_PHL_STATUS_SUCCESS;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
phl_recycle_rx_buf(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)280*4882a593Smuzhiyun void phl_recycle_rx_buf(struct phl_info_t *phl_info,
281*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx)
282*4882a593Smuzhiyun {
283*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
284*4882a593Smuzhiyun struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
285*4882a593Smuzhiyun struct rtw_rx_buf *rx_buf = NULL;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun do {
288*4882a593Smuzhiyun if (NULL == phl_rx) {
289*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]phl_rx is NULL!\n");
290*4882a593Smuzhiyun break;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun rx_buf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
294*4882a593Smuzhiyun
295*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "[4] %s:: [%p]\n",
296*4882a593Smuzhiyun __FUNCTION__, rx_buf);
297*4882a593Smuzhiyun if (phl_rx->rxbuf_ptr) {
298*4882a593Smuzhiyun pstatus = hci_trx_ops->recycle_rx_buf(phl_info, rx_buf,
299*4882a593Smuzhiyun phl_rx->r.mdata.dma_ch,
300*4882a593Smuzhiyun phl_rx->type);
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun if (RTW_PHL_STATUS_SUCCESS != pstatus && phl_rx->rxbuf_ptr)
303*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]recycle hci rx buf error!\n");
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun phl_release_phl_rx(phl_info, phl_rx);
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun } while (false);
308*4882a593Smuzhiyun
309*4882a593Smuzhiyun }
310*4882a593Smuzhiyun
_phl_indic_new_rxpkt(struct phl_info_t * phl_info)311*4882a593Smuzhiyun void _phl_indic_new_rxpkt(struct phl_info_t *phl_info)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
314*4882a593Smuzhiyun struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
315*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
316*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
317*4882a593Smuzhiyun
318*4882a593Smuzhiyun do {
319*4882a593Smuzhiyun _os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
320*4882a593Smuzhiyun evt_info->evt_bitmap |= RTW_PHL_EVT_RX;
321*4882a593Smuzhiyun _os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun pstatus = phl_schedule_handler(phl_info->phl_com,
324*4882a593Smuzhiyun &phl_info->phl_event_handler);
325*4882a593Smuzhiyun } while (false);
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun if (RTW_PHL_STATUS_SUCCESS != pstatus)
328*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING] Trigger rx indic event fail!\n");
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
333*4882a593Smuzhiyun phl_info->rx_new_pending = 0;
334*4882a593Smuzhiyun #endif
335*4882a593Smuzhiyun }
336*4882a593Smuzhiyun
_phl_record_rx_stats(struct rtw_recv_pkt * recvpkt)337*4882a593Smuzhiyun void _phl_record_rx_stats(struct rtw_recv_pkt *recvpkt)
338*4882a593Smuzhiyun {
339*4882a593Smuzhiyun if(NULL == recvpkt)
340*4882a593Smuzhiyun return;
341*4882a593Smuzhiyun if (recvpkt->tx_sta)
342*4882a593Smuzhiyun recvpkt->tx_sta->stats.rx_rate = recvpkt->mdata.rx_rate;
343*4882a593Smuzhiyun }
344*4882a593Smuzhiyun
_phl_add_rx_pkt(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)345*4882a593Smuzhiyun enum rtw_phl_status _phl_add_rx_pkt(struct phl_info_t *phl_info,
346*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx)
347*4882a593Smuzhiyun {
348*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
349*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
350*4882a593Smuzhiyun struct rtw_recv_pkt *recvpkt = &phl_rx->r;
351*4882a593Smuzhiyun u16 ring_res = 0, wptr = 0, rptr = 0;
352*4882a593Smuzhiyun void *drv = phl_to_drvpriv(phl_info);
353*4882a593Smuzhiyun
354*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
355*4882a593Smuzhiyun _os_spinlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
356*4882a593Smuzhiyun
357*4882a593Smuzhiyun if (!ring)
358*4882a593Smuzhiyun goto out;
359*4882a593Smuzhiyun
360*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(drv, &ring->phl_idx);
361*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(drv, &ring->core_idx);
362*4882a593Smuzhiyun
363*4882a593Smuzhiyun ring_res = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RX_RING_ENTRY_NUM);
364*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
365*4882a593Smuzhiyun "[3] _phl_add_rx_pkt::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
366*4882a593Smuzhiyun _os_atomic_read(drv, &ring->phl_idx),
367*4882a593Smuzhiyun _os_atomic_read(drv, &ring->core_idx),
368*4882a593Smuzhiyun ring_res);
369*4882a593Smuzhiyun if (ring_res <= 0) {
370*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no ring resource to add new rx pkt!\n");
371*4882a593Smuzhiyun pstatus = RTW_PHL_STATUS_RESOURCE;
372*4882a593Smuzhiyun goto out;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun
375*4882a593Smuzhiyun wptr = wptr + 1;
376*4882a593Smuzhiyun if (wptr >= MAX_PHL_RX_RING_ENTRY_NUM)
377*4882a593Smuzhiyun wptr = 0;
378*4882a593Smuzhiyun
379*4882a593Smuzhiyun ring->entry[wptr] = recvpkt;
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun if (wptr)
382*4882a593Smuzhiyun _os_atomic_inc(drv, &ring->phl_idx);
383*4882a593Smuzhiyun else
384*4882a593Smuzhiyun _os_atomic_set(drv, &ring->phl_idx, 0);
385*4882a593Smuzhiyun
386*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
387*4882a593Smuzhiyun phl_info->rx_new_pending = 1;
388*4882a593Smuzhiyun pstatus = RTW_PHL_STATUS_SUCCESS;
389*4882a593Smuzhiyun #endif
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun out:
392*4882a593Smuzhiyun _os_spinunlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
393*4882a593Smuzhiyun
394*4882a593Smuzhiyun if(pstatus == RTW_PHL_STATUS_SUCCESS)
395*4882a593Smuzhiyun _phl_record_rx_stats(recvpkt);
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return pstatus;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun void
phl_sta_ps_enter(struct phl_info_t * phl_info,struct rtw_phl_stainfo_t * sta,struct rtw_wifi_role_t * role)403*4882a593Smuzhiyun phl_sta_ps_enter(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
404*4882a593Smuzhiyun struct rtw_wifi_role_t *role)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
407*4882a593Smuzhiyun /* enum rtw_hal_status hal_status; */
408*4882a593Smuzhiyun struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun _os_atomic_set(d, &sta->ps_sta, 1);
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
413*4882a593Smuzhiyun "STA %02X:%02X:%02X:%02X:%02X:%02X enters PS mode, AID=%u, macid=%u, sta=0x%p\n",
414*4882a593Smuzhiyun sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
415*4882a593Smuzhiyun sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
416*4882a593Smuzhiyun sta->aid, sta->macid, sta);
417*4882a593Smuzhiyun
418*4882a593Smuzhiyun /* TODO: comment out because beacon may stop if we do this frequently */
419*4882a593Smuzhiyun /* hal_status = rtw_hal_set_macid_pause(phl_info->hal, */
420*4882a593Smuzhiyun /* sta->macid, true); */
421*4882a593Smuzhiyun /* if (RTW_HAL_STATUS_SUCCESS != hal_status) { */
422*4882a593Smuzhiyun /* PHL_WARN("%s(): failed to pause macid tx, macid=%u\n", */
423*4882a593Smuzhiyun /* __FUNCTION__, sta->macid); */
424*4882a593Smuzhiyun /* } */
425*4882a593Smuzhiyun
426*4882a593Smuzhiyun if (ops->ap_ps_sta_ps_change)
427*4882a593Smuzhiyun ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, true);
428*4882a593Smuzhiyun }
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun void
phl_sta_ps_exit(struct phl_info_t * phl_info,struct rtw_phl_stainfo_t * sta,struct rtw_wifi_role_t * role)431*4882a593Smuzhiyun phl_sta_ps_exit(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
432*4882a593Smuzhiyun struct rtw_wifi_role_t *role)
433*4882a593Smuzhiyun {
434*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
435*4882a593Smuzhiyun /* enum rtw_hal_status hal_status; */
436*4882a593Smuzhiyun struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
439*4882a593Smuzhiyun "STA %02X:%02X:%02X:%02X:%02X:%02X leaves PS mode, AID=%u, macid=%u, sta=0x%p\n",
440*4882a593Smuzhiyun sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
441*4882a593Smuzhiyun sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
442*4882a593Smuzhiyun sta->aid, sta->macid, sta);
443*4882a593Smuzhiyun
444*4882a593Smuzhiyun _os_atomic_set(d, &sta->ps_sta, 0);
445*4882a593Smuzhiyun
446*4882a593Smuzhiyun /* TODO: comment out because beacon may stop if we do this frequently */
447*4882a593Smuzhiyun /* hal_status = rtw_hal_set_macid_pause(phl_info->hal, */
448*4882a593Smuzhiyun /* sta->macid, false); */
449*4882a593Smuzhiyun /* if (RTW_HAL_STATUS_SUCCESS != hal_status) { */
450*4882a593Smuzhiyun /* PHL_WARN("%s(): failed to resume macid tx, macid=%u\n", */
451*4882a593Smuzhiyun /* __FUNCTION__, sta->macid); */
452*4882a593Smuzhiyun /* } */
453*4882a593Smuzhiyun
454*4882a593Smuzhiyun if (ops->ap_ps_sta_ps_change)
455*4882a593Smuzhiyun ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, false);
456*4882a593Smuzhiyun }
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun void
phl_rx_handle_sta_process(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * rx)459*4882a593Smuzhiyun phl_rx_handle_sta_process(struct phl_info_t *phl_info,
460*4882a593Smuzhiyun struct rtw_phl_rx_pkt *rx)
461*4882a593Smuzhiyun {
462*4882a593Smuzhiyun struct rtw_r_meta_data *m = &rx->r.mdata;
463*4882a593Smuzhiyun struct rtw_wifi_role_t *role = NULL;
464*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
465*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun if (!phl_info->phl_com->dev_sw_cap.ap_ps)
468*4882a593Smuzhiyun return;
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun if (m->addr_cam_vld) {
471*4882a593Smuzhiyun sta = rtw_phl_get_stainfo_by_macid(phl_info, m->macid);
472*4882a593Smuzhiyun if (sta && sta->wrole)
473*4882a593Smuzhiyun role = sta->wrole;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun if (!sta) {
477*4882a593Smuzhiyun role = phl_get_wrole_by_addr(phl_info, m->mac_addr);
478*4882a593Smuzhiyun if (role)
479*4882a593Smuzhiyun sta = rtw_phl_get_stainfo_by_addr(phl_info,
480*4882a593Smuzhiyun role, m->ta);
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun if (!role || !sta)
484*4882a593Smuzhiyun return;
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun rx->r.tx_sta = sta;
487*4882a593Smuzhiyun rx->r.rx_role = role;
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_,
490*4882a593Smuzhiyun "ap-ps: more_frag=%u, frame_type=%u, role_type=%d, pwr_bit=%u, seq=%u\n",
491*4882a593Smuzhiyun m->more_frag, m->frame_type, role->type, m->pwr_bit, m->seq);
492*4882a593Smuzhiyun
493*4882a593Smuzhiyun /*
494*4882a593Smuzhiyun * Change STA PS state based on the PM bit in frame control
495*4882a593Smuzhiyun */
496*4882a593Smuzhiyun if (!m->more_frag &&
497*4882a593Smuzhiyun (m->frame_type == RTW_FRAME_TYPE_DATA ||
498*4882a593Smuzhiyun m->frame_type == RTW_FRAME_TYPE_CTRL) &&
499*4882a593Smuzhiyun (role->type == PHL_RTYPE_AP ||
500*4882a593Smuzhiyun role->type == PHL_RTYPE_P2P_GO)) {
501*4882a593Smuzhiyun if (_os_atomic_read(d, &sta->ps_sta)) {
502*4882a593Smuzhiyun if (!m->pwr_bit)
503*4882a593Smuzhiyun phl_sta_ps_exit(phl_info, sta, role);
504*4882a593Smuzhiyun } else {
505*4882a593Smuzhiyun if (m->pwr_bit)
506*4882a593Smuzhiyun phl_sta_ps_enter(phl_info, sta, role);
507*4882a593Smuzhiyun }
508*4882a593Smuzhiyun }
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun
511*4882a593Smuzhiyun void
phl_handle_rx_frame_list(struct phl_info_t * phl_info,_os_list * frames)512*4882a593Smuzhiyun phl_handle_rx_frame_list(struct phl_info_t *phl_info,
513*4882a593Smuzhiyun _os_list *frames)
514*4882a593Smuzhiyun {
515*4882a593Smuzhiyun struct rtw_phl_rx_pkt *pos, *n;
516*4882a593Smuzhiyun enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
517*4882a593Smuzhiyun struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun phl_list_for_loop_safe(pos, n, struct rtw_phl_rx_pkt, frames, list) {
520*4882a593Smuzhiyun list_del(&pos->list);
521*4882a593Smuzhiyun phl_rx_handle_sta_process(phl_info, pos);
522*4882a593Smuzhiyun status = _phl_add_rx_pkt(phl_info, pos);
523*4882a593Smuzhiyun if (RTW_PHL_STATUS_RESOURCE == status) {
524*4882a593Smuzhiyun hci_trx_ops->recycle_rx_pkt(phl_info, pos);
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun }
527*4882a593Smuzhiyun #ifndef PHL_RX_BATCH_IND
528*4882a593Smuzhiyun _phl_indic_new_rxpkt(phl_info);
529*4882a593Smuzhiyun #endif
530*4882a593Smuzhiyun
531*4882a593Smuzhiyun }
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun
534*4882a593Smuzhiyun #define SEQ_MODULO 0x1000
535*4882a593Smuzhiyun #define SEQ_MASK 0xfff
536*4882a593Smuzhiyun
/* True iff @sq1 precedes @sq2 in the 12-bit modular sequence space. */
static inline int seq_less(u16 sq1, u16 sq2)
{
	u16 delta = (sq1 - sq2) & SEQ_MASK;

	return delta > (SEQ_MODULO >> 1);
}
541*4882a593Smuzhiyun
/* Next sequence number, wrapping at the 12-bit boundary. */
static inline u16 seq_inc(u16 sq)
{
	u16 nxt = (sq + 1) & SEQ_MASK;

	return nxt;
}
546*4882a593Smuzhiyun
/* Modular distance @sq1 - @sq2 in 12-bit sequence space. */
static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	u16 dist = (sq1 - sq2) & SEQ_MASK;

	return dist;
}
551*4882a593Smuzhiyun
/* Slot in the reorder ring buffer for sequence number @seq, measured
 * from the session start sequence number. */
static inline u16 reorder_index(struct phl_tid_ampdu_rx *r, u16 seq)
{
	u16 offset = seq_sub(seq, r->ssn);

	return offset % r->buf_size;
}
556*4882a593Smuzhiyun
/* Move the frame in reorder slot @index (if occupied) onto @frames for
 * delivery. The head sequence number advances whether or not the slot
 * actually held a frame. */
static void phl_release_reorder_frame(struct phl_info_t *phl_info,
				      struct phl_tid_ampdu_rx *r,
				      int index, _os_list *frames)
{
	struct rtw_phl_rx_pkt *pkt = r->reorder_buf[index];

	if (pkt != NULL) {
		r->reorder_buf[index] = NULL;
		r->stored_mpdu_num--;
		list_add_tail(&pkt->list, frames);
	}

	r->head_seq_num = seq_inc(r->head_seq_num);
}
574*4882a593Smuzhiyun
575*4882a593Smuzhiyun #define HT_RX_REORDER_BUF_TIMEOUT_MS 500
576*4882a593Smuzhiyun
577*4882a593Smuzhiyun /*
578*4882a593Smuzhiyun * If the MPDU at head_seq_num is ready,
579*4882a593Smuzhiyun * 1. release all subsequent MPDUs with consecutive SN and
580*4882a593Smuzhiyun * 2. if there's MPDU that is ready but left in the reordering
581*4882a593Smuzhiyun * buffer, find it and set reorder timer according to its reorder
582*4882a593Smuzhiyun * time
583*4882a593Smuzhiyun *
584*4882a593Smuzhiyun * If the MPDU at head_seq_num is not ready and there is no MPDU ready
585*4882a593Smuzhiyun * in the buffer at all, return.
586*4882a593Smuzhiyun *
587*4882a593Smuzhiyun * If the MPDU at head_seq_num is not ready but there is some MPDU in
588*4882a593Smuzhiyun * the buffer that is ready, check whether any frames in the reorder
589*4882a593Smuzhiyun * buffer have timed out in the following way.
590*4882a593Smuzhiyun *
591*4882a593Smuzhiyun * Basically, MPDUs that are not ready are purged and MPDUs that are
592*4882a593Smuzhiyun * ready are released.
593*4882a593Smuzhiyun *
594*4882a593Smuzhiyun * The process goes through all the buffer but the one at head_seq_num
595*4882a593Smuzhiyun * unless
596*4882a593Smuzhiyun * - there's a MPDU that is ready AND
597*4882a593Smuzhiyun * - there are one or more buffers that are not ready.
598*4882a593Smuzhiyun * In this case, the process is stopped, the head_seq_num becomes the
599*4882a593Smuzhiyun * first buffer that is not ready and the reorder_timer is reset based
600*4882a593Smuzhiyun * on the reorder_time of that ready MPDU.
601*4882a593Smuzhiyun */
static void phl_reorder_release(struct phl_info_t *phl_info,
				struct phl_tid_ampdu_rx *r, _os_list *frames)
{
	/* ref ieee80211_sta_reorder_release() and wil_reorder_release() */

	int index, i, j;
	u32 cur_time = _os_get_cur_time_ms();

	/* release the buffer until next missing frame */
	index = reorder_index(r, r->head_seq_num);
	if (!r->reorder_buf[index] && r->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		/* skipped counts empty slots between head and the next
		 * stored frame; it starts at 1 for the (empty) head slot */
		int skipped = 1;
		for (j = (index + 1) % r->buf_size; j != index;
		     j = (j + 1) % r->buf_size) {
			if (!r->reorder_buf[j]) {
				skipped++;
				continue;
			}
			/* a frame exists at slot j with holes before it:
			 * if it has not yet aged past the timeout, re-arm
			 * the timer instead of releasing out of order */
			if (skipped && cur_time < r->reorder_time[j] +
			    HT_RX_REORDER_BUF_TIMEOUT_MS)
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % r->buf_size; i != j;
			     i = (i + 1) % r->buf_size)
				phl_recycle_rx_buf(phl_info, r->reorder_buf[i]);

			PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "release an RX reorder frame due to timeout on earlier frames\n");

			phl_release_reorder_frame(phl_info, r, j, frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			r->head_seq_num =
				(r->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (r->reorder_buf[index]) {
		/* in-order case: drain consecutive stored frames from the
		 * head until the first gap */
		phl_release_reorder_frame(phl_info, r, index, frames);
		index = reorder_index(r, r->head_seq_num);
	}

	if (r->stored_mpdu_num) {
		/* frames remain buffered: find the first occupied slot
		 * (NOTE(review): index here is head_seq_num % buf_size, not
		 * reorder_index(); presumably equivalent when ssn is folded
		 * in — confirm) and arm the release timer */
		j = index = r->head_seq_num % r->buf_size;

		for (; j != (index - 1) % r->buf_size;
		     j = (j + 1) % r->buf_size) {
			if (r->reorder_buf[j])
				break;
		}

set_release_timer:

		/* do not arm the timer on a session being torn down */
		if (!r->removed)
			_os_set_timer(r->drv_priv, &r->sta->reorder_timer,
				      HT_RX_REORDER_BUF_TIMEOUT_MS);
	} else {
		/* TODO: implementation of cancel timer on Linux is
		   del_timer_sync(), it can't be called with same spinlock
		   held with the expiration callback, that causes a potential
		   deadlock. */
		_os_cancel_timer_async(r->drv_priv, &r->sta->reorder_timer);
	}
}
671*4882a593Smuzhiyun
phl_sta_rx_reorder_timer_expired(void * t)672*4882a593Smuzhiyun void phl_sta_rx_reorder_timer_expired(void *t)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun /* ref sta_rx_agg_reorder_timer_expired() */
675*4882a593Smuzhiyun
676*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)t;
677*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = sta->wrole->phl_com;
678*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
679*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
680*4882a593Smuzhiyun u8 i = 0;
681*4882a593Smuzhiyun
682*4882a593Smuzhiyun PHL_INFO("Rx reorder timer expired, sta=0x%p\n", sta);
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
685*4882a593Smuzhiyun _os_list frames;
686*4882a593Smuzhiyun
687*4882a593Smuzhiyun INIT_LIST_HEAD(&frames);
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun _os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
690*4882a593Smuzhiyun if (sta->tid_rx[i])
691*4882a593Smuzhiyun phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
692*4882a593Smuzhiyun _os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
693*4882a593Smuzhiyun
694*4882a593Smuzhiyun phl_handle_rx_frame_list(phl_info, &frames);
695*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
696*4882a593Smuzhiyun _phl_indic_new_rxpkt(phl_info);
697*4882a593Smuzhiyun #endif
698*4882a593Smuzhiyun }
699*4882a593Smuzhiyun
700*4882a593Smuzhiyun _os_event_set(drv_priv, &sta->comp_sync);
701*4882a593Smuzhiyun }
702*4882a593Smuzhiyun
static void phl_release_reorder_frames(struct phl_info_t *phl_info,
				       struct phl_tid_ampdu_rx *r,
				       u16 head_seq_num, _os_list *frames)
{
	/* ref ieee80211_release_reorder_frames() and
	   wil_release_reorder_frames() */

	/*
	 * Release every buffered MPDU up to (but not including)
	 * head_seq_num, then move the window head there.  Callers never
	 * pass a head_seq_num preceding r->head_seq_num, so on loop exit
	 * r->head_seq_num == head_seq_num.
	 */
	while (r->stored_mpdu_num &&
	       seq_less(r->head_seq_num, head_seq_num)) {
		int slot = reorder_index(r, r->head_seq_num);

		phl_release_reorder_frame(phl_info, r, slot, frames);
	}
	r->head_seq_num = head_seq_num;
}
725*4882a593Smuzhiyun
rtw_phl_flush_reorder_buf(void * phl,struct rtw_phl_stainfo_t * sta)726*4882a593Smuzhiyun void rtw_phl_flush_reorder_buf(void *phl, struct rtw_phl_stainfo_t *sta)
727*4882a593Smuzhiyun {
728*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
729*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
730*4882a593Smuzhiyun _os_list frames;
731*4882a593Smuzhiyun u8 i = 0;
732*4882a593Smuzhiyun
733*4882a593Smuzhiyun PHL_INFO("%s: sta=0x%p\n", __FUNCTION__, sta);
734*4882a593Smuzhiyun
735*4882a593Smuzhiyun INIT_LIST_HEAD(&frames);
736*4882a593Smuzhiyun
737*4882a593Smuzhiyun _os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
738*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
739*4882a593Smuzhiyun if (sta->tid_rx[i])
740*4882a593Smuzhiyun phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
741*4882a593Smuzhiyun }
742*4882a593Smuzhiyun _os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
743*4882a593Smuzhiyun
744*4882a593Smuzhiyun phl_handle_rx_frame_list(phl_info, &frames);
745*4882a593Smuzhiyun #ifdef PHL_RX_BATCH_IND
746*4882a593Smuzhiyun _phl_indic_new_rxpkt(phl_info);
747*4882a593Smuzhiyun #endif
748*4882a593Smuzhiyun
749*4882a593Smuzhiyun }
750*4882a593Smuzhiyun
static bool phl_manage_sta_reorder_buf(struct phl_info_t *phl_info,
				       struct rtw_phl_rx_pkt *pkt,
				       struct phl_tid_ampdu_rx *r,
				       _os_list *frames)
{
	/* ref ieee80211_sta_manage_reorder_buf() and wil_rx_reorder() */

	/* Places pkt into r's reorder window (or recycles it).
	 * Returns true when pkt was consumed (buffered, released, or
	 * dropped), false when the caller should deliver pkt directly.
	 * Caller must hold the sta's tid_rx_lock. */

	struct rtw_r_meta_data *meta = &pkt->r.mdata;
	u16 mpdu_seq_num = meta->seq;
	u16 head_seq_num, buf_size;
	int index;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	buf_size = r->buf_size;
	head_seq_num = r->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (!r->started) {
		if (seq_less(mpdu_seq_num, head_seq_num))
			return false;
		r->started = true;
	}

	/* first frame after wake-up: resynchronize the window head to it */
	if (r->sleep) {
		PHL_INFO("tid = %d reorder buffer handling after wake up\n",
			 r->tid);
		PHL_INFO("Update head seq(0x%03x) to the first rx seq(0x%03x) after wake up\n",
			 r->head_seq_num, mpdu_seq_num);
		r->head_seq_num = mpdu_seq_num;
		head_seq_num = r->head_seq_num;
		r->sleep = false;
	}

	/* frame with out of date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
			  meta->seq, r->head_seq_num);
		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
		return true;
	}

	/*
	 * If frame the sequence number exceeds our buffering window
	 * size release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		phl_release_reorder_frames(phl_info, r, head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, mpdu_seq_num);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		/* duplicate: slot already holds this sequence number */
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
			  meta->seq, r->head_seq_num);
		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
		return true;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == r->head_seq_num &&
	    r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		return false;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = pkt;
	r->reorder_time[index] = _os_get_cur_time_ms();
	r->stored_mpdu_num++;
	/* see whether this arrival unblocked a run of in-order frames */
	phl_reorder_release(phl_info, r, frames);

	return true;

}
838*4882a593Smuzhiyun
enum rtw_phl_status phl_rx_reorder(struct phl_info_t *phl_info,
				   struct rtw_phl_rx_pkt *phl_rx,
				   _os_list *frames)
{
	/* ref wil_rx_reorder() and ieee80211_rx_reorder_ampdu() */

	/* Entry point of rx reordering: strips the FCS when appended,
	 * decides whether the packet is eligible for reordering, resolves
	 * the station, and feeds the packet into the per-TID reorder
	 * buffer.  Packets not eligible are appended to "frames" as-is.
	 * Returns RTW_PHL_STATUS_FAILURE only when the packet was dropped
	 * and recycled. */

	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_r_meta_data *meta = &phl_rx->r.mdata;
	u16 tid = meta->tid;
	struct rtw_phl_stainfo_t *sta = NULL;
	struct phl_tid_ampdu_rx *r;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	/*
	 * Remove FCS if is is appended
	 * TODO: handle more than one in pkt_list
	 */
	if (phl_info->phl_com->append_fcs) {
		/*
		 * Only last MSDU of A-MSDU includes FCS.
		 * TODO: If A-MSDU cut processing is in HAL, should only deduct
		 * FCS from length of last one of pkt_list. For such case,
		 * phl_rx->r should have pkt_list length.
		 */
		if (!(meta->amsdu_cut && !meta->last_msdu)) {
			if (phl_rx->r.pkt_list[0].length <= 4) {
				PHL_ERR("%s, pkt_list[0].length(%d) too short\n",
					__func__, phl_rx->r.pkt_list[0].length);
				goto drop_frame;
			}
			phl_rx->r.pkt_list[0].length -= 4;
		}
	}

	if (phl_is_mp_mode(phl_info->phl_com))
		goto dont_reorder;

	/* group-addressed frames carry no BA agreement */
	if (meta->bc || meta->mc)
		goto dont_reorder;

	if (!meta->qos)
		goto dont_reorder;

	if (meta->q_null)
		goto dont_reorder;

	/* TODO: check ba policy is either ba or normal */

	/* if the mpdu is fragmented, don't reorder */
	if (meta->more_frag || meta->frag_num) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
			  "Receive QoS Data with more_frag=%u, frag_num=%u\n",
			  meta->more_frag, meta->frag_num);
		goto dont_reorder;
	}

	/* Use MAC ID from address CAM if this packet is address CAM matched */
	if (meta->addr_cam_vld)
		sta = rtw_phl_get_stainfo_by_macid(phl_info, meta->macid);

	/* Otherwise, search STA by TA */
	if (!sta || !sta->wrole) {
		struct rtw_wifi_role_t *wrole;
		wrole = phl_get_wrole_by_addr(phl_info, meta->mac_addr);
		if (wrole)
			sta = rtw_phl_get_stainfo_by_addr(phl_info,
							  wrole, meta->ta);
		if (!wrole || !sta) {
			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
				  "%s(): stainfo or wrole not found, cam=%u, macid=%u\n",
				  __FUNCTION__, meta->addr_cam, meta->macid);
			goto dont_reorder;
		}
	}

	phl_rx->r.tx_sta = sta;
	phl_rx->r.rx_role = sta->wrole;

	rtw_hal_set_sta_rx_sts(sta, false, meta);

	if (tid >= ARRAY_SIZE(sta->tid_rx)) {
		/* log the real bound instead of a hard-coded 8 so the
		 * message stays correct if tid_rx ever changes size */
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "Fail: tid (%u) index out of range (%u)\n",
			  tid, (unsigned int)ARRAY_SIZE(sta->tid_rx));
		goto drop_frame;
	}

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	r = sta->tid_rx[tid];
	if (!r) {
		/* no BA agreement on this TID: deliver in arrival order */
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		goto dont_reorder;
	}

	if (!phl_manage_sta_reorder_buf(phl_info, phl_rx, r, frames)) {
		/* in-order and nothing buffered: caller delivers directly */
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		goto dont_reorder;
	}

	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	return RTW_PHL_STATUS_SUCCESS;

drop_frame:
	hci_trx_ops->recycle_rx_pkt(phl_info, phl_rx);
	return RTW_PHL_STATUS_FAILURE;

dont_reorder:
	list_add_tail(&phl_rx->list, frames);
	return RTW_PHL_STATUS_SUCCESS;
}
949*4882a593Smuzhiyun
950*4882a593Smuzhiyun
phl_check_recv_ring_resource(struct phl_info_t * phl_info)951*4882a593Smuzhiyun u8 phl_check_recv_ring_resource(struct phl_info_t *phl_info)
952*4882a593Smuzhiyun {
953*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
954*4882a593Smuzhiyun u16 avail = 0, wptr = 0, rptr = 0;
955*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
956*4882a593Smuzhiyun
957*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
958*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
959*4882a593Smuzhiyun avail = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RX_RING_ENTRY_NUM);
960*4882a593Smuzhiyun
961*4882a593Smuzhiyun if (0 == avail)
962*4882a593Smuzhiyun return false;
963*4882a593Smuzhiyun else
964*4882a593Smuzhiyun return true;
965*4882a593Smuzhiyun }
966*4882a593Smuzhiyun
dump_phl_rx_ring(void * phl)967*4882a593Smuzhiyun void dump_phl_rx_ring(void *phl)
968*4882a593Smuzhiyun {
969*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
970*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
971*4882a593Smuzhiyun s16 diff = 0;
972*4882a593Smuzhiyun u16 idx = 0, endidx = 0;
973*4882a593Smuzhiyun u16 phl_idx = 0, core_idx = 0;
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "===Dump PHL RX Ring===\n");
976*4882a593Smuzhiyun phl_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.phl_idx);
977*4882a593Smuzhiyun core_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.core_idx);
978*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
979*4882a593Smuzhiyun "core_idx = %d\n"
980*4882a593Smuzhiyun "phl_idx = %d\n",
981*4882a593Smuzhiyun core_idx,
982*4882a593Smuzhiyun phl_idx);
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun diff= phl_idx-core_idx;
985*4882a593Smuzhiyun if(diff < 0)
986*4882a593Smuzhiyun diff= 4096+diff;
987*4882a593Smuzhiyun
988*4882a593Smuzhiyun endidx = diff > 5 ? (core_idx+6): phl_idx;
989*4882a593Smuzhiyun for (idx = core_idx+1; idx < endidx; idx++) {
990*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "entry[%d] = %p\n", idx,
991*4882a593Smuzhiyun phl_info->phl_rx_ring.entry[idx%4096]);
992*4882a593Smuzhiyun }
993*4882a593Smuzhiyun }
994*4882a593Smuzhiyun
995*4882a593Smuzhiyun
phl_event_indicator(void * context)996*4882a593Smuzhiyun void phl_event_indicator(void *context)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
999*4882a593Smuzhiyun struct rtw_phl_handler *phl_handler
1000*4882a593Smuzhiyun = (struct rtw_phl_handler *)phl_container_of(context,
1001*4882a593Smuzhiyun struct rtw_phl_handler,
1002*4882a593Smuzhiyun os_handler);
1003*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
1004*4882a593Smuzhiyun struct rtw_phl_evt_ops *ops = NULL;
1005*4882a593Smuzhiyun struct rtw_evt_info_t *evt_info = NULL;
1006*4882a593Smuzhiyun void *drv_priv = NULL;
1007*4882a593Smuzhiyun enum rtw_phl_evt evt_bitmap = 0;
1008*4882a593Smuzhiyun FUNCIN_WSTS(sts);
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun if (NULL != phl_info) {
1011*4882a593Smuzhiyun ops = &phl_info->phl_com->evt_ops;
1012*4882a593Smuzhiyun evt_info = &phl_info->phl_com->evt_info;
1013*4882a593Smuzhiyun drv_priv = phl_to_drvpriv(phl_info);
1014*4882a593Smuzhiyun
1015*4882a593Smuzhiyun _os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1016*4882a593Smuzhiyun evt_bitmap = evt_info->evt_bitmap;
1017*4882a593Smuzhiyun evt_info->evt_bitmap = 0;
1018*4882a593Smuzhiyun _os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1019*4882a593Smuzhiyun
1020*4882a593Smuzhiyun if (RTW_PHL_EVT_RX & evt_bitmap) {
1021*4882a593Smuzhiyun if (NULL != ops->rx_process) {
1022*4882a593Smuzhiyun sts = ops->rx_process(drv_priv);
1023*4882a593Smuzhiyun }
1024*4882a593Smuzhiyun dump_phl_rx_ring(phl_info);
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun }
1027*4882a593Smuzhiyun FUNCOUT_WSTS(sts);
1028*4882a593Smuzhiyun
1029*4882a593Smuzhiyun }
1030*4882a593Smuzhiyun
_phl_rx_statistics_reset(struct phl_info_t * phl_info)1031*4882a593Smuzhiyun void _phl_rx_statistics_reset(struct phl_info_t *phl_info)
1032*4882a593Smuzhiyun {
1033*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = phl_info->phl_com;
1034*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
1035*4882a593Smuzhiyun struct rtw_wifi_role_t *role = NULL;
1036*4882a593Smuzhiyun void *drv = phl_to_drvpriv(phl_info);
1037*4882a593Smuzhiyun struct phl_queue *sta_queue;
1038*4882a593Smuzhiyun u8 i;
1039*4882a593Smuzhiyun
1040*4882a593Smuzhiyun for (i = 0; i< MAX_WIFI_ROLE_NUMBER; i++) {
1041*4882a593Smuzhiyun role = &phl_com->wifi_roles[i];
1042*4882a593Smuzhiyun if (role->active && (role->mstate == MLME_LINKED)) {
1043*4882a593Smuzhiyun sta_queue = &role->assoc_sta_queue;
1044*4882a593Smuzhiyun _os_spinlock(drv, &sta_queue->lock, _bh, NULL);
1045*4882a593Smuzhiyun phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
1046*4882a593Smuzhiyun &sta_queue->queue, list) {
1047*4882a593Smuzhiyun if (sta)
1048*4882a593Smuzhiyun rtw_hal_set_sta_rx_sts(sta, true, NULL);
1049*4882a593Smuzhiyun }
1050*4882a593Smuzhiyun _os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun }
1053*4882a593Smuzhiyun }
1054*4882a593Smuzhiyun
1055*4882a593Smuzhiyun void
phl_rx_watchdog(struct phl_info_t * phl_info)1056*4882a593Smuzhiyun phl_rx_watchdog(struct phl_info_t *phl_info)
1057*4882a593Smuzhiyun {
1058*4882a593Smuzhiyun struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun phl_rx_traffic_upd(phl_stats);
1061*4882a593Smuzhiyun phl_dump_rx_stats(phl_stats);
1062*4882a593Smuzhiyun _phl_rx_statistics_reset(phl_info);
1063*4882a593Smuzhiyun }
1064*4882a593Smuzhiyun
rtw_phl_query_new_rx_num(void * phl)1065*4882a593Smuzhiyun u16 rtw_phl_query_new_rx_num(void *phl)
1066*4882a593Smuzhiyun {
1067*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1068*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = NULL;
1069*4882a593Smuzhiyun u16 new_rx = 0, wptr = 0, rptr = 0;
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyun if (NULL != phl_info) {
1072*4882a593Smuzhiyun ring = &phl_info->phl_rx_ring;
1073*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
1074*4882a593Smuzhiyun &ring->phl_idx);
1075*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
1076*4882a593Smuzhiyun &ring->core_idx);
1077*4882a593Smuzhiyun new_rx = phl_calc_avail_rptr(rptr, wptr,
1078*4882a593Smuzhiyun MAX_PHL_RX_RING_ENTRY_NUM);
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun return new_rx;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun
rtw_phl_query_rx_pkt(void * phl)1084*4882a593Smuzhiyun struct rtw_recv_pkt *rtw_phl_query_rx_pkt(void *phl)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1087*4882a593Smuzhiyun struct rtw_phl_rx_ring *ring = NULL;
1088*4882a593Smuzhiyun struct rtw_recv_pkt *recvpkt = NULL;
1089*4882a593Smuzhiyun void *drv_priv = NULL;
1090*4882a593Smuzhiyun u16 ring_res = 0, wptr = 0, rptr = 0;
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun if (NULL != phl_info) {
1093*4882a593Smuzhiyun ring = &phl_info->phl_rx_ring;
1094*4882a593Smuzhiyun drv_priv = phl_to_drvpriv(phl_info);
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1097*4882a593Smuzhiyun rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
1098*4882a593Smuzhiyun
1099*4882a593Smuzhiyun ring_res = phl_calc_avail_rptr(rptr, wptr,
1100*4882a593Smuzhiyun MAX_PHL_RX_RING_ENTRY_NUM);
1101*4882a593Smuzhiyun
1102*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
1103*4882a593Smuzhiyun "[4] %s::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
1104*4882a593Smuzhiyun __FUNCTION__,
1105*4882a593Smuzhiyun _os_atomic_read(drv_priv, &ring->phl_idx),
1106*4882a593Smuzhiyun _os_atomic_read(drv_priv, &ring->core_idx),
1107*4882a593Smuzhiyun ring_res);
1108*4882a593Smuzhiyun
1109*4882a593Smuzhiyun if (ring_res > 0) {
1110*4882a593Smuzhiyun rptr = rptr + 1;
1111*4882a593Smuzhiyun
1112*4882a593Smuzhiyun if (rptr >= MAX_PHL_RX_RING_ENTRY_NUM) {
1113*4882a593Smuzhiyun rptr=0;
1114*4882a593Smuzhiyun recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
1115*4882a593Smuzhiyun ring->entry[rptr]=NULL;
1116*4882a593Smuzhiyun _os_atomic_set(drv_priv, &ring->core_idx, 0);
1117*4882a593Smuzhiyun } else {
1118*4882a593Smuzhiyun recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
1119*4882a593Smuzhiyun ring->entry[rptr]=NULL;
1120*4882a593Smuzhiyun _os_atomic_inc(drv_priv, &ring->core_idx);
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun if (NULL == recvpkt)
1123*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "recvpkt is NULL!\n");
1124*4882a593Smuzhiyun else
1125*4882a593Smuzhiyun phl_rx_statistics(phl_info, recvpkt);
1126*4882a593Smuzhiyun } else {
1127*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no available rx packet to query!\n");
1128*4882a593Smuzhiyun }
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun return recvpkt;
1132*4882a593Smuzhiyun }
1133*4882a593Smuzhiyun
enum rtw_phl_status rtw_phl_return_rxbuf(void *phl, u8* recvpkt)
{
	/* Returns a previously-queried rx packet to the phl rx buffer
	 * pool.  recvpkt is the rtw_recv_pkt handed out by
	 * rtw_phl_query_rx_pkt(). */
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_recv_pkt *r = (struct rtw_recv_pkt *)recvpkt;
	struct rtw_phl_rx_pkt *phl_rx;

	if (NULL == recvpkt)
		return RTW_PHL_STATUS_FAILURE;

	/* recover the enclosing phl rx packet from its embedded member */
	phl_rx = phl_container_of(r, struct rtw_phl_rx_pkt, r);
	phl_recycle_rx_buf(phl_info, phl_rx);

	return RTW_PHL_STATUS_SUCCESS;
}
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun
rtw_phl_start_rx_process(void * phl)1154*4882a593Smuzhiyun enum rtw_phl_status rtw_phl_start_rx_process(void *phl)
1155*4882a593Smuzhiyun {
1156*4882a593Smuzhiyun enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1157*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1158*4882a593Smuzhiyun
1159*4882a593Smuzhiyun FUNCIN_WSTS(pstatus);
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyun pstatus = phl_schedule_handler(phl_info->phl_com,
1162*4882a593Smuzhiyun &phl_info->phl_rx_handler);
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun FUNCOUT_WSTS(pstatus);
1165*4882a593Smuzhiyun
1166*4882a593Smuzhiyun return pstatus;
1167*4882a593Smuzhiyun }
1168*4882a593Smuzhiyun
void rtw_phl_rx_bar(void *phl, struct rtw_phl_stainfo_t *sta, u8 tid, u16 seq)
{
	/* ref ieee80211_rx_h_ctrl() and wil_rx_bar() */

	/* Handles a received BlockAckReq: releases all reorder-buffered
	 * frames up to the BAR's starting sequence number and indicates
	 * them to the upper layer. */

	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct phl_tid_ampdu_rx *r;
	_os_list frames;

	INIT_LIST_HEAD(&frames);

	/*
	 * Bug fix: the original jumped to "out" here, which unlocked
	 * tid_rx_lock before it had ever been taken.  Return instead.
	 */
	if (tid >= RTW_MAX_TID_NUM)
		return;

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	r = sta->tid_rx[tid];
	if (!r) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR for non-existing TID %d\n", tid);
		goto out;
	}

	if (seq_less(seq, r->head_seq_num)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR Seq 0x%03x preceding head 0x%03x\n",
			  seq, r->head_seq_num);
		goto out;
	}

	PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "BAR: TID %d Seq 0x%03x head 0x%03x\n",
		  tid, seq, r->head_seq_num);

	phl_release_reorder_frames(phl_info, r, seq, &frames);
	phl_handle_rx_frame_list(phl_info, &frames);

out:
	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
}
1206*4882a593Smuzhiyun
enum rtw_rx_status rtw_phl_get_rx_status(void *phl)
{
	/* Reports the current rx path status.  Only USB buses expose a
	 * dedicated status query; everything else is reported as OK. */
#ifdef CONFIG_USB_HCI
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	enum rtw_hci_type hci_type = phl_info->phl_com->hci_type;

	if (hci_type & RTW_HCI_USB)
		return rtw_hal_get_usb_status(phl_info->hal);
#endif

	return RTW_STATUS_RX_OK;
}
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun enum rtw_phl_status
rtw_phl_enter_mon_mode(void * phl,struct rtw_wifi_role_t * wrole)1221*4882a593Smuzhiyun rtw_phl_enter_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
1222*4882a593Smuzhiyun {
1223*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1224*4882a593Smuzhiyun enum rtw_hal_status status;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun status = rtw_hal_enter_mon_mode(phl_info->hal, wrole->hw_band);
1227*4882a593Smuzhiyun if (status != RTW_HAL_STATUS_SUCCESS) {
1228*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1229*4882a593Smuzhiyun "%s(): rtw_hal_enter_mon_mode() failed, status=%d",
1230*4882a593Smuzhiyun __FUNCTION__, status);
1231*4882a593Smuzhiyun return RTW_PHL_STATUS_FAILURE;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun
1234*4882a593Smuzhiyun return RTW_PHL_STATUS_SUCCESS;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun enum rtw_phl_status
rtw_phl_leave_mon_mode(void * phl,struct rtw_wifi_role_t * wrole)1238*4882a593Smuzhiyun rtw_phl_leave_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
1239*4882a593Smuzhiyun {
1240*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1241*4882a593Smuzhiyun enum rtw_hal_status status;
1242*4882a593Smuzhiyun
1243*4882a593Smuzhiyun status = rtw_hal_leave_mon_mode(phl_info->hal, wrole->hw_band);
1244*4882a593Smuzhiyun if (status != RTW_HAL_STATUS_SUCCESS) {
1245*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1246*4882a593Smuzhiyun "%s(): rtw_hal_leave_mon_mode() failed, status=%d",
1247*4882a593Smuzhiyun __FUNCTION__, status);
1248*4882a593Smuzhiyun return RTW_PHL_STATUS_FAILURE;
1249*4882a593Smuzhiyun }
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun return RTW_PHL_STATUS_SUCCESS;
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
1255*4882a593Smuzhiyun void
_phl_rx_proc_frame_list(struct phl_info_t * phl_info,struct phl_queue * pq)1256*4882a593Smuzhiyun _phl_rx_proc_frame_list(struct phl_info_t *phl_info, struct phl_queue *pq)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
1259*4882a593Smuzhiyun _os_list *pkt_list = NULL;
1260*4882a593Smuzhiyun struct rtw_phl_rx_pkt *phl_rx = NULL;
1261*4882a593Smuzhiyun
1262*4882a593Smuzhiyun if (NULL == pq)
1263*4882a593Smuzhiyun return;
1264*4882a593Smuzhiyun if (0 == pq->cnt)
1265*4882a593Smuzhiyun return;
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1268*4882a593Smuzhiyun "_phl_rx_proc_frame_list : queue ele cnt = %d\n",
1269*4882a593Smuzhiyun pq->cnt);
1270*4882a593Smuzhiyun
1271*4882a593Smuzhiyun while (true == pq_pop(d, pq, &pkt_list, _first, _bh)) {
1272*4882a593Smuzhiyun phl_rx = (struct rtw_phl_rx_pkt *)pkt_list;
1273*4882a593Smuzhiyun phl_info->hci_trx_ops->rx_handle_normal(phl_info, phl_rx);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun }
1276*4882a593Smuzhiyun
/**
 * phl_rx_proc_phy_sts - attach a received PPDU-status report to buffered rx pkts
 * @phl_info: phl context
 * @ppdu_sts: the rx packet carrying the PPDU status (phy info) report
 *
 * When per-packet phy status is enabled, data/mgnt frames are buffered in
 * sts_ent[band][ppdu_cnt] (see phl_rx_proc_wait_phy_sts) until the matching
 * PPDU status arrives. This function copies the phy info into each buffered
 * frame (when the report plausibly matches), then indicates and resets the
 * frame list.
 *
 * Returns RTW_PHL_STATUS_SUCCESS only on the "nothing buffered" early-out;
 * otherwise RTW_PHL_STATUS_FAILURE (callers appear to treat the value as
 * informational — NOTE(review): confirm callers ignore the failure code).
 */
enum rtw_phl_status
phl_rx_proc_phy_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *ppdu_sts)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
	struct rtw_phl_rx_pkt *phl_rx = NULL;
	void *d = phl_to_drvpriv(phl_info);
	struct rtw_phl_rssi_stat *rssi_stat = &phl_info->phl_com->rssi_stat;
	_os_list *frame = NULL;
	bool upt_psts = true;	/* whether to copy phy info into buffered pkts */
	u8 i = 0;
	enum phl_band_idx band = HW_BAND_0;

	if (NULL == ppdu_sts)
		return pstatus;

	/* Feature gate: per-packet phy status disabled -> nothing to do. */
	if (false == psts_info->en_psts_per_pkt) {
		return pstatus;
	}

	/* ppdu_cnt indexes sts_ent[][]; reject out-of-range values. */
	if (ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			"ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!\n");
		return pstatus;
	}

	band = (ppdu_sts->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;

	if (false == psts_info->en_ppdu_sts[band])
		return pstatus;

	/* Report's ppdu_cnt should match the PPDU currently being buffered;
	 * if not, still flush the buffered frames below but skip the phy
	 * info update. */
	if (ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt[band]) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			"ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt!\n");
		upt_psts = false;
	}

	sts_entry = &psts_info->sts_ent[band][psts_info->cur_ppdu_cnt[band]];
	/* check list empty */
	if (0 == sts_entry->frames.cnt) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			"cur_ppdu_cnt %d --> sts_entry->frames.cnt = 0\n",
			psts_info->cur_ppdu_cnt[band]);
		pstatus = RTW_PHL_STATUS_SUCCESS;
		return pstatus;
	}

	/* start update phy info to per pkt*/
	if (false == pq_get_front(d, &sts_entry->frames, &frame, _bh)) {
		PHL_ERR(" %s list empty\n", __FUNCTION__);
		return pstatus;
	}
	/**
	 * TODO : How to filter the case :
	 * pkt(ppdu_cnt = 0) --> missing :psts(ppdu_cnt = 0) --> (all of the pkt, psts dropped/missing)
	 * --> ppdu_sts(ppdu_cnt = 0)(not for the current buffered pkt.)
	 * workaround : check rate/bw/ppdu_type/... etc
	 **/
	phl_rx = (struct rtw_phl_rx_pkt *)frame;
	/* Sanity check: if rate/bw/gi-ltf/ppdu-type of the first buffered pkt
	 * disagree with the report, the report is for a different PPDU. */
	if (upt_psts &&
	    ((phl_rx->r.mdata.rx_rate != ppdu_sts->r.mdata.rx_rate) ||
	     (phl_rx->r.mdata.bw != ppdu_sts->r.mdata.bw) ||
	     (phl_rx->r.mdata.rx_gi_ltf != ppdu_sts->r.mdata.rx_gi_ltf) ||
	     (phl_rx->r.mdata.ppdu_type != ppdu_sts->r.mdata.ppdu_type))) {
		/**
		 * ppdu status is not for the buffered pkt,
		 * skip update phy status to phl_rx
		 **/
		upt_psts = false;
	}
	/* Get Frame Type */
	ppdu_sts->r.phy_info.frame_type =
		PHL_GET_80211_HDR_TYPE(phl_rx->r.pkt_list[0].vir_addr);

	/* No valid phy info from HW: optionally fabricate one from the
	 * moving-average rssi of the matching frame-type category. */
	if ((false == ppdu_sts->r.phy_info.is_valid) &&
	    (true == psts_info->en_fake_psts)) {
		if (RTW_FRAME_TYPE_MGNT == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_MGNT_ACAM_A1M];
		} else if (RTW_FRAME_TYPE_DATA == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_DATA_ACAM_A1M];
		} else if (RTW_FRAME_TYPE_CTRL == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_CTRL_ACAM_A1M];
		} else {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_UNKNOWN];
		}
		/* Replicate the fabricated rssi to every RF path. */
		for(i = 0; i< RTW_PHL_MAX_RF_PATH ; i++) {
			ppdu_sts->r.phy_info.rssi_path[i] =
				ppdu_sts->r.phy_info.rssi;
		}
		ppdu_sts->r.phy_info.ch_idx = rtw_hal_get_cur_ch(phl_info->hal,
						phl_rx->r.mdata.bb_sel);
		ppdu_sts->r.phy_info.is_valid = true;
	}

	/* 1. copy phy info into the first pkt and, when psts_ampdu is set,
	 * into every remaining pkt of the (A-MPDU) frame list. */
	do {
		if (false == upt_psts)
			break;
		phl_rx = (struct rtw_phl_rx_pkt *)frame;
		_os_mem_cpy(d, &(phl_rx->r.phy_info), &(ppdu_sts->r.phy_info),
			    sizeof(struct rtw_phl_ppdu_phy_info));
	} while ((true == psts_info->psts_ampdu) &&
		 (pq_get_next(d, &sts_entry->frames, frame, &frame, _bh)));

	/*2. indicate the frame list*/
	_phl_rx_proc_frame_list(phl_info, &sts_entry->frames);
	/*3. reset the queue */
	pq_reset(d, &(sts_entry->frames), _bh);

	return pstatus;
}
1392*4882a593Smuzhiyun
/**
 * phl_rx_proc_wait_phy_sts - buffer an rx pkt until its PPDU status arrives
 * @phl_info: phl context
 * @phl_rx: received packet (normal data/mgnt frame, not a status report)
 *
 * Companion of phl_rx_proc_phy_sts(): frames whose type passes
 * ppdu_sts_filter are parked in sts_ent[band][ppdu_cnt] so the later PPDU
 * status report can stamp phy info onto them.
 *
 * Returns true when the packet was queued (caller must NOT indicate or free
 * it now), false when the caller keeps ownership and proceeds normally.
 */
bool
phl_rx_proc_wait_phy_sts(struct phl_info_t *phl_info,
			 struct rtw_phl_rx_pkt *phl_rx)
{
	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
	void *d = phl_to_drvpriv(phl_info);
	u8 i = 0;
	bool ret = false;
	enum phl_band_idx band = HW_BAND_0;

	/* Feature gate: per-packet phy status disabled -> never buffer. */
	if (false == psts_info->en_psts_per_pkt) {
		return ret;
	}

	/* ppdu_cnt indexes sts_ent[][]; out-of-range is a driver bug. */
	if (phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
		PHL_ASSERT("phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!");
		return ret;
	}

	band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;

	if (false == psts_info->en_ppdu_sts[band])
		return ret;

	if (psts_info->cur_ppdu_cnt[band] != phl_rx->r.mdata.ppdu_cnt) {
		/* start of PPDU */
		/* 1. Check all of the buffer list is empty */
		/* only check the target rx pkt band */
		for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
			sts_entry = &psts_info->sts_ent[band][i];
			if (0 != sts_entry->frames.cnt) {
				/* need indicate first */
				/* Leftover frames from a PPDU whose status
				 * report never arrived: flush them now so
				 * they are not lost. */
				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
					  "band %d ; ppdu_cnt %d queue is not empty \n",
					  band, i);
				_phl_rx_proc_frame_list(phl_info,
							&sts_entry->frames);
				pq_reset(d, &(sts_entry->frames), _bh);
			}
		}

		/* 2. check ppdu status filter condition */
		/* Filter function is supportted only if rxd = long_rxd */
		if ((1 == phl_rx->r.mdata.long_rxd) &&
		    (0 != (psts_info->ppdu_sts_filter &
			   BIT(phl_rx->r.mdata.frame_type)))) {
			/* 3. add new rx pkt to the tail of the queue */
			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
			pq_reset(d, &(sts_entry->frames), _bh);
			pq_push(d, &(sts_entry->frames), &phl_rx->list,
				_tail, _bh);
			ret = true;
		}
		/* Track the PPDU now in flight for this band. */
		psts_info->cur_ppdu_cnt[band] = phl_rx->r.mdata.ppdu_cnt;
	} else {
		/* Subsequent MPDU of the current PPDU. */
		/* 1. check ppdu status filter condition */
		/* Filter function is supportted only if rxd = long_rxd */
		if ((1 == phl_rx->r.mdata.long_rxd) &&
		    (0 != (psts_info->ppdu_sts_filter &
			   BIT(phl_rx->r.mdata.frame_type)))) {
			/* 2. add to frame list */
			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
			if (0 == sts_entry->frames.cnt) {
				/* First MPDU was filtered out or flushed —
				 * unexpected but tolerated. */
				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
					  "MPDU is not the start of PPDU, but the queue is empty!!!\n");
			}
			pq_push(d, &(sts_entry->frames), &phl_rx->list,
				_tail, _bh);
			ret = true;
		}
	}

	return ret;
}
1468*4882a593Smuzhiyun #endif
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun #ifdef CONFIG_PHY_INFO_NTFY
_phl_rx_post_proc_ppdu_sts(void * priv,struct phl_msg * msg)1471*4882a593Smuzhiyun void _phl_rx_post_proc_ppdu_sts(void* priv, struct phl_msg* msg)
1472*4882a593Smuzhiyun {
1473*4882a593Smuzhiyun struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1474*4882a593Smuzhiyun if (msg->inbuf && msg->inlen){
1475*4882a593Smuzhiyun _os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun }
1478*4882a593Smuzhiyun
1479*4882a593Smuzhiyun bool
_phl_rx_proc_aggr_psts_ntfy(struct phl_info_t * phl_info,struct rtw_phl_ppdu_sts_ent * ppdu_sts_ent)1480*4882a593Smuzhiyun _phl_rx_proc_aggr_psts_ntfy(struct phl_info_t *phl_info,
1481*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent)
1482*4882a593Smuzhiyun {
1483*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_info *ppdu_info =
1484*4882a593Smuzhiyun &phl_info->phl_com->ppdu_sts_info;
1485*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ntfy *psts_ntfy = NULL;
1486*4882a593Smuzhiyun u8 i = 0;
1487*4882a593Smuzhiyun bool ret = false;
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun if (ppdu_info->msg_aggr_cnt == 0) {
1490*4882a593Smuzhiyun /* reset entry valid status */
1491*4882a593Smuzhiyun for (i = 0; i < MAX_PSTS_MSG_AGGR_NUM; i++) {
1492*4882a593Smuzhiyun ppdu_info->msg_aggr_buf[i].vld = false;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun /* copy to the buf */
1496*4882a593Smuzhiyun psts_ntfy = &ppdu_info->msg_aggr_buf[ppdu_info->msg_aggr_cnt];
1497*4882a593Smuzhiyun psts_ntfy->frame_type = ppdu_sts_ent->frame_type;
1498*4882a593Smuzhiyun _os_mem_cpy(phl_info->phl_com->drv_priv,
1499*4882a593Smuzhiyun &psts_ntfy->phy_info,
1500*4882a593Smuzhiyun &ppdu_sts_ent->phy_info,
1501*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_phy_info));
1502*4882a593Smuzhiyun _os_mem_cpy(phl_info->phl_com->drv_priv,
1503*4882a593Smuzhiyun psts_ntfy->src_mac_addr,
1504*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr,
1505*4882a593Smuzhiyun MAC_ADDRESS_LENGTH);
1506*4882a593Smuzhiyun psts_ntfy->vld = true;
1507*4882a593Smuzhiyun
1508*4882a593Smuzhiyun /* update counter */
1509*4882a593Smuzhiyun ppdu_info->msg_aggr_cnt++;
1510*4882a593Smuzhiyun if (ppdu_info->msg_aggr_cnt >= MAX_PSTS_MSG_AGGR_NUM) {
1511*4882a593Smuzhiyun ppdu_info->msg_aggr_cnt = 0;
1512*4882a593Smuzhiyun ret = true;
1513*4882a593Smuzhiyun }
1514*4882a593Smuzhiyun
1515*4882a593Smuzhiyun return ret;
1516*4882a593Smuzhiyun }
1517*4882a593Smuzhiyun #endif
1518*4882a593Smuzhiyun
1519*4882a593Smuzhiyun void
phl_rx_proc_ppdu_sts(struct phl_info_t * phl_info,struct rtw_phl_rx_pkt * phl_rx)1520*4882a593Smuzhiyun phl_rx_proc_ppdu_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *phl_rx)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun u8 i = 0;
1523*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_info *ppdu_info = NULL;
1524*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent = NULL;
1525*4882a593Smuzhiyun struct rtw_phl_stainfo_t *psta = NULL;
1526*4882a593Smuzhiyun #ifdef CONFIG_PHY_INFO_NTFY
1527*4882a593Smuzhiyun struct rtw_phl_ppdu_sts_ntfy *psts_ntfy;
1528*4882a593Smuzhiyun void *d = phl_to_drvpriv(phl_info);
1529*4882a593Smuzhiyun #endif
1530*4882a593Smuzhiyun enum phl_band_idx band = HW_BAND_0;
1531*4882a593Smuzhiyun struct rtw_rssi_info *rssi_sts;
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun if ((NULL == phl_info) || (NULL == phl_rx))
1534*4882a593Smuzhiyun return;
1535*4882a593Smuzhiyun
1536*4882a593Smuzhiyun band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
1537*4882a593Smuzhiyun ppdu_info = &phl_info->phl_com->ppdu_sts_info;
1538*4882a593Smuzhiyun ppdu_sts_ent = &ppdu_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun if (false == ppdu_sts_ent->valid)
1541*4882a593Smuzhiyun return;
1542*4882a593Smuzhiyun
1543*4882a593Smuzhiyun if (true == ppdu_sts_ent->phl_done)
1544*4882a593Smuzhiyun return;
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun ppdu_sts_ent->phl_done = true;
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun /* update phl self varibles */
1549*4882a593Smuzhiyun for(i = 0 ; i < ppdu_sts_ent->usr_num; i++) {
1550*4882a593Smuzhiyun if (ppdu_sts_ent->sta[i].vld) {
1551*4882a593Smuzhiyun psta = rtw_phl_get_stainfo_by_macid(phl_info,
1552*4882a593Smuzhiyun ppdu_sts_ent->sta[i].macid);
1553*4882a593Smuzhiyun if (psta == NULL)
1554*4882a593Smuzhiyun continue;
1555*4882a593Smuzhiyun rssi_sts = &psta->hal_sta->rssi_stat;
1556*4882a593Smuzhiyun STA_UPDATE_MA_RSSI_FAST(rssi_sts->ma_rssi, ppdu_sts_ent->phy_info.rssi);
1557*4882a593Smuzhiyun /* update (re)associate req/resp pkt rssi */
1558*4882a593Smuzhiyun if (RTW_IS_ASOC_PKT(ppdu_sts_ent->frame_type)) {
1559*4882a593Smuzhiyun rssi_sts->assoc_rssi =
1560*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi;
1561*4882a593Smuzhiyun }
1562*4882a593Smuzhiyun
1563*4882a593Smuzhiyun if (RTW_IS_BEACON_OR_PROBE_RESP_PKT(
1564*4882a593Smuzhiyun ppdu_sts_ent->frame_type)) {
1565*4882a593Smuzhiyun if (0 == rssi_sts->ma_rssi_mgnt) {
1566*4882a593Smuzhiyun rssi_sts->ma_rssi_mgnt =
1567*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi;
1568*4882a593Smuzhiyun } else {
1569*4882a593Smuzhiyun STA_UPDATE_MA_RSSI_FAST(
1570*4882a593Smuzhiyun rssi_sts->ma_rssi_mgnt,
1571*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi);
1572*4882a593Smuzhiyun }
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun }
1575*4882a593Smuzhiyun else {
1576*4882a593Smuzhiyun if (RTW_IS_ASOC_REQ_PKT(ppdu_sts_ent->frame_type) &&
1577*4882a593Smuzhiyun (ppdu_sts_ent->usr_num == 1)) {
1578*4882a593Smuzhiyun psta = rtw_phl_get_stainfo_by_addr_ex(phl_info,
1579*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr);
1580*4882a593Smuzhiyun if (psta) {
1581*4882a593Smuzhiyun psta->hal_sta->rssi_stat.assoc_rssi =
1582*4882a593Smuzhiyun ppdu_sts_ent->phy_info.rssi;
1583*4882a593Smuzhiyun
1584*4882a593Smuzhiyun #ifdef DBG_AP_CLIENT_ASSOC_RSSI
1585*4882a593Smuzhiyun PHL_INFO("%s [Rx-ASOC_REQ] - macid:%d, MAC-Addr:%02x-%02x-%02x-%02x-%02x-%02x, assoc_rssi:%d\n",
1586*4882a593Smuzhiyun __func__,
1587*4882a593Smuzhiyun psta->macid,
1588*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[0],
1589*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[1],
1590*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[2],
1591*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[3],
1592*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[4],
1593*4882a593Smuzhiyun ppdu_sts_ent->src_mac_addr[5],
1594*4882a593Smuzhiyun psta->hal_sta->rssi_stat.assoc_rssi);
1595*4882a593Smuzhiyun #endif
1596*4882a593Smuzhiyun }
1597*4882a593Smuzhiyun }
1598*4882a593Smuzhiyun }
1599*4882a593Smuzhiyun }
1600*4882a593Smuzhiyun
1601*4882a593Smuzhiyun #ifdef CONFIG_PHY_INFO_NTFY
1602*4882a593Smuzhiyun /*2. prepare and send psts notify to core */
1603*4882a593Smuzhiyun if((RTW_FRAME_TYPE_BEACON == ppdu_sts_ent->frame_type) ||
1604*4882a593Smuzhiyun (RTW_FRAME_TYPE_PROBE_RESP == ppdu_sts_ent->frame_type)) {
1605*4882a593Smuzhiyun
1606*4882a593Smuzhiyun if (false == _phl_rx_proc_aggr_psts_ntfy(phl_info,
1607*4882a593Smuzhiyun ppdu_sts_ent)) {
1608*4882a593Smuzhiyun return;
1609*4882a593Smuzhiyun }
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun /* send aggr psts ntfy*/
1612*4882a593Smuzhiyun psts_ntfy = (struct rtw_phl_ppdu_sts_ntfy *)_os_kmem_alloc(d,
1613*4882a593Smuzhiyun MAX_PSTS_MSG_AGGR_NUM * sizeof(struct rtw_phl_ppdu_sts_ntfy));
1614*4882a593Smuzhiyun if (psts_ntfy == NULL) {
1615*4882a593Smuzhiyun PHL_ERR("%s: alloc ppdu sts for ntfy fail.\n", __func__);
1616*4882a593Smuzhiyun return;
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun
1619*4882a593Smuzhiyun _os_mem_cpy(phl_info->phl_com->drv_priv,
1620*4882a593Smuzhiyun psts_ntfy,
1621*4882a593Smuzhiyun &ppdu_info->msg_aggr_buf,
1622*4882a593Smuzhiyun (MAX_PSTS_MSG_AGGR_NUM *
1623*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_sts_ntfy)));
1624*4882a593Smuzhiyun
1625*4882a593Smuzhiyun msg.inbuf = (u8 *)psts_ntfy;
1626*4882a593Smuzhiyun msg.inlen = (MAX_PSTS_MSG_AGGR_NUM *
1627*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_sts_ntfy));
1628*4882a593Smuzhiyun SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PSTS);
1629*4882a593Smuzhiyun SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_RX_PSTS);
1630*4882a593Smuzhiyun attr.completion.completion = _phl_rx_post_proc_ppdu_sts;
1631*4882a593Smuzhiyun attr.completion.priv = phl_info;
1632*4882a593Smuzhiyun if (phl_msg_hub_send(phl_info, &attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
1633*4882a593Smuzhiyun PHL_ERR("%s: send msg_hub failed\n", __func__);
1634*4882a593Smuzhiyun _os_kmem_free(d, psts_ntfy,
1635*4882a593Smuzhiyun (MAX_PSTS_MSG_AGGR_NUM *
1636*4882a593Smuzhiyun sizeof(struct rtw_phl_ppdu_sts_ntfy)));
1637*4882a593Smuzhiyun }
1638*4882a593Smuzhiyun }
1639*4882a593Smuzhiyun #endif
1640*4882a593Smuzhiyun }
1641*4882a593Smuzhiyun
_dump_rx_reorder_info(struct phl_info_t * phl_info,struct rtw_phl_stainfo_t * sta)1642*4882a593Smuzhiyun static void _dump_rx_reorder_info(struct phl_info_t *phl_info,
1643*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun void *drv_priv = phl_to_drvpriv(phl_info);
1646*4882a593Smuzhiyun _os_spinlockfg sp_flags;
1647*4882a593Smuzhiyun u8 i;
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun PHL_INFO("dump rx reorder buffer info:\n");
1650*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
1651*4882a593Smuzhiyun
1652*4882a593Smuzhiyun _os_spinlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
1653*4882a593Smuzhiyun if (sta->tid_rx[i]) {
1654*4882a593Smuzhiyun PHL_INFO("== tid = %d ==\n", sta->tid_rx[i]->tid);
1655*4882a593Smuzhiyun PHL_INFO("head_seq_num = %d\n",
1656*4882a593Smuzhiyun sta->tid_rx[i]->head_seq_num);
1657*4882a593Smuzhiyun PHL_INFO("stored_mpdu_num = %d\n",
1658*4882a593Smuzhiyun sta->tid_rx[i]->stored_mpdu_num);
1659*4882a593Smuzhiyun PHL_INFO("ssn = %d\n", sta->tid_rx[i]->ssn);
1660*4882a593Smuzhiyun PHL_INFO("buf_size = %d\n", sta->tid_rx[i]->buf_size);
1661*4882a593Smuzhiyun PHL_INFO("started = %d\n", sta->tid_rx[i]->started);
1662*4882a593Smuzhiyun PHL_INFO("removed = %d\n", sta->tid_rx[i]->removed);
1663*4882a593Smuzhiyun }
1664*4882a593Smuzhiyun _os_spinunlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
1665*4882a593Smuzhiyun }
1666*4882a593Smuzhiyun }
1667*4882a593Smuzhiyun
phl_dump_all_sta_rx_info(struct phl_info_t * phl_info)1668*4882a593Smuzhiyun void phl_dump_all_sta_rx_info(struct phl_info_t *phl_info)
1669*4882a593Smuzhiyun {
1670*4882a593Smuzhiyun struct rtw_phl_com_t *phl_com = phl_info->phl_com;
1671*4882a593Smuzhiyun struct rtw_phl_stainfo_t *sta = NULL;
1672*4882a593Smuzhiyun struct rtw_wifi_role_t *role = NULL;
1673*4882a593Smuzhiyun void *drv = phl_to_drvpriv(phl_info);
1674*4882a593Smuzhiyun struct phl_queue *sta_queue;
1675*4882a593Smuzhiyun _os_spinlockfg sp_flags;
1676*4882a593Smuzhiyun u8 i;
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun PHL_INFO("dump all sta rx info:\n");
1679*4882a593Smuzhiyun for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
1680*4882a593Smuzhiyun role = &phl_com->wifi_roles[i];
1681*4882a593Smuzhiyun if (role->active) {
1682*4882a593Smuzhiyun PHL_INFO("wrole idx = %d\n", i);
1683*4882a593Smuzhiyun PHL_INFO("wrole type = %d\n", role->type);
1684*4882a593Smuzhiyun PHL_INFO("wrole mstate = %d\n", role->mstate);
1685*4882a593Smuzhiyun
1686*4882a593Smuzhiyun sta_queue = &role->assoc_sta_queue;
1687*4882a593Smuzhiyun _os_spinlock(drv, &sta_queue->lock, _irq, &sp_flags);
1688*4882a593Smuzhiyun phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
1689*4882a593Smuzhiyun &sta_queue->queue, list) {
1690*4882a593Smuzhiyun PHL_INFO("%s MACID:%d %02x:%02x:%02x:%02x:%02x:%02x \n",
1691*4882a593Smuzhiyun __func__, sta->macid,
1692*4882a593Smuzhiyun sta->mac_addr[0],
1693*4882a593Smuzhiyun sta->mac_addr[1],
1694*4882a593Smuzhiyun sta->mac_addr[2],
1695*4882a593Smuzhiyun sta->mac_addr[3],
1696*4882a593Smuzhiyun sta->mac_addr[4],
1697*4882a593Smuzhiyun sta->mac_addr[5]);
1698*4882a593Smuzhiyun _dump_rx_reorder_info(phl_info, sta);
1699*4882a593Smuzhiyun }
1700*4882a593Smuzhiyun _os_spinunlock(drv, &sta_queue->lock, _irq, &sp_flags);
1701*4882a593Smuzhiyun }
1702*4882a593Smuzhiyun }
1703*4882a593Smuzhiyun }
1704*4882a593Smuzhiyun
/* Request an asynchronous rx-state dump on @band_idx by enqueuing a
 * MSG_EVT_DBG_RX_DUMP command (fire-and-forget, PHL_CMD_NO_WAIT). */
void phl_rx_dbg_dump(struct phl_info_t *phl_info, u8 band_idx)
{
	enum rtw_phl_status sts;

	sts = phl_cmd_enqueue(phl_info, band_idx, MSG_EVT_DBG_RX_DUMP,
			      NULL, 0, NULL, PHL_CMD_NO_WAIT, 0);
	if (sts != RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: cmd enqueue fail!\n",
			  __func__);
	}
}