/******************************************************************************
 *
 * Copyright(c) 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _PHL_RX_C_
#include "phl_headers.h"


struct rtw_phl_rx_pkt *rtw_phl_query_phl_rx(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
	struct rtw_phl_rx_pkt *phl_rx = NULL;

	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;

	_os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);

	if (false == list_empty(&rx_pkt_pool->idle)) {
		phl_rx = list_first_entry(&rx_pkt_pool->idle,
					struct rtw_phl_rx_pkt, list);
		list_del(&phl_rx->list);
		rx_pkt_pool->idle_cnt--;
	}

	_os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);

	return phl_rx;
}

u8 rtw_phl_is_phl_rx_idle(struct phl_info_t *phl_info)
{
	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
	u8 res = false;

	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;

	_os_spinlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);

	if (MAX_PHL_RING_RX_PKT_NUM == rx_pkt_pool->idle_cnt)
		res = true;
	else
		res = false;

	_os_spinunlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);

	return res;
}

void phl_dump_rx_stats(struct rtw_stats *stats)
{
	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
		  "Dump Rx statistics\n"
		  "rx_byte_uni = %llu\n"
		  "rx_byte_total = %llu\n"
		  "rx_tp_kbits = %u\n"
		  "last_rx_time_ms = %u\n",
		  stats->rx_byte_uni,
		  stats->rx_byte_total,
		  stats->rx_tp_kbits,
		  stats->last_rx_time_ms);
}

void phl_reset_rx_stats(struct rtw_stats *stats)
{
	stats->rx_byte_uni = 0;
	stats->rx_byte_total = 0;
	stats->rx_tp_kbits = 0;
	stats->last_rx_time_ms = 0;
	stats->rxtp.last_calc_time_ms = 0;
	stats->rxtp.last_calc_bits = 0;
	stats->rx_traffic.lvl = RTW_TFC_IDLE;
	stats->rx_traffic.sts = 0;
	stats->rx_tf_cnt = 0;
	stats->pre_rx_tf_cnt = 0;
}

void
phl_rx_traffic_upd(struct rtw_stats *sts)
{
	u32 tp_k = 0, tp_m = 0;
	enum rtw_tfc_lvl rx_tfc_lvl = RTW_TFC_IDLE;

	tp_k = sts->rx_tp_kbits;
	tp_m = sts->rx_tp_kbits >> 10;

	if (tp_m >= RX_HIGH_TP_THRES_MBPS)
		rx_tfc_lvl = RTW_TFC_HIGH;
	else if (tp_m >= RX_MID_TP_THRES_MBPS)
		rx_tfc_lvl = RTW_TFC_MID;
	else if (tp_m >= RX_LOW_TP_THRES_MBPS)
		rx_tfc_lvl = RTW_TFC_LOW;
	else if (tp_k >= RX_ULTRA_LOW_TP_THRES_KBPS)
		rx_tfc_lvl = RTW_TFC_ULTRA_LOW;
	else
		rx_tfc_lvl = RTW_TFC_IDLE;

	if (sts->rx_traffic.lvl > rx_tfc_lvl) {
		sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
		sts->rx_traffic.lvl = rx_tfc_lvl;
	} else if (sts->rx_traffic.lvl < rx_tfc_lvl) {
		sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
		sts->rx_traffic.lvl = rx_tfc_lvl;
	} else if (sts->rx_traffic.sts &
		(TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
		sts->rx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
					 TRAFFIC_DECREASE);
	}
}
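
/*
 * Worked example for the level mapping above (values illustrative): with
 * sts->rx_tp_kbits = 20480, tp_m = 20480 >> 10 = 20 Mbps, which is tested
 * against RX_HIGH/MID/LOW_TP_THRES_MBPS in descending order; only the
 * RTW_TFC_ULTRA_LOW decision falls back to the kbps figure. On a level
 * change, TRAFFIC_CHANGED plus the direction bit are latched and then
 * cleared on the next call at the same level.
 */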

void phl_update_rx_stats(struct rtw_stats *stats, struct rtw_recv_pkt *rx_pkt)
{
	u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
	u64 diff_bits = 0;

	stats->last_rx_time_ms = cur_time;
	stats->rx_byte_total += rx_pkt->mdata.pktlen;
	if (rx_pkt->mdata.bc == 0 && rx_pkt->mdata.mc == 0)
		stats->rx_byte_uni += rx_pkt->mdata.pktlen;

	if (0 == stats->rxtp.last_calc_time_ms ||
		0 == stats->rxtp.last_calc_bits) {
		stats->rxtp.last_calc_time_ms = stats->last_rx_time_ms;
		stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
	} else {
		if (cur_time >= stats->rxtp.last_calc_time_ms) {
			diff_t = cur_time - stats->rxtp.last_calc_time_ms;
		} else {
			diff_t = RTW_U32_MAX - stats->rxtp.last_calc_time_ms +
				cur_time + 1;
		}
		if (diff_t > RXTP_CALC_DIFF_MS && stats->rx_byte_uni != 0) {
			diff_bits = (stats->rx_byte_uni * 8) -
				stats->rxtp.last_calc_bits;
			stats->rx_tp_kbits = (u32)_os_division64(diff_bits,
								 diff_t);
			stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
			stats->rxtp.last_calc_time_ms = cur_time;
		}
	}
}
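
/*
 * Throughput arithmetic above, worked through (illustrative numbers): if
 * 2,500,000 unicast bytes arrive over diff_t = 1000 ms, then
 * diff_bits = 20,000,000 and rx_tp_kbits = 20,000,000 / 1000 = 20,000,
 * i.e. 20 Mbps, since bits per millisecond equal kbits per second. The
 * RTW_U32_MAX branch keeps diff_t correct across a 32-bit tick wrap.
 */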

void phl_rx_statistics(struct phl_info_t *phl_info, struct rtw_recv_pkt *rx_pkt)
{
	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
	struct rtw_stats *phl_stats = &phl_com->phl_stats;
	struct rtw_stats *sta_stats = NULL;
	struct rtw_phl_stainfo_t *sta = NULL;
	u16 macid = rx_pkt->mdata.macid;

	if (!phl_macid_is_valid(phl_info, macid))
		goto dev_stat;

	sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);

	if (NULL == sta)
		goto dev_stat;
	sta_stats = &sta->stats;

	phl_update_rx_stats(sta_stats, rx_pkt);
dev_stat:
	phl_update_rx_stats(phl_stats, rx_pkt);
}

void phl_release_phl_rx(struct phl_info_t *phl_info,
				struct rtw_phl_rx_pkt *phl_rx)
{
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;

	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;

	_os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
	_os_mem_set(phl_to_drvpriv(phl_info), &phl_rx->r, 0, sizeof(phl_rx->r));
	phl_rx->type = RTW_RX_TYPE_MAX;
	phl_rx->rxbuf_ptr = NULL;
	INIT_LIST_HEAD(&phl_rx->list);
	list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
	rx_pkt_pool->idle_cnt++;
	_os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
}

static void phl_free_recv_pkt_pool(struct phl_info_t *phl_info)
{
	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
	u32 buf_len = 0;

	FUNCIN();

	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
	if (NULL != rx_pkt_pool) {
		_os_spinlock_free(phl_to_drvpriv(phl_info),
					&rx_pkt_pool->idle_lock);
		_os_spinlock_free(phl_to_drvpriv(phl_info),
					&rx_pkt_pool->busy_lock);

		buf_len = sizeof(*rx_pkt_pool);
		_os_mem_free(phl_to_drvpriv(phl_info), rx_pkt_pool, buf_len);
	}

	FUNCOUT();
}

void phl_rx_deinit(struct phl_info_t *phl_info)
{
	/* TODO: rx reorder deinit */

	/* TODO: peer info deinit */

	phl_free_recv_pkt_pool(phl_info);
}


static enum rtw_phl_status phl_alloc_recv_pkt_pool(struct phl_info_t *phl_info)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
	struct rtw_phl_rx_pkt *phl_rx = NULL;
	u32 buf_len = 0, i = 0;

	FUNCIN_WSTS(pstatus);

	buf_len = sizeof(*rx_pkt_pool);
	rx_pkt_pool = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);

	if (NULL != rx_pkt_pool) {
		_os_mem_set(phl_to_drvpriv(phl_info), rx_pkt_pool, 0, buf_len);
		INIT_LIST_HEAD(&rx_pkt_pool->idle);
		INIT_LIST_HEAD(&rx_pkt_pool->busy);
		_os_spinlock_init(phl_to_drvpriv(phl_info),
					&rx_pkt_pool->idle_lock);
		_os_spinlock_init(phl_to_drvpriv(phl_info),
					&rx_pkt_pool->busy_lock);
		rx_pkt_pool->idle_cnt = 0;

		for (i = 0; i < MAX_PHL_RING_RX_PKT_NUM; i++) {
			phl_rx = &rx_pkt_pool->phl_rx[i];
			INIT_LIST_HEAD(&phl_rx->list);
			list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
			rx_pkt_pool->idle_cnt++;
		}

		phl_info->rx_pkt_pool = rx_pkt_pool;

		pstatus = RTW_PHL_STATUS_SUCCESS;
	}

	if (RTW_PHL_STATUS_SUCCESS != pstatus)
		phl_free_recv_pkt_pool(phl_info);

	FUNCOUT_WSTS(pstatus);

	return pstatus;
}

enum rtw_phl_status phl_rx_init(struct phl_info_t *phl_info)
{
	enum rtw_phl_status status;

	/* Allocate rx packet pool */
	status = phl_alloc_recv_pkt_pool(phl_info);
	if (status != RTW_PHL_STATUS_SUCCESS)
		return status;

	/* TODO: Peer info init */

	/* TODO: Rx reorder init */

	return RTW_PHL_STATUS_SUCCESS;
}

void phl_recycle_rx_buf(struct phl_info_t *phl_info,
				struct rtw_phl_rx_pkt *phl_rx)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
	struct rtw_rx_buf *rx_buf = NULL;

	do {
		if (NULL == phl_rx) {
			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]phl_rx is NULL!\n");
			break;
		}

		rx_buf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;

		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "[4] %s:: [%p]\n",
								__FUNCTION__, rx_buf);
		if (phl_rx->rxbuf_ptr) {
			pstatus = hci_trx_ops->recycle_rx_buf(phl_info, rx_buf,
								phl_rx->r.mdata.dma_ch,
								phl_rx->type);
		}
		if (RTW_PHL_STATUS_SUCCESS != pstatus && phl_rx->rxbuf_ptr)
			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]recycle hci rx buf error!\n");

		phl_release_phl_rx(phl_info, phl_rx);
	} while (false);
}

void _phl_indic_new_rxpkt(struct phl_info_t *phl_info)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
	struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
	void *drv_priv = phl_to_drvpriv(phl_info);

	FUNCIN_WSTS(pstatus);

	do {
		_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
		evt_info->evt_bitmap |= RTW_PHL_EVT_RX;
		_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);

		pstatus = phl_schedule_handler(phl_info->phl_com,
							&phl_info->phl_event_handler);
	} while (false);

	if (RTW_PHL_STATUS_SUCCESS != pstatus)
		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING] Trigger rx indic event fail!\n");

	FUNCOUT_WSTS(pstatus);

#ifdef PHL_RX_BATCH_IND
	phl_info->rx_new_pending = 0;
#endif
}

void _phl_record_rx_stats(struct rtw_recv_pkt *recvpkt)
{
	if (NULL == recvpkt)
		return;
	if (recvpkt->tx_sta)
		recvpkt->tx_sta->stats.rx_rate = recvpkt->mdata.rx_rate;
}

enum rtw_phl_status _phl_add_rx_pkt(struct phl_info_t *phl_info,
				    struct rtw_phl_rx_pkt *phl_rx)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
	struct rtw_recv_pkt *recvpkt = &phl_rx->r;
	u16 ring_res = 0, wptr = 0, rptr = 0;
	void *drv = phl_to_drvpriv(phl_info);

	FUNCIN_WSTS(pstatus);
	_os_spinlock(drv, &phl_info->rx_ring_lock, _bh, NULL);

	if (!ring)
		goto out;

	wptr = (u16)_os_atomic_read(drv, &ring->phl_idx);
	rptr = (u16)_os_atomic_read(drv, &ring->core_idx);

	ring_res = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RX_RING_ENTRY_NUM);
	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
		"[3] _phl_add_rx_pkt::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
		_os_atomic_read(drv, &ring->phl_idx),
		_os_atomic_read(drv, &ring->core_idx),
		ring_res);
	if (ring_res == 0) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no ring resource to add new rx pkt!\n");
		pstatus = RTW_PHL_STATUS_RESOURCE;
		goto out;
	}

	wptr = wptr + 1;
	if (wptr >= MAX_PHL_RX_RING_ENTRY_NUM)
		wptr = 0;

	ring->entry[wptr] = recvpkt;

	if (wptr)
		_os_atomic_inc(drv, &ring->phl_idx);
	else
		_os_atomic_set(drv, &ring->phl_idx, 0);

#ifdef PHL_RX_BATCH_IND
	phl_info->rx_new_pending = 1;
	pstatus = RTW_PHL_STATUS_SUCCESS;
#endif

out:
	_os_spinunlock(drv, &phl_info->rx_ring_lock, _bh, NULL);

	if (pstatus == RTW_PHL_STATUS_SUCCESS)
		_phl_record_rx_stats(recvpkt);

	FUNCOUT_WSTS(pstatus);

	return pstatus;
}
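
/*
 * Ring-index example for the producer path above (illustrative; the exact
 * free-slot count returned by phl_calc_avail_wptr() is defined elsewhere):
 * with phl_idx = 4094 and core_idx = 10 on a ring of
 * MAX_PHL_RX_RING_ENTRY_NUM entries (4096, matching the modulo used in
 * dump_phl_rx_ring()), the next write lands at wptr + 1 = 4095; one
 * packet later the write position wraps to 0, which is why the code
 * resets phl_idx with _os_atomic_set() instead of incrementing it.
 */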

void
phl_sta_ps_enter(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
                 struct rtw_wifi_role_t *role)
{
	void *d = phl_to_drvpriv(phl_info);
	/* enum rtw_hal_status hal_status; */
	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;

	_os_atomic_set(d, &sta->ps_sta, 1);

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
	          "STA %02X:%02X:%02X:%02X:%02X:%02X enters PS mode, AID=%u, macid=%u, sta=0x%p\n",
	          sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
	          sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
	          sta->aid, sta->macid, sta);

	/* TODO: commented out because beacon may stop if we do this frequently */
	/* hal_status = rtw_hal_set_macid_pause(phl_info->hal, */
	/*                                         sta->macid, true); */
	/* if (RTW_HAL_STATUS_SUCCESS != hal_status) { */
	/*         PHL_WARN("%s(): failed to pause macid tx, macid=%u\n", */
	/*                  __FUNCTION__, sta->macid); */
	/* } */

	if (ops->ap_ps_sta_ps_change)
		ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, true);
}

void
phl_sta_ps_exit(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
                struct rtw_wifi_role_t *role)
{
	void *d = phl_to_drvpriv(phl_info);
	/* enum rtw_hal_status hal_status; */
	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;

	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
	          "STA %02X:%02X:%02X:%02X:%02X:%02X leaves PS mode, AID=%u, macid=%u, sta=0x%p\n",
	          sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
	          sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
	          sta->aid, sta->macid, sta);

	_os_atomic_set(d, &sta->ps_sta, 0);

	/* TODO: commented out because beacon may stop if we do this frequently */
	/* hal_status = rtw_hal_set_macid_pause(phl_info->hal, */
	/*                                         sta->macid, false); */
	/* if (RTW_HAL_STATUS_SUCCESS != hal_status) { */
	/*         PHL_WARN("%s(): failed to resume macid tx, macid=%u\n", */
	/*                  __FUNCTION__, sta->macid); */
	/* } */

	if (ops->ap_ps_sta_ps_change)
		ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, false);
}

void
phl_rx_handle_sta_process(struct phl_info_t *phl_info,
                          struct rtw_phl_rx_pkt *rx)
{
	struct rtw_r_meta_data *m = &rx->r.mdata;
	struct rtw_wifi_role_t *role = NULL;
	struct rtw_phl_stainfo_t *sta = NULL;
	void *d = phl_to_drvpriv(phl_info);

	if (!phl_info->phl_com->dev_sw_cap.ap_ps)
		return;

	if (m->addr_cam_vld) {
		sta = rtw_phl_get_stainfo_by_macid(phl_info, m->macid);
		if (sta && sta->wrole)
			role = sta->wrole;
	}

	if (!sta) {
		role = phl_get_wrole_by_addr(phl_info, m->mac_addr);
		if (role)
			sta = rtw_phl_get_stainfo_by_addr(phl_info,
			                                  role, m->ta);
	}

	if (!role || !sta)
		return;

	rx->r.tx_sta = sta;
	rx->r.rx_role = role;

	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_,
	          "ap-ps: more_frag=%u, frame_type=%u, role_type=%d, pwr_bit=%u, seq=%u\n",
	          m->more_frag, m->frame_type, role->type, m->pwr_bit, m->seq);

	/*
	 * Change STA PS state based on the PM bit in frame control
	 */
	if (!m->more_frag &&
	    (m->frame_type == RTW_FRAME_TYPE_DATA ||
	     m->frame_type == RTW_FRAME_TYPE_CTRL) &&
	    (role->type == PHL_RTYPE_AP ||
	     role->type == PHL_RTYPE_P2P_GO)) {
		if (_os_atomic_read(d, &sta->ps_sta)) {
			if (!m->pwr_bit)
				phl_sta_ps_exit(phl_info, sta, role);
		} else {
			if (m->pwr_bit)
				phl_sta_ps_enter(phl_info, sta, role);
		}
	}
}
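
/*
 * PM-bit walk-through (illustrative): an associated STA on an AP/GO role
 * sends a data frame with pwr_bit = 1 -> phl_sta_ps_enter() marks it
 * dozing; a later data or control frame with pwr_bit = 0 ->
 * phl_sta_ps_exit(). Fragments (more_frag = 1) and management frames are
 * excluded, so the PS state only toggles on complete data/control frames.
 */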

void
phl_handle_rx_frame_list(struct phl_info_t *phl_info,
                         _os_list *frames)
{
	struct rtw_phl_rx_pkt *pos, *n;
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	phl_list_for_loop_safe(pos, n, struct rtw_phl_rx_pkt, frames, list) {
		list_del(&pos->list);
		phl_rx_handle_sta_process(phl_info, pos);
		status = _phl_add_rx_pkt(phl_info, pos);
		if (RTW_PHL_STATUS_RESOURCE == status)
			hci_trx_ops->recycle_rx_pkt(phl_info, pos);
	}
#ifndef PHL_RX_BATCH_IND
	_phl_indic_new_rxpkt(phl_info);
#endif
}


#define SEQ_MODULO 0x1000
#define SEQ_MASK 0xfff

static inline int seq_less(u16 sq1, u16 sq2)
{
	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
}

static inline u16 seq_inc(u16 sq)
{
	return (sq + 1) & SEQ_MASK;
}

static inline u16 seq_sub(u16 sq1, u16 sq2)
{
	return (sq1 - sq2) & SEQ_MASK;
}

static inline u16 reorder_index(struct phl_tid_ampdu_rx *r, u16 seq)
{
	return seq_sub(seq, r->ssn) % r->buf_size;
}
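
/*
 * Worked examples of the 12-bit sequence arithmetic above (illustrative):
 * seq_sub(0x005, 0xFFE) = (0x005 - 0xFFE) & 0xFFF = 0x007, i.e. seven
 * steps forward across the wrap; seq_less(0xFFE, 0x005) is true because
 * (0xFFE - 0x005) & 0xFFF = 0xFF9 > 0x800, so 0xFFE precedes 0x005;
 * reorder_index() maps a SN onto a slot relative to the SSN, e.g. with
 * ssn = 0xFFC, seq = 0x002 and buf_size = 64:
 * seq_sub(0x002, 0xFFC) = 6, 6 % 64 = slot 6.
 */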

static void phl_release_reorder_frame(struct phl_info_t *phl_info,
                                      struct phl_tid_ampdu_rx *r,
                                      int index, _os_list *frames)
{
	struct rtw_phl_rx_pkt *pkt = r->reorder_buf[index];

	if (!pkt)
		goto out;

	/* release the frame from the reorder ring buffer */
	r->stored_mpdu_num--;
	r->reorder_buf[index] = NULL;
	list_add_tail(&pkt->list, frames);

out:
	r->head_seq_num = seq_inc(r->head_seq_num);
}

#define HT_RX_REORDER_BUF_TIMEOUT_MS 500

/*
 * If the MPDU at head_seq_num is ready,
 *     1. release all subsequent MPDUs with consecutive SN and
 *     2. if there's an MPDU that is ready but left in the reordering
 *        buffer, find it and set the reorder timer according to its
 *        reorder time
 *
 * If the MPDU at head_seq_num is not ready and there is no MPDU ready
 * in the buffer at all, return.
 *
 * If the MPDU at head_seq_num is not ready but there is some MPDU in
 * the buffer that is ready, check whether any frames in the reorder
 * buffer have timed out in the following way.
 *
 * Basically, MPDUs that are not ready are purged and MPDUs that are
 * ready are released.
 *
 * The process goes through the whole buffer except the slot at
 * head_seq_num unless
 *     - there's an MPDU that is ready AND
 *     - there are one or more buffers that are not ready.
 * In this case, the process is stopped, head_seq_num becomes the
 * first buffer that is not ready and the reorder_timer is reset based
 * on the reorder_time of that ready MPDU.
 */
static void phl_reorder_release(struct phl_info_t *phl_info,
								struct phl_tid_ampdu_rx *r, _os_list *frames)
{
	/* ref ieee80211_sta_reorder_release() and wil_reorder_release() */

	int index, i, j;
	u32 cur_time = _os_get_cur_time_ms();

	/* release the buffer until the next missing frame */
	index = reorder_index(r, r->head_seq_num);
	if (!r->reorder_buf[index] && r->stored_mpdu_num) {
		/*
		 * No buffers ready to be released, but check whether any
		 * frames in the reorder buffer have timed out.
		 */
		int skipped = 1;
		for (j = (index + 1) % r->buf_size; j != index;
			j = (j + 1) % r->buf_size) {
			if (!r->reorder_buf[j]) {
				skipped++;
				continue;
			}
			if (skipped && cur_time < r->reorder_time[j] +
				HT_RX_REORDER_BUF_TIMEOUT_MS)
				goto set_release_timer;

			/* don't leave incomplete A-MSDUs around */
			for (i = (index + 1) % r->buf_size; i != j;
				i = (i + 1) % r->buf_size)
				phl_recycle_rx_buf(phl_info, r->reorder_buf[i]);

			PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "release an RX reorder frame due to timeout on earlier frames\n");

			phl_release_reorder_frame(phl_info, r, j, frames);

			/*
			 * Increment the head seq# also for the skipped slots.
			 */
			r->head_seq_num =
				(r->head_seq_num + skipped) & SEQ_MASK;
			skipped = 0;
		}
	} else while (r->reorder_buf[index]) {
		phl_release_reorder_frame(phl_info, r, index, frames);
		index = reorder_index(r, r->head_seq_num);
	}

	if (r->stored_mpdu_num) {
		j = index = r->head_seq_num % r->buf_size;

		for (; j != (index - 1) % r->buf_size;
			j = (j + 1) % r->buf_size) {
			if (r->reorder_buf[j])
				break;
		}

set_release_timer:

		if (!r->removed)
			_os_set_timer(r->drv_priv, &r->sta->reorder_timer,
			              HT_RX_REORDER_BUF_TIMEOUT_MS);
	} else {
		/* TODO: the cancel-timer implementation on Linux is
			del_timer_sync(); it can't be called with the same spinlock
			held as the expiration callback, as that causes a potential
			deadlock. */
		_os_cancel_timer_async(r->drv_priv, &r->sta->reorder_timer);
	}
}

void phl_sta_rx_reorder_timer_expired(void *t)
{
	/* ref sta_rx_agg_reorder_timer_expired() */

	struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)t;
	struct rtw_phl_com_t *phl_com = sta->wrole->phl_com;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
	void *drv_priv = phl_to_drvpriv(phl_info);
	u8 i = 0;

	PHL_INFO("Rx reorder timer expired, sta=0x%p\n", sta);

	for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
		_os_list frames;

		INIT_LIST_HEAD(&frames);

		_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		if (sta->tid_rx[i])
			phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

		phl_handle_rx_frame_list(phl_info, &frames);
#ifdef PHL_RX_BATCH_IND
		_phl_indic_new_rxpkt(phl_info);
#endif
	}

	_os_event_set(drv_priv, &sta->comp_sync);
}

static void phl_release_reorder_frames(struct phl_info_t *phl_info,
										struct phl_tid_ampdu_rx *r,
										u16 head_seq_num, _os_list *frames)
{
	/* ref ieee80211_release_reorder_frames() and
		wil_release_reorder_frames() */

	int index;

	/* note: this function is never called with
	 * hseq preceding r->head_seq_num, i.e. it is always true that
	 * !seq_less(hseq, r->head_seq_num)
	 * and thus on loop exit it should be
	 * r->head_seq_num == hseq
	 */
	while (seq_less(r->head_seq_num, head_seq_num) &&
		r->stored_mpdu_num) { /* Note: do we need to check this? */
		index = reorder_index(r, r->head_seq_num);
		phl_release_reorder_frame(phl_info, r, index, frames);
	}
	r->head_seq_num = head_seq_num;
}
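
/*
 * Example (illustrative): with head_seq_num = 0x100 and a target of
 * 0x104, the slots for 0x100..0x103 are flushed in order (empty slots
 * just advance the head inside phl_release_reorder_frame()), and the
 * trailing assignment pins head_seq_num to 0x104 even if
 * stored_mpdu_num reached zero before the loop got there.
 */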

void rtw_phl_flush_reorder_buf(void *phl, struct rtw_phl_stainfo_t *sta)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	_os_list frames;
	u8 i = 0;

	PHL_INFO("%s: sta=0x%p\n", __FUNCTION__, sta);

	INIT_LIST_HEAD(&frames);

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
	for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
		if (sta->tid_rx[i])
			phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
	}
	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	phl_handle_rx_frame_list(phl_info, &frames);
#ifdef PHL_RX_BATCH_IND
	_phl_indic_new_rxpkt(phl_info);
#endif
}

static bool phl_manage_sta_reorder_buf(struct phl_info_t *phl_info,
                                       struct rtw_phl_rx_pkt *pkt,
                                       struct phl_tid_ampdu_rx *r,
                                       _os_list *frames)
{
	/* ref ieee80211_sta_manage_reorder_buf() and wil_rx_reorder() */

	struct rtw_r_meta_data *meta = &pkt->r.mdata;
	u16 mpdu_seq_num = meta->seq;
	u16 head_seq_num, buf_size;
	int index;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	buf_size = r->buf_size;
	head_seq_num = r->head_seq_num;

	/*
	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
	 * be reordered.
	 */
	if (!r->started) {
		if (seq_less(mpdu_seq_num, head_seq_num))
			return false;
		r->started = true;
	}

	if (r->sleep) {
		PHL_INFO("tid = %d reorder buffer handling after wake up\n",
		         r->tid);
		PHL_INFO("Update head seq(0x%03x) to the first rx seq(0x%03x) after wake up\n",
		         r->head_seq_num, mpdu_seq_num);
		r->head_seq_num = mpdu_seq_num;
		head_seq_num = r->head_seq_num;
		r->sleep = false;
	}

	/* frame with out-of-date sequence number */
	if (seq_less(mpdu_seq_num, head_seq_num)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
				meta->seq, r->head_seq_num);
		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
		return true;
	}

	/*
	 * If the frame's sequence number exceeds our buffering window,
	 * release some previous frames to make room for this one.
	 */
	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
		/* release stored frames up to new head to stack */
		phl_release_reorder_frames(phl_info, r, head_seq_num, frames);
	}

	/* Now the new frame is always in the range of the reordering buffer */

	index = reorder_index(r, mpdu_seq_num);

	/* check if we already stored this frame */
	if (r->reorder_buf[index]) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: duplicate seq 0x%03x head 0x%03x\n",
				meta->seq, r->head_seq_num);
		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
		return true;
	}

	/*
	 * If the current MPDU is in the right order and nothing else
	 * is stored, we can process it directly, no need to buffer it.
	 * If it is first but there's something stored, we may be able
	 * to release frames after this one.
	 */
	if (mpdu_seq_num == r->head_seq_num &&
		r->stored_mpdu_num == 0) {
		r->head_seq_num = seq_inc(r->head_seq_num);
		return false;
	}

	/* put the frame in the reordering buffer */
	r->reorder_buf[index] = pkt;
	r->reorder_time[index] = _os_get_cur_time_ms();
	r->stored_mpdu_num++;
	phl_reorder_release(phl_info, r, frames);

	return true;
}
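
/*
 * Window walk-through for the function above (illustrative): take
 * head_seq_num = 100 and buf_size = 64. seq = 99 arrives late and is
 * dropped as "old seq". seq = 170 lies beyond head + buf_size - 1 = 163,
 * so the head is first advanced to seq_inc(seq_sub(170, 64)) = 107,
 * releasing 100..106, and 170 is then buffered. seq = 100 with an empty
 * buffer is the in-order fast path: the head becomes 101 and the frame
 * goes straight to the caller (return false).
 */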

enum rtw_phl_status phl_rx_reorder(struct phl_info_t *phl_info,
                                   struct rtw_phl_rx_pkt *phl_rx,
                                   _os_list *frames)
{
	/* ref wil_rx_reorder() and ieee80211_rx_reorder_ampdu() */

	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_r_meta_data *meta = &phl_rx->r.mdata;
	u16 tid = meta->tid;
	struct rtw_phl_stainfo_t *sta = NULL;
	struct phl_tid_ampdu_rx *r;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;

	/*
	 * Remove FCS if it is appended
	 * TODO: handle more than one in pkt_list
	 */
	if (phl_info->phl_com->append_fcs) {
		/*
		 * Only the last MSDU of an A-MSDU includes FCS.
		 * TODO: If A-MSDU cut processing is in HAL, should only deduct
		 * FCS from the length of the last one of pkt_list. For such a
		 * case, phl_rx->r should have the pkt_list length.
		 */
		if (!(meta->amsdu_cut && !meta->last_msdu)) {
			if (phl_rx->r.pkt_list[0].length <= 4) {
				PHL_ERR("%s, pkt_list[0].length(%d) too short\n",
				        __func__, phl_rx->r.pkt_list[0].length);
				goto drop_frame;
			}
			phl_rx->r.pkt_list[0].length -= 4;
		}
	}

	if (phl_is_mp_mode(phl_info->phl_com))
		goto dont_reorder;

	if (meta->bc || meta->mc)
		goto dont_reorder;

	if (!meta->qos)
		goto dont_reorder;

	if (meta->q_null)
		goto dont_reorder;

	/* TODO: check ba policy is either ba or normal */

	/* if the mpdu is fragmented, don't reorder */
	if (meta->more_frag || meta->frag_num) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
		          "Receive QoS Data with more_frag=%u, frag_num=%u\n",
		          meta->more_frag, meta->frag_num);
		goto dont_reorder;
	}

	/* Use MAC ID from address CAM if this packet is address CAM matched */
	if (meta->addr_cam_vld)
		sta = rtw_phl_get_stainfo_by_macid(phl_info, meta->macid);

	/* Otherwise, search STA by TA */
	if (!sta || !sta->wrole) {
		struct rtw_wifi_role_t *wrole;

		wrole = phl_get_wrole_by_addr(phl_info, meta->mac_addr);
		if (wrole)
			sta = rtw_phl_get_stainfo_by_addr(phl_info,
			                                  wrole, meta->ta);
		if (!wrole || !sta) {
			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
			          "%s(): stainfo or wrole not found, cam=%u, macid=%u\n",
			          __FUNCTION__, meta->addr_cam, meta->macid);
			goto dont_reorder;
		}
	}

	phl_rx->r.tx_sta = sta;
	phl_rx->r.rx_role = sta->wrole;

	rtw_hal_set_sta_rx_sts(sta, false, meta);

	if (tid >= ARRAY_SIZE(sta->tid_rx)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "Fail: tid (%u) index out of range (%u)\n",
				tid, (u32)ARRAY_SIZE(sta->tid_rx));
		goto drop_frame;
	}

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	r = sta->tid_rx[tid];
	if (!r) {
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		goto dont_reorder;
	}

	if (!phl_manage_sta_reorder_buf(phl_info, phl_rx, r, frames)) {
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
		goto dont_reorder;
	}

	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	return RTW_PHL_STATUS_SUCCESS;

drop_frame:
	hci_trx_ops->recycle_rx_pkt(phl_info, phl_rx);
	return RTW_PHL_STATUS_FAILURE;

dont_reorder:
	list_add_tail(&phl_rx->list, frames);
	return RTW_PHL_STATUS_SUCCESS;
}


u8 phl_check_recv_ring_resource(struct phl_info_t *phl_info)
{
	struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
	u16 avail = 0, wptr = 0, rptr = 0;
	void *drv_priv = phl_to_drvpriv(phl_info);

	wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
	rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
	avail = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RX_RING_ENTRY_NUM);

	if (0 == avail)
		return false;
	else
		return true;
}

void dump_phl_rx_ring(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	s16 diff = 0;
	u16 idx = 0, endidx = 0;
	u16 phl_idx = 0, core_idx = 0;

	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "===Dump PHL RX Ring===\n");
	phl_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.phl_idx);
	core_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.core_idx);
	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
			"core_idx = %d\n"
			"phl_idx = %d\n",
			core_idx,
			phl_idx);

	diff = phl_idx - core_idx;
	if (diff < 0)
		diff = 4096 + diff;

	endidx = diff > 5 ? (core_idx + 6) : phl_idx;
	for (idx = core_idx + 1; idx < endidx; idx++) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "entry[%d] = %p\n", idx,
				phl_info->phl_rx_ring.entry[idx % 4096]);
	}
}


void phl_event_indicator(void *context)
{
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct rtw_phl_handler *phl_handler
		= (struct rtw_phl_handler *)phl_container_of(context,
							struct rtw_phl_handler,
							os_handler);
	struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
	struct rtw_phl_evt_ops *ops = NULL;
	struct rtw_evt_info_t *evt_info = NULL;
	void *drv_priv = NULL;
	enum rtw_phl_evt evt_bitmap = 0;

	FUNCIN_WSTS(sts);

	if (NULL != phl_info) {
		ops = &phl_info->phl_com->evt_ops;
		evt_info = &phl_info->phl_com->evt_info;
		drv_priv = phl_to_drvpriv(phl_info);

		_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
		evt_bitmap = evt_info->evt_bitmap;
		evt_info->evt_bitmap = 0;
		_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);

		if (RTW_PHL_EVT_RX & evt_bitmap) {
			if (NULL != ops->rx_process)
				sts = ops->rx_process(drv_priv);
			dump_phl_rx_ring(phl_info);
		}
	}

	FUNCOUT_WSTS(sts);
}

void _phl_rx_statistics_reset(struct phl_info_t *phl_info)
{
	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
	struct rtw_phl_stainfo_t *sta = NULL;
	struct rtw_wifi_role_t *role = NULL;
	void *drv = phl_to_drvpriv(phl_info);
	struct phl_queue *sta_queue;
	u8 i;

	for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
		role = &phl_com->wifi_roles[i];
		if (role->active && (role->mstate == MLME_LINKED)) {
			sta_queue = &role->assoc_sta_queue;
			_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
			phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
						&sta_queue->queue, list) {
				if (sta)
					rtw_hal_set_sta_rx_sts(sta, true, NULL);
			}
			_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
		}
	}
}

void
phl_rx_watchdog(struct phl_info_t *phl_info)
{
	struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;

	phl_rx_traffic_upd(phl_stats);
	phl_dump_rx_stats(phl_stats);
	_phl_rx_statistics_reset(phl_info);
}

u16 rtw_phl_query_new_rx_num(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_phl_rx_ring *ring = NULL;
	u16 new_rx = 0, wptr = 0, rptr = 0;

	if (NULL != phl_info) {
		ring = &phl_info->phl_rx_ring;
		wptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
						&ring->phl_idx);
		rptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
						&ring->core_idx);
		new_rx = phl_calc_avail_rptr(rptr, wptr,
						MAX_PHL_RX_RING_ENTRY_NUM);
	}

	return new_rx;
}

struct rtw_recv_pkt *rtw_phl_query_rx_pkt(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_phl_rx_ring *ring = NULL;
	struct rtw_recv_pkt *recvpkt = NULL;
	void *drv_priv = NULL;
	u16 ring_res = 0, wptr = 0, rptr = 0;

	if (NULL != phl_info) {
		ring = &phl_info->phl_rx_ring;
		drv_priv = phl_to_drvpriv(phl_info);

		wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
		rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);

		ring_res = phl_calc_avail_rptr(rptr, wptr,
							MAX_PHL_RX_RING_ENTRY_NUM);

		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
			"[4] %s::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
			__FUNCTION__,
			_os_atomic_read(drv_priv, &ring->phl_idx),
			_os_atomic_read(drv_priv, &ring->core_idx),
			ring_res);

		if (ring_res > 0) {
			rptr = rptr + 1;

			if (rptr >= MAX_PHL_RX_RING_ENTRY_NUM) {
				rptr = 0;
				recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
				ring->entry[rptr] = NULL;
				_os_atomic_set(drv_priv, &ring->core_idx, 0);
			} else {
				recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
				ring->entry[rptr] = NULL;
				_os_atomic_inc(drv_priv, &ring->core_idx);
			}
			if (NULL == recvpkt)
				PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "recvpkt is NULL!\n");
			else
				phl_rx_statistics(phl_info, recvpkt);
		} else {
			PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no available rx packet to query!\n");
		}
	}

	return recvpkt;
}
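
/*
 * Consumer-side example (illustrative): phl_calc_avail_rptr() reports the
 * entries pending between the two indices, e.g. core_idx = 7 and
 * phl_idx = 10 give ring_res = 3, so three successive calls read
 * entry[8], entry[9] and entry[10], NULLing each slot and advancing
 * core_idx, with the same wrap-to-zero special case as the producer side
 * in _phl_add_rx_pkt().
 */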

enum rtw_phl_status rtw_phl_return_rxbuf(void *phl, u8 *recvpkt)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_phl_rx_pkt *phl_rx = NULL;
	struct rtw_recv_pkt *r = (struct rtw_recv_pkt *)recvpkt;

	do {
		if (NULL == recvpkt)
			break;

		phl_rx = phl_container_of(r, struct rtw_phl_rx_pkt, r);
		phl_recycle_rx_buf(phl_info, phl_rx);
		pstatus = RTW_PHL_STATUS_SUCCESS;
	} while (false);

	return pstatus;
}


enum rtw_phl_status rtw_phl_start_rx_process(void *phl)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;

	FUNCIN_WSTS(pstatus);

	pstatus = phl_schedule_handler(phl_info->phl_com,
	                               &phl_info->phl_rx_handler);

	FUNCOUT_WSTS(pstatus);

	return pstatus;
}

void rtw_phl_rx_bar(void *phl, struct rtw_phl_stainfo_t *sta, u8 tid, u16 seq)
{
	/* ref ieee80211_rx_h_ctrl() and wil_rx_bar() */

	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct phl_tid_ampdu_rx *r;
	_os_list frames;

	INIT_LIST_HEAD(&frames);

	if (tid >= RTW_MAX_TID_NUM)
		return;

	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);

	r = sta->tid_rx[tid];
	if (!r) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR for non-existing TID %d\n", tid);
		goto out;
	}

	if (seq_less(seq, r->head_seq_num)) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR Seq 0x%03x preceding head 0x%03x\n",
					seq, r->head_seq_num);
		goto out;
	}

	PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "BAR: TID %d Seq 0x%03x head 0x%03x\n",
				tid, seq, r->head_seq_num);

	phl_release_reorder_frames(phl_info, r, seq, &frames);
	phl_handle_rx_frame_list(phl_info, &frames);

out:
	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
}
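
/*
 * BAR example (illustrative): a Block Ack Request for TID 3 carrying a
 * starting sequence of 0x050 while head_seq_num = 0x048 releases the
 * buffered slots for 0x048..0x04F and moves the head to 0x050; a BAR
 * whose SSN precedes the current head is ignored, since those frames
 * were already delivered.
 */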

enum rtw_rx_status rtw_phl_get_rx_status(void *phl)
{
#ifdef CONFIG_USB_HCI
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	enum rtw_hci_type hci_type = phl_info->phl_com->hci_type;

	if (hci_type & RTW_HCI_USB)
		return rtw_hal_get_usb_status(phl_info->hal);
#endif

	return RTW_STATUS_RX_OK;
}

enum rtw_phl_status
rtw_phl_enter_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	enum rtw_hal_status status;

	status = rtw_hal_enter_mon_mode(phl_info->hal, wrole->hw_band);
	if (status != RTW_HAL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
		          "%s(): rtw_hal_enter_mon_mode() failed, status=%d\n",
		          __FUNCTION__, status);
		return RTW_PHL_STATUS_FAILURE;
	}

	return RTW_PHL_STATUS_SUCCESS;
}

enum rtw_phl_status
rtw_phl_leave_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	enum rtw_hal_status status;

	status = rtw_hal_leave_mon_mode(phl_info->hal, wrole->hw_band);
	if (status != RTW_HAL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
		          "%s(): rtw_hal_leave_mon_mode() failed, status=%d\n",
		          __FUNCTION__, status);
		return RTW_PHL_STATUS_FAILURE;
	}

	return RTW_PHL_STATUS_SUCCESS;
}

#ifdef CONFIG_PHL_RX_PSTS_PER_PKT
void
_phl_rx_proc_frame_list(struct phl_info_t *phl_info, struct phl_queue *pq)
{
	void *d = phl_to_drvpriv(phl_info);
	_os_list *pkt_list = NULL;
	struct rtw_phl_rx_pkt *phl_rx = NULL;

	if (NULL == pq)
		return;
	if (0 == pq->cnt)
		return;

	PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
		  "_phl_rx_proc_frame_list : queue ele cnt = %d\n",
		   pq->cnt);

	while (true == pq_pop(d, pq, &pkt_list, _first, _bh)) {
		phl_rx = (struct rtw_phl_rx_pkt *)pkt_list;
		phl_info->hci_trx_ops->rx_handle_normal(phl_info, phl_rx);
	}
}

enum rtw_phl_status
phl_rx_proc_phy_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *ppdu_sts)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
	struct rtw_phl_rx_pkt *phl_rx = NULL;
	void *d = phl_to_drvpriv(phl_info);
	struct rtw_phl_rssi_stat *rssi_stat = &phl_info->phl_com->rssi_stat;
	_os_list *frame = NULL;
	bool upt_psts = true;
	u8 i = 0;
	enum phl_band_idx band = HW_BAND_0;

	if (NULL == ppdu_sts)
		return pstatus;

	if (false == psts_info->en_psts_per_pkt)
		return pstatus;

	if (ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			  "ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!\n");
		return pstatus;
	}

	band = (ppdu_sts->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;

	if (false == psts_info->en_ppdu_sts[band])
		return pstatus;

	if (ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt[band]) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			  "ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt!\n");
		upt_psts = false;
	}

	sts_entry = &psts_info->sts_ent[band][psts_info->cur_ppdu_cnt[band]];
	/* check list empty */
	if (0 == sts_entry->frames.cnt) {
		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
			  "cur_ppdu_cnt %d --> sts_entry->frames.cnt = 0\n",
			  psts_info->cur_ppdu_cnt[band]);
		pstatus = RTW_PHL_STATUS_SUCCESS;
		return pstatus;
	}

	/* start updating phy info per pkt */
	if (false == pq_get_front(d, &sts_entry->frames, &frame, _bh)) {
		PHL_ERR(" %s list empty\n", __FUNCTION__);
		return pstatus;
	}
	/**
	 * TODO : How to filter the case :
	 *	pkt(ppdu_cnt = 0) --> missing: psts(ppdu_cnt = 0) --> (all of the pkt, psts dropped/missing)
	 *	--> ppdu_sts(ppdu_cnt = 0) (not for the current buffered pkt)
	 * workaround : check rate/bw/ppdu_type/... etc
	 **/
	phl_rx = (struct rtw_phl_rx_pkt *)frame;
	if (upt_psts &&
	   ((phl_rx->r.mdata.rx_rate != ppdu_sts->r.mdata.rx_rate) ||
	    (phl_rx->r.mdata.bw != ppdu_sts->r.mdata.bw) ||
	    (phl_rx->r.mdata.rx_gi_ltf != ppdu_sts->r.mdata.rx_gi_ltf) ||
	    (phl_rx->r.mdata.ppdu_type != ppdu_sts->r.mdata.ppdu_type))) {
		/**
		 * ppdu status is not for the buffered pkt,
		 * skip update of phy status to phl_rx
		 **/
		upt_psts = false;
	}
	/* Get Frame Type */
	ppdu_sts->r.phy_info.frame_type =
		PHL_GET_80211_HDR_TYPE(phl_rx->r.pkt_list[0].vir_addr);

	if ((false == ppdu_sts->r.phy_info.is_valid) &&
	    (true == psts_info->en_fake_psts)) {
		if (RTW_FRAME_TYPE_MGNT == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_MGNT_ACAM_A1M];
		} else if (RTW_FRAME_TYPE_DATA == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_DATA_ACAM_A1M];
		} else if (RTW_FRAME_TYPE_CTRL == phl_rx->r.mdata.frame_type) {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_CTRL_ACAM_A1M];
		} else {
			ppdu_sts->r.phy_info.rssi =
				rssi_stat->ma_rssi[RTW_RSSI_UNKNOWN];
		}
		for (i = 0; i < RTW_PHL_MAX_RF_PATH; i++) {
			ppdu_sts->r.phy_info.rssi_path[i] =
					ppdu_sts->r.phy_info.rssi;
		}
		ppdu_sts->r.phy_info.ch_idx = rtw_hal_get_cur_ch(phl_info->hal,
						phl_rx->r.mdata.bb_sel);
		ppdu_sts->r.phy_info.is_valid = true;
	}

	do {
		if (false == upt_psts)
			break;
		phl_rx = (struct rtw_phl_rx_pkt *)frame;
		_os_mem_cpy(d, &(phl_rx->r.phy_info), &(ppdu_sts->r.phy_info),
			    sizeof(struct rtw_phl_ppdu_phy_info));
	} while ((true == psts_info->psts_ampdu) &&
		 (pq_get_next(d, &sts_entry->frames, frame, &frame, _bh)));

	/* 2. indicate the frame list */
	_phl_rx_proc_frame_list(phl_info, &sts_entry->frames);
	/* 3. reset the queue */
	pq_reset(d, &(sts_entry->frames), _bh);

	return pstatus;
}

bool
phl_rx_proc_wait_phy_sts(struct phl_info_t *phl_info,
			 struct rtw_phl_rx_pkt *phl_rx)
{
	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
	void *d = phl_to_drvpriv(phl_info);
	u8 i = 0;
	bool ret = false;
	enum phl_band_idx band = HW_BAND_0;

	if (false == psts_info->en_psts_per_pkt)
		return ret;

	if (phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
		PHL_ASSERT("phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!");
		return ret;
	}

	band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;

	if (false == psts_info->en_ppdu_sts[band])
		return ret;

	if (psts_info->cur_ppdu_cnt[band] != phl_rx->r.mdata.ppdu_cnt) {
		/* start of PPDU */
		/* 1. Check that all of the buffer lists are empty */
		/* only check the target rx pkt band */
		for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
			sts_entry = &psts_info->sts_ent[band][i];
			if (0 != sts_entry->frames.cnt) {
				/* need to indicate first */
				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
					  "band %d ; ppdu_cnt %d queue is not empty\n",
					  band, i);
				_phl_rx_proc_frame_list(phl_info,
						&sts_entry->frames);
				pq_reset(d, &(sts_entry->frames), _bh);
			}
		}

		/* 2. check ppdu status filter condition */
		/* Filter function is supported only if rxd = long_rxd */
		if ((1 == phl_rx->r.mdata.long_rxd) &&
		    (0 != (psts_info->ppdu_sts_filter &
		           BIT(phl_rx->r.mdata.frame_type)))) {
			/* 3. add new rx pkt to the tail of the queue */
			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
			pq_reset(d, &(sts_entry->frames), _bh);
			pq_push(d, &(sts_entry->frames), &phl_rx->list,
				_tail, _bh);
			ret = true;
		}
		psts_info->cur_ppdu_cnt[band] = phl_rx->r.mdata.ppdu_cnt;
	} else {
		/* 1. check ppdu status filter condition */
		/* Filter function is supported only if rxd = long_rxd */
		if ((1 == phl_rx->r.mdata.long_rxd) &&
		    (0 != (psts_info->ppdu_sts_filter &
		           BIT(phl_rx->r.mdata.frame_type)))) {
			/* 2. add to frame list */
			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
			if (0 == sts_entry->frames.cnt) {
				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
					  "MPDU is not the start of PPDU, but the queue is empty!!!\n");
			}
			pq_push(d, &(sts_entry->frames), &phl_rx->list,
				_tail, _bh);
			ret = true;
		}
	}

	return ret;
}
#endif

#ifdef CONFIG_PHY_INFO_NTFY
void _phl_rx_post_proc_ppdu_sts(void *priv, struct phl_msg *msg)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)priv;

	if (msg->inbuf && msg->inlen) {
		_os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
	}
}

bool
_phl_rx_proc_aggr_psts_ntfy(struct phl_info_t *phl_info,
			    struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent)
{
	struct rtw_phl_ppdu_sts_info *ppdu_info =
			&phl_info->phl_com->ppdu_sts_info;
	struct rtw_phl_ppdu_sts_ntfy *psts_ntfy = NULL;
	u8 i = 0;
	bool ret = false;

	if (ppdu_info->msg_aggr_cnt == 0) {
		/* reset entry valid status */
		for (i = 0; i < MAX_PSTS_MSG_AGGR_NUM; i++) {
			ppdu_info->msg_aggr_buf[i].vld = false;
		}
	}
	/* copy to the buf */
	psts_ntfy = &ppdu_info->msg_aggr_buf[ppdu_info->msg_aggr_cnt];
	psts_ntfy->frame_type = ppdu_sts_ent->frame_type;
	_os_mem_cpy(phl_info->phl_com->drv_priv,
		    &psts_ntfy->phy_info,
		    &ppdu_sts_ent->phy_info,
		    sizeof(struct rtw_phl_ppdu_phy_info));
	_os_mem_cpy(phl_info->phl_com->drv_priv,
		    psts_ntfy->src_mac_addr,
		    ppdu_sts_ent->src_mac_addr,
		    MAC_ADDRESS_LENGTH);
	psts_ntfy->vld = true;

	/* update counter */
	ppdu_info->msg_aggr_cnt++;
	if (ppdu_info->msg_aggr_cnt >= MAX_PSTS_MSG_AGGR_NUM) {
		ppdu_info->msg_aggr_cnt = 0;
		ret = true;
	}

	return ret;
}
#endif

void
phl_rx_proc_ppdu_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *phl_rx)
{
	u8 i = 0;
	struct rtw_phl_ppdu_sts_info *ppdu_info = NULL;
	struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent = NULL;
	struct rtw_phl_stainfo_t *psta = NULL;
#ifdef CONFIG_PHY_INFO_NTFY
	struct rtw_phl_ppdu_sts_ntfy *psts_ntfy;
	struct phl_msg msg = {0};
	struct phl_msg_attr attr = {0};
	void *d = phl_to_drvpriv(phl_info);
#endif
	enum phl_band_idx band = HW_BAND_0;
	struct rtw_rssi_info *rssi_sts;

	if ((NULL == phl_info) || (NULL == phl_rx))
		return;

	band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
	ppdu_info = &phl_info->phl_com->ppdu_sts_info;
	ppdu_sts_ent = &ppdu_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];

	if (false == ppdu_sts_ent->valid)
		return;

	if (true == ppdu_sts_ent->phl_done)
		return;

	ppdu_sts_ent->phl_done = true;

	/* update phl self variables */
	for (i = 0; i < ppdu_sts_ent->usr_num; i++) {
		if (ppdu_sts_ent->sta[i].vld) {
			psta = rtw_phl_get_stainfo_by_macid(phl_info,
				 ppdu_sts_ent->sta[i].macid);
			if (psta == NULL)
				continue;
			rssi_sts = &psta->hal_sta->rssi_stat;
			STA_UPDATE_MA_RSSI_FAST(rssi_sts->ma_rssi, ppdu_sts_ent->phy_info.rssi);
			/* update (re)associate req/resp pkt rssi */
			if (RTW_IS_ASOC_PKT(ppdu_sts_ent->frame_type)) {
				rssi_sts->assoc_rssi =
						ppdu_sts_ent->phy_info.rssi;
			}

			if (RTW_IS_BEACON_OR_PROBE_RESP_PKT(
						ppdu_sts_ent->frame_type)) {
				if (0 == rssi_sts->ma_rssi_mgnt) {
					rssi_sts->ma_rssi_mgnt =
						ppdu_sts_ent->phy_info.rssi;
				} else {
					STA_UPDATE_MA_RSSI_FAST(
						rssi_sts->ma_rssi_mgnt,
						ppdu_sts_ent->phy_info.rssi);
				}
			}
		} else {
			if (RTW_IS_ASOC_REQ_PKT(ppdu_sts_ent->frame_type) &&
				(ppdu_sts_ent->usr_num == 1)) {
				psta = rtw_phl_get_stainfo_by_addr_ex(phl_info,
						ppdu_sts_ent->src_mac_addr);
				if (psta) {
					psta->hal_sta->rssi_stat.assoc_rssi =
						ppdu_sts_ent->phy_info.rssi;

					#ifdef DBG_AP_CLIENT_ASSOC_RSSI
					PHL_INFO("%s [Rx-ASOC_REQ] - macid:%d, MAC-Addr:%02x-%02x-%02x-%02x-%02x-%02x, assoc_rssi:%d\n",
						__func__,
						psta->macid,
						ppdu_sts_ent->src_mac_addr[0],
						ppdu_sts_ent->src_mac_addr[1],
						ppdu_sts_ent->src_mac_addr[2],
						ppdu_sts_ent->src_mac_addr[3],
						ppdu_sts_ent->src_mac_addr[4],
						ppdu_sts_ent->src_mac_addr[5],
						psta->hal_sta->rssi_stat.assoc_rssi);
					#endif
				}
			}
		}
	}

#ifdef CONFIG_PHY_INFO_NTFY
	/* 2. prepare and send psts notify to core */
	if ((RTW_FRAME_TYPE_BEACON == ppdu_sts_ent->frame_type) ||
	    (RTW_FRAME_TYPE_PROBE_RESP == ppdu_sts_ent->frame_type)) {

		if (false == _phl_rx_proc_aggr_psts_ntfy(phl_info,
							 ppdu_sts_ent)) {
			return;
		}

		/* send aggr psts ntfy */
		psts_ntfy = (struct rtw_phl_ppdu_sts_ntfy *)_os_kmem_alloc(d,
				MAX_PSTS_MSG_AGGR_NUM * sizeof(struct rtw_phl_ppdu_sts_ntfy));
		if (psts_ntfy == NULL) {
			PHL_ERR("%s: alloc ppdu sts for ntfy fail.\n", __func__);
			return;
		}

		_os_mem_cpy(phl_info->phl_com->drv_priv,
			    psts_ntfy,
			    ppdu_info->msg_aggr_buf,
			    (MAX_PSTS_MSG_AGGR_NUM *
			     sizeof(struct rtw_phl_ppdu_sts_ntfy)));

		msg.inbuf = (u8 *)psts_ntfy;
		msg.inlen = (MAX_PSTS_MSG_AGGR_NUM *
			     sizeof(struct rtw_phl_ppdu_sts_ntfy));
		SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PSTS);
		SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_RX_PSTS);
		attr.completion.completion = _phl_rx_post_proc_ppdu_sts;
		attr.completion.priv = phl_info;
		if (phl_msg_hub_send(phl_info, &attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
			PHL_ERR("%s: send msg_hub failed\n", __func__);
			_os_kmem_free(d, psts_ntfy,
				      (MAX_PSTS_MSG_AGGR_NUM *
				       sizeof(struct rtw_phl_ppdu_sts_ntfy)));
		}
	}
#endif
}

static void _dump_rx_reorder_info(struct phl_info_t *phl_info,
				  struct rtw_phl_stainfo_t *sta)
{
	void *drv_priv = phl_to_drvpriv(phl_info);
	_os_spinlockfg sp_flags;
	u8 i;

	PHL_INFO("dump rx reorder buffer info:\n");
	for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
		_os_spinlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
		if (sta->tid_rx[i]) {
			PHL_INFO("== tid = %d ==\n", sta->tid_rx[i]->tid);
			PHL_INFO("head_seq_num = %d\n",
				 sta->tid_rx[i]->head_seq_num);
			PHL_INFO("stored_mpdu_num = %d\n",
				 sta->tid_rx[i]->stored_mpdu_num);
			PHL_INFO("ssn = %d\n", sta->tid_rx[i]->ssn);
			PHL_INFO("buf_size = %d\n", sta->tid_rx[i]->buf_size);
			PHL_INFO("started = %d\n", sta->tid_rx[i]->started);
			PHL_INFO("removed = %d\n", sta->tid_rx[i]->removed);
		}
		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
	}
}

void phl_dump_all_sta_rx_info(struct phl_info_t *phl_info)
{
	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
	struct rtw_phl_stainfo_t *sta = NULL;
	struct rtw_wifi_role_t *role = NULL;
	void *drv = phl_to_drvpriv(phl_info);
	struct phl_queue *sta_queue;
	_os_spinlockfg sp_flags;
	u8 i;

	PHL_INFO("dump all sta rx info:\n");
	for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
		role = &phl_com->wifi_roles[i];
		if (role->active) {
			PHL_INFO("wrole idx = %d\n", i);
			PHL_INFO("wrole type = %d\n", role->type);
			PHL_INFO("wrole mstate = %d\n", role->mstate);

			sta_queue = &role->assoc_sta_queue;
			_os_spinlock(drv, &sta_queue->lock, _irq, &sp_flags);
			phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
						&sta_queue->queue, list) {
				PHL_INFO("%s MACID:%d %02x:%02x:%02x:%02x:%02x:%02x\n",
					 __func__, sta->macid,
					 sta->mac_addr[0],
					 sta->mac_addr[1],
					 sta->mac_addr[2],
					 sta->mac_addr[3],
					 sta->mac_addr[4],
					 sta->mac_addr[5]);
				_dump_rx_reorder_info(phl_info, sta);
			}
			_os_spinunlock(drv, &sta_queue->lock, _irq, &sp_flags);
		}
	}
}

void phl_rx_dbg_dump(struct phl_info_t *phl_info, u8 band_idx)
{
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;

	phl_status = phl_cmd_enqueue(phl_info,
	                   band_idx,
	                   MSG_EVT_DBG_RX_DUMP,
	                   NULL,
	                   0,
	                   NULL,
	                   PHL_CMD_NO_WAIT,
	                   0);
	if (phl_status != RTW_PHL_STATUS_SUCCESS) {
		PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: cmd enqueue fail!\n",
			  __func__);
	}
}