xref: /OK3568_Linux_fs/external/rkwifibt/drivers/rtl8852be/phl/phl_rx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /******************************************************************************
2  *
3  * Copyright(c) 2019 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _PHL_RX_C_
16 #include "phl_headers.h"
17 
18 
19 struct rtw_phl_rx_pkt *rtw_phl_query_phl_rx(void *phl)
20 {
21 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
22 	void *drv_priv = phl_to_drvpriv(phl_info);
23 	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
24 	struct rtw_phl_rx_pkt *phl_rx = NULL;
25 
26 	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
27 
28 	_os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
29 
30 	if (false == list_empty(&rx_pkt_pool->idle)) {
31 		phl_rx = list_first_entry(&rx_pkt_pool->idle,
32 					struct rtw_phl_rx_pkt, list);
33 		list_del(&phl_rx->list);
34 		rx_pkt_pool->idle_cnt--;
35 	}
36 
37 	_os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
38 
39 	return phl_rx;
40 }
41 
42 u8 rtw_phl_is_phl_rx_idle(struct phl_info_t *phl_info)
43 {
44 	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
45 	u8 res = false;
46 
47 	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
48 
49 	_os_spinlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
50 
51 	if (MAX_PHL_RING_RX_PKT_NUM == rx_pkt_pool->idle_cnt)
52 		res = true;
53 	else
54 		res = false;
55 
56 	_os_spinunlock(phl_to_drvpriv(phl_info), &rx_pkt_pool->idle_lock, _bh, NULL);
57 
58 	return res;
59 }
60 
61 void phl_dump_rx_stats(struct rtw_stats *stats)
62 {
63 	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
64 		  "Dump Rx statistics\n"
65 		  "rx_byte_uni = %lld\n"
66 		  "rx_byte_total = %lld\n"
67 		  "rx_tp_kbits = %d\n"
68 		  "last_rx_time_ms = %d\n",
69 		  stats->rx_byte_uni,
70 		  stats->rx_byte_total,
71 		  stats->rx_tp_kbits,
72 		  stats->last_rx_time_ms);
73 }
74 
75 void phl_reset_rx_stats(struct rtw_stats *stats)
76 {
77 	stats->rx_byte_uni = 0;
78 	stats->rx_byte_total = 0;
79 	stats->rx_tp_kbits = 0;
80 	stats->last_rx_time_ms = 0;
81 	stats->rxtp.last_calc_time_ms = 0;
82 	stats->rxtp.last_calc_bits = 0;
83 	stats->rx_traffic.lvl = RTW_TFC_IDLE;
84 	stats->rx_traffic.sts = 0;
85 	stats->rx_tf_cnt = 0;
86 	stats->pre_rx_tf_cnt = 0;
87 }
88 
89 void
90 phl_rx_traffic_upd(struct rtw_stats *sts)
91 {
92 	u32 tp_k = 0, tp_m = 0;
93 	enum rtw_tfc_lvl rx_tfc_lvl = RTW_TFC_IDLE;
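	/*
	 * rx_tp_kbits is the measured throughput in kbit/s; shifting it
	 * right by 10 approximates a divide by 1024 to obtain Mbit/s for
	 * the high/mid/low threshold comparisons below.
	 */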
94 	tp_k = sts->rx_tp_kbits;
95 	tp_m = sts->rx_tp_kbits >> 10;
96 
97 	if (tp_m >= RX_HIGH_TP_THRES_MBPS)
98 		rx_tfc_lvl = RTW_TFC_HIGH;
99 	else if (tp_m >= RX_MID_TP_THRES_MBPS)
100 		rx_tfc_lvl = RTW_TFC_MID;
101 	else if (tp_m >= RX_LOW_TP_THRES_MBPS)
102 		rx_tfc_lvl = RTW_TFC_LOW;
103 	else if (tp_k >= RX_ULTRA_LOW_TP_THRES_KBPS)
104 		rx_tfc_lvl = RTW_TFC_ULTRA_LOW;
105 	else
106 		rx_tfc_lvl = RTW_TFC_IDLE;
107 
108 	if (sts->rx_traffic.lvl > rx_tfc_lvl) {
109 		sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_DECREASE);
110 		sts->rx_traffic.lvl = rx_tfc_lvl;
111 	} else if (sts->rx_traffic.lvl < rx_tfc_lvl) {
112 		sts->rx_traffic.sts = (TRAFFIC_CHANGED | TRAFFIC_INCREASE);
113 		sts->rx_traffic.lvl = rx_tfc_lvl;
114 	} else if (sts->rx_traffic.sts &
115 		(TRAFFIC_CHANGED | TRAFFIC_INCREASE | TRAFFIC_DECREASE)) {
116 		sts->rx_traffic.sts &= ~(TRAFFIC_CHANGED | TRAFFIC_INCREASE |
117 					 TRAFFIC_DECREASE);
118 	}
119 }
120 
121 void phl_update_rx_stats(struct rtw_stats *stats, struct rtw_recv_pkt *rx_pkt)
122 {
123 	u32 diff_t = 0, cur_time = _os_get_cur_time_ms();
124 	u64 diff_bits = 0;
125 
126 	stats->last_rx_time_ms = cur_time;
127 	stats->rx_byte_total += rx_pkt->mdata.pktlen;
128 	if (rx_pkt->mdata.bc == 0 && rx_pkt->mdata.mc == 0)
129 		stats->rx_byte_uni += rx_pkt->mdata.pktlen;
130 
131 	if (0 == stats->rxtp.last_calc_time_ms ||
132 		0 == stats->rxtp.last_calc_bits) {
133 		stats->rxtp.last_calc_time_ms = stats->last_rx_time_ms;
134 		stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
135 	} else {
136 		if (cur_time >= stats->rxtp.last_calc_time_ms) {
137 			diff_t = cur_time - stats->rxtp.last_calc_time_ms;
138 		} else {
139 			diff_t = RTW_U32_MAX - stats->rxtp.last_calc_time_ms +
140 				cur_time + 1;
141 		}
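		/*
		 * diff_t is in ms and diff_bits is the number of bits received
		 * since the last calculation, so diff_bits / diff_t is bits per
		 * ms, which equals kbit/s; no further scaling of rx_tp_kbits is
		 * needed. The branch above also handles wraparound of the u32
		 * millisecond tick counter.
		 */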
142 		if (diff_t > RXTP_CALC_DIFF_MS && stats->rx_byte_uni != 0) {
143 			diff_bits = (stats->rx_byte_uni * 8) -
144 				stats->rxtp.last_calc_bits;
145 			stats->rx_tp_kbits = (u32)_os_division64(diff_bits,
146 								 diff_t);
147 			stats->rxtp.last_calc_bits = stats->rx_byte_uni * 8;
148 			stats->rxtp.last_calc_time_ms = cur_time;
149 		}
150 	}
151 }
152 
153 void phl_rx_statistics(struct phl_info_t *phl_info, struct rtw_recv_pkt *rx_pkt)
154 {
155 	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
156 	struct rtw_stats *phl_stats = &phl_com->phl_stats;
157 	struct rtw_stats *sta_stats = NULL;
158 	struct rtw_phl_stainfo_t *sta = NULL;
159 	u16 macid = rx_pkt->mdata.macid;
160 
161 	if (!phl_macid_is_valid(phl_info, macid))
162 		goto dev_stat;
163 
164 	sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
165 
166 	if (NULL == sta)
167 		goto dev_stat;
168 	sta_stats = &sta->stats;
169 
170 	phl_update_rx_stats(sta_stats, rx_pkt);
171 dev_stat:
172 	phl_update_rx_stats(phl_stats, rx_pkt);
173 }
174 
175 void phl_release_phl_rx(struct phl_info_t *phl_info,
176 				struct rtw_phl_rx_pkt *phl_rx)
177 {
178 	void *drv_priv = phl_to_drvpriv(phl_info);
179 	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
180 
181 	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
182 
183 	_os_spinlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
184 	_os_mem_set(phl_to_drvpriv(phl_info), &phl_rx->r, 0, sizeof(phl_rx->r));
185 	phl_rx->type = RTW_RX_TYPE_MAX;
186 	phl_rx->rxbuf_ptr = NULL;
187 	INIT_LIST_HEAD(&phl_rx->list);
188 	list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
189 	rx_pkt_pool->idle_cnt++;
190 	_os_spinunlock(drv_priv, &rx_pkt_pool->idle_lock, _bh, NULL);
191 }
192 
193 static void phl_free_recv_pkt_pool(struct phl_info_t *phl_info)
194 {
195 	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
196 	u32 buf_len = 0;
197 	FUNCIN();
198 
199 	rx_pkt_pool = (struct phl_rx_pkt_pool *)phl_info->rx_pkt_pool;
200 	if (NULL != rx_pkt_pool) {
201 		_os_spinlock_free(phl_to_drvpriv(phl_info),
202 					&rx_pkt_pool->idle_lock);
203 		_os_spinlock_free(phl_to_drvpriv(phl_info),
204 					&rx_pkt_pool->busy_lock);
205 
206 		buf_len = sizeof(*rx_pkt_pool);
207 		_os_mem_free(phl_to_drvpriv(phl_info), rx_pkt_pool, buf_len);
208 	}
209 
210 	FUNCOUT();
211 }
212 
213 void phl_rx_deinit(struct phl_info_t *phl_info)
214 {
215 	/* TODO: rx reorder deinit */
216 
217 	/* TODO: peer info deinit */
218 
219 	phl_free_recv_pkt_pool(phl_info);
220 }
221 
222 
223 static enum rtw_phl_status phl_alloc_recv_pkt_pool(struct phl_info_t *phl_info)
224 {
225 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
226 	struct phl_rx_pkt_pool *rx_pkt_pool = NULL;
227 	struct rtw_phl_rx_pkt *phl_rx = NULL;
228 	u32 buf_len = 0, i = 0;
229 	FUNCIN_WSTS(pstatus);
230 
231 	buf_len = sizeof(*rx_pkt_pool);
232 	rx_pkt_pool = _os_mem_alloc(phl_to_drvpriv(phl_info), buf_len);
233 
234 	if (NULL != rx_pkt_pool) {
235 		_os_mem_set(phl_to_drvpriv(phl_info), rx_pkt_pool, 0, buf_len);
236 		INIT_LIST_HEAD(&rx_pkt_pool->idle);
237 		INIT_LIST_HEAD(&rx_pkt_pool->busy);
238 		_os_spinlock_init(phl_to_drvpriv(phl_info),
239 					&rx_pkt_pool->idle_lock);
240 		_os_spinlock_init(phl_to_drvpriv(phl_info),
241 					&rx_pkt_pool->busy_lock);
242 		rx_pkt_pool->idle_cnt = 0;
243 
244 		for (i = 0; i < MAX_PHL_RING_RX_PKT_NUM; i++) {
245 			phl_rx = &rx_pkt_pool->phl_rx[i];
246 			INIT_LIST_HEAD(&phl_rx->list);
247 			list_add_tail(&phl_rx->list, &rx_pkt_pool->idle);
248 			rx_pkt_pool->idle_cnt++;
249 		}
250 
251 		phl_info->rx_pkt_pool = rx_pkt_pool;
252 
253 		pstatus = RTW_PHL_STATUS_SUCCESS;
254 	}
255 
256 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
257 		phl_free_recv_pkt_pool(phl_info);
258 	FUNCOUT_WSTS(pstatus);
259 
260 	return pstatus;
261 }
262 
263 enum rtw_phl_status phl_rx_init(struct phl_info_t *phl_info)
264 {
265 	enum rtw_phl_status status;
266 
267 	/* Allocate rx packet pool */
268 	status = phl_alloc_recv_pkt_pool(phl_info);
269 	if (status != RTW_PHL_STATUS_SUCCESS)
270 		return status;
271 
272 	/* TODO: Peer info init */
273 
274 
275 	/* TODO: Rx reorder init */
276 
277 	return RTW_PHL_STATUS_SUCCESS;
278 }
279 
280 void phl_recycle_rx_buf(struct phl_info_t *phl_info,
281 				struct rtw_phl_rx_pkt *phl_rx)
282 {
283 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
284 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
285 	struct rtw_rx_buf *rx_buf = NULL;
286 
287 	do {
288 		if (NULL == phl_rx) {
289 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]phl_rx is NULL!\n");
290 			break;
291 		}
292 
293 		rx_buf = (struct rtw_rx_buf *)phl_rx->rxbuf_ptr;
294 
295 		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "[4] %s:: [%p]\n",
296 								__FUNCTION__, rx_buf);
297 		if (phl_rx->rxbuf_ptr) {
298 			pstatus = hci_trx_ops->recycle_rx_buf(phl_info, rx_buf,
299 								phl_rx->r.mdata.dma_ch,
300 								phl_rx->type);
301 		}
302 		if (RTW_PHL_STATUS_SUCCESS != pstatus && phl_rx->rxbuf_ptr)
303 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING]recycle hci rx buf error!\n");
304 
305 		phl_release_phl_rx(phl_info, phl_rx);
306 
307 	} while (false);
308 
309 }
310 
311 void _phl_indic_new_rxpkt(struct phl_info_t *phl_info)
312 {
313 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_SUCCESS;
314 	struct rtw_evt_info_t *evt_info = &phl_info->phl_com->evt_info;
315 	void *drv_priv = phl_to_drvpriv(phl_info);
316 	FUNCIN_WSTS(pstatus);
317 
318 	do {
319 		_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
320 		evt_info->evt_bitmap |= RTW_PHL_EVT_RX;
321 		_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
322 
323 		pstatus = phl_schedule_handler(phl_info->phl_com,
324 							&phl_info->phl_event_handler);
325 	} while (false);
326 
327 	if (RTW_PHL_STATUS_SUCCESS != pstatus)
328 		PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "[WARNING] Trigger rx indic event fail!\n");
329 
330 	FUNCOUT_WSTS(pstatus);
331 
332 #ifdef PHL_RX_BATCH_IND
333 	phl_info->rx_new_pending = 0;
334 #endif
335 }
336 
337 void _phl_record_rx_stats(struct rtw_recv_pkt *recvpkt)
338 {
339 	if(NULL == recvpkt)
340 		return;
341 	if (recvpkt->tx_sta)
342 		recvpkt->tx_sta->stats.rx_rate = recvpkt->mdata.rx_rate;
343 }
344 
345 enum rtw_phl_status _phl_add_rx_pkt(struct phl_info_t *phl_info,
346 				    struct rtw_phl_rx_pkt *phl_rx)
347 {
348 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
349 	struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
350 	struct rtw_recv_pkt *recvpkt = &phl_rx->r;
351 	u16 ring_res = 0, wptr = 0, rptr = 0;
352 	void *drv = phl_to_drvpriv(phl_info);
353 
354 	FUNCIN_WSTS(pstatus);
355 	_os_spinlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
356 
357 	if (!ring)
358 		goto out;
359 
360 	wptr = (u16)_os_atomic_read(drv, &ring->phl_idx);
361 	rptr = (u16)_os_atomic_read(drv, &ring->core_idx);
362 
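	/*
	 * phl_idx is the producer index and core_idx the consumer index of
	 * the shared rx ring; ring_res below is the number of free entries
	 * left between them, so zero means the ring is full and the caller
	 * must recycle this packet.
	 */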
363 	ring_res = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RING_ENTRY_NUM);
364 	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
365 		"[3] _phl_add_rx_pkt::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
366 		_os_atomic_read(drv, &ring->phl_idx),
367 		_os_atomic_read(drv, &ring->core_idx),
368 		ring_res);
369 	if (ring_res <= 0) {
370 		PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no ring resource to add new rx pkt!\n");
371 		pstatus = RTW_PHL_STATUS_RESOURCE;
372 		goto out;
373 	}
374 
375 	wptr = wptr + 1;
376 	if (wptr >= MAX_PHL_RING_ENTRY_NUM)
377 		wptr = 0;
378 
379 	ring->entry[wptr] = recvpkt;
380 
381 	if (wptr)
382 		_os_atomic_inc(drv, &ring->phl_idx);
383 	else
384 		_os_atomic_set(drv, &ring->phl_idx, 0);
385 
386 #ifdef PHL_RX_BATCH_IND
387 	phl_info->rx_new_pending = 1;
388 	pstatus = RTW_PHL_STATUS_SUCCESS;
389 #endif
390 
391 out:
392 	_os_spinunlock(drv, &phl_info->rx_ring_lock, _bh, NULL);
393 
394 	if(pstatus == RTW_PHL_STATUS_SUCCESS)
395 		_phl_record_rx_stats(recvpkt);
396 
397 	FUNCOUT_WSTS(pstatus);
398 
399 	return pstatus;
400 }
401 
402 void
403 phl_sta_ps_enter(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
404                  struct rtw_wifi_role_t *role)
405 {
406 	void *d = phl_to_drvpriv(phl_info);
407 	enum rtw_hal_status hal_status;
408 	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
409 
410 	_os_atomic_set(d, &sta->ps_sta, 1);
411 
412 	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
413 	          "STA %02X:%02X:%02X:%02X:%02X:%02X enters PS mode, AID=%u, macid=%u, sta=0x%p\n",
414 	          sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
415 	          sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
416 	          sta->aid, sta->macid, sta);
417 
418 	hal_status = rtw_hal_set_macid_pause(phl_info->hal,
419 	                                     sta->macid, true);
420 	if (RTW_HAL_STATUS_SUCCESS != hal_status) {
421 	        PHL_WARN("%s(): failed to pause macid tx, macid=%u\n",
422 	                 __FUNCTION__, sta->macid);
423 	}
424 
425 	if (ops->ap_ps_sta_ps_change)
426 		ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, true);
427 }
428 
429 void
430 phl_sta_ps_exit(struct phl_info_t *phl_info, struct rtw_phl_stainfo_t *sta,
431                 struct rtw_wifi_role_t *role)
432 {
433 	void *d = phl_to_drvpriv(phl_info);
434 	enum rtw_hal_status hal_status;
435 	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
436 
437 	PHL_TRACE(COMP_PHL_PS, _PHL_INFO_,
438 	          "STA %02X:%02X:%02X:%02X:%02X:%02X leaves PS mode, AID=%u, macid=%u, sta=0x%p\n",
439 	          sta->mac_addr[0], sta->mac_addr[1], sta->mac_addr[2],
440 	          sta->mac_addr[3], sta->mac_addr[4], sta->mac_addr[5],
441 	          sta->aid, sta->macid, sta);
442 
443 	_os_atomic_set(d, &sta->ps_sta, 0);
444 
445 	hal_status = rtw_hal_set_macid_pause(phl_info->hal,
446 	                                     sta->macid, false);
447 	if (RTW_HAL_STATUS_SUCCESS != hal_status) {
448 	        PHL_WARN("%s(): failed to resume macid tx, macid=%u\n",
449 	                 __FUNCTION__, sta->macid);
450 	}
451 
452 	if (ops->ap_ps_sta_ps_change)
453 		ops->ap_ps_sta_ps_change(d, role->id, sta->mac_addr, false);
454 }
455 
456 void
457 phl_rx_handle_sta_process(struct phl_info_t *phl_info,
458                           struct rtw_phl_rx_pkt *rx)
459 {
460 	struct rtw_r_meta_data *m = &rx->r.mdata;
461 	struct rtw_wifi_role_t *role = NULL;
462 	struct rtw_phl_stainfo_t *sta = NULL;
463 	void *d = phl_to_drvpriv(phl_info);
464 
465 	if (!phl_info->phl_com->dev_sw_cap.ap_ps)
466 		return;
467 
468 	if (m->addr_cam_vld) {
469 		sta = rtw_phl_get_stainfo_by_macid(phl_info, m->macid);
470 		if (sta && sta->wrole)
471 			role = sta->wrole;
472 	}
473 
474 	if (!sta) {
475 		role = phl_get_wrole_by_addr(phl_info, m->mac_addr);
476 		if (role)
477 			sta = rtw_phl_get_stainfo_by_addr(phl_info,
478 			                                  role, m->ta);
479 	}
480 
481 	if (!role || !sta)
482 		return;
483 
484 	rx->r.tx_sta = sta;
485 	rx->r.rx_role = role;
486 
487 	PHL_TRACE(COMP_PHL_PS, _PHL_DEBUG_,
488 	          "ap-ps: more_frag=%u, frame_type=%u, role_type=%d, pwr_bit=%u, seq=%u\n",
489 	          m->more_frag, m->frame_type, role->type, m->pwr_bit, m->seq);
490 
491 	/*
492 	 * Change STA PS state based on the PM bit in frame control
493 	 */
494 	if (!m->more_frag &&
495 	    (m->frame_type == RTW_FRAME_TYPE_DATA ||
496 	     m->frame_type == RTW_FRAME_TYPE_MGNT) &&
497 	    (role->type == PHL_RTYPE_AP ||
498 	     role->type == PHL_RTYPE_P2P_GO)) {
499 		/* We may get an @rx with macid set to our own self macid; check
500 		 * for that here to avoid pausing the self macid. The check is
501 		 * done here rather than on the normal rx path, where it would
502 		 * degrade rx throughput significantly. */
503 		if (phl_self_stainfo_chk(phl_info, role, sta))
504 			return;
505 
506 		if (_os_atomic_read(d, &sta->ps_sta)) {
507 			if (!m->pwr_bit)
508 				phl_sta_ps_exit(phl_info, sta, role);
509 		} else {
510 			if (m->pwr_bit)
511 				phl_sta_ps_enter(phl_info, sta, role);
512 		}
513 	}
514 }
515 
516 void
517 phl_handle_rx_frame_list(struct phl_info_t *phl_info,
518                          _os_list *frames)
519 {
520 	struct rtw_phl_rx_pkt *pos, *n;
521 	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
522 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
523 
524 	phl_list_for_loop_safe(pos, n, struct rtw_phl_rx_pkt, frames, list) {
525 		list_del(&pos->list);
526 		phl_rx_handle_sta_process(phl_info, pos);
527 		status = _phl_add_rx_pkt(phl_info, pos);
528 		if (RTW_PHL_STATUS_RESOURCE == status) {
529 			hci_trx_ops->recycle_rx_pkt(phl_info, pos);
530 		}
531 	}
532 #ifndef PHL_RX_BATCH_IND
533 	_phl_indic_new_rxpkt(phl_info);
534 #endif
535 
536 }
537 
538 
539 #define SEQ_MODULO 0x1000
540 #define SEQ_MASK	0xfff
541 
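/*
 * 802.11 sequence numbers are 12 bits wide, so all of the helpers below
 * work modulo 4096. seq_less() treats a distance of more than half the
 * sequence space as a wraparound: e.g. seq_less(4090, 5) is true because
 * ((4090 - 5) & 0xfff) = 4085 > 2048, i.e. 4090 is considered "before" 5.
 * reorder_index() maps a sequence number to a slot of the reorder ring
 * buffer relative to the session start sequence number (ssn).
 */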
542 static inline int seq_less(u16 sq1, u16 sq2)
543 {
544 	return ((sq1 - sq2) & SEQ_MASK) > (SEQ_MODULO >> 1);
545 }
546 
547 static inline u16 seq_inc(u16 sq)
548 {
549 	return (sq + 1) & SEQ_MASK;
550 }
551 
552 static inline u16 seq_sub(u16 sq1, u16 sq2)
553 {
554 	return (sq1 - sq2) & SEQ_MASK;
555 }
556 
557 static inline u16 reorder_index(struct phl_tid_ampdu_rx *r, u16 seq)
558 {
559 	return seq_sub(seq, r->ssn) % r->buf_size;
560 }
561 
562 static void phl_release_reorder_frame(struct phl_info_t *phl_info,
563                                       struct phl_tid_ampdu_rx *r,
564                                       int index, _os_list *frames)
565 {
566 	struct rtw_phl_rx_pkt *pkt = r->reorder_buf[index];
567 
568 	if (!pkt)
569 		goto out;
570 
571 	/* release the frame from the reorder ring buffer */
572 	r->stored_mpdu_num--;
573 	r->reorder_buf[index] = NULL;
574 	list_add_tail(&pkt->list, frames);
575 
576 out:
577 	r->head_seq_num = seq_inc(r->head_seq_num);
578 }
579 
580 #define HT_RX_REORDER_BUF_TIMEOUT_MS 100
581 
582 /*
583  * If the MPDU at head_seq_num is ready,
584  *     1. release all subsequent MPDUs with consecutive SN and
585  *     2. if there's MPDU that is ready but left in the reordering
586  *        buffer, find it and set reorder timer according to its reorder
587  *        time
588  *
589  * If the MPDU at head_seq_num is not ready and there is no MPDU ready
590  * in the buffer at all, return.
591  *
592  * If the MPDU at head_seq_num is not ready but there is some MPDU in
593  * the buffer that is ready, check whether any frames in the reorder
594  * buffer have timed out in the following way.
595  *
596  * Basically, MPDUs that are not ready are purged and MPDUs that are
597  * ready are released.
598  *
599  * The process goes through all the buffer but the one at head_seq_num
600  * unless
601  *     - there's a MPDU that is ready AND
602  *     - there are one or more buffers that are not ready.
603  * In this case, the process is stopped, the head_seq_num becomes the
604  * first buffer that is not ready and the reorder_timer is reset based
605  * on the reorder_time of that ready MPDU.
606  */
607 static void phl_reorder_release(struct phl_info_t *phl_info,
608 								struct phl_tid_ampdu_rx *r, _os_list *frames)
609 {
610 	/* ref ieee80211_sta_reorder_release() and wil_reorder_release() */
611 
612 	int index, i, j;
613 	u32 cur_time = _os_get_cur_time_ms();
614 
615 	/* release the buffer until next missing frame */
616 	index = reorder_index(r, r->head_seq_num);
617 	if (!r->reorder_buf[index] && r->stored_mpdu_num) {
618 		/*
619 		 * No buffers ready to be released, but check whether any
620 		 * frames in the reorder buffer have timed out.
621 		 */
622 		int skipped = 1;
623 		for (j = (index + 1) % r->buf_size; j != index;
624 			j = (j + 1) % r->buf_size) {
625 			if (!r->reorder_buf[j]) {
626 				skipped++;
627 				continue;
628 			}
629 			if (skipped && cur_time < r->reorder_time[j] +
630 				HT_RX_REORDER_BUF_TIMEOUT_MS)
631 				goto set_release_timer;
632 
633 			/* don't leave incomplete A-MSDUs around */
634 			for (i = (index + 1) % r->buf_size; i != j;
635 				i = (i + 1) % r->buf_size)
636 				phl_recycle_rx_buf(phl_info, r->reorder_buf[i]);
637 
638 			PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "release an RX reorder frame due to timeout on earlier frames\n");
639 
640 			phl_release_reorder_frame(phl_info, r, j, frames);
641 
642 			/*
643 			 * Increment the head seq# also for the skipped slots.
644 			 */
645 			r->head_seq_num =
646 				(r->head_seq_num + skipped) & SEQ_MASK;
647 			skipped = 0;
648 		}
649 	} else while (r->reorder_buf[index]) {
650 		phl_release_reorder_frame(phl_info, r, index, frames);
651 		index = reorder_index(r, r->head_seq_num);
652 	}
653 
654 	if (r->stored_mpdu_num) {
655 		j = index = r->head_seq_num % r->buf_size;
656 
657 		for (; j != (index - 1) % r->buf_size;
658 			j = (j + 1) % r->buf_size) {
659 			if (r->reorder_buf[j])
660 				break;
661 		}
662 
663 set_release_timer:
664 
665 		if (!r->removed)
666 			_os_set_timer(r->drv_priv, &r->sta->reorder_timer,
667 			              HT_RX_REORDER_BUF_TIMEOUT_MS);
668 	} else {
669 		/* TODO: implementation of cancel timer on Linux is
670 			del_timer_sync(), it can't be called with same spinlock
671 			held with the expiration callback, that causes a potential
672 			deadlock. */
673 		_os_cancel_timer_async(r->drv_priv, &r->sta->reorder_timer);
674 	}
675 }
676 
677 void phl_sta_rx_reorder_timer_expired(void *t)
678 {
679 	/* ref sta_rx_agg_reorder_timer_expired() */
680 
681 	struct rtw_phl_stainfo_t *sta = (struct rtw_phl_stainfo_t *)t;
682 	struct rtw_phl_com_t *phl_com = sta->wrole->phl_com;
683 	struct phl_info_t *phl_info = (struct phl_info_t *)phl_com->phl_priv;
684 	void *drv_priv = phl_to_drvpriv(phl_info);
685 	u8 i = 0;
686 
687 	PHL_INFO("Rx reorder timer expired, sta=0x%p\n", sta);
688 
689 	for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
690 		_os_list frames;
691 
692 		INIT_LIST_HEAD(&frames);
693 
694 		_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
695 		if (sta->tid_rx[i])
696 			phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
697 		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
698 
699 		phl_handle_rx_frame_list(phl_info, &frames);
700 #ifdef PHL_RX_BATCH_IND
701 		_phl_indic_new_rxpkt(phl_info);
702 #endif
703 	}
704 
705 	_os_event_set(drv_priv, &sta->comp_sync);
706 }
707 
708 static void phl_release_reorder_frames(struct phl_info_t *phl_info,
709 										struct phl_tid_ampdu_rx *r,
710 										u16 head_seq_num, _os_list *frames)
711 {
712 	/* ref ieee80211_release_reorder_frames() and
713 		wil_release_reorder_frames() */
714 
715 	int index;
716 
717 	/* note: this function is never called with
718 	 * hseq preceding r->head_seq_num, i.e. it is always true that
719 	 * !seq_less(hseq, r->head_seq_num)
720 	 * and thus on loop exit it should be
721 	 * r->head_seq_num == hseq
722 	 */
723 	while (seq_less(r->head_seq_num, head_seq_num) &&
724 		r->stored_mpdu_num) { /* Note: do we need to check this? */
725 		index = reorder_index(r, r->head_seq_num);
726 		phl_release_reorder_frame(phl_info, r, index, frames);
727 	}
728 	r->head_seq_num = head_seq_num;
729 }
730 
731 void rtw_phl_flush_reorder_buf(void *phl, struct rtw_phl_stainfo_t *sta)
732 {
733 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
734 	void *drv_priv = phl_to_drvpriv(phl_info);
735 	_os_list frames;
736 	u8 i = 0;
737 
738 	PHL_INFO("%s: sta=0x%p\n", __FUNCTION__, sta);
739 
740 	INIT_LIST_HEAD(&frames);
741 
742 	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
743 	for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
744 		if (sta->tid_rx[i])
745 			phl_reorder_release(phl_info, sta->tid_rx[i], &frames);
746 	}
747 	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
748 
749 	phl_handle_rx_frame_list(phl_info, &frames);
750 #ifdef PHL_RX_BATCH_IND
751 	_phl_indic_new_rxpkt(phl_info);
752 #endif
753 
754 }
755 
756 static bool phl_manage_sta_reorder_buf(struct phl_info_t *phl_info,
757                                        struct rtw_phl_rx_pkt *pkt,
758                                        struct phl_tid_ampdu_rx *r,
759                                        _os_list *frames)
760 {
761 	/* ref ieee80211_sta_manage_reorder_buf() and wil_rx_reorder() */
762 
763 	struct rtw_r_meta_data *meta = &pkt->r.mdata;
764 	u16 mpdu_seq_num = meta->seq;
765 	u16 head_seq_num, buf_size;
766 	int index;
767 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
768 
769 	buf_size = r->buf_size;
770 	head_seq_num = r->head_seq_num;
771 
772 	/*
773 	 * If the current MPDU's SN is smaller than the SSN, it shouldn't
774 	 * be reordered.
775 	 */
776 	if (!r->started) {
777 		if (seq_less(mpdu_seq_num, head_seq_num))
778 			return false;
779 		r->started = true;
780 	}
781 
782 	if (r->sleep) {
783 		PHL_INFO("tid = %d reorder buffer handling after wake up\n",
784 		         r->tid);
785 		PHL_INFO("Update head seq(0x%03x) to the first rx seq(0x%03x) after wake up\n",
786 		         r->head_seq_num, mpdu_seq_num);
787 		r->head_seq_num = mpdu_seq_num;
788 		head_seq_num = r->head_seq_num;
789 		r->sleep = false;
790 	}
791 
792 	/* frame with out of date sequence number */
793 	if (seq_less(mpdu_seq_num, head_seq_num)) {
794 		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
795 				meta->seq, r->head_seq_num);
796 		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
797 		return true;
798 	}
799 
800 	/*
801 	 * If the frame's sequence number exceeds our buffering window size,
802 	 * release some previous frames to make room for this one.
803 	 */
804 	if (!seq_less(mpdu_seq_num, head_seq_num + buf_size)) {
805 		head_seq_num = seq_inc(seq_sub(mpdu_seq_num, buf_size));
806 		/* release stored frames up to new head to stack */
807 		phl_release_reorder_frames(phl_info, r, head_seq_num, frames);
808 	}
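	/*
	 * Example (assuming buf_size = 64): with head_seq_num = 0x100, an
	 * incoming seq of 0x150 lies outside the window [0x100, 0x13f], so
	 * the branch above advances head_seq_num to
	 * seq_inc(seq_sub(0x150, 64)) = 0x111 and releases the stored frames
	 * up to it; 0x150 then occupies the last slot of the new window.
	 */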
809 
810 	/* Now the new frame is always in the range of the reordering buffer */
811 
812 	index = reorder_index(r, mpdu_seq_num);
813 
814 	/* check if we already stored this frame */
815 	if (r->reorder_buf[index]) {
816 		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "Rx drop: old seq 0x%03x head 0x%03x\n",
817 				meta->seq, r->head_seq_num);
818 		hci_trx_ops->recycle_rx_pkt(phl_info, pkt);
819 		return true;
820 	}
821 
822 	/*
823 	 * If the current MPDU is in the right order and nothing else
824 	 * is stored we can process it directly, no need to buffer it.
825 	 * If it is first but there's something stored, we may be able
826 	 * to release frames after this one.
827 	 */
828 	if (mpdu_seq_num == r->head_seq_num &&
829 		r->stored_mpdu_num == 0) {
830 		r->head_seq_num = seq_inc(r->head_seq_num);
831 		return false;
832 	}
833 
834 	/* put the frame in the reordering buffer */
835 	r->reorder_buf[index] = pkt;
836 	r->reorder_time[index] = _os_get_cur_time_ms();
837 	r->stored_mpdu_num++;
838 	phl_reorder_release(phl_info, r, frames);
839 
840 	return true;
841 
842 }
843 
844 enum rtw_phl_status phl_rx_reorder(struct phl_info_t *phl_info,
845                                    struct rtw_phl_rx_pkt *phl_rx,
846                                    _os_list *frames)
847 {
848 	/* ref wil_rx_reorder() and ieee80211_rx_reorder_ampdu() */
849 
850 	void *drv_priv = phl_to_drvpriv(phl_info);
851 	struct rtw_r_meta_data *meta = &phl_rx->r.mdata;
852 	u16 tid = meta->tid;
853 	struct rtw_phl_stainfo_t *sta = NULL;
854 	struct phl_tid_ampdu_rx *r;
855 	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
856 
857 	/*
858 	 * Remove FCS if it is appended
859 	 * TODO: handle more than one in pkt_list
860 	 */
861 	if (phl_info->phl_com->append_fcs) {
862 		/*
863 		 * Only last MSDU of A-MSDU includes FCS.
864 		 * TODO: If A-MSDU cut processing is in HAL, should only deduct
865 		 * FCS from length of last one of pkt_list. For such case,
866 		 * phl_rx->r should have pkt_list length.
867 		 */
868 		  if (!(meta->amsdu_cut && !meta->last_msdu)) {
869 			  if (phl_rx->r.pkt_list[0].length <= 4) {
870 				  PHL_ERR("%s, pkt_list[0].length(%d) too short\n",
871 				          __func__, phl_rx->r.pkt_list[0].length);
872 				  goto drop_frame;
873 			  }
874 			  phl_rx->r.pkt_list[0].length -= 4;
875 		  }
876 	}
877 
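	/*
	 * Frames that cannot belong to a block-ack session (MP mode,
	 * broadcast/multicast, non-QoS, QoS-Null and fragmented frames) are
	 * passed straight through via dont_reorder and delivered in arrival
	 * order; only QoS data that maps to an active phl_tid_ampdu_rx entry
	 * goes through the reorder buffer.
	 */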
878 	if (phl_is_mp_mode(phl_info->phl_com))
879 		goto dont_reorder;
880 
881 	if (meta->bc || meta->mc)
882 		goto dont_reorder;
883 
884 	if (!meta->qos)
885 		goto dont_reorder;
886 
887 	if (meta->q_null)
888 		goto dont_reorder;
889 
890 	/* TODO: check ba policy is either ba or normal */
891 
892 	/* if the mpdu is fragmented, don't reorder */
893 	if (meta->more_frag || meta->frag_num) {
894 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
895 		          "Receive QoS Data with more_frag=%u, frag_num=%u\n",
896 		          meta->more_frag, meta->frag_num);
897 		goto dont_reorder;
898 	}
899 
900 	/* Use MAC ID from address CAM if this packet is address CAM matched */
901 	if (meta->addr_cam_vld)
902 		sta = rtw_phl_get_stainfo_by_macid(phl_info, meta->macid);
903 
904 	/* Otherwise, search STA by TA */
905 	if (!sta || !sta->wrole) {
906 		struct rtw_wifi_role_t *wrole;
907 		wrole = phl_get_wrole_by_addr(phl_info, meta->mac_addr);
908 		if (wrole)
909 			sta = rtw_phl_get_stainfo_by_addr(phl_info,
910 			                                  wrole, meta->ta);
911 		if (!wrole || !sta) {
912 			PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_,
913 			          "%s(): stainfo or wrole not found, cam=%u, macid=%u\n",
914 			          __FUNCTION__, meta->addr_cam, meta->macid);
915 			goto dont_reorder;
916 		}
917 	}
918 
919 	phl_rx->r.tx_sta = sta;
920 	phl_rx->r.rx_role = sta->wrole;
921 
922 	rtw_hal_set_sta_rx_sts(sta, false, meta);
923 
924 	if (tid >= ARRAY_SIZE(sta->tid_rx)) {
925 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "Fail: tid (%u) index out of range (%u)\n", tid, 8);
926 		goto drop_frame;
927 	}
928 
929 	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
930 
931 	r = sta->tid_rx[tid];
932 	if (!r) {
933 		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
934 		goto dont_reorder;
935 	}
936 
937 	if (!phl_manage_sta_reorder_buf(phl_info, phl_rx, r, frames)) {
938 		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
939 		goto dont_reorder;
940 	}
941 
942 	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
943 
944 	return RTW_PHL_STATUS_SUCCESS;
945 
946 drop_frame:
947 	hci_trx_ops->recycle_rx_pkt(phl_info, phl_rx);
948 	return RTW_PHL_STATUS_FAILURE;
949 
950 dont_reorder:
951 	list_add_tail(&phl_rx->list, frames);
952 	return RTW_PHL_STATUS_SUCCESS;
953 }
954 
955 
956 u8 phl_check_recv_ring_resource(struct phl_info_t *phl_info)
957 {
958 	struct rtw_phl_rx_ring *ring = &phl_info->phl_rx_ring;
959 	u16 avail = 0, wptr = 0, rptr = 0;
960 	void *drv_priv = phl_to_drvpriv(phl_info);
961 
962 	wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
963 	rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
964 	avail = phl_calc_avail_wptr(rptr, wptr, MAX_PHL_RING_ENTRY_NUM);
965 
966 	if (0 == avail)
967 		return false;
968 	else
969 		return true;
970 }
971 
972 void dump_phl_rx_ring(void *phl)
973 {
974 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
975 	void *drv_priv = phl_to_drvpriv(phl_info);
976 	s16	diff = 0;
977 	u16 idx = 0, endidx = 0;
978 	u16 phl_idx = 0, core_idx = 0;
979 
980 	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "===Dump PHL RX Ring===\n");
981 	phl_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.phl_idx);
982 	core_idx = (u16)_os_atomic_read(drv_priv, &phl_info->phl_rx_ring.core_idx);
983 	PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
984 			"core_idx = %d\n"
985 			"phl_idx = %d\n",
986 			core_idx,
987 			phl_idx);
988 
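	/*
	 * Note: the literal 4096 below appears to assume
	 * MAX_PHL_RING_ENTRY_NUM == 4096; at most the five entries after
	 * core_idx are dumped.
	 */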
989 	diff = phl_idx - core_idx;
990 	if (diff < 0)
991 		diff = 4096 + diff;
992 
993 	endidx = diff > 5 ? (core_idx+6): phl_idx;
994 	for (idx = core_idx+1; idx < endidx; idx++) {
995 		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_, "entry[%d] = %p\n", idx,
996 				phl_info->phl_rx_ring.entry[idx%4096]);
997 	}
998 }
999 
1000 
1001 void phl_event_indicator(void *context)
1002 {
1003 	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
1004 	struct rtw_phl_handler *phl_handler
1005 		= (struct rtw_phl_handler *)phl_container_of(context,
1006 							struct rtw_phl_handler,
1007 							os_handler);
1008 	struct phl_info_t *phl_info = (struct phl_info_t *)phl_handler->context;
1009 	struct rtw_phl_evt_ops *ops = NULL;
1010 	struct rtw_evt_info_t *evt_info = NULL;
1011 	void *drv_priv = NULL;
1012 	enum rtw_phl_evt evt_bitmap = 0;
1013 	FUNCIN_WSTS(sts);
1014 
1015 	if (NULL != phl_info) {
1016 		ops = &phl_info->phl_com->evt_ops;
1017 		evt_info = &phl_info->phl_com->evt_info;
1018 		drv_priv = phl_to_drvpriv(phl_info);
1019 
1020 		_os_spinlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1021 		evt_bitmap = evt_info->evt_bitmap;
1022 		evt_info->evt_bitmap = 0;
1023 		_os_spinunlock(drv_priv, &evt_info->evt_lock, _bh, NULL);
1024 
1025 		if (RTW_PHL_EVT_RX & evt_bitmap) {
1026 			if (NULL != ops->rx_process) {
1027 				sts = ops->rx_process(drv_priv);
1028 			}
1029 			dump_phl_rx_ring(phl_info);
1030 		}
1031 	}
1032 	FUNCOUT_WSTS(sts);
1033 
1034 }
1035 
1036 void _phl_rx_statistics_reset(struct phl_info_t *phl_info)
1037 {
1038 	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
1039 	struct rtw_phl_stainfo_t *sta = NULL;
1040 	struct rtw_wifi_role_t *role = NULL;
1041 	void *drv = phl_to_drvpriv(phl_info);
1042 	struct phl_queue *sta_queue;
1043 	u8 i;
1044 
1045 	for (i = 0; i< MAX_WIFI_ROLE_NUMBER; i++) {
1046 		role = &phl_com->wifi_roles[i];
1047 		if (role->active && (role->mstate == MLME_LINKED)) {
1048 			sta_queue = &role->assoc_sta_queue;
1049 			_os_spinlock(drv, &sta_queue->lock, _bh, NULL);
1050 			phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
1051 						&sta_queue->queue, list) {
1052 				if (sta)
1053 					rtw_hal_set_sta_rx_sts(sta, true, NULL);
1054 			}
1055 			_os_spinunlock(drv, &sta_queue->lock, _bh, NULL);
1056 		}
1057 	}
1058 }
1059 
1060 void
1061 phl_rx_watchdog(struct phl_info_t *phl_info)
1062 {
1063 	struct rtw_stats *phl_stats = &phl_info->phl_com->phl_stats;
1064 
1065 	phl_rx_traffic_upd(phl_stats);
1066 	phl_dump_rx_stats(phl_stats);
1067 	_phl_rx_statistics_reset(phl_info);
1068 }
1069 
1070 u16 rtw_phl_query_new_rx_num(void *phl)
1071 {
1072 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1073 	struct rtw_phl_rx_ring *ring = NULL;
1074 	u16 new_rx = 0, wptr = 0, rptr = 0;
1075 
1076 	if (NULL != phl_info) {
1077 		ring = &phl_info->phl_rx_ring;
1078 		wptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
1079 						&ring->phl_idx);
1080 		rptr = (u16)_os_atomic_read(phl_to_drvpriv(phl_info),
1081 						&ring->core_idx);
1082 		new_rx = phl_calc_avail_rptr(rptr, wptr,
1083 						MAX_PHL_RING_ENTRY_NUM);
1084 	}
1085 
1086 	return new_rx;
1087 }
1088 
1089 struct rtw_recv_pkt *rtw_phl_query_rx_pkt(void *phl)
1090 {
1091 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1092 	struct rtw_phl_rx_ring *ring = NULL;
1093 	struct rtw_recv_pkt *recvpkt = NULL;
1094 	void *drv_priv = NULL;
1095 	u16 ring_res = 0, wptr = 0, rptr = 0;
1096 
1097 	if (NULL != phl_info) {
1098 		ring = &phl_info->phl_rx_ring;
1099 		drv_priv = phl_to_drvpriv(phl_info);
1100 
1101 		wptr = (u16)_os_atomic_read(drv_priv, &ring->phl_idx);
1102 		rptr = (u16)_os_atomic_read(drv_priv, &ring->core_idx);
1103 
1104 		ring_res = phl_calc_avail_rptr(rptr, wptr,
1105 							MAX_PHL_RING_ENTRY_NUM);
1106 
1107 		PHL_TRACE(COMP_PHL_RECV, _PHL_DEBUG_,
1108 			"[4] %s::[Query] phl_idx =%d , core_idx =%d , ring_res =%d\n",
1109 			__FUNCTION__,
1110 			_os_atomic_read(drv_priv, &ring->phl_idx),
1111 			_os_atomic_read(drv_priv, &ring->core_idx),
1112 			ring_res);
1113 
1114 		if (ring_res > 0) {
1115 			rptr = rptr + 1;
1116 
1117 			if (rptr >= MAX_PHL_RING_ENTRY_NUM) {
1118 				rptr = 0;
1119 				recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
1120 				ring->entry[rptr] = NULL;
1121 				_os_atomic_set(drv_priv, &ring->core_idx, 0);
1122 			} else {
1123 				recvpkt = (struct rtw_recv_pkt *)ring->entry[rptr];
1124 				ring->entry[rptr] = NULL;
1125 				_os_atomic_inc(drv_priv, &ring->core_idx);
1126 			}
1127 			if (NULL == recvpkt)
1128 				PHL_TRACE(COMP_PHL_RECV, _PHL_WARNING_, "recvpkt is NULL!\n");
1129 			else
1130 				phl_rx_statistics(phl_info, recvpkt);
1131 		} else {
1132 			PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "no available rx packet to query!\n");
1133 		}
1134 	}
1135 
1136 	return recvpkt;
1137 }
1138 
1139 enum rtw_phl_status rtw_phl_return_rxbuf(void *phl, u8* recvpkt)
1140 {
1141 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1142 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1143 	struct rtw_phl_rx_pkt *phl_rx = NULL;
1144 	struct rtw_recv_pkt *r = (struct rtw_recv_pkt *)recvpkt;
1145 
1146 	do {
1147 		if (NULL == recvpkt)
1148 			break;
1149 
1150 		phl_rx = phl_container_of(r, struct rtw_phl_rx_pkt, r);
1151 		phl_recycle_rx_buf(phl_info, phl_rx);
1152 		pstatus = RTW_PHL_STATUS_SUCCESS;
1153 	} while (false);
1154 
1155 	return pstatus;
1156 }
1157 
1158 
1159 enum rtw_phl_status rtw_phl_start_rx_process(void *phl)
1160 {
1161 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1162 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1163 
1164 	FUNCIN_WSTS(pstatus);
1165 
1166 	pstatus = phl_schedule_handler(phl_info->phl_com,
1167 	                               &phl_info->phl_rx_handler);
1168 
1169 	FUNCOUT_WSTS(pstatus);
1170 
1171 	return pstatus;
1172 }
1173 
1174 void rtw_phl_rx_bar(void *phl, struct rtw_phl_stainfo_t *sta, u8 tid, u16 seq)
1175 {
1176 	/* ref ieee80211_rx_h_ctrl() and wil_rx_bar() */
1177 
1178 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1179 	void *drv_priv = phl_to_drvpriv(phl_info);
1180 	struct phl_tid_ampdu_rx *r;
1181 	_os_list frames;
1182 
1183 	INIT_LIST_HEAD(&frames);
1184 
1185 	if (tid >= RTW_MAX_TID_NUM)
1186 		goto out;
1187 
1188 	_os_spinlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
1189 
1190 	r = sta->tid_rx[tid];
1191 	if (!r) {
1192 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR for non-existing TID %d\n", tid);
1193 		goto out;
1194 	}
1195 
1196 	if (seq_less(seq, r->head_seq_num)) {
1197 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_, "BAR Seq 0x%03x preceding head 0x%03x\n",
1198 					seq, r->head_seq_num);
1199 		goto out;
1200 	}
1201 
1202 	PHL_TRACE(COMP_PHL_RECV, _PHL_INFO_, "BAR: TID %d Seq 0x%03x head 0x%03x\n",
1203 				tid, seq, r->head_seq_num);
1204 
1205 	phl_release_reorder_frames(phl_info, r, seq, &frames);
1206 	phl_handle_rx_frame_list(phl_info, &frames);
1207 
1208 out:
1209 	_os_spinunlock(drv_priv, &sta->tid_rx_lock, _bh, NULL);
1210 }
1211 
1212 enum rtw_rx_status rtw_phl_get_rx_status(void *phl)
1213 {
1214 #ifdef CONFIG_USB_HCI
1215 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1216 	enum rtw_hci_type hci_type = phl_info->phl_com->hci_type;
1217 
1218 	if (hci_type & RTW_HCI_USB)
1219 		return rtw_hal_get_usb_status(phl_info->hal);
1220 #endif
1221 
1222 	return RTW_STATUS_RX_OK;
1223 }
1224 
1225 enum rtw_phl_status
1226 rtw_phl_enter_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
1227 {
1228 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1229 	enum rtw_hal_status status;
1230 
1231 	status = rtw_hal_enter_mon_mode(phl_info->hal, wrole->hw_band);
1232 	if (status != RTW_HAL_STATUS_SUCCESS) {
1233 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1234 		          "%s(): rtw_hal_enter_mon_mode() failed, status=%d",
1235 		          __FUNCTION__, status);
1236 		return RTW_PHL_STATUS_FAILURE;
1237 	}
1238 
1239 	return RTW_PHL_STATUS_SUCCESS;
1240 }
1241 
1242 enum rtw_phl_status
1243 rtw_phl_leave_mon_mode(void *phl, struct rtw_wifi_role_t *wrole)
1244 {
1245 	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
1246 	enum rtw_hal_status status;
1247 
1248 	status = rtw_hal_leave_mon_mode(phl_info->hal, wrole->hw_band);
1249 	if (status != RTW_HAL_STATUS_SUCCESS) {
1250 		PHL_TRACE(COMP_PHL_RECV, _PHL_ERR_,
1251 		          "%s(): rtw_hal_leave_mon_mode() failed, status=%d",
1252 		          __FUNCTION__, status);
1253 		return RTW_PHL_STATUS_FAILURE;
1254 	}
1255 
1256 	return RTW_PHL_STATUS_SUCCESS;
1257 }
1258 
1259 #ifdef CONFIG_PHL_RX_PSTS_PER_PKT
1260 void
1261 _phl_rx_proc_frame_list(struct phl_info_t *phl_info, struct phl_queue *pq)
1262 {
1263 	void *d = phl_to_drvpriv(phl_info);
1264 	_os_list *pkt_list = NULL;
1265 	struct rtw_phl_rx_pkt *phl_rx = NULL;
1266 
1267 	if (NULL == pq)
1268 		return;
1269 	if (0 == pq->cnt)
1270 		return;
1271 
1272 	PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1273 		  "_phl_rx_proc_frame_list : queue ele cnt = %d\n",
1274 		   pq->cnt);
1275 
1276 	while (true == pq_pop(d, pq, &pkt_list, _first, _bh)) {
1277 		phl_rx = (struct rtw_phl_rx_pkt *)pkt_list;
1278 		phl_info->hci_trx_ops->rx_handle_normal(phl_info, phl_rx);
1279 	}
1280 }
1281 
1282 enum rtw_phl_status
1283 phl_rx_proc_phy_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *ppdu_sts)
1284 {
1285 	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
1286 	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
1287 	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
1288 	struct rtw_phl_rx_pkt *phl_rx = NULL;
1289 	void *d = phl_to_drvpriv(phl_info);
1290 	struct rtw_phl_rssi_stat *rssi_stat = &phl_info->phl_com->rssi_stat;
1291 	_os_list *frame = NULL;
1292 	bool upt_psts = true;
1293 	u8 i = 0;
1294 	enum phl_band_idx band = HW_BAND_0;
1295 
1296 	if (NULL == ppdu_sts)
1297 		return pstatus;
1298 
1299 	if (false == psts_info->en_psts_per_pkt) {
1300 		return pstatus;
1301 	}
1302 
1303 	if (ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
1304 		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1305 			  "ppdu_sts->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!\n");
1306 		return pstatus;
1307 	}
1308 
1309 	band = (ppdu_sts->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
1310 
1311 	if (false == psts_info->en_ppdu_sts[band])
1312 		return pstatus;
1313 
1314 	if (ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt[band]) {
1315 		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1316 			  "ppdu_sts->r.mdata.ppdu_cnt != psts_info->cur_ppdu_cnt!\n");
1317 		upt_psts = false;
1318 	}
1319 
1320 	sts_entry = &psts_info->sts_ent[band][psts_info->cur_ppdu_cnt[band]];
1321 	/* check list empty */
1322 	if (0 == sts_entry->frames.cnt) {
1323 		PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1324 			  "cur_ppdu_cnt %d --> sts_entry->frames.cnt = 0\n",
1325 			  psts_info->cur_ppdu_cnt[band]);
1326 		pstatus = RTW_PHL_STATUS_SUCCESS;
1327 		return pstatus;
1328 	}
1329 
1330 	/* start update phy info to per pkt*/
1331 	if (false == pq_get_front(d, &sts_entry->frames, &frame, _bh)) {
1332 		PHL_ERR(" %s list empty\n", __FUNCTION__);
1333 		return pstatus;
1334 	}
1335 	/**
1336 	 * TODO : How to filter the case :
1337 	 *	pkt(ppdu_cnt = 0) --> missing :psts(ppdu_cnt = 0) --> (all of the pkt, psts dropped/missing)
1338 	 *	--> ppdu_sts(ppdu_cnt = 0)(not for the current buffered pkt.)
1339 	 * workaround : check rate/bw/ppdu_type/... etc
1340 	 **/
1341 	phl_rx = (struct rtw_phl_rx_pkt *)frame;
1342 	if (upt_psts &&
1343 	   ((phl_rx->r.mdata.rx_rate != ppdu_sts->r.mdata.rx_rate) ||
1344 	    (phl_rx->r.mdata.bw != ppdu_sts->r.mdata.bw) ||
1345 	    (phl_rx->r.mdata.rx_gi_ltf != ppdu_sts->r.mdata.rx_gi_ltf) ||
1346 	    (phl_rx->r.mdata.ppdu_type != ppdu_sts->r.mdata.ppdu_type))) {
1347 		    /**
1348 		     * ppdu status is not for the buffered pkt,
1349 		     * skip update phy status to phl_rx
1350 		     **/
1351 		    upt_psts = false;
1352 	}
1353 	/* Get Frame Type */
1354 	ppdu_sts->r.phy_info.frame_type =
1355 		PHL_GET_80211_HDR_TYPE(phl_rx->r.pkt_list[0].vir_addr);
1356 
1357 	if ((false == ppdu_sts->r.phy_info.is_valid) &&
1358 	    (true == psts_info->en_fake_psts)) {
1359 		if (RTW_FRAME_TYPE_MGNT == phl_rx->r.mdata.frame_type) {
1360 			ppdu_sts->r.phy_info.rssi =
1361 				rssi_stat->ma_rssi[RTW_RSSI_MGNT_ACAM_A1M];
1362 		} else if (RTW_FRAME_TYPE_DATA == phl_rx->r.mdata.frame_type) {
1363 			ppdu_sts->r.phy_info.rssi =
1364 				rssi_stat->ma_rssi[RTW_RSSI_DATA_ACAM_A1M];
1365 		} else if (RTW_FRAME_TYPE_CTRL == phl_rx->r.mdata.frame_type) {
1366 			ppdu_sts->r.phy_info.rssi =
1367 				rssi_stat->ma_rssi[RTW_RSSI_CTRL_ACAM_A1M];
1368 		} else {
1369 			ppdu_sts->r.phy_info.rssi =
1370 				rssi_stat->ma_rssi[RTW_RSSI_UNKNOWN];
1371 		}
1372 		for(i = 0; i< RTW_PHL_MAX_RF_PATH ; i++) {
1373 			ppdu_sts->r.phy_info.rssi_path[i] =
1374 					ppdu_sts->r.phy_info.rssi;
1375 		}
1376 		ppdu_sts->r.phy_info.ch_idx = rtw_hal_get_cur_ch(phl_info->hal,
1377 						phl_rx->r.mdata.bb_sel);
1378 		ppdu_sts->r.phy_info.is_valid = true;
1379 	}
1380 
1381 	do {
1382 		if (false == upt_psts)
1383 			break;
1384 		phl_rx = (struct rtw_phl_rx_pkt *)frame;
1385 		_os_mem_cpy(d, &(phl_rx->r.phy_info), &(ppdu_sts->r.phy_info),
1386 			    sizeof(struct rtw_phl_ppdu_phy_info));
1387 	} while ((true == psts_info->psts_ampdu) &&
1388 		 (pq_get_next(d, &sts_entry->frames, frame, &frame, _bh)));
1389 
1390 	/*2. indicate the frame list*/
1391 	_phl_rx_proc_frame_list(phl_info, &sts_entry->frames);
1392 	/*3. reset the queue */
1393 	pq_reset(d, &(sts_entry->frames), _bh);
1394 
1395 	return pstatus;
1396 }
1397 
1398 bool
1399 phl_rx_proc_wait_phy_sts(struct phl_info_t *phl_info,
1400 			 struct rtw_phl_rx_pkt *phl_rx)
1401 {
1402 	struct rtw_phl_ppdu_sts_info *psts_info = &(phl_info->phl_com->ppdu_sts_info);
1403 	struct rtw_phl_ppdu_sts_ent *sts_entry = NULL;
1404 	void *d = phl_to_drvpriv(phl_info);
1405 	u8 i = 0;
1406 	bool ret = false;
1407 	enum phl_band_idx band = HW_BAND_0;
1408 
1409 	if (false == psts_info->en_psts_per_pkt) {
1410 		return ret;
1411 	}
1412 
1413 	if (phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT) {
1414 		PHL_ASSERT("phl_rx->r.mdata.ppdu_cnt >= PHL_MAX_PPDU_CNT!");
1415 		return ret;
1416 	}
1417 
1418 	band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
1419 
1420 	if (false == psts_info->en_ppdu_sts[band])
1421 		return ret;
1422 
1423 	if (psts_info->cur_ppdu_cnt[band] != phl_rx->r.mdata.ppdu_cnt) {
1424 		/* start of PPDU */
1425 		/* 1. Check all of the buffer list is empty */
1426 		/* only check the target rx pkt band */
1427 		for (i = 0; i < PHL_MAX_PPDU_CNT; i++) {
1428 			sts_entry = &psts_info->sts_ent[band][i];
1429 			if (0 != sts_entry->frames.cnt) {
1430 				/* need indicate first */
1431 				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1432 					  "band %d ; ppdu_cnt %d queue is not empty \n",
1433 					  band, i);
1434 				_phl_rx_proc_frame_list(phl_info,
1435 						&sts_entry->frames);
1436 				pq_reset(d, &(sts_entry->frames), _bh);
1437 			}
1438 		}
1439 
1440 		/* 2. check ppdu status filter condition */
1441 		/* Filter function is supported only if rxd = long_rxd */
1442 		if ((1 == phl_rx->r.mdata.long_rxd) &&
1443 		    (0 != (psts_info->ppdu_sts_filter &
1444 		           BIT(phl_rx->r.mdata.frame_type)))) {
1445 			/* 3. add new rx pkt to the tail of the queue */
1446 			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
1447 			pq_reset(d, &(sts_entry->frames), _bh);
1448 			pq_push(d, &(sts_entry->frames), &phl_rx->list,
1449 				_tail, _bh);
1450 			ret = true;
1451 		}
1452 		psts_info->cur_ppdu_cnt[band] = phl_rx->r.mdata.ppdu_cnt;
1453 	} else {
1454 		/* 1. check ppdu status filter condition */
1455 		/* Filter function is supported only if rxd = long_rxd */
1456 		if ((1 == phl_rx->r.mdata.long_rxd) &&
1457 		    (0 != (psts_info->ppdu_sts_filter &
1458 		           BIT(phl_rx->r.mdata.frame_type)))) {
1459 			/* 2. add to frame list */
1460 			sts_entry = &psts_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
1461 			if (0 == sts_entry->frames.cnt) {
1462 				PHL_TRACE(COMP_PHL_PSTS, _PHL_INFO_,
1463 					  "MPDU is not the start of PPDU, but the queue is empty!!!\n");
1464 			}
1465 			pq_push(d, &(sts_entry->frames), &phl_rx->list,
1466 				_tail, _bh);
1467 			ret = true;
1468 		}
1469 	}
1470 
1471 	return ret;
1472 }
1473 #endif
1474 
1475 #ifdef CONFIG_PHY_INFO_NTFY
1476 void _phl_rx_post_proc_ppdu_sts(void* priv, struct phl_msg* msg)
1477 {
1478 	struct phl_info_t *phl_info = (struct phl_info_t *)priv;
1479 	if (msg->inbuf && msg->inlen){
1480 		_os_kmem_free(phl_to_drvpriv(phl_info), msg->inbuf, msg->inlen);
1481 	}
1482 }
1483 
1484 bool
1485 _phl_rx_proc_aggr_psts_ntfy(struct phl_info_t *phl_info,
1486 			    struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent)
1487 {
1488 	struct rtw_phl_ppdu_sts_info *ppdu_info =
1489 			&phl_info->phl_com->ppdu_sts_info;
1490 	struct  rtw_phl_ppdu_sts_ntfy *psts_ntfy = NULL;
1491 	u8 i = 0;
1492 	bool ret = false;
1493 
1494 	if (ppdu_info->msg_aggr_cnt == 0) {
1495 		/* reset entry valid status  */
1496 		for (i = 0; i < MAX_PSTS_MSG_AGGR_NUM; i++) {
1497 			ppdu_info->msg_aggr_buf[i].vld = false;
1498 		}
1499 	}
1500 	/* copy to the buf */
1501 	psts_ntfy = &ppdu_info->msg_aggr_buf[ppdu_info->msg_aggr_cnt];
1502 	psts_ntfy->frame_type = ppdu_sts_ent->frame_type;
1503 	_os_mem_cpy(phl_info->phl_com->drv_priv,
1504 		    &psts_ntfy->phy_info,
1505 		    &ppdu_sts_ent->phy_info,
1506 		    sizeof(struct rtw_phl_ppdu_phy_info));
1507 	_os_mem_cpy(phl_info->phl_com->drv_priv,
1508 		    psts_ntfy->src_mac_addr,
1509 		    ppdu_sts_ent->src_mac_addr,
1510 		    MAC_ADDRESS_LENGTH);
1511 	psts_ntfy->vld = true;
1512 
1513 	/* update counter */
1514 	ppdu_info->msg_aggr_cnt++;
1515 	if (ppdu_info->msg_aggr_cnt >= MAX_PSTS_MSG_AGGR_NUM) {
1516 		ppdu_info->msg_aggr_cnt = 0;
1517 		ret = true;
1518 	}
1519 
1520 	return ret;
1521 }
1522 #endif
1523 
1524 void
1525 phl_rx_proc_ppdu_sts(struct phl_info_t *phl_info, struct rtw_phl_rx_pkt *phl_rx)
1526 {
1527 	u8 i = 0;
1528 	struct rtw_phl_ppdu_sts_info *ppdu_info = NULL;
1529 	struct rtw_phl_ppdu_sts_ent *ppdu_sts_ent = NULL;
1530 	struct rtw_phl_stainfo_t *psta = NULL;
1531 #ifdef CONFIG_PHY_INFO_NTFY
1532 	struct  rtw_phl_ppdu_sts_ntfy *psts_ntfy;
	struct phl_msg msg = {0};
	struct phl_msg_attribute attr = {0};
1533 	void *d = phl_to_drvpriv(phl_info);
1534 #endif
1535 	enum phl_band_idx band = HW_BAND_0;
1536 	struct rtw_rssi_info *rssi_sts;
1537 
1538 	if ((NULL == phl_info) || (NULL == phl_rx))
1539 		return;
1540 
1541 	band = (phl_rx->r.mdata.bb_sel > 0) ? HW_BAND_1 : HW_BAND_0;
1542 	ppdu_info = &phl_info->phl_com->ppdu_sts_info;
1543 	ppdu_sts_ent = &ppdu_info->sts_ent[band][phl_rx->r.mdata.ppdu_cnt];
1544 
1545 	if (false == ppdu_sts_ent->valid)
1546 		return;
1547 
1548 	if (true == ppdu_sts_ent->phl_done)
1549 		return;
1550 
1551 	ppdu_sts_ent->phl_done = true;
1552 
1553 	/* update phl self variables */
1554 	for(i = 0 ; i < ppdu_sts_ent->usr_num; i++) {
1555 		if (ppdu_sts_ent->sta[i].vld) {
1556 			psta = rtw_phl_get_stainfo_by_macid(phl_info,
1557 				 ppdu_sts_ent->sta[i].macid);
1558 			if (psta == NULL)
1559 				continue;
1560 			rssi_sts = &psta->hal_sta->rssi_stat;
1561 			STA_UPDATE_MA_RSSI_FAST(rssi_sts->ma_rssi, ppdu_sts_ent->phy_info.rssi);
1562 			/* update (re)associate req/resp pkt rssi */
1563 			if (RTW_IS_ASOC_PKT(ppdu_sts_ent->frame_type)) {
1564 				rssi_sts->assoc_rssi =
1565 						ppdu_sts_ent->phy_info.rssi;
1566 			}
1567 
1568 			if (RTW_IS_BEACON_OR_PROBE_RESP_PKT(
1569 						ppdu_sts_ent->frame_type)) {
1570 				if (0 == rssi_sts->ma_rssi_mgnt) {
1571 					rssi_sts->ma_rssi_mgnt =
1572 						ppdu_sts_ent->phy_info.rssi;
1573 				} else {
1574 					STA_UPDATE_MA_RSSI_FAST(
1575 						rssi_sts->ma_rssi_mgnt,
1576 						ppdu_sts_ent->phy_info.rssi);
1577 				}
1578 			}
1579 		}
1580 		else {
1581 			if (RTW_IS_ASOC_REQ_PKT(ppdu_sts_ent->frame_type) &&
1582 				(ppdu_sts_ent->usr_num == 1)) {
1583 				psta = rtw_phl_get_stainfo_by_addr_ex(phl_info,
1584 						ppdu_sts_ent->src_mac_addr);
1585 				if (psta) {
1586 					psta->hal_sta->rssi_stat.assoc_rssi =
1587 						ppdu_sts_ent->phy_info.rssi;
1588 
1589 					#ifdef DBG_AP_CLIENT_ASSOC_RSSI
1590 					PHL_INFO("%s [Rx-ASOC_REQ] - macid:%d, MAC-Addr:%02x-%02x-%02x-%02x-%02x-%02x, assoc_rssi:%d\n",
1591 						__func__,
1592 						psta->macid,
1593 						ppdu_sts_ent->src_mac_addr[0],
1594 						ppdu_sts_ent->src_mac_addr[1],
1595 						ppdu_sts_ent->src_mac_addr[2],
1596 						ppdu_sts_ent->src_mac_addr[3],
1597 						ppdu_sts_ent->src_mac_addr[4],
1598 						ppdu_sts_ent->src_mac_addr[5],
1599 						psta->hal_sta->rssi_stat.assoc_rssi);
1600 					#endif
1601 				}
1602 			}
1603 		}
1604 	}
1605 
1606 #ifdef CONFIG_PHY_INFO_NTFY
1607 	/*2. prepare and send psts notify to core */
1608 	if((RTW_FRAME_TYPE_BEACON == ppdu_sts_ent->frame_type) ||
1609 	   (RTW_FRAME_TYPE_PROBE_RESP == ppdu_sts_ent->frame_type)) {
1610 
1611 		if (false == _phl_rx_proc_aggr_psts_ntfy(phl_info,
1612 							 ppdu_sts_ent)) {
1613 			return;
1614 		}
1615 
1616 		/* send aggr psts ntfy*/
1617 		psts_ntfy = (struct rtw_phl_ppdu_sts_ntfy *)_os_kmem_alloc(d,
1618 				MAX_PSTS_MSG_AGGR_NUM * sizeof(struct rtw_phl_ppdu_sts_ntfy));
1619 		if (psts_ntfy == NULL) {
1620 			PHL_ERR("%s: alloc ppdu sts for ntfy fail.\n", __func__);
1621 			return;
1622 		}
1623 
1624 		_os_mem_cpy(phl_info->phl_com->drv_priv,
1625 			    psts_ntfy,
1626 			    &ppdu_info->msg_aggr_buf,
1627 			    (MAX_PSTS_MSG_AGGR_NUM *
1628 			     sizeof(struct rtw_phl_ppdu_sts_ntfy)));
1629 
1630 		msg.inbuf = (u8 *)psts_ntfy;
1631 		msg.inlen = (MAX_PSTS_MSG_AGGR_NUM *
1632 			     sizeof(struct rtw_phl_ppdu_sts_ntfy));
1633 		SET_MSG_MDL_ID_FIELD(msg.msg_id, PHL_MDL_PSTS);
1634 		SET_MSG_EVT_ID_FIELD(msg.msg_id, MSG_EVT_RX_PSTS);
1635 		attr.completion.completion = _phl_rx_post_proc_ppdu_sts;
1636 		attr.completion.priv = phl_info;
1637 		if (phl_msg_hub_send(phl_info, &attr, &msg) != RTW_PHL_STATUS_SUCCESS) {
1638 			PHL_ERR("%s: send msg_hub failed\n", __func__);
1639 			_os_kmem_free(d, psts_ntfy,
1640 				      (MAX_PSTS_MSG_AGGR_NUM *
1641 				       sizeof(struct rtw_phl_ppdu_sts_ntfy)));
1642 		}
1643 	}
1644 #endif
1645 }
1646 
1647 void phl_rx_wp_report_record_sts(struct phl_info_t *phl_info,
1648 				 u8 macid, u16 ac_queue, u8 txsts)
1649 {
1650 	struct rtw_phl_stainfo_t *phl_sta = NULL;
1651 	struct rtw_hal_stainfo_t *hal_sta = NULL;
1652 	struct rtw_wp_rpt_stats *wp_rpt_stats= NULL;
1653 
1654 	phl_sta = rtw_phl_get_stainfo_by_macid(phl_info, macid);
1655 
1656 	if (phl_sta) {
1657 		hal_sta = phl_sta->hal_sta;
1658 
1659 		if (hal_sta->trx_stat.wp_rpt_stats == NULL) {
1660 			PHL_ERR("rtp_stats NULL\n");
1661 			return;
1662 		}
1663 		/* Record Per ac queue statistics */
1664 		wp_rpt_stats = &hal_sta->trx_stat.wp_rpt_stats[ac_queue];
1665 
1666 		_os_spinlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
1667 		if (TX_STATUS_TX_DONE == txsts) {
1668 			/* record total tx ok*/
1669 			hal_sta->trx_stat.tx_ok_cnt++;
1670 			/* record per ac queue tx ok*/
1671 			wp_rpt_stats->tx_ok_cnt++;
1672 		} else {
1673 			/* record total tx fail*/
1674 			hal_sta->trx_stat.tx_fail_cnt++;
1675 			/* record per ac queue tx fail*/
1676 			if (TX_STATUS_TX_FAIL_REACH_RTY_LMT == txsts)
1677 				wp_rpt_stats->rty_fail_cnt++;
1678 			else if (TX_STATUS_TX_FAIL_LIFETIME_DROP == txsts)
1679 				wp_rpt_stats->lifetime_drop_cnt++;
1680 			else if (TX_STATUS_TX_FAIL_MACID_DROP == txsts)
1681 				wp_rpt_stats->macid_drop_cnt++;
1682 		}
1683 		_os_spinunlock(phl_to_drvpriv(phl_info), &hal_sta->trx_stat.tx_sts_lock, _bh, NULL);
1684 
1685 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_,"macid: %u, ac_queue: %u, tx_ok_cnt: %u, rty_fail_cnt: %u, "
1686 			"lifetime_drop_cnt: %u, macid_drop_cnt: %u\n"
1687 			, macid, ac_queue, wp_rpt_stats->tx_ok_cnt, wp_rpt_stats->rty_fail_cnt
1688 			, wp_rpt_stats->lifetime_drop_cnt, wp_rpt_stats->macid_drop_cnt);
1689 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "total tx ok: %u\n total tx fail: %u\n"
1690 			, hal_sta->trx_stat.tx_ok_cnt, hal_sta->trx_stat.tx_fail_cnt);
1691 	} else {
1692 		PHL_TRACE(COMP_PHL_DBG, _PHL_DEBUG_, "%s: PHL_STA not found\n",
1693 				__FUNCTION__);
1694 	}
1695 }
1696 
1697 static void _dump_rx_reorder_info(struct phl_info_t *phl_info,
1698 				  struct rtw_phl_stainfo_t *sta)
1699 {
1700 	void *drv_priv = phl_to_drvpriv(phl_info);
1701 	_os_spinlockfg sp_flags;
1702 	u8 i;
1703 
1704 	PHL_INFO("dump rx reorder buffer info:\n");
1705 	for (i = 0; i < ARRAY_SIZE(sta->tid_rx); i++) {
1706 
1707 		_os_spinlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
1708 		if (sta->tid_rx[i]) {
1709 			PHL_INFO("== tid = %d ==\n", sta->tid_rx[i]->tid);
1710 			PHL_INFO("head_seq_num = %d\n",
1711 				 sta->tid_rx[i]->head_seq_num);
1712 			PHL_INFO("stored_mpdu_num = %d\n",
1713 				 sta->tid_rx[i]->stored_mpdu_num);
1714 			PHL_INFO("ssn = %d\n", sta->tid_rx[i]->ssn);
1715 			PHL_INFO("buf_size = %d\n", sta->tid_rx[i]->buf_size);
1716 			PHL_INFO("started = %d\n", sta->tid_rx[i]->started);
1717 			PHL_INFO("removed = %d\n", sta->tid_rx[i]->removed);
1718 		}
1719 		_os_spinunlock(drv_priv, &sta->tid_rx_lock, _irq, &sp_flags);
1720 	}
1721 }
1722 
1723 void phl_dump_all_sta_rx_info(struct phl_info_t *phl_info)
1724 {
1725 	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
1726 	struct rtw_phl_stainfo_t *sta = NULL;
1727 	struct rtw_wifi_role_t *role = NULL;
1728 	void *drv = phl_to_drvpriv(phl_info);
1729 	struct phl_queue *sta_queue;
1730 	_os_spinlockfg sp_flags;
1731 	u8 i;
1732 
1733 	PHL_INFO("dump all sta rx info:\n");
1734 	for (i = 0; i < MAX_WIFI_ROLE_NUMBER; i++) {
1735 		role = &phl_com->wifi_roles[i];
1736 		if (role->active) {
1737 			PHL_INFO("wrole idx = %d\n", i);
1738 			PHL_INFO("wrole type = %d\n", role->type);
1739 			PHL_INFO("wrole mstate = %d\n", role->mstate);
1740 
1741 			sta_queue = &role->assoc_sta_queue;
1742 			_os_spinlock(drv, &sta_queue->lock, _irq, &sp_flags);
1743 			phl_list_for_loop(sta, struct rtw_phl_stainfo_t,
1744 						&sta_queue->queue, list) {
1745 				PHL_INFO("%s MACID:%d %02x:%02x:%02x:%02x:%02x:%02x \n",
1746 					 __func__, sta->macid,
1747 					 sta->mac_addr[0],
1748 					 sta->mac_addr[1],
1749 					 sta->mac_addr[2],
1750 					 sta->mac_addr[3],
1751 					 sta->mac_addr[4],
1752 					 sta->mac_addr[5]);
1753 				_dump_rx_reorder_info(phl_info, sta);
1754 			}
1755 			_os_spinunlock(drv, &sta_queue->lock, _irq, &sp_flags);
1756 		}
1757 	}
1758 }
1759 
1760 void phl_rx_dbg_dump(struct phl_info_t *phl_info, u8 band_idx)
1761 {
1762 	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
1763 
1764 	phl_status = phl_cmd_enqueue(phl_info,
1765 	                   band_idx,
1766 	                   MSG_EVT_DBG_RX_DUMP,
1767 	                   NULL,
1768 	                   0,
1769 	                   NULL,
1770 	                   PHL_CMD_NO_WAIT,
1771 	                   0);
1772 	if (phl_status != RTW_PHL_STATUS_SUCCESS) {
1773 		PHL_TRACE(COMP_PHL_DBG, _PHL_ERR_, "%s: cmd enqueue fail!\n",
1774 			  __func__);
1775 	}
1776 }
1777