/******************************************************************************
 *
 * Copyright(c) 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
#define _TRX_TEST_C_
#include "../phl_headers.h"
#include "../phl_api.h"
/*#include "../hal_g6/hal_api_mac.h"*/

#ifdef CONFIG_PHL_TEST_SUITE

void rtw_phl_test_rx_callback(void *context);
enum rtw_phl_status phl_recycle_test_tx(void *phl, struct rtw_xmit_req *treq);

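/*
 * Free the rx request pool: release the pool locks, reset the idle/busy
 * lists and counters, and free the backing buffer.
 */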
void _phl_free_rx_req_pool(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	PHL_INFO("_phl_free_rx_req_pool : idle counter (%d), busy counter (%d), total counter (%d)\n",
		 rx_req_pool->idle_cnt, rx_req_pool->busy_cnt, rx_req_pool->total_cnt);
	_os_spinlock_free(drv_priv, &rx_req_pool->idle_lock);
	_os_spinlock_free(drv_priv, &rx_req_pool->busy_lock);
	INIT_LIST_HEAD(&rx_req_pool->idle_list);
	INIT_LIST_HEAD(&rx_req_pool->busy_list);

	if (NULL != rx_req_pool->buf)
		_os_mem_free(drv_priv, rx_req_pool->buf, rx_req_pool->buf_len);

	rx_req_pool->buf_len = 0;
	rx_req_pool->idle_cnt = 0;
	rx_req_pool->busy_cnt = 0;
	rx_req_pool->total_cnt = 0;
}

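/*
 * Allocate rx_req_num rtw_test_rx entries in one contiguous buffer,
 * initialize the pool locks/lists and chain every entry onto the idle list.
 */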
enum rtw_phl_status _phl_alloc_rx_req_pool(void *phl, u32 rx_req_num)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_test_rx *rreq = NULL;
	u32 buf_len = 0, i = 0;

	FUNCIN_WSTS(status);
	do {
		if (0 == rx_req_num)
			break;

		rx_req_pool->total_cnt = rx_req_num;
		buf_len = sizeof(struct rtw_test_rx) * rx_req_num;
		rx_req_pool->buf = _os_mem_alloc(drv_priv, buf_len);

		if (NULL == rx_req_pool->buf)
			break;
		rx_req_pool->buf_len = buf_len;
		INIT_LIST_HEAD(&rx_req_pool->idle_list);
		INIT_LIST_HEAD(&rx_req_pool->busy_list);
		_os_spinlock_init(drv_priv, &rx_req_pool->idle_lock);
		_os_spinlock_init(drv_priv, &rx_req_pool->busy_lock);
		rreq = (struct rtw_test_rx *)rx_req_pool->buf;
		for (i = 0; i < rx_req_num; i++) {
			INIT_LIST_HEAD(&rreq[i].list);
			list_add_tail(&rreq[i].list, &rx_req_pool->idle_list);
			rx_req_pool->idle_cnt++;
		}

		status = RTW_PHL_STATUS_SUCCESS;

	} while (false);
	PHL_INFO("_phl_alloc_rx_req_pool : idle counter (%d), busy counter (%d), total counter (%d)\n",
		 rx_req_pool->idle_cnt, rx_req_pool->busy_cnt, rx_req_pool->total_cnt);
	FUNCOUT_WSTS(status);

	return status;
}

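/*
 * Pop the first entry from the idle (or, below, the busy) list under the
 * matching pool lock; returns NULL when the list is empty.
 */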
struct rtw_test_rx *_phl_query_idle_rx_req(struct phl_info_t *phl_info)
{
	struct rtw_test_rx *rreq = NULL;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &rx_req_pool->idle_lock, _ps, NULL);

	if (false == list_empty(&rx_req_pool->idle_list)) {
		rreq = list_first_entry(&rx_req_pool->idle_list,
					struct rtw_test_rx, list);
		list_del(&rreq->list);
		rx_req_pool->idle_cnt--;
	}

	_os_spinunlock(drv_priv, &rx_req_pool->idle_lock, _ps, NULL);

	return rreq;
}

struct rtw_test_rx *_phl_query_busy_rx_req(struct phl_info_t *phl_info)
{
	struct rtw_test_rx *rreq = NULL;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &rx_req_pool->busy_lock, _ps, NULL);

	if (false == list_empty(&rx_req_pool->busy_list)) {
		rreq = list_first_entry(&rx_req_pool->busy_list,
					struct rtw_test_rx, list);
		list_del(&rreq->list);
		rx_req_pool->busy_cnt--;
	}

	_os_spinunlock(drv_priv, &rx_req_pool->busy_lock, _ps, NULL);

	return rreq;
}

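/*
 * Recycle an rx request: clear its meta data and packet list, then return
 * it to the idle list under the idle lock.
 */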
void _phl_release_rx_req(struct phl_info_t *phl_info, struct rtw_test_rx *rreq)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &rx_req_pool->idle_lock, _ps, NULL);

	_os_mem_set(drv_priv, &rreq->rx.mdata, 0,
		    sizeof(rreq->rx.mdata));
	/* clear one pkt_list entry per used segment; the original scaled
	 * sizeof(the whole array) by pkt_cnt and could over-clear */
	_os_mem_set(drv_priv, &rreq->rx.pkt_list, 0,
		    sizeof(rreq->rx.pkt_list[0]) * rreq->rx.pkt_cnt);

	rreq->rx.shortcut_id = 0;
	rreq->rx.pkt_cnt = 0;

	INIT_LIST_HEAD(&rreq->list);
	list_add_tail(&rreq->list, &rx_req_pool->idle_list);
	rx_req_pool->idle_cnt++;

	_os_spinunlock(drv_priv, &rx_req_pool->idle_lock, _ps, NULL);
}

void _phl_insert_busy_rx_req(struct phl_info_t *phl_info, struct rtw_test_rx *rreq)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &rx_req_pool->busy_lock, _ps, NULL);

	list_add_tail(&rreq->list, &rx_req_pool->busy_list);
	rx_req_pool->busy_cnt++;

	_os_spinunlock(drv_priv, &rx_req_pool->busy_lock, _ps, NULL);
}

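/*
 * Sketch of the intended pool flow (the tx request/packet pools below
 * follow the same pattern): an entry moves idle -> busy while in flight,
 * then is recycled back to idle.
 *
 *	rreq = _phl_query_idle_rx_req(phl_info);
 *	if (NULL != rreq) {
 *		... fill rreq->rx ...
 *		_phl_insert_busy_rx_req(phl_info, rreq);
 *	}
 *	... later, on completion ...
 *	rreq = _phl_query_busy_rx_req(phl_info);
 *	if (NULL != rreq)
 *		_phl_release_rx_req(phl_info, rreq);
 */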

void _phl_free_tx_req_pool(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	PHL_INFO("_phl_free_tx_req_pool : idle counter (%d), busy counter (%d), total counter (%d)\n",
		 tx_req_pool->idle_cnt, tx_req_pool->busy_cnt, tx_req_pool->total_cnt);
	_os_spinlock_free(drv_priv, &tx_req_pool->idle_lock);
	_os_spinlock_free(drv_priv, &tx_req_pool->busy_lock);
	INIT_LIST_HEAD(&tx_req_pool->idle_list);
	INIT_LIST_HEAD(&tx_req_pool->busy_list);

	if (NULL != tx_req_pool->buf)
		_os_mem_free(drv_priv, tx_req_pool->buf, tx_req_pool->buf_len);

	tx_req_pool->buf_len = 0;
	tx_req_pool->idle_cnt = 0;
	tx_req_pool->busy_cnt = 0;
	tx_req_pool->total_cnt = 0;
}

enum rtw_phl_status _phl_alloc_tx_req_pool(void *phl, u32 tx_req_num)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_xmit_req *treq = NULL;
	u32 buf_len = 0, i = 0;

	FUNCIN_WSTS(status);

	do {
		if (0 == tx_req_num)
			break;

		tx_req_pool->total_cnt = tx_req_num;
		buf_len = sizeof(struct rtw_xmit_req) * tx_req_num;
		tx_req_pool->buf = _os_mem_alloc(drv_priv, buf_len);

		if (NULL == tx_req_pool->buf)
			break;
		tx_req_pool->buf_len = buf_len;
		INIT_LIST_HEAD(&tx_req_pool->idle_list);
		INIT_LIST_HEAD(&tx_req_pool->busy_list);
		_os_spinlock_init(drv_priv, &tx_req_pool->idle_lock);
		_os_spinlock_init(drv_priv, &tx_req_pool->busy_lock);
		treq = (struct rtw_xmit_req *)tx_req_pool->buf;
		for (i = 0; i < tx_req_num; i++) {
			INIT_LIST_HEAD(&treq[i].list);
			treq[i].treq_type = RTW_PHL_TREQ_TYPE_TEST_PATTERN;
			list_add_tail(&treq[i].list, &tx_req_pool->idle_list);
			tx_req_pool->idle_cnt++;
		}

		status = RTW_PHL_STATUS_SUCCESS;

	} while (false);
	PHL_INFO("_phl_alloc_tx_req_pool : idle counter (%d), busy counter (%d), total counter (%d)\n",
		 tx_req_pool->idle_cnt, tx_req_pool->busy_cnt, tx_req_pool->total_cnt);
	FUNCOUT_WSTS(status);

	return status;
}

struct rtw_xmit_req *_phl_query_idle_tx_req(struct phl_info_t *phl_info)
{
	struct rtw_xmit_req *treq = NULL;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_req_pool->idle_lock, _ps, NULL);

	if (false == list_empty(&tx_req_pool->idle_list)) {
		treq = list_first_entry(&tx_req_pool->idle_list,
					struct rtw_xmit_req, list);
		list_del(&treq->list);
		tx_req_pool->idle_cnt--;
	}

	_os_spinunlock(drv_priv, &tx_req_pool->idle_lock, _ps, NULL);

	return treq;
}

struct rtw_xmit_req *_phl_query_busy_tx_req(struct phl_info_t *phl_info)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	struct rtw_xmit_req *treq = NULL;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_req_pool->busy_lock, _ps, NULL);

	if (false == list_empty(&tx_req_pool->busy_list)) {
		treq = list_first_entry(&tx_req_pool->busy_list,
					struct rtw_xmit_req, list);
		list_del(&treq->list);
		tx_req_pool->busy_cnt--;
	}

	_os_spinunlock(drv_priv, &tx_req_pool->busy_lock, _ps, NULL);

	return treq;
}

void _phl_remove_busy_tx_req(struct phl_info_t *phl_info, struct rtw_xmit_req *treq)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_req_pool->busy_lock, _ps, NULL);

	if (false == list_empty(&tx_req_pool->busy_list)) {
		list_del(&treq->list);
		tx_req_pool->busy_cnt--;
	}

	_os_spinunlock(drv_priv, &tx_req_pool->busy_lock, _ps, NULL);
}

void _phl_release_tx_req(struct phl_info_t *phl_info, struct rtw_xmit_req *treq)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_req_pool->idle_lock, _ps, NULL);

	_os_mem_set(drv_priv, &treq->mdata, 0, sizeof(treq->mdata));

	treq->shortcut_id = 0;
	treq->total_len = 0;
	treq->pkt_cnt = 0;
	treq->pkt_list = NULL;
	treq->os_priv = NULL;

	INIT_LIST_HEAD(&treq->list);
	list_add_tail(&treq->list, &tx_req_pool->idle_list);
	tx_req_pool->idle_cnt++;

	_os_spinunlock(drv_priv, &tx_req_pool->idle_lock, _ps, NULL);
}

void _phl_insert_busy_tx_req(struct phl_info_t *phl_info, struct rtw_xmit_req *treq)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_req_pool->busy_lock, _ps, NULL);

	list_add_tail(&treq->list, &tx_req_pool->busy_list);
	tx_req_pool->busy_cnt++;

	_os_spinunlock(drv_priv, &tx_req_pool->busy_lock, _ps, NULL);
}

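/*
 * The tx packet pool also owns the payload buffers: each rtw_payload is
 * backed by a DMA-capable buffer on PCIe (_os_shmem_alloc) or a plain
 * buffer on USB/SDIO, so teardown must free those buffers first.
 */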
void _phl_free_tx_pkt_pool(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_payload *tpkt = NULL;
	u32 i = 0;

	PHL_INFO("_phl_free_tx_pkt_pool : idle counter (%d), busy counter (%d), total counter (%d)\n",
		 tx_pkt_pool->idle_cnt, tx_pkt_pool->busy_cnt, tx_pkt_pool->total_cnt);
	_os_spinlock_free(drv_priv, &tx_pkt_pool->idle_lock);
	_os_spinlock_free(drv_priv, &tx_pkt_pool->busy_lock);
	INIT_LIST_HEAD(&tx_pkt_pool->idle_list);
	INIT_LIST_HEAD(&tx_pkt_pool->busy_list);

	tpkt = (struct rtw_payload *)tx_pkt_pool->buf;
	for (i = 0; i < tx_pkt_pool->total_cnt; i++) {
		INIT_LIST_HEAD(&tpkt->list);
		if (NULL != tpkt->pkt.vir_addr) {
#ifdef CONFIG_PCI_HCI
			_os_shmem_free(drv_priv, NULL,
				       tpkt->pkt.vir_addr,
				       (_dma *)&tpkt->pkt.phy_addr_l,
				       (_dma *)&tpkt->pkt.phy_addr_h,
				       MAX_TEST_PAYLOAD_SIZE,
				       false,
				       PCI_DMA_TODEVICE,
				       tpkt->os_rsvd[0]);
#else
			/* buffers were allocated with MAX_TEST_PAYLOAD_SIZE and
			 * pkt.length may already be 0 after _phl_release_tx_pkt,
			 * so free with the allocation size (matches the PCI
			 * branch above) */
			_os_mem_free(drv_priv, tpkt->pkt.vir_addr,
				     MAX_TEST_PAYLOAD_SIZE);
#endif
		}
		tpkt->pkt.length = 0;
		tpkt++;
	}

	if (NULL != tx_pkt_pool->buf)
		_os_mem_free(drv_priv, tx_pkt_pool->buf, tx_pkt_pool->buf_len);

	tx_pkt_pool->buf_len = 0;
	tx_pkt_pool->idle_cnt = 0;
	tx_pkt_pool->busy_cnt = 0;
	tx_pkt_pool->total_cnt = 0;
}

enum rtw_phl_status _phl_alloc_tx_pkt_pool(void *phl, u32 tx_pkt_num,
					   u32 tx_pkt_size)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct rtw_payload *tpkt = NULL;
	u32 buf_len = 0, i = 0;

	FUNCIN_WSTS(status);

	do {
		if (0 == tx_pkt_num)
			break;

		tx_pkt_pool->total_cnt = tx_pkt_num;
		buf_len = sizeof(struct rtw_payload) * tx_pkt_num;
		tx_pkt_pool->buf = _os_mem_alloc(drv_priv, buf_len);

		if (NULL == tx_pkt_pool->buf)
			break;
		tx_pkt_pool->buf_len = buf_len;
		INIT_LIST_HEAD(&tx_pkt_pool->idle_list);
		INIT_LIST_HEAD(&tx_pkt_pool->busy_list);
		_os_spinlock_init(drv_priv, &tx_pkt_pool->idle_lock);
		_os_spinlock_init(drv_priv, &tx_pkt_pool->busy_lock);
		tpkt = (struct rtw_payload *)tx_pkt_pool->buf;
		for (i = 0; i < tx_pkt_num; i++) {
			INIT_LIST_HEAD(&tpkt[i].list);
#ifdef CONFIG_PCI_HCI
			tpkt[i].pkt.vir_addr = _os_shmem_alloc(drv_priv, NULL,
					(_dma *)&tpkt[i].pkt.phy_addr_l,
					(_dma *)&tpkt[i].pkt.phy_addr_h,
					tx_pkt_size,
					false,
					PCI_DMA_TODEVICE,
					&tpkt[i].os_rsvd[0]);
#else /* USB/SDIO */
			tpkt[i].pkt.vir_addr = _os_mem_alloc(drv_priv, tx_pkt_size);
#endif
			if (NULL == tpkt[i].pkt.vir_addr) {
				PHL_WARN("allocate tx pkt buf fail\n");
				break;
			}

			/* hana_todo */
			/* tpkt[i].pkt.phy_addr_l = 0; */
			/* tpkt[i].pkt.phy_addr_h = 0; */
			tpkt[i].pkt.length = (u16)tx_pkt_size;
			tpkt[i].test_id = i;

			list_add_tail(&tpkt[i].list, &tx_pkt_pool->idle_list);
			tx_pkt_pool->idle_cnt++;
		}

		status = RTW_PHL_STATUS_SUCCESS;

	} while (false);
	PHL_INFO("_phl_alloc_tx_pkt_pool : idle counter (%d), busy counter (%d), total counter (%d)\n",
		 tx_pkt_pool->idle_cnt, tx_pkt_pool->busy_cnt, tx_pkt_pool->total_cnt);
	FUNCOUT_WSTS(status);

	return status;
}

struct rtw_payload *_phl_query_idle_tx_pkt(struct phl_info_t *phl_info)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	struct rtw_payload *tpkt = NULL;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_pkt_pool->idle_lock, _ps, NULL);

	if (false == list_empty(&tx_pkt_pool->idle_list)) {
		tpkt = list_first_entry(&tx_pkt_pool->idle_list,
					struct rtw_payload, list);
		list_del(&tpkt->list);
		tx_pkt_pool->idle_cnt--;
	}

	_os_spinunlock(drv_priv, &tx_pkt_pool->idle_lock, _ps, NULL);

	return tpkt;
}

struct rtw_payload *_phl_query_busy_tx_pkt(struct phl_info_t *phl_info)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	struct rtw_payload *tpkt = NULL;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_pkt_pool->busy_lock, _ps, NULL);

	if (false == list_empty(&tx_pkt_pool->busy_list)) {
		tpkt = list_first_entry(&tx_pkt_pool->busy_list,
					struct rtw_payload, list);
		list_del(&tpkt->list);
		tx_pkt_pool->busy_cnt--;
	}

	_os_spinunlock(drv_priv, &tx_pkt_pool->busy_lock, _ps, NULL);

	return tpkt;
}

void _phl_remove_busy_tx_pkt(struct phl_info_t *phl_info, struct rtw_payload *tpkt)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_pkt_pool->busy_lock, _ps, NULL);

	if (false == list_empty(&tx_pkt_pool->busy_list)) {
		list_del(&tpkt->list);
		tx_pkt_pool->busy_cnt--;
	}

	_os_spinunlock(drv_priv, &tx_pkt_pool->busy_lock, _ps, NULL);
}

void _phl_release_tx_pkt(struct phl_info_t *phl_info, struct rtw_payload *tpkt)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_pkt_pool->idle_lock, _ps, NULL);

	_os_mem_set(drv_priv, tpkt->pkt.vir_addr, 0, tpkt->pkt.length);
	tpkt->pkt.length = 0;

	INIT_LIST_HEAD(&tpkt->list);
	list_add_tail(&tpkt->list, &tx_pkt_pool->idle_list);
	tx_pkt_pool->idle_cnt++;

	_os_spinunlock(drv_priv, &tx_pkt_pool->idle_lock, _ps, NULL);
}

void _phl_insert_busy_tx_pkt(struct phl_info_t *phl_info, struct rtw_payload *tpkt)
{
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_spinlock(drv_priv, &tx_pkt_pool->busy_lock, _ps, NULL);

	list_add_tail(&tpkt->list, &tx_pkt_pool->busy_list);
	tx_pkt_pool->busy_cnt++;

	_os_spinunlock(drv_priv, &tx_pkt_pool->busy_lock, _ps, NULL);
}

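/*
 * The tx test is done once both the tx request and tx packet busy lists
 * have drained back to their idle pools.
 */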
u8 _phl_is_tx_test_done(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *tx_req_pool = &trx_test->tx_req_pool;
	struct rtw_pool *tx_pkt_pool = &trx_test->tx_pkt_pool;

	if (list_empty(&tx_req_pool->busy_list) &&
	    list_empty(&tx_pkt_pool->busy_list))
		return true;
	else
		return false;
}

void phl_update_test_param(void *phl, struct rtw_trx_test_param *test_param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	void *drv_priv = phl_to_drvpriv(phl_info);

	_os_mem_cpy(drv_priv, &trx_test->test_param, test_param, sizeof(*test_param));
}

extern enum rtw_phl_status
phl_wifi_role_start(struct phl_info_t *phl_info,
		    struct rtw_wifi_role_t *wrole,
		    struct rtw_phl_stainfo_t *sta);
enum rtw_phl_status _phl_test_add_role(
	void *phl,
	struct rtw_trx_test_param *test_param)
{
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
	struct rtw_t_meta_data *txcap = NULL;
	struct rtw_wifi_role_t *test_wrole = &phl_com->wifi_roles[0];
	struct rtw_phl_stainfo_t *sta_info = NULL;

	if (NULL != test_param) {
		txcap = &test_param->tx_cap;

		test_wrole->hw_band = txcap->band;
		test_wrole->hw_port = (u8)txcap->macid;

		sta_info = rtw_phl_get_stainfo_by_addr(phl_info, test_wrole,
						       test_wrole->mac_addr);
		if (NULL != sta_info) {
			test_param->tx_cap.macid = sta_info->macid;
			phl_status = phl_wifi_role_start(phl_info, test_wrole, sta_info);
			PHL_INFO("update test param macid to %d\n",
				 test_param->tx_cap.macid);
		} else {
			PHL_ERR("fail to get stainfo from test wrole!\n");
			phl_status = RTW_PHL_STATUS_FAILURE;
		}
	}

	return phl_status;
}

extern enum rtw_phl_status
phl_wifi_role_stop(struct phl_info_t *phl_info, struct rtw_wifi_role_t *wrole);
enum rtw_phl_status _phl_test_remove_role(
	void *phl,
	struct rtw_trx_test_param *test_param)
{
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_phl_com_t *phl_com = phl_info->phl_com;
	struct rtw_wifi_role_t *test_wrole = &phl_com->wifi_roles[0];

	if (NULL != test_param)
		phl_status = phl_wifi_role_stop(phl_info, test_wrole);

	return phl_status;
}

void phl_test_sw_free(void *phl)
{
	FUNCIN();

	_phl_free_tx_pkt_pool(phl);
	_phl_free_rx_req_pool(phl);
	_phl_free_tx_req_pool(phl);

	FUNCOUT();
}

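/*
 * Allocate the three test pools (tx requests, rx requests, tx payloads);
 * the first failure aborts the sequence and its status is returned.
 */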
enum rtw_phl_status phl_test_sw_alloc(void *phl)
{
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;

	FUNCIN_WSTS(sts);
	do {
		sts = _phl_alloc_tx_req_pool(phl, MAX_TEST_TXREQ_NUM);
		if (RTW_PHL_STATUS_SUCCESS != sts)
			break;
		sts = _phl_alloc_rx_req_pool(phl, MAX_TEST_RXREQ_NUM);
		if (RTW_PHL_STATUS_SUCCESS != sts)
			break;
		sts = _phl_alloc_tx_pkt_pool(phl, MAX_TEST_PAYLOAD_NUM,
					     MAX_TEST_PAYLOAD_SIZE);

	} while (false);
	FUNCOUT_WSTS(sts);

	return sts;
}

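/*
 * Allocate and zero the trx test context, hook the tx recycle callback
 * into the PHL event ops and pre-allocate the test pools.
 */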
enum rtw_phl_status phl_trx_test_init(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = NULL;
	struct rtw_phl_evt_ops *ops = &phl_info->phl_com->evt_ops;
	void *drv_priv = phl_to_drvpriv(phl_info);
	enum rtw_phl_status phl_status = RTW_PHL_STATUS_FAILURE;

	if (NULL == (trx_test = _os_mem_alloc(drv_priv, sizeof(struct phl_trx_test)))) {
		phl_info->trx_test = NULL;
		PHL_ERR("%s: alloc buffer failed\n", __func__);
		return RTW_PHL_STATUS_FAILURE;
	}

	phl_info->trx_test = trx_test;

	INIT_LIST_HEAD(&trx_test->rx_q);
	_os_mem_set(drv_priv, &trx_test->tx_req_pool, 0, sizeof(trx_test->tx_req_pool));
	_os_mem_set(drv_priv, &trx_test->rx_req_pool, 0, sizeof(trx_test->rx_req_pool));
	_os_mem_set(drv_priv, &trx_test->tx_pkt_pool, 0, sizeof(trx_test->tx_pkt_pool));
	_os_mem_set(drv_priv, &trx_test->test_param, 0, sizeof(trx_test->test_param));
	_os_mem_set(drv_priv, &trx_test->trx_test_obj, 0, sizeof(trx_test->trx_test_obj));
	ops->tx_test_recycle = phl_recycle_test_tx;
	phl_status = phl_test_sw_alloc(phl);
#if 0
	gtest_rxq_handler.type = RTW_PHL_HANDLER_PRIO_HIGH; /* tasklet */
	gtest_rxq_handler.callback = rtw_phl_test_rx_callback;
	gtest_rxq_handler.context = phl_info;
	gtest_rxq_handler.drv_priv = phl_to_drvpriv(phl_info);
	sts = phl_register_handler(phl_info->phl_com, &gtest_rxq_handler);
	if (RTW_PHL_STATUS_SUCCESS != sts) {
		PHL_ERR("register test rx queue handler fail\n");
		phl_deregister_handler(phl_info->phl_com, &gtest_rxq_handler);
	}
#endif
	return phl_status;
}

void phl_trx_test_deinit(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv = phl_to_drvpriv(phl_info);

	phl_test_sw_free(phl);

	_os_mem_free(drv, phl_info->trx_test, sizeof(struct phl_trx_test));
	phl_info->trx_test = NULL;
}

void phl_test_hw_config_init(void *phl, u8 mode)
{
	switch (mode) {
	case TEST_MODE_PHL_TX_RING_TEST:
		/* address cam receive all */
		/* rtl8852a_hal_init_misc: */
		/* rtl8852ae_test_loopback: */
		break;
	default:
		break;
	}
}

void phl_test_hw_config_runtime(void *phl, u8 mode)
{
	switch (mode) {
	case TEST_MODE_PHL_TX_RING_TEST:
		/* debug register */
		/* edca config */
		/* zero delimiter counter flush */
		break;
	default:
		break;
	}
}

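/*
 * Build the test frame in place: an 802.11 data header (AP or STA
 * direction), an optional QoS control field, then a fixed test pattern as
 * payload.
 */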
void phl_test_fill_packet_content(struct phl_info_t *phl_info, u8 *pkt,
				  u16 size,
				  struct rtw_trx_test_param *test_param)
{
	void *drv_priv = phl_to_drvpriv(phl_info);
	u8 test_pattern[7] = {0x00, 0x01, 0x02, 0x55, 0x66, 0x94, 0x87};
	u16 qos_ofst = 0, payload_ofst = 0;

	/* wlan header */
	SET_WHDR_PROTOCOL_VERSION(pkt, 0);
	SET_WHDR_TYPE(pkt, 2); /* data */
	SET_WHDR_SUBTYPE(pkt, 0);
	if (true == test_param->ap_mode) {
		SET_WHDR_TO_DS(pkt, 0);
		SET_WHDR_FROM_DS(pkt, 1);
		SET_WHDR_ADDRESS1(drv_priv, pkt, test_param->sta_addr);
		SET_WHDR_ADDRESS2(drv_priv, pkt, test_param->bssid);
		SET_WHDR_ADDRESS3(drv_priv, pkt, test_param->cur_addr);
	} else {
		SET_WHDR_TO_DS(pkt, 1);
		SET_WHDR_FROM_DS(pkt, 0);
		SET_WHDR_ADDRESS1(drv_priv, pkt, test_param->bssid);
		SET_WHDR_ADDRESS2(drv_priv, pkt, test_param->cur_addr);
		SET_WHDR_ADDRESS3(drv_priv, pkt, test_param->sta_addr);
	}

	SET_WHDR_MORE_FRAG(pkt, 0);
	SET_WHDR_RETRY(pkt, 0);
	SET_WHDR_PWR_MGNT(pkt, 0);
	SET_WHDR_MORE_DATA(pkt, 0);
	SET_WHDR_WEP(pkt, 0);
	SET_WHDR_ORDER(pkt, 0);

	SET_WHDR_DURATION(pkt, 0);
	SET_WHDR_FRAGMENT_SEQUENCE(pkt, 0);

	qos_ofst = 24;

	if (true == test_param->qos) {
		SET_WHDR_QOS_EN(pkt, 1);
		SET_WHDR_QOS_CTRL_STA_DATA_TID(pkt + qos_ofst,
					       test_param->tx_cap.tid);
		SET_WHDR_QOS_CTRL_STA_DATA_EOSP(pkt + qos_ofst, 0);
		SET_WHDR_QOS_CTRL_STA_DATA_ACK_POLICY(pkt + qos_ofst, 0);
		SET_WHDR_QOS_CTRL_STA_DATA_AMSDU(pkt + qos_ofst, 0);
		SET_WHDR_QOS_CTRL_STA_DATA_TXOP(pkt + qos_ofst, 0);
	}
	/* wlan payload; the QoS control field only exists in QoS data
	 * frames, so skip it for non-QoS frames */
	payload_ofst = (true == test_param->qos) ?
		       (qos_ofst + WHDR_QOS_LENGTH) : qos_ofst;
	_os_mem_cpy(drv_priv, pkt + payload_ofst, test_pattern,
		    sizeof(test_pattern));

	debug_dump_data(pkt, size, "phl trx test pattern");
}

void rtw_phl_test_rx_callback(void *context)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)context;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_pool *rx_req_pool = &trx_test->rx_req_pool;
	struct rtw_test_rx *rreq = NULL;
	struct rtw_recv_pkt *recvpkt = NULL;
	struct rtw_payload *tpkt = NULL;

	while (!list_empty(&rx_req_pool->busy_list)) {
		rreq = _phl_query_busy_rx_req(phl_info);
		if (NULL == rreq) {
			PHL_WARN("Get NULL rx request from busy queue\n");
			break;
		}
		recvpkt = &rreq->rx;
		PHL_INFO("Rx test_id = %d\n", rreq->test_id);
		PHL_INFO("Rx mac id = %d\n", recvpkt->mdata.macid);
		PHL_INFO("Rx tid = %d\n", recvpkt->mdata.tid);
		debug_dump_data(recvpkt->mdata.mac_addr, 6, "Rx mac address: ");
		debug_dump_data(recvpkt->pkt_list[0].vir_addr,
				recvpkt->pkt_list[0].length, "Rx Pkt: ");

		/* grab the tx payload before the rx request is recycled back
		 * to the idle pool */
		tpkt = (struct rtw_payload *)rreq->tpkt;
		_phl_release_rx_req(phl_info, rreq);
		_phl_release_tx_pkt(phl_info, tpkt);
	}
}

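/*
 * Loop a completed tx request back as a fake rx: copy its meta data and
 * buffer segments into an idle rx request, park that request on the busy
 * list and recycle the tx request.
 */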
enum rtw_phl_status rtw_phl_rx_reap(void *phl, u8 *xmit_req,
				    struct rtw_trx_test_param *param)
{
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct rtw_xmit_req *treq = (struct rtw_xmit_req *)xmit_req;
	struct rtw_payload *tpkt = NULL;
	struct rtw_test_rx *rreq = NULL;
	struct rtw_recv_pkt *recvpkt = NULL;
	struct rtw_pkt_buf_list *pkt = NULL;
	void *drv_priv = phl_to_drvpriv(phl_info);
	u8 i = 0;

	rreq = _phl_query_idle_rx_req(phl_info);

	do {
		if (NULL == rreq) {
			PHL_ERR("query idle rx request fail!\n");
			break;
		}

		recvpkt = &rreq->rx;
		recvpkt->mdata.macid = treq->mdata.macid;
		recvpkt->mdata.tid = treq->mdata.tid;
		recvpkt->mdata.hal_port = treq->mdata.hal_port;
		recvpkt->mdata.dma_ch = 0; /* normal rx ch */
		_os_mem_cpy(drv_priv, recvpkt->mdata.mac_addr, param->sta_addr, 6);
		recvpkt->mdata.seq = treq->mdata.sw_seq;
		recvpkt->mdata.mc = treq->mdata.mc;
		recvpkt->mdata.bc = treq->mdata.bc;

		pkt = (struct rtw_pkt_buf_list *)treq->pkt_list;

		tpkt = phl_container_of(pkt, struct rtw_payload, pkt);
		for (i = 0; i < treq->pkt_cnt; i++) {
			if (i >= MAX_RX_BUF_SEG_NUM) {
				PHL_ERR("tx packet has too many segments\n");
				break;
			}

			recvpkt->pkt_list[i].vir_addr = pkt->vir_addr;
			recvpkt->pkt_list[i].phy_addr_l = pkt->phy_addr_l;
			recvpkt->pkt_list[i].phy_addr_h = pkt->phy_addr_h;
			recvpkt->pkt_list[i].length = pkt->length;

			recvpkt->pkt_cnt++;

			pkt++;
		}

		rreq->tpkt = (u8 *)tpkt;
		rreq->test_id = tpkt->test_id;

		_phl_insert_busy_rx_req(phl_info, rreq);
		sts = RTW_PHL_STATUS_SUCCESS;

	} while (false);

	if (RTW_PHL_STATUS_SUCCESS == sts)
		_phl_release_tx_req(phl_info, treq);

	return sts;
}

enum rtw_phl_status rtw_phl_test_rxq_notify(void *phl)
{
	enum rtw_phl_status pstatus = RTW_PHL_STATUS_FAILURE;
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;

	pstatus = phl_schedule_handler(phl_info->phl_com,
				       &phl_info->phl_tx_handler);

	return pstatus;
}

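/*
 * TEST_MODE_PHL_TX_RING_TEST: build tx_req_num test frames from the idle
 * pools, push them onto the phl tx ring and kick the tx process; requests
 * and payloads stay on the busy lists until recycled.
 */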
enum rtw_phl_status phl_tx_ring_test(void *phl,
				     struct rtw_trx_test_param *test_param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct rtw_xmit_req *treq = NULL;
	struct rtw_payload *tpkt = NULL;
	u32 tx_cnt = 0;

	FUNCIN_WSTS(sts);
	do {
		for (tx_cnt = 0; tx_cnt < test_param->tx_req_num; tx_cnt++) {
			/* query tx request pool */
			treq = _phl_query_idle_tx_req(phl_info);
			if (NULL == treq) {
				PHL_WARN("query idle tx request from pool fail\n");
				break;
			}

			tpkt = _phl_query_idle_tx_pkt(phl_info);
			if (NULL == tpkt) {
				PHL_WARN("query idle tx packet from pool fail\n");
				break;
			}

			/* fill meta_data */
			_os_mem_cpy(drv_priv, &treq->mdata, &test_param->tx_cap,
				    sizeof(struct rtw_t_meta_data));
			/* fill tx request content */
			if (test_param->tx_payload_size > MAX_TEST_PAYLOAD_SIZE)
				tpkt->pkt.length = MAX_TEST_PAYLOAD_SIZE;
			else
				tpkt->pkt.length = (u16)test_param->tx_payload_size;

			phl_test_fill_packet_content(phl_info, tpkt->pkt.vir_addr,
						     tpkt->pkt.length, test_param);
			/* assign this tx pkt to tx request */
			treq->os_priv = tpkt;
			treq->pkt_cnt = 1;
			treq->mdata.wdinfo_en = 1;
			treq->total_len = (u16)tpkt->pkt.length;
			treq->pkt_list = (u8 *)&tpkt->pkt;

			/* add to phl_tx_ring */
			sts = rtw_phl_add_tx_req(phl, treq);
			if (RTW_PHL_STATUS_SUCCESS != sts) {
				PHL_INFO("add new tx request (%d) to phl ring fail\n", tx_cnt);
				_phl_release_tx_req(phl_info, treq);
				_phl_release_tx_pkt(phl_info, tpkt);
				break;
			} else {
				_phl_insert_busy_tx_req(phl_info, treq);
				_phl_insert_busy_tx_pkt(phl_info, tpkt);
			}
		}
		/* schedule tx process */
		sts = rtw_phl_tx_req_notify(phl);
		if (RTW_PHL_STATUS_SUCCESS != sts) {
			PHL_INFO("notify phl to start tx process fail\n");
			break;
		}
	} while (false);
	/*
	while (false == _phl_is_tx_test_done(phl) && i < 100) {
		_os_delay_ms(drv_priv, 1);
		i++;
	}
	*/
	FUNCOUT_WSTS(sts);
	return sts;
}

enum rtw_phl_status phl_rx_ring_test(void *phl,
				     struct rtw_trx_test_param *test_param)
{
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;

	FUNCIN_WSTS(sts);
	do {
		sts = rtw_phl_start_rx_process(phl);

		if (RTW_PHL_STATUS_FAILURE == sts) {
			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] phl_rx ring fail!\n");
			break;
		}
	} while (false);
	FUNCOUT_WSTS(sts);
	return sts;
}

enum rtw_phl_status phl_hal_tx_test(void *phl,
				    struct rtw_trx_test_param *test_param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	void *drv_priv = phl_to_drvpriv(phl_info);
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;
	struct rtw_xmit_req *treq = NULL;
	struct rtw_payload *tpkt = NULL;

	FUNCIN_WSTS(sts);
	do {
		/* query tx request pool */
		treq = _phl_query_idle_tx_req(phl_info);
		if (NULL == treq) {
			PHL_WARN("query idle tx request from pool fail\n");
			break;
		}

		tpkt = _phl_query_idle_tx_pkt(phl_info);
		if (NULL == tpkt) {
			PHL_WARN("query idle tx packet from pool fail\n");
			break;
		}

		/* fill meta_data */
		_os_mem_cpy(drv_priv, &treq->mdata, &test_param->tx_cap,
			    sizeof(struct rtw_t_meta_data));
		/* fill tx request content */
		if (test_param->tx_payload_size > MAX_TEST_PAYLOAD_SIZE)
			tpkt->pkt.length = MAX_TEST_PAYLOAD_SIZE;
		else
			tpkt->pkt.length = (u16)test_param->tx_payload_size;

		phl_test_fill_packet_content(phl_info, tpkt->pkt.vir_addr,
					     tpkt->pkt.length, test_param);
		/* assign this tx pkt to tx request */
		treq->os_priv = tpkt;
		treq->pkt_cnt = 1;
		treq->mdata.wdinfo_en = 1;
		treq->total_len = (u16)tpkt->pkt.length;
		treq->pkt_list = (u8 *)&tpkt->pkt;
		/* hand the request to the hci tx path */
		sts = hci_trx_ops->prepare_tx(phl_info, treq);
		if (RTW_PHL_STATUS_SUCCESS != sts) {
			PHL_INFO("hci prepare_tx for new tx request fail\n");
			break;
		}
		/* schedule tx process */
		sts = hci_trx_ops->tx(phl_info);
		if (RTW_PHL_STATUS_SUCCESS != sts) {
			PHL_INFO("hci tx fail\n");
			break;
		}
	} while (false);

	_os_delay_ms(drv_priv, 100);

	if (RTW_PHL_STATUS_SUCCESS != sts) {
		if (NULL != treq)
			_phl_release_tx_req(phl_info, treq);
		if (NULL != tpkt)
			_phl_release_tx_pkt(phl_info, tpkt);
	}
	FUNCOUT_WSTS(sts);
	return sts;
}

enum rtw_phl_status phl_hal_rx_test(void *phl,
				    struct rtw_trx_test_param *test_param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_hci_trx_ops *hci_trx_ops = phl_info->hci_trx_ops;
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;

	FUNCIN_WSTS(sts);
	do {
		/* trigger the hci rx process */
		sts = hci_trx_ops->rx(phl_info);

		if (RTW_PHL_STATUS_FAILURE == sts) {
			PHL_TRACE(COMP_PHL_DBG, _PHL_WARNING_, "[WARNING] phl_rx fail!\n");
			break;
		}
	} while (false);
	FUNCOUT_WSTS(sts);
	return sts;
}

enum rtw_phl_status phl_trx_test_start(void *phl,
				       struct rtw_trx_test_param *test_param)
{
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;

	FUNCIN_WSTS(status);
	switch (test_param->mode) {
	case TEST_MODE_PHL_TX_RING_TEST:
		status = phl_tx_ring_test(phl, test_param);
		break;
	case TEST_MODE_PHL_RX_RING_TEST:
		status = phl_rx_ring_test(phl, test_param);
		break;
	case TEST_MODE_HAL_TX_TEST:
		status = phl_hal_tx_test(phl, test_param);
		break;
	case TEST_MODE_HAL_RX_TEST:
		status = phl_hal_rx_test(phl, test_param);
		break;
	default:
		break;
	}
	FUNCOUT_WSTS(status);
	return status;
}

void phl_trx_test_dump_result(void *phl, struct rtw_trx_test_param *test_param)
{
	PHL_INFO("Test Done\n");
}

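/*
 * Tx recycle callback hooked into evt_ops->tx_test_recycle: detach the
 * request and its payload from the busy lists and return them to the idle
 * pools.
 */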
enum rtw_phl_status phl_recycle_test_tx(void *phl, struct rtw_xmit_req *treq)
{
	struct phl_info_t *phl_info = NULL;
	struct rtw_payload *tpkt = NULL;
	enum rtw_phl_status sts = RTW_PHL_STATUS_FAILURE;

	FUNCIN_WSTS(sts);
	if (NULL == phl) {
		PHL_ERR("phl is NULL!\n");
		goto end;
	}
	phl_info = (struct phl_info_t *)phl;

	if (NULL == treq) {
		PHL_ERR("treq is NULL!\n");
		goto end;
	}
	tpkt = (struct rtw_payload *)treq->os_priv;

	if (NULL == tpkt) {
		PHL_ERR("tpkt is NULL!\n");
		goto end;
	}
	_phl_remove_busy_tx_req(phl_info, treq);
	_phl_release_tx_req(phl_info, treq);

	_phl_remove_busy_tx_pkt(phl_info, tpkt);
	_phl_release_tx_pkt(phl_info, tpkt);

	sts = RTW_PHL_STATUS_SUCCESS;

end:
	FUNCOUT_WSTS(sts);
	return sts;
}

void _phl_rx_test_pattern(struct phl_info_t *phl_info, void *ptr)
{
	struct rtw_recv_pkt *rpkt = NULL;

	FUNCIN();

	if (NULL == ptr) {
		PHL_ERR("ptr is NULL!\n");
		goto end;
	}

	rpkt = (struct rtw_recv_pkt *)ptr;

	PHL_INFO("rpkt->buf_len = %d\n", rpkt->pkt_list[0].length);
	debug_dump_data(rpkt->pkt_list[0].vir_addr,
			(u16)rpkt->pkt_list[0].length, "dump_rx");

end:
	FUNCOUT();
}

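/*
 * Fill test_param with defaults: phl tx ring mode, one broadcast frame of
 * 100 bytes, fixed local/peer addresses and a fixed rate setup.
 */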
void rtw_phl_trx_default_param(void *phl, struct rtw_trx_test_param *test_param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;

	_os_mem_set(phl_to_drvpriv(phl_info), test_param, 0,
		    sizeof(struct rtw_trx_test_param));
	test_param->mode = TEST_MODE_PHL_TX_RING_TEST;
	test_param->ap_mode = 0;
	test_param->pkt_type = TEST_PKT_TYPE_BC;
	test_param->tx_req_num = 1;
	test_param->rx_req_num = 1;
	test_param->tx_payload_num = 1;
	test_param->tx_payload_size = 100;
	test_param->trx_mode = 0;
	test_param->qta_mode = 0;
	test_param->cur_addr[0] = 0x00;
	test_param->cur_addr[1] = 0xE0;
	test_param->cur_addr[2] = 0x4C;
	test_param->cur_addr[3] = 0x88;
	test_param->cur_addr[4] = 0x52;
	test_param->cur_addr[5] = 0xAA;
	if (TEST_PKT_TYPE_BC == test_param->pkt_type ||
	    TEST_PKT_TYPE_MC == test_param->pkt_type) {
		test_param->sta_addr[0] = 0xFF;
		test_param->sta_addr[1] = 0xFF;
		test_param->sta_addr[2] = 0xFF;
		test_param->sta_addr[3] = 0xFF;
		test_param->sta_addr[4] = 0xFF;
		test_param->sta_addr[5] = 0xFF;
		test_param->bssid[0] = 0xFF;
		test_param->bssid[1] = 0xFF;
		test_param->bssid[2] = 0xFF;
		test_param->bssid[3] = 0xFF;
		test_param->bssid[4] = 0xFF;
		test_param->bssid[5] = 0xFF;
		test_param->tx_cap.bc = 1;
		test_param->qos = 0;
	} else if (TEST_PKT_TYPE_UNI == test_param->pkt_type) {
		test_param->sta_addr[0] = 0x00;
		test_param->sta_addr[1] = 0xE0;
		test_param->sta_addr[2] = 0x4C;
		test_param->sta_addr[3] = 0x88;
		test_param->sta_addr[4] = 0x52;
		test_param->sta_addr[5] = 0xBB;
		test_param->bssid[0] = 0x00;
		test_param->bssid[1] = 0xE0;
		test_param->bssid[2] = 0x4C;
		test_param->bssid[3] = 0x88;
		test_param->bssid[4] = 0x52;
		test_param->bssid[5] = 0xBB;
		test_param->tx_cap.bc = 0;
		test_param->qos = 1;
	}
	test_param->tx_cap.macid = 0x00;
	test_param->tx_cap.tid = 0x03;
	test_param->tx_cap.dma_ch = 0x01;
	test_param->tx_cap.band = 0x0;
	test_param->tx_cap.userate_sel = 0x1;
	test_param->tx_cap.f_rate = 0x87;
	test_param->tx_cap.f_bw = 0;
	test_param->tx_cap.f_gi_ltf = 0;
}

enum rtw_phl_status rtw_phl_trx_testsuite(void *phl,
					  struct rtw_trx_test_param *test_param)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_trx_test_param *param = &trx_test->test_param;
	enum rtw_phl_status status = RTW_PHL_STATUS_FAILURE;

	FUNCIN_WSTS(status);
	do {
		param->is_trx_test_end = false;
		phl_update_test_param(phl, test_param);

		phl_test_hw_config_init(phl, test_param->mode);

		status = phl_trx_test_start(phl, test_param);
		if (RTW_PHL_STATUS_SUCCESS != status) {
			PHL_ERR("phl trx test failed\n");
			break;
		}

		phl_trx_test_dump_result(phl, test_param);

		param->is_trx_test_end = true;
	} while (false);
	FUNCOUT_WSTS(status);
	return status;
}

u8 trx_test_bp_handler(void *priv, struct test_bp_info *bp_info)
{
	u8 ret = BP_RET_SKIP_SECTION;

	return ret;
}

u8 trx_test_is_test_end(void *priv)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)priv;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct rtw_trx_test_param *test_param = &trx_test->test_param;

	FUNCIN();
	FUNCOUT();
	return (test_param->is_trx_test_end == true) ? (true) : (false);
}

u8 trx_test_is_test_pass(void *priv)
{
	FUNCIN();
	FUNCOUT();
	return true;
}

u8 trx_test_get_fail_rsn(void *priv, char *rsn, u32 max_len)
{
	/*struct phl_info_t *phl_info = (struct phl_info_t *)priv;*/
	FUNCIN();
	FUNCOUT();
	return true;
}

u8 trx_test_start_test(void *priv)
{
	/*struct phl_info_t *phl_info = (struct phl_info_t *)priv;*/
	FUNCIN();
	FUNCOUT();
	return true;
}

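/*
 * Register the trx test object (callbacks above) with the test framework
 * as a high-priority unit test under TEST_SUB_MODULE_TRX.
 */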
void
phl_add_trx_test_obj(void *phl)
{
	struct phl_info_t *phl_info = (struct phl_info_t *)phl;
	struct phl_trx_test *trx_test = (struct phl_trx_test *)phl_info->trx_test;
	struct test_obj_ctrl_interface *trx_test_obj = &trx_test->trx_test_obj;

	FUNCIN();
	trx_test_obj->bp_handler = trx_test_bp_handler;
	trx_test_obj->get_fail_rsn = trx_test_get_fail_rsn;
	trx_test_obj->is_test_end = trx_test_is_test_end;
	trx_test_obj->is_test_pass = trx_test_is_test_pass;
	trx_test_obj->start_test = trx_test_start_test;
	rtw_phl_test_add_new_test_obj(phl_info->phl_com,
				      "tx_test",
				      phl_info,
				      TEST_LVL_HIGH,
				      trx_test_obj,
				      -1,
				      TEST_SUB_MODULE_TRX,
				      UNIT_TEST_MODE);
	FUNCOUT();
}

#endif /* #ifdef CONFIG_PHL_TEST_SUITE */