1 /******************************************************************************
2 *
3 * Copyright(c) 2007 - 2019 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15 #define _RTW_XMIT_C_
16
17 #include <drv_types.h>
18
19 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
20 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
21
22 static void _init_txservq(struct tx_servq *ptxservq)
23 {
24 _rtw_init_listhead(&ptxservq->tx_pending);
25 _rtw_init_queue(&ptxservq->sta_pending);
26 ptxservq->qcnt = 0;
27 }
28
29
30 void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
31 {
32
33
34 _rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
35
36 _rtw_spinlock_init(&psta_xmitpriv->lock);
37
38 /* for(i = 0 ; i < MAX_NUMBLKS; i++) */
39 /* _init_txservq(&(psta_xmitpriv->blk_q[i])); */
40
41 _init_txservq(&psta_xmitpriv->be_q);
42 _init_txservq(&psta_xmitpriv->bk_q);
43 _init_txservq(&psta_xmitpriv->vi_q);
44 _init_txservq(&psta_xmitpriv->vo_q);
45 _rtw_init_listhead(&psta_xmitpriv->legacy_dz);
46 _rtw_init_listhead(&psta_xmitpriv->apsd);
47
48
49 }
50
51 void rtw_init_xmit_block(_adapter *padapter)
52 {
53 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
54
55 _rtw_spinlock_init(&dvobj->xmit_block_lock);
56 dvobj->xmit_block = XMIT_BLOCK_NONE;
57
58 }
59 void rtw_free_xmit_block(_adapter *padapter)
60 {
61 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
62
63 _rtw_spinlock_free(&dvobj->xmit_block_lock);
64 }
65
66 #ifdef RTW_PHL_TX
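/*
 * alloc_txring - allocate the PHL tx request ring used by the core tx path.
 * Each tx_pool_ring[] entry (SZ_TX_RING * RTW_MAX_FRAG_NUM bytes) is laid out as:
 *   [0, offset_head)            rtw_xmit_req array (one per fragment)
 *   [offset_head, offset_tail)  per-fragment head buffers (SZ_HEAD_BUF each)
 *   [offset_tail, offset_list)  per-fragment tail buffers (SZ_TAIL_BUF each)
 *   [offset_list, ...)          per-fragment packet lists
 * and is tracked by an xmit_txreq_buf descriptor linked into free_txreq_queue.
 */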
67 u8 alloc_txring(_adapter *padapter)
68 {
69 struct xmit_txreq_buf *ptxreq_buf = NULL;
70 struct rtw_xmit_req *txreq = NULL;
71 u32 idx, alloc_sz = 0, alloc_sz_txreq = 0;
72 u8 res = _SUCCESS;
73
74 u32 offset_head = (sizeof(struct rtw_xmit_req) * RTW_MAX_FRAG_NUM);
75 u32 offset_tail = offset_head + (SZ_HEAD_BUF * RTW_MAX_FRAG_NUM);
76 u32 offset_list = offset_tail + (SZ_TAIL_BUF * RTW_MAX_FRAG_NUM);
77
78 PHLTX_ENTER;
79
80 alloc_sz = (SZ_TX_RING * RTW_MAX_FRAG_NUM);
81 alloc_sz_txreq = MAX_TX_RING_NUM * (sizeof(struct xmit_txreq_buf));
82
83 RTW_INFO("eric-tx [%s] alloc_sz = %d, alloc_sz_txreq = %d\n", __FUNCTION__, alloc_sz, alloc_sz_txreq);
84
85 padapter->pxmit_txreq_buf = rtw_vmalloc(alloc_sz_txreq);
86 ptxreq_buf = (struct xmit_txreq_buf *)padapter->pxmit_txreq_buf;
87
88 _rtw_init_queue(&padapter->free_txreq_queue);
89
90 for (idx = 0; idx < MAX_TX_RING_NUM; idx++) {
91
92 padapter->tx_pool_ring[idx] = rtw_zmalloc(alloc_sz);
93 if (!padapter->tx_pool_ring[idx]) {
94 RTW_ERR("[core] alloc txring fail, please check.\n");
95 res = _FAIL;
96 break;
97 }
98 _rtw_init_listhead(&ptxreq_buf->list);
99 ptxreq_buf->txreq = padapter->tx_pool_ring[idx];
100 ptxreq_buf->head = padapter->tx_pool_ring[idx] + offset_head;
101 ptxreq_buf->tail = padapter->tx_pool_ring[idx] + offset_tail;
102 ptxreq_buf->pkt_list = padapter->tx_pool_ring[idx] + offset_list;
103
104 txreq = (struct rtw_xmit_req *)ptxreq_buf->txreq;
105 txreq->cache = VIRTUAL_ADDR;
106
107 #ifdef USE_PREV_WLHDR_BUF /* CONFIG_CORE_TXSC */
108 ptxreq_buf->macid = 0xff;
109 ptxreq_buf->txsc_id = 0xff;
110 #endif
111
112 rtw_list_insert_tail(&(ptxreq_buf->list), &(padapter->free_txreq_queue.queue));
113
114 ptxreq_buf++;
115 }
116
117 padapter->free_txreq_cnt = MAX_TX_RING_NUM;
118
119 return res;
120 }
121
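/*
 * free_txring - release the PHL tx request ring. For ring entries still
 * holding a cached TXSC request, complete the attached packets back to the
 * OS before freeing the entry.
 */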
122 void free_txring(_adapter *padapter)
123 {
124 u32 idx, alloc_sz = 0, alloc_sz_txreq = 0;
125 #ifdef CONFIG_CORE_TXSC
126 struct rtw_xmit_req *txreq = NULL;
127 struct xmit_txreq_buf *txreq_buf = NULL;
128 u8 j;
129 #endif
130
131 PHLTX_ENTER;
132
133 alloc_sz = (SZ_TX_RING * RTW_MAX_FRAG_NUM);
134 alloc_sz_txreq = MAX_TX_RING_NUM * (sizeof(struct xmit_txreq_buf));
135
136 RTW_INFO("eric-tx [%s] alloc_sz = %d, alloc_sz_txreq = %d\n", __func__, alloc_sz, alloc_sz_txreq);
137
138 for (idx = 0; idx < MAX_TX_RING_NUM; idx++) {
139 if (padapter->tx_pool_ring[idx]) {
140 #ifdef CONFIG_CORE_TXSC
141 txreq = (struct rtw_xmit_req *)padapter->tx_pool_ring[idx];
142 if (txreq->treq_type == RTW_PHL_TREQ_TYPE_CORE_TXSC) {
143 txreq_buf = (struct xmit_txreq_buf *)txreq->os_priv;
144 if (txreq_buf) {
145 /* CONFIG_TXSC_AMSDU */
146 for (j = 0; j < txreq_buf->pkt_cnt; j++) {
147 if (txreq_buf->pkt[j])
148 rtw_os_pkt_complete(padapter, (void *)txreq_buf->pkt[j]);
149 }
150 }
151 }
152 #endif
153 rtw_mfree(padapter->tx_pool_ring[idx], alloc_sz);
154 }
155 }
156
157 _rtw_spinlock_free(&padapter->free_txreq_queue.lock);
158 rtw_vmfree(padapter->pxmit_txreq_buf, alloc_sz_txreq);
159 }
160
161 #endif
162
163
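/*
 * _rtw_init_xmit_priv - allocate and initialize the core xmit resources:
 * the pending/free queues, the xmit_frame pool (NR_XMITFRAME entries), the
 * xframe_ext pool with its per-frame PHL tx requests, the hwxmit entries,
 * the optional A-MSDU timers and the PHL tx ring.
 */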
164 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, _adapter *padapter)
165 {
166 int i;
167 #if 0 /*CONFIG_CORE_XMITBUF*/
168 struct xmit_buf *pxmitbuf;
169 #endif
170 struct xmit_frame *pxframe;
171 sint res = _SUCCESS;
172 /* MGT_TXREQ_MGT */
173 u8 *txreq = NULL, *pkt_list = NULL;
174
175 #if 0 /*CONFIG_CORE_XMITBUF*/
176 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
177
178 u8 xmitbuf_nr = GET_HAL_XMITBUF_NR(dvobj);
179 u16 xmitbuf_sz = GET_HAL_XMITBUF_SZ(dvobj);
180
181 u8 xmitbuf_ext_nr = GET_HAL_XMITBUF_EXT_NR(dvobj);
182 u16 xmitbuf_ext_sz = GET_HAL_XMITBUF_EXT_SZ(dvobj);
183 #endif
184
185 /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
186 /* _rtw_memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); */
187
188 _rtw_spinlock_init(&pxmitpriv->lock);
189 _rtw_spinlock_init(&pxmitpriv->lock_sctx);
190 #if 0 /*def CONFIG_XMIT_THREAD_MODE*/
191 _rtw_init_sema(&pxmitpriv->xmit_sema, 0);
192 #endif
193
194 /*
195 Please insert all the queue initialization using _rtw_init_queue below
196 */
197
198 pxmitpriv->adapter = padapter;
199
200 /* for(i = 0 ; i < MAX_NUMBLKS; i++) */
201 /* _rtw_init_queue(&pxmitpriv->blk_strms[i]); */
202
203 _rtw_init_queue(&pxmitpriv->be_pending);
204 _rtw_init_queue(&pxmitpriv->bk_pending);
205 _rtw_init_queue(&pxmitpriv->vi_pending);
206 _rtw_init_queue(&pxmitpriv->vo_pending);
207 _rtw_init_queue(&pxmitpriv->bm_pending);
208
209 /* _rtw_init_queue(&pxmitpriv->legacy_dz_queue); */
210 /* _rtw_init_queue(&pxmitpriv->apsd_queue); */
211
212 _rtw_init_queue(&pxmitpriv->free_xmit_queue);
213
214 /*
215 Please allocate memory with sz = sizeof(struct xmit_frame) * NR_XMITFRAME,
216 and initialize free_xmit_frame below.
217 Please also apply free_txobj to link up all the xmit_frames...
218 */
219
220 pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
221
222 if (pxmitpriv->pallocated_frame_buf == NULL) {
223 pxmitpriv->pxmit_frame_buf = NULL;
224 res = _FAIL;
225 goto exit;
226 }
227 pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
228 /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
229 /* ((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3); */
230
231 pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
232
233 for (i = 0; i < NR_XMITFRAME; i++) {
234 _rtw_init_listhead(&(pxframe->list));
235
236 pxframe->padapter = padapter;
237 pxframe->frame_tag = NULL_FRAMETAG;
238
239 pxframe->pkt = NULL;
240
241 #if 0 /*CONFIG_CORE_XMITBUF*/
242 pxframe->buf_addr = NULL;
243 pxframe->pxmitbuf = NULL;
244 #else
245 /*alloc buf_addr*/
246 /*rtw_os_xmit_resource_alloc(padapter, pxframe);*/
247 #endif
248
249 rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
250
251 pxframe++;
252 }
253
254 pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
255
256 pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
257
258 #if 0 /*CONFIG_CORE_XMITBUF*/
259 /* init xmit_buf */
260 _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
261 _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
262
263 pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(xmitbuf_nr * sizeof(struct xmit_buf) + 4);
264
265 if (pxmitpriv->pallocated_xmitbuf == NULL) {
266 res = _FAIL;
267 goto exit;
268 }
269
270 pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
271 /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
272 /* ((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3); */
273
274 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
275
276 for (i = 0; i < xmitbuf_nr; i++) {
277 _rtw_init_listhead(&pxmitbuf->list);
278
279 pxmitbuf->priv_data = NULL;
280 pxmitbuf->padapter = padapter;
281 pxmitbuf->buf_tag = XMITBUF_DATA;
282
283 /* Tx buf allocation may fail sometimes, so sleep and retry. */
284 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
285 (xmitbuf_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
286 if (res == _FAIL) {
287 rtw_msleep_os(10);
288 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
289 (xmitbuf_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
290 if (res == _FAIL)
291 goto exit;
292 }
293
294 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
295 pxmitbuf->phead = pxmitbuf->pbuf;
296 pxmitbuf->pend = pxmitbuf->pbuf + xmitbuf_sz;
297 pxmitbuf->len = 0;
298 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
299 #endif
300
301 pxmitbuf->flags = XMIT_VO_QUEUE;
302
303 rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
304 #ifdef DBG_XMIT_BUF
305 pxmitbuf->no = i;
306 #endif
307
308 pxmitbuf++;
309
310 }
311
312 pxmitpriv->free_xmitbuf_cnt = xmitbuf_nr;
313 #endif
314 /* init xframe_ext queue, the same count as extbuf */
315 _rtw_init_queue(&pxmitpriv->free_xframe_ext_queue);
316 #ifdef CONFIG_LAYER2_ROAMING
317 _rtw_init_queue(&pxmitpriv->rpkt_queue);
318 #endif
319
320 pxmitpriv->xframe_ext_alloc_addr = rtw_zvmalloc(NR_XMITFRAME_EXT * sizeof(struct xmit_frame) + 4);
321
322 if (pxmitpriv->xframe_ext_alloc_addr == NULL) {
323 pxmitpriv->xframe_ext = NULL;
324 res = _FAIL;
325 goto exit;
326 }
327 pxmitpriv->xframe_ext = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->xframe_ext_alloc_addr), 4);
328 pxframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
329
330 /* MGT_TXREQ_QMGT */
331 pxmitpriv->xframe_ext_txreq_alloc_addr = rtw_zmalloc(NR_XMITFRAME_EXT * SZ_MGT_RING);
332 if (pxmitpriv->xframe_ext_txreq_alloc_addr == NULL) {
333 pxmitpriv->xframe_ext_txreq = NULL;
334 res = _FAIL;
335 goto exit;
336 }
337 pxmitpriv->xframe_ext_txreq = pxmitpriv->xframe_ext_txreq_alloc_addr;
338 txreq = pxmitpriv->xframe_ext_txreq;
339 pkt_list = pxmitpriv->xframe_ext_txreq + sizeof(struct rtw_xmit_req);
340
341 for (i = 0; i < NR_XMITFRAME_EXT; i++) {
342 _rtw_init_listhead(&(pxframe->list));
343
344 pxframe->padapter = padapter;
345 pxframe->frame_tag = NULL_FRAMETAG;
346
347 pxframe->pkt = NULL;
348
349 #if 0 /*CONFIG_CORE_XMITBUF*/
350 pxframe->buf_addr = NULL;
351 pxframe->pxmitbuf = NULL;
352 #else
353 /*alloc buf_addr*/
354 rtw_os_xmit_resource_alloc(padapter, pxframe);
355 #endif
356
357 pxframe->ext_tag = 1;
358
359 /* MGT_TXREQ_QMGT */
360 pxframe->phl_txreq = (struct rtw_xmit_req *)txreq;
361 pxframe->phl_txreq->pkt_list = pkt_list;
362 pxframe->phl_txreq->cache = VIRTUAL_ADDR;
363
364 rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
365
366 pxframe++;
367 /* MGT_TXREQ_QMGT */
368 txreq += SZ_MGT_RING;
369 pkt_list += SZ_MGT_RING;
370 }
371 pxmitpriv->free_xframe_ext_cnt = NR_XMITFRAME_EXT;
372
373 #if 0 /*CONFIG_CORE_XMITBUF*/
374 /* Init xmit extension buff */
375 _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
376
377 pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(xmitbuf_ext_nr * sizeof(struct xmit_buf) + 4);
378
379 if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
380 res = _FAIL;
381 goto exit;
382 }
383
384 pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmit_extbuf), 4);
385
386 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
387
388 for (i = 0; i < xmitbuf_ext_nr; i++) {
389 _rtw_init_listhead(&pxmitbuf->list);
390
391 pxmitbuf->priv_data = NULL;
392 pxmitbuf->padapter = padapter;
393 pxmitbuf->buf_tag = XMITBUF_MGNT;
394
395 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
396 xmitbuf_ext_sz + SZ_ALIGN_XMITFRAME_EXT, _TRUE);
397 if (res == _FAIL) {
398 res = _FAIL;
399 goto exit;
400 }
401
402 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
403 pxmitbuf->phead = pxmitbuf->pbuf;
404 pxmitbuf->pend = pxmitbuf->pbuf + xmitbuf_ext_sz;
405 pxmitbuf->len = 0;
406 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
407 #endif
408
409 rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
410 #ifdef DBG_XMIT_BUF_EXT
411 pxmitbuf->no = i;
412 #endif
413 pxmitbuf++;
414
415 }
416
417 pxmitpriv->free_xmit_extbuf_cnt = xmitbuf_ext_nr;
418
419 /*GEORGIA_TODO_FIXIT_IC_GEN_DEPENDENCE*/
420 for (i = 0; i < CMDBUF_MAX; i++) {
421 pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
422 if (pxmitbuf) {
423 _rtw_init_listhead(&pxmitbuf->list);
424
425 pxmitbuf->priv_data = NULL;
426 pxmitbuf->padapter = padapter;
427 pxmitbuf->buf_tag = XMITBUF_CMD;
428
429 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
430 MAX_CMDBUF_SZ + SZ_ALIGN_XMITFRAME_EXT, _TRUE);
431 if (res == _FAIL) {
432 res = _FAIL;
433 goto exit;
434 }
435
436 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
437 pxmitbuf->phead = pxmitbuf->pbuf;
438 pxmitbuf->pend = pxmitbuf->pbuf + MAX_CMDBUF_SZ;
439 pxmitbuf->len = 0;
440 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
441 #endif
442 pxmitbuf->alloc_sz = MAX_CMDBUF_SZ + SZ_ALIGN_XMITFRAME_EXT;
443 }
444 }
445 #endif
446 rtw_alloc_hwxmits(padapter);
447 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
448
449 for (i = 0; i < 4; i++)
450 pxmitpriv->wmm_para_seq[i] = i;
451
452 #ifdef CONFIG_USB_HCI
453 pxmitpriv->txirp_cnt = 1;
454
455 _rtw_init_sema(&(pxmitpriv->tx_retevt), 0);
456
457 /* per AC pending irp */
458 pxmitpriv->beq_cnt = 0;
459 pxmitpriv->bkq_cnt = 0;
460 pxmitpriv->viq_cnt = 0;
461 pxmitpriv->voq_cnt = 0;
462 #endif
463
464
465 #ifdef CONFIG_XMIT_ACK
466 pxmitpriv->ack_tx = _FALSE;
467 _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
468 rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
469 #endif
470
471 #ifdef CONFIG_TX_AMSDU
472 rtw_init_timer(&(pxmitpriv->amsdu_vo_timer),
473 rtw_amsdu_vo_timeout_handler, padapter);
474 pxmitpriv->amsdu_vo_timeout = RTW_AMSDU_TIMER_UNSET;
475
476 rtw_init_timer(&(pxmitpriv->amsdu_vi_timer),
477 rtw_amsdu_vi_timeout_handler, padapter);
478 pxmitpriv->amsdu_vi_timeout = RTW_AMSDU_TIMER_UNSET;
479
480 rtw_init_timer(&(pxmitpriv->amsdu_be_timer),
481 rtw_amsdu_be_timeout_handler, padapter);
482 pxmitpriv->amsdu_be_timeout = RTW_AMSDU_TIMER_UNSET;
483
484 rtw_init_timer(&(pxmitpriv->amsdu_bk_timer),
485 rtw_amsdu_bk_timeout_handler, padapter);
486 pxmitpriv->amsdu_bk_timeout = RTW_AMSDU_TIMER_UNSET;
487
488 pxmitpriv->amsdu_debug_set_timer = 0;
489 pxmitpriv->amsdu_debug_timeout = 0;
490 pxmitpriv->amsdu_debug_tasklet = 0;
491 pxmitpriv->amsdu_debug_enqueue = 0;
492 pxmitpriv->amsdu_debug_dequeue = 0;
493 for (i = 0; i < AMSDU_DEBUG_MAX_COUNT; i++)
494 pxmitpriv->amsdu_debug_coalesce[i] = 0;
495 #endif
496 #ifdef DBG_TXBD_DESC_DUMP
497 pxmitpriv->dump_txbd_desc = 0;
498 #endif
499 rtw_init_xmit_block(padapter);
500 rtw_intf_init_xmit_priv(padapter);
501
502 #ifdef RTW_PHL_TX //alloc xmit resource
503 printk("eric-tx CALL alloc_txring !!!!\n");
504 if (alloc_txring(padapter) == _FAIL) {
505 RTW_ERR("[core] alloc_txring fail !!!\n");
506 res = _FAIL;
507 goto exit;
508 }
509 #endif
510
511 #if defined(CONFIG_CORE_TXSC)
512 _rtw_spinlock_init(&pxmitpriv->txsc_lock);
513 #endif
514
515 exit:
516
517 return res;
518 }
519
520 void rtw_mfree_xmit_priv_lock(struct xmit_priv *pxmitpriv)
521 {
522 _rtw_spinlock_free(&pxmitpriv->lock);
523 #if 0 /*def CONFIG_XMIT_THREAD_MODE*/
524 _rtw_free_sema(&pxmitpriv->xmit_sema);
525 #endif
526
527 _rtw_spinlock_free(&pxmitpriv->be_pending.lock);
528 _rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
529 _rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
530 _rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
531 _rtw_spinlock_free(&pxmitpriv->bm_pending.lock);
532
533 /* _rtw_spinlock_free(&pxmitpriv->legacy_dz_queue.lock); */
534 /* _rtw_spinlock_free(&pxmitpriv->apsd_queue.lock); */
535
536 _rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
537 #if 0 /*CONFIG_CORE_XMITBUF*/
538 _rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
539 _rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
540 #endif
541 }
542
543
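/*
 * _rtw_free_xmit_priv - undo _rtw_init_xmit_priv: complete any OS packets
 * still attached, free the xmit_frame/xframe_ext pools and their tx
 * requests, the hwxmit entries and the PHL tx ring.
 */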
544 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
545 {
546 int i;
547 _adapter *padapter = pxmitpriv->adapter;
548 struct xmit_frame *pxmitframe;
549 #if 0 /*CONFIG_CORE_XMITBUF*/
550 struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
551 #endif
552
553 #if 0 /*CONFIG_CORE_XMITBUF*/
554 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
555 u8 xmitbuf_nr = GET_HAL_XMITBUF_NR(dvobj);
556 u16 xmitbuf_sz = GET_HAL_XMITBUF_SZ(dvobj);
557
558 u8 xmitbuf_ext_nr = GET_HAL_XMITBUF_EXT_NR(dvobj);
559 u16 xmitbuf_ext_sz = GET_HAL_XMITBUF_EXT_SZ(dvobj);
560 #endif
561
562 rtw_intf_free_xmit_priv(padapter);
563
564 rtw_mfree_xmit_priv_lock(pxmitpriv);
565
566 if (pxmitpriv->pxmit_frame_buf == NULL)
567 goto out;
568
569 pxmitframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
570
571 for (i = 0; i < NR_XMITFRAME; i++) {
572 rtw_os_xmit_complete(padapter, pxmitframe);
573 /*alloc buf_addr*/
574 /*rtw_os_xmit_resource_free(padapter, pxmitframe);*/
575 pxmitframe++;
576 }
577
578 #if 0 /*CONFIG_CORE_XMITBUF*/
579 for (i = 0; i < xmitbuf_nr; i++) {
580 rtw_os_xmit_resource_free(padapter, pxmitbuf,
581 (xmitbuf_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
582
583 pxmitbuf++;
584 }
585 #endif
586 if (pxmitpriv->pallocated_frame_buf)
587 rtw_vmfree(pxmitpriv->pallocated_frame_buf,
588 NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
589
590 #if 0 /*CONFIG_CORE_XMITBUF*/
591 if (pxmitpriv->pallocated_xmitbuf)
592 rtw_vmfree(pxmitpriv->pallocated_xmitbuf,
593 xmitbuf_nr * sizeof(struct xmit_buf) + 4);
594 #endif
595
596 /* free xframe_ext queue, the same count as extbuf */
597 if (pxmitpriv->xframe_ext == NULL)
598 goto out;
599
600 pxmitframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
601 for (i = 0; i < NR_XMITFRAME_EXT; i++) {
602 rtw_os_xmit_complete(padapter, pxmitframe);
603 /*free buf_addr*/
604 rtw_os_xmit_resource_free(padapter, pxmitframe);
605 pxmitframe++;
606 }
607
608 if (pxmitpriv->xframe_ext_alloc_addr)
609 rtw_vmfree(pxmitpriv->xframe_ext_alloc_addr,
610 NR_XMITFRAME_EXT * sizeof(struct xmit_frame) + 4);
611 _rtw_spinlock_free(&pxmitpriv->free_xframe_ext_queue.lock);
612
613 if (pxmitpriv->xframe_ext_txreq_alloc_addr)
614 rtw_mfree(pxmitpriv->xframe_ext_txreq_alloc_addr, NR_XMITFRAME_EXT * SZ_MGT_RING);
615
616 #if 0 /*CONFIG_CORE_XMITBUF*/
617
618 /* free xmit extension buff */
619 _rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);
620
621 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
622 for (i = 0; i < xmitbuf_ext_nr; i++) {
623 rtw_os_xmit_resource_free(padapter, pxmitbuf,
624 (xmitbuf_ext_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
625
626 pxmitbuf++;
627 }
628
629 if (pxmitpriv->pallocated_xmit_extbuf)
630 rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf,
631 xmitbuf_ext_nr * sizeof(struct xmit_buf) + 4);
632
633 for (i = 0; i < CMDBUF_MAX; i++) {
634 pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
635 if (pxmitbuf != NULL)
636 rtw_os_xmit_resource_free(padapter, pxmitbuf, MAX_CMDBUF_SZ + SZ_ALIGN_XMITFRAME_EXT, _TRUE);
637 }
638 #endif
639 rtw_free_hwxmits(padapter);
640
641 #ifdef CONFIG_LAYER2_ROAMING
642 _rtw_spinlock_free(&pxmitpriv->rpkt_queue.lock);
643 #endif
644
645 #ifdef RTW_PHL_TX
646 free_txring(padapter);
647 #endif
648
649 #ifdef CONFIG_CORE_TXSC
650 txsc_clear(padapter);
651 _rtw_spinlock_free(&pxmitpriv->txsc_lock);
652 #endif
653
654 #ifdef CONFIG_XMIT_ACK
655 _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
656 #endif
657 rtw_free_xmit_block(padapter);
658 out:
659 return;
660 }
661
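/*
 * rtw_init_lite_xmit_resource - allocate the lite xmit buffer pool, the lite
 * xmit extension buffer pool and, for USB, the tx URB pool; every entry is
 * linked into its free queue.
 */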
662 u8 rtw_init_lite_xmit_resource(struct dvobj_priv *dvobj)
663 {
664
665 u8 ret = _SUCCESS;
666 /* YiWei_todo: need to use the correct litexmitbuf_nr/urb_nr */
667 u32 litexmitbuf_nr = RTW_LITEXMITBUF_NR;
668 u32 litexmitbuf_ext_nr = RTW_LITEXMITBUF_NR;
669 struct lite_data_buf *litexmitbuf;
670 struct trx_data_buf_q *litexmitbuf_q = &dvobj->litexmitbuf_q;
671 struct trx_data_buf_q *litexmit_extbuf_q = &dvobj->litexmit_extbuf_q;
672 int i;
673 #ifdef CONFIG_USB_HCI
674 struct trx_urb_buf_q *xmit_urb_q = &dvobj->xmit_urb_q;
675 struct data_urb *xmiturb;
676 u32 urb_nr = RTW_XMITURB_NR;
677 #endif
678
679 /* init lite_xmit_buf */
680 _rtw_init_queue(&litexmitbuf_q->free_data_buf_queue);
681
682 litexmitbuf_q->alloc_data_buf =
683 rtw_zvmalloc(litexmitbuf_nr * sizeof(struct lite_data_buf) + 4);
684
685 if (litexmitbuf_q->alloc_data_buf == NULL) {
686 ret = _FAIL;
687 goto exit;
688 }
689
690 litexmitbuf_q->data_buf =
691 (u8 *)N_BYTE_ALIGNMENT((SIZE_PTR)(litexmitbuf_q->alloc_data_buf), 4);
692
693 litexmitbuf = (struct lite_data_buf *)litexmitbuf_q->data_buf;
694
695 for (i = 0; i < litexmitbuf_nr; i++) {
696 _rtw_init_listhead(&litexmitbuf->list);
697 rtw_list_insert_tail(&litexmitbuf->list,
698 &(litexmitbuf_q->free_data_buf_queue.queue));
699 litexmitbuf++;
700 }
701 litexmitbuf_q->free_data_buf_cnt = litexmitbuf_nr;
702
703
704 /* Init lite xmit extension buff */
705 _rtw_init_queue(&litexmit_extbuf_q->free_data_buf_queue);
706
707 litexmit_extbuf_q->alloc_data_buf =
708 rtw_zvmalloc(litexmitbuf_ext_nr * sizeof(struct lite_data_buf) + 4);
709
710 if (litexmit_extbuf_q->alloc_data_buf == NULL) {
711 ret = _FAIL;
712 goto exit;
713 }
714
715 litexmit_extbuf_q->data_buf =
716 (u8 *)N_BYTE_ALIGNMENT((SIZE_PTR)(litexmit_extbuf_q->alloc_data_buf), 4);
717
718 litexmitbuf = (struct lite_data_buf *)litexmit_extbuf_q->data_buf;
719
720 for (i = 0; i < litexmitbuf_ext_nr; i++) {
721 _rtw_init_listhead(&litexmitbuf->list);
722 rtw_list_insert_tail(&litexmitbuf->list,
723 &(litexmit_extbuf_q->free_data_buf_queue.queue));
724 litexmitbuf++;
725 }
726 litexmit_extbuf_q->free_data_buf_cnt = litexmitbuf_ext_nr;
727
728 #ifdef CONFIG_USB_HCI
729 /* init xmit_urb */
730 _rtw_init_queue(&xmit_urb_q->free_urb_buf_queue);
731 xmit_urb_q->alloc_urb_buf =
732 rtw_zvmalloc(urb_nr * sizeof(struct data_urb) + 4);
733 if (xmit_urb_q->alloc_urb_buf == NULL) {
734 ret = _FAIL;
735 goto exit;
736 }
737
738 xmit_urb_q->urb_buf =
739 (u8 *)N_BYTE_ALIGNMENT((SIZE_PTR)(xmit_urb_q->alloc_urb_buf), 4);
740
741 xmiturb = (struct data_urb *)xmit_urb_q->urb_buf;
742 for (i = 0; i < urb_nr; i++) {
743 _rtw_init_listhead(&xmiturb->list);
744 ret = rtw_os_urb_resource_alloc(xmiturb);
745 rtw_list_insert_tail(&xmiturb->list,
746 &(xmit_urb_q->free_urb_buf_queue.queue));
747 xmiturb++;
748 }
749 xmit_urb_q->free_urb_buf_cnt = urb_nr;
750 #endif
751
752 exit:
753 return ret;
754 }
755
756 void rtw_free_lite_xmit_resource(struct dvobj_priv *dvobj)
757 {
758 u8 ret = _SUCCESS;
759 /* YiWei_todo: need to use the correct litexmitbuf_nr/urb_nr */
760 u32 litexmitbuf_nr = RTW_LITEXMITBUF_NR;
761 u32 litexmitbuf_ext_nr = RTW_LITEXMITBUF_NR;
762 struct trx_data_buf_q *litexmitbuf_q = &dvobj->litexmitbuf_q;
763 struct trx_data_buf_q *litexmit_extbuf_q = &dvobj->litexmit_extbuf_q;
764 #ifdef CONFIG_USB_HCI
765 struct data_urb *xmiturb;
766 struct trx_urb_buf_q *xmit_urb_q = &dvobj->xmit_urb_q;
767 u32 urb_nr = RTW_XMITURB_NR;
768 int i;
769 #endif
770
771 if (litexmitbuf_q->alloc_data_buf)
772 rtw_vmfree(litexmitbuf_q->alloc_data_buf,
773 litexmitbuf_nr * sizeof(struct lite_data_buf) + 4);
774
775 if (litexmit_extbuf_q->alloc_data_buf)
776 rtw_vmfree(litexmit_extbuf_q->alloc_data_buf,
777 litexmitbuf_ext_nr * sizeof(struct lite_data_buf) + 4);
778
779 #ifdef CONFIG_USB_HCI
780 xmiturb = (struct data_urb *)xmit_urb_q->urb_buf;
781 for (i = 0; i < urb_nr; i++) {
782 rtw_os_urb_resource_free(xmiturb);
783 xmiturb++;
784 }
785
786 if (xmit_urb_q->alloc_urb_buf)
787 rtw_vmfree(xmit_urb_q->alloc_urb_buf,
788 urb_nr * sizeof(struct data_urb) + 4);
789 #endif
790
791 }
792
793
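/*
 * rtw_get_tx_bw_mode - return the tx bandwidth to use toward this station:
 * the station's operating bandwidth, capped by the adapter's per-band tx
 * bandwidth limit while associated.
 */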
794 u8 rtw_get_tx_bw_mode(_adapter *adapter, struct sta_info *sta)
795 {
796 u8 bw;
797
798 bw = sta->phl_sta->chandef.bw;
799 if (MLME_STATE(adapter) & WIFI_ASOC_STATE) {
800 if (adapter->mlmeextpriv.chandef.chan <= 14)
801 bw = rtw_min(bw, ADAPTER_TX_BW_2G(adapter));
802 else
803 bw = rtw_min(bw, ADAPTER_TX_BW_5G(adapter));
804 }
805
806 return bw;
807 }
808
809 void rtw_get_adapter_tx_rate_bmp_by_bw(_adapter *adapter, u8 bw, u16 *r_bmp_cck_ofdm, u32 *r_bmp_ht, u64 *r_bmp_vht)
810 {
811 /* ToDo */
812 #if 0
813 struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
814 struct macid_ctl_t *macid_ctl = dvobj_to_macidctl(dvobj);
815 u8 fix_bw = 0xFF;
816 u16 bmp_cck_ofdm = 0;
817 u32 bmp_ht = 0;
818 u64 bmp_vht = 0;
819 int i;
820
821 if (adapter->fix_rate != NO_FIX_RATE && adapter->fix_bw != NO_FIX_BW)
822 fix_bw = adapter->fix_bw;
823
824 /* TODO: adapter->fix_rate */
825
826 for (i = 0; i < macid_ctl->num; i++) {
827 if (!rtw_macid_is_used(macid_ctl, i))
828 continue;
829 if (!rtw_macid_is_iface_specific(macid_ctl, i, adapter))
830 continue;
831
832 if (bw == CHANNEL_WIDTH_20) /* CCK, OFDM always 20MHz */
833 bmp_cck_ofdm |= macid_ctl->rate_bmp0[i] & 0x00000FFF;
834
835 /* bypass mismatch bandwidth for HT, VHT */
836 if ((fix_bw != 0xFF && fix_bw != bw) || (fix_bw == 0xFF && macid_ctl->bw[i] != bw))
837 continue;
838
839 if (macid_ctl->vht_en[i])
840 bmp_vht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
841 else
842 bmp_ht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
843 }
844
845 /* TODO: mlmeext->tx_rate*/
846
847 if (r_bmp_cck_ofdm)
848 *r_bmp_cck_ofdm = bmp_cck_ofdm;
849 if (r_bmp_ht)
850 *r_bmp_ht = bmp_ht;
851 if (r_bmp_vht)
852 *r_bmp_vht = bmp_vht;
853 #endif
854 }
855
856 void rtw_get_shared_macid_tx_rate_bmp_by_bw(struct dvobj_priv *dvobj, u8 bw, u16 *r_bmp_cck_ofdm, u32 *r_bmp_ht, u64 *r_bmp_vht)
857 {
858 /* ToDo */
859 #if 0
860 struct macid_ctl_t *macid_ctl = dvobj_to_macidctl(dvobj);
861 u16 bmp_cck_ofdm = 0;
862 u32 bmp_ht = 0;
863 u64 bmp_vht = 0;
864 int i;
865
866 for (i = 0; i < macid_ctl->num; i++) {
867 if (!rtw_macid_is_used(macid_ctl, i))
868 continue;
869 if (!rtw_macid_is_iface_shared(macid_ctl, i))
870 continue;
871
872 if (bw == CHANNEL_WIDTH_20) /* CCK, OFDM always 20MHz */
873 bmp_cck_ofdm |= macid_ctl->rate_bmp0[i] & 0x00000FFF;
874
875 /* bypass mismatch bandwidth for HT, VHT */
876 if (macid_ctl->bw[i] != bw)
877 continue;
878
879 if (macid_ctl->vht_en[i])
880 bmp_vht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
881 else
882 bmp_ht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
883 }
884
885 if (r_bmp_cck_ofdm)
886 *r_bmp_cck_ofdm = bmp_cck_ofdm;
887 if (r_bmp_ht)
888 *r_bmp_ht = bmp_ht;
889 if (r_bmp_vht)
890 *r_bmp_vht = bmp_vht;
891 #endif
892 }
893
894 void rtw_update_tx_rate_bmp(struct dvobj_priv *dvobj)
895 {
896 #if 0 /*GEORGIA_TODO_FIXIT*/
897
898 struct rf_ctl_t *rf_ctl = dvobj_to_rfctl(dvobj);
899 _adapter *adapter = dvobj_get_primary_adapter(dvobj);
900 HAL_DATA_TYPE *hal_data = GET_PHL_COM(dvobj);
901 u8 bw;
902 u16 bmp_cck_ofdm, tmp_cck_ofdm;
903 u32 bmp_ht, tmp_ht, ori_bmp_ht[2];
904 u64 bmp_vht, tmp_vht, ori_bmp_vht[4];
905 int i;
906
907 for (bw = CHANNEL_WIDTH_20; bw <= CHANNEL_WIDTH_160; bw++) {
908 /* backup the original ht & vht bmp */
909 if (bw <= CHANNEL_WIDTH_40)
910 ori_bmp_ht[bw] = rf_ctl->rate_bmp_ht_by_bw[bw];
911 if (bw <= CHANNEL_WIDTH_160)
912 ori_bmp_vht[bw] = rf_ctl->rate_bmp_vht_by_bw[bw];
913
914 bmp_cck_ofdm = bmp_ht = bmp_vht = 0;
915 if (rtw_hw_is_bw_support(dvobj, bw)) {
916 for (i = 0; i < dvobj->iface_nums; i++) {
917 if (!dvobj->padapters[i])
918 continue;
919 rtw_get_adapter_tx_rate_bmp_by_bw(dvobj->padapters[i], bw, &tmp_cck_ofdm, &tmp_ht, &tmp_vht);
920 bmp_cck_ofdm |= tmp_cck_ofdm;
921 bmp_ht |= tmp_ht;
922 bmp_vht |= tmp_vht;
923 }
924 rtw_get_shared_macid_tx_rate_bmp_by_bw(dvobj, bw, &tmp_cck_ofdm, &tmp_ht, &tmp_vht);
925 bmp_cck_ofdm |= tmp_cck_ofdm;
926 bmp_ht |= tmp_ht;
927 bmp_vht |= tmp_vht;
928 }
929 if (bw == CHANNEL_WIDTH_20)
930 rf_ctl->rate_bmp_cck_ofdm = bmp_cck_ofdm;
931 if (bw <= CHANNEL_WIDTH_40)
932 rf_ctl->rate_bmp_ht_by_bw[bw] = bmp_ht;
933 if (bw <= CHANNEL_WIDTH_160)
934 rf_ctl->rate_bmp_vht_by_bw[bw] = bmp_vht;
935 }
936
937 #if CONFIG_TXPWR_LIMIT
938 #ifndef DBG_HIGHEST_RATE_BMP_BW_CHANGE
939 #define DBG_HIGHEST_RATE_BMP_BW_CHANGE 0
940 #endif
941
942 if (hal_data->txpwr_limit_loaded) {
943 u8 ori_highest_ht_rate_bw_bmp;
944 u8 ori_highest_vht_rate_bw_bmp;
945 u8 highest_rate_bw;
946 u8 highest_rate_bw_bmp;
947 u8 update_ht_rs = _FALSE;
948 u8 update_vht_rs = _FALSE;
949
950 /* backup the original ht & vht highest bw bmp */
951 ori_highest_ht_rate_bw_bmp = rf_ctl->highest_ht_rate_bw_bmp;
952 ori_highest_vht_rate_bw_bmp = rf_ctl->highest_vht_rate_bw_bmp;
953
954 highest_rate_bw_bmp = BW_CAP_20M;
955 highest_rate_bw = CHANNEL_WIDTH_20;
956 for (bw = CHANNEL_WIDTH_20; bw <= CHANNEL_WIDTH_40; bw++) {
957 if (rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw] < rf_ctl->rate_bmp_ht_by_bw[bw]) {
958 highest_rate_bw_bmp = ch_width_to_bw_cap(bw);
959 highest_rate_bw = bw;
960 } else if (rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw] == rf_ctl->rate_bmp_ht_by_bw[bw])
961 highest_rate_bw_bmp |= ch_width_to_bw_cap(bw);
962 }
963 rf_ctl->highest_ht_rate_bw_bmp = highest_rate_bw_bmp;
964
965 if (ori_highest_ht_rate_bw_bmp != rf_ctl->highest_ht_rate_bw_bmp
966 || largest_bit(ori_bmp_ht[highest_rate_bw]) != largest_bit(rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw])
967 ) {
968 if (DBG_HIGHEST_RATE_BMP_BW_CHANGE) {
969 RTW_INFO("highest_ht_rate_bw_bmp:0x%02x=>0x%02x\n", ori_highest_ht_rate_bw_bmp, rf_ctl->highest_ht_rate_bw_bmp);
970 RTW_INFO("rate_bmp_ht_by_bw[%u]:0x%08x=>0x%08x\n", highest_rate_bw, ori_bmp_ht[highest_rate_bw], rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw]);
971 }
972 if (rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw])
973 update_ht_rs = _TRUE;
974 }
975
976 highest_rate_bw_bmp = BW_CAP_20M;
977 highest_rate_bw = CHANNEL_WIDTH_20;
978 for (bw = CHANNEL_WIDTH_20; bw <= CHANNEL_WIDTH_160; bw++) {
979 if (rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw] < rf_ctl->rate_bmp_vht_by_bw[bw]) {
980 highest_rate_bw_bmp = ch_width_to_bw_cap(bw);
981 highest_rate_bw = bw;
982 } else if (rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw] == rf_ctl->rate_bmp_vht_by_bw[bw])
983 highest_rate_bw_bmp |= ch_width_to_bw_cap(bw);
984 }
985 rf_ctl->highest_vht_rate_bw_bmp = highest_rate_bw_bmp;
986
987 if (ori_highest_vht_rate_bw_bmp != rf_ctl->highest_vht_rate_bw_bmp
988 || largest_bit_64(ori_bmp_vht[highest_rate_bw]) != largest_bit_64(rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw])
989 ) {
990 if (DBG_HIGHEST_RATE_BMP_BW_CHANGE) {
991 RTW_INFO("highest_vht_rate_bw_bmp:0x%02x=>0x%02x\n", ori_highest_vht_rate_bw_bmp, rf_ctl->highest_vht_rate_bw_bmp);
992 RTW_INFO("rate_bmp_vht_by_bw[%u]:0x%016llx=>0x%016llx\n", highest_rate_bw, ori_bmp_vht[highest_rate_bw], rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw]);
993 }
994 if (rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw])
995 update_vht_rs = _TRUE;
996 }
997
998 /* TODO: per rfpath and rate section handling? */
999 if (update_ht_rs == _TRUE || update_vht_rs == _TRUE)
1000 rtw_hal_set_tx_power_level(dvobj_get_primary_adapter(dvobj), hal_data->current_channel);
1001 }
1002 #endif /* CONFIG_TXPWR_LIMIT */
1003 #endif
1004 }
1005
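/*
 * rtw_get_tx_bw_bmp_of_ht_rate / rtw_get_tx_bw_bmp_of_vht_rate - return the
 * bitmap of bandwidths (up to max_bw) whose enabled rate set reaches the
 * given HT/VHT rate; rate adaptation may still retry with lower rates.
 */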
1006 u8 rtw_get_tx_bw_bmp_of_ht_rate(struct dvobj_priv *dvobj, u8 rate, u8 max_bw)
1007 {
1008 struct rf_ctl_t *rf_ctl = dvobj_to_rfctl(dvobj);
1009 u8 bw;
1010 u8 bw_bmp = 0;
1011 u32 rate_bmp;
1012
1013 if (!IS_HT_RATE(rate)) {
1014 rtw_warn_on(1);
1015 goto exit;
1016 }
1017
1018 rate_bmp = 1 << (rate - MGN_MCS0);
1019
1020 if (max_bw > CHANNEL_WIDTH_40)
1021 max_bw = CHANNEL_WIDTH_40;
1022
1023 for (bw = CHANNEL_WIDTH_20; bw <= max_bw; bw++) {
1024 /* RA may use lower rate for retry */
1025 if (rf_ctl->rate_bmp_ht_by_bw[bw] >= rate_bmp)
1026 bw_bmp |= ch_width_to_bw_cap(bw);
1027 }
1028
1029 exit:
1030 return bw_bmp;
1031 }
1032
1033 u8 rtw_get_tx_bw_bmp_of_vht_rate(struct dvobj_priv *dvobj, u8 rate, u8 max_bw)
1034 {
1035 struct rf_ctl_t *rf_ctl = dvobj_to_rfctl(dvobj);
1036 u8 bw;
1037 u8 bw_bmp = 0;
1038 u64 rate_bmp;
1039
1040 if (!IS_VHT_RATE(rate)) {
1041 rtw_warn_on(1);
1042 goto exit;
1043 }
1044
1045 rate_bmp = BIT_ULL(rate - MGN_VHT1SS_MCS0);
1046
1047 if (max_bw > CHANNEL_WIDTH_160)
1048 max_bw = CHANNEL_WIDTH_160;
1049
1050 for (bw = CHANNEL_WIDTH_20; bw <= max_bw; bw++) {
1051 /* RA may use lower rate for retry */
1052 if (rf_ctl->rate_bmp_vht_by_bw[bw] >= rate_bmp)
1053 bw_bmp |= ch_width_to_bw_cap(bw);
1054 }
1055
1056 exit:
1057 return bw_bmp;
1058 }
1059
1060 s16 rtw_rfctl_get_oper_txpwr_max_mbm(struct rf_ctl_t *rfctl, u8 ch, u8 bw, u8 offset, u8 ifbmp_mod, u8 if_op, bool eirp)
1061 {
1062 /* TODO: get maximum txpower of current operating class & channel belongs to this radio */
1063 s16 mbm = 2000;
1064 return mbm;
1065 }
1066
1067 s16 rtw_rfctl_get_reg_max_txpwr_mbm(struct rf_ctl_t *rfctl, u8 ch, u8 bw, u8 offset, bool eirp)
1068 {
1069 /* TODO: get maximum txpower of current operating class & channel belongs to this radio allowed by regulatory */
1070 s16 mbm = 1300;
1071 return mbm;
1072 }
1073
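/*
 * query_ra_short_GI - report whether short GI may be used toward this
 * station for the given bandwidth, based on the peer's HT/VHT capabilities.
 */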
1074 u8 query_ra_short_GI(struct sta_info *psta, u8 bw)
1075 {
1076 u8 sgi = _FALSE, sgi_20m = _FALSE, sgi_40m = _FALSE, sgi_80m = _FALSE;
1077
1078 #ifdef CONFIG_80211N_HT
1079 #ifdef CONFIG_80211AC_VHT
1080 #ifdef CONFIG_80211AX_HE
1081 /* CONFIG_80211AX_HE_TODO */
1082 #endif /* CONFIG_80211AX_HE */
1083 if (psta->vhtpriv.vht_option)
1084 sgi_80m = psta->vhtpriv.sgi_80m;
1085 #endif
1086 sgi_20m = psta->htpriv.sgi_20m;
1087 sgi_40m = psta->htpriv.sgi_40m;
1088 #endif
1089
1090 switch (bw) {
1091 case CHANNEL_WIDTH_80:
1092 sgi = sgi_80m;
1093 break;
1094 case CHANNEL_WIDTH_40:
1095 sgi = sgi_40m;
1096 break;
1097 case CHANNEL_WIDTH_20:
1098 default:
1099 sgi = sgi_20m;
1100 break;
1101 }
1102
1103 return sgi;
1104 }
1105
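/*
 * update_attrib_vcs_info - choose the protection (virtual carrier sense)
 * mode for this frame: RTS/CTS, CTS-to-self or none, based on the first
 * fragment length vs. the RTS threshold, ERP/HT protection and A-MPDU use,
 * with an optional driver-forced override.
 */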
1106 static void update_attrib_vcs_info(_adapter *padapter, struct xmit_frame *pxmitframe)
1107 {
1108 u32 sz;
1109 struct pkt_attrib *pattrib = &pxmitframe->attrib;
1110 /* struct sta_info *psta = pattrib->psta; */
1111 struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
1112 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
1113
1114 /*
1115 if(pattrib->psta)
1116 {
1117 psta = pattrib->psta;
1118 }
1119 else
1120 {
1121 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
1122 psta=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0] );
1123 }
1124
1125 if(psta==NULL)
1126 {
1127 RTW_INFO("%s, psta==NUL\n", __func__);
1128 return;
1129 }
1130
1131 if(!(psta->state &WIFI_ASOC_STATE))
1132 {
1133 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
1134 return;
1135 }
1136 */
1137
1138 if (pattrib->nr_frags != 1)
1139 sz = padapter->xmitpriv.frag_len;
1140 else /* no frag */
1141 sz = pattrib->last_txcmdsz;
1142
1143 /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
1144 /* (2) If there are more than one frag in this MSDU, only the first frag uses protection frame. */
1145 /* Other fragments are protected by previous fragment. */
1146 /* So we only need to check the length of first fragment. */
1147 if (pmlmeext->cur_wireless_mode < WLAN_MD_11N || padapter->registrypriv.wifi_spec) {
1148 if (sz > padapter->registrypriv.rts_thresh)
1149 pattrib->vcs_mode = RTS_CTS;
1150 else {
1151 if (pattrib->rtsen)
1152 pattrib->vcs_mode = RTS_CTS;
1153 else if (pattrib->cts2self)
1154 pattrib->vcs_mode = CTS_TO_SELF;
1155 else
1156 pattrib->vcs_mode = NONE_VCS;
1157 }
1158 } else {
1159 while (_TRUE) {
1160 #if 0 /* Todo */
1161 /* check IOT action */
1162 if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
1163 pattrib->vcs_mode = CTS_TO_SELF;
1164 pattrib->rts_rate = MGN_24M;
1165 break;
1166 } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS | HT_IOT_ACT_PURE_N_MODE)) {
1167 pattrib->vcs_mode = RTS_CTS;
1168 pattrib->rts_rate = MGN_24M;
1169 break;
1170 }
1171 #endif
1172
1173 /* IOT action */
1174 if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && (pattrib->ampdu_en == _TRUE) &&
1175 (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
1176 pattrib->vcs_mode = CTS_TO_SELF;
1177 break;
1178 }
1179
1180
1181 /* check ERP protection */
1182 if (pattrib->rtsen || pattrib->cts2self) {
1183 if (pattrib->rtsen)
1184 pattrib->vcs_mode = RTS_CTS;
1185 else if (pattrib->cts2self)
1186 pattrib->vcs_mode = CTS_TO_SELF;
1187
1188 break;
1189 }
1190
1191 /* check HT op mode */
1192 if (pattrib->ht_en) {
1193 u8 HTOpMode = pmlmeinfo->HT_protection;
1194 if ((pmlmeext->chandef.bw && (HTOpMode == 2 || HTOpMode == 3)) ||
1195 (!pmlmeext->chandef.bw && HTOpMode == 3)) {
1196 pattrib->vcs_mode = RTS_CTS;
1197 break;
1198 }
1199 }
1200
1201 /* check rts */
1202 if (sz > padapter->registrypriv.rts_thresh) {
1203 pattrib->vcs_mode = RTS_CTS;
1204 break;
1205 }
1206
1207 /* to do list: check MIMO power save condition. */
1208
1209 /* check AMPDU aggregation for TXOP */
1210 if (pattrib->ampdu_en == _TRUE) {
1211 pattrib->vcs_mode = RTS_CTS;
1212 break;
1213 }
1214
1215 pattrib->vcs_mode = NONE_VCS;
1216 break;
1217 }
1218 }
1219
1220 /* for debug : force driver control vrtl_carrier_sense. */
1221 if (padapter->driver_vcs_en == 1) {
1222 /* u8 driver_vcs_en; */ /* Enable=1, Disable=0 driver control vrtl_carrier_sense. */
1223 /* u8 driver_vcs_type; */ /* force 0:disable VCS, 1:RTS-CTS, 2:CTS-to-self when vcs_en=1. */
1224 pattrib->vcs_mode = padapter->driver_vcs_type;
1225 }
1226
1227 }
1228
1229 #ifdef CONFIG_WMMPS_STA
1230 /*
1231 * update_attrib_trigger_frame_info
1232  * For station mode, if the driver has configured a specific TID and the AP supports U-APSD, a data
1233  * frame with the corresponding TID will be a trigger frame while the driver is in WMM power-saving mode.
1234 *
1235 * Arguments:
1236 * @padapter: _adapter pointer.
1237 * @pattrib: pkt_attrib pointer.
1238 *
1239  * Author: Arvin Liu
1240 * Date: 2017/06/05
1241 */
1242 static void update_attrib_trigger_frame_info(_adapter *padapter, struct pkt_attrib *pattrib)
1243 {
1244 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1245 struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
1246 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
1247 u8 trigger_frame_en = 0;
1248
1249 if (MLME_IS_STA(padapter)) {
1250 if ((pwrpriv->pwr_mode == PM_PS_MODE_MIN) || (pwrpriv->pwr_mode == PM_PS_MODE_MAX)) {
1251 if ((pqospriv->uapsd_ap_supported) && (pqospriv->uapsd_tid & BIT(pattrib->priority))) {
1252 trigger_frame_en = 1;
1253 RTW_INFO("[WMMPS]"FUNC_ADPT_FMT": This is a Trigger Frame\n", FUNC_ADPT_ARG(padapter));
1254 }
1255 }
1256 }
1257
1258 pattrib->trigger_frame = trigger_frame_en;
1259 }
1260 #endif /* CONFIG_WMMPS_STA */
1261
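/*
 * update_attrib_phy_info - fill PHY-related tx attributes from the station
 * entry: protection flags, bandwidth, short GI, LDPC/STBC and A-MPDU
 * settings (TDLS direct links are refreshed from the TDLS peer).
 */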
1262 static void update_attrib_phy_info(_adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
1263 {
1264 struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
1265 u8 bw;
1266
1267 pattrib->rtsen = psta->rtsen;
1268 pattrib->cts2self = psta->cts2self;
1269 pattrib->hw_rts_en = psta->hw_rts_en;
1270
1271 pattrib->mdata = 0;
1272 pattrib->eosp = 0;
1273 #ifdef CONFIG_80211AX_HE
1274 if (psta->hepriv.he_option == _TRUE)
1275 pattrib->eosp = 1;
1276 #endif
1277 pattrib->triggered = 0;
1278 pattrib->ampdu_spacing = 0;
1279
1280 /* ht_en, init rate, ,bw, ch_offset, sgi */
1281
1282 /* ToDo: Need API to inform hal_sta->ra_info.rate_id */
1283 /* pattrib->raid = psta->phl_sta->ra_info.rate_id; */
1284
1285 bw = rtw_get_tx_bw_mode(padapter, psta);
1286 pattrib->bwmode = rtw_min(bw, mlmeext->chandef.bw);
1287 pattrib->sgi = query_ra_short_GI(psta, pattrib->bwmode);
1288
1289 if (psta->phl_sta->wmode & WLAN_MD_11AX) {
1290 pattrib->ldpc = psta->phl_sta->asoc_cap.he_ldpc;
1291 pattrib->stbc = (psta->phl_sta->asoc_cap.stbc_he_rx > 0) ? 1:0;
1292 } else if (psta->phl_sta->wmode & WLAN_MD_11AC) {
1293 pattrib->ldpc = psta->phl_sta->asoc_cap.vht_ldpc;
1294 pattrib->stbc = (psta->phl_sta->asoc_cap.stbc_vht_rx > 0) ? 1:0;
1295 } else if (psta->phl_sta->wmode & WLAN_MD_11N) {
1296 pattrib->ldpc = psta->phl_sta->asoc_cap.ht_ldpc;
1297 pattrib->stbc = (psta->phl_sta->asoc_cap.stbc_ht_rx > 0) ? 1:0;
1298 } else {
1299 pattrib->ldpc = 0;
1300 pattrib->stbc = 0;
1301 }
1302
1303 #ifdef CONFIG_80211N_HT
1304 if (padapter->registrypriv.ht_enable &&
1305 is_supported_ht(padapter->registrypriv.wireless_mode)) {
1306 pattrib->ht_en = psta->htpriv.ht_option;
1307 pattrib->ch_offset = psta->htpriv.ch_offset;
1308 pattrib->ampdu_en = _FALSE;
1309
1310 if (padapter->driver_ampdu_spacing != 0xFF) /* driver control AMPDU Density for peer sta's rx */
1311 pattrib->ampdu_spacing = padapter->driver_ampdu_spacing;
1312 else
1313 pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing;
1314
1315 /* check if enable ampdu */
1316 if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
1317 if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) {
1318 pattrib->ampdu_en = _TRUE;
1319 if (psta->htpriv.tx_amsdu_enable == _TRUE)
1320 pattrib->amsdu_ampdu_en = _TRUE;
1321 else
1322 pattrib->amsdu_ampdu_en = _FALSE;
1323 }
1324 }
1325 }
1326 #endif /* CONFIG_80211N_HT */
1327 /* if(pattrib->ht_en && psta->htpriv.ampdu_enable) */
1328 /* { */
1329 /* if(psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) */
1330 /* pattrib->ampdu_en = _TRUE; */
1331 /* } */
1332
1333 #ifdef CONFIG_TDLS
1334 if (pattrib->direct_link == _TRUE) {
1335 psta = pattrib->ptdls_sta;
1336
1337 pattrib->raid = psta->phl_sta->ra_info.rate_id;
1338 #ifdef CONFIG_80211N_HT
1339 if (padapter->registrypriv.ht_enable &&
1340 is_supported_ht(padapter->registrypriv.wireless_mode)) {
1341 pattrib->bwmode = rtw_get_tx_bw_mode(padapter, psta);
1342 pattrib->ht_en = psta->htpriv.ht_option;
1343 pattrib->ch_offset = psta->htpriv.ch_offset;
1344 pattrib->sgi = query_ra_short_GI(psta, pattrib->bwmode);
1345 }
1346 #endif /* CONFIG_80211N_HT */
1347 }
1348 #endif /* CONFIG_TDLS */
1349
1350 pattrib->retry_ctrl = _FALSE;
1351 }
1352
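/*
 * update_attrib_sec_iv_info - build the per-packet IV/PN for the negotiated
 * cipher; broadcast/multicast frames use the group key index.
 */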
1353 static s32 update_attrib_sec_iv_info(_adapter *padapter, struct pkt_attrib *pattrib)
1354 {
1355 struct sta_info *psta = pattrib->psta;
1356 sint bmcast = IS_MCAST(pattrib->ra);
1357
1358 if (!psta)
1359 return _FAIL;
1360
1361 switch (pattrib->encrypt) {
1362 case _WEP40_:
1363 case _WEP104_:
1364 WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1365 break;
1366
1367 case _TKIP_:
1368 if (bmcast)
1369 TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1370 else
1371 TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
1372 break;
1373
1374 case _AES_:
1375 if (bmcast)
1376 AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1377 else
1378 AES_IV(pattrib->iv, psta->dot11txpn, 0);
1379
1380 break;
1381
1382 case _GCMP_:
1383 case _GCMP_256_:
1384 if (bmcast)
1385 GCMP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1386 else
1387 GCMP_IV(pattrib->iv, psta->dot11txpn, 0);
1388
1389 break;
1390
1391 case _CCMP_256_:
1392 if (bmcast)
1393 GCMP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1394 else
1395 GCMP_IV(pattrib->iv, psta->dot11txpn, 0);
1396
1397 break;
1398
1399 #ifdef CONFIG_WAPI_SUPPORT
1400 case _SMS4_:
1401 rtw_wapi_get_iv(padapter, pattrib->ra, pattrib->iv);
1402 break;
1403 #endif
1404 default:
1405 break;
1406 }
1407
1408 return _SUCCESS;
1409 }
1410
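/*
 * update_attrib_sec_info - resolve the cipher, key index, IV/ICV lengths and
 * the software/hardware encryption choice for this frame; EAPOL frames may
 * be sent unencrypted during the 4-way handshake.
 */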
1411 static s32 update_attrib_sec_info(_adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta, enum eap_type eapol_type)
1412 {
1413 sint res = _SUCCESS;
1414 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1415 struct security_priv *psecuritypriv = &padapter->securitypriv;
1416 sint bmcast = IS_MCAST(pattrib->ra);
1417 s8 hw_decrypted = _FALSE;
1418
1419 _rtw_memset(pattrib->dot118021x_UncstKey.skey, 0, 16);
1420 _rtw_memset(pattrib->dot11tkiptxmickey.skey, 0, 16);
1421 pattrib->mac_id = psta->phl_sta->macid;
1422
1423 /* Comment by Owen at 2020/05/19
1424 * Issue: RTK STA sends encrypted 4-way 4/4 when AP thinks the 4-way incomplete
1425 * In TCL pressure test, AP may resend 4-way 3/4 with new replay counter in 2 ms.
1426 * In this situation, STA sends unencrypted 4-way 4/4 with old replay counter after more
1427 * than 2 ms, followed by the encrypted 4-way 4/4 with new replay counter. Because the
1428  * AP only accepts unencrypted 4-way 4/4 with a new replay counter, and the STA encrypts
1429 * each 4-way 4/4 at this time, the 4-way handshake cannot be completed.
1430 * So we modified that after STA receives unencrypted 4-way 1/4 and 4-way 3/4,
1431 * 4-way 2/4 and 4-way 4/4 sent by STA in the next 100 ms are not encrypted.
1432 */
1433 if (psta->ieee8021x_blocked == _TRUE ||
1434 ((eapol_type == EAPOL_2_4 || eapol_type == EAPOL_4_4) &&
1435 rtw_get_passing_time_ms(psta->resp_nonenc_eapol_key_starttime) <= 100)) {
1436
1437 if (eapol_type == EAPOL_2_4 || eapol_type == EAPOL_4_4)
1438 RTW_INFO("Respond unencrypted eapol key\n");
1439
1440 pattrib->encrypt = 0;
1441
1442 if ((pattrib->ether_type != 0x888e) && (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _FALSE)) {
1443 #ifdef DBG_TX_DROP_FRAME
1444 RTW_INFO("DBG_TX_DROP_FRAME %s psta->ieee8021x_blocked == _TRUE, pattrib->ether_type(%04x) != 0x888e\n", __FUNCTION__, pattrib->ether_type);
1445 #endif
1446 res = _FAIL;
1447 goto exit;
1448 }
1449 } else {
1450 GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
1451
1452 #ifdef CONFIG_WAPI_SUPPORT
1453 if (pattrib->ether_type == 0x88B4)
1454 pattrib->encrypt = _NO_PRIVACY_;
1455 #endif
1456
1457 switch (psecuritypriv->dot11AuthAlgrthm) {
1458 case dot11AuthAlgrthm_Open:
1459 case dot11AuthAlgrthm_Shared:
1460 case dot11AuthAlgrthm_Auto:
1461 pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
1462 break;
1463 case dot11AuthAlgrthm_8021X:
1464 if (bmcast)
1465 pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
1466 else
1467 pattrib->key_idx = 0;
1468 break;
1469 default:
1470 pattrib->key_idx = 0;
1471 break;
1472 }
1473
1474 /* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
1475 if (((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) && (pattrib->ether_type == 0x888e))
1476 pattrib->encrypt = _NO_PRIVACY_;
1477
1478 }
1479
1480 #ifdef CONFIG_TDLS
1481 if (pattrib->direct_link == _TRUE) {
1482 if (pattrib->encrypt > 0)
1483 pattrib->encrypt = _AES_;
1484 }
1485 #endif
1486
1487 switch (pattrib->encrypt) {
1488 case _WEP40_:
1489 case _WEP104_:
1490 pattrib->iv_len = 4;
1491 pattrib->icv_len = 4;
1492 break;
1493
1494 case _TKIP_:
1495 pattrib->iv_len = 8;
1496 pattrib->icv_len = 4;
1497
1498 if (psecuritypriv->busetkipkey == _FAIL) {
1499 #ifdef DBG_TX_DROP_FRAME
1500 RTW_INFO("DBG_TX_DROP_FRAME %s psecuritypriv->busetkipkey(%d)==_FAIL drop packet\n", __FUNCTION__, psecuritypriv->busetkipkey);
1501 #endif
1502 res = _FAIL;
1503 goto exit;
1504 }
1505
1506 _rtw_memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);
1507
1508 break;
1509
1510 case _AES_:
1511
1512 pattrib->iv_len = 8;
1513 pattrib->icv_len = 8;
1514
1515 break;
1516
1517 case _GCMP_:
1518 case _GCMP_256_:
1519
1520 pattrib->iv_len = 8;
1521 pattrib->icv_len = 16;
1522
1523 break;
1524
1525 case _CCMP_256_:
1526
1527 pattrib->iv_len = 8;
1528 pattrib->icv_len = 16;
1529
1530 break;
1531
1532 #ifdef CONFIG_WAPI_SUPPORT
1533 case _SMS4_:
1534 pattrib->iv_len = 18;
1535 pattrib->icv_len = 16;
1536 break;
1537 #endif
1538 default:
1539 pattrib->iv_len = 0;
1540 pattrib->icv_len = 0;
1541 break;
1542 }
1543
1544 if (pattrib->encrypt > 0) {
1545 _rtw_memcpy(pattrib->dot118021x_UncstKey.skey
1546 , psta->dot118021x_UncstKey.skey
1547 , (pattrib->encrypt & _SEC_TYPE_256_) ? 32 : 16);
1548 }
1549
1550 if (!bmcast)
1551 hw_decrypted = psta->hw_decrypted;
1552 else
1553 hw_decrypted = psecuritypriv->hw_decrypted;
1554
1555 if (pattrib->encrypt &&
1556 (padapter->securitypriv.sw_encrypt == _TRUE || hw_decrypted == _FALSE || pattrib->encrypt == _TKIP_)) {
1557 pattrib->bswenc = _TRUE;
1558 } else {
1559 pattrib->bswenc = _FALSE;
1560 }
1561
1562 #if defined(CONFIG_CONCURRENT_MODE)
1563 pattrib->bmc_camid = padapter->securitypriv.dot118021x_bmc_cam_id;
1564 #endif
1565
1566 #ifdef CONFIG_WAPI_SUPPORT
1567 if (pattrib->encrypt == _SMS4_)
1568 pattrib->bswenc = _FALSE;
1569 #endif
1570
1571 if ((pattrib->encrypt) && (eapol_type == EAPOL_4_4))
1572 pattrib->bswenc = _TRUE;
1573
1574 exit:
1575
1576 return res;
1577
1578 }
1579
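/*
 * qos_acm - downgrade the user priority when the target AC requires
 * admission control (ACM) and no traffic stream has been admitted:
 * BE -> BK, VI -> BE, VO -> VI.
 */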
1580 u8 qos_acm(u8 acm_mask, u8 priority)
1581 {
1582 u8 change_priority = priority;
1583
1584 switch (priority) {
1585 case 0:
1586 case 3:
1587 if (acm_mask & BIT(1))
1588 change_priority = 1;
1589 break;
1590 case 1:
1591 case 2:
1592 break;
1593 case 4:
1594 case 5:
1595 if (acm_mask & BIT(2))
1596 change_priority = 0;
1597 break;
1598 case 6:
1599 case 7:
1600 if (acm_mask & BIT(3))
1601 change_priority = 5;
1602 break;
1603 default:
1604 RTW_INFO("qos_acm(): invalid pattrib->priority: %d!!!\n", priority);
1605 break;
1606 }
1607
1608 return change_priority;
1609 }
1610
1611 /* Refer to IEEE 802.11-2016 Table R-3; complies with IETF RFC 4594 */
1612 u8 tos_to_up(u8 tos)
1613 {
1614 u8 up = 0;
1615 u8 dscp;
1616 u8 mode = CONFIG_RTW_UP_MAPPING_RULE;
1617
1618
1619 /* tos precedence mapping */
1620 if (mode == 0) {
1621 up = tos >> 5;
1622 return up;
1623 }
1624
1625 /* refer to IEEE802.11-2016 Table R-3;
1626  * DSCP 32 (CS4) complies with IETF RFC 4594
1627 */
1628 dscp = (tos >> 2);
1629
1630 if (dscp == 0)
1631 up = 0;
1632 else if (dscp >= 1 && dscp <= 9)
1633 up = 1;
1634 else if (dscp >= 10 && dscp <= 16)
1635 up = 2;
1636 else if (dscp >= 17 && dscp <= 23)
1637 up = 3;
1638 else if (dscp >= 24 && dscp <= 31)
1639 up = 4;
1640 else if (dscp >= 33 && dscp <= 40)
1641 up = 5;
1642 else if ((dscp >= 41 && dscp <= 47) || (dscp == 32))
1643 up = 6;
1644 else if (dscp >= 48 && dscp <= 63)
1645 up = 7;
1646
1647 return up;
1648 }
1649
1650 #if 0 //RTW_PHL_TX: unfinished code, kept for reference
1651 static void set_qos_core(struct xmit_frame *pxframe)
1652 {
1653 s32 UserPriority = 0;
1654
1655 if (!pxframe->pkt)
1656 goto null_pkt;
1657
1658 /* get UserPriority from IP hdr */
1659 if (pxframe->attrib.ether_type == 0x0800) {
1660 struct pkt_file ppktfile;
1661 struct ethhdr etherhdr;
1662 struct iphdr ip_hdr;
1663
1664 _rtw_open_pktfile(pxframe->pkt, &ppktfile);
1665 _rtw_pktfile_read(&ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
1666 _rtw_pktfile_read(&ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
1667 UserPriority = tos_to_up(ip_hdr.tos);
1668 }
1669
1670
1671 #ifdef CONFIG_ICMP_VOQ
1672 if (pxframe->attrib.icmp_pkt == 1)/*use VO queue to send icmp packet*/
1673 UserPriority = 7;
1674 #endif
1675 #ifdef CONFIG_IP_R_MONITOR
1676 if (pxframe->attrib.ether_type == ETH_P_ARP)
1677 UserPriority = 7;
1678 #endif/*CONFIG_IP_R_MONITOR*/
1679
1680 null_pkt:
1681 pxframe->attrib.priority = UserPriority;
1682 pxframe->attrib.hdrlen = WLAN_HDR_A3_QOS_LEN;
1683 pxframe->attrib.subtype = WIFI_QOS_DATA_TYPE;
1684 }
1685 #endif
1686
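/*
 * set_qos - derive the 802.11 user priority from the IP TOS/DSCP field
 * (optionally forcing VO for ICMP/ARP) and mark the frame as QoS data.
 */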
1687 static void set_qos(struct sk_buff *pkt, struct pkt_attrib *pattrib)
1688 {
1689 s32 UserPriority = 0;
1690
1691 if (!pkt)
1692 goto null_pkt;
1693
1694 /* get UserPriority from IP hdr */
1695 if (pattrib->ether_type == 0x0800) {
1696 struct pkt_file ppktfile;
1697 struct ethhdr etherhdr;
1698 struct iphdr ip_hdr;
1699
1700 _rtw_open_pktfile(pkt, &ppktfile);
1701 _rtw_pktfile_read(&ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
1702 _rtw_pktfile_read(&ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
1703 /* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
1704 UserPriority = tos_to_up(ip_hdr.tos);
1705 }
1706 /*
1707 else if (pattrib->ether_type == 0x888e) {
1708
1709
1710 UserPriority = 7;
1711 }
1712 */
1713
1714 #ifdef CONFIG_ICMP_VOQ
1715 if (pattrib->icmp_pkt == 1)/*use VO queue to send icmp packet*/
1716 UserPriority = 7;
1717 #endif
1718 #ifdef CONFIG_IP_R_MONITOR
1719 if (pattrib->ether_type == ETH_P_ARP)
1720 UserPriority = 7;
1721 #endif/*CONFIG_IP_R_MONITOR*/
1722
1723 null_pkt:
1724 pattrib->priority = UserPriority;
1725 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_QOS_LEN : WLAN_HDR_A3_QOS_LEN;
1726 pattrib->subtype = WIFI_QOS_DATA_TYPE;
1727 }
1728
1729 #ifdef CONFIG_TDLS
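/*
 * rtw_check_tdls_established - mark the frame for the TDLS direct link when
 * a linked TDLS peer exists for the destination and the frame is not ARP.
 */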
1730 u8 rtw_check_tdls_established(_adapter *padapter, struct pkt_attrib *pattrib)
1731 {
1732 pattrib->ptdls_sta = NULL;
1733
1734 pattrib->direct_link = _FALSE;
1735 if (padapter->tdlsinfo.link_established == _TRUE) {
1736 pattrib->ptdls_sta = rtw_get_stainfo(&padapter->stapriv, pattrib->dst);
1737 #if 1
1738 if ((pattrib->ptdls_sta != NULL) &&
1739 (pattrib->ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE) &&
1740 (pattrib->ether_type != 0x0806)) {
1741 pattrib->direct_link = _TRUE;
1742 /* RTW_INFO("send ptk to "MAC_FMT" using direct link\n", MAC_ARG(pattrib->dst)); */
1743 }
1744 #else
1745 if (pattrib->ptdls_sta != NULL &&
1746 pattrib->ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE) {
1747 pattrib->direct_link = _TRUE;
1748 #if 0
1749 RTW_INFO("send ptk to "MAC_FMT" using direct link\n", MAC_ARG(pattrib->dst));
1750 #endif
1751 }
1752
1753 /* ARP frame may be helped by AP*/
1754 if (pattrib->ether_type != 0x0806)
1755 pattrib->direct_link = _FALSE;
1756 #endif
1757 }
1758
1759 return pattrib->direct_link;
1760 }
1761
1762 s32 update_tdls_attrib(_adapter *padapter, struct pkt_attrib *pattrib)
1763 {
1764
1765 struct sta_info *psta = NULL;
1766 struct sta_priv *pstapriv = &padapter->stapriv;
1767 struct security_priv *psecuritypriv = &padapter->securitypriv;
1768 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1769 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
1770
1771 s32 res = _SUCCESS;
1772
1773 psta = rtw_get_stainfo(pstapriv, pattrib->ra);
1774 if (psta == NULL) {
1775 res = _FAIL;
1776 goto exit;
1777 }
1778
1779 pattrib->mac_id = psta->phl_sta->macid;
1780 pattrib->psta = psta;
1781 pattrib->ack_policy = 0;
1782 /* get ether_hdr_len */
1783 pattrib->pkt_hdrlen = ETH_HLEN;
1784
1785 pattrib->qos_en = psta->qos_option;
1786
1787 /* [TDLS] TODO: setup req/rsp should be AC_BK */
1788 if (pqospriv->qos_option && psta->qos_option) {
1789 pattrib->priority = 4; /* tdls management frame should be AC_VI */
1790 pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
1791 pattrib->subtype = WIFI_QOS_DATA_TYPE;
1792 } else {
1793 pattrib->priority = 0;
1794 pattrib->hdrlen = WLAN_HDR_A3_LEN;
1795 pattrib->subtype = WIFI_DATA_TYPE;
1796 }
1797
1798 /* TODO:_lock */
1799 if (update_attrib_sec_info(padapter, pattrib, psta, NON_EAPOL) == _FAIL) {
1800 res = _FAIL;
1801 goto exit;
1802 }
1803
1804 update_attrib_phy_info(padapter, pattrib, psta);
1805
1806
1807 exit:
1808
1809 return res;
1810 }
1811
1812 #endif /* CONFIG_TDLS */
1813
1814
1815 #ifdef CONFIG_LPS
1816 #define LPS_PT_NORMAL 0
1817 #define LPS_PT_SP 1 /* only DHCP packets are treated as SPECIAL_PACKET */
1818 #define LPS_PT_ICMP 2
1819
1820 /* If EAPOL, ARP, or DHCP packet, the driver must be in active mode. */
1821 static u8 _rtw_lps_chk_packet_type(struct pkt_attrib *pattrib)
1822 {
1823 u8 pkt_type = LPS_PT_NORMAL; /*normal data frame*/
1824
1825 #ifdef CONFIG_WAPI_SUPPORT
1826 if ((pattrib->ether_type == 0x88B4) || (pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
1827 pkt_type = LPS_PT_SP;
1828 #else /* !CONFIG_WAPI_SUPPORT */
1829
1830 #ifndef CONFIG_LPS_NOT_LEAVE_FOR_ICMP
1831 if (pattrib->icmp_pkt == 1)
1832 pkt_type = LPS_PT_ICMP;
1833 else
1834 #endif
1835 if (pattrib->dhcp_pkt == 1)
1836 pkt_type = LPS_PT_SP;
1837 #endif
1838 return pkt_type;
1839 }
1840 #endif
1841
1842 #if 0 //RTW_PHL_TX: mark un-finished codes for reading
1843 static s32 update_xmitframe_from_hdr(_adapter *padapter, struct xmit_frame *pxframe)
1844 {
1845 uint i;
1846 struct pkt_file pktfile;
1847 struct sta_info *psta = NULL;
1848 struct ethhdr etherhdr;
1849 struct sk_buff *pkt = NULL;
1850 sint bmcast;
1851
1852 struct sta_priv *pstapriv = &padapter->stapriv;
1853 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1854 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
1855 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
1856
1857 PHLTX_LOG;
1858
1859 if (pxframe->pkt)
1860 pkt = pxframe->pkt;
1861 else
1862 return FAIL;
1863
1864 PHLTX_LOG;
1865
1866 _rtw_open_pktfile(pkt, &pktfile);
1867 i = _rtw_pktfile_read(&pktfile, (u8 *)ðerhdr, ETH_HLEN);
1868
1869 pxframe->attrib.ether_type = ntohs(etherhdr.h_proto);
1870
1871 if (MLME_IS_MESH(padapter)) /* address resolve is done for mesh */
1872 goto get_sta_info;
1873
1874 _rtw_memcpy(pxframe->attrib.dst, ðerhdr.h_dest, ETH_ALEN);
1875 _rtw_memcpy(pxframe->attrib.src, ðerhdr.h_source, ETH_ALEN);
1876
1877 if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
1878 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
1879 _rtw_memcpy(pxframe->attrib.ra, pxframe->attrib.dst, ETH_ALEN);
1880 _rtw_memcpy(pxframe->attrib.ta, adapter_mac_addr(padapter), ETH_ALEN);
1881 } else if (MLME_IS_STA(padapter)) {
1882
1883 #if 0//def CONFIG_TDLS //rtw_phl_tx
1884 if (rtw_check_tdls_established(padapter, pattrib) == _TRUE)
1885 _rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); /* For TDLS direct link Tx, set ra to be same to dst */
1886 else
1887 #endif
1888 _rtw_memcpy(pxframe->attrib.ra, get_bssid(pmlmepriv), ETH_ALEN);
1889 _rtw_memcpy(pxframe->attrib.ta, adapter_mac_addr(padapter), ETH_ALEN);
1890 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_sta);
1891 } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
1892 _rtw_memcpy(pxframe->attrib.ra, pxframe->attrib.dst, ETH_ALEN);
1893 _rtw_memcpy(pxframe->attrib.ta, get_bssid(pmlmepriv), ETH_ALEN);
1894 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_ap);
1895 } else
1896 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_unknown);
1897
1898 PHLTX_LOG;
1899
1900 get_sta_info:
1901 bmcast = IS_MCAST(pxframe->attrib.ra);
1902 if (bmcast) {
1903 PHLTX_LOG;
1904 psta = rtw_get_bcmc_stainfo(padapter);
1905 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
1906 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sta);
1907 #ifdef DBG_TX_DROP_FRAME
1908 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pxframe->attrib.ra));
1909 #endif
1910 return FAIL;
1911 }
1912 } else {
1913 PHLTX_LOG;
1914 psta = rtw_get_stainfo(pstapriv, pxframe->attrib.ra);
1915 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
1916 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_sta);
1917 #ifdef DBG_TX_DROP_FRAME
1918 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pxframe->attrib.ra));
1919 #endif
1920 return FAIL;
1921 } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE && !(psta->state & WIFI_ASOC_STATE)) {
1922 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_ap_link);
1923 return FAIL;
1924 }
1925 }
1926
1927 PHLTX_LOG;
1928
1929 if (!(psta->state & WIFI_ASOC_STATE)) {
1930 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_link);
1931 RTW_INFO("%s-"ADPT_FMT" psta("MAC_FMT")->state(0x%x) != WIFI_ASOC_STATE\n",
1932 __func__, ADPT_ARG(padapter), MAC_ARG(psta->phl_sta->mac_addr), psta->state);
1933 return FAIL;
1934 }
1935
1936 pxframe->attrib.psta = psta;
1937
1938 PHLTX_LOG;
1939
1940 pxframe->attrib.pktlen = pktfile.pkt_len;
1941
1942 /* TODO: 802.1Q VLAN header */
1943 /* TODO: IPV6 */
1944
1945 if (ETH_P_IP == pxframe->attrib.ether_type) {
1946 u8 ip[20];
1947
1948 _rtw_pktfile_read(&pktfile, ip, 20);
1949
1950 if (GET_IPV4_IHL(ip) * 4 > 20)
1951 _rtw_pktfile_read(&pktfile, NULL, GET_IPV4_IHL(ip) - 20);
1952
1953 pxframe->attrib.icmp_pkt = 0;
1954 pxframe->attrib.dhcp_pkt = 0;
1955 pxframe->attrib.hipriority_pkt = 0;
1956
1957 if (GET_IPV4_PROTOCOL(ip) == 0x01) { /* ICMP */
1958 pxframe->attrib.icmp_pkt = 1;
1959 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
1960
1961 } else if (GET_IPV4_PROTOCOL(ip) == 0x11) { /* UDP */
1962 u8 udp[24];
1963
1964 _rtw_pktfile_read(&pktfile, udp, 24);
1965
1966 if ((GET_UDP_SRC(udp) == 68 && GET_UDP_DST(udp) == 67)
1967 || (GET_UDP_SRC(udp) == 67 && GET_UDP_DST(udp) == 68)
1968 ) {
1969 /* 67 : UDP BOOTP server, 68 : UDP BOOTP client */
1970 if (pxframe->attrib.pktlen > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
1971 pxframe->attrib.dhcp_pkt = 1;
1972 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_dhcp);
1973 if (0)
1974 RTW_INFO("send DHCP packet\n");
1975 }
1976 }
1977
1978 /* WaveAgent packet, increase priority so that the system can read data in time */
1979 if (((GET_UDP_SIG1(udp) == 0xcc) || (GET_UDP_SIG1(udp) == 0xdd)) &&
1980 (GET_UDP_SIG2(udp) == 0xe2)) {
1981 pxframe->attrib.hipriority_pkt = 1;
1982 }
1983
1984 } else if (GET_IPV4_PROTOCOL(ip) == 0x06 /* TCP */
1985 && rtw_st_ctl_chk_reg_s_proto(&psta->st_ctl, 0x06) == _TRUE
1986 ) {
1987 u8 tcp[20];
1988
1989 _rtw_pktfile_read(&pktfile, tcp, 20);
1990
1991 if (rtw_st_ctl_chk_reg_rule(&psta->st_ctl, padapter, IPV4_SRC(ip), TCP_SRC(tcp), IPV4_DST(ip), TCP_DST(tcp)) == _TRUE) {
1992 if (GET_TCP_SYN(tcp) && GET_TCP_ACK(tcp)) {
1993 session_tracker_add_cmd(padapter, psta
1994 , IPV4_SRC(ip), TCP_SRC(tcp)
1995 , IPV4_DST(ip), TCP_DST(tcp));
1996 if (DBG_SESSION_TRACKER)
1997 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" SYN-ACK\n"
1998 , FUNC_ADPT_ARG(padapter)
1999 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2000 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2001 }
2002 if (GET_TCP_FIN(tcp)) {
2003 session_tracker_del_cmd(padapter, psta
2004 , IPV4_SRC(ip), TCP_SRC(tcp)
2005 , IPV4_DST(ip), TCP_DST(tcp));
2006 if (DBG_SESSION_TRACKER)
2007 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" FIN\n"
2008 , FUNC_ADPT_ARG(padapter)
2009 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2010 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2011 }
2012 }
2013 }
2014
2015 } else if (0x888e == pxframe->attrib.ether_type)
2016 parsing_eapol_packet(padapter, pktfile.cur_addr, psta, 1);
2017 #if defined (DBG_ARP_DUMP) || defined (DBG_IP_R_MONITOR)
2018 else if (pxframe->attrib.ether_type == ETH_P_ARP) {
2019 u8 arp[28] = {0};
2020
2021 _rtw_pktfile_read(&pktfile, arp, 28);
2022 dump_arp_pkt(RTW_DBGDUMP, etherhdr.h_dest, etherhdr.h_source, arp, 1);
2023 }
2024 #endif
2025
2026 PHLTX_LOG;
2027
2028 if ((pxframe->attrib.ether_type == 0x888e) || (pxframe->attrib.dhcp_pkt == 1))
2029 rtw_mi_set_scan_deny(padapter, 3000);
2030
2031 if (MLME_IS_STA(padapter) &&
2032 pxframe->attrib.ether_type == ETH_P_ARP &&
2033 !IS_MCAST(pxframe->attrib.dst)) {
2034 rtw_mi_set_scan_deny(padapter, 1000);
2035 rtw_mi_scan_abort(padapter, _FALSE); /*rtw_scan_abort_no_wait*/
2036 }
2037
2038
2039 PHLTX_LOG;
2040
2041 /* get ether_hdr_len */
2042 pxframe->attrib.pkt_hdrlen = ETH_HLEN;/* (pattrib->ether_type == 0x8100) ? (14 + 4 ): 14; */ /* vlan tag */
2043
2044 pxframe->attrib.hdrlen = WLAN_HDR_A3_LEN;
2045 pxframe->attrib.type = WIFI_DATA_TYPE;
2046 pxframe->attrib.subtype = WIFI_DATA_TYPE;
2047 pxframe->attrib.qos_en = pxframe->attrib.psta->qos_option;
2048 pxframe->attrib.priority = 0;
2049
2050 pxframe->attrib.frag_len = pxmitpriv->frag_len;
2051
2052
2053
2054 PHLTX_LOG;
2055
2056 return SUCCESS;
2057 }
2058
2059
2060 static s32 update_xmitframe_qos(_adapter *padapter, struct xmit_frame *pxframe)
2061 {
2062
2063 struct sta_priv *pstapriv = &padapter->stapriv;
2064 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2065 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2066 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2067
2068 if (!pxframe->attrib.qos_en)
2069 return SUCCESS;
2070
2071 if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_MESH_STATE
2072 | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)
2073 ) {
2074 set_qos_core(pxframe);
2075 #if 0//rtw_phl_tx def CONFIG_RTW_MESH
2076 if (MLME_IS_MESH(padapter))
2077 rtw_mesh_tx_set_whdr_mctrl_len(pattrib->mesh_frame_mode, pattrib);
2078 #endif
2079 } else {
2080 #if 0// def CONFIG_TDLS
2081 if (pattrib->direct_link == _TRUE) {
2082 if (pattrib->qos_en)
2083 set_qos(pkt, pattrib);
2084 } else
2085 #endif
2086 {
2087 if (pqospriv->qos_option) {
2088 set_qos_core(pxframe);
2089
2090 if (pmlmepriv->acm_mask != 0)
2091 pxframe->attrib.priority = qos_acm(pmlmepriv->acm_mask, pxframe->attrib.priority);
2092 }
2093 }
2094 }
2095
2096 return SUCCESS;
2097 }
2098
2099 static s32 update_xmitframe_security(_adapter *padapter, struct xmit_frame *pxframe) //rtw_phl_tx todo
2100 {
2101 sint res = _SUCCESS;
2102 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2103 struct security_priv *psecuritypriv = &padapter->securitypriv;
2104 sint bmcast = IS_MCAST(pxframe->attrib.ra);
2105
2106 #if 0
2107 _rtw_memset(pattrib->dot118021x_UncstKey.skey, 0, 16);
2108 _rtw_memset(pattrib->dot11tkiptxmickey.skey, 0, 16);
2109 pattrib->mac_id = psta->phl_sta->macid;
2110 #endif
2111
2112 if (pxframe->attrib.psta->ieee8021x_blocked == _TRUE) {
2113
2114 pxframe->attrib.encrypt = 0;
2115
2116 if ((pxframe->attrib.ether_type != 0x888e) && (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _FALSE)) {
2117 #ifdef DBG_TX_DROP_FRAME
2118 RTW_INFO("DBG_TX_DROP_FRAME %s psta->ieee8021x_blocked == _TRUE, pattrib->ether_type(%04x) != 0x888e\n", __FUNCTION__, pxframe->attrib.ether_type);
2119 #endif
2120 res = _FAIL;
2121 goto exit;
2122 }
2123 } else {
2124 GET_ENCRY_ALGO(psecuritypriv, pxframe->attrib.psta, pxframe->attrib.encrypt, bmcast);
2125
2126 #ifdef CONFIG_WAPI_SUPPORT
2127 if (pxframe->attrib.ether_type == 0x88B4)
2128 pxframe->attrib.encrypt = _NO_PRIVACY_;
2129 #endif
2130
2131 switch (psecuritypriv->dot11AuthAlgrthm) {
2132 case dot11AuthAlgrthm_Open:
2133 case dot11AuthAlgrthm_Shared:
2134 case dot11AuthAlgrthm_Auto:
2135 pxframe->attrib.key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
2136 break;
2137 case dot11AuthAlgrthm_8021X:
2138 if (bmcast)
2139 pxframe->attrib.key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
2140 else
2141 pxframe->attrib.key_idx = 0;
2142 break;
2143 default:
2144 pxframe->attrib.key_idx = 0;
2145 break;
2146 }
2147
2148 /* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
2149 if (((pxframe->attrib.encrypt == _WEP40_) || (pxframe->attrib.encrypt == _WEP104_)) && (pxframe->attrib.ether_type == 0x888e))
2150 pxframe->attrib.encrypt = _NO_PRIVACY_;
2151
2152 }
2153
2154 #if 0 //def CONFIG_TDLS
2155 if (pattrib->direct_link == _TRUE) {
2156 if (pxframe->attrib.encrypt > 0)
2157 pxframe->attrib.encrypt = _AES_;
2158 }
2159 #endif
2160
2161 switch (pxframe->attrib.encrypt) {
2162 case _WEP40_:
2163 case _WEP104_:
2164 pxframe->attrib.iv_len = 4;
2165 pxframe->attrib.icv_len = 4;
2166 WEP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2167 break;
2168
2169 case _TKIP_:
2170 pxframe->attrib.iv_len = 8;
2171 pxframe->attrib.icv_len = 4;
2172
2173 if (psecuritypriv->busetkipkey == _FAIL) {
2174 #ifdef DBG_TX_DROP_FRAME
2175 RTW_INFO("DBG_TX_DROP_FRAME %s psecuritypriv->busetkipkey(%d)==_FAIL drop packet\n", __FUNCTION__, psecuritypriv->busetkipkey);
2176 #endif
2177 res = _FAIL;
2178 goto exit;
2179 }
2180
2181 if (bmcast)
2182 TKIP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2183 else
2184 TKIP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2185
2186
2187 //_rtw_memcpy(pattrib->dot11tkiptxmickey.skey, pxframe->attrib.psta->dot11tkiptxmickey.skey, 16);
2188
2189 break;
2190
2191 case _AES_:
2192
2193 pxframe->attrib.iv_len = 8;
2194 pxframe->attrib.icv_len = 8;
2195
2196 if (bmcast)
2197 AES_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2198 else
2199 AES_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2200
2201 break;
2202
2203 case _GCMP_:
2204 case _GCMP_256_:
2205
2206 pxframe->attrib.iv_len = 8;
2207 pxframe->attrib.icv_len = 16;
2208
2209 if (bmcast)
2210 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2211 else
2212 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2213
2214 break;
2215
2216 case _CCMP_256_:
2217
2218 pxframe->attrib.iv_len = 8;
2219 pxframe->attrib.icv_len = 16;
2220
2221 if (bmcast)
2222 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2223 else
2224 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2225
2226 break;
2227
2228 #ifdef CONFIG_WAPI_SUPPORT
2229 case _SMS4_:
2230 pxframe->attrib.iv_len = 18;
2231 pxframe->attrib.icv_len = 16;
2232 rtw_wapi_get_iv(padapter, pxframe->attrib.ra, pxframe->attrib.iv);
2233 break;
2234 #endif
2235 default:
2236 pxframe->attrib.iv_len = 0;
2237 pxframe->attrib.icv_len = 0;
2238 break;
2239 }
2240
2241 #if 0
2242 if (pxframe->attrib.encrypt > 0) {
2243 _rtw_memcpy(pattrib->dot118021x_UncstKey.skey
2244 , pxframe->attrib.psta->dot118021x_UncstKey.skey
2245 , (pxframe->attrib.encrypt & _SEC_TYPE_256_) ? 32 : 16);
2246 }
2247 #endif
2248
2249 if (pxframe->attrib.encrypt &&
2250 ((padapter->securitypriv.sw_encrypt == _TRUE) || (psecuritypriv->hw_decrypted == _FALSE))) {
2251 pxframe->attrib.bswenc = _TRUE;
2252 } else {
2253 pxframe->attrib.bswenc = _FALSE;
2254 }
2255
2256 #if defined(CONFIG_CONCURRENT_MODE)
2257 //pattrib->bmc_camid = padapter->securitypriv.dot118021x_bmc_cam_id;
2258 #endif
2259
2260 #ifdef CONFIG_WAPI_SUPPORT
2261 if (pxframe->attrib.encrypt == _SMS4_)
2262 pxframe->attrib.bswenc = _FALSE;
2263 #endif
2264
2265 exit:
2266 return res;
2267
2268 }
2269
2270 static s32 update_xmitframe_hw(_adapter *padapter, struct xmit_frame *pxframe)
2271 {
2272 pxframe->phl_txreq->mdata.rts_en = pxframe->attrib.psta->rtsen;
2273 pxframe->phl_txreq->mdata.cts2self = pxframe->attrib.psta->cts2self;
2274 pxframe->phl_txreq->mdata.ampdu_density = 0;
2275 return 0;
2276 }
2277
2278 #if 0
2279 static s32 rtw_core_update_txattrib(_adapter *padapter, struct xmit_frame *pxframe)
2280 {
2281 uint i;
2282 struct pkt_file pktfile;
2283 struct sta_info *psta = NULL;
2284 struct ethhdr etherhdr;
2285 struct sk_buff *pkt = NULL;
2286
2287 struct sta_priv *pstapriv = &padapter->stapriv;
2288 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2289 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2290 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2291 sint res = _SUCCESS;
2292
2293 #if 0//rtw_phl_tx todo def CONFIG_LPS
2294 pkt_type = _rtw_lps_chk_packet_type(pattrib);
2295
2296 if (pkt_type == LPS_PT_SP) {/*packet is as SPECIAL_PACKET*/
2297 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_active);
2298 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 0);
2299 } else if (pkt_type == LPS_PT_ICMP)
2300 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 0);
2301 #endif /* CONFIG_LPS */
2302
2303 #if 0//rtw_phl_tx todo def CONFIG_BEAMFORMING
2304 update_attrib_txbf_info(padapter, pattrib, psta);
2305 #endif
2306
2307 #if 0
2308 /* TODO:_lock */
2309 if (update_attrib_sec_info(padapter, pattrib, psta, NON_EAPOL) == _FAIL) {
2310 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
2311 res = _FAIL;
2312 goto exit;
2313 }
2314 #endif
2315
2316 update_attrib_phy_info(padapter, pattrib, psta);
2317
2318 /* RTW_INFO("%s ==> mac_id(%d)\n",__FUNCTION__,pattrib->mac_id ); */
2319 /* TODO:_unlock */
2320
2321 #ifdef CONFIG_AUTO_AP_MODE
2322 if (psta->isrc && psta->pid > 0)
2323 pattrib->pctrl = _TRUE;
2324 else
2325 #endif
2326 pattrib->pctrl = 0;
2327
2328 pattrib->ack_policy = 0;
2329
2330 if (bmcast)
2331 pattrib->rate = psta->init_rate;
2332
2333
2334 #ifdef CONFIG_WMMPS_STA
2335 update_attrib_trigger_frame_info(padapter, pattrib);
2336 #endif /* CONFIG_WMMPS_STA */
2337
2338 /* pattrib->priority = 5; */ /* force to use VI queue, for testing */
2339 pattrib->hw_ssn_sel = pxmitpriv->hw_ssn_seq_no;
2340
2341 pattrib->wdinfo_en = 1;/*FPGA_test YiWei need modify*/
2342
2343 rtw_set_tx_chksum_offload(pkt, pattrib);
2344
2345 exit:
2346 return res;
2347 }
2348 #endif
2349 #endif
2350
2351 static u8 rtw_chk_htc_en(_adapter *padapter, struct sta_info *psta, struct pkt_attrib *pattrib)
2352 {
2353
2354 #ifdef CONFIG_80211AX_HE
2355 if (psta->hepriv.he_option == _TRUE) {
2356 /* By test, some HE APs cannot handle EAPOL/ARP/DHCP packets with an appended HT Control field */
2357 if ((0x888e == pattrib->ether_type) || (0x0806 == pattrib->ether_type) || (pattrib->dhcp_pkt == 1))
2358 return 0;
2359 else if (rtw_get_current_tx_rate(padapter, psta) < RTW_DATA_RATE_HE_NSS1_MCS0)
2360 return 0;
2361 else
2362 return rtw_he_htc_en(padapter, psta);
2363 }
2364 #endif
2365
2366 return 0;
2367 }
2368
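/*
 * Main tx attribute setup for a data frame: parse the Ethernet header from the
 * skb, resolve dst/src/ra/ta per role (ad-hoc, STA with optional TDLS/WDS, or
 * AP/mesh where address resolution was already done), look up the sta_info,
 * classify special traffic (ICMP, DHCP, EAPOL, ARP, tracked TCP sessions),
 * handle LPS wakeup, then fill the security, QoS, +HTC and PHY related fields
 * of pkt_attrib.
 */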
2369 static s32 update_attrib(_adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
2370 {
2371 uint i;
2372 struct pkt_file pktfile;
2373 struct sta_info *psta = NULL;
2374 struct ethhdr etherhdr;
2375
2376 sint bmcast;
2377 struct sta_priv *pstapriv = &padapter->stapriv;
2378 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2379 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2380 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2381 sint res = _SUCCESS;
2382 enum eap_type eapol_type = NON_EAPOL;
2383 #ifdef CONFIG_LPS
2384 u8 pkt_type = 0;
2385 #endif
2386
2387 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);
2388
2389 _rtw_open_pktfile(pkt, &pktfile);
2390 i = _rtw_pktfile_read(&pktfile, (u8 *)ðerhdr, ETH_HLEN);
2391
2392 pattrib->ether_type = ntohs(etherhdr.h_proto);
2393
2394 if (MLME_STATE(padapter) & (WIFI_AP_STATE | WIFI_MESH_STATE)) /* address resolve is done for ap/mesh */
2395 goto get_sta_info;
2396
2397 _rtw_memcpy(pattrib->dst, ðerhdr.h_dest, ETH_ALEN);
2398 _rtw_memcpy(pattrib->src, ðerhdr.h_source, ETH_ALEN);
2399 _rtw_memcpy(pattrib->ta, adapter_mac_addr(padapter), ETH_ALEN);
2400
2401 if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
2402 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
2403 _rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
2404 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_adhoc);
2405 } else if (MLME_IS_STA(padapter)) {
2406 #ifdef CONFIG_TDLS
2407 if (rtw_check_tdls_established(padapter, pattrib) == _TRUE)
2408 _rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); /* For TDLS direct link Tx, set ra to be same to dst */
2409 else
2410 #endif
2411 {
2412 _rtw_memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
2413 #ifdef CONFIG_RTW_WDS
2414 if (adapter_use_wds(padapter)
2415 && _rtw_memcmp(pattrib->src, pattrib->ta, ETH_ALEN) == _FALSE
2416 ) {
2417 pattrib->wds = 1;
2418 if (IS_MCAST(pattrib->dst))
2419 rtw_tx_wds_gptr_update(padapter, pattrib->src);
2420 }
2421 #endif
2422 }
2423 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_sta);
2424 } else
2425 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_unknown);
2426
2427 get_sta_info:
2428 bmcast = IS_MCAST(pattrib->ra);
2429 if (bmcast) {
2430 psta = rtw_get_bcmc_stainfo(padapter);
2431 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
2432 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sta);
2433 #ifdef DBG_TX_DROP_FRAME
2434 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pattrib->ra));
2435 #endif
2436 res = _FAIL;
2437 goto exit;
2438 }
2439 } else {
2440 psta = rtw_get_stainfo(pstapriv, pattrib->ra);
2441 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
2442 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_sta);
2443 #ifdef DBG_TX_DROP_FRAME
2444 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pattrib->ra));
2445 #endif
2446 res = _FAIL;
2447 goto exit;
2448 } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE && !(psta->state & WIFI_ASOC_STATE)) {
2449 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_ap_link);
2450 res = _FAIL;
2451 goto exit;
2452 }
2453
2454 #ifdef CONFIG_RTW_WDS
2455 if (XATTRIB_GET_WDS(pattrib) && !(psta->flags & WLAN_STA_WDS))
2456 pattrib->wds = 0;
2457 #endif
2458 }
2459
2460 if (!(psta->state & WIFI_ASOC_STATE)) {
2461 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_link);
2462 RTW_INFO("%s-"ADPT_FMT" psta("MAC_FMT")->state(0x%x) != WIFI_ASOC_STATE\n",
2463 __func__, ADPT_ARG(padapter), MAC_ARG(psta->phl_sta->mac_addr), psta->state);
2464 res = _FAIL;
2465 goto exit;
2466 }
2467
2468 pattrib->pktlen = pktfile.pkt_len;
2469 #ifdef CONFIG_CORE_TXSC
2470 pattrib->frag_len = pxmitpriv->frag_len;
2471 #endif
2472
2473 /* TODO: 802.1Q VLAN header */
2474 /* TODO: IPV6 */
2475
2476 if (ETH_P_IP == pattrib->ether_type) {
2477 u8 ip[20];
2478
2479 _rtw_pktfile_read(&pktfile, ip, 20);
2480
2481 if (GET_IPV4_IHL(ip) * 4 > 20)
2482 _rtw_pktfile_read(&pktfile, NULL, GET_IPV4_IHL(ip) - 20);
2483
2484 pattrib->icmp_pkt = 0;
2485 pattrib->dhcp_pkt = 0;
2486 pattrib->hipriority_pkt = 0;
2487
2488 if (GET_IPV4_PROTOCOL(ip) == 0x01) { /* ICMP */
2489 pattrib->icmp_pkt = 1;
2490 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
2491
2492 } else if (GET_IPV4_PROTOCOL(ip) == 0x11) { /* UDP */
2493 u8 udp[24];
2494
2495 _rtw_pktfile_read(&pktfile, udp, 24);
2496
2497 if ((GET_UDP_SRC(udp) == 68 && GET_UDP_DST(udp) == 67)
2498 || (GET_UDP_SRC(udp) == 67 && GET_UDP_DST(udp) == 68)
2499 ) {
2500 /* 67 : UDP BOOTP server, 68 : UDP BOOTP client */
2501 if (pattrib->pktlen > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
2502 pattrib->dhcp_pkt = 1;
2503 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_dhcp);
2504 if (0)
2505 RTW_INFO("send DHCP packet\n");
2506 }
2507 }
2508
2509 /* WaveAgent packet, increase priority so that the system can read data in time */
2510 if (((GET_UDP_SIG1(udp) == 0xcc) || (GET_UDP_SIG1(udp) == 0xdd)) &&
2511 (GET_UDP_SIG2(udp) == 0xe2)) {
2512 pattrib->hipriority_pkt = 1;
2513 }
2514
2515 } else if (GET_IPV4_PROTOCOL(ip) == 0x06 /* TCP */
2516 && rtw_st_ctl_chk_reg_s_proto(&psta->st_ctl, 0x06) == _TRUE
2517 ) {
2518 u8 tcp[20];
2519
2520 _rtw_pktfile_read(&pktfile, tcp, 20);
2521
2522 if (rtw_st_ctl_chk_reg_rule(&psta->st_ctl, padapter, IPV4_SRC(ip), TCP_SRC(tcp), IPV4_DST(ip), TCP_DST(tcp)) == _TRUE) {
2523 if (GET_TCP_SYN(tcp) && GET_TCP_ACK(tcp)) {
2524 session_tracker_add_cmd(padapter, psta
2525 , IPV4_SRC(ip), TCP_SRC(tcp)
2526 , IPV4_DST(ip), TCP_DST(tcp));
2527 if (DBG_SESSION_TRACKER)
2528 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" SYN-ACK\n"
2529 , FUNC_ADPT_ARG(padapter)
2530 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2531 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2532 }
2533 if (GET_TCP_FIN(tcp)) {
2534 session_tracker_del_cmd(padapter, psta
2535 , IPV4_SRC(ip), TCP_SRC(tcp)
2536 , IPV4_DST(ip), TCP_DST(tcp));
2537 if (DBG_SESSION_TRACKER)
2538 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" FIN\n"
2539 , FUNC_ADPT_ARG(padapter)
2540 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2541 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2542 }
2543 }
2544 }
2545
2546 } else if (0x888e == pattrib->ether_type)
2547 eapol_type = parsing_eapol_packet(padapter, pktfile.cur_addr, psta, 1);
2548 #if defined (DBG_ARP_DUMP) || defined (DBG_IP_R_MONITOR)
2549 else if (pattrib->ether_type == ETH_P_ARP) {
2550 u8 arp[28] = {0};
2551
2552 _rtw_pktfile_read(&pktfile, arp, 28);
2553 dump_arp_pkt(RTW_DBGDUMP, etherhdr.h_dest, etherhdr.h_source, arp, 1);
2554 }
2555 #endif
2556
2557 if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
2558 rtw_mi_set_scan_deny(padapter, 3000);
2559
2560 if (MLME_IS_STA(padapter) &&
2561 pattrib->ether_type == ETH_P_ARP &&
2562 !IS_MCAST(pattrib->dst)) {
2563 rtw_mi_set_scan_deny(padapter, 1000);
2564 rtw_mi_scan_abort(padapter, _FALSE); /*rtw_scan_abort_no_wait*/
2565 }
2566
2567 #ifdef CONFIG_LPS
2568 pkt_type = _rtw_lps_chk_packet_type(pattrib);
2569
2570 if (pkt_type == LPS_PT_SP) {/*packet is as SPECIAL_PACKET*/
2571 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_active);
2572 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 0);
2573 } else if (pkt_type == LPS_PT_ICMP)
2574 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 0);
2575 #endif /* CONFIG_LPS */
2576
2577 /* TODO:_lock */
2578 if (update_attrib_sec_info(padapter, pattrib, psta, eapol_type) == _FAIL) {
2579 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
2580 res = _FAIL;
2581 goto exit;
2582 }
2583
2584 /* get ether_hdr_len */
2585 pattrib->pkt_hdrlen = ETH_HLEN;/* (pattrib->ether_type == 0x8100) ? (14 + 4 ): 14; */ /* vlan tag */
2586
2587 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_LEN : WLAN_HDR_A3_LEN;
2588 pattrib->type = WIFI_DATA_TYPE;
2589 pattrib->subtype = WIFI_DATA_TYPE;
2590 pattrib->qos_en = psta->qos_option;
2591 pattrib->priority = 0;
2592
2593 if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_MESH_STATE
2594 | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)
2595 ) {
2596 if (pattrib->qos_en) {
2597 set_qos(pkt, pattrib);
2598 #ifdef CONFIG_RTW_MESH
2599 if (MLME_IS_MESH(padapter))
2600 rtw_mesh_tx_set_whdr_mctrl_len(pattrib->mesh_frame_mode, pattrib);
2601 #endif
2602 }
2603 } else {
2604 #ifdef CONFIG_TDLS
2605 if (pattrib->direct_link == _TRUE) {
2606 if (pattrib->qos_en)
2607 set_qos(pkt, pattrib);
2608 } else
2609 #endif
2610 {
2611 if (pqospriv->qos_option) {
2612 set_qos(pkt, pattrib);
2613
2614 if (pmlmepriv->acm_mask != 0)
2615 pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
2616 }
2617 }
2618 }
2619
2620 pattrib->order = rtw_chk_htc_en(padapter, psta, pattrib);
2621 if (pattrib->order) {
2622 if (pattrib->qos_en)
2623 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_QOS_HTC_LEN : WLAN_HDR_A3_QOS_HTC_LEN;
2624 else
2625 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_HTC_LEN : WLAN_HDR_A3_HTC_LEN;
2626 }
2627
2628 update_attrib_phy_info(padapter, pattrib, psta);
2629
2630 /* RTW_INFO("%s ==> mac_id(%d)\n",__FUNCTION__,pattrib->mac_id ); */
2631
2632 pattrib->psta = psta;
2633 /* TODO:_unlock */
2634
2635 #ifdef CONFIG_AUTO_AP_MODE
2636 if (psta->isrc && psta->pid > 0)
2637 pattrib->pctrl = _TRUE;
2638 else
2639 #endif
2640 pattrib->pctrl = 0;
2641
2642 pattrib->ack_policy = 0;
2643
2644 if (bmcast)
2645 pattrib->rate = psta->init_rate;
2646
2647
2648 #ifdef CONFIG_WMMPS_STA
2649 update_attrib_trigger_frame_info(padapter, pattrib);
2650 #endif /* CONFIG_WMMPS_STA */
2651
2652 /* pattrib->priority = 5; */ /* force to use VI queue, for testing */
2653 pattrib->hw_ssn_sel = pxmitpriv->hw_ssn_seq_no;
2654
2655 pattrib->wdinfo_en = 1;/*FPGA_test YiWei need modify*/
2656
2657 rtw_set_tx_chksum_offload(pkt, pattrib);
2658
2659 exit:
2660
2661
2662 return res;
2663 }
2664
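/*
 * TKIP only: compute the Michael MIC over DA/SA (taken from the 802.11 header
 * according to ToDS/FrDS), the priority byte and every fragment payload, then
 * append the 8-byte MIC to the last fragment and grow last_txcmdsz by 8.
 */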
2665 static s32 xmitframe_addmic(_adapter *padapter, struct xmit_frame *pxmitframe)
2666 {
2667 sint curfragnum, length;
2668 u8 *pframe, *payload, mic[8];
2669 struct mic_data micdata;
2670 /* struct sta_info *stainfo; */
2671 struct pkt_attrib *pattrib = &pxmitframe->attrib;
2672 struct security_priv *psecuritypriv = &padapter->securitypriv;
2673 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2674 u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
2675 u8 hw_hdr_offset = 0;
2676 sint bmcst = IS_MCAST(pattrib->ra);
2677
2678 /*
2679 if(pattrib->psta)
2680 {
2681 stainfo = pattrib->psta;
2682 }
2683 else
2684 {
2685 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
2686 stainfo=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
2687 }
2688
2689 if(stainfo==NULL)
2690 {
2691 RTW_INFO("%s, psta==NUL\n", __func__);
2692 return _FAIL;
2693 }
2694
2695 if(!(stainfo->state &WIFI_ASOC_STATE))
2696 {
2697 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, stainfo->state);
2698 return _FAIL;
2699 }
2700 */
2701
2702
2703 #ifdef CONFIG_USB_TX_AGGREGATION
2704 hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
2705 #else
2706 #ifdef CONFIG_TX_EARLY_MODE
2707 hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE;
2708 #else
2709 hw_hdr_offset = TXDESC_OFFSET;
2710 #endif
2711 #endif
2712
2713 if (pattrib->encrypt == _TKIP_) { /* if(psecuritypriv->dot11PrivacyAlgrthm==_TKIP_PRIVACY_) */
2714 /* encode mic code */
2715 /* if(stainfo!= NULL) */
2716 {
2717 u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2718
2719 pframe = pxmitframe->buf_addr + hw_hdr_offset;
2720
2721 if (bmcst) {
2722 if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16) == _TRUE) {
2723 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2724 /* rtw_msleep_os(10); */
2725 return _FAIL;
2726 }
2727 /* start to calculate the mic code */
2728 rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
2729 } else {
2730 if (_rtw_memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16) == _TRUE) {
2731 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2732 /* rtw_msleep_os(10); */
2733 return _FAIL;
2734 }
2735 /* start to calculate the mic code */
2736 rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
2737 }
2738
2739 if (pframe[1] & 1) { /* ToDS==1 */
2740 rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */
2741 if (pframe[1] & 2) /* From Ds==1 */
2742 rtw_secmicappend(&micdata, &pframe[24], 6);
2743 else
2744 rtw_secmicappend(&micdata, &pframe[10], 6);
2745 } else { /* ToDS==0 */
2746 rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
2747 if (pframe[1] & 2) /* From Ds==1 */
2748 rtw_secmicappend(&micdata, &pframe[16], 6);
2749 else
2750 rtw_secmicappend(&micdata, &pframe[10], 6);
2751
2752 }
2753
2754 if (pattrib->qos_en)
2755 priority[0] = (u8)pxmitframe->attrib.priority;
2756
2757
2758 rtw_secmicappend(&micdata, &priority[0], 4);
2759
2760 payload = pframe;
2761
2762 for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
2763 payload = (u8 *)RND4((SIZE_PTR)(payload));
2764
2765 payload = payload + pattrib->hdrlen + pattrib->iv_len;
2766 if ((curfragnum + 1) == pattrib->nr_frags) {
2767 length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - ((pattrib->bswenc) ? pattrib->icv_len : 0);
2768 rtw_secmicappend(&micdata, payload, length);
2769 payload = payload + length;
2770 } else {
2771 length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - ((pattrib->bswenc) ? pattrib->icv_len : 0);
2772 rtw_secmicappend(&micdata, payload, length);
2773 payload = payload + length + pattrib->icv_len;
2774 }
2775 }
2776 rtw_secgetmic(&micdata, &(mic[0]));
2777 /* add mic code and add the mic code length in last_txcmdsz */
2778
2779 _rtw_memcpy(payload, &(mic[0]), 8);
2780 pattrib->last_txcmdsz += 8;
2781
2782 payload = payload - pattrib->last_txcmdsz + 8;
2783 }
2784 }
2785
2786
2787 return _SUCCESS;
2788 }
2789
2790 /*#define DBG_TX_SW_ENCRYPTOR*/
2791
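/*
 * Software encryption fallback: when pattrib->bswenc is set, dispatch the
 * frame to the per-cipher software encryptor (WEP, TKIP, CCMP(-256),
 * GCMP(-256), or SMS4 under WAPI).
 */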
2792 static s32 xmitframe_swencrypt(_adapter *padapter, struct xmit_frame *pxmitframe)
2793 {
2794 struct pkt_attrib *pattrib = &pxmitframe->attrib;
2795
2796
2797 if (!pattrib->bswenc)
2798 return _SUCCESS;
2799
2800 #ifdef DBG_TX_SW_ENCRYPTOR
2801 RTW_INFO(ADPT_FMT" - sec_type:%s DO SW encryption\n",
2802 ADPT_ARG(padapter), security_type_str(pattrib->encrypt));
2803 #endif
2804
2805 switch (pattrib->encrypt) {
2806 case _WEP40_:
2807 case _WEP104_:
2808 rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
2809 break;
2810 case _TKIP_:
2811 rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
2812 break;
2813 case _AES_:
2814 case _CCMP_256_:
2815 rtw_aes_encrypt(padapter, (u8 *)pxmitframe);
2816 break;
2817 case _GCMP_:
2818 case _GCMP_256_:
2819 rtw_gcmp_encrypt(padapter, (u8 *)pxmitframe);
2820 break;
2821 #ifdef CONFIG_WAPI_SUPPORT
2822 case _SMS4_:
2823 rtw_sms4_encrypt(padapter, (u8 *)pxmitframe);
2824 #endif
2825 default:
2826 break;
2827 }
2828
2829 return _SUCCESS;
2830 }
2831
2832 #if 0 //RTW_PHL_TX: mark un-finished codes for reading
2833 static s32 rtw_core_xmitframe_addmic(_adapter *padapter, struct xmit_frame *pxframe)
2834 {
2835 sint curfragnum, payload_length;
2836 u8 *pwlhdr, *payload, mic[8];
2837 struct mic_data micdata;
2838 /* struct sta_info *stainfo; */
2839 struct security_priv *psecuritypriv = &padapter->securitypriv;
2840 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2841 u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
2842 u8 hw_hdr_offset = 0;
2843 sint bmcst = IS_MCAST(pxframe->attrib.ra);
2844
2845 if (pxframe->attrib.encrypt == _TKIP_) { /* if(psecuritypriv->dot11PrivacyAlgrthm==_TKIP_PRIVACY_) */
2846 /* encode mic code */
2847 /* if(stainfo!= NULL) */
2848 {
2849 u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2850
2851 pwlhdr = pxframe->wlhdr[0];
2852 payload = pxframe->pkt->data + pxframe->attrib.pkt_hdrlen;
2853 payload_length = pxframe->pkt->len - pxframe->attrib.pkt_hdrlen;
2854
2855 if (bmcst) {
2856 if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16) == _TRUE) {
2857 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2858 /* rtw_msleep_os(10); */
2859 return _FAIL;
2860 }
2861 /* start to calculate the mic code */
2862 rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
2863 } else {
2864 if (_rtw_memcmp(&pxframe->attrib.psta->dot11tkiptxmickey.skey[0], null_key, 16) == _TRUE) {
2865 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2866 /* rtw_msleep_os(10); */
2867 return _FAIL;
2868 }
2869 /* start to calculate the mic code */
2870 rtw_secmicsetkey(&micdata, &pxframe->attrib.psta->dot11tkiptxmickey.skey[0]);
2871 }
2872
2873 if (pwlhdr[1] & 1) { /* ToDS==1 */
2874 rtw_secmicappend(&micdata, &pwlhdr[16], 6); /* DA */
2875 if (pwlhdr[1] & 2) /* From Ds==1 */
2876 rtw_secmicappend(&micdata, &pwlhdr[24], 6);
2877 else
2878 rtw_secmicappend(&micdata, &pwlhdr[10], 6);
2879 } else { /* ToDS==0 */
2880 rtw_secmicappend(&micdata, &pwlhdr[4], 6); /* DA */
2881 if (pwlhdr[1] & 2) /* From Ds==1 */
2882 rtw_secmicappend(&micdata, &pwlhdr[16], 6);
2883 else
2884 rtw_secmicappend(&micdata, &pwlhdr[10], 6);
2885
2886 }
2887
2888 if (pxframe->attrib.qos_en)
2889 priority[0] = (u8)pxframe->attrib.priority;
2890
2891 rtw_secmicappend(&micdata, &priority[0], 4);
2892
2893 payload = (u8 *)RND4((SIZE_PTR)(payload));
2894 rtw_secmicappend(&micdata, payload, payload_length);
2895
2896 rtw_secgetmic(&micdata, &(mic[0]));
2897 /* add mic code and add the mic code length in last_txcmdsz */
2898
2899 _rtw_memcpy(pxframe->wltail[0]+pxframe->attrib.icv_len, &(mic[0]), 8);
2900 }
2901 }
2902
2903 return _SUCCESS;
2904 }
2905
2906 /*#define DBG_TX_SW_ENCRYPTOR*/
2907
2908 static s32 rtw_core_xmitframe_swencrypt(_adapter *padapter, struct xmit_frame *pxframe)
2909 {
2910 if (pxframe->attrib.bswenc) {
2911 #ifdef DBG_TX_SW_ENCRYPTOR
2912 RTW_INFO(ADPT_FMT" - sec_type:%s DO SW encryption\n",
2913 ADPT_ARG(padapter), security_type_str(pxframe->attrib.encrypt));
2914 #endif
2915
2916 switch (pxframe->attrib.encrypt) {
2917 case _WEP40_:
2918 case _WEP104_:
2919 //rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
2920 break;
2921 case _TKIP_:
2922 //rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
2923 break;
2924 case _AES_:
2925 case _CCMP_256_:
2926 rtw_core_aes_encrypt(padapter, (u8 *)pxframe);
2927 break;
2928 case _GCMP_:
2929 case _GCMP_256_:
2930 //rtw_gcmp_encrypt(padapter, (u8 *)pxmitframe);
2931 break;
2932 #ifdef CONFIG_WAPI_SUPPORT
2933 case _SMS4_:
2934 //rtw_sms4_encrypt(padapter, (u8 *)pxmitframe);
2935 #endif
2936 default:
2937 break;
2938 }
2939
2940 }
2941 return _SUCCESS;
2942 }
2943
2944
2945 s32 rtw_core_make_wlanhdr(_adapter *padapter, u8 *hdr, struct xmit_frame *pxframe)
2946 {
2947 u16 *qc;
2948
2949 struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
2950 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2951 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2952 u8 qos_option = _FALSE;
2953 sint res = _SUCCESS;
2954 u16 *fctrl = &pwlanhdr->frame_ctl;
2955
2956 _rtw_memset(hdr, 0, WLANHDR_OFFSET);
2957
2958 set_frame_sub_type(fctrl, pxframe->attrib.subtype);
2959
2960 if (pxframe->attrib.subtype & WIFI_DATA_TYPE) {
2961 if (MLME_IS_STA(padapter)) {
2962 #ifdef CONFIG_TDLS
2963 if (pattrib->direct_link == _TRUE) {
2964 /* TDLS data transfer, ToDS=0, FrDs=0 */
2965 _rtw_memcpy(pwlanhdr->addr1, pxframe->attrib.dst, ETH_ALEN);
2966 _rtw_memcpy(pwlanhdr->addr2, pxframe->attrib.src, ETH_ALEN);
2967 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
2968
2969 if (pxframe->attrib.qos_en)
2970 qos_option = _TRUE;
2971 } else
2972 #endif /* CONFIG_TDLS */
2973 {
2974 /* to_ds = 1, fr_ds = 0; */
2975 /* 1.Data transfer to AP */
2976 /* 2. ARP pkt will be relayed by AP */
2977 SetToDs(fctrl);
2978 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
2979 _rtw_memcpy(pwlanhdr->addr2, pxframe->attrib.ta, ETH_ALEN);
2980 _rtw_memcpy(pwlanhdr->addr3, pxframe->attrib.dst, ETH_ALEN);
2981
2982 if (pqospriv->qos_option)
2983 qos_option = _TRUE;
2984 }
2985 } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)) {
2986 /* to_ds = 0, fr_ds = 1; */
2987 SetFrDs(fctrl);
2988 _rtw_memcpy(pwlanhdr->addr1, pxframe->attrib.dst, ETH_ALEN);
2989 _rtw_memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
2990 _rtw_memcpy(pwlanhdr->addr3, pxframe->attrib.src, ETH_ALEN);
2991
2992 if (pxframe->attrib.qos_en)
2993 qos_option = _TRUE;
2994 } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
2995 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
2996 _rtw_memcpy(pwlanhdr->addr1, pxframe->attrib.dst, ETH_ALEN);
2997 _rtw_memcpy(pwlanhdr->addr2, pxframe->attrib.ta, ETH_ALEN);
2998 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
2999
3000 if (pxframe->attrib.qos_en)
3001 qos_option = _TRUE;
3002 #ifdef CONFIG_RTW_MESH
3003 } else if (MLME_IS_STA(padapter)) {
3004 rtw_mesh_tx_build_whdr(padapter, pattrib, fctrl, pwlanhdr);
3005 if (pxframe->attrib.qos_en)
3006 qos_option = _TRUE;
3007 else {
3008 RTW_WARN("[%s] !qos_en in Mesh\n", __FUNCTION__);
3009 res = _FAIL;
3010 goto exit;
3011 }
3012 #endif
3013 } else {
3014 res = _FAIL;
3015 goto exit;
3016 }
3017
3018 if (pxframe->attrib.mdata)
3019 SetMData(fctrl);
3020
3021 if (pxframe->attrib.encrypt)
3022 SetPrivacy(fctrl);
3023
3024 if (qos_option) {
3025 qc = (unsigned short *)(hdr + pxframe->attrib.hdrlen - 2);
3026
3027 if (pxframe->attrib.priority)
3028 SetPriority(qc, pxframe->attrib.priority);
3029
3030 SetEOSP(qc, pxframe->attrib.eosp);
3031
3032 SetAckpolicy(qc, pxframe->attrib.ack_policy);
3033
3034 if (pxframe->attrib.amsdu)
3035 SetAMsdu(qc, pxframe->attrib.amsdu);
3036 #ifdef CONFIG_RTW_MESH
3037 if (MLME_IS_MESH(padapter)) {
3038 /* active: don't care, light sleep: 0, deep sleep: 1*/
3039 set_mps_lv(qc, 0); //TBD
3040
3041 /* TBD: temporary set (rspi, eosp) = (0, 1) which means End MPSP */
3042 set_rspi(qc, 0);
3043 SetEOSP(qc, 1);
3044
3045 set_mctrl_present(qc, 1);
3046 }
3047 #endif
3048 }
3049
3050 /* TODO: fill HT Control Field */
3051
3052 /* Update Seq Num will be handled by f/w */
3053 {
3054 struct sta_info *psta;
3055 psta = pxframe->attrib.psta;
3056
3057 if (psta == NULL) {
3058 RTW_INFO("%s, psta==NUL\n", __func__);
3059 return _FAIL;
3060 }
3061
3062 if (!(psta->state & WIFI_ASOC_STATE)) {
3063 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3064 return _FAIL;
3065 }
3066
3067 if (psta) {
3068 psta->sta_xmitpriv.txseq_tid[pxframe->attrib.priority]++;
3069 psta->sta_xmitpriv.txseq_tid[pxframe->attrib.priority] &= 0xFFF;
3070 pxframe->attrib.seqnum = psta->sta_xmitpriv.txseq_tid[pxframe->attrib.priority];
3071
3072 SetSeqNum(hdr, pxframe->attrib.seqnum);
3073
3074 #ifdef CONFIG_80211N_HT
3075 #if 0 /* move into update_attrib_phy_info(). */
3076 /* check if enable ampdu */
3077 if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
3078 if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
3079 pattrib->ampdu_en = _TRUE;
3080 }
3081 #endif
3082 /* re-check if enable ampdu by BA_starting_seqctrl */
3083 if (pxframe->attrib.ampdu_en == _TRUE) {
3084 u16 tx_seq;
3085
3086 tx_seq = psta->BA_starting_seqctrl[pxframe->attrib.priority & 0x0f];
3087
3088 /* check BA_starting_seqctrl */
3089 if (SN_LESS(pxframe->attrib.seqnum, tx_seq)) {
3090 /* RTW_INFO("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
3091 pxframe->attrib.ampdu_en = _FALSE;/* AGG BK */
3092 } else if (SN_EQUAL(pxframe->attrib.seqnum, tx_seq)) {
3093 psta->BA_starting_seqctrl[pxframe->attrib.priority & 0x0f] = (tx_seq + 1) & 0xfff;
3094
3095 pxframe->attrib.ampdu_en = _TRUE;/* AGG EN */
3096 } else {
3097 /* RTW_INFO("tx ampdu over run\n"); */
3098 psta->BA_starting_seqctrl[pxframe->attrib.priority & 0x0f] = (pxframe->attrib.seqnum + 1) & 0xfff;
3099 pxframe->attrib.ampdu_en = _TRUE;/* AGG EN */
3100 }
3101
3102 }
3103 #endif /* CONFIG_80211N_HT */
3104 }
3105 }
3106
3107 } else {
3108
3109 }
3110
3111 exit:
3112
3113
3114 return res;
3115 }
3116
3117
3118
3119
3120
3121 #endif
3122
3123 static void rtw_fill_htc_in_wlanhdr(_adapter *padapter, struct pkt_attrib *pattrib, u32 *phtc_buf)
3124 {
3125 #ifdef CONFIG_80211AX_HE
3126 rtw_he_fill_htc(padapter, pattrib, phtc_buf);
3127 #endif
3128 }
3129
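/*
 * Build the 802.11 data frame header from pkt_attrib: set ToDS/FrDS and the
 * address fields per role (STA/TDLS/WDS/AP/IBSS/mesh), fill the QoS control
 * and optional +HTC fields, then advance the per-TID sequence number and
 * re-evaluate the A-MPDU enable decision against BA_starting_seqctrl.
 */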
3130 s32 rtw_make_wlanhdr(_adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
3131 {
3132 u16 *qc;
3133 u32 *htc = NULL;
3134
3135 struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
3136 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3137 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
3138 u8 qos_option = _FALSE, htc_option = _FALSE;
3139 sint res = _SUCCESS;
3140 u16 *fctrl = &pwlanhdr->frame_ctl;
3141
3142 /* struct sta_info *psta; */
3143
3144 /* sint bmcst = IS_MCAST(pattrib->ra); */
3145
3146
3147 /*
3148 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3149 if(pattrib->psta != psta)
3150 {
3151 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
3152 return;
3153 }
3154
3155 if(psta==NULL)
3156 {
3157 RTW_INFO("%s, psta==NUL\n", __func__);
3158 return _FAIL;
3159 }
3160
3161 if(!(psta->state &WIFI_ASOC_STATE))
3162 {
3163 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3164 return _FAIL;
3165 }
3166 */
3167
3168 #ifdef RTW_PHL_TX
3169 _rtw_memset(hdr, 0, pattrib->hdrlen);
3170 #else
3171 _rtw_memset(hdr, 0, WLANHDR_OFFSET);
3172 #endif
3173
3174 set_frame_sub_type(fctrl, pattrib->subtype);
3175
3176 if (pattrib->subtype & WIFI_DATA_TYPE) {
3177 if (MLME_IS_STA(padapter)) {
3178 #ifdef CONFIG_TDLS
3179 if (pattrib->direct_link == _TRUE) {
3180 /* TDLS data transfer, ToDS=0, FrDs=0 */
3181 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3182 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3183 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3184
3185 if (pattrib->qos_en)
3186 qos_option = _TRUE;
3187 } else
3188 #endif /* CONFIG_TDLS */
3189 {
3190 #ifdef CONFIG_RTW_WDS
3191 if (pattrib->wds) {
3192 SetToDs(fctrl);
3193 SetFrDs(fctrl);
3194 _rtw_memcpy(pwlanhdr->addr1, pattrib->ra, ETH_ALEN);
3195 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3196 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3197 _rtw_memcpy(pwlanhdr->addr4, pattrib->src, ETH_ALEN);
3198 } else
3199 #endif
3200 {
3201 /* to_ds = 1, fr_ds = 0; */
3202 /* 1.Data transfer to AP */
3203 /* 2. ARP pkt will be relayed by AP */
3204 SetToDs(fctrl);
3205 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
3206 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3207 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3208 }
3209
3210 if (pqospriv->qos_option)
3211 qos_option = _TRUE;
3212 }
3213 } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)) {
3214 #ifdef CONFIG_RTW_WDS
3215 if (pattrib->wds) {
3216 SetToDs(fctrl);
3217 SetFrDs(fctrl);
3218 _rtw_memcpy(pwlanhdr->addr1, pattrib->ra, ETH_ALEN);
3219 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3220 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3221 _rtw_memcpy(pwlanhdr->addr4, pattrib->src, ETH_ALEN);
3222 } else
3223 #endif
3224 {
3225 /* to_ds = 0, fr_ds = 1; */
3226 SetFrDs(fctrl);
3227 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3228 _rtw_memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
3229 _rtw_memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
3230 }
3231
3232 if (pattrib->qos_en)
3233 qos_option = _TRUE;
3234 } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
3235 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
3236 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3237 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3238 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3239
3240 if (pattrib->qos_en)
3241 qos_option = _TRUE;
3242 #ifdef CONFIG_RTW_MESH
3243 } else if (check_fwstate(pmlmepriv, WIFI_MESH_STATE) == _TRUE) {
3244 rtw_mesh_tx_build_whdr(padapter, pattrib, fctrl, pwlanhdr);
3245 if (pattrib->qos_en)
3246 qos_option = _TRUE;
3247 else {
3248 RTW_WARN("[%s] !qos_en in Mesh\n", __FUNCTION__);
3249 res = _FAIL;
3250 goto exit;
3251 }
3252 #endif
3253 } else {
3254 res = _FAIL;
3255 goto exit;
3256 }
3257
3258 if (pattrib->mdata)
3259 SetMData(fctrl);
3260
3261 if (pattrib->encrypt)
3262 SetPrivacy(fctrl);
3263
3264 if (pattrib->order)
3265 htc_option = _TRUE;
3266
3267 if (qos_option) {
3268 qc = (unsigned short *)(hdr + (XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_LEN : WLAN_HDR_A3_LEN));
3269
3270 if (pattrib->priority)
3271 SetPriority(qc, pattrib->priority);
3272
3273 SetEOSP(qc, pattrib->eosp);
3274
3275 SetAckpolicy(qc, pattrib->ack_policy);
3276
3277 if (pattrib->amsdu)
3278 SetAMsdu(qc, pattrib->amsdu);
3279 #ifdef CONFIG_RTW_MESH
3280 if (MLME_IS_MESH(padapter)) {
3281 /* active: don't care, light sleep: 0, deep sleep: 1*/
3282 set_mps_lv(qc, 0); //TBD
3283
3284 /* TBD: temporary set (rspi, eosp) = (0, 1) which means End MPSP */
3285 set_rspi(qc, 0);
3286 SetEOSP(qc, 1);
3287
3288 set_mctrl_present(qc, 1);
3289 }
3290 #endif
3291 }
3292
3293 /* TODO: fill HT Control Field */
3294 if (htc_option == _TRUE) {
3295 set_htc_order_bit(fctrl);
3296
3297 htc = (u32 *)(hdr + pattrib->hdrlen - 4);
3298 rtw_fill_htc_in_wlanhdr(padapter, pattrib, htc);
3299 }
3300
3301 /* Update Seq Num will be handled by f/w */
3302 {
3303 struct sta_info *psta;
3304 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3305 if (pattrib->psta != psta) {
3306 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
3307 return _FAIL;
3308 }
3309
3310 if (psta == NULL) {
3311 RTW_INFO("%s, psta==NUL\n", __func__);
3312 return _FAIL;
3313 }
3314
3315 if (!(psta->state & WIFI_ASOC_STATE)) {
3316 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3317 return _FAIL;
3318 }
3319
3320
3321 if (psta) {
3322 psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
3323 psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
3324 pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
3325
3326 SetSeqNum(hdr, pattrib->seqnum);
3327
3328 #ifdef CONFIG_80211N_HT
3329 #if 0 /* move into update_attrib_phy_info(). */
3330 /* check if enable ampdu */
3331 if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
3332 if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
3333 pattrib->ampdu_en = _TRUE;
3334 }
3335 #endif
3336 /* re-check if enable ampdu by BA_starting_seqctrl */
3337 if (pattrib->ampdu_en == _TRUE) {
3338 u16 tx_seq;
3339
3340 tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
3341
3342 /* check BA_starting_seqctrl */
3343 if (SN_LESS(pattrib->seqnum, tx_seq)) {
3344 /* RTW_INFO("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
3345 pattrib->ampdu_en = _FALSE;/* AGG BK */
3346 } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
3347 psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq + 1) & 0xfff;
3348
3349 pattrib->ampdu_en = _TRUE;/* AGG EN */
3350 } else {
3351 /* RTW_INFO("tx ampdu over run\n"); */
3352 psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum + 1) & 0xfff;
3353 pattrib->ampdu_en = _TRUE;/* AGG EN */
3354 }
3355
3356 }
3357 #endif /* CONFIG_80211N_HT */
3358 }
3359 }
3360
3361 } else {
3362
3363 }
3364
3365 exit:
3366
3367
3368 return res;
3369 }
3370
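/* Return whether any of the per-AC pending queues (BE/BK/VI/VO) is non-empty. */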
3371 s32 rtw_txframes_pending(_adapter *padapter)
3372 {
3373 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
3374
3375 return ((_rtw_queue_empty(&pxmitpriv->be_pending) == _FALSE) ||
3376 (_rtw_queue_empty(&pxmitpriv->bk_pending) == _FALSE) ||
3377 (_rtw_queue_empty(&pxmitpriv->vi_pending) == _FALSE) ||
3378 (_rtw_queue_empty(&pxmitpriv->vo_pending) == _FALSE));
3379 }
3380
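/*
 * Return the queued frame count of the station's AC service queue selected by
 * the user priority (1,2 -> BK; 4,5 -> VI; 6,7 -> VO; 0,3 and others -> BE).
 */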
3381 s32 rtw_txframes_sta_ac_pending(_adapter *padapter, struct pkt_attrib *pattrib)
3382 {
3383 struct sta_info *psta;
3384 struct tx_servq *ptxservq;
3385 int priority = pattrib->priority;
3386 /*
3387 if(pattrib->psta)
3388 {
3389 psta = pattrib->psta;
3390 }
3391 else
3392 {
3393 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
3394 psta=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
3395 }
3396 */
3397 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3398 if (pattrib->psta != psta) {
3399 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
3400 return 0;
3401 }
3402
3403 if (psta == NULL) {
3404 RTW_INFO("%s, psta==NULL\n", __func__);
3405 return 0;
3406 }
3407
3408 if (!(psta->state & WIFI_ASOC_STATE)) {
3409 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3410 return 0;
3411 }
3412
3413 switch (priority) {
3414 case 1:
3415 case 2:
3416 ptxservq = &(psta->sta_xmitpriv.bk_q);
3417 break;
3418 case 4:
3419 case 5:
3420 ptxservq = &(psta->sta_xmitpriv.vi_q);
3421 break;
3422 case 6:
3423 case 7:
3424 ptxservq = &(psta->sta_xmitpriv.vo_q);
3425 break;
3426 case 0:
3427 case 3:
3428 default:
3429 ptxservq = &(psta->sta_xmitpriv.be_q);
3430 break;
3431
3432 }
3433
3434 return ptxservq->qcnt;
3435 }
3436
3437 #ifdef CONFIG_TDLS
3438
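/*
 * Append the action-specific TDLS IEs for ptxmgmt->action_code. A peer
 * sta_info is required except for discovery requests and tunneled probe
 * req/rsp, which may be built before the peer entry exists.
 */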
3439 int rtw_build_tdls_ies(_adapter *padapter, struct xmit_frame *pxmitframe, u8 *pframe, struct tdls_txmgmt *ptxmgmt)
3440 {
3441 struct pkt_attrib *pattrib = &pxmitframe->attrib;
3442 struct sta_info *ptdls_sta = NULL;
3443 int res = _SUCCESS;
3444
3445 ptdls_sta = rtw_get_stainfo((&padapter->stapriv), pattrib->dst);
3446 if (ptdls_sta == NULL) {
3447 switch (ptxmgmt->action_code) {
3448 case TDLS_DISCOVERY_REQUEST:
3449 case TUNNELED_PROBE_REQ:
3450 case TUNNELED_PROBE_RSP:
3451 break;
3452 default:
3453 RTW_INFO("[TDLS] %s - Direct Link Peer = "MAC_FMT" not found for action = %d\n", __func__, MAC_ARG(pattrib->dst), ptxmgmt->action_code);
3454 res = _FAIL;
3455 goto exit;
3456 }
3457 }
3458
3459 switch (ptxmgmt->action_code) {
3460 case TDLS_SETUP_REQUEST:
3461 rtw_build_tdls_setup_req_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3462 break;
3463 case TDLS_SETUP_RESPONSE:
3464 rtw_build_tdls_setup_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3465 break;
3466 case TDLS_SETUP_CONFIRM:
3467 rtw_build_tdls_setup_cfm_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3468 break;
3469 case TDLS_TEARDOWN:
3470 rtw_build_tdls_teardown_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3471 break;
3472 case TDLS_DISCOVERY_REQUEST:
3473 rtw_build_tdls_dis_req_ies(padapter, pxmitframe, pframe, ptxmgmt);
3474 break;
3475 case TDLS_PEER_TRAFFIC_INDICATION:
3476 rtw_build_tdls_peer_traffic_indication_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3477 break;
3478 #ifdef CONFIG_TDLS_CH_SW
3479 case TDLS_CHANNEL_SWITCH_REQUEST:
3480 rtw_build_tdls_ch_switch_req_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3481 break;
3482 case TDLS_CHANNEL_SWITCH_RESPONSE:
3483 rtw_build_tdls_ch_switch_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3484 break;
3485 #endif
3486 case TDLS_PEER_TRAFFIC_RESPONSE:
3487 rtw_build_tdls_peer_traffic_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3488 break;
3489 #ifdef CONFIG_WFD
3490 case TUNNELED_PROBE_REQ:
3491 rtw_build_tunneled_probe_req_ies(padapter, pxmitframe, pframe);
3492 break;
3493 case TUNNELED_PROBE_RSP:
3494 rtw_build_tunneled_probe_rsp_ies(padapter, pxmitframe, pframe);
3495 break;
3496 #endif /* CONFIG_WFD */
3497 default:
3498 res = _FAIL;
3499 break;
3500 }
3501
3502 exit:
3503 return res;
3504 }
3505
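/*
 * Build the 802.11 header for a TDLS frame. Setup, discovery and indication
 * actions are sent through the AP (ToDS=1, addr1=BSSID); channel-switch and
 * response actions (and teardown towards an unreachable peer) go over the
 * direct link and take their sequence number, AES IV/ICV lengths and mac_id
 * from the TDLS peer's sta_info.
 */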
3506 s32 rtw_make_tdls_wlanhdr(_adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib, struct tdls_txmgmt *ptxmgmt)
3507 {
3508 u16 *qc;
3509 struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
3510 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3511 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
3512 struct sta_priv *pstapriv = &padapter->stapriv;
3513 struct sta_info *psta = NULL, *ptdls_sta = NULL;
3514 u8 tdls_seq = 0, baddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3515
3516 sint res = _SUCCESS;
3517 u16 *fctrl = &pwlanhdr->frame_ctl;
3518
3519
3520 _rtw_memset(hdr, 0, WLANHDR_OFFSET);
3521
3522 set_frame_sub_type(fctrl, pattrib->subtype);
3523
3524 switch (ptxmgmt->action_code) {
3525 case TDLS_SETUP_REQUEST:
3526 case TDLS_SETUP_RESPONSE:
3527 case TDLS_SETUP_CONFIRM:
3528 case TDLS_PEER_TRAFFIC_INDICATION:
3529 case TDLS_PEER_PSM_REQUEST:
3530 case TUNNELED_PROBE_REQ:
3531 case TUNNELED_PROBE_RSP:
3532 case TDLS_DISCOVERY_REQUEST:
3533 SetToDs(fctrl);
3534 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
3535 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3536 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3537 break;
3538 case TDLS_CHANNEL_SWITCH_REQUEST:
3539 case TDLS_CHANNEL_SWITCH_RESPONSE:
3540 case TDLS_PEER_PSM_RESPONSE:
3541 case TDLS_PEER_TRAFFIC_RESPONSE:
3542 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3543 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3544 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3545 tdls_seq = 1;
3546 break;
3547 case TDLS_TEARDOWN:
3548 if (ptxmgmt->status_code == _RSON_TDLS_TEAR_UN_RSN_) {
3549 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3550 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3551 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3552 tdls_seq = 1;
3553 } else {
3554 SetToDs(fctrl);
3555 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
3556 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3557 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3558 }
3559 break;
3560 }
3561
3562 if (pattrib->encrypt)
3563 SetPrivacy(fctrl);
3564
3565 if (ptxmgmt->action_code == TDLS_PEER_TRAFFIC_RESPONSE)
3566 SetPwrMgt(fctrl);
3567
3568 if (pqospriv->qos_option) {
3569 qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
3570 if (pattrib->priority)
3571 SetPriority(qc, pattrib->priority);
3572 SetAckpolicy(qc, pattrib->ack_policy);
3573 }
3574
3575 psta = pattrib->psta;
3576
3577 /* 1. update seq_num per link by sta_info */
3578 /* 2. rewrite encrypt to _AES_, also rewrite iv_len, icv_len */
3579 if (tdls_seq == 1) {
3580 ptdls_sta = rtw_get_stainfo(pstapriv, pattrib->dst);
3581 if (ptdls_sta) {
3582 ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
3583 ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
3584 pattrib->seqnum = ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority];
3585 SetSeqNum(hdr, pattrib->seqnum);
3586
3587 if (pattrib->encrypt) {
3588 pattrib->encrypt = _AES_;
3589 pattrib->iv_len = 8;
3590 pattrib->icv_len = 8;
3591 pattrib->bswenc = _FALSE;
3592 }
3593 pattrib->mac_id = ptdls_sta->phl_sta->macid;
3594 } else {
3595 res = _FAIL;
3596 goto exit;
3597 }
3598 } else if (psta) {
3599 psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
3600 psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
3601 pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
3602 SetSeqNum(hdr, pattrib->seqnum);
3603 }
3604
3605
3606 exit:
3607
3608
3609 return res;
3610 }
3611
3612 s32 rtw_xmit_tdls_coalesce(_adapter *padapter, struct xmit_frame *pxmitframe, struct tdls_txmgmt *ptxmgmt)
3613 {
3614 s32 llc_sz;
3615
3616 u8 *pframe, *mem_start;
3617
3618 struct sta_info *psta;
3619 struct sta_priv *pstapriv = &padapter->stapriv;
3620 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3621 struct pkt_attrib *pattrib = &pxmitframe->attrib;
3622 u8 *pbuf_start;
3623 s32 bmcst = IS_MCAST(pattrib->ra);
3624 s32 res = _SUCCESS;
3625
3626
3627 if (pattrib->psta)
3628 psta = pattrib->psta;
3629 else {
3630 if (bmcst)
3631 psta = rtw_get_bcmc_stainfo(padapter);
3632 else
3633 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3634 }
3635
3636 if (psta == NULL) {
3637 res = _FAIL;
3638 goto exit;
3639 }
3640
3641 if (pxmitframe->buf_addr == NULL) {
3642 res = _FAIL;
3643 goto exit;
3644 }
3645
3646 pbuf_start = pxmitframe->buf_addr;
3647 mem_start = pbuf_start + TXDESC_OFFSET;
3648
3649 if (rtw_make_tdls_wlanhdr(padapter, mem_start, pattrib, ptxmgmt) == _FAIL) {
3650 res = _FAIL;
3651 goto exit;
3652 }
3653
3654 pframe = mem_start;
3655 pframe += pattrib->hdrlen;
3656
3657 /* add the IV, if necessary... */
3658 if (pattrib->iv_len) {
3659 if (psta != NULL) {
3660 switch (pattrib->encrypt) {
3661 case _WEP40_:
3662 case _WEP104_:
3663 WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
3664 break;
3665 case _TKIP_:
3666 if (bmcst)
3667 TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
3668 else
3669 TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
3670 break;
3671 case _AES_:
3672 if (bmcst)
3673 AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
3674 else
3675 AES_IV(pattrib->iv, psta->dot11txpn, 0);
3676 break;
3677 }
3678 }
3679
3680 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len);
3681 pframe += pattrib->iv_len;
3682
3683 }
3684
3685 llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
3686 pframe += llc_sz;
3687
3688 /* pattrib->pktlen will be counted in rtw_build_tdls_ies */
3689 pattrib->pktlen = 0;
3690
3691 rtw_build_tdls_ies(padapter, pxmitframe, pframe, ptxmgmt);
3692
3693 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
3694 pframe += pattrib->pktlen;
3695 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
3696 pframe += pattrib->icv_len;
3697 }
3698
3699 pattrib->nr_frags = 1;
3700 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + llc_sz +
3701 ((pattrib->bswenc) ? pattrib->icv_len : 0) + pattrib->pktlen;
3702
3703 if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
3704 res = _FAIL;
3705 goto exit;
3706 }
3707
3708 xmitframe_swencrypt(padapter, pxmitframe);
3709
3710 update_attrib_vcs_info(padapter, pxmitframe);
3711
3712 exit:
3713
3714
3715 return res;
3716 }
3717 #endif /* CONFIG_TDLS */
3718
3719 /*
3720 * Calculate the maximum 802.11 (WLAN) packet size from pkt_attrib.
3721 * This function does not take fragmentation into account.
3722 */
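/*
 * Illustrative example (not from the original source): a QoS data frame with
 * hdrlen 26, CCMP iv_len 8, no mesh control, pktlen 1500, no TKIP MIC and
 * HW encryption (bswenc == 0) gives 26 + 8 + 0 + (6 + 2) + 1500 + 0 + 0 = 1542 bytes.
 */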
3723 u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
3724 {
3725 u32 len = 0;
3726
3727 len = pattrib->hdrlen /* WLAN Header */
3728 + pattrib->iv_len /* IV */
3729 + XATTRIB_GET_MCTRL_LEN(pattrib)
3730 + SNAP_SIZE + sizeof(u16) /* LLC */
3731 + pattrib->pktlen
3732 + (pattrib->encrypt == _TKIP_ ? 8 : 0) /* MIC */
3733 + (pattrib->bswenc ? pattrib->icv_len : 0) /* ICV */
3734 ;
3735
3736 return len;
3737 }
3738
3739 #ifdef CONFIG_TX_AMSDU
3740 s32 check_amsdu(struct xmit_frame *pxmitframe)
3741 {
3742 struct pkt_attrib *pattrib;
3743 struct sta_info *psta = NULL;
3744 s32 ret = _TRUE;
3745
3746 if (!pxmitframe)
3747 return _FALSE; /* pattrib is dereferenced below; bail out on a NULL frame */
3748 
3749 pattrib = &pxmitframe->attrib;
3750 
3751 psta = rtw_get_stainfo(&pxmitframe->padapter->stapriv, &pattrib->ra[0]);
3752 if (psta) {
3753 if (psta->flags & WLAN_STA_AMSDU_DISABLE)
3754 ret = _FALSE;
3755 }
3756
3757 if (IS_MCAST(pattrib->ra))
3758 ret = _FALSE;
3759
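/* never aggregate EAPOL (0x888e), ARP (0x0806), WAPI (0x88b4) or DHCP frames, as checked below */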
3760 if ((pattrib->ether_type == 0x888e) ||
3761 (pattrib->ether_type == 0x0806) ||
3762 (pattrib->ether_type == 0x88b4) ||
3763 (pattrib->dhcp_pkt == 1))
3764 ret = _FALSE;
3765
3766 if ((pattrib->encrypt == _WEP40_) ||
3767 (pattrib->encrypt == _WEP104_) ||
3768 (pattrib->encrypt == _TKIP_))
3769 ret = _FALSE;
3770
3771 if (!pattrib->qos_en)
3772 ret = _FALSE;
3773
3774 if (IS_AMSDU_AMPDU_NOT_VALID(pattrib))
3775 ret = _FALSE;
3776
3777 return ret;
3778 }
3779
3780 s32 check_amsdu_tx_support(_adapter *padapter)
3781 {
3782 struct dvobj_priv *pdvobjpriv;
3783 int tx_amsdu;
3784 int tx_amsdu_rate;
3785 int current_tx_rate;
3786 s32 ret = _FALSE;
3787
3788 pdvobjpriv = adapter_to_dvobj(padapter);
3789 tx_amsdu = padapter->tx_amsdu;
3790 tx_amsdu_rate = padapter->tx_amsdu_rate;
3791 current_tx_rate = pdvobjpriv->traffic_stat.cur_tx_tp;
3792
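/* tx_amsdu == 1: always allow A-MSDU; tx_amsdu >= 2: allow it only when no rate
 * threshold is configured (tx_amsdu_rate == 0) or the current TX throughput exceeds it */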
3793 if (tx_amsdu == 1)
3794 ret = _TRUE;
3795 else if (tx_amsdu >= 2 && (tx_amsdu_rate == 0 || current_tx_rate > tx_amsdu_rate))
3796 ret = _TRUE;
3797 else
3798 ret = _FALSE;
3799
3800 return ret;
3801 }
3802
3803 s32 rtw_xmitframe_coalesce_amsdu(_adapter *padapter, struct xmit_frame *pxmitframe, struct xmit_frame *pxmitframe_queue)
3804 {
3805
3806 struct pkt_file pktfile;
3807 struct pkt_attrib *pattrib;
3808 struct sk_buff *pkt;
3809
3810 struct pkt_file pktfile_queue;
3811 struct pkt_attrib *pattrib_queue;
3812 struct sk_buff *pkt_queue;
3813
3814 s32 llc_sz, mem_sz;
3815
3816 s32 padding = 0;
3817
3818 u8 *pframe, *mem_start;
3819 u8 hw_hdr_offset;
3820
3821 u16 *len;
3822 u8 *pbuf_start;
3823 s32 res = _SUCCESS;
3824
3825 if (pxmitframe->buf_addr == NULL) {
3826 RTW_INFO("==> %s buf_addr==NULL\n", __FUNCTION__);
3827 return _FAIL;
3828 }
3829
3830
3831 pbuf_start = pxmitframe->buf_addr;
3832
3833 #ifdef CONFIG_USB_TX_AGGREGATION
3834 hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
3835 #else
3836 #ifdef CONFIG_TX_EARLY_MODE /* for SDIO && Tx Agg */
3837 hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE;
3838 #else
3839 hw_hdr_offset = TXDESC_OFFSET;
3840 #endif
3841 #endif
3842
3843 mem_start = pbuf_start + hw_hdr_offset; //for DMA
3844
3845 pattrib = &pxmitframe->attrib;
3846
3847 pattrib->amsdu = 1;
3848
3849 if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
3850 RTW_INFO("%s: rtw_make_wlanhdr fail; drop pkt\n", __func__);
3851 res = _FAIL;
3852 goto exit;
3853 }
3854
3855 llc_sz = 0;
3856
3857 pframe = mem_start;
3858
3859 //SetMFrag(mem_start);
3860 ClearMFrag(mem_start);
3861
3862 pframe += pattrib->hdrlen;
3863
3864 /* add the IV, if necessary... */
3865 if (pattrib->iv_len) {
3866 update_attrib_sec_iv_info(padapter, pattrib);
3867 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len); // queue or new?
3868
3869 RTW_DBG("%s: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n",
3870 __func__, padapter->securitypriv.dot11PrivacyKeyIndex,
3871 pattrib->iv[3], *pframe, *(pframe + 1), *(pframe + 2), *(pframe + 3));
3872
3873 pframe += pattrib->iv_len;
3874 }
3875
3876 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len;
3877
3878 if (pxmitframe_queue) {
3879 pattrib_queue = &pxmitframe_queue->attrib;
3880 pkt_queue = pxmitframe_queue->pkt;
3881
3882 _rtw_open_pktfile(pkt_queue, &pktfile_queue);
3883 _rtw_pktfile_read(&pktfile_queue, NULL, pattrib_queue->pkt_hdrlen);
3884
3885 #ifdef CONFIG_RTW_MESH
3886 if (MLME_IS_MESH(padapter)) {
3887 /* mDA(6), mSA(6), len(2), mctrl */
3888 _rtw_memcpy(pframe, pattrib_queue->mda, ETH_ALEN);
3889 pframe += ETH_ALEN;
3890 _rtw_memcpy(pframe, pattrib_queue->msa, ETH_ALEN);
3891 pframe += ETH_ALEN;
3892 len = (u16 *)pframe;
3893 pframe += 2;
3894 rtw_mesh_tx_build_mctrl(padapter, pattrib_queue, pframe);
3895 pframe += XATTRIB_GET_MCTRL_LEN(pattrib_queue);
3896 } else
3897 #endif
3898 {
3899 /* 802.3 MAC Header DA(6) SA(6) Len(2)*/
3900 _rtw_memcpy(pframe, pattrib_queue->dst, ETH_ALEN);
3901 pframe += ETH_ALEN;
3902 _rtw_memcpy(pframe, pattrib_queue->src, ETH_ALEN);
3903 pframe += ETH_ALEN;
3904 len = (u16 *)pframe;
3905 pframe += 2;
3906 }
3907
3908 llc_sz = rtw_put_snap(pframe, pattrib_queue->ether_type);
3909 pframe += llc_sz;
3910
3911 mem_sz = _rtw_pktfile_read(&pktfile_queue, pframe, pattrib_queue->pktlen);
3912 pframe += mem_sz;
3913
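/* A-MSDU subframe Length field: big-endian, covers mesh control (if any) + LLC/SNAP + MSDU payload */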
3914 *len = htons(XATTRIB_GET_MCTRL_LEN(pattrib_queue) + llc_sz + mem_sz);
3915
3916 //calc padding
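/* pad each A-MSDU subframe to a 4-byte boundary, e.g. a 62-byte subframe gets 2
 * padding bytes while a 64-byte one needs none (the "padding == 4" case below) */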
3917 padding = 4 - ((ETH_HLEN + XATTRIB_GET_MCTRL_LEN(pattrib_queue) + llc_sz + mem_sz) & (4-1));
3918 if (padding == 4)
3919 padding = 0;
3920
3921 //_rtw_memset(pframe,0xaa, padding);
3922 pframe += padding;
3923
3924 pattrib->last_txcmdsz += ETH_HLEN + XATTRIB_GET_MCTRL_LEN(pattrib_queue) + llc_sz + mem_sz + padding ;
3925 }
3926
3927 //2nd A-MSDU subframe (the current MSDU)
3928
3929 pkt = pxmitframe->pkt;
3930 _rtw_open_pktfile(pkt, &pktfile);
3931 _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
3932
3933 #ifdef CONFIG_RTW_MESH
3934 if (MLME_IS_MESH(padapter)) {
3935 /* mDA(6), mSA(6), len(2), mctrl */
3936 _rtw_memcpy(pframe, pattrib->mda, ETH_ALEN);
3937 pframe += ETH_ALEN;
3938 _rtw_memcpy(pframe, pattrib->msa, ETH_ALEN);
3939 pframe += ETH_ALEN;
3940 len = (u16 *)pframe;
3941 pframe += 2;
3942 rtw_mesh_tx_build_mctrl(padapter, pattrib, pframe);
3943 pframe += XATTRIB_GET_MCTRL_LEN(pattrib);
3944 } else
3945 #endif
3946 {
3947 /* 802.3 MAC Header DA(6) SA(6) Len(2) */
3948 _rtw_memcpy(pframe, pattrib->dst, ETH_ALEN);
3949 pframe += ETH_ALEN;
3950 _rtw_memcpy(pframe, pattrib->src, ETH_ALEN);
3951 pframe += ETH_ALEN;
3952 len = (u16 *)pframe;
3953 pframe += 2;
3954 }
3955
3956 llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
3957 pframe += llc_sz;
3958
3959 mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
3960
3961 pframe += mem_sz;
3962
3963 *len = htons(XATTRIB_GET_MCTRL_LEN(pattrib) + llc_sz + mem_sz);
3964
3965 //the last A-MSDU subframe needs no padding
3966 padding = 0;
3967
3968 pattrib->nr_frags = 1;
3969
3970 pattrib->last_txcmdsz += ETH_HLEN + XATTRIB_GET_MCTRL_LEN(pattrib) + llc_sz + mem_sz + padding +
3971 ((pattrib->bswenc) ? pattrib->icv_len : 0) ;
3972
3973 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
3974 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
3975 pframe += pattrib->icv_len;
3976 }
3977
3978 if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
3979 RTW_INFO("xmitframe_addmic(padapter, pxmitframe)==_FAIL\n");
3980 res = _FAIL;
3981 goto exit;
3982 }
3983
3984 xmitframe_swencrypt(padapter, pxmitframe);
3985
3986 update_attrib_vcs_info(padapter, pxmitframe);
3987
3988 exit:
3989 return res;
3990 }
3991 #endif /* CONFIG_TX_AMSDU */
3992
3993 /*
3994
3995 This sub-routine will perform all the following:
3996
3997 1. remove 802.3 header.
3998 2. create wlan_header, based on the info in pxmitframe
3999 3. append sta's iv/ext-iv
4000 4. append LLC
4001 5. move frag chunk from pframe to pxmitframe->mem
4002 6. apply sw-encrypt, if necessary.
4003
4004 */
4005 s32 rtw_xmitframe_coalesce(_adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
4006 {
4007 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
4008 struct pkt_file pktfile;
4009
4010 s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
4011
4012 SIZE_PTR addr;
4013
4014 u8 *pframe, *mem_start;
4015 u8 hw_hdr_offset;
4016
4017 /* struct sta_info *psta; */
4018 /* struct sta_priv *pstapriv = &padapter->stapriv; */
4019 /* struct mlme_priv *pmlmepriv = &padapter->mlmepriv; */
4020 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4021
4022 struct pkt_attrib *pattrib = &pxmitframe->attrib;
4023
4024 u8 *pbuf_start;
4025
4026 s32 bmcst = IS_MCAST(pattrib->ra);
4027 s32 res = _SUCCESS;
4028
4029
4030 /*
4031 if (pattrib->psta)
4032 {
4033 psta = pattrib->psta;
4034 } else
4035 {
4036 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
4037 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
4038 }
4039
4040 if(psta==NULL)
4041 {
4042
4043 RTW_INFO("%s, psta==NUL\n", __func__);
4044 return _FAIL;
4045 }
4046
4047
4048 if(!(psta->state &WIFI_ASOC_STATE))
4049 {
4050 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
4051 return _FAIL;
4052 }
4053 */
4054 if (pxmitframe->buf_addr == NULL) {
4055 RTW_INFO("==> %s buf_addr==NULL\n", __FUNCTION__);
4056 return _FAIL;
4057 }
4058
4059 pbuf_start = pxmitframe->buf_addr;
4060
4061 #if 0
4062 #ifdef CONFIG_USB_TX_AGGREGATION
4063 hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
4064 #else
4065 #ifdef CONFIG_TX_EARLY_MODE /* for SDIO && Tx Agg */
4066 hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE;
4067 #else
4068 hw_hdr_offset = TXDESC_OFFSET;
4069 #endif
4070 #endif
4071 #endif
4072 hw_hdr_offset = rtw_hal_get_txdesc_len(GET_PHL_COM(dvobj), pattrib); /*FPGA_test*/
4073
4074 mem_start = pbuf_start + hw_hdr_offset;
4075
4076 if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
4077 RTW_INFO("%s: rtw_make_wlanhdr fail; drop pkt\n", __func__);
4078 res = _FAIL;
4079 goto exit;
4080 }
4081
4082 _rtw_open_pktfile(pkt, &pktfile);
4083 _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
4084
4085 frg_inx = 0;
4086 frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
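/* the 4 bytes subtracted above are presumably reserved for the FCS appended by HW */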
4087
4088 while (1) {
4089 llc_sz = 0;
4090
4091 mpdu_len = frg_len;
4092
4093 pframe = mem_start;
4094
4095 SetMFrag(mem_start);
4096
4097 pframe += pattrib->hdrlen;
4098 mpdu_len -= pattrib->hdrlen;
4099
4100 /* add the IV, if necessary... */
4101 if (pattrib->iv_len) {
4102 update_attrib_sec_iv_info(padapter, pattrib);
4103 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len);
4104
4105
4106 pframe += pattrib->iv_len;
4107
4108 mpdu_len -= pattrib->iv_len;
4109 }
4110
4111 if (frg_inx == 0) {
4112 #ifdef CONFIG_RTW_MESH
4113 if (MLME_IS_MESH(padapter)) {
4114 rtw_mesh_tx_build_mctrl(padapter, pattrib, pframe);
4115 pframe += XATTRIB_GET_MCTRL_LEN(pattrib);
4116 mpdu_len -= XATTRIB_GET_MCTRL_LEN(pattrib);
4117 }
4118 #endif
4119
4120 llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
4121 pframe += llc_sz;
4122 mpdu_len -= llc_sz;
4123 }
4124
4125 if ((pattrib->icv_len > 0) && (pattrib->bswenc))
4126 mpdu_len -= pattrib->icv_len;
4127
4128
4129 if (bmcst) {
4130 /* don't fragment broadcast/multicast packets */
4131 mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
4132 } else
4133 mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
4134
4135 pframe += mem_sz;
4136
4137 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
4138 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
4139 pframe += pattrib->icv_len;
4140 }
4141
4142 frg_inx++;
4143
4144 if (bmcst || (rtw_endofpktfile(&pktfile) == _TRUE)) {
4145 pattrib->nr_frags = frg_inx;
4146
4147 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len +
4148 ((pattrib->nr_frags == 1) ? (XATTRIB_GET_MCTRL_LEN(pattrib) + llc_sz) : 0) +
4149 ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz;
4150
4151 ClearMFrag(mem_start);
4152
4153 break;
4154 }
4155
4156 addr = (SIZE_PTR)(pframe);
4157
4158 mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
4159 _rtw_memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
4160
4161 }
4162
4163 if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
4164 RTW_INFO("xmitframe_addmic(padapter, pxmitframe)==_FAIL\n");
4165 res = _FAIL;
4166 goto exit;
4167 }
4168
4169 xmitframe_swencrypt(padapter, pxmitframe);
4170
4171 if (bmcst == _FALSE)
4172 update_attrib_vcs_info(padapter, pxmitframe);
4173 else
4174 pattrib->vcs_mode = NONE_VCS;
4175
4176 exit:
4177
4178
4179 return res;
4180 }
4181
4182 #if defined(CONFIG_IEEE80211W) || defined(CONFIG_RTW_MESH)
4183 /*
4184 * CCMP encryption for unicast robust mgmt frames and broadcast group privacy action frames
4185 * BIP for broadcast robust mgmt frame
4186 */
4187 s32 rtw_mgmt_xmitframe_coalesce(_adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
4188 {
4189 #define DBG_MGMT_XMIT_COALESEC_DUMP 0
4190 #define DBG_MGMT_XMIT_BIP_DUMP 0
4191 #define DBG_MGMT_XMIT_ENC_DUMP 0
4192
4193 struct pkt_file pktfile;
4194 s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
4195 SIZE_PTR addr;
4196 u8 *pframe, *mem_start = NULL, *tmp_buf = NULL;
4197 u8 hw_hdr_offset, subtype ;
4198 u8 category = 0xFF;
4199 struct sta_info *psta = NULL;
4200 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4201 struct pkt_attrib *pattrib = &pxmitframe->attrib;
4202 u8 *pbuf_start;
4203 s32 bmcst = IS_MCAST(pattrib->ra);
4204 s32 res = _FAIL;
4205 u8 *BIP_AAD = NULL;
4206 u8 *MGMT_body = NULL;
4207
4208 struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
4209 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
4210 struct rtw_ieee80211_hdr *pwlanhdr;
4211 u8 mme_cont[_MME_IE_LENGTH_ - 2];
4212 u8 mme_clen;
4213
4214 u32 ori_len;
4215 union pn48 *pn = NULL;
4216 enum security_type cipher = _NO_PRIVACY_;
4217 u8 kid;
4218
4219 if (pxmitframe->buf_addr == NULL) {
4220 RTW_WARN(FUNC_ADPT_FMT" pxmitframe->buf_addr == NULL\n"
4221 , FUNC_ADPT_ARG(padapter));
4222 return _FAIL;
4223 }
4224
4225 mem_start = pframe = (u8 *)(pxmitframe->buf_addr) + TXDESC_OFFSET;
4226 pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
4227 subtype = get_frame_sub_type(pframe); /* bit(7)~bit(2) */
4228
4229 /* check if robust mgmt frame */
4230 if (subtype != WIFI_DEAUTH && subtype != WIFI_DISASSOC && subtype != WIFI_ACTION)
4231 return _SUCCESS;
4232 if (subtype == WIFI_ACTION) {
4233 category = *(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
4234 if (CATEGORY_IS_NON_ROBUST(category))
4235 return _SUCCESS;
4236 }
4237 if (!bmcst) {
4238 if (pattrib->psta)
4239 psta = pattrib->psta;
4240 else
4241 pattrib->psta = psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
4242 if (psta == NULL) {
4243 RTW_INFO(FUNC_ADPT_FMT" unicast sta == NULL\n", FUNC_ADPT_ARG(padapter));
4244 return _FAIL;
4245 }
4246 if (!(psta->flags & WLAN_STA_MFP)) {
4247 /* peer is not MFP capable, no need to encrypt */
4248 return _SUCCESS;
4249 }
4250 if (psta->bpairwise_key_installed != _TRUE) {
4251 RTW_INFO(FUNC_ADPT_FMT" PTK is not installed\n"
4252 , FUNC_ADPT_ARG(padapter));
4253 return _FAIL;
4254 }
4255 }
4256
4257 ori_len = BIP_AAD_SIZE + pattrib->pktlen + _MME_IE_LENGTH_;
4258 tmp_buf = BIP_AAD = rtw_zmalloc(ori_len);
4259 if (BIP_AAD == NULL)
4260 return _FAIL;
4261
4262 _rtw_spinlock_bh(&padapter->security_key_mutex);
4263
4264 if (bmcst) {
4265 if (subtype == WIFI_ACTION && CATEGORY_IS_GROUP_PRIVACY(category)) {
4266 /* broadcast group privacy action frame */
4267 #if DBG_MGMT_XMIT_COALESEC_DUMP
4268 RTW_INFO(FUNC_ADPT_FMT" broadcast gp action(%u)\n"
4269 , FUNC_ADPT_ARG(padapter), category);
4270 #endif
4271
4272 if (pattrib->psta)
4273 psta = pattrib->psta;
4274 else
4275 pattrib->psta = psta = rtw_get_bcmc_stainfo(padapter);
4276 if (psta == NULL) {
4277 RTW_INFO(FUNC_ADPT_FMT" broadcast sta == NULL\n"
4278 , FUNC_ADPT_ARG(padapter));
4279 goto xmitframe_coalesce_fail;
4280 }
4281 if (padapter->securitypriv.binstallGrpkey != _TRUE) {
4282 RTW_INFO(FUNC_ADPT_FMT" GTK is not installed\n"
4283 , FUNC_ADPT_ARG(padapter));
4284 goto xmitframe_coalesce_fail;
4285 }
4286
4287 pn = &psta->dot11txpn;
4288 cipher = padapter->securitypriv.dot118021XGrpPrivacy;
4289 kid = padapter->securitypriv.dot118021XGrpKeyid;
4290 } else {
4291 #ifdef CONFIG_IEEE80211W
4292 /* broadcast robust mgmt frame, using BIP */
4293 int frame_body_len;
4294 u8 mic[16];
4295
4296 /* IGTK is not installed, e.g. mesh MFP without IGTK */
4297 if (SEC_IS_BIP_KEY_INSTALLED(&padapter->securitypriv) != _TRUE)
4298 goto xmitframe_coalesce_success;
4299
4300 #if DBG_MGMT_XMIT_COALESEC_DUMP
4301 if (subtype == WIFI_DEAUTH)
4302 RTW_INFO(FUNC_ADPT_FMT" broadcast deauth\n", FUNC_ADPT_ARG(padapter));
4303 else if (subtype == WIFI_DISASSOC)
4304 RTW_INFO(FUNC_ADPT_FMT" broadcast disassoc\n", FUNC_ADPT_ARG(padapter));
4305 else if (subtype == WIFI_ACTION) {
4306 RTW_INFO(FUNC_ADPT_FMT" broadcast action(%u)\n"
4307 , FUNC_ADPT_ARG(padapter), category);
4308 }
4309 #endif
4310
4311 /* HW encryption needs the encrypt type recorded */
4312 pattrib->encrypt = padapter->securitypriv.dot11wCipher;
4313
4314 _rtw_memset(mme_cont, 0, _MME_IE_LENGTH_ - 2);
4315 mme_clen = padapter->securitypriv.dot11wCipher == _BIP_CMAC_128_ ? 16 : 24;
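/* MME content length: KeyID(2) + IPN(6) + MIC(8) = 16 for BIP-CMAC-128,
 * or KeyID(2) + IPN(6) + MIC(16) = 24 for the other BIP ciphers */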
4316
4317 MGMT_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
4318 pframe += pattrib->pktlen;
4319
4320 /* octets 0 and 1 hold the key index; the BIP keyid is 4 or 5, so only octet 0 (the LSB) is needed */
4321 mme_cont[0] = padapter->securitypriv.dot11wBIPKeyid;
4322 /* increase PN and apply to packet */
4323 padapter->securitypriv.dot11wBIPtxpn.val++;
4324 RTW_PUT_LE64(&mme_cont[2], padapter->securitypriv.dot11wBIPtxpn.val);
4325
4326 /* add MME IE with MIC all zero, MME string doesn't include element id and length */
4327 pframe = rtw_set_ie(pframe, _MME_IE_, mme_clen, mme_cont, &(pattrib->pktlen));
4328 pattrib->last_txcmdsz = pattrib->pktlen;
4329
4330 if (pattrib->encrypt &&
4331 (padapter->securitypriv.sw_encrypt == _TRUE || padapter->securitypriv.hw_decrypted == _FALSE)) {
4332 pattrib->bswenc = _TRUE;
4333 } else {
4334 /* currently HW only support _BIP_CMAC_128_ */
4335 if (pattrib->encrypt == _BIP_CMAC_128_)
4336 pattrib->bswenc = _FALSE;
4337 else
4338 pattrib->bswenc = _TRUE;
4339 }
4340
4341 if (!pattrib->bswenc) {
4342 pattrib->key_idx = padapter->securitypriv.dot11wBIPKeyid;
4343 /*Don't need to append MIC part of MME*/
4344 pattrib->pktlen -= (mme_clen == 16 ? 8 : 16);
4345 pattrib->last_txcmdsz = pattrib->pktlen;
4346 goto xmitframe_coalesce_success;
4347 }
4348
4349 /* total frame length - header length */
4350 frame_body_len = pattrib->pktlen - sizeof(struct rtw_ieee80211_hdr_3addr);
4351
4352 /* construct AAD: copy the frame control field */
4353 _rtw_memcpy(BIP_AAD, &pwlanhdr->frame_ctl, 2);
4354 ClearRetry(BIP_AAD);
4355 ClearPwrMgt(BIP_AAD);
4356 ClearMData(BIP_AAD);
4357 /* construct AAD: copy address 1 through address 3 */
4358 _rtw_memcpy(BIP_AAD + 2, pwlanhdr->addr1, 18);
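/* BIP AAD layout: Frame Control (with Retry/PwrMgt/MoreData masked) || A1 || A2 || A3, 20 bytes in total */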
4359 /* copy the management frame body */
4360 _rtw_memcpy(BIP_AAD + BIP_AAD_SIZE, MGMT_body, frame_body_len);
4361
4362 #if DBG_MGMT_XMIT_BIP_DUMP
4363 /* dump total packet include MME with zero MIC */
4364 {
4365 int i;
4366 printk("Total packet: ");
4367 for (i = 0; i < BIP_AAD_SIZE + frame_body_len; i++)
4368 printk(" %02x ", BIP_AAD[i]);
4369 printk("\n");
4370 }
4371 #endif
4372
4373 /* calculate mic */
4374 if (rtw_calculate_bip_mic(padapter->securitypriv.dot11wCipher,
4375 (u8 *)pwlanhdr, pattrib->pktlen,
4376 padapter->securitypriv.dot11wBIPKey[padapter->securitypriv.dot11wBIPKeyid].skey,
4377 BIP_AAD, (BIP_AAD_SIZE + frame_body_len), mic) == _FAIL)
4378 goto xmitframe_coalesce_fail;
4379
4380 #if DBG_MGMT_XMIT_BIP_DUMP
4381 /* dump calculated mic result */
4382 {
4383 int i;
4384 printk("Calculated mic result: ");
4385 for (i = 0; i < 16; i++)
4386 printk(" %02x ", mic[i]);
4387 printk("\n");
4388 }
4389 #endif
4390
4391 /* copy the BIP MIC: BIP-CMAC-128 uses only the first 8 bytes (bits 0~63) of the tag, other BIP ciphers use all 16 bytes */
4392 if (padapter->securitypriv.dot11wCipher == _BIP_CMAC_128_)
4393 _rtw_memcpy(pframe - 8, mic, 8);
4394 else
4395 _rtw_memcpy(pframe - 16, mic, 16);
4396
4397 #if DBG_MGMT_XMIT_BIP_DUMP
4398 /*dump all packet after mic ok */
4399 {
4400 int pp;
4401 printk("pattrib->pktlen = %d\n", pattrib->pktlen);
4402 for (pp = 0; pp < pattrib->pktlen; pp++)
4403 printk(" %02x ", mem_start[pp]);
4404 printk("\n");
4405 }
4406 #endif
4407
4408 #endif /* CONFIG_IEEE80211W */
4409
4410 goto xmitframe_coalesce_success;
4411 }
4412 } else {
4413 /* unicast robust mgmt frame */
4414 #if DBG_MGMT_XMIT_COALESEC_DUMP
4415 if (subtype == WIFI_DEAUTH) {
4416 RTW_INFO(FUNC_ADPT_FMT" unicast deauth to "MAC_FMT"\n"
4417 , FUNC_ADPT_ARG(padapter), MAC_ARG(pattrib->ra));
4418 } else if (subtype == WIFI_DISASSOC) {
4419 RTW_INFO(FUNC_ADPT_FMT" unicast disassoc to "MAC_FMT"\n"
4420 , FUNC_ADPT_ARG(padapter), MAC_ARG(pattrib->ra));
4421 } else if (subtype == WIFI_ACTION) {
4422 RTW_INFO(FUNC_ADPT_FMT" unicast action(%u) to "MAC_FMT"\n"
4423 , FUNC_ADPT_ARG(padapter), category, MAC_ARG(pattrib->ra));
4424 }
4425 #endif
4426
4427 pn = &psta->dot11txpn;
4428 cipher = psta->dot118021XPrivacy;
4429 kid = 0;
4430
4431 _rtw_memcpy(pattrib->dot118021x_UncstKey.skey
4432 , psta->dot118021x_UncstKey.skey
4433 , (cipher & _SEC_TYPE_256_) ? 32 : 16);
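/* 256-bit ciphers (CCMP-256/GCMP-256) use a 32-byte pairwise key, the others 16 bytes */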
4434
4435 /* To use wrong key */
4436 if (pattrib->key_type == IEEE80211W_WRONG_KEY) {
4437 RTW_INFO("use wrong key\n");
4438 pattrib->dot118021x_UncstKey.skey[0] = 0xff;
4439 }
4440 }
4441
4442 #if DBG_MGMT_XMIT_ENC_DUMP
4443 /* before encrypt dump the management packet content */
4444 {
4445 int i;
4446 printk("Management pkt: ");
4447 for (i = 0; i < pattrib->pktlen; i++)
4448 printk(" %02x ", pframe[i]);
4449 printk("=======\n");
4450 }
4451 #endif
4452
4453 /* back up the original management packet */
4454 _rtw_memcpy(tmp_buf, pframe, pattrib->pktlen);
4455 /* move to data portion */
4456 pframe += pattrib->hdrlen;
4457
4458 if (pattrib->key_type != IEEE80211W_NO_KEY) {
4459 pattrib->encrypt = cipher;
4460 pattrib->bswenc = _TRUE;
4461 }
4462
4463 /*
4464 * 802.11w encrypted management packet must be:
4465 * _AES_, _CCMP_256_, _GCMP_, _GCMP_256_
4466 */
4467 switch (pattrib->encrypt) {
4468 case _AES_:
4469 pattrib->iv_len = 8;
4470 pattrib->icv_len = 8;
4471 AES_IV(pattrib->iv, (*pn), kid);
4472 break;
4473 case _CCMP_256_:
4474 pattrib->iv_len = 8;
4475 pattrib->icv_len = 16;
4476 AES_IV(pattrib->iv, (*pn), kid);
4477 break;
4478 case _GCMP_:
4479 case _GCMP_256_:
4480 pattrib->iv_len = 8;
4481 pattrib->icv_len = 16;
4482 GCMP_IV(pattrib->iv, (*pn), kid);
4483 break;
4484 default:
4485 goto xmitframe_coalesce_fail;
4486 }
4487
4488 /* insert iv header into management frame */
4489 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len);
4490 pframe += pattrib->iv_len;
4491 /* copy mgmt data portion after CCMP header */
4492 _rtw_memcpy(pframe, tmp_buf + pattrib->hdrlen, pattrib->pktlen - pattrib->hdrlen);
4493 /* move pframe to end of mgmt pkt */
4494 pframe += pattrib->pktlen - pattrib->hdrlen;
4495 /* add 8 bytes CCMP IV header to length */
4496 pattrib->pktlen += pattrib->iv_len;
4497
4498 #if DBG_MGMT_XMIT_ENC_DUMP
4499 /* dump management packet include AES IV header */
4500 {
4501 int i;
4502 printk("Management pkt + IV: ");
4503 /* for(i=0; i<pattrib->pktlen; i++) */
4504
4505 printk("@@@@@@@@@@@@@\n");
4506 }
4507 #endif
4508
4509 if (pattrib->encrypt &&
4510 (padapter->securitypriv.sw_encrypt == _TRUE || psta->hw_decrypted == _FALSE)) {
4511 pattrib->bswenc = _TRUE;
4512 } else {
4513 /* only right key can use HW encrypt */
4514 if (pattrib->key_type == IEEE80211W_RIGHT_KEY)
4515 pattrib->bswenc = _FALSE;
4516 else
4517 pattrib->bswenc = _TRUE;
4518 }
4519
4520 /* at the moment the security CAM may be cleaned already --> use SW encryption */
4521 if (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)
4522 pattrib->bswenc = _TRUE;
4523 if (!pattrib->bswenc) {
4524 pattrib->key_idx = kid;
4525 pattrib->last_txcmdsz = pattrib->pktlen;
4526 SetPrivacy(mem_start);
4527 goto xmitframe_coalesce_success;
4528 }
4529
4530 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
4531 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
4532 pframe += pattrib->icv_len;
4533 }
4534 /* add the MIC/ICV length (8 or 16 bytes, depending on the cipher) */
4535 pattrib->pktlen += pattrib->icv_len;
4536 /* set final tx command size */
4537 pattrib->last_txcmdsz = pattrib->pktlen;
4538
4539 /* the Protected bit must be set before SW encryption */
4540 SetPrivacy(mem_start);
4541
4542 #if DBG_MGMT_XMIT_ENC_DUMP
4543 /* dump management packet include AES header */
4544 {
4545 int i;
4546 printk("prepare to enc Management pkt + IV: ");
4547 for (i = 0; i < pattrib->pktlen; i++)
4548 printk(" %02x ", mem_start[i]);
4549 printk("@@@@@@@@@@@@@\n");
4550 }
4551 #endif
4552
4553 /* software encrypt */
4554 /* move to core_wlan_sw_encrypt() because of new txreq architecture */
4555
4556 xmitframe_coalesce_success:
4557 _rtw_spinunlock_bh(&padapter->security_key_mutex);
4558 rtw_mfree(BIP_AAD, ori_len);
4559 return _SUCCESS;
4560
4561 xmitframe_coalesce_fail:
4562 _rtw_spinunlock_bh(&padapter->security_key_mutex);
4563 rtw_mfree(BIP_AAD, ori_len);
4564
4565 return _FAIL;
4566 }
4567 #endif /* defined(CONFIG_IEEE80211W) || defined(CONFIG_RTW_MESH) */
4568
4569 /* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
4570 * IEEE LLC/SNAP header contains 8 octets
4571 * First 3 octets comprise the LLC portion
4572 * SNAP portion, 5 octets, is divided into two fields:
4573 * Organizationally Unique Identifier(OUI), 3 octets,
4574 * type, defined by that organization, 2 octets.
4575 */
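/*
 * Illustrative example (not from the original source): for h_proto 0x0800 (IPv4)
 * with the RFC1042 OUI, the resulting 8-octet header is
 *   DSAP  SSAP  CTRL  OUI[0..2]   TYPE
 *   0xaa  0xaa  0x03  00 00 00    08 00
 * and rtw_put_snap() returns the header length, SNAP_SIZE + sizeof(u16).
 */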
4576 s32 rtw_put_snap(u8 *data, u16 h_proto)
4577 {
4578 struct ieee80211_snap_hdr *snap;
4579 u8 *oui;
4580
4581
4582 snap = (struct ieee80211_snap_hdr *)data;
4583 snap->dsap = 0xaa;
4584 snap->ssap = 0xaa;
4585 snap->ctrl = 0x03;
4586
4587 if (h_proto == 0x8137 || h_proto == 0x80f3)
4588 oui = P802_1H_OUI;
4589 else
4590 oui = RFC1042_OUI;
4591
4592 snap->oui[0] = oui[0];
4593 snap->oui[1] = oui[1];
4594 snap->oui[2] = oui[2];
4595
4596 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
4597
4598
4599 return SNAP_SIZE + sizeof(u16);
4600 }
4601
4602 void rtw_update_protection(_adapter *padapter, u8 *ie, uint ie_len)
4603 {
4604
4605 uint protection;
4606 u8 *perp;
4607 sint erp_len;
4608 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4609 struct registry_priv *pregistrypriv = &padapter->registrypriv;
4610
4611
4612 switch (pxmitpriv->vcs_setting) {
4613 case DISABLE_VCS:
4614 pxmitpriv->vcs = NONE_VCS;
4615 break;
4616
4617 case ENABLE_VCS:
4618 break;
4619
4620 case AUTO_VCS:
4621 default:
4622 perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
4623 if (perp == NULL)
4624 pxmitpriv->vcs = NONE_VCS;
4625 else {
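/* perp points at the ERP Information IE: the first data octet follows the
 * ID and length bytes, and BIT(1) of it is the Use_Protection flag */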
4626 protection = (*(perp + 2)) & BIT(1);
4627 if (protection) {
4628 if (pregistrypriv->vcs_type == RTS_CTS)
4629 pxmitpriv->vcs = RTS_CTS;
4630 else
4631 pxmitpriv->vcs = CTS_TO_SELF;
4632 } else
4633 pxmitpriv->vcs = NONE_VCS;
4634 }
4635
4636 break;
4637
4638 }
4639
4640
4641 }
4642
4643 #ifdef CONFIG_CORE_TXSC
4644 void rtw_count_tx_stats_tx_req(_adapter *padapter, struct rtw_xmit_req *txreq, struct sta_info *psta)
4645 {
4646 struct stainfo_stats *pstats = NULL;
4647 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4648 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
4649 u32 sz = 0;
4650
4651 if (txreq->mdata.type == RTW_PHL_PKT_TYPE_DATA) {
4652 pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
4653 pxmitpriv->tx_pkts++;
4654 sz = txreq->mdata.pktlen - RTW_SZ_LLC - txreq->mdata.hdr_len;
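/* subtract the per-MPDU security header (IV/PN) length below so tx_bytes reflects payload only */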
4655 switch (txreq->mdata.sec_type) {
4656 case RTW_ENC_WEP104:
4657 case RTW_ENC_WEP40:
4658 sz -= 4;
4659 break;
4660 case RTW_ENC_TKIP:
4661 sz -= 8;
4662 break;
4663 case RTW_ENC_CCMP:
4664 sz -= 8;
4665 break;
4666 case RTW_ENC_WAPI:
4667 sz -= 18;
4668 break;
4669 case RTW_ENC_GCMP256:
4670 case RTW_ENC_GCMP:
4671 case RTW_ENC_CCMP256:
4672 sz -= 8;
4673 break;
4674 default:
4675 break;
4676 }
4677 pxmitpriv->tx_bytes += sz;
4678 if (psta) {
4679 pstats = &psta->sta_stats;
4680 pstats->tx_pkts++;
4681 pstats->tx_bytes += sz;
4682 #if 0
4683 if (is_multicast_mac_addr(psta->phl_sta->mac_addr))
4684 pxmitpriv->tx_mc_pkts++;
4685 else if (is_broadcast_mac_addr(psta->phl_sta->mac_addr))
4686 pxmitpriv->tx_bc_pkts++;
4687 else
4688 pxmitpriv->tx_uc_pkts++;
4689 #endif
4690 }
4691 }
4692 }
4693 #endif
4694
4695 void rtw_count_tx_stats(_adapter *padapter, struct xmit_frame *pxmitframe, int sz)
4696 {
4697 struct sta_info *psta = NULL;
4698 struct stainfo_stats *pstats = NULL;
4699 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4700 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
4701
4702
4703 if (pxmitframe->xftype == RTW_TX_OS) {
4704 pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
4705 pxmitpriv->tx_pkts++;
4706 pxmitpriv->tx_bytes += sz;
4707
4708 psta = pxmitframe->attrib.psta;
4709 if (psta) {
4710 pstats = &psta->sta_stats;
4711
4712 pstats->tx_pkts++;
4713 pstats->tx_bytes += sz;
4714 #if defined(CONFIG_CHECK_LEAVE_LPS) && defined(CONFIG_LPS_CHK_BY_TP)
4715 if (adapter_to_pwrctl(padapter)->lps_chk_by_tp)
4716 traffic_check_for_leave_lps_by_tp(padapter, _TRUE, psta);
4717 #endif /* CONFIG_LPS */
4718 }
4719
4720 #ifdef CONFIG_CHECK_LEAVE_LPS
4721 /* traffic_check_for_leave_lps(padapter, _TRUE); */
4722 #endif /* CONFIG_CHECK_LEAVE_LPS */
4723 }
4724 }
4725
4726 #if 0 /*CONFIG_CORE_XMITBUF*/
4727 static struct xmit_buf *__rtw_alloc_cmd_xmitbuf(struct xmit_priv *pxmitpriv,
4728 enum cmdbuf_type buf_type)
4729 {
4730 struct xmit_buf *pxmitbuf = NULL;
4731
4732
4733 pxmitbuf = &pxmitpriv->pcmd_xmitbuf[buf_type];
4734 if (pxmitbuf != NULL) {
4735 pxmitbuf->priv_data = NULL;
4736
4737 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
4738 pxmitbuf->len = 0;
4739 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
4740 pxmitbuf->agg_num = 0;
4741 pxmitbuf->pg_num = 0;
4742 #endif
4743 #ifdef CONFIG_PCI_HCI
4744 pxmitbuf->len = 0;
4745 #ifdef CONFIG_TRX_BD_ARCH
4746 /*pxmitbuf->buf_desc = NULL;*/
4747 #else
4748 pxmitbuf->desc = NULL;
4749 #endif
4750 #endif
4751
4752 if (pxmitbuf->sctx) {
4753 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4754 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
4755 }
4756 } else
4757 RTW_INFO("%s fail, no xmitbuf available !!!\n", __func__);
4758
4759 return pxmitbuf;
4760 }
4761
4762 struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
4763 enum cmdbuf_type buf_type)
4764 {
4765 struct xmit_frame *pcmdframe;
4766 struct xmit_buf *pxmitbuf;
4767
4768 pcmdframe = rtw_alloc_xmitframe(pxmitpriv);
4769 if (pcmdframe == NULL) {
4770 RTW_INFO("%s, alloc xmitframe fail\n", __FUNCTION__);
4771 return NULL;
4772 }
4773
4774 pxmitbuf = __rtw_alloc_cmd_xmitbuf(pxmitpriv, buf_type);
4775 if (pxmitbuf == NULL) {
4776 RTW_INFO("%s, alloc xmitbuf fail\n", __FUNCTION__);
4777 rtw_free_xmitframe(pxmitpriv, pcmdframe);
4778 return NULL;
4779 }
4780
4781 pcmdframe->frame_tag = MGNT_FRAMETAG;
4782
4783 pcmdframe->pxmitbuf = pxmitbuf;
4784
4785 pcmdframe->buf_addr = pxmitbuf->pbuf;
4786
4787 /* initial memory to zero */
4788 _rtw_memset(pcmdframe->buf_addr, 0, MAX_CMDBUF_SZ);
4789
4790 pxmitbuf->priv_data = pcmdframe;
4791
4792 return pcmdframe;
4793
4794 }
4795
4796 struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
4797 {
4798 struct xmit_buf *pxmitbuf = NULL;
4799 _list *plist, *phead;
4800 _queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
4801 unsigned long sp_flags;
4802
4803 _rtw_spinlock_irq(&pfree_queue->lock, &sp_flags);
4804
4805 if (_rtw_queue_empty(pfree_queue) == _TRUE)
4806 pxmitbuf = NULL;
4807 else {
4808
4809 phead = get_list_head(pfree_queue);
4810
4811 plist = get_next(phead);
4812
4813 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
4814
4815 rtw_list_delete(&(pxmitbuf->list));
4816 }
4817
4818 if (pxmitbuf != NULL) {
4819 pxmitpriv->free_xmit_extbuf_cnt--;
4820 #ifdef DBG_XMIT_BUF_EXT
4821 RTW_INFO("DBG_XMIT_BUF_EXT ALLOC no=%d, free_xmit_extbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
4822 #endif
4823
4824
4825 pxmitbuf->priv_data = NULL;
4826
4827 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
4828 pxmitbuf->len = 0;
4829 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
4830 pxmitbuf->agg_num = 1;
4831 #endif
4832 #ifdef CONFIG_PCI_HCI
4833 pxmitbuf->len = 0;
4834 #ifdef CONFIG_TRX_BD_ARCH
4835 /*pxmitbuf->buf_desc = NULL;*/
4836 #else
4837 pxmitbuf->desc = NULL;
4838 #endif
4839 #endif
4840
4841 if (pxmitbuf->sctx) {
4842 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4843 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
4844 }
4845
4846 }
4847
4848 _rtw_spinunlock_irq(&pfree_queue->lock, &sp_flags);
4849
4850
4851 return pxmitbuf;
4852 }
4853
4854 s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
4855 {
4856 _queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
4857 unsigned long sp_flags;
4858
4859 if (pxmitbuf == NULL)
4860 return _FAIL;
4861
4862 _rtw_spinlock_irq(&pfree_queue->lock, &sp_flags);
4863
4864 rtw_list_delete(&pxmitbuf->list);
4865
4866 rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
4867 pxmitpriv->free_xmit_extbuf_cnt++;
4868 #ifdef DBG_XMIT_BUF_EXT
4869 RTW_INFO("DBG_XMIT_BUF_EXT FREE no=%d, free_xmit_extbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
4870 #endif
4871
4872 _rtw_spinunlock_irq(&pfree_queue->lock, &sp_flags);
4873
4874
4875 return _SUCCESS;
4876 }
4877
4878 struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
4879 {
4880 struct xmit_buf *pxmitbuf = NULL;
4881 _list *plist, *phead;
4882 _queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
4883 unsigned long sp_flags;
4884
4885 /* RTW_INFO("+rtw_alloc_xmitbuf\n"); */
4886
4887 _rtw_spinlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4888
4889 if (_rtw_queue_empty(pfree_xmitbuf_queue) == _TRUE)
4890 pxmitbuf = NULL;
4891 else {
4892
4893 phead = get_list_head(pfree_xmitbuf_queue);
4894
4895 plist = get_next(phead);
4896
4897 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
4898
4899 rtw_list_delete(&(pxmitbuf->list));
4900 }
4901
4902 if (pxmitbuf != NULL) {
4903 pxmitpriv->free_xmitbuf_cnt--;
4904 #ifdef DBG_XMIT_BUF
4905 RTW_INFO("DBG_XMIT_BUF ALLOC no=%d, free_xmitbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
4906 #endif
4907 /* RTW_INFO("alloc, free_xmitbuf_cnt=%d\n", pxmitpriv->free_xmitbuf_cnt); */
4908
4909 pxmitbuf->priv_data = NULL;
4910
4911 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
4912 pxmitbuf->len = 0;
4913 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
4914 pxmitbuf->agg_num = 0;
4915 pxmitbuf->pg_num = 0;
4916 #endif
4917 #ifdef CONFIG_PCI_HCI
4918 pxmitbuf->len = 0;
4919 #ifdef CONFIG_TRX_BD_ARCH
4920 /*pxmitbuf->buf_desc = NULL;*/
4921 #else
4922 pxmitbuf->desc = NULL;
4923 #endif
4924 #endif
4925
4926 if (pxmitbuf->sctx) {
4927 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4928 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
4929 }
4930 }
4931 #ifdef DBG_XMIT_BUF
4932 else
4933 RTW_INFO("DBG_XMIT_BUF rtw_alloc_xmitbuf return NULL\n");
4934 #endif
4935
4936 _rtw_spinunlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4937
4938
4939 return pxmitbuf;
4940 }
4941
4942 s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
4943 {
4944 _queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
4945 unsigned long sp_flags;
4946
4947 /* RTW_INFO("+rtw_free_xmitbuf\n"); */
4948
4949 if (pxmitbuf == NULL)
4950 return _FAIL;
4951
4952 if (pxmitbuf->sctx) {
4953 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4954 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
4955 }
4956
4957 if (pxmitbuf->buf_tag == XMITBUF_CMD) {
4958 } else if (pxmitbuf->buf_tag == XMITBUF_MGNT)
4959 rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
4960 else {
4961 _rtw_spinlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4962
4963 rtw_list_delete(&pxmitbuf->list);
4964
4965 rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
4966
4967 pxmitpriv->free_xmitbuf_cnt++;
4968 /* RTW_INFO("FREE, free_xmitbuf_cnt=%d\n", pxmitpriv->free_xmitbuf_cnt); */
4969 #ifdef DBG_XMIT_BUF
4970 RTW_INFO("DBG_XMIT_BUF FREE no=%d, free_xmitbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
4971 #endif
4972 _rtw_spinunlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4973 }
4974
4975
4976 return _SUCCESS;
4977 }
4978 #endif
4979
4980 void rtw_init_xmitframe(struct xmit_frame *pxframe)
4981 {
4982 if (pxframe != NULL) { /* default value setting */
4983 #if 0 /*CONFIG_CORE_XMITBUF*/
4984 pxframe->buf_addr = NULL;
4985 pxframe->pxmitbuf = NULL;
4986 #endif
4987
4988 _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
4989 /* pxframe->attrib.psta = NULL; */
4990
4991 pxframe->frame_tag = DATA_FRAMETAG;
4992
4993 #ifdef CONFIG_USB_HCI
4994 pxframe->pkt = NULL;
4995 #ifdef USB_PACKET_OFFSET_SZ
4996 pxframe->pkt_offset = (PACKET_OFFSET_SZ / 8);
4997 #else
4998 pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
4999 #endif
5000
5001 #ifdef CONFIG_USB_TX_AGGREGATION
5002 pxframe->agg_num = 1;
5003 #endif
5004
5005 #endif /* #ifdef CONFIG_USB_HCI */
5006
5007 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
5008 pxframe->pg_num = 1;
5009 pxframe->agg_num = 1;
5010 #endif
5011
5012 #ifdef CONFIG_XMIT_ACK
5013 pxframe->ack_report = 0;
5014 #endif
5015 pxframe->txfree_cnt = 0;
5016 }
5017 }
5018
5019 /*
5020 Calling context:
5021 1. OS_TXENTRY
5022 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
5023
5024 If USE_RXTHREAD is enabled, no critical section is needed.
5025 Otherwise, _enter/_exit critical sections must be used to protect free_xmit_queue...
5026 
5027 Be very cautious...
5028
5029 */
5030
5031 #ifdef RTW_PHL_TX
5032 void core_tx_init_xmitframe(struct xmit_frame *pxframe)
5033 {
5034 if (!pxframe)
5035 return;
5036 #if 0 /*CONFIG_CORE_XMITBUF*/
5037 pxframe->pxmitbuf = NULL;
5038 #endif
5039 _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
5040 /* TXREQ_QMGT */
5041 pxframe->ptxreq_buf = NULL;
5042 pxframe->phl_txreq = NULL;
5043
5044 pxframe->txreq_cnt = 0;
5045 pxframe->txfree_cnt = 0;
5046 }
5047
5048 s32 core_tx_alloc_xmitframe(_adapter *padapter, struct xmit_frame **pxmitframe, u16 os_qid)
5049 {
5050 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5051 struct xmit_frame *pxframe = NULL;
5052 _queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
5053 _list *plist, *phead;
5054
5055 PHLTX_LOG;
5056
5057 _rtw_spinlock_bh(&pfree_xmit_queue->lock);
5058
5059 if (_rtw_queue_empty(pfree_xmit_queue) == _TRUE) {
5060 _rtw_spinunlock_bh(&pfree_xmit_queue->lock);
5061 return FAIL;
5062 } else {
5063 phead = get_list_head(pfree_xmit_queue);
5064
5065 plist = get_next(phead);
5066
5067 pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5068
5069 rtw_list_delete(&pxframe->list);
5070 pxmitpriv->free_xmitframe_cnt--;
5071 pxframe->os_qid = os_qid;
5072 }
5073
5074 _rtw_spinunlock_bh(&pfree_xmit_queue->lock);
5075 rtw_os_check_stop_queue(pxmitpriv->adapter, os_qid);
5076 core_tx_init_xmitframe(pxframe);
5077
5078 *pxmitframe = pxframe;
5079 return SUCCESS;
5080 }
5081
5082 s32 core_tx_free_xmitframe(_adapter *padapter, struct xmit_frame *pxframe)
5083 {
5084 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5085 _queue *queue = NULL;
5086 /* TXREQ_QMGT */
5087 struct xmit_txreq_buf *ptxreq_buf = NULL;
5088 int i;
5089 struct rtw_xmit_req *txreq = NULL;
5090 struct rtw_pkt_buf_list *pkt_list = NULL;
5091
5092 PHLTX_LOG;
5093
5094 if (pxframe == NULL)
5095 goto exit;
5096
5097 /* TXREQ_QMGT */
5098 ptxreq_buf = pxframe->ptxreq_buf;
5099
5100 pxframe->txfree_cnt++;
5101
5102 /* TODO: should verify that each txreq index is freed exactly once (e.g. 1 2 3, not 2 2 3) */
5103 /* TODO: rtw_alloc_xmitframe_once case; appears to be unused */
5104
5105 if (pxframe->txfree_cnt < pxframe->txreq_cnt)
5106 goto exit;
5107
5108 #if 0 /*CONFIG_CORE_XMITBUF*/
5109 if (pxframe->pxmitbuf)
5110 rtw_free_xmitbuf(pxmitpriv, pxframe->pxmitbuf);
5111 #endif
5112
5113 for (i = 0; i < pxframe->txreq_cnt; i++) {
5114 if (!pxframe->buf_need_free)
5115 break;
5116 if (!(pxframe->buf_need_free & BIT(i)))
5117 continue;
5118 pxframe->buf_need_free &= ~BIT(i);
5119
5120 txreq = &pxframe->phl_txreq[i];
5121 rtw_warn_on(txreq->pkt_cnt != 1);
5122 pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
5123 if (pkt_list->vir_addr && pkt_list->length)
5124 rtw_mfree(pkt_list->vir_addr, pkt_list->length);
5125 }
5126
5127 if (ptxreq_buf) {
5128 queue = &padapter->free_txreq_queue;
5129 _rtw_spinlock_bh(&queue->lock);
5130
5131 rtw_list_delete(&ptxreq_buf->list);
5132 rtw_list_insert_tail(&ptxreq_buf->list, get_list_head(queue));
5133
5134 padapter->free_txreq_cnt++;
5135 _rtw_spinunlock_bh(&queue->lock);
5136 } else {
5137 if (pxframe->ext_tag == 0)
5138 ;//printk("%s:tx recycle: ptxreq_buf=NULL\n", __FUNCTION__);
5139 }
5140
5141 rtw_os_xmit_complete(padapter, pxframe);
5142
5143 if (pxframe->ext_tag == 0)
5144 queue = &pxmitpriv->free_xmit_queue;
5145 else if (pxframe->ext_tag == 1)
5146 queue = &pxmitpriv->free_xframe_ext_queue;
5147 else
5148 rtw_warn_on(1);
5149
5150 _rtw_spinlock_bh(&queue->lock);
5151
5152 rtw_list_delete(&pxframe->list);
5153 rtw_list_insert_tail(&pxframe->list, get_list_head(queue));
5154
5155 if (pxframe->ext_tag == 0)
5156 pxmitpriv->free_xmitframe_cnt++;
5157 else if (pxframe->ext_tag == 1)
5158 pxmitpriv->free_xframe_ext_cnt++;
5159
5160 _rtw_spinunlock_bh(&queue->lock);
5161
5162 if (queue == &pxmitpriv->free_xmit_queue)
5163 rtw_os_check_wakup_queue(padapter, pxframe->os_qid);
5164
5165 exit:
5166 return _SUCCESS;
5167 }
5168
5169 #endif
5170
5171 struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv, u16 os_qid)/* (_queue *pfree_xmit_queue) */
5172 {
5173 /*
5174 Remember to use the osdep_service APIs,
5175 and lock/unlock or _enter/_exit critical sections to protect
5176 pfree_xmit_queue
5177 */
5178
5179 struct xmit_frame *pxframe = NULL;
5180 _list *plist, *phead;
5181 _queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
5182
5183
5184 _rtw_spinlock_bh(&pfree_xmit_queue->lock);
5185
5186 if (_rtw_queue_empty(pfree_xmit_queue) == _TRUE) {
5187 pxframe = NULL;
5188 } else {
5189 phead = get_list_head(pfree_xmit_queue);
5190
5191 plist = get_next(phead);
5192
5193 pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5194
5195 rtw_list_delete(&(pxframe->list));
5196 pxmitpriv->free_xmitframe_cnt--;
5197 pxframe->os_qid = os_qid;
5198 }
5199
5200 _rtw_spinunlock_bh(&pfree_xmit_queue->lock);
5201
5202 if (pxframe)
5203 rtw_os_check_stop_queue(pxmitpriv->adapter, os_qid);
5204
5205 rtw_init_xmitframe(pxframe);
5206
5207
5208 return pxframe;
5209 }
5210
5211 struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv)
5212 {
5213 struct xmit_frame *pxframe = NULL;
5214 _list *plist, *phead;
5215 _queue *queue = &pxmitpriv->free_xframe_ext_queue;
5216
5217
5218 _rtw_spinlock_bh(&queue->lock);
5219
5220 if (_rtw_queue_empty(queue) == _TRUE) {
5221 pxframe = NULL;
5222 } else {
5223 phead = get_list_head(queue);
5224 plist = get_next(phead);
5225 pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5226
5227 rtw_list_delete(&(pxframe->list));
5228 pxmitpriv->free_xframe_ext_cnt--;
5229 }
5230
5231 _rtw_spinunlock_bh(&queue->lock);
5232
5233 rtw_init_xmitframe(pxframe);
5234
5235
5236 return pxframe;
5237 }
5238
5239 struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv)
5240 {
5241 struct xmit_frame *pxframe = NULL;
5242 u8 *alloc_addr;
5243
5244 alloc_addr = rtw_zmalloc(sizeof(struct xmit_frame) + 4);
5245
5246 if (alloc_addr == NULL)
5247 goto exit;
5248
5249 pxframe = (struct xmit_frame *)N_BYTE_ALIGMENT((SIZE_PTR)(alloc_addr), 4);
5250 pxframe->alloc_addr = alloc_addr;
5251
5252 pxframe->padapter = pxmitpriv->adapter;
5253 pxframe->frame_tag = NULL_FRAMETAG;
5254
5255 pxframe->pkt = NULL;
5256 #if 0 /*CONFIG_CORE_XMITBUF*/
5257 pxframe->buf_addr = NULL;
5258 pxframe->pxmitbuf = NULL;
5259 #endif
5260
5261 rtw_init_xmitframe(pxframe);
5262
5263 RTW_INFO("################## %s ##################\n", __func__);
5264
5265 exit:
5266 return pxframe;
5267 }
5268
5269 s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
5270 {
5271 _queue *queue = NULL;
5272 _adapter *padapter = pxmitpriv->adapter;
5273 struct sk_buff *pndis_pkt = NULL;
5274
5275
5276 if (pxmitframe == NULL) {
5277 goto exit;
5278 }
5279
5280 if (pxmitframe->pkt) {
5281 pndis_pkt = pxmitframe->pkt;
5282 pxmitframe->pkt = NULL;
5283 }
5284
5285 if (pxmitframe->alloc_addr) {
5286 RTW_INFO("################## %s with alloc_addr ##################\n", __func__);
5287 rtw_mfree(pxmitframe->alloc_addr, sizeof(struct xmit_frame) + 4);
5288 goto check_pkt_complete;
5289 }
5290
5291 if (pxmitframe->ext_tag == 0)
5292 queue = &pxmitpriv->free_xmit_queue;
5293 else if (pxmitframe->ext_tag == 1)
5294 queue = &pxmitpriv->free_xframe_ext_queue;
5295 else
5296 rtw_warn_on(1);
5297
5298 _rtw_spinlock_bh(&queue->lock);
5299
5300 rtw_list_delete(&pxmitframe->list);
5301 rtw_list_insert_tail(&pxmitframe->list, get_list_head(queue));
5302 if (pxmitframe->ext_tag == 0) {
5303 pxmitpriv->free_xmitframe_cnt++;
5304 } else if (pxmitframe->ext_tag == 1) {
5305 pxmitpriv->free_xframe_ext_cnt++;
5306 } else {
5307 }
5308
5309 _rtw_spinunlock_bh(&queue->lock);
5310 if (queue == &pxmitpriv->free_xmit_queue)
5311 rtw_os_check_wakup_queue(padapter, pxmitframe->os_qid);
5312
5313 check_pkt_complete:
5314
5315 if (pndis_pkt)
5316 rtw_os_pkt_complete(padapter, pndis_pkt);
5317
5318 exit:
5319
5320
5321 return _SUCCESS;
5322 }
5323
5324 void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, _queue *pframequeue)
5325 {
5326 _list *plist, *phead;
5327 struct xmit_frame *pxmitframe;
5328
5329
5330 _rtw_spinlock_bh(&(pframequeue->lock));
5331
5332 phead = get_list_head(pframequeue);
5333 plist = get_next(phead);
5334
5335 while (rtw_end_of_queue_search(phead, plist) == _FALSE) {
5336
5337 pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5338
5339 plist = get_next(plist);
5340
5341 rtw_free_xmitframe(pxmitpriv, pxmitframe);
5342
5343 }
5344 _rtw_spinunlock_bh(&(pframequeue->lock));
5345
5346 }
5347
5348 s32 rtw_xmitframe_enqueue(_adapter *padapter, struct xmit_frame *pxmitframe)
5349 {
5350 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue);
5351 if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
5352 /* pxmitframe->pkt = NULL; */
5353 return _FAIL;
5354 }
5355
5356 return _SUCCESS;
5357 }
5358
5359 static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, _queue *pframe_queue)
5360 {
5361 _list *xmitframe_plist, *xmitframe_phead;
5362 struct xmit_frame *pxmitframe = NULL;
5363
5364 xmitframe_phead = get_list_head(pframe_queue);
5365 xmitframe_plist = get_next(xmitframe_phead);
5366
5367 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
5368 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
5369
5370 /* xmitframe_plist = get_next(xmitframe_plist); */
5371
5372 /*#ifdef RTK_DMP_PLATFORM
5373 #ifdef CONFIG_USB_TX_AGGREGATION
5374 if((ptxservq->qcnt>0) && (ptxservq->qcnt<=2))
5375 {
5376 pxmitframe = NULL;
5377
5378 rtw_tasklet_schedule(&pxmitpriv->xmit_tasklet);
5379
5380 break;
5381 }
5382 #endif
5383 #endif*/
5384 rtw_list_delete(&pxmitframe->list);
5385
5386 ptxservq->qcnt--;
5387
5388 /* rtw_list_insert_tail(&pxmitframe->list, &phwxmit->pending); */
5389
5390 /* ptxservq->qcnt--; */
5391
5392 break;
5393
5394 /* pxmitframe = NULL; */
5395
5396 }
5397
5398 return pxmitframe;
5399 }
5400
5401 static struct xmit_frame *get_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, _queue *pframe_queue)
5402 {
5403 _list *xmitframe_plist, *xmitframe_phead;
5404 struct xmit_frame *pxmitframe = NULL;
5405
5406 xmitframe_phead = get_list_head(pframe_queue);
5407 xmitframe_plist = get_next(xmitframe_phead);
5408
5409 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
5410 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
5411 break;
5412 }
5413
5414 return pxmitframe;
5415 }
5416
5417 struct xmit_frame *rtw_get_xframe(struct xmit_priv *pxmitpriv, int *num_frame)
5418 {
5419 _list *sta_plist, *sta_phead;
5420 struct hw_xmit *phwxmit_i = pxmitpriv->hwxmits;
5421 sint entry = pxmitpriv->hwxmit_entry;
5422
5423 struct hw_xmit *phwxmit;
5424 struct tx_servq *ptxservq = NULL;
5425 _queue *pframe_queue = NULL;
5426 struct xmit_frame *pxmitframe = NULL;
5427 _adapter *padapter = pxmitpriv->adapter;
5428 int i, inx[4];
5429
5430 inx[0] = 0;
5431 inx[1] = 1;
5432 inx[2] = 2;
5433 inx[3] = 3;
5434
5435 *num_frame = 0;
5436
5437 _rtw_spinlock_bh(&pxmitpriv->lock);
5438
5439 for (i = 0; i < entry; i++) {
5440 phwxmit = phwxmit_i + inx[i];
5441
5442 sta_phead = get_list_head(phwxmit->sta_queue);
5443 sta_plist = get_next(sta_phead);
5444
5445 while ((rtw_end_of_queue_search(sta_phead, sta_plist)) == _FALSE) {
5446
5447 ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
5448 pframe_queue = &ptxservq->sta_pending;
5449
5450 if (ptxservq->qcnt) {
5451 *num_frame = ptxservq->qcnt;
5452 pxmitframe = get_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
5453 goto exit;
5454 }
5455 sta_plist = get_next(sta_plist);
5456 }
5457 }
5458
5459 exit:
5460
5461 _rtw_spinunlock_bh(&pxmitpriv->lock);
5462
5463 return pxmitframe;
5464 }
5465
5466
5467 struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, sint entry)
5468 {
5469 _list *sta_plist, *sta_phead;
5470 struct hw_xmit *phwxmit;
5471 struct tx_servq *ptxservq = NULL;
5472 _queue *pframe_queue = NULL;
5473 struct xmit_frame *pxmitframe = NULL;
5474 _adapter *padapter = pxmitpriv->adapter;
5475 struct registry_priv *pregpriv = &padapter->registrypriv;
5476 int i, inx[4];
5477
5478 inx[0] = 0;
5479 inx[1] = 1;
5480 inx[2] = 2;
5481 inx[3] = 3;
5482
5483 if (pregpriv->wifi_spec == 1) {
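/* in WMM-certification mode, dequeue the ACs in the order recorded in wmm_para_seq below */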
5484 int j;
5485 #if 0
5486 if (flags < XMIT_QUEUE_ENTRY) {
5487 /* priority exchange according to the completed xmitbuf flags. */
5488 inx[flags] = 0;
5489 inx[0] = flags;
5490 }
5491 #endif
5492
5493 #if defined(CONFIG_USB_HCI) || defined(CONFIG_SDIO_HCI) || defined(CONFIG_PCI_HCI)
5494 for (j = 0; j < 4; j++)
5495 inx[j] = pxmitpriv->wmm_para_seq[j];
5496 #endif
5497 }
5498
5499 _rtw_spinlock_bh(&pxmitpriv->lock);
5500
5501 for (i = 0; i < entry; i++) {
5502 phwxmit = phwxmit_i + inx[i];
5503
5504 /* _rtw_spinlock_irq(&phwxmit->sta_queue->lock, &sp_flags); */
5505
5506 sta_phead = get_list_head(phwxmit->sta_queue);
5507 sta_plist = get_next(sta_phead);
5508
5509 while ((rtw_end_of_queue_search(sta_phead, sta_plist)) == _FALSE) {
5510
5511 ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
5512
5513 pframe_queue = &ptxservq->sta_pending;
5514
5515 pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
5516
5517 if (pxmitframe) {
5518 phwxmit->accnt--;
5519
5520 /* Remove the sta node when there are no pending packets. */
5521 if (_rtw_queue_empty(pframe_queue)) /* must be done after get_next and before break */
5522 rtw_list_delete(&ptxservq->tx_pending);
5523
5524 /* _rtw_spinunlock_irq(&phwxmit->sta_queue->lock, sp_flags); */
5525
5526 goto exit;
5527 }
5528
5529 sta_plist = get_next(sta_plist);
5530
5531 }
5532
5533 /* _rtw_spinunlock_irq(&phwxmit->sta_queue->lock, sp_flags); */
5534
5535 }
5536
5537 exit:
5538
5539 _rtw_spinunlock_bh(&pxmitpriv->lock);
5540
5541 return pxmitframe;
5542 }
5543
5544 #if 1
5545 struct tx_servq *rtw_get_sta_pending(_adapter *padapter, struct sta_info *psta, sint up, u8 *ac)
5546 {
5547 struct tx_servq *ptxservq = NULL;
5548
5549
5550 switch (up) {
5551 case 1:
5552 case 2:
5553 ptxservq = &(psta->sta_xmitpriv.bk_q);
5554 *(ac) = 3;
5555 break;
5556
5557 case 4:
5558 case 5:
5559 ptxservq = &(psta->sta_xmitpriv.vi_q);
5560 *(ac) = 1;
5561 break;
5562
5563 case 6:
5564 case 7:
5565 ptxservq = &(psta->sta_xmitpriv.vo_q);
5566 *(ac) = 0;
5567 break;
5568
5569 case 0:
5570 case 3:
5571 default:
5572 ptxservq = &(psta->sta_xmitpriv.be_q);
5573 *(ac) = 2;
5574 break;
5575
5576 }
5577
5578
5579 return ptxservq;
5580 }
5581 #else
5582 __inline static struct tx_servq *rtw_get_sta_pending
5583 (_adapter *padapter, _queue **ppstapending, struct sta_info *psta, sint up) {
5584 struct tx_servq *ptxservq;
5585 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
5586
5587
5588 #ifdef CONFIG_RTL8711
5589
5590 if (IS_MCAST(psta->phl_sta->mac_addr)) {
5591 ptxservq = &(psta->sta_xmitpriv.be_q); /* we will use be_q to queue bc/mc frames in BCMC_stainfo */
5592 *ppstapending = &padapter->xmitpriv.bm_pending;
5593 } else
5594 #endif
5595 {
5596 switch (up) {
5597 case 1:
5598 case 2:
5599 ptxservq = &(psta->sta_xmitpriv.bk_q);
5600 *ppstapending = &padapter->xmitpriv.bk_pending;
5601 (phwxmits + 3)->accnt++;
5602 break;
5603
5604 case 4:
5605 case 5:
5606 ptxservq = &(psta->sta_xmitpriv.vi_q);
5607 *ppstapending = &padapter->xmitpriv.vi_pending;
5608 (phwxmits + 1)->accnt++;
5609 break;
5610
5611 case 6:
5612 case 7:
5613 ptxservq = &(psta->sta_xmitpriv.vo_q);
5614 *ppstapending = &padapter->xmitpriv.vo_pending;
5615 (phwxmits + 0)->accnt++;
5616 break;
5617
5618 case 0:
5619 case 3:
5620 default:
5621 ptxservq = &(psta->sta_xmitpriv.be_q);
5622 *ppstapending = &padapter->xmitpriv.be_pending;
5623 (phwxmits + 2)->accnt++;
5624 break;
5625
5626 }
5627
5628 }
5629
5630
5631 return ptxservq;
5632 }
5633 #endif
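/*
 * For reference, the switch in rtw_get_sta_pending() implements the usual
 * WMM user-priority (UP) to access-category mapping:
 *
 *   UP 1, 2 -> bk_q (ac = 3)     UP 0, 3 -> be_q (ac = 2)
 *   UP 4, 5 -> vi_q (ac = 1)     UP 6, 7 -> vo_q (ac = 0)
 *
 * The returned ac value is later used as the index into xmitpriv.hwxmits[]
 * by rtw_xmit_classifier().
 */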
5634
5635 /*
5636 * Enqueue pxmitframe into the proper per-STA service queue and, when that
5637 * queue becomes newly non-empty, link it into the corresponding xx_pending list.
5638 */
5639 s32 rtw_xmit_classifier(_adapter *padapter, struct xmit_frame *pxmitframe)
5640 {
5641 u8 ac_index;
5642 struct sta_info *psta;
5643 struct tx_servq *ptxservq;
5644 struct pkt_attrib *pattrib = &pxmitframe->attrib;
5645 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
5646 sint res = _SUCCESS;
5647
5648
5649 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class);
5650
5651 /*
5652 if (pattrib->psta) {
5653 psta = pattrib->psta;
5654 } else {
5655 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
5656 psta = rtw_get_stainfo(pstapriv, pattrib->ra);
5657 }
5658 */
5659
5660 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
5661 if (pattrib->psta != psta) {
5662 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta);
5663 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
5664 return _FAIL;
5665 }
5666
5667 if (psta == NULL) {
5668 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_nosta);
5669 res = _FAIL;
5670 RTW_INFO("rtw_xmit_classifier: psta == NULL\n");
5671 goto exit;
5672 }
5673
5674 if (!(psta->state & WIFI_ASOC_STATE)) {
5675 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_fwlink);
5676 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
5677 return _FAIL;
5678 }
5679
5680 ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
5681
5682 /* _rtw_spinlock_irq(&pstapending->lock, &flags); */
5683
5684 if (rtw_is_list_empty(&ptxservq->tx_pending))
5685 rtw_list_insert_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
5686
5687 /* _rtw_spinlock_irq(&ptxservq->sta_pending.lock, &sp_flags); */
5688
5689 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
5690 ptxservq->qcnt++;
5691 phwxmits[ac_index].accnt++;
5692
5693 /* _rtw_spinunlock_irq(&ptxservq->sta_pending.lock, &sp_flags); */
5694
5695 /* _rtw_spinunlock_irq(&pstapending->lock, &flags); */
5696
5697 exit:
5698
5699
5700 return res;
5701 }
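/*
 * Bookkeeping note for the classifier/dequeue pair: rtw_xmit_classifier()
 * bumps ptxservq->qcnt and hwxmits[ac_index].accnt for every queued frame,
 * while rtw_dequeue_xframe() decrements accnt and unlinks the tx_servq from
 * the hwxmit sta_queue once its sta_pending list drains.  A minimal,
 * illustrative call sequence (locking omitted; callers are expected to
 * serialize the classifier with pxmitpriv->lock):
 *
 *	if (rtw_xmit_classifier(padapter, pxmitframe) == _SUCCESS) {
 *		struct xmit_priv *pxp = &padapter->xmitpriv;
 *		struct xmit_frame *f;
 *
 *		f = rtw_dequeue_xframe(pxp, pxp->hwxmits, pxp->hwxmit_entry);
 *		// f may come from a different, higher-priority AC queue
 *	}
 */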
5702
5703 void rtw_alloc_hwxmits(_adapter *padapter)
5704 {
5705 struct hw_xmit *hwxmits;
5706 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5707
5708 pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
5709
5710 pxmitpriv->hwxmits = NULL;
5711
5712 pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
5713
5714 if (pxmitpriv->hwxmits == NULL) {
5715 RTW_INFO("alloc hwxmits fail!...\n");
5716 return;
5717 }
5718
5719 hwxmits = pxmitpriv->hwxmits;
5720
5721 if (pxmitpriv->hwxmit_entry == 5) {
5722 /* pxmitpriv->bmc_txqueue.head = 0; */
5723 /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
5724 hwxmits[0].sta_queue = &pxmitpriv->bm_pending;
5725
5726 /* pxmitpriv->vo_txqueue.head = 0; */
5727 /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
5728 hwxmits[1].sta_queue = &pxmitpriv->vo_pending;
5729
5730 /* pxmitpriv->vi_txqueue.head = 0; */
5731 /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
5732 hwxmits[2].sta_queue = &pxmitpriv->vi_pending;
5733
5734 /* pxmitpriv->bk_txqueue.head = 0; */
5735 /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
5736 hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
5737
5738 /* pxmitpriv->be_txqueue.head = 0; */
5739 /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
5740 hwxmits[4].sta_queue = &pxmitpriv->be_pending;
5741
5742 } else if (pxmitpriv->hwxmit_entry == 4) {
5743
5744 /* pxmitpriv->vo_txqueue.head = 0; */
5745 /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
5746 hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
5747
5748 /* pxmitpriv->vi_txqueue.head = 0; */
5749 /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
5750 hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
5751
5752 /* pxmitpriv->be_txqueue.head = 0; */
5753 /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
5754 hwxmits[2].sta_queue = &pxmitpriv->be_pending;
5755
5756 /* pxmitpriv->bk_txqueue.head = 0; */
5757 /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
5758 hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
5759 } else {
5760
5761
5762 }
5763
5764
5765 }
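/*
 * Resulting hwxmit index -> pending-queue layout, as set up above:
 *
 *   hwxmit_entry == 5 : [0]=bm, [1]=vo, [2]=vi, [3]=bk, [4]=be
 *   hwxmit_entry == 4 : [0]=vo, [1]=vi, [2]=be, [3]=bk
 *
 * Any other hwxmit_entry value leaves the sta_queue pointers unset.
 */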
5766
5767 void rtw_free_hwxmits(_adapter *padapter)
5768 {
5769 struct hw_xmit *hwxmits;
5770 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5771
5772 hwxmits = pxmitpriv->hwxmits;
5773 if (hwxmits)
5774 rtw_mfree((u8 *)hwxmits, (sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry));
5775 }
5776
5777 void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry)
5778 {
5779 sint i;
5780 for (i = 0; i < entry; i++, phwxmit++) {
5781 /* _rtw_spinlock_init(&phwxmit->xmit_lock); */
5782 /* _rtw_init_listhead(&phwxmit->pending); */
5783 /* phwxmit->txcmdcnt = 0; */
5784 phwxmit->accnt = 0;
5785 }
5786 }
5787
5788 #ifdef CONFIG_BR_EXT
5789 int rtw_br_client_tx(_adapter *padapter, struct sk_buff **pskb)
5790 {
5791 struct sk_buff *skb = *pskb;
5792 /* if(MLME_IS_STA(adapter) */
5793 {
5794 void dhcp_flag_bcast(_adapter *priv, struct sk_buff *skb);
5795 int res, is_vlan_tag = 0, i, do_nat25 = 1;
5796 unsigned short vlan_hdr = 0;
5797 void *br_port = NULL;
5798
5799 /* mac_clone_handle_frame(priv, skb); */
5800
5801 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
5802 br_port = padapter->pnetdev->br_port;
5803 #else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */
5804 rcu_read_lock();
5805 br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
5806 rcu_read_unlock();
5807 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */
5808 _rtw_spinlock_bh(&padapter->br_ext_lock);
5809 if (!(skb->data[0] & 1) &&
5810 br_port &&
5811 _rtw_memcmp(skb->data + MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
5812 *((unsigned short *)(skb->data + MACADDRLEN * 2)) != __constant_htons(ETH_P_8021Q) &&
5813 *((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP) &&
5814 !_rtw_memcmp(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
5815 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5816 padapter->scdb_entry->ageing_timer = jiffies;
5817 _rtw_spinunlock_bh(&padapter->br_ext_lock);
5818 } else
5819 /* if (!priv->pmib->ethBrExtInfo.nat25_disable) */
5820 {
5821 /* if (priv->dev->br_port &&
5822 * !_rtw_memcmp(skb->data+MACADDRLEN, priv->br_mac, MACADDRLEN)) { */
5823 #if 1
5824 if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q)) {
5825 is_vlan_tag = 1;
5826 vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
5827 for (i = 0; i < 6; i++)
5828 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
5829 skb_pull(skb, 4);
5830 }
5831 /* if SA == br_mac && frame is IP => copy the source IP to br_ip (purpose unclear) */
5832 if (!_rtw_memcmp(skb->data + MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
5833 (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)))
5834 _rtw_memcpy(padapter->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
5835
5836 if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)) {
5837 if (_rtw_memcmp(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN)) {
5838 void *scdb_findEntry(_adapter *priv, unsigned char *macAddr, unsigned char *ipAddr);
5839
5840 padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter,
5841 skb->data + MACADDRLEN, skb->data + WLAN_ETHHDR_LEN + 12);
5842 if (padapter->scdb_entry != NULL) {
5843 _rtw_memcpy(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN);
5844 _rtw_memcpy(padapter->scdb_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
5845 padapter->scdb_entry->ageing_timer = jiffies;
5846 do_nat25 = 0;
5847 }
5848 } else {
5849 if (padapter->scdb_entry) {
5850 padapter->scdb_entry->ageing_timer = jiffies;
5851 do_nat25 = 0;
5852 } else {
5853 _rtw_memset(padapter->scdb_mac, 0, MACADDRLEN);
5854 _rtw_memset(padapter->scdb_ip, 0, 4);
5855 }
5856 }
5857 }
5858 _rtw_spinunlock_bh(&padapter->br_ext_lock);
5859 #endif /* 1 */
5860 if (do_nat25) {
5861 int nat25_db_handle(_adapter *priv, struct sk_buff *skb, int method);
5862 if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
5863 struct sk_buff *newskb;
5864
5865 if (is_vlan_tag) {
5866 skb_push(skb, 4);
5867 for (i = 0; i < 6; i++)
5868 *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
5869 *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
5870 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
5871 }
5872
5873 newskb = rtw_skb_copy(skb);
5874 if (newskb == NULL) {
5875 /* priv->ext_stats.tx_drops++; */
5876 DEBUG_ERR("TX DROP: rtw_skb_copy fail!\n");
5877 /* goto stop_proc; */
5878 return -1;
5879 }
5880 rtw_skb_free(skb);
5881
5882 *pskb = skb = newskb;
5883 if (is_vlan_tag) {
5884 vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
5885 for (i = 0; i < 6; i++)
5886 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
5887 skb_pull(skb, 4);
5888 }
5889 }
5890
5891 if (skb_is_nonlinear(skb))
5892 DEBUG_ERR("%s(): skb_is_nonlinear!!\n", __FUNCTION__);
5893
5894
5895 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
5896 res = skb_linearize(skb, GFP_ATOMIC);
5897 #else /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */
5898 res = skb_linearize(skb);
5899 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */
5900 if (res < 0) {
5901 DEBUG_ERR("TX DROP: skb_linearize fail!\n");
5902 /* goto free_and_stop; */
5903 return -1;
5904 }
5905
5906 res = nat25_db_handle(padapter, skb, NAT25_INSERT);
5907 if (res < 0) {
5908 if (res == -2) {
5909 /* priv->ext_stats.tx_drops++; */
5910 DEBUG_ERR("TX DROP: nat25_db_handle fail!\n");
5911 /* goto free_and_stop; */
5912 return -1;
5913
5914 }
5915 /* we just print warning message and let it go */
5916 /* DEBUG_WARN("%s()-%d: nat25_db_handle INSERT Warning!\n", __FUNCTION__, __LINE__); */
5917 /* return -1; */ /* return -1 will cause system crash on 2011/08/30! */
5918 return 0;
5919 }
5920 }
5921
5922 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5923
5924 dhcp_flag_bcast(padapter, skb);
5925
5926 if (is_vlan_tag) {
5927 skb_push(skb, 4);
5928 for (i = 0; i < 6; i++)
5929 *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
5930 *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
5931 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
5932 }
5933 }
5934 #if 0
5935 else {
5936 if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q))
5937 is_vlan_tag = 1;
5938
5939 if (is_vlan_tag) {
5940 if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A_VALN(skb->data))
5941 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5942 } else {
5943 if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A(skb->data))
5944 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5945 }
5946 }
5947 #endif /* 0 */
5948
5949 /* check if SA is equal to our MAC */
5950 if (_rtw_memcmp(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN)) {
5951 /* priv->ext_stats.tx_drops++; */
5952 DEBUG_ERR("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n",
5953 skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]);
5954 /* goto free_and_stop; */
5955 return -1;
5956 }
5957 }
5958 return 0;
5959 }
5960 #endif /* CONFIG_BR_EXT */
5961
5962 static void do_queue_select(_adapter *padapter, struct pkt_attrib *pattrib)
5963 {
5964 u8 qsel;
5965
5966 qsel = pattrib->priority;
5967
5968 /* high priority packet */
5969 if (pattrib->hipriority_pkt) {
5970 pattrib->qsel = rtw_hal_get_qsel(padapter, QSLT_VO_ID);
5971 pattrib->priority = rtw_hal_get_qsel(padapter, QSLT_VO_ID);
5972 }
5973 }
5974
5975 /*
5976 * The transmit(tx) entry for monitor mode (radiotap-injected
5977 * frames)
5978 *
5979 * Return
5980 *	0	always; the frame is dumped via dump_mgntframe() and the
5981 *	skb is consumed (freed) before returning
5982 */
5983 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
5984 s32 rtw_monitor_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
5985 {
5986 u16 frame_ctl;
5987 struct ieee80211_radiotap_header rtap_hdr;
5988 _adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
5989 struct pkt_file pktfile;
5990 struct rtw_ieee80211_hdr *pwlanhdr;
5991 struct pkt_attrib *pattrib;
5992 struct xmit_frame *pmgntframe;
5993 struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
5994 struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
5995 unsigned char *pframe;
5996 u8 dummybuf[32];
5997 int len = skb->len, rtap_len;
5998
5999
6000 rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, skb->truesize);
6001
6002 #ifndef CONFIG_CUSTOMER_ALIBABA_GENERAL
6003 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
6004 goto fail;
6005
6006 _rtw_open_pktfile((struct sk_buff *)skb, &pktfile);
6007 _rtw_pktfile_read(&pktfile, (u8 *)(&rtap_hdr), sizeof(struct ieee80211_radiotap_header));
6008 rtap_len = ieee80211_get_radiotap_len((u8 *)(&rtap_hdr));
6009 if (unlikely(rtap_hdr.it_version))
6010 goto fail;
6011
6012 if (unlikely(skb->len < rtap_len))
6013 goto fail;
6014
6015 if (rtap_len != 12) {
6016 RTW_INFO("radiotap len (should be 12): %d\n", rtap_len);
6017 goto fail;
6018 }
6019 _rtw_pktfile_read(&pktfile, dummybuf, rtap_len-sizeof(struct ieee80211_radiotap_header));
6020 len = len - rtap_len;
6021 #endif
6022 pmgntframe = alloc_mgtxmitframe(pxmitpriv);
6023 if (pmgntframe == NULL) {
6024 rtw_udelay_os(500);
6025 goto fail;
6026 }
6027
6028 _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
6029 pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
6030 // _rtw_memcpy(pframe, (void *)checking, len);
6031 _rtw_pktfile_read(&pktfile, pframe, len);
6032
6033
6034 /* Check DATA/MGNT frames */
6035 pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
6036 frame_ctl = le16_to_cpu(pwlanhdr->frame_ctl);
6037 if ((frame_ctl & RTW_IEEE80211_FCTL_FTYPE) == RTW_IEEE80211_FTYPE_DATA) {
6038
6039 pattrib = &pmgntframe->attrib;
6040 update_monitor_frame_attrib(padapter, pattrib);
6041
6042 if (is_broadcast_mac_addr(pwlanhdr->addr3) || is_broadcast_mac_addr(pwlanhdr->addr1))
6043 pattrib->rate = MGN_24M;
6044
6045 } else {
6046
6047 pattrib = &pmgntframe->attrib;
6048 update_mgntframe_attrib(padapter, pattrib);
6049
6050 }
6051 pattrib->retry_ctrl = _FALSE;
6052 pattrib->pktlen = len;
6053 pmlmeext->mgnt_seq = GetSequence(pwlanhdr);
6054 pattrib->seqnum = pmlmeext->mgnt_seq;
6055 pmlmeext->mgnt_seq++;
6056 pattrib->last_txcmdsz = pattrib->pktlen;
6057
6058 dump_mgntframe(padapter, pmgntframe);
6059
6060 fail:
6061 rtw_skb_free(skb);
6062 return 0;
6063 }
6064 #endif
6065
6066 /*
6067 * Enqueue the frame into the per-STA tx_queue when queuing is enabled.
6068 * Return _TRUE when the frame has been put into the queue, otherwise _FALSE.
6069 */
6070 static u8 xmit_enqueue(_adapter *a, struct xmit_frame *frame)
6071 {
6072 struct sta_info *sta = NULL;
6073 struct pkt_attrib *attrib = NULL;
6074 _list *head;
6075 u8 ret = _TRUE;
6076
6077
6078 attrib = &frame->attrib;
6079 sta = attrib->psta;
6080 if (!sta)
6081 return _FALSE;
6082
6083 _rtw_spinlock_bh(&sta->tx_queue.lock);
6084
6085 head = get_list_head(&sta->tx_queue);
6086
6087 if ((rtw_is_list_empty(head) == _TRUE) && (!sta->tx_q_enable)) {
6088 ret = _FALSE;
6089 goto exit;
6090 }
6091
6092 rtw_list_insert_tail(&frame->list, head);
6093 RTW_INFO(FUNC_ADPT_FMT ": en-queue tx pkt for macid=%d\n",
6094 FUNC_ADPT_ARG(a), sta->phl_sta->macid);
6095
6096 exit:
6097 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6098
6099 return ret;
6100 }
6101
6102 static void xmit_dequeue(struct sta_info *sta)
6103 {
6104 _adapter *a;
6105 _list *head, *list;
6106 struct xmit_frame *frame;
6107
6108
6109 a = sta->padapter;
6110
6111 _rtw_spinlock_bh(&sta->tx_queue.lock);
6112
6113 head = get_list_head(&sta->tx_queue);
6114
6115 do {
6116 if (rtw_is_list_empty(head) == _TRUE)
6117 break;
6118
6119 list = get_next(head);
6120 rtw_list_delete(list);
6121 frame = LIST_CONTAINOR(list, struct xmit_frame, list);
6122 RTW_INFO(FUNC_ADPT_FMT ": de-queue tx frame of macid=%d\n",
6123 FUNC_ADPT_ARG(a), sta->phl_sta->macid);
6124
6125 /*rtw_hal_xmit(a, frame);*/
6126 rtw_intf_data_xmit(a, frame);
6127 } while (1);
6128
6129 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6130 }
6131
6132 void rtw_xmit_dequeue_callback(_workitem *work)
6133 {
6134 struct sta_info *sta;
6135
6136
6137 sta = container_of(work, struct sta_info, tx_q_work);
6138 xmit_dequeue(sta);
6139 }
6140
6141 void rtw_xmit_queue_set(struct sta_info *sta)
6142 {
6143 _rtw_spinlock_bh(&sta->tx_queue.lock);
6144
6145 if (sta->tx_q_enable) {
6146 RTW_WARN(FUNC_ADPT_FMT ": duplicated set!\n",
6147 FUNC_ADPT_ARG(sta->padapter));
6148 goto exit;
6149 }
6150 sta->tx_q_enable = 1;
6151 RTW_INFO(FUNC_ADPT_FMT ": enable queue TX for macid=%d\n",
6152 FUNC_ADPT_ARG(sta->padapter), sta->phl_sta->macid);
6153
6154 exit:
6155 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6156 }
6157
6158 void rtw_xmit_queue_clear(struct sta_info *sta)
6159 {
6160 _rtw_spinlock_bh(&sta->tx_queue.lock);
6161
6162 if (!sta->tx_q_enable) {
6163 RTW_WARN(FUNC_ADPT_FMT ": tx queue for macid=%d "
6164 "is not enabled!\n",
6165 FUNC_ADPT_ARG(sta->padapter), sta->phl_sta->macid);
6166 goto exit;
6167 }
6168
6169 sta->tx_q_enable = 0;
6170 RTW_INFO(FUNC_ADPT_FMT ": disable queue TX for macid=%d\n",
6171 FUNC_ADPT_ARG(sta->padapter), sta->phl_sta->macid);
6172
6173 _set_workitem(&sta->tx_q_work);
6174
6175 exit:
6176 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6177 }
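/*
 * Illustrative lifecycle of the per-STA tx_queue handled above (caller
 * context is assumed, not shown in this file):
 *
 *	rtw_xmit_queue_set(sta);     // tx_q_enable = 1; from now on
 *	                             // xmit_enqueue() holds data frames on
 *	                             // sta->tx_queue
 *	...
 *	rtw_xmit_queue_clear(sta);   // tx_q_enable = 0 and schedules
 *	                             // tx_q_work; its callback
 *	                             // rtw_xmit_dequeue_callback() flushes
 *	                             // the queue via rtw_intf_data_xmit()
 */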
6178
6179 /*
6180 * Post-handling stage of the main transmit(tx) entry
6181 *
6182 * Return
6183 * 1 enqueue
6184 * 0 success, hardware will handle this xmit frame(packet)
6185 * <0 fail
6186 */
6187 s32 rtw_xmit_posthandle(_adapter *padapter, struct xmit_frame *pxmitframe,
6188 struct sk_buff *pkt)
6189 {
6190 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6191 s32 res;
6192
6193 res = update_attrib(padapter, pkt, &pxmitframe->attrib);
6194
6195 #ifdef CONFIG_WAPI_SUPPORT
6196 if (pxmitframe->attrib.ether_type != 0x88B4) {
6197 if (rtw_wapi_drop_for_key_absent(padapter, pxmitframe->attrib.ra)) {
6198 WAPI_TRACE(WAPI_RX, "drop for key absent when tx\n");
6199 res = _FAIL;
6200 }
6201 }
6202 #endif
6203 if (res == _FAIL) {
6204 /*RTW_INFO("%s-"ADPT_FMT" update attrib fail\n", __func__, ADPT_ARG(padapter));*/
6205 #ifdef DBG_TX_DROP_FRAME
6206 RTW_INFO("DBG_TX_DROP_FRAME %s update attrib fail\n", __FUNCTION__);
6207 #endif
6208 rtw_free_xmitframe(pxmitpriv, pxmitframe);
6209 return -1;
6210 }
6211 pxmitframe->pkt = pkt;
6212
6213 rtw_led_tx_control(padapter, pxmitframe->attrib.dst);
6214
6215 do_queue_select(padapter, &pxmitframe->attrib);
6216
6217 #ifdef CONFIG_AP_MODE
6218 _rtw_spinlock_bh(&pxmitpriv->lock);
6219 if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe) == _TRUE) {
6220 _rtw_spinunlock_bh(&pxmitpriv->lock);
6221 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue);
6222 return 1;
6223 }
6224 _rtw_spinunlock_bh(&pxmitpriv->lock);
6225 #endif
6226
6227 /*if (xmit_enqueue(padapter, pxmitframe) == _TRUE)*/
6228 /* return 1;*/
6229
6230 /* pre_xmitframe */
6231 /*if (rtw_hal_xmit(padapter, pxmitframe) == _FALSE)*/
6232 if (rtw_intf_data_xmit(padapter, pxmitframe) == _FALSE)
6233 return 1;
6234
6235 return 0;
6236 }
6237
6238 /*
6239 * The main transmit(tx) entry
6240 *
6241 * Return
6242 * 1 enqueue
6243 * 0 success, hardware will handle this xmit frame(packet)
6244 * <0 fail
6245 */
6246 s32 rtw_xmit(_adapter *padapter, struct sk_buff **ppkt, u16 os_qid)
6247 {
6248 static systime start = 0;
6249 static u32 drop_cnt = 0;
6250 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6251 struct xmit_frame *pxmitframe = NULL;
6252 s32 res;
6253
6254 DBG_COUNTER(padapter->tx_logs.core_tx);
6255
6256 if (IS_CH_WAITING(adapter_to_rfctl(padapter)))
6257 return -1;
6258
6259 if (rtw_linked_check(padapter) == _FALSE)
6260 return -1;
6261
6262 if (start == 0)
6263 start = rtw_get_current_time();
6264
6265 pxmitframe = rtw_alloc_xmitframe(pxmitpriv, os_qid);
6266
6267 if (rtw_get_passing_time_ms(start) > 2000) {
6268 if (drop_cnt)
6269 RTW_INFO("DBG_TX_DROP_FRAME %s no more pxmitframe, drop_cnt:%u\n", __FUNCTION__, drop_cnt);
6270 start = rtw_get_current_time();
6271 drop_cnt = 0;
6272 }
6273
6274 if (pxmitframe == NULL) {
6275 drop_cnt++;
6276 /*RTW_INFO("%s-"ADPT_FMT" no more xmitframe\n", __func__, ADPT_ARG(padapter));*/
6277 DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe);
6278 return -1;
6279 }
6280
6281 #ifdef CONFIG_BR_EXT
6282 if (MLME_IS_STA(padapter) || MLME_IS_ADHOC(padapter)) {
6283 void *br_port = NULL;
6284
6285 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
6286 br_port = padapter->pnetdev->br_port;
6287 #else
6288 rcu_read_lock();
6289 br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
6290 rcu_read_unlock();
6291 #endif
6292
6293 if (br_port) {
6294 res = rtw_br_client_tx(padapter, ppkt);
6295 if (res == -1) {
6296 rtw_free_xmitframe(pxmitpriv, pxmitframe);
6297 DBG_COUNTER(padapter->tx_logs.core_tx_err_brtx);
6298 return -1;
6299 }
6300 }
6301 }
6302 #endif /* CONFIG_BR_EXT */
6303
6304 #ifdef CONFIG_RTW_MESH
6305 if (MLME_IS_MESH(padapter)) {
6306 _list f_list;
6307
6308 res = rtw_mesh_addr_resolve(padapter, pxmitframe, *ppkt, &f_list);
6309 if (res == RTW_RA_RESOLVING)
6310 return 1;
6311 if (res == _FAIL)
6312 return -1;
6313
6314 #if CONFIG_RTW_MESH_DATA_BMC_TO_UC
6315 if (!rtw_is_list_empty(&f_list)) {
6316 _list *list = get_next(&f_list);
6317 struct xmit_frame *fframe;
6318
6319 while ((rtw_end_of_queue_search(&f_list, list)) == _FALSE) {
6320 fframe = LIST_CONTAINOR(list, struct xmit_frame, list);
6321 list = get_next(list);
6322 rtw_list_delete(&fframe->list);
6323
6324 fframe->pkt = rtw_skb_copy(*ppkt);
6325 if (!fframe->pkt) {
6326 if (res == RTW_ORI_NO_NEED)
6327 res = _SUCCESS;
6328 rtw_free_xmitframe(pxmitpriv, fframe);
6329 continue;
6330 }
6331
6332 rtw_xmit_posthandle(padapter, fframe, fframe->pkt);
6333 }
6334 }
6335 #endif /* CONFIG_RTW_MESH_DATA_BMC_TO_UC */
6336
6337 if (res == RTW_ORI_NO_NEED) {
6338 rtw_free_xmitframe(&padapter->xmitpriv, pxmitframe);
6339 return 0;
6340 }
6341 }
6342 #endif /* CONFIG_RTW_MESH */
6343
6344 pxmitframe->pkt = NULL; /* keep rtw_xmit_posthandle from freeing pkt internally */
6345 res = rtw_xmit_posthandle(padapter, pxmitframe, *ppkt);
6346
6347 return res;
6348 }
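/*
 * A minimal, hedged sketch of how an OS-layer tx hook might call rtw_xmit()
 * and interpret its return value; the caller and the completion helper
 * rtw_os_pkt_complete() shown here are illustrative and live in the os_dep
 * layer, not in this file:
 *
 *	struct sk_buff *pkt = skb;
 *	s32 ret = rtw_xmit(padapter, &pkt, os_qid);
 *
 *	if (ret < 0)
 *		rtw_os_pkt_complete(padapter, pkt);  // drop: free the skb
 *	// ret == 1: the frame was enqueued, the driver keeps the skb
 *	// ret == 0: the frame was handed to the hardware path
 */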
6349
6350 #ifdef RTW_PHL_TX
6351
6352 #ifdef RTW_PHL_TEST_FPGA
6353 u32 test_seq;
6354 #endif
6355
6356 u8 *get_head_from_txreq(_adapter *padapter, struct xmit_frame *pxframe, u8 frag_idx)
6357 {
6358 return 0;
6359 }
6360
6361 u8 *get_tail_from_txreq(_adapter *padapter, struct xmit_frame *pxframe, u8 frag_idx)
6362 {
6363 return 0;
6364 }
6365
6366 void dump_pkt(u8 *start, u32 len)
6367 {
6368 u32 idx = 0;
6369 for (idx = 0; idx < len; idx++) {
6370 printk("%02x ", start[idx]);
6371 if ((idx % 20) == 19)
6372 printk("\n");
6373 }
6374 printk("\n");
6375 }
6376
6377 /* TXREQ_QMGT */
6378 u8 *get_txreq_buffer(_adapter *padapter, u8 **txreq, u8 **pkt_list, u8 **head, u8 **tail)
6379 {
6380 struct xmit_txreq_buf *ptxreq_buf = NULL;
6381 _list *plist, *phead;
6382 _queue *pfree_txreq_queue = &padapter->free_txreq_queue;
6383 #ifdef CONFIG_CORE_TXSC
6384 u8 i = 0;
6385 #endif
6386
6387 _rtw_spinlock_bh(&pfree_txreq_queue->lock);
6388 if (_rtw_queue_empty(pfree_txreq_queue) == _TRUE) {
6389 padapter->txreq_full_cnt++;
6390 } else {
6391 phead = get_list_head(pfree_txreq_queue);
6392 plist = get_next(phead);
6393 ptxreq_buf = LIST_CONTAINOR(plist, struct xmit_txreq_buf, list);
6394 rtw_list_delete(&ptxreq_buf->list);
6395
6396 padapter->free_txreq_cnt--;
6397 }
6398 _rtw_spinunlock_bh(&pfree_txreq_queue->lock);
6399
6400 if (ptxreq_buf) {
6401
6402 if (txreq)
6403 *txreq = ptxreq_buf->txreq;
6404
6405 if (head)
6406 *head = ptxreq_buf->head;
6407
6408 if (tail)
6409 *tail = ptxreq_buf->tail;
6410
6411 if (pkt_list)
6412 *pkt_list = ptxreq_buf->pkt_list;
6413
6414 #ifdef CONFIG_CORE_TXSC
6415 for (i = 0; i < MAX_TXSC_SKB_NUM; i++)
6416 ptxreq_buf->pkt[i] = NULL;
6417 ptxreq_buf->pkt_cnt = 0;
6418 #endif
6419 }
6420
6421 return (u8 *)ptxreq_buf;
6422 }
6423
6424 void get_txreq_resources(_adapter *padapter, struct xmit_frame *pxframe,
6425 u8 **txreq, u8 **pkt_list, u8 **head, u8 **tail)
6426 {
6427 u32 offset_head = (sizeof(struct rtw_xmit_req) * RTW_MAX_FRAG_NUM);
6428 u32 offset_tail = offset_head + (SZ_HEAD_BUF * RTW_MAX_FRAG_NUM);
6429 u32 offset_list = offset_tail + (SZ_TAIL_BUF * RTW_MAX_FRAG_NUM);
6430 u8 *pbuf = NULL;
6431
6432 PHLTX_ENTER;
6433
6434 //rtw_phl_tx todo: error handle, max tx req limit
6435 padapter->tx_ring_idx++;
6436 padapter->tx_ring_idx = (padapter->tx_ring_idx % MAX_TX_RING_NUM);
6437
6438 pbuf = padapter->tx_pool_ring[padapter->tx_ring_idx];
6439 //memset(pbuf, 0, (SZ_TX_RING*RTW_MAX_FRAG_NUM));
6440
6441 if (txreq)
6442 *txreq = pbuf;
6443
6444 if (head)
6445 *head = pbuf + offset_head;
6446
6447 if (tail)
6448 *tail = pbuf + offset_tail;
6449
6450 if (pkt_list)
6451 *pkt_list = pbuf + offset_list;
6452 }
6453
6454 void dump_xmitframe_txreq(_adapter *padapter, struct xmit_frame *pxframe)
6455 {
6456 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
6457 u32 idx, idx1 = 0;
6458
6459 PHLTX_ENTER;
6460 printk("total txreq=%d \n", pxframe->txreq_cnt);
6461
6462 for (idx = 0; idx < pxframe->txreq_cnt; idx++) {
6463 struct rtw_pkt_buf_list *pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
6464 printk("txreq[%d] with %d pkts =====\n", idx, txreq->pkt_cnt);
6465 for (idx1 = 0; idx1 < txreq->pkt_cnt; idx1++) {
6466 printk("pkt[%d] 0x%p len=%d\n", idx1, (void *)pkt_list->vir_addr, pkt_list->length);
6467 dump_pkt(pkt_list->vir_addr, pkt_list->length);
6468 pkt_list++;
6469 }
6470 txreq++;
6471 }
6472 printk("\n");
6473 }
6474
6475 #ifdef CONFIG_PCI_HCI
6476 void core_recycle_txreq_phyaddr(_adapter *padapter, struct rtw_xmit_req *txreq)
6477 {
6478 PPCI_DATA pci_data = dvobj_to_pci(padapter->dvobj);
6479 struct pci_dev *pdev = pci_data->ppcidev;
6480 struct rtw_pkt_buf_list *pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
6481 dma_addr_t phy_addr = 0;
6482 u32 idx = 0;
6483
6484 for (idx = 0; idx < txreq->pkt_cnt; idx++) {
6485 phy_addr = pkt_list->phy_addr_l;
6486 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6487 {
6488 u64 phy_addr_h = pkt_list->phy_addr_h;
6489 phy_addr |= (phy_addr_h << 32);
6490 }
6491 #endif
6492 if (txreq->cache == VIRTUAL_ADDR) {
6493 pci_unmap_bus_addr(pdev, &phy_addr,
6494 pkt_list->length, PCI_DMA_TODEVICE);
6495 } else {
6496 pci_free_noncache_mem(pdev, pkt_list->vir_addr,
6497 &phy_addr, pkt_list->length);
6498 txreq->cache = VIRTUAL_ADDR;
6499 }
6500 pkt_list++;
6501 }
6502 }
6503
6504 void fill_txreq_phyaddr(_adapter *padapter, struct xmit_frame *pxframe)
6505 {
6506 PPCI_DATA pci_data = dvobj_to_pci(padapter->dvobj);
6507 struct pci_dev *pdev = pci_data->ppcidev;
6508 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
6509 u32 idx, idx1 = 0;
6510
6511 PHLTX_ENTER;
6512
6513 for (idx = 0; idx < pxframe->txreq_cnt; idx++) {
6514 struct rtw_pkt_buf_list *pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
6515
6516 if (txreq->cache != VIRTUAL_ADDR)
6517 goto next;
6518
6519 for (idx1 = 0; idx1 < txreq->pkt_cnt; idx1++) {
6520 dma_addr_t phy_addr = 0;
6521 pci_get_bus_addr(pdev, pkt_list->vir_addr, &phy_addr, pkt_list->length, PCI_DMA_TODEVICE);
6522 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6523 pkt_list->phy_addr_h = phy_addr >> 32;
6524 #else
6525 pkt_list->phy_addr_h = 0x0;
6526 #endif
6527 pkt_list->phy_addr_l = phy_addr & 0xFFFFFFFF;
6528 pkt_list++;
6529 }
6530 next:
6531 txreq++;
6532 }
6533 }
6534 #endif
6535
6536 static void _fill_txreq_list_skb(_adapter *padapter,
6537 struct rtw_xmit_req *txreq, struct rtw_pkt_buf_list **pkt_list,
6538 struct sk_buff *skb, u32 *req_sz, s32 *req_offset)
6539 {
6540 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
6541 #define skb_frag_off(f) ((f)->page_offset)
6542 #endif
6543 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
6544 #define skb_frag_page(f) ((f)->page)
6545 #define skb_frag_size(f) ((f)->size)
6546 #endif
6547 #define PKT_LIST_APPEND(_addr, _len) do { \
6548 u32 __len = _len; \
6549 if (__len == 0) \
6550 break; \
6551 list->vir_addr = _addr; \
6552 list->length = __len; \
6553 txreq->pkt_cnt++; \
6554 txreq->total_len += __len; \
6555 list++; \
6556 *pkt_list = list; \
6557 } while (0)
6558
6559 struct rtw_pkt_buf_list *list = *pkt_list;
6560 u8 nr_frags = skb_shinfo(skb)->nr_frags;
6561 s32 offset = *req_offset;
6562 u32 rem_sz = *req_sz;
6563 u32 cur_frag_total, cur_frag_rem;
6564 int i;
6565
6566 /* skb head frag */
6567 cur_frag_total = skb_headlen(skb);
6568
6569 if (cur_frag_total > offset) {
6570 cur_frag_rem = rtw_min(cur_frag_total - offset, rem_sz);
6571 PKT_LIST_APPEND(skb->data + offset, cur_frag_rem);
6572 rem_sz -= cur_frag_rem;
6573 offset = 0;
6574 } else {
6575 offset -= cur_frag_total;
6576 }
6577
6578 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6579 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6580 u8 *addr;
6581
6582 addr = ((void *)page_address(skb_frag_page(frag))) + skb_frag_off(frag);
6583 cur_frag_total = skb_frag_size(frag);
6584
6585 if (offset < cur_frag_total) {
6586 cur_frag_rem = cur_frag_total - offset;
6587
6588 if (rem_sz < cur_frag_rem) {
6589 PKT_LIST_APPEND(addr + offset, rem_sz);
6590 RTW_WARN("%s:%d, size(rem_sz)=%d cur_frag_rem=%d txreq->total_len=%d\n",
6591 __func__, __LINE__, rem_sz, cur_frag_rem, txreq->total_len);
6592 rem_sz = 0;
6593 break;
6594 } else {
6595 PKT_LIST_APPEND(addr + offset, cur_frag_rem);
6596 RTW_DBG("%s:%d, size=%d txreq->total_len=%d\n",
6597 __func__, __LINE__, cur_frag_rem, txreq->total_len);
6598 rem_sz -= cur_frag_rem;
6599 }
6600
6601 offset = 0;
6602 } else {
6603 offset -= cur_frag_total;
6604 }
6605 }
6606
6607 *req_sz = rem_sz;
6608 *req_offset = offset;
6609
6610 #undef PKT_LIST_APPEND
6611 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
6612 #undef skb_frag_off
6613 #endif
6614 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
6615 #undef skb_frag_page
6616 #undef skb_frag_size
6617 #endif
6618 }
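/*
 * Worked example for _fill_txreq_list_skb() with assumed (hypothetical)
 * sizes: skb_headlen() == 64, one page frag of 1400 bytes,
 * *req_offset == 14 and *req_sz == 1450 on entry:
 *
 *   head frag : appends (skb->data + 14, 50)    rem_sz = 1400, offset = 0
 *   frags[0]  : appends (frag_addr + 0, 1400)   rem_sz = 0
 *
 * On return *req_sz == 0 and *req_offset == 0, while txreq->pkt_cnt grew by
 * 2 and txreq->total_len by 1450.
 */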
6619
6620 static int skb_total_frag_nr(struct sk_buff *head_skb)
6621 {
6622 struct sk_buff *skb;
6623 int nr;
6624
6625 nr = 1 + skb_shinfo(head_skb)->nr_frags;
6626
6627 skb_walk_frags(head_skb, skb)
6628 nr += 1 + skb_shinfo(skb)->nr_frags;
6629
6630 return nr;
6631 }
6632
6633 static void fill_txreq_list_skb(_adapter *padapter,
6634 struct rtw_xmit_req *txreq, struct rtw_pkt_buf_list **pkt_list,
6635 struct sk_buff *head_skb, u32 req_sz, s32 offset)
6636 {
6637 struct sk_buff *skb;
6638
6639 if (skb_total_frag_nr(head_skb) > NUM_PKT_LIST_PER_TXREQ - 2) {
6640 rtw_skb_linearize(head_skb);
6641 RTW_WARN("skb total frag nr over %d\n", NUM_PKT_LIST_PER_TXREQ - 2);
6642 }
6643
6644 _fill_txreq_list_skb(padapter, txreq, pkt_list, head_skb, &req_sz, &offset);
6645
6646 skb_walk_frags(head_skb, skb)
6647 _fill_txreq_list_skb(padapter, txreq, pkt_list, skb, &req_sz, &offset);
6648
6649 if (req_sz != 0)
6650 RTW_WARN("remaining req_sz=%d should be zero\n", req_sz);
6651 }
6652
6653 s32 rtw_core_replace_skb(struct sk_buff **pskb, u32 need_head, u32 need_tail)
6654 {
6655 struct sk_buff *newskb;
6656 struct sk_buff *skb = *pskb;
6657
6658 newskb = rtw_skb_copy(skb);
6659
6660 if (newskb == NULL)
6661 return FAIL;
6662
6663 rtw_skb_free(skb);
6664 *pskb = newskb;
6665
6666 return SUCCESS;
6667 }
6668
6669 #ifdef CONFIG_BR_EXT
6670 s32 core_br_client_tx(_adapter *padapter, struct xmit_frame *pxframe, struct sk_buff **pskb)
6671 {
6672 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6673
6674 if (!adapter_use_wds(padapter) && check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
6675 void *br_port = NULL;
6676
6677 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
6678 br_port = padapter->pnetdev->br_port;
6679 #else
6680 rcu_read_lock();
6681 br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
6682 rcu_read_unlock();
6683 #endif
6684
6685 if (br_port) {
6686 if (rtw_br_client_tx(padapter, pskb) == FAIL) {
6687 core_tx_free_xmitframe(padapter, pxframe);
6688 DBG_COUNTER(padapter->tx_logs.core_tx_err_brtx);
6689 return FAIL;
6690 }
6691 }
6692 }
6693 return SUCCESS;
6694 }
6695 #endif
6696
6697 s32 core_tx_update_pkt(_adapter *padapter, struct xmit_frame *pxframe, struct sk_buff **pskb)
6698 {
6699 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6700 struct sk_buff *skb_orig = *pskb;
6701
6702 PHLTX_LOG;
6703
6704 //rtw_phl_tx todo, BR EXT
6705 if (core_br_client_tx(padapter, pxframe, pskb) == FAIL)
6706 return FAIL;
6707
6708 return SUCCESS;
6709 }
6710
6711 s32 core_tx_update_xmitframe(_adapter *padapter,
6712 struct xmit_frame *pxframe, struct sk_buff **pskb, struct sta_info *psta, u8 type)
6713 {
6714 pxframe->xftype = type;
6715 pxframe->pkt = *pskb;
6716
6717 PHLTX_LOG;
6718
6719 #if 1
6720 if (pxframe->xftype == RTW_TX_OS) {
6721 if (update_attrib(padapter, *pskb, &pxframe->attrib) != _SUCCESS)
6722 return FAIL;
6723 }
6724 #else
6725 pxframe->pkt = *pskb;
6726
6727 if (update_xmitframe_from_hdr(padapter, pxframe) == FAIL)
6728 return FAIL;
6729
6730 PHLTX_LOG;
6731
6732 if (update_xmitframe_qos(padapter, pxframe) == FAIL)
6733 return FAIL;
6734
6735 PHLTX_LOG;
6736
6737 if (update_xmitframe_security(padapter, pxframe) == FAIL)
6738 return FAIL;
6739
6740 PHLTX_LOG;
6741
6742 //if (update_xmitframe_hw(padapter, pxframe) == FAIL)
6743 //return FAIL;
6744
6745 PHLTX_LOG;
6746
6747 if (pxframe->xftype == RTW_TX_OS) {
6748 if (pxframe->attrib.bswenc
6749 && (skb_shared(*pskb) || skb_cloned(*pskb))
6750 && (rtw_core_replace_skb(pskb, RTW_MAX_WL_HEAD, RTW_MAX_WL_TAIL) == FAIL))
6751 return FAIL;
6752 }
6753 #endif
6754
6755 PHLTX_LOG;
6756
6757 return SUCCESS;
6758 }
6759
6760
6761
6762 void get_wl_frag_paras(_adapter *padapter, struct xmit_frame *pxframe,
6763 u32 *frag_perfr, u32 *wl_frags)
6764 {
6765 u32 wl_head, wl_tail, payload_totalsz, payload_fragsz, wl_frag_num;
6766 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6767
6768 wl_head = wl_tail = payload_totalsz = 0;
6769
6770 wl_head += pxframe->attrib.hdrlen;
6771 wl_tail += RTW_SZ_FCS;
6772 if (pxframe->attrib.encrypt) {
6773 wl_head += pxframe->attrib.iv_len;
6774 wl_tail += pxframe->attrib.icv_len;
6775 }
6776
6777 payload_fragsz = pxmitpriv->frag_len - wl_head - wl_tail;
6778
6779 payload_totalsz = pxframe->attrib.pktlen;
6780 if (pxframe->xftype == RTW_TX_OS)
6781 payload_totalsz += RTW_SZ_LLC;
6782 if (pxframe->attrib.encrypt == _TKIP_)
6783 payload_totalsz += RTW_TKIP_MIC_LEN;
6784
6785 if (pxframe->attrib.amsdu)
6786 wl_frag_num = 1;
6787 else if (payload_fragsz < payload_totalsz)
6788 wl_frag_num = RTW_DIV_ROUND_UP(payload_totalsz, payload_fragsz);
6789 else
6790 wl_frag_num = 1;
6791
6792 pxframe->attrib.frag_datalen = *frag_perfr = payload_fragsz;
6793 pxframe->attrib.nr_frags = *wl_frags = wl_frag_num;
6794 #ifdef CONFIG_CORE_TXSC
6795 pxframe->attrib.frag_len_txsc = payload_fragsz - (payload_totalsz - pxframe->attrib.pktlen);
6796 #endif
6797 }
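/*
 * Worked example for get_wl_frag_paras(), assuming RTW_SZ_FCS == 4,
 * RTW_SZ_LLC == 8 and illustrative attrib values: frag_len = 1600,
 * hdrlen = 26, iv_len = 8, icv_len = 8, CCMP (not TKIP), xftype == RTW_TX_OS,
 * pktlen = 3000, no A-MSDU:
 *
 *   wl_head         = 26 + 8                   = 34
 *   wl_tail         = 4 + 8                    = 12
 *   payload_fragsz  = 1600 - 34 - 12           = 1554
 *   payload_totalsz = 3000 + 8 (LLC)           = 3008
 *   wl_frag_num     = DIV_ROUND_UP(3008, 1554) = 2
 */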
6798
6799 u8 fill_txreq_pkt_perfrag_txos(struct _ADAPTER *padapter,
6800 struct xmit_frame *pxframe,
6801 u32 frag_perfr, u32 wl_frags)
6802 {
6803 struct rtw_xmit_req *xf_txreq = NULL;
6804 struct rtw_pkt_buf_list *pkt_list = NULL;
6805 struct sk_buff *skb = pxframe->pkt;
6806 u8 *txreq, *head, *tail, *list;
6807 u32 head_sz, tail_sz, wlan_tail;
6808 u32 payload_sz, payload_offset;
6809 u8 idx;
6810 u8 *wlhdr[RTW_MAX_FRAG_NUM] = {NULL};
6811 u8 *wltail[RTW_MAX_FRAG_NUM] = {NULL};
6812 /* TXREQ_QMGT */
6813 struct xmit_txreq_buf *txreq_buf = NULL;
6814
6815 PHLTX_ENTER;
6816
6817 //printk("pxframe->attrib.pkt_hdrlen=%d pxframe->attrib.hdrlen=%d pxframe->attrib.iv_len=%d \n", pxframe->attrib.pkt_hdrlen, pxframe->attrib.hdrlen, pxframe->attrib.iv_len);
6818
6819 pxframe->txreq_cnt = wl_frags;
6820
6821 head_sz = pxframe->attrib.hdrlen + (pxframe->attrib.amsdu ? 0 : RTW_SZ_LLC);
6822 tail_sz = 0;
6823 if (pxframe->attrib.encrypt) {
6824 head_sz += pxframe->attrib.iv_len;
6825 if (pxframe->attrib.encrypt == _TKIP_)
6826 tail_sz += RTW_TKIP_MIC_LEN;
6827 if (pxframe->attrib.bswenc)
6828 tail_sz += pxframe->attrib.icv_len;
6829 }
6830
6831 PHLTX_LOG;
6832
6833 //get_txreq_resources(padapter, pxframe, &txreq, &list, &head, &tail);
6834 /* TXREQ_QMGT */
6835 txreq_buf = (struct xmit_txreq_buf *)get_txreq_buffer(padapter, &txreq, &list, &head, &tail);
6836 if (txreq_buf == NULL) {
6837 //do this in core_tx_init_xmitframe
6838 //pxframe->phl_txreq = NULL;
6839 //pxframe->ptxreq_buf = NULL;
6840
6841 //free in rtw_core_tx
6842 //pxframe->pkt = NULL;//for not recycle in abort_core_tx
6843 goto fail;
6844 }
6845 #ifdef USE_PREV_WLHDR_BUF /* CONFIG_CORE_TXSC */
6846 txreq_buf->macid = 0xff;
6847 txreq_buf->txsc_id = 0xff;
6848 #endif
6849 pxframe->ptxreq_buf = txreq_buf;
6850
6851 PHLTX_LOG;
6852
6853 #if 0
6854 payload = skb->data+pxframe->attrib.pkt_hdrlen;
6855 printk("num_txreq=%d, hw_head=%d, hw_tail=%d, list=0x%p\n",
6856 num_txreq, hw_head, hw_tail, (void *)list);
6857
6858 printk("p:txreq=0x%p, head=0x%p, tail=0x%p, payload=0x%p\n",
6859 (void *)txreq, (void *)head, (void *)tail, (void *)payload);
6860 #endif
6861
6862 pxframe->phl_txreq = xf_txreq = (struct rtw_xmit_req *)txreq;
6863 pkt_list = (struct rtw_pkt_buf_list *)list;
6864 #ifdef CONFIG_CORE_TXSC
6865 xf_txreq->shortcut_id = 0;
6866 xf_txreq->treq_type = RTW_PHL_TREQ_TYPE_NORMAL;
6867 #endif
6868
6869 PHLTX_LOG;
6870
6871 /* move to first payload position */
6872 payload_offset = pxframe->attrib.pkt_hdrlen;
6873
6874 for (idx = 0; idx < wl_frags; idx++) {
6875 /* for no memset */
6876 xf_txreq->pkt_cnt = 0;
6877 xf_txreq->total_len = 0;
6878 xf_txreq->pkt_list = (u8 *)pkt_list;
6879
6880 /* fill head into txreq */
6881 wlhdr[idx] = head;
6882 pkt_list->vir_addr = head;
6883 pkt_list->length = head_sz;
6884 if (idx) {
6885 /* deduct LLC size if not first fragment */
6886 pkt_list->length -= RTW_SZ_LLC;
6887 }
6888 head += pkt_list->length;
6889 xf_txreq->pkt_cnt++;
6890 xf_txreq->total_len += pkt_list->length;
6891 pkt_list++;
6892
6893 /* fill payload into txreq */
6894 if (idx == (wl_frags - 1)) {
6895 /* last payload size */
6896 payload_sz = skb->len - payload_offset;
6897 } else if (idx == 0) {
6898 /* first payload size should deduct LLC size */
6899 payload_sz = frag_perfr - RTW_SZ_LLC;
6900 } else {
6901 payload_sz = frag_perfr;
6902 }
6903 /* xf_txreq is updated and pkt_list is advanced inside */
6904 fill_txreq_list_skb(padapter, xf_txreq, &pkt_list, skb,
6905 payload_sz, payload_offset);
6906 payload_offset += payload_sz;
6907
6908 /* fill tail(if alloc) into txreq */
6909 if (tail_sz) {
6910 wlan_tail = tail_sz;
6911 if ((pxframe->attrib.encrypt == _TKIP_) && (idx != (wl_frags - 1))) {
6912 /* deduct MIC size if not last fragment with TKIP */
6913 wlan_tail -= RTW_TKIP_MIC_LEN;
6914 }
6915 if (wlan_tail) {
6916 wltail[idx] = tail;
6917 pkt_list->vir_addr = tail;
6918 pkt_list->length = wlan_tail;
6919 tail += pkt_list->length;
6920 xf_txreq->pkt_cnt++;
6921 xf_txreq->total_len += pkt_list->length;
6922 pkt_list++;
6923 }
6924 }
6925
6926 if (xf_txreq->pkt_cnt > NUM_PKT_LIST_PER_TXREQ)
6927 RTW_WARN("xf_txreq->pkt_cnt=%d > NUM_PKT_LIST_PER_TXREQ\n",
6928 xf_txreq->pkt_cnt);
6929
6930 xf_txreq++;
6931 }
6932
6933 _rtw_memcpy(pxframe->wlhdr, wlhdr, sizeof(wlhdr));
6934 _rtw_memcpy(pxframe->wltail, wltail, sizeof(wltail));
6935 PHLTX_EXIT;
6936 return _SUCCESS;
6937
6938 fail:
6939 return _FAIL;
6940 }
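/*
 * Rough pkt_list layout produced per wlan fragment above (non-A-MSDU case;
 * the IV/ICV portions apply only when encryption is configured and the tail
 * entries exist only when the corresponding space was reserved):
 *
 *   frag 0       : [ hdr + IV + LLC ][ frag_perfr - LLC payload ][ sw ICV? ]
 *   middle frags : [ hdr + IV       ][ frag_perfr payload       ][ sw ICV? ]
 *   last frag    : [ hdr + IV       ][ remaining payload        ][ TKIP MIC? + sw ICV? ]
 */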
6941
6942 /* TXREQ_QMGT, MGT_TXREQ_QMGT */
6943 u8 fill_txreq_pkt_mgmt(_adapter *padapter, struct xmit_frame *pxframe)
6944 {
6945 struct rtw_xmit_req *xf_txreq = NULL;
6946 struct rtw_pkt_buf_list *pkt_list = NULL;
6947 //u8 *txreq, *head, *tail, *list, *mgmt = NULL;
6948
6949 PHLTX_ENTER;
6950
6951 if (!pxframe->phl_txreq)
6952 goto fail;
6953
6954 xf_txreq = pxframe->phl_txreq;
6955 pkt_list = (struct rtw_pkt_buf_list *)xf_txreq->pkt_list;
6956
6957 //get_txreq_resources(padapter, pxframe,
6958 // (u8 **)&xf_txreq, (u8 **)&pkt_list, NULL, NULL);
6959 //printk("p:txreq=0x%p, pkt_list=0x%p \n", (void *)xf_txreq, (void *)pkt_list);
6960
6961 //for no memset
6962 xf_txreq->pkt_cnt = 0;
6963 xf_txreq->total_len = 0;
6964 #ifdef CONFIG_CORE_TXSC
6965 xf_txreq->shortcut_id = 0;
6966 #endif
6967
6968 pkt_list->vir_addr = pxframe->buf_addr;
6969 pkt_list->length = pxframe->attrib.pktlen;
6970
6971 xf_txreq->pkt_cnt = 1;
6972 //xf_txreq->pkt_list = (u8 *)pkt_list;
6973 xf_txreq->treq_type = RTW_PHL_TREQ_TYPE_NORMAL;
6974
6975 pxframe->txreq_cnt = 1;
6976 //pxframe->phl_txreq = xf_txreq;
6977
6978 xf_txreq->total_len = xf_txreq->total_len + pxframe->attrib.pktlen;
6979 //RTW_INFO("%s,%d, xf_txreq->total_length = %d\n", __func__, __LINE__, xf_txreq->total_len);
6980
6981 #ifdef RTW_PHL_TEST_FPGA
6982 {
6983 struct rtw_ieee80211_hdr *p = (struct rtw_ieee80211_hdr *)pxframe->buf_addr;
6984
6985 test_seq++;
6986 test_seq = test_seq%0xFFF;
6987 SetSeqNum(p, test_seq);
6988 }
6989 #endif
6990
6991 exit:
6992 return _SUCCESS;
6993
6994 fail:
6995 return _FAIL;
6996 }
6997
6998 static u8 merge_txreq_to_one_piece(struct _ADAPTER *a,
6999 struct xmit_frame *xf)
7000 {
7001 struct rtw_xmit_req *txreq = NULL;
7002 struct rtw_pkt_buf_list *pkt_list = NULL;
7003 int i, j;
7004 u32 total_sz;
7005 u8 *buf, *ptr;
7006
7007
7008 for (i = 0; i < xf->txreq_cnt; i++) {
7009 txreq = &xf->phl_txreq[i];
7010 total_sz = txreq->total_len;
7011 buf = rtw_zmalloc(total_sz);
7012 if (!buf)
7013 return _FAIL;
7014 xf->buf_need_free |= BIT(i);
7015
7016 ptr = buf;
7017 for (j = 0; j < txreq->pkt_cnt; j++) {
7018 pkt_list = &((struct rtw_pkt_buf_list *)txreq->pkt_list)[j];
7019 _rtw_memcpy(ptr, pkt_list->vir_addr, pkt_list->length);
7020 ptr += pkt_list->length;
7021 }
7022 txreq->pkt_cnt = 1;
7023 pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
7024 pkt_list->vir_addr = buf;
7025 pkt_list->length = total_sz;
7026 }
7027
7028 return _SUCCESS;
7029 }
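/*
 * merge_txreq_to_one_piece() linearizes each txreq of the frame: the
 * scattered pkt_list entries are copied back-to-back into one rtw_zmalloc()
 * buffer and the list collapses to a single entry of total_len bytes.
 * xf->buf_need_free records per-txreq (as a bitmap) which buffers were
 * allocated here, presumably so they can be freed when the frame is
 * recycled.
 */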
7030
7031 #ifdef RTW_PHL_TEST_FPGA
7032 #define F_TX_MACID (0)
7033 #define F_TX_TID (1)
7034 #define F_TX_TYPE RTW_PHL_PKT_TYPE_DATA
7035 #define F_TX_RATE (0x8F) //HRATE_MCS15
7036 #define F_TX_BW (1)
7037 #define F_TX_DMACH (0)
7038 #endif
7039
7040 static u8 get_security_cam_id(struct _ADAPTER *padapter, struct xmit_frame *pxframe, u8 keyid)
7041 {
7042 struct dvobj_priv *d;
7043 void *phl;
7044 u8 sec_cam_id = 0;
7045 struct sta_priv *pstapriv = &padapter->stapriv;
7046 struct sta_info *sta;
7047 sint bmcast = IS_MCAST(pxframe->attrib.ra);
7048 struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
7049 WLAN_BSSID_EX *pbss_network = (WLAN_BSSID_EX *)&pmlmepriv->cur_network.network;
7050
7051 if (bmcast == _TRUE) {
7052 /* WEP: use unicast key type to match halmac rule (see: setkey_hdl) */
7053 if (pxframe->attrib.encrypt == _WEP40_ || pxframe->attrib.encrypt == _WEP104_)
7054 bmcast = _FALSE;
7055
7056 sta = rtw_get_stainfo(pstapriv, pbss_network->MacAddress);
7057 } else {
7058 sta = rtw_get_stainfo(pstapriv, pxframe->attrib.ra);
7059 }
7060
7061 if (!sta) {
7062 RTW_ERR("%s sta not found\n", __func__);
7063 rtw_warn_on(1);
7064 return sec_cam_id;
7065 }
7066
7067 d = adapter_to_dvobj(padapter);
7068 phl = GET_PHL_INFO(d);
7069
7070 if (keyid >= 4)
7071 sec_cam_id = rtw_phl_get_sec_cam_idx(phl, sta->phl_sta, keyid,
7072 RTW_SEC_KEY_BIP);
7073 else
7074 sec_cam_id = rtw_phl_get_sec_cam_idx(phl, sta->phl_sta, keyid,
7075 bmcast ? RTW_SEC_KEY_MULTICAST : RTW_SEC_KEY_UNICAST);
7076
7077 return sec_cam_id;
7078 }
7079
7080 /* Todo: HE rate mapping not ready */
7081 static const enum rtw_data_rate mrate2phlrate_tbl[] = {
7082 [MGN_1M] = RTW_DATA_RATE_CCK1,
7083 [MGN_2M] = RTW_DATA_RATE_CCK2,
7084 [MGN_5_5M] = RTW_DATA_RATE_CCK5_5,
7085 [MGN_11M] = RTW_DATA_RATE_CCK11,
7086 [MGN_6M] = RTW_DATA_RATE_OFDM6,
7087 [MGN_9M] = RTW_DATA_RATE_OFDM9,
7088 [MGN_12M] = RTW_DATA_RATE_OFDM12,
7089 [MGN_18M] = RTW_DATA_RATE_OFDM18,
7090 [MGN_24M] = RTW_DATA_RATE_OFDM24,
7091 [MGN_36M] = RTW_DATA_RATE_OFDM36,
7092 [MGN_48M] = RTW_DATA_RATE_OFDM48,
7093 [MGN_54M] = RTW_DATA_RATE_OFDM54,
7094 [MGN_MCS0] = RTW_DATA_RATE_MCS0,
7095 [MGN_MCS1] = RTW_DATA_RATE_MCS1,
7096 [MGN_MCS2] = RTW_DATA_RATE_MCS2,
7097 [MGN_MCS3] = RTW_DATA_RATE_MCS3,
7098 [MGN_MCS4] = RTW_DATA_RATE_MCS4,
7099 [MGN_MCS5] = RTW_DATA_RATE_MCS5,
7100 [MGN_MCS6] = RTW_DATA_RATE_MCS6,
7101 [MGN_MCS7] = RTW_DATA_RATE_MCS7,
7102 [MGN_MCS8] = RTW_DATA_RATE_MCS8,
7103 [MGN_MCS9] = RTW_DATA_RATE_MCS9,
7104 [MGN_MCS10] = RTW_DATA_RATE_MCS10,
7105 [MGN_MCS11] = RTW_DATA_RATE_MCS11,
7106 [MGN_MCS12] = RTW_DATA_RATE_MCS12,
7107 [MGN_MCS13] = RTW_DATA_RATE_MCS13,
7108 [MGN_MCS14] = RTW_DATA_RATE_MCS14,
7109 [MGN_MCS15] = RTW_DATA_RATE_MCS15,
7110 [MGN_MCS16] = RTW_DATA_RATE_MCS16,
7111 [MGN_MCS17] = RTW_DATA_RATE_MCS17,
7112 [MGN_MCS18] = RTW_DATA_RATE_MCS18,
7113 [MGN_MCS19] = RTW_DATA_RATE_MCS19,
7114 [MGN_MCS20] = RTW_DATA_RATE_MCS20,
7115 [MGN_MCS21] = RTW_DATA_RATE_MCS21,
7116 [MGN_MCS22] = RTW_DATA_RATE_MCS22,
7117 [MGN_MCS23] = RTW_DATA_RATE_MCS23,
7118 [MGN_MCS24] = RTW_DATA_RATE_MCS24,
7119 [MGN_MCS25] = RTW_DATA_RATE_MCS25,
7120 [MGN_MCS26] = RTW_DATA_RATE_MCS26,
7121 [MGN_MCS27] = RTW_DATA_RATE_MCS27,
7122 [MGN_MCS28] = RTW_DATA_RATE_MCS28,
7123 [MGN_MCS29] = RTW_DATA_RATE_MCS29,
7124 [MGN_MCS30] = RTW_DATA_RATE_MCS30,
7125 [MGN_MCS31] = RTW_DATA_RATE_MCS31,
7126 [MGN_VHT1SS_MCS0] = RTW_DATA_RATE_VHT_NSS1_MCS0,
7127 [MGN_VHT1SS_MCS1] = RTW_DATA_RATE_VHT_NSS1_MCS1,
7128 [MGN_VHT1SS_MCS2] = RTW_DATA_RATE_VHT_NSS1_MCS2,
7129 [MGN_VHT1SS_MCS3] = RTW_DATA_RATE_VHT_NSS1_MCS3,
7130 [MGN_VHT1SS_MCS4] = RTW_DATA_RATE_VHT_NSS1_MCS4,
7131 [MGN_VHT1SS_MCS5] = RTW_DATA_RATE_VHT_NSS1_MCS5,
7132 [MGN_VHT1SS_MCS6] = RTW_DATA_RATE_VHT_NSS1_MCS6,
7133 [MGN_VHT1SS_MCS7] = RTW_DATA_RATE_VHT_NSS1_MCS7,
7134 [MGN_VHT1SS_MCS8] = RTW_DATA_RATE_VHT_NSS1_MCS8,
7135 [MGN_VHT1SS_MCS9] = RTW_DATA_RATE_VHT_NSS1_MCS9,
7136 [MGN_VHT2SS_MCS0] = RTW_DATA_RATE_VHT_NSS2_MCS0,
7137 [MGN_VHT2SS_MCS1] = RTW_DATA_RATE_VHT_NSS2_MCS1,
7138 [MGN_VHT2SS_MCS2] = RTW_DATA_RATE_VHT_NSS2_MCS2,
7139 [MGN_VHT2SS_MCS3] = RTW_DATA_RATE_VHT_NSS2_MCS3,
7140 [MGN_VHT2SS_MCS4] = RTW_DATA_RATE_VHT_NSS2_MCS4,
7141 [MGN_VHT2SS_MCS5] = RTW_DATA_RATE_VHT_NSS2_MCS5,
7142 [MGN_VHT2SS_MCS6] = RTW_DATA_RATE_VHT_NSS2_MCS6,
7143 [MGN_VHT2SS_MCS7] = RTW_DATA_RATE_VHT_NSS2_MCS7,
7144 [MGN_VHT2SS_MCS8] = RTW_DATA_RATE_VHT_NSS2_MCS8,
7145 [MGN_VHT2SS_MCS9] = RTW_DATA_RATE_VHT_NSS2_MCS9,
7146 [MGN_VHT3SS_MCS0] = RTW_DATA_RATE_VHT_NSS3_MCS0,
7147 [MGN_VHT3SS_MCS1] = RTW_DATA_RATE_VHT_NSS3_MCS1,
7148 [MGN_VHT3SS_MCS2] = RTW_DATA_RATE_VHT_NSS3_MCS2,
7149 [MGN_VHT3SS_MCS3] = RTW_DATA_RATE_VHT_NSS3_MCS3,
7150 [MGN_VHT3SS_MCS4] = RTW_DATA_RATE_VHT_NSS3_MCS4,
7151 [MGN_VHT3SS_MCS5] = RTW_DATA_RATE_VHT_NSS3_MCS5,
7152 [MGN_VHT3SS_MCS6] = RTW_DATA_RATE_VHT_NSS3_MCS6,
7153 [MGN_VHT3SS_MCS7] = RTW_DATA_RATE_VHT_NSS3_MCS7,
7154 [MGN_VHT3SS_MCS8] = RTW_DATA_RATE_VHT_NSS3_MCS8,
7155 [MGN_VHT3SS_MCS9] = RTW_DATA_RATE_VHT_NSS3_MCS9,
7156 [MGN_VHT4SS_MCS0] = RTW_DATA_RATE_VHT_NSS4_MCS0,
7157 [MGN_VHT4SS_MCS1] = RTW_DATA_RATE_VHT_NSS4_MCS1,
7158 [MGN_VHT4SS_MCS2] = RTW_DATA_RATE_VHT_NSS4_MCS2,
7159 [MGN_VHT4SS_MCS3] = RTW_DATA_RATE_VHT_NSS4_MCS3,
7160 [MGN_VHT4SS_MCS4] = RTW_DATA_RATE_VHT_NSS4_MCS4,
7161 [MGN_VHT4SS_MCS5] = RTW_DATA_RATE_VHT_NSS4_MCS5,
7162 [MGN_VHT4SS_MCS6] = RTW_DATA_RATE_VHT_NSS4_MCS6,
7163 [MGN_VHT4SS_MCS7] = RTW_DATA_RATE_VHT_NSS4_MCS7,
7164 [MGN_VHT4SS_MCS8] = RTW_DATA_RATE_VHT_NSS4_MCS8,
7165 [MGN_VHT4SS_MCS9] = RTW_DATA_RATE_VHT_NSS4_MCS9,
7166 [MGN_HE1SS_MCS0] = RTW_DATA_RATE_HE_NSS1_MCS0,
7167 [MGN_HE1SS_MCS1] = RTW_DATA_RATE_HE_NSS1_MCS1,
7168 [MGN_HE1SS_MCS2] = RTW_DATA_RATE_HE_NSS1_MCS2,
7169 [MGN_HE1SS_MCS3] = RTW_DATA_RATE_HE_NSS1_MCS3,
7170 [MGN_HE1SS_MCS4] = RTW_DATA_RATE_HE_NSS1_MCS4,
7171 [MGN_HE1SS_MCS5] = RTW_DATA_RATE_HE_NSS1_MCS5,
7172 [MGN_HE1SS_MCS6] = RTW_DATA_RATE_HE_NSS1_MCS6,
7173 [MGN_HE1SS_MCS7] = RTW_DATA_RATE_HE_NSS1_MCS7,
7174 [MGN_HE1SS_MCS8] = RTW_DATA_RATE_HE_NSS1_MCS8,
7175 [MGN_HE1SS_MCS9] = RTW_DATA_RATE_HE_NSS1_MCS9,
7176 [MGN_HE1SS_MCS10] = RTW_DATA_RATE_HE_NSS1_MCS10,
7177 [MGN_HE1SS_MCS11] = RTW_DATA_RATE_HE_NSS1_MCS11,
7178 [MGN_HE2SS_MCS0] = RTW_DATA_RATE_HE_NSS2_MCS0,
7179 [MGN_HE2SS_MCS1] = RTW_DATA_RATE_HE_NSS2_MCS1,
7180 [MGN_HE2SS_MCS2] = RTW_DATA_RATE_HE_NSS2_MCS2,
7181 [MGN_HE2SS_MCS3] = RTW_DATA_RATE_HE_NSS2_MCS3,
7182 [MGN_HE2SS_MCS4] = RTW_DATA_RATE_HE_NSS2_MCS4,
7183 [MGN_HE2SS_MCS5] = RTW_DATA_RATE_HE_NSS2_MCS5,
7184 [MGN_HE2SS_MCS6] = RTW_DATA_RATE_HE_NSS2_MCS6,
7185 [MGN_HE2SS_MCS7] = RTW_DATA_RATE_HE_NSS2_MCS7,
7186 [MGN_HE2SS_MCS8] = RTW_DATA_RATE_HE_NSS2_MCS8,
7187 [MGN_HE2SS_MCS9] = RTW_DATA_RATE_HE_NSS2_MCS9,
7188 [MGN_HE2SS_MCS10] = RTW_DATA_RATE_HE_NSS2_MCS10,
7189 [MGN_HE2SS_MCS11] = RTW_DATA_RATE_HE_NSS2_MCS11,
7190 [MGN_HE3SS_MCS0] = RTW_DATA_RATE_HE_NSS3_MCS0,
7191 [MGN_HE3SS_MCS1] = RTW_DATA_RATE_HE_NSS3_MCS1,
7192 [MGN_HE3SS_MCS2] = RTW_DATA_RATE_HE_NSS3_MCS2,
7193 [MGN_HE3SS_MCS3] = RTW_DATA_RATE_HE_NSS3_MCS3,
7194 [MGN_HE3SS_MCS4] = RTW_DATA_RATE_HE_NSS3_MCS4,
7195 [MGN_HE3SS_MCS5] = RTW_DATA_RATE_HE_NSS3_MCS5,
7196 [MGN_HE3SS_MCS6] = RTW_DATA_RATE_HE_NSS3_MCS6,
7197 [MGN_HE3SS_MCS7] = RTW_DATA_RATE_HE_NSS3_MCS7,
7198 [MGN_HE3SS_MCS8] = RTW_DATA_RATE_HE_NSS3_MCS8,
7199 [MGN_HE3SS_MCS9] = RTW_DATA_RATE_HE_NSS3_MCS9,
7200 [MGN_HE3SS_MCS10] = RTW_DATA_RATE_HE_NSS3_MCS10,
7201 [MGN_HE3SS_MCS11] = RTW_DATA_RATE_HE_NSS3_MCS11,
7202 [MGN_HE4SS_MCS0] = RTW_DATA_RATE_HE_NSS4_MCS0,
7203 [MGN_HE4SS_MCS1] = RTW_DATA_RATE_HE_NSS4_MCS1,
7204 [MGN_HE4SS_MCS2] = RTW_DATA_RATE_HE_NSS4_MCS2,
7205 [MGN_HE4SS_MCS3] = RTW_DATA_RATE_HE_NSS4_MCS3,
7206 [MGN_HE4SS_MCS4] = RTW_DATA_RATE_HE_NSS4_MCS4,
7207 [MGN_HE4SS_MCS5] = RTW_DATA_RATE_HE_NSS4_MCS5,
7208 [MGN_HE4SS_MCS6] = RTW_DATA_RATE_HE_NSS4_MCS6,
7209 [MGN_HE4SS_MCS7] = RTW_DATA_RATE_HE_NSS4_MCS7,
7210 [MGN_HE4SS_MCS8] = RTW_DATA_RATE_HE_NSS4_MCS8,
7211 [MGN_HE4SS_MCS9] = RTW_DATA_RATE_HE_NSS4_MCS9,
7212 [MGN_HE4SS_MCS10] = RTW_DATA_RATE_HE_NSS4_MCS10,
7213 [MGN_HE4SS_MCS11] = RTW_DATA_RATE_HE_NSS4_MCS11,
7214 };
7215
7216 /*
7217 * _rate_mrate2phl() - convert data rate from mrate to PHL(MAC)
7218 * @sta: struct sta_info *
7219 * @mrate: data rate of MGN_RATE type, enum MGN_RATE
7220 *
7221 * Convert data rate from MGN_RATE definition to PHL's definition.
7222 *
7223 * Return PHL's data rate definition "enum rtw_data_rate".
7224 * 0x0~0xB: CCK 1M ~ OFDM 54M
7225 * 0x80~0x9F: HT MCS0~MCS31
7226 * 0x100~0x109: VHT 1SS MCS0~MCS9
7227 * 0x110~0x119: VHT 2SS MCS0~MCS9
7228 * 0x120~0x129: VHT 3SS MCS0~MCS9
7229 * 0x130~0x139: VHT 4SS MCS0~MCS9
7230 * 0x180~0x18B: HE 1SS MCS0~MCS11
7231 * 0x190~0x19B: HE 2SS MCS0~MCS11
7232 * 0x1A0~0x1AB: HE 3SS MCS0~MCS11
7233 * 0x1B0~0x1BB: HE 4SS MCS0~MCS11
7234 */
7235 static enum rtw_data_rate _rate_mrate2phl(enum MGN_RATE mrate)
7236 {
7237 enum rtw_data_rate phl = RTW_DATA_RATE_CCK1;
7238
7239
7240 if (mrate < ARRAY_SIZE(mrate2phlrate_tbl))
7241 phl = mrate2phlrate_tbl[mrate];
7242
7243 if ((mrate != MGN_1M) && (phl == RTW_DATA_RATE_CCK1))
7244 RTW_WARN("%s: Invalid rate 0x%x\n", __func__, mrate);
7245
7246 return phl;
7247 }
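/*
 * Example: _rate_mrate2phl(MGN_VHT2SS_MCS7) returns
 * RTW_DATA_RATE_VHT_NSS2_MCS7 via the table above; an MGN_RATE value that is
 * missing from (or beyond) the table falls back to RTW_DATA_RATE_CCK1, the
 * zero-initialized default, and triggers the "Invalid rate" warning.
 */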
7248
7249 /*
7250 * _rate_drv2phl() - convert data rate from driver to PHL(MAC)
7251 * @sta: struct sta_info *
7252 * @rate: data rate in the driver's representation
7253 * 0x0~0xB: CCK 1M ~ OFDM 54M
7254 * >0xB: HT/VHT/HE use the same bits field to represent each
7255 * data rate, so the real meaning of these bits depends on
7256 * the sta's wireless mode.
7257 *
7258 * Convert driver's data rate definition to PHL's definition.
7259 *
7260 * Return PHL's data rate definition "enum rtw_data_rate".
7261 */
7262 static enum rtw_data_rate _rate_drv2phl(struct sta_info *sta, u8 rate)
7263 {
7264 enum rtw_data_rate phl = RTW_DATA_RATE_CCK1;
7265 u8 ht_support = 0, vht_support = 0, he_support = 0;
7266
7267
7268 if (rate < 12) {
7269 /* B/G mode, CCK/OFDM rate */
7270 return (enum rtw_data_rate)rate;
7271 }
7272
7273 #ifdef CONFIG_80211N_HT
7274 if (sta->htpriv.ht_option == _TRUE)
7275 ht_support = 1;
7276 #ifdef CONFIG_80211AC_VHT
7277 if (sta->vhtpriv.vht_option == _TRUE)
7278 vht_support = 1;
7279 #ifdef CONFIG_80211AX_HE
7280 if (sta->hepriv.he_option == _TRUE)
7281 he_support = 1;
7282 #endif /* CONFIG_80211AX_HE */
7283 #endif /* CONFIG_80211AC_VHT */
7284 #endif /* CONFIG_80211N_HT */
7285
7286 rate -= 12;
7287 if (he_support) {
7288 if (rate < 12)
7289 phl = RTW_DATA_RATE_HE_NSS1_MCS0 + rate;
7290 else if (rate < 24)
7291 phl = RTW_DATA_RATE_HE_NSS2_MCS0 + (rate - 12);
7292 else if (rate < 36)
7293 phl = RTW_DATA_RATE_HE_NSS3_MCS0 + (rate - 24);
7294 else
7295 phl = RTW_DATA_RATE_HE_NSS4_MCS0 + (rate - 36);
7296 } else if (vht_support) {
7297 if (rate < 10)
7298 phl = RTW_DATA_RATE_VHT_NSS1_MCS0 + rate;
7299 else if (rate < 20)
7300 phl = RTW_DATA_RATE_VHT_NSS2_MCS0 + (rate - 10);
7301 else if (rate < 30)
7302 phl = RTW_DATA_RATE_VHT_NSS3_MCS0 + (rate - 20);
7303 else
7304 phl = RTW_DATA_RATE_VHT_NSS4_MCS0 + (rate - 30);
7305 } else if (ht_support) {
7306 phl = RTW_DATA_RATE_MCS0 + rate;
7307 }
7308
7309 return phl;
7310 }
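/*
 * Illustrative sketch, not part of the build: the driver rate index is rebased
 * by 12 and then split into groups of 10 (VHT) or 12 (HE) MCS values per
 * spatial stream.  For a VHT STA, e.g., driver rate 25 becomes 25 - 12 = 13,
 * i.e. RTW_DATA_RATE_VHT_NSS2_MCS0 + 3 (2SS MCS3).
 */
#if 0
static void example_drv2phl(struct sta_info *sta)
{
	/* assumes 'sta' has vht_option set, i.e. a VHT peer */
	enum rtw_data_rate r = _rate_drv2phl(sta, 25);

	RTW_INFO("drv rate 25 -> PHL rate 0x%x (VHT 2SS MCS3)\n", r);
}
#endif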
7311
7312 void fill_txreq_mdata(_adapter *padapter, struct xmit_frame *pxframe)
7313 {
7314 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
7315 struct sta_info *psta = pxframe->attrib.psta;
7316 struct rtw_phl_stainfo_t *phl_sta = NULL;
7317 struct rtw_t_meta_data *mdata = &(txreq->mdata);
7318 #ifdef BMC_ON_HIQ
7319 struct sta_priv *pstapriv = &padapter->stapriv;
7320 #endif
7321 u32 idx = 0;
7322 u8 htc_option = _FALSE;
7323 #ifdef CONFIG_XMIT_ACK
7324 struct xmit_priv *pxmitpriv = &(GET_PRIMARY_ADAPTER(padapter))->xmitpriv;
7325 #endif
7326
7327 PHLTX_LOG;
7328
7329 if (pxframe->attrib.order)
7330 htc_option = _TRUE;
7331
7332 /* packet identify */
7333 if (pxframe->xftype == RTW_TX_DRV_MGMT)
7334 mdata->type = RTW_PHL_PKT_TYPE_MGNT;
7335 else
7336 mdata->type = RTW_PHL_PKT_TYPE_DATA;
7337
7338 mdata->macid = pxframe->attrib.mac_id;
7339
7340 /* enable wd info by default */
7341 mdata->wdinfo_en = 1;
7342
7343 /* packet content */
7344 mdata->hdr_len = pxframe->attrib.hdrlen;
7345 mdata->hw_seq_mode = 0;
7346 mdata->sw_seq = pxframe->attrib.seqnum;
7347 mdata->hw_sec_iv = 0;
7348 mdata->nav_use_hdr = 0;
7349
7350 /* packet security */
7351 if (pxframe->attrib.encrypt == _NO_PRIVACY_ || pxframe->attrib.bswenc == _TRUE) {
7352 mdata->sec_hw_enc = _FALSE;
7353 mdata->sec_type = RTW_ENC_NONE;
7354 } else {
7355 mdata->sec_hw_enc = _TRUE;
7356 mdata->sec_type = rtw_sec_algo_drv2phl(pxframe->attrib.encrypt);
7357 mdata->sec_cam_idx = get_security_cam_id(padapter, pxframe, pxframe->attrib.key_idx);
7358 }
7359 	/* Currently dump security settings for debugging */
7360 RTW_DBG("sec_type= %d sec_hw_enc= %d sec_cam_idx= %d \n",
7361 mdata->sec_type, mdata->sec_hw_enc, mdata->sec_cam_idx);
7362
7363 /* packet capability */
7364 if (pxframe->attrib.ampdu_en == _TRUE) {
7365 mdata->ampdu_en = 1;
7366 mdata->bk = 0;
7367 mdata->ampdu_density = pxframe->attrib.ampdu_spacing;
7368 		mdata->max_agg_num = 0x3F; /* temporarily fixed to 64 */
7369 } else {
7370 mdata->ampdu_en = 0;
7371 mdata->bk = 1;
7372 }
7373 mdata->dis_data_rate_fb = 0;
7374 mdata->dis_rts_rate_fb = 0;
7375 mdata->data_tx_cnt_lmt_en = 0;
7376 mdata->data_tx_cnt_lmt = 0;
7377
7378 /* Set DATA_RTY_LOWEST_RATE: 2.4G to CCK1M & 5G to OFDM6M */
7379 if (rtw_get_oper_band(padapter) == BAND_ON_24G)
7380 mdata->data_rty_lowest_rate = RTW_DATA_RATE_CCK1;
7381 else if (rtw_get_oper_band(padapter) == BAND_ON_5G)
7382 mdata->data_rty_lowest_rate = RTW_DATA_RATE_OFDM6;
7383 else
7384 RTW_WARN("%s: mdata->data_rty_lowest_rate is not set.\n", __func__);
7385
7386 mdata->life_time_sel = 0;
7387 mdata->rts_en = pxframe->attrib.rtsen;
7388 mdata->cts2self = pxframe->attrib.cts2self;
7389 mdata->hw_rts_en = pxframe->attrib.hw_rts_en;
7390 mdata->rts_cca_mode = 0;
7391
7392 mdata->f_bw = pxframe->attrib.bwmode;
7393 /* Todo: GI and LTF not ready for HE */
7394 mdata->f_gi_ltf = pxframe->attrib.sgi;
7395
7396 mdata->mc = IS_MCAST(pxframe->attrib.ra) ? 1 : 0;
7397 mdata->bc = MacAddr_isBcst(pxframe->attrib.ra) ? 1 : 0;
7398
7399 #ifdef CONFIG_80211AX_HE
7400 if (psta && htc_option)
7401 mdata->a_ctrl_bsr = 1;
7402 #endif
7403 mdata->raw = 0;
7404
7405 #ifdef BMC_ON_HIQ
7406 if ((pxframe->xftype != RTW_TX_DRV_MGMT)
7407 && (mdata->mc || mdata->bc)
7408 && (rtw_tim_map_anyone_be_set(padapter, pstapriv->sta_dz_bitmap))) {
7409 mdata->tid = RTW_PHL_RING_CAT_HIQ; /* HIQ */
7410 mdata->mbssid = 0; /* ToDo: Consider MBSSID */
7411 mdata->hal_port = padapter->phl_role->hw_port;
7412 } else
7413 #endif
7414 {
7415 mdata->tid = pxframe->attrib.priority;
7416 }
7417
7418 #ifdef CONFIG_CORE_TXSC
7419 mdata->ampdu_density = 0;
7420 mdata->userate_sel = 0;
7421 #endif
7422
7423 if (pxframe->xftype == RTW_TX_DRV_MGMT) {
7424 mdata->userate_sel = 1;
7425 mdata->f_rate = _rate_mrate2phl(pxframe->attrib.rate);
7426 } else {
7427 /* low rate for EAPOL/ARP/DHCP */
7428 if ((pxframe->attrib.ether_type == 0x888e) ||
7429 (pxframe->attrib.ether_type == 0x0806) ||
7430 (pxframe->attrib.dhcp_pkt == 1)) {
7431
7432 mdata->userate_sel = 1;
7433 if (IS_CCK_RATE(padapter->mlmeextpriv.tx_rate))
7434 mdata->f_rate = RTW_DATA_RATE_CCK1;
7435 else
7436 mdata->f_rate = RTW_DATA_RATE_OFDM6;
7437 } else {
7438 			/* fixed rate for non-special packets */
7439 if (padapter->fix_rate != NO_FIX_RATE) {
7440 mdata->userate_sel = 1;
7441 mdata->f_rate = GET_FIX_RATE(padapter->fix_rate);
7442 mdata->f_gi_ltf = GET_FIX_RATE_SGI(padapter->fix_rate);
7443 if (!padapter->data_fb)
7444 mdata->dis_data_rate_fb = 1;
7445 } else {
7446 mdata->userate_sel = 0;
7447 }
7448
7449 if (padapter->fix_bw != NO_FIX_BW)
7450 mdata->f_bw = padapter->fix_bw;
7451 }
7452 }
7453 mdata->f_er = 0;
7454 mdata->f_dcm = 0;
7455 mdata->f_stbc = pxframe->attrib.stbc;
7456 mdata->f_ldpc = pxframe->attrib.ldpc;
7457
7458 mdata->band = 0;
7459 mdata->dma_ch = 0;
7460 mdata->spe_rpt = 0;
7461 mdata->sw_define = 0;
7462
7463 #ifdef CONFIG_XMIT_ACK
7464 if (pxframe->ack_report) {
7465 mdata->spe_rpt = 1;
7466 mdata->sw_define = pxmitpriv->seq_no;
7467 #ifdef RTW_WKARD_CCX_RPT_LIMIT_CTRL
7468 mdata->data_tx_cnt_lmt_en = 1;
7469 mdata->data_tx_cnt_lmt = 8;
7470 #endif
7471 }
7472 #endif
7473
7474 #ifdef CONFIG_CORE_TXSC
7475 mdata->pktlen = txreq->total_len;
7476 #endif
7477
7478 #ifdef RTW_PHL_TEST_FPGA
7479 mdata->type = F_TX_TYPE;
7480 mdata->macid = F_TX_MACID;
7481 mdata->tid = F_TX_TID;
7482 mdata->dma_ch = F_TX_DMACH;
7483 //mdata->band = cap->band;
7484 mdata->f_rate = F_TX_RATE;
7485 mdata->f_bw = F_TX_BW;
7486 mdata->f_gi_ltf = 0;
7487 mdata->f_stbc = 0;
7488 mdata->f_ldpc = 0;
7489
7490 mdata->userate_sel = 1;
7491 mdata->dis_data_rate_fb = 1;
7492 mdata->dis_rts_rate_fb = 1;
7493 #endif
7494
7495 #ifdef RTW_PHL_DBG_CMD
7496 if (pxframe->xftype != RTW_TX_DRV_MGMT) {
7497 if (padapter->txForce_enable) {
7498 if (padapter->txForce_rate != INV_TXFORCE_VAL)
7499 mdata->f_rate = padapter->txForce_rate;
7500 if (padapter->txForce_agg != INV_TXFORCE_VAL)
7501 mdata->ampdu_en = padapter->txForce_agg;
7502 if (padapter->txForce_aggnum != INV_TXFORCE_VAL)
7503 mdata->max_agg_num = padapter->txForce_aggnum;
7504 if (padapter->txForce_gi != INV_TXFORCE_VAL)
7505 mdata->f_gi_ltf = padapter->txForce_gi;
7506 }
7507 }
7508 #endif
7509
7510 #ifdef CONFIG_CORE_TXSC
7511 _print_txreq_mdata(mdata, __func__);
7512 #endif
7513
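	/*
	 * Descriptive note: when the frame is split into several fragments,
	 * only the first txreq's meta data is filled above; it is then copied
	 * to every remaining fragment's txreq so all fragments share the same
	 * TX settings.
	 */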
7514 if (pxframe->txreq_cnt > 1) {
7515 struct rtw_t_meta_data *mdata_tmp;
7516 txreq++;
7517 for (idx = 1; idx < pxframe->txreq_cnt; idx++) {
7518 #ifdef CONFIG_CORE_TXSC
7519 mdata->pktlen = txreq->total_len;
7520 #endif
7521 mdata_tmp = &(txreq->mdata);
7522 memcpy(mdata_tmp, mdata, sizeof(struct rtw_t_meta_data));
7523 txreq++;
7524 }
7525 }
7526
7527 }
7528
7529
7530 void fill_txreq_others(_adapter *padapter, struct xmit_frame *pxframe)
7531 {
7532 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
7533 u32 idx = 0;
7534
7535 PHLTX_ENTER;
7536
7537 for (idx = 0; idx < pxframe->txreq_cnt; idx++) {
7538 txreq->os_priv = pxframe;
7539 txreq++;
7540 }
7541 }
7542
7543 u8 core_wlan_fill_txreq_pre(_adapter *padapter, struct xmit_frame *pxframe)
7544 {
7545 u32 frag_perfr, wl_frags = 0;
7546
7547 if (pxframe->xftype == RTW_TX_OS) {
7548 get_wl_frag_paras(padapter, pxframe, &frag_perfr, &wl_frags);
7549 if (fill_txreq_pkt_perfrag_txos(padapter, pxframe, frag_perfr, wl_frags) == _FAIL)
7550 return _FAIL;
7551 } else if (pxframe->xftype == RTW_TX_DRV_MGMT) {
7552 if (fill_txreq_pkt_mgmt(padapter, pxframe) == _FAIL)
7553 return _FAIL;
7554 }
7555
7556 return _SUCCESS;
7557 }
7558
7559 u8 core_migrate_to_coherent_buf(_adapter *padapter, struct xmit_frame *pxframe)
7560 {
7561 #if defined(CONFIG_PCI_HCI) && defined(CONFIG_DMA_TX_USE_COHERENT_MEM)
7562 PPCI_DATA pci_data = dvobj_to_pci(padapter->dvobj);
7563 struct pci_dev *pdev = pci_data->ppcidev;
7564 struct rtw_xmit_req *tx_req = NULL;
7565 struct rtw_pkt_buf_list *pkt_frag = NULL;
7566 char *tx_data, *ptr;
7567 dma_addr_t phy_addr;
7568 int i, j;
7569
7570
7571 tx_req = pxframe->phl_txreq;
7572
7573 for (i = 0; i < pxframe->txreq_cnt; i++) {
7574
7575 tx_data = pci_alloc_noncache_mem(pdev, &phy_addr, tx_req->total_len);
7576 if (!tx_data) {
7577 RTW_WARN("%s: pci_alloc_noncache_mem fail\n", __func__);
7578 return _FAIL;
7579 }
7580
7581 ptr = tx_data;
7582 pkt_frag = (struct rtw_pkt_buf_list *)tx_req->pkt_list;
7583
7584 for (j = 0; j < tx_req->pkt_cnt; j++) {
7585
7586 if (!pkt_frag) {
7587 pci_free_noncache_mem(pdev, tx_data,
7588 (dma_addr_t *)&phy_addr, tx_req->total_len);
7589 return _FAIL;
7590 }
7591
7592 if (pkt_frag->vir_addr) {
7593 _rtw_memcpy(ptr, pkt_frag->vir_addr, pkt_frag->length);
7594 ptr += pkt_frag->length;
7595 }
7596
7597 if (pxframe->buf_need_free & BIT(j)) {
7598 pxframe->buf_need_free &= ~BIT(j);
7599 rtw_mfree(pkt_frag->vir_addr, pkt_frag->length);
7600 }
7601 pkt_frag++;
7602 }
7603 pxframe->attrib.nr_frags = 1;
7604
7605 tx_req->pkt_cnt = 1;
7606 pkt_frag = (struct rtw_pkt_buf_list *)tx_req->pkt_list;
7607 pkt_frag->length = tx_req->total_len;
7608 pkt_frag->vir_addr = tx_data;
7609 		pkt_frag->phy_addr_l = phy_addr & 0xFFFFFFFF;
7610 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
7611 pkt_frag->phy_addr_h = (u32)(phy_addr >> 32);
7612 #else
7613 pkt_frag->phy_addr_h = 0x0;
7614 #endif
7615 tx_req->cache = DMA_ADDR;
7616 tx_req++;
7617 }
7618
7619 if (pxframe->pkt) {
7620 rtw_os_pkt_complete(padapter, pxframe->pkt);
7621 pxframe->pkt = NULL;
7622 }
7623
7624 #endif /* CONFIG_PCI_HCI && CONFIG_DMA_TX_USE_COHERENT_MEM */
7625 return _SUCCESS;
7626 }
7627
7628 void core_wlan_fill_txreq_post(_adapter *padapter, struct xmit_frame *pxframe)
7629 {
7630 fill_txreq_mdata(padapter, pxframe);
7631 fill_txreq_others(padapter, pxframe);
7632
7633 #ifdef CONFIG_PCI_HCI
7634 	/* must be called after all packet contents are modified (cache sync) */
7635 fill_txreq_phyaddr(padapter, pxframe);
7636 #endif
7637
7638 }
7639
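/*
 * Descriptive note: for RTW_TX_OS frames this builds the 802.11 header of
 * every fragment, sets/clears the More-Fragment bit, copies the security IV
 * right after the header, and prepends the LLC/SNAP header to the first
 * fragment when the frame is not an A-MSDU.
 */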
7640 void core_wlan_fill_head(_adapter *padapter, struct xmit_frame *pxframe)
7641 {
7642 u32 idx = 0;
7643 if (pxframe->xftype == RTW_TX_OS) {
7644 for (idx = 0; idx < pxframe->attrib.nr_frags; idx++) {
7645 u8 *pwlanhdr = pxframe->wlhdr[idx];
7646
7647 if (!pwlanhdr) {
7648 PHLTX_ERR;
7649 continue;
7650 }
7651
7652 rtw_make_wlanhdr(padapter, pwlanhdr, &pxframe->attrib); //rtw_core_make_wlanhdr(padapter, pwlanhdr, pxframe);
7653
7654 if (idx == (pxframe->attrib.nr_frags - 1))
7655 ClearMFrag(pwlanhdr);
7656 else
7657 SetMFrag(pwlanhdr);
7658
7659 if (pxframe->attrib.iv_len) {
7660 update_attrib_sec_iv_info(padapter, &pxframe->attrib);
7661 _rtw_memcpy((pwlanhdr+pxframe->attrib.hdrlen), pxframe->attrib.iv, pxframe->attrib.iv_len);
7662 }
7663
7664 if (idx == 0 && !pxframe->attrib.amsdu) {
7665 /* Add LLC/SNAP to first fragment */
7666 rtw_put_snap(pwlanhdr+pxframe->attrib.hdrlen+pxframe->attrib.iv_len,
7667 pxframe->attrib.ether_type);
7668 }
7669
7670 #ifdef RTW_PHL_TEST_FPGA
7671 {
7672 struct rtw_ieee80211_hdr *p = (struct rtw_ieee80211_hdr *)pwlanhdr;
7673 unsigned short *fctrl;
7674 unsigned int pktlen = 0;
7675 u16 *qc;
7676
7677 test_seq++;
7678 test_seq = test_seq%0xFFF;
7679 SetSeqNum(p, test_seq);
7680 }
7681 #endif
7682
7683 }
7684 }
7685 }
7686
7687
7688 void core_wlan_fill_tail(_adapter *padapter, struct xmit_frame *pxframe)
7689 {
7690 ;
7691
7692 }
7693
7694
7695 u8 core_wlan_fill_tkip_mic(_adapter *padapter, struct xmit_frame *pxframe)
7696 {
7697 u8 *llc = NULL;
7698 u8 *payload = NULL;
7699 u8 mic[8] = {0x0};
7700 struct mic_data micdata;
7701 struct pkt_attrib *pattrib = &pxframe->attrib;
7702 struct security_priv *psecuritypriv = &padapter->securitypriv;
7703 s8 bmcst = IS_MCAST(pattrib->ra);
7704 u8 priority[4] = {0x0};
7705 int i = 0;
7706 struct rtw_xmit_req *xf_txreq = pxframe->phl_txreq;
7707 struct rtw_pkt_buf_list *pkt_list = NULL;
7708
7709 if (pattrib->encrypt == _TKIP_) {
7710 u8 null_key[16] = {0x0};
7711
7712 /* set TKIP MIC key */
7713 if (bmcst) {
7714 if (_rtw_memcmp(
7715 psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey,
7716 null_key, 16) == _TRUE)
7717 return _FAIL;
7718
7719 rtw_secmicsetkey(&micdata,
7720 psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
7721 } else {
7722 if (_rtw_memcmp(
7723 &pattrib->dot11tkiptxmickey.skey[0],
7724 null_key, 16) == _TRUE)
7725 return _FAIL;
7726
7727 rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
7728 }
7729
7730 /* set DA, SA */
7731 rtw_secmicappend(&micdata, &pattrib->dst[0], 6);
7732 rtw_secmicappend(&micdata, &pattrib->src[0], 6);
7733
7734 if (pattrib->qos_en)
7735 priority[0] = pattrib->priority;
7736
7737 /* set priority */
7738 rtw_secmicappend(&micdata, &priority[0], 4);
7739
7740 		/* set LLC; TBD: should check whether the LLC header exists */
7741 llc = pxframe->wlhdr[0] + pxframe->attrib.hdrlen + pxframe->attrib.iv_len;
7742 rtw_secmicappend(&micdata, llc, SNAP_SIZE + sizeof(u16));
7743
7744 /* set MSDU payload */
7745 pkt_list = (struct rtw_pkt_buf_list *) xf_txreq->pkt_list;
7746
7747 		/* skip the header entry and move to the payload */
7748 		pkt_list++;
7749 		/* the loop below skips the tail entry */
7750 for (i = 1; i < xf_txreq->pkt_cnt - 1; i++) {
7751 rtw_secmicappend(&micdata, pkt_list->vir_addr, pkt_list->length);
7752 pkt_list ++;
7753 }
7754
7755 /* calculate MIC */
7756 rtw_secgetmic(&micdata, &mic[0]);
7757
7758 /* append MIC to the last tail */
7759 _rtw_memcpy(pxframe->wltail[pxframe->attrib.nr_frags-1], &(mic[0]), 8);
7760 }
7761
7762 return _SUCCESS;
7763 }
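/*
 * Descriptive note (per the TKIP/Michael definition in IEEE 802.11): the MIC
 * computed above covers DA | SA | priority (1-byte TID padded with 3 zero
 * bytes) | LLC/SNAP | MSDU payload, and the resulting 8-byte MIC is appended
 * to the tail of the last fragment before encryption.
 */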
7764
7765
7766 static void core_wlan_sw_encrypt(_adapter *padapter, struct xmit_frame *pxframe)
7767 {
7768 struct pkt_attrib *attrib;
7769 u8 res;
7770
7771
7772 attrib = &pxframe->attrib;
7773 if (!attrib->encrypt)
7774 return;
7775 if (!attrib->bswenc)
7776 return;
7777
7778 /* convert txreq to one piece */
7779 res = merge_txreq_to_one_piece(padapter, pxframe);
7780 if (res != _SUCCESS) {
7781 		RTW_ERR("%s: failed to allocate buffer for SW encryption!\n", __func__);
7782 return;
7783 }
7784 xmitframe_swencrypt(padapter, pxframe);
7785 }
7786
7787 #ifdef CONFIG_TX_AMSDU_SW_MODE
7788 static bool core_tx_amsdu_timeout(_adapter *padapter, struct pkt_attrib *pattrib)
7789 {
7790 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7791 u8 amsdu_timeout;
7792
7793 amsdu_timeout = rtw_amsdu_get_timer_status(padapter, pattrib->priority);
7794
7795 if (amsdu_timeout == RTW_AMSDU_TIMER_UNSET) {
7796 rtw_amsdu_set_timer_status(padapter,
7797 pattrib->priority, RTW_AMSDU_TIMER_SETTING);
7798 rtw_amsdu_set_timer(padapter, pattrib->priority);
7799 pxmitpriv->amsdu_debug_set_timer++;
7800 return false;
7801 } else if (amsdu_timeout == RTW_AMSDU_TIMER_SETTING) {
7802 return false;
7803 } else if (amsdu_timeout == RTW_AMSDU_TIMER_TIMEOUT) {
7804 rtw_amsdu_set_timer_status(padapter,
7805 pattrib->priority, RTW_AMSDU_TIMER_UNSET);
7806 pxmitpriv->amsdu_debug_timeout++;
7807 return true;
7808 }
7809
7810 return false;
7811 }
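/*
 * Descriptive note: the per-priority A-MSDU timer walks
 * UNSET -> SETTING -> TIMEOUT.  The first call with too few queued MSDUs arms
 * the timer and returns false, calls made while the timer is still running
 * also return false, and once the timer has fired the state is reset to UNSET
 * and true is returned so the partially collected A-MSDU is flushed.
 */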
7812
7813 /* 'pxframes[]' is an array of size 'max_xf_nr' used to collect the xmit_frames
7814  * to aggregate into an A-MSDU; the return value is the number of entries actually
7815  * used. If the return value is 1, '*amsdu' tells whether to send a normal frame or an A-MSDU.
7816  */
7817 static int core_tx_amsdu_dequeue(_adapter *padapter, struct xmit_frame *pxframes[],
7818 int max_xf_nr, bool *amsdu)
7819 {
7820 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
7821 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7822 struct registry_priv *pregpriv = &padapter->registrypriv;
7823 struct xmit_frame *pxframe;
7824 int tx_amsdu = rtw_min(padapter->tx_amsdu, max_xf_nr);
7825 int tx_amsdu_rate = padapter->tx_amsdu_rate;
7826 int current_tx_rate = dvobj->traffic_stat.cur_tx_tp;
7827 int num_frame;
7828 int nr_xf;
7829
7830 if (tx_amsdu == 0)
7831 goto dequeue_normal_pkt;
7832
7833 if (!MLME_IS_STA(padapter))
7834 goto dequeue_normal_pkt;
7835
7836 if (tx_amsdu >= 2 && tx_amsdu_rate && current_tx_rate < tx_amsdu_rate)
7837 goto dequeue_normal_pkt;
7838
7839 	/* No A-MSDU when wifi_spec is on */
7840 if (pregpriv->wifi_spec == 1)
7841 goto dequeue_normal_pkt;
7842
7843 pxframe = rtw_get_xframe(pxmitpriv, &num_frame);
7844 if (num_frame == 0 || !pxframe)
7845 return 0;
7846
7847 	if (num_frame < tx_amsdu) { /* not enough MSDUs queued to build the requested A-MSDU */
7848 if (!core_tx_amsdu_timeout(padapter, &pxframe->attrib))
7849 return 0; /* Not timeout yet */
7850 }
7851
7852 for (nr_xf = 0; nr_xf < tx_amsdu; nr_xf++) {
7853 pxframe = rtw_get_xframe(pxmitpriv, &num_frame);
7854
7855 if (num_frame == 0 || !pxframe)
7856 break;
7857
7858 if (!check_amsdu(pxframe))
7859 break;
7860
7861 /* TODO: check if size is over peer's capability */
7862
7863 pxframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits,
7864 pxmitpriv->hwxmit_entry);
7865
7866 pxframes[nr_xf] = pxframe;
7867 }
7868
7869 if (nr_xf == 0) {
7870 if (num_frame > 0)
7871 goto dequeue_normal_pkt;
7872 RTW_WARN("%s: nr_xf=0, num_frame=%d\n", __func__, num_frame);
7873 return 0;
7874 }
7875
7876 if (nr_xf < AMSDU_DEBUG_MAX_COUNT)
7877 pxmitpriv->amsdu_debug_coalesce[nr_xf-1]++;
7878 else
7879 pxmitpriv->amsdu_debug_coalesce[AMSDU_DEBUG_MAX_COUNT-1]++;
7880
7881 *amsdu = (nr_xf == 1 && tx_amsdu >= 2) ? false : true;
7882
7883 return nr_xf;
7884
7885 dequeue_normal_pkt:
7886 pxframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits,
7887 pxmitpriv->hwxmit_entry);
7888 if (!pxframe)
7889 return 0;
7890
7891 pxframes[0] = pxframe;
7892 *amsdu = false;
7893
7894 return 1;
7895 }
7896
7897 static bool core_tx_amsdu_dump(_adapter *padapter, struct xmit_frame *pxframes[],
7898 int xf_nr, bool amsdu)
7899 {
7900 struct xmit_frame *head_xframe;
7901 struct pkt_attrib *head_attrib;
7902 u32 pktlen;
7903
7904 /* prepare head xmitframe */
7905 head_xframe = pxframes[0];
7906 head_attrib = &head_xframe->attrib;
7907
7908 if (xf_nr == 1 && !amsdu)
7909 goto dump_pkt;
7910
7911 rtw_coalesce_tx_amsdu(padapter, pxframes, xf_nr, amsdu, &pktlen);
7912
7913 /* update proper attribute */
7914 head_attrib->amsdu = 1;
7915 head_attrib->pkt_hdrlen = 0;
7916 head_attrib->pktlen = pktlen;
7917
7918 dump_pkt:
7919 if (core_tx_prepare_phl(padapter, head_xframe) == FAIL)
7920 goto abort_core_tx;
7921
7922 if (core_tx_call_phl(padapter, head_xframe, NULL) == FAIL)
7923 goto abort_core_tx;
7924
7925 return true;
7926
7927 abort_core_tx:
7928 core_tx_free_xmitframe(padapter, head_xframe);
7929
7930 return true;
7931 }
7932
7933 void core_tx_amsdu_tasklet(_adapter *padapter)
7934 {
7935 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7936 struct xmit_frame *pxframes[5];
7937 int xf_nr;
7938 bool amsdu;
7939
7940 pxmitpriv->amsdu_debug_tasklet++;
7941
7942 while (1) {
7943 xf_nr = core_tx_amsdu_dequeue(padapter, pxframes, ARRAY_SIZE(pxframes),
7944 &amsdu);
7945 if (xf_nr == 0)
7946 break;
7947
7948 pxmitpriv->amsdu_debug_dequeue++;
7949
7950 core_tx_amsdu_dump(padapter, pxframes, xf_nr, amsdu);
7951 }
7952 }
7953
7954 static s32 core_tx_amsdu_enqueue(_adapter *padapter, struct xmit_frame *pxframe)
7955 {
7956 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7957 struct pkt_attrib *pattrib = &pxframe->attrib;
7958 int tx_amsdu = padapter->tx_amsdu;
7959 u8 amsdu_timeout;
7960 s32 res;
7961
7962 if (MLME_IS_STA(padapter) && check_amsdu_tx_support(padapter)) {
7963 if (IS_AMSDU_AMPDU_VALID(pattrib))
7964 goto enqueue;
7965 }
7966
7967 return FAIL;
7968
7969 enqueue:
7970 _rtw_spinlock_bh(&pxmitpriv->lock);
7971
7972 res = rtw_xmitframe_enqueue(padapter, pxframe);
7973 if (res == _FAIL) {
7974 _rtw_spinunlock_bh(&pxmitpriv->lock);
7975 return FAIL;
7976 }
7977
7978 pxmitpriv->amsdu_debug_enqueue++;
7979
7980 if (tx_amsdu >= 2) {
7981 amsdu_timeout = rtw_amsdu_get_timer_status(padapter, pattrib->priority);
7982 if (amsdu_timeout == RTW_AMSDU_TIMER_SETTING) {
7983 rtw_amsdu_cancel_timer(padapter, pattrib->priority);
7984 rtw_amsdu_set_timer_status(padapter, pattrib->priority,
7985 RTW_AMSDU_TIMER_UNSET);
7986 }
7987 }
7988
7989 _rtw_spinunlock_bh(&pxmitpriv->lock);
7990
7991 rtw_tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
7992
7993 return _TRUE;
7994 }
7995 #endif /* CONFIG_TX_AMSDU_SW_MODE */
7996
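/*
 * Descriptive note: TX preparation below runs in a fixed order - build the
 * per-fragment txreqs, fill the 802.11 headers, append the TKIP MIC when
 * needed, run software encryption if required, optionally migrate the payload
 * into a DMA-coherent buffer, and finally fill the meta data and (for PCI)
 * the physical addresses.
 */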
7997 s32 core_tx_prepare_phl(_adapter *padapter, struct xmit_frame *pxframe)
7998 {
7999 if (core_wlan_fill_txreq_pre(padapter, pxframe) == _FAIL)
8000 return FAIL;
8001
8002 if (pxframe->xftype == RTW_TX_OS) {
8003 core_wlan_fill_head(padapter, pxframe);
8004 if (core_wlan_fill_tkip_mic(padapter, pxframe) == _FAIL) {
8005 RTW_ERR("core_wlan_fill_tkip_mic FAIL\n");
8006 return FAIL;
8007 }
8008 }
8009 core_wlan_fill_tail(padapter, pxframe);
8010 core_wlan_sw_encrypt(padapter, pxframe);
8011
8012 if (core_migrate_to_coherent_buf(padapter, pxframe) == _FAIL)
8013 return FAIL;
8014
8015 core_wlan_fill_txreq_post(padapter, pxframe);
8016
8017 return SUCCESS;
8018 }
8019
8020
8021 s32 core_tx_call_phl(_adapter *padapter, struct xmit_frame *pxframe, void *txsc_pkt)
8022 {
8023 struct rtw_xmit_req *txreq = NULL;
8024 void *phl = padapter->dvobj->phl;
8025 u32 idx = 0;
8026 u8 txreq_cnt = 0;
8027 #ifdef CONFIG_CORE_TXSC
8028 struct rtw_xmit_req *ptxsc_txreq = NULL;
8029 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8030 #endif
8031
8032 #ifdef CONFIG_CORE_TXSC
8033 struct txsc_pkt_entry *ptxsc_pkt = (struct txsc_pkt_entry *)txsc_pkt;
8034 if (ptxsc_pkt)
8035 ptxsc_txreq = ptxsc_pkt->ptxreq;
8036
8037 txreq = pxframe ? pxframe->phl_txreq : ptxsc_txreq;
8038 txreq_cnt = pxframe ? pxframe->txreq_cnt : 1;
8039 #else
8040 txreq = pxframe->phl_txreq;
8041 txreq_cnt = pxframe->txreq_cnt;
8042 #endif
8043
8044 for (idx = 0; idx < txreq_cnt; idx++) {
8045
8046 #ifdef RTW_PHL_TEST_FPGA
8047 core_add_record(padapter, REC_TX_PHL, txreq);
8048 #endif
8049
8050 if (rtw_phl_add_tx_req(phl, txreq) != RTW_PHL_STATUS_SUCCESS)
8051 return FAIL;
8052
8053 rtw_phl_tx_req_notify(phl);
8054
8055
8056 txreq++;
8057 }
8058
8059 /* should count tx status after add tx req is success */
8060 #ifdef CONFIG_CORE_TXSC
8061 if (ptxsc_txreq != NULL)
8062 rtw_count_tx_stats_tx_req(padapter, ptxsc_txreq, ptxsc_pkt->psta);
8063 else
8064 #endif
8065 rtw_count_tx_stats(padapter, pxframe, pxframe->attrib.pktlen);
8066
8067 return SUCCESS;
8068 }
8069
8070 s32 core_tx_per_packet(_adapter *padapter, struct xmit_frame *pxframe,
8071 struct sk_buff **pskb, struct sta_info *psta)
8072 {
8073 #if defined(CONFIG_AP_MODE) || defined(CONFIG_CORE_TXSC)
8074 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8075 #endif
8076
8077 if (core_tx_update_xmitframe(padapter, pxframe, pskb, psta, RTW_TX_OS) == FAIL)
8078 goto abort_tx_per_packet;
8079
8080 #ifdef CONFIG_80211N_HT
8081 if ((pxframe->attrib.ether_type != 0x0806)
8082 && (pxframe->attrib.ether_type != 0x888e)
8083 && (pxframe->attrib.dhcp_pkt != 1))
8084 rtw_issue_addbareq_cmd(padapter, pxframe, _TRUE);
8085 #endif /* CONFIG_80211N_HT */
8086
8087 #ifdef CONFIG_TX_AMSDU_SW_MODE
8088 if (core_tx_amsdu_enqueue(padapter, pxframe) == _TRUE)
8089 return SUCCESS; /* queued to do AMSDU */
8090 #endif
8091
8092 if (core_tx_prepare_phl(padapter, pxframe) == FAIL)
8093 goto abort_tx_per_packet;
8094
8095 #ifdef CONFIG_AP_MODE
8096 _rtw_spinlock_bh(&pxmitpriv->lock);
8097 if (xmitframe_enqueue_for_sleeping_sta(padapter, pxframe) == _TRUE) {
8098 _rtw_spinunlock_bh(&pxmitpriv->lock);
8099 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue);
8100 return SUCCESS;
8101 }
8102 _rtw_spinunlock_bh(&pxmitpriv->lock);
8103 #endif
8104
8105 #if !defined(CONFIG_CORE_TXSC) || defined(CONFIG_RTW_DATA_BMC_TO_UC)
8106 if (core_tx_call_phl(padapter, pxframe, NULL) == SUCCESS)
8107 #endif
8108 return SUCCESS;
8109
8110
8111 abort_tx_per_packet:
8112 if (pxframe == NULL) {
8113 rtw_os_pkt_complete(padapter, *pskb);
8114 } else {
8115 if (pxframe->pkt == NULL)
8116 rtw_os_pkt_complete(padapter, *pskb);
8117 core_tx_free_xmitframe(padapter, pxframe);
8118 }
8119
8120 return FAIL;
8121 }
8122
8123 s32 rtw_core_tx(_adapter *padapter, struct sk_buff **pskb, struct sta_info *psta, u16 os_qid)
8124 {
8125 struct xmit_frame *pxframe = NULL;
8126 #if defined(CONFIG_AP_MODE) || defined(CONFIG_CORE_TXSC)
8127 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8128 #endif
8129 s32 res = 0;
8130 #ifdef CONFIG_CORE_TXSC
8131 struct txsc_pkt_entry txsc_pkt;
8132 #endif
8133
8134 #ifdef CONFIG_CORE_TXSC
8135 if (txsc_get_sc_cached_entry(padapter, *pskb, &txsc_pkt) == _SUCCESS)
8136 goto core_txsc;
8137 #endif
8138
8139 if (core_tx_alloc_xmitframe(padapter, &pxframe, os_qid) == FAIL)
8140 goto abort_core_tx;
8141
8142 if (core_tx_update_pkt(padapter, pxframe, pskb) == FAIL)
8143 goto abort_core_tx;
8144
8145 #if defined(CONFIG_AP_MODE)
8146 if (MLME_STATE(padapter) & WIFI_AP_STATE) {
8147 _list f_list;
8148
8149 res = rtw_ap_addr_resolve(padapter, os_qid, pxframe, *pskb, &f_list);
8150 if (res == _FAIL)
8151 goto abort_core_tx;
8152
8153 #if defined(CONFIG_RTW_WDS) || CONFIG_RTW_DATA_BMC_TO_UC
8154 if (!rtw_is_list_empty(&f_list)) {
8155 _list *list = get_next(&f_list);
8156 struct xmit_frame *fframe;
8157
8158 while ((rtw_end_of_queue_search(&f_list, list)) == _FALSE) {
8159 fframe = LIST_CONTAINOR(list, struct xmit_frame, list);
8160 list = get_next(list);
8161 rtw_list_delete(&fframe->list);
8162
8163 if (res == RTW_ORI_NO_NEED && rtw_is_list_empty(&f_list)) {
8164 fframe->pkt = pxframe->pkt; /*last frame */
8165 pxframe->pkt = NULL;
8166 } else {
8167 fframe->pkt = rtw_skb_copy(*pskb);
8168 }
8169
8170 if (!fframe->pkt) {
8171 if (res == RTW_ORI_NO_NEED && IS_MCAST(pxframe->attrib.dst))
8172 res = _SUCCESS;
8173
8174 core_tx_free_xmitframe(padapter, fframe);
8175 continue;
8176 }
8177
8178 core_tx_per_packet(padapter, fframe, &fframe->pkt, NULL);
8179 }
8180 }
8181 #endif
8182
8183 if (res == RTW_ORI_NO_NEED) {
8184 core_tx_free_xmitframe(padapter, pxframe);
8185 return SUCCESS;
8186 }
8187 }
8188 #endif /* defined(CONFIG_AP_MODE) */
8189 #ifdef CONFIG_LAYER2_ROAMING
8190 if ((padapter->mlmepriv.roam_network) && ((*pskb)->protocol != htons(0x888e))) { /* eapol never enqueue.*/
8191 pxframe->pkt = *pskb;
8192 rtw_list_delete(&pxframe->list);
8193 _rtw_spinlock_bh(&pxmitpriv->rpkt_queue.lock);
8194 rtw_list_insert_tail(&(pxframe->list), get_list_head(&(pxmitpriv->rpkt_queue)));
8195 _rtw_spinunlock_bh(&pxmitpriv->rpkt_queue.lock);
8196 return SUCCESS;
8197 }
8198 #endif
8199
8200 res = core_tx_per_packet(padapter, pxframe, pskb, psta);
8201 if (res == FAIL)
8202 return FAIL;
8203
8204 #ifdef CONFIG_CORE_TXSC
8205 txsc_add_sc_cache_entry(padapter, pxframe, &txsc_pkt);
8206
8207 core_txsc:
8208
8209 if (txsc_apply_sc_cached_entry(padapter, &txsc_pkt) == _FAIL)
8210 goto abort_core_tx;
8211
8212 if (core_tx_call_phl(padapter, pxframe, &txsc_pkt) == FAIL)
8213 goto abort_core_tx;
8214 #endif
8215
8216 return SUCCESS;
8217
8218 abort_core_tx:
8219 if (pxframe == NULL) {
8220 #ifdef CONFIG_CORE_TXSC
8221 if (txsc_pkt.ptxreq)
8222 txsc_free_txreq(padapter, txsc_pkt.ptxreq);
8223 else
8224 #endif
8225 rtw_os_pkt_complete(padapter, *pskb);
8226 } else {
8227 if (pxframe->pkt == NULL)
8228 rtw_os_pkt_complete(padapter, *pskb);
8229
8230 core_tx_free_xmitframe(padapter, pxframe);
8231 }
8232
8233 return FAIL;
8234 }
8235
8236 enum rtw_phl_status
8237 rtw_core_tx_recycle(void *drv_priv, struct rtw_xmit_req *txreq)
8238 {
8239 _adapter *padapter = NULL;
8240 struct xmit_frame *pxframe = NULL;
8241 #ifdef CONFIG_CORE_TXSC
8242 struct xmit_txreq_buf *ptxreq_buf = NULL;
8243 #endif
8244
8245 if (txreq->os_priv == NULL) {
8246 		RTW_ERR("NULL txreq->os_priv!\n");
8247 return RTW_PHL_STATUS_FAILURE;
8248 }
8249
8250 #ifdef CONFIG_CORE_TXSC
8251 if (txreq->treq_type == RTW_PHL_TREQ_TYPE_CORE_TXSC) {
8252 ptxreq_buf = (struct xmit_txreq_buf *)txreq->os_priv;
8253 padapter = ptxreq_buf->adapter;
8254 #ifdef RTW_PHL_DBG_CMD
8255 core_add_record(padapter, REC_TX_PHL_RCC, txreq);
8256 #endif
8257 txsc_free_txreq(padapter, txreq);
8258 return RTW_PHL_STATUS_SUCCESS;
8259 }
8260 #endif /* CONFIG_CORE_TXSC */
8261
8262 pxframe = (struct xmit_frame *)txreq->os_priv;
8263 if (pxframe == NULL) {
8264 RTW_ERR("%s: NULL xmitframe !!\n", __func__);
8265 rtw_warn_on(1);
8266 return RTW_PHL_STATUS_FAILURE;
8267 }
8268
8269 padapter = pxframe->padapter;
8270
8271 #ifdef RTW_PHL_DBG_CMD
8272 core_add_record(padapter, REC_TX_PHL_RCC, txreq);
8273 #endif
8274
8275 #ifdef CONFIG_PCI_HCI
8276 core_recycle_txreq_phyaddr(padapter, txreq);
8277 #endif
8278 core_tx_free_xmitframe(padapter, pxframe);
8279
8280 return RTW_PHL_STATUS_SUCCESS;
8281 }
8282 #endif
8283
8284
8285 #ifdef CONFIG_TDLS
8286 sint xmitframe_enqueue_for_tdls_sleeping_sta(_adapter *padapter, struct xmit_frame *pxmitframe)
8287 {
8288 sint ret = _FALSE;
8289
8290 struct sta_info *ptdls_sta = NULL;
8291 struct sta_priv *pstapriv = &padapter->stapriv;
8292 struct pkt_attrib *pattrib = &pxmitframe->attrib;
8293 struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
8294 int i;
8295
8296 ptdls_sta = rtw_get_stainfo(pstapriv, pattrib->dst);
8297 if (ptdls_sta == NULL)
8298 return ret;
8299 else if (ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE) {
8300
8301 if (pattrib->triggered == 1) {
8302 ret = _TRUE;
8303 return ret;
8304 }
8305
8306 _rtw_spinlock_bh(&ptdls_sta->sleep_q.lock);
8307
8308 if (ptdls_sta->state & WIFI_SLEEP_STATE) {
8309 rtw_list_delete(&pxmitframe->list);
8310
8311 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8312
8313 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptdls_sta->sleep_q));
8314
8315 ptdls_sta->sleepq_len++;
8316 ptdls_sta->sleepq_ac_len++;
8317
8318 /* indicate 4-AC queue bit in TDLS peer traffic indication */
8319 switch (pattrib->priority) {
8320 case 1:
8321 case 2:
8322 ptdls_sta->uapsd_bk |= BIT(1);
8323 break;
8324 case 4:
8325 case 5:
8326 ptdls_sta->uapsd_vi |= BIT(1);
8327 break;
8328 case 6:
8329 case 7:
8330 ptdls_sta->uapsd_vo |= BIT(1);
8331 break;
8332 case 0:
8333 case 3:
8334 default:
8335 ptdls_sta->uapsd_be |= BIT(1);
8336 break;
8337 }
8338
8339 /* Transmit TDLS PTI via AP */
8340 if (ptdls_sta->sleepq_len == 1)
8341 rtw_tdls_cmd(padapter, ptdls_sta->phl_sta->mac_addr, TDLS_ISSUE_PTI);
8342
8343 ret = _TRUE;
8344 }
8345
8346 _rtw_spinunlock_bh(&ptdls_sta->sleep_q.lock);
8347 }
8348
8349 return ret;
8350
8351 }
8352 #endif /* CONFIG_TDLS */
8353
8354 #define RTW_HIQ_FILTER_ALLOW_ALL 0
8355 #define RTW_HIQ_FILTER_ALLOW_SPECIAL 1
8356 #define RTW_HIQ_FILTER_DENY_ALL 2
8357
8358 inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe)
8359 {
8360 bool allow = _FALSE;
8361 _adapter *adapter = xmitframe->padapter;
8362 struct registry_priv *registry = &adapter->registrypriv;
8363
8364 if (adapter->registrypriv.wifi_spec == 1)
8365 allow = _TRUE;
8366 else if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) {
8367
8368 struct pkt_attrib *attrib = &xmitframe->attrib;
8369
8370 if (attrib->ether_type == 0x0806
8371 || attrib->ether_type == 0x888e
8372 #ifdef CONFIG_WAPI_SUPPORT
8373 || attrib->ether_type == 0x88B4
8374 #endif
8375 || attrib->dhcp_pkt
8376 ) {
8377 if (0)
8378 RTW_INFO(FUNC_ADPT_FMT" ether_type:0x%04x%s\n", FUNC_ADPT_ARG(xmitframe->padapter)
8379 , attrib->ether_type, attrib->dhcp_pkt ? " DHCP" : "");
8380 allow = _TRUE;
8381 }
8382 } else if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_ALL)
8383 allow = _TRUE;
8384 else if (registry->hiq_filter == RTW_HIQ_FILTER_DENY_ALL)
8385 allow = _FALSE;
8386 else
8387 rtw_warn_on(1);
8388
8389 return allow;
8390 }
8391
8392 #if defined(CONFIG_AP_MODE) || defined(CONFIG_TDLS)
8393
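/*
 * Descriptive note: returns _TRUE when the frame has been taken over here
 * (queued on a sleeping station's sleep_q, with the TIM bitmap/beacon updated
 * as needed, or handled by the TDLS path), in which case the caller must not
 * transmit it now; returns _FALSE when the frame should go out through the
 * normal TX path.
 */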
8394 sint xmitframe_enqueue_for_sleeping_sta(_adapter *padapter, struct xmit_frame *pxmitframe)
8395 {
8396 sint ret = _FALSE;
8397 struct sta_info *psta = NULL;
8398 struct sta_priv *pstapriv = &padapter->stapriv;
8399 struct pkt_attrib *pattrib = &pxmitframe->attrib;
8400 sint bmcst = IS_MCAST(pattrib->ra);
8401 bool update_tim = _FALSE;
8402 #ifdef CONFIG_TDLS
8403
8404 if (padapter->tdlsinfo.link_established == _TRUE)
8405 ret = xmitframe_enqueue_for_tdls_sleeping_sta(padapter, pxmitframe);
8406 #endif /* CONFIG_TDLS */
8407
8408 if (!MLME_IS_AP(padapter) && !MLME_IS_MESH(padapter)) {
8409 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
8410 return ret;
8411 }
8412 /*
8413 if(pattrib->psta)
8414 {
8415 psta = pattrib->psta;
8416 }
8417 else
8418 {
8419 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
8420 psta=rtw_get_stainfo(pstapriv, pattrib->ra);
8421 }
8422 */
8423 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
8424 if (pattrib->psta != psta) {
8425 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta);
8426 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
8427 return _FALSE;
8428 }
8429
8430 if (psta == NULL) {
8431 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_nosta);
8432 		RTW_INFO("%s, psta==NULL\n", __func__);
8433 return _FALSE;
8434 }
8435
8436 if (!(psta->state & WIFI_ASOC_STATE)) {
8437 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_link);
8438 		RTW_INFO("%s, psta->state(0x%x) has no WIFI_ASOC_STATE\n", __func__, psta->state);
8439 return _FALSE;
8440 }
8441
8442 if (pattrib->triggered == 1) {
8443 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger);
8444 /* RTW_INFO("directly xmit pspoll_triggered packet\n"); */
8445
8446 /* pattrib->triggered=0; */
8447 if (bmcst && xmitframe_hiq_filter(pxmitframe) == _TRUE)
8448 pattrib->qsel = rtw_hal_get_qsel(padapter, QSLT_HIGH_ID);/* HIQ */
8449
8450 return ret;
8451 }
8452
8453
8454 if (bmcst) {
8455 #ifndef BMC_ON_HIQ
8456 _rtw_spinlock_bh(&psta->sleep_q.lock);
8457
8458 		if (rtw_tim_map_anyone_be_set(padapter, pstapriv->sta_dz_bitmap)) { /* if any STA is in PS mode */
8459 /* pattrib->qsel = rtw_hal_get_qsel(padapter,QSLT_HIGH_ID);*/ /* HIQ */
8460
8461 rtw_list_delete(&pxmitframe->list);
8462
8463 /*_rtw_spinlock_bh(&psta->sleep_q.lock);*/
8464
8465 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
8466
8467 psta->sleepq_len++;
8468
8469 if (!(rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, 0)))
8470 update_tim = _TRUE;
8471
8472 rtw_tim_map_set(padapter, pstapriv->tim_bitmap, 0);
8473 rtw_tim_map_set(padapter, pstapriv->sta_dz_bitmap, 0);
8474
8475 /* RTW_INFO("enqueue, sq_len=%d\n", psta->sleepq_len); */
8476 /* RTW_INFO_DUMP("enqueue, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8477 if (update_tim == _TRUE) {
8478 if (is_broadcast_mac_addr(pattrib->ra))
8479 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "buffer BC");
8480 else
8481 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "buffer MC");
8482 } else
8483 chk_bmc_sleepq_cmd(padapter);
8484
8485 /*_rtw_spinunlock_bh(&psta->sleep_q.lock);*/
8486
8487 ret = _TRUE;
8488
8489 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast);
8490 }
8491
8492 _rtw_spinunlock_bh(&psta->sleep_q.lock);
8493 #endif
8494 return ret;
8495
8496 }
8497
8498
8499 _rtw_spinlock_bh(&psta->sleep_q.lock);
8500
8501 if (psta->state & WIFI_SLEEP_STATE) {
8502 u8 wmmps_ac = 0;
8503
8504 if (rtw_tim_map_is_set(padapter, pstapriv->sta_dz_bitmap, psta->phl_sta->aid)) {
8505 rtw_list_delete(&pxmitframe->list);
8506
8507 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8508
8509 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
8510
8511 psta->sleepq_len++;
8512
8513 switch (pattrib->priority) {
8514 case 1:
8515 case 2:
8516 wmmps_ac = psta->uapsd_bk & BIT(0);
8517 break;
8518 case 4:
8519 case 5:
8520 wmmps_ac = psta->uapsd_vi & BIT(0);
8521 break;
8522 case 6:
8523 case 7:
8524 wmmps_ac = psta->uapsd_vo & BIT(0);
8525 break;
8526 case 0:
8527 case 3:
8528 default:
8529 wmmps_ac = psta->uapsd_be & BIT(0);
8530 break;
8531 }
8532
8533 if (wmmps_ac)
8534 psta->sleepq_ac_len++;
8535
8536 if (((psta->has_legacy_ac) && (!wmmps_ac)) || ((!psta->has_legacy_ac) && (wmmps_ac))) {
8537 if (!(rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid)))
8538 update_tim = _TRUE;
8539
8540 rtw_tim_map_set(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid);
8541
8542 /* RTW_INFO("enqueue, sq_len=%d\n", psta->sleepq_len); */
8543 /* RTW_INFO_DUMP("enqueue, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8544
8545 if (update_tim == _TRUE) {
8546 /* RTW_INFO("sleepq_len==1, update BCNTIM\n"); */
8547 					/* update BCN for TIM IE */
8548 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "buffer UC");
8549 }
8550 }
8551
8552 /* _rtw_spinunlock_bh(&psta->sleep_q.lock); */
8553
8554 /* if(psta->sleepq_len > (NR_XMITFRAME>>3)) */
8555 /* { */
8556 /* wakeup_sta_to_xmit(padapter, psta); */
8557 /* } */
8558
8559 ret = _TRUE;
8560
8561 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast);
8562 }
8563
8564 }
8565
8566 _rtw_spinunlock_bh(&psta->sleep_q.lock);
8567
8568 return ret;
8569
8570 }
8571
8572 static void dequeue_xmitframes_to_sleeping_queue(_adapter *padapter, struct sta_info *psta, _queue *pframequeue)
8573 {
8574 sint ret;
8575 _list *plist, *phead;
8576 u8 ac_index;
8577 struct tx_servq *ptxservq;
8578 struct pkt_attrib *pattrib;
8579 struct xmit_frame *pxmitframe;
8580 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
8581
8582 phead = get_list_head(pframequeue);
8583 plist = get_next(phead);
8584
8585 while (rtw_end_of_queue_search(phead, plist) == _FALSE) {
8586 pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
8587
8588 plist = get_next(plist);
8589
8590 pattrib = &pxmitframe->attrib;
8591
8592 pattrib->triggered = 0;
8593
8594 ret = xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
8595
8596 if (_TRUE == ret) {
8597 ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
8598
8599 ptxservq->qcnt--;
8600 phwxmits[ac_index].accnt--;
8601 } else {
8602 /* RTW_INFO("xmitframe_enqueue_for_sleeping_sta return _FALSE\n"); */
8603 }
8604
8605 }
8606
8607 }
8608
8609 void stop_sta_xmit(_adapter *padapter, struct sta_info *psta)
8610 {
8611 struct sta_info *psta_bmc;
8612 struct sta_xmit_priv *pstaxmitpriv;
8613 struct sta_priv *pstapriv = &padapter->stapriv;
8614 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8615
8616 pstaxmitpriv = &psta->sta_xmitpriv;
8617
8618 /* for BC/MC Frames */
8619 psta_bmc = rtw_get_bcmc_stainfo(padapter);
8620 if (!psta_bmc)
8621 rtw_warn_on(1);
8622
8623 _rtw_spinlock_bh(&pxmitpriv->lock);
8624
8625 psta->state |= WIFI_SLEEP_STATE;
8626
8627 #ifdef CONFIG_TDLS
8628 if (!(psta->tdls_sta_state & TDLS_LINKED_STATE))
8629 #endif /* CONFIG_TDLS */
8630 rtw_tim_map_set(padapter, pstapriv->sta_dz_bitmap, psta->phl_sta->aid);
8631
8632 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
8633 rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
8634 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
8635 rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
8636 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
8637 rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
8638 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
8639 rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
8640
8641 if (psta_bmc != NULL
8642 #ifdef CONFIG_TDLS
8643 && !(psta->tdls_sta_state & TDLS_LINKED_STATE)
8644 #endif
8645 ) {
8646 /* for BC/MC Frames */
8647 #ifndef BMC_ON_HIQ
8648 pstaxmitpriv = &psta_bmc->sta_xmitpriv;
8649 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->vo_q.sta_pending);
8650 rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
8651 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->vi_q.sta_pending);
8652 rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
8653 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
8654 rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
8655 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->bk_q.sta_pending);
8656 rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
8657 #endif
8658 }
8659 _rtw_spinunlock_bh(&pxmitpriv->lock);
8660
8661
8662 }
8663
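/*
 * Descriptive note: flushes every frame buffered on the station's sleep_q
 * back to the interface TX queue, clears the station's TIM/dz bits and, once
 * no other station remains in power save, does the same for the BC/MC sleep_q
 * before updating the beacon's TIM element.
 */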
8664 void wakeup_sta_to_xmit(_adapter *padapter, struct sta_info *psta)
8665 {
8666 u8 update_mask = 0, wmmps_ac = 0;
8667 struct sta_info *psta_bmc;
8668 _list *xmitframe_plist, *xmitframe_phead;
8669 struct xmit_frame *pxmitframe = NULL;
8670 struct sta_priv *pstapriv = &padapter->stapriv;
8671 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8672
8673 psta_bmc = rtw_get_bcmc_stainfo(padapter);
8674
8675
8676 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8677 _rtw_spinlock_bh(&pxmitpriv->lock);
8678
8679 xmitframe_phead = get_list_head(&psta->sleep_q);
8680 xmitframe_plist = get_next(xmitframe_phead);
8681
8682 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
8683 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
8684
8685 xmitframe_plist = get_next(xmitframe_plist);
8686
8687 rtw_list_delete(&pxmitframe->list);
8688
8689 switch (pxmitframe->attrib.priority) {
8690 case 1:
8691 case 2:
8692 wmmps_ac = psta->uapsd_bk & BIT(1);
8693 break;
8694 case 4:
8695 case 5:
8696 wmmps_ac = psta->uapsd_vi & BIT(1);
8697 break;
8698 case 6:
8699 case 7:
8700 wmmps_ac = psta->uapsd_vo & BIT(1);
8701 break;
8702 case 0:
8703 case 3:
8704 default:
8705 wmmps_ac = psta->uapsd_be & BIT(1);
8706 break;
8707 }
8708
8709 psta->sleepq_len--;
8710 if (psta->sleepq_len > 0)
8711 pxmitframe->attrib.mdata = 1;
8712 else
8713 pxmitframe->attrib.mdata = 0;
8714
8715 if (wmmps_ac) {
8716 psta->sleepq_ac_len--;
8717 if (psta->sleepq_ac_len > 0) {
8718 pxmitframe->attrib.mdata = 1;
8719 pxmitframe->attrib.eosp = 0;
8720 } else {
8721 pxmitframe->attrib.mdata = 0;
8722 pxmitframe->attrib.eosp = 1;
8723 }
8724 }
8725
8726 pxmitframe->attrib.triggered = 1;
8727
8728 /*
8729 _rtw_spinunlock_bh(&psta->sleep_q.lock);
8730 //rtw_intf_data_xmit
8731 if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE)
8732 {
8733 rtw_os_xmit_complete(padapter, pxmitframe);
8734 }
8735 _rtw_spinlock_bh(&psta->sleep_q.lock);
8736 */
8737 rtw_intf_xmitframe_enqueue(padapter, pxmitframe);
8738
8739
8740 }
8741
8742 if (psta->sleepq_len == 0) {
8743 #ifdef CONFIG_TDLS
8744 if (psta->tdls_sta_state & TDLS_LINKED_STATE) {
8745 if (psta->state & WIFI_SLEEP_STATE)
8746 psta->state ^= WIFI_SLEEP_STATE;
8747
8748 _rtw_spinunlock_bh(&pxmitpriv->lock);
8749 return;
8750 }
8751 #endif /* CONFIG_TDLS */
8752
8753 if (rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid)) {
8754 /* RTW_INFO("wakeup to xmit, qlen==0\n"); */
8755 /* RTW_INFO_DUMP("update_BCNTIM, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8756 			/* update BCN for TIM IE */
8757 /* update_BCNTIM(padapter); */
8758 update_mask = BIT(0);
8759 }
8760
8761 rtw_tim_map_clear(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid);
8762
8763 if (psta->state & WIFI_SLEEP_STATE)
8764 psta->state ^= WIFI_SLEEP_STATE;
8765
8766 if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
8767 RTW_INFO("%s alive check\n", __func__);
8768 psta->expire_to = pstapriv->expire_to;
8769 psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
8770 }
8771
8772 rtw_tim_map_clear(padapter, pstapriv->sta_dz_bitmap, psta->phl_sta->aid);
8773 }
8774
8775 /* for BC/MC Frames */
8776 if (!psta_bmc)
8777 goto _exit;
8778
8779 	if (!(rtw_tim_map_anyone_be_set_exclude_aid0(padapter, pstapriv->sta_dz_bitmap))) { /* no STA other than AID 0 is in PS mode */
8780 xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
8781 xmitframe_plist = get_next(xmitframe_phead);
8782
8783 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
8784 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
8785
8786 xmitframe_plist = get_next(xmitframe_plist);
8787
8788 rtw_list_delete(&pxmitframe->list);
8789
8790 psta_bmc->sleepq_len--;
8791 if (psta_bmc->sleepq_len > 0)
8792 pxmitframe->attrib.mdata = 1;
8793 else
8794 pxmitframe->attrib.mdata = 0;
8795
8796
8797 pxmitframe->attrib.triggered = 1;
8798 /*
8799 _rtw_spinunlock_bh(&psta_bmc->sleep_q.lock);
8800 //rtw_intf_data_xmit
8801 if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE)
8802 {
8803 rtw_os_xmit_complete(padapter, pxmitframe);
8804 }
8805 _rtw_spinlock_bh(&psta_bmc->sleep_q.lock);
8806
8807 */
8808 rtw_intf_xmitframe_enqueue(padapter, pxmitframe);
8809
8810 }
8811
8812 if (psta_bmc->sleepq_len == 0) {
8813 if (rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, 0)) {
8814 /* RTW_INFO("wakeup to xmit, qlen==0\n"); */
8815 /* RTW_INFO_DUMP("update_BCNTIM, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8816 				/* update BCN for TIM IE */
8817 /* update_BCNTIM(padapter); */
8818 update_mask |= BIT(1);
8819 }
8820 rtw_tim_map_clear(padapter, pstapriv->tim_bitmap, 0);
8821 rtw_tim_map_clear(padapter, pstapriv->sta_dz_bitmap, 0);
8822 }
8823
8824 }
8825
8826 _exit:
8827
8828 /* _rtw_spinunlock_bh(&psta_bmc->sleep_q.lock); */
8829 _rtw_spinunlock_bh(&pxmitpriv->lock);
8830
8831 if (update_mask) {
8832 /* update_BCNTIM(padapter); */
8833 if ((update_mask & (BIT(0) | BIT(1))) == (BIT(0) | BIT(1)))
8834 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "clear UC&BMC");
8835 else if ((update_mask & BIT(1)) == BIT(1))
8836 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "clear BMC");
8837 else
8838 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "clear UC");
8839 }
8840
8841 }
8842
8843 void xmit_delivery_enabled_frames(_adapter *padapter, struct sta_info *psta)
8844 {
8845 u8 wmmps_ac = 0;
8846 _list *xmitframe_plist, *xmitframe_phead;
8847 struct xmit_frame *pxmitframe = NULL;
8848 struct sta_priv *pstapriv = &padapter->stapriv;
8849 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8850
8851
8852 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8853 _rtw_spinlock_bh(&pxmitpriv->lock);
8854
8855 xmitframe_phead = get_list_head(&psta->sleep_q);
8856 xmitframe_plist = get_next(xmitframe_phead);
8857
8858 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
8859 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
8860
8861 xmitframe_plist = get_next(xmitframe_plist);
8862
8863 switch (pxmitframe->attrib.priority) {
8864 case 1:
8865 case 2:
8866 wmmps_ac = psta->uapsd_bk & BIT(1);
8867 break;
8868 case 4:
8869 case 5:
8870 wmmps_ac = psta->uapsd_vi & BIT(1);
8871 break;
8872 case 6:
8873 case 7:
8874 wmmps_ac = psta->uapsd_vo & BIT(1);
8875 break;
8876 case 0:
8877 case 3:
8878 default:
8879 wmmps_ac = psta->uapsd_be & BIT(1);
8880 break;
8881 }
8882
8883 if (!wmmps_ac)
8884 continue;
8885
8886 rtw_list_delete(&pxmitframe->list);
8887
8888 psta->sleepq_len--;
8889 psta->sleepq_ac_len--;
8890
8891 if (psta->sleepq_ac_len > 0) {
8892 pxmitframe->attrib.mdata = 1;
8893 pxmitframe->attrib.eosp = 0;
8894 } else {
8895 pxmitframe->attrib.mdata = 0;
8896 pxmitframe->attrib.eosp = 1;
8897 }
8898
8899 pxmitframe->attrib.triggered = 1;
8900 rtw_intf_xmitframe_enqueue(padapter, pxmitframe);
8901
8902 if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
8903 #ifdef CONFIG_TDLS
8904 if (psta->tdls_sta_state & TDLS_LINKED_STATE) {
8905 /* _rtw_spinunlock_bh(&psta->sleep_q.lock); */
8906 goto exit;
8907 }
8908 #endif /* CONFIG_TDLS */
8909 rtw_tim_map_clear(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid);
8910
8911 /* RTW_INFO("wakeup to xmit, qlen==0\n"); */
8912 /* RTW_INFO_DUMP("update_BCNTIM, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8913 			/* update BCN for TIM IE */
8914 /* update_BCNTIM(padapter); */
8915 rtw_update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0);
8916 /* update_mask = BIT(0); */
8917 }
8918
8919 }
8920
8921 #ifdef CONFIG_TDLS
8922 exit:
8923 #endif
8924 /* _rtw_spinunlock_bh(&psta->sleep_q.lock); */
8925 _rtw_spinunlock_bh(&pxmitpriv->lock);
8926
8927 return;
8928 }
8929
8930 #endif /* defined(CONFIG_AP_MODE) || defined(CONFIG_TDLS) */
8931
8932 #if 0 /*#ifdef CONFIG_XMIT_THREAD_MODE*/
8933 void enqueue_pending_xmitbuf(
8934 struct xmit_priv *pxmitpriv,
8935 struct xmit_buf *pxmitbuf)
8936 {
8937 _queue *pqueue;
8938 _adapter *pri_adapter = pxmitpriv->adapter;
8939
8940 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8941
8942 _rtw_spinlock_bh(&pqueue->lock);
8943 rtw_list_delete(&pxmitbuf->list);
8944 rtw_list_insert_tail(&pxmitbuf->list, get_list_head(pqueue));
8945 _rtw_spinunlock_bh(&pqueue->lock);
8946
8947 #if defined(CONFIG_SDIO_HCI) && defined(CONFIG_CONCURRENT_MODE)
8948 pri_adapter = GET_PRIMARY_ADAPTER(pri_adapter);
8949 #endif /*SDIO_HCI + CONCURRENT*/
8950 _rtw_up_sema(&(pri_adapter->xmitpriv.xmit_sema));
8951 }
8952
8953 void enqueue_pending_xmitbuf_to_head(
8954 struct xmit_priv *pxmitpriv,
8955 struct xmit_buf *pxmitbuf)
8956 {
8957 _queue *pqueue = &pxmitpriv->pending_xmitbuf_queue;
8958
8959 _rtw_spinlock_bh(&pqueue->lock);
8960 rtw_list_delete(&pxmitbuf->list);
8961 rtw_list_insert_head(&pxmitbuf->list, get_list_head(pqueue));
8962 _rtw_spinunlock_bh(&pqueue->lock);
8963 }
8964
8965 struct xmit_buf *dequeue_pending_xmitbuf(
8966 struct xmit_priv *pxmitpriv)
8967 {
8968 struct xmit_buf *pxmitbuf;
8969 _queue *pqueue;
8970
8971
8972 pxmitbuf = NULL;
8973 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8974
8975 _rtw_spinlock_bh(&pqueue->lock);
8976
8977 if (_rtw_queue_empty(pqueue) == _FALSE) {
8978 _list *plist, *phead;
8979
8980 phead = get_list_head(pqueue);
8981 plist = get_next(phead);
8982 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
8983 rtw_list_delete(&pxmitbuf->list);
8984 }
8985
8986 _rtw_spinunlock_bh(&pqueue->lock);
8987
8988 return pxmitbuf;
8989 }
8990
8991 static struct xmit_buf *dequeue_pending_xmitbuf_ext(
8992 struct xmit_priv *pxmitpriv)
8993 {
8994 struct xmit_buf *pxmitbuf;
8995 _queue *pqueue;
8996
8997 pxmitbuf = NULL;
8998 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8999
9000 _rtw_spinlock_bh(&pqueue->lock);
9001
9002 if (_rtw_queue_empty(pqueue) == _FALSE) {
9003 _list *plist, *phead;
9004
9005 phead = get_list_head(pqueue);
9006 plist = phead;
9007 do {
9008 plist = get_next(plist);
9009 if (plist == phead)
9010 break;
9011
9012 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
9013
9014 if (pxmitbuf->buf_tag == XMITBUF_MGNT) {
9015 rtw_list_delete(&pxmitbuf->list);
9016 break;
9017 }
9018 pxmitbuf = NULL;
9019 } while (1);
9020 }
9021
9022 _rtw_spinunlock_bh(&pqueue->lock);
9023
9024 return pxmitbuf;
9025 }
9026
9027 struct xmit_buf *select_and_dequeue_pending_xmitbuf(_adapter *padapter)
9028 {
9029 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9030 struct xmit_buf *pxmitbuf = NULL;
9031
9032 if (_TRUE == rtw_is_xmit_blocked(padapter))
9033 return pxmitbuf;
9034
9035 pxmitbuf = dequeue_pending_xmitbuf_ext(pxmitpriv);
9036 if (pxmitbuf == NULL && rtw_xmit_ac_blocked(padapter) != _TRUE)
9037 pxmitbuf = dequeue_pending_xmitbuf(pxmitpriv);
9038
9039 return pxmitbuf;
9040 }
9041
9042 sint check_pending_xmitbuf(
9043 struct xmit_priv *pxmitpriv)
9044 {
9045 _queue *pqueue;
9046 sint ret = _FALSE;
9047
9048 pqueue = &pxmitpriv->pending_xmitbuf_queue;
9049
9050 _rtw_spinlock_bh(&pqueue->lock);
9051
9052 if (_rtw_queue_empty(pqueue) == _FALSE)
9053 ret = _TRUE;
9054
9055 _rtw_spinunlock_bh(&pqueue->lock);
9056
9057 return ret;
9058 }
9059
9060 thread_return rtw_xmit_thread(thread_context context)
9061 {
9062 s32 err;
9063 _adapter *adapter;
9064 #ifdef RTW_XMIT_THREAD_HIGH_PRIORITY
9065 #ifdef PLATFORM_LINUX
9066 struct sched_param param = { .sched_priority = 1 };
9067
9068 	sched_setscheduler(current, SCHED_FIFO, &param);
9069 #endif /* PLATFORM_LINUX */
9070 #endif /* RTW_XMIT_THREAD_HIGH_PRIORITY */
9071
9072 err = _SUCCESS;
9073 adapter = (_adapter *)context;
9074
9075 rtw_thread_enter("RTW_XMIT_THREAD");
9076
9077 do {
9078 err = rtw_intf_xmit_buf_handler(adapter);
9079 flush_signals_thread();
9080 } while (_SUCCESS == err);
9081
9082 RTW_INFO(FUNC_ADPT_FMT " Exit\n", FUNC_ADPT_ARG(adapter));
9083
9084 rtw_thread_wait_stop();
9085
9086 return 0;
9087 }
9088 #endif
9089
9090 #ifdef DBG_XMIT_BLOCK
9091 void dump_xmit_block(void *sel, _adapter *padapter)
9092 {
9093 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9094
9095 RTW_PRINT_SEL(sel, "[XMIT-BLOCK] xmit_block :0x%02x\n", dvobj->xmit_block);
9096 if (dvobj->xmit_block & XMIT_BLOCK_REDLMEM)
9097 RTW_PRINT_SEL(sel, "Reason:%s\n", "XMIT_BLOCK_REDLMEM");
9098 if (dvobj->xmit_block & XMIT_BLOCK_SUSPEND)
9099 RTW_PRINT_SEL(sel, "Reason:%s\n", "XMIT_BLOCK_SUSPEND");
9100 if (dvobj->xmit_block == XMIT_BLOCK_NONE)
9101 RTW_PRINT_SEL(sel, "Reason:%s\n", "XMIT_BLOCK_NONE");
9102 }
9103 void dump_xmit_block_info(void *sel, const char *fun_name, _adapter *padapter)
9104 {
9105 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9106
9107 RTW_INFO("\n"ADPT_FMT" call %s\n", ADPT_ARG(padapter), fun_name);
9108 dump_xmit_block(sel, padapter);
9109 }
9110 #define DBG_XMIT_BLOCK_DUMP(adapter) dump_xmit_block_info(RTW_DBGDUMP, __func__, adapter)
9111 #endif
9112
9113 void rtw_set_xmit_block(_adapter *padapter, enum XMIT_BLOCK_REASON reason)
9114 {
9115 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9116
9117 _rtw_spinlock_bh(&dvobj->xmit_block_lock);
9118 dvobj->xmit_block |= reason;
9119 _rtw_spinunlock_bh(&dvobj->xmit_block_lock);
9120
9121 #ifdef DBG_XMIT_BLOCK
9122 DBG_XMIT_BLOCK_DUMP(padapter);
9123 #endif
9124 }
9125
9126 void rtw_clr_xmit_block(_adapter *padapter, enum XMIT_BLOCK_REASON reason)
9127 {
9128 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9129
9130 _rtw_spinlock_bh(&dvobj->xmit_block_lock);
9131 dvobj->xmit_block &= ~reason;
9132 _rtw_spinunlock_bh(&dvobj->xmit_block_lock);
9133
9134 #ifdef DBG_XMIT_BLOCK
9135 DBG_XMIT_BLOCK_DUMP(padapter);
9136 #endif
9137 }
9138 bool rtw_is_xmit_blocked(_adapter *padapter)
9139 {
9140 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9141
9142 #ifdef DBG_XMIT_BLOCK
9143 DBG_XMIT_BLOCK_DUMP(padapter);
9144 #endif
9145 return ((dvobj->xmit_block) ? _TRUE : _FALSE);
9146 }
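/*
 * Illustrative sketch, not part of the build: a suspend path would typically
 * bracket its critical section with the helpers above, assuming
 * XMIT_BLOCK_SUSPEND is the reason bit involved.
 */
#if 0
static void example_xmit_block_usage(_adapter *padapter)
{
	rtw_set_xmit_block(padapter, XMIT_BLOCK_SUSPEND);

	if (rtw_is_xmit_blocked(padapter) == _TRUE)
		RTW_INFO("TX is blocked while suspending\n");

	rtw_clr_xmit_block(padapter, XMIT_BLOCK_SUSPEND);
}
#endif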
9147
9148 bool rtw_xmit_ac_blocked(_adapter *adapter)
9149 {
9150 struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
9151 struct rf_ctl_t *rfctl = adapter_to_rfctl(adapter);
9152 _adapter *iface;
9153 struct mlme_ext_priv *mlmeext;
9154 bool blocked = _FALSE;
9155 int i;
9156 #ifdef DBG_CONFIG_ERROR_DETECT
9157 #ifdef DBG_CONFIG_ERROR_RESET
9158 #ifdef CONFIG_USB_HCI
9159 if (rtw_hal_sreset_inprogress(adapter) == _TRUE) {
9160 blocked = _TRUE;
9161 goto exit;
9162 }
9163 #endif/* #ifdef CONFIG_USB_HCI */
9164 #endif/* #ifdef DBG_CONFIG_ERROR_RESET */
9165 #endif/* #ifdef DBG_CONFIG_ERROR_DETECT */
9166
9167 if (rfctl->offch_state != OFFCHS_NONE
9168 #if CONFIG_DFS
9169 || IS_RADAR_DETECTED(rfctl) || rfctl->csa_chandef.chan
9170 #endif
9171 ) {
9172 blocked = _TRUE;
9173 goto exit;
9174 }
9175
9176 for (i = 0; i < dvobj->iface_nums; i++) {
9177 iface = dvobj->padapters[i];
9178 mlmeext = &iface->mlmeextpriv;
9179
9180 /* check scan state */
9181 if (mlmeext_scan_state(mlmeext) != SCAN_DISABLE
9182 && mlmeext_scan_state(mlmeext) != SCAN_BACK_OP
9183 ) {
9184 blocked = _TRUE;
9185 goto exit;
9186 }
9187
9188 if (mlmeext_scan_state(mlmeext) == SCAN_BACK_OP
9189 && !mlmeext_chk_scan_backop_flags(mlmeext, SS_BACKOP_TX_RESUME)
9190 ) {
9191 blocked = _TRUE;
9192 goto exit;
9193 }
9194 }
9195
9196 exit:
9197 return blocked;
9198 }
9199
9200 #ifdef CONFIG_LAYER2_ROAMING
9201 /* dequeue + xmit the cached skbs queued during the roam procedure */
9202 void dequeuq_roam_pkt(_adapter *padapter)
9203 {
9204 struct xmit_frame *rframe;
9205 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9206 _list *plist = NULL, *phead = NULL;
9207
9208 if (padapter->mlmepriv.roam_network) {
9209 padapter->mlmepriv.roam_network = NULL;
9210 _rtw_spinlock_bh(&pxmitpriv->rpkt_queue.lock);
9211 phead = get_list_head(&pxmitpriv->rpkt_queue);
9212 plist = get_next(phead);
9213 while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) {
9214 rframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
9215 plist = get_next(plist);
9216 rtw_list_delete(&rframe->list);
9217 core_tx_per_packet(padapter, rframe, &rframe->pkt, NULL);
9218 }
9219 _rtw_spinunlock_bh(&pxmitpriv->rpkt_queue.lock);
9220 }
9221 }
9222 #endif
9223
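/*
 * CONFIG_TX_AMSDU: one software timer per access category. Each timeout
 * handler marks the corresponding amsdu_*_timeout flag as expired and kicks
 * the xmit tasklet, presumably so frames held back for A-MSDU aggregation
 * are flushed even when no further candidates arrive.
 */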
9224 #ifdef CONFIG_TX_AMSDU
9225 void rtw_amsdu_vo_timeout_handler(void *FunctionContext)
9226 {
9227 _adapter *adapter = (_adapter *)FunctionContext;
9228
9229 adapter->xmitpriv.amsdu_vo_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9230
9231 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9232 }
9233
9234 void rtw_amsdu_vi_timeout_handler(void *FunctionContext)
9235 {
9236 _adapter *adapter = (_adapter *)FunctionContext;
9237
9238 adapter->xmitpriv.amsdu_vi_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9239
9240 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9241 }
9242
9243 void rtw_amsdu_be_timeout_handler(void *FunctionContext)
9244 {
9245 _adapter *adapter = (_adapter *)FunctionContext;
9246
9247 adapter->xmitpriv.amsdu_be_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9248
9249 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9250 }
9251
9252 void rtw_amsdu_bk_timeout_handler(void *FunctionContext)
9253 {
9254 _adapter *adapter = (_adapter *)FunctionContext;
9255
9256 adapter->xmitpriv.amsdu_bk_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9257
9258 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9259 }
9260
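/*
 * The priority-to-AC mapping used by the helpers below follows the usual
 * 802.1D user-priority to WMM AC table: 1,2 -> BK, 4,5 -> VI, 6,7 -> VO,
 * and 0,3 (or anything else) -> BE.
 */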
9261 u8 rtw_amsdu_get_timer_status(_adapter *padapter, u8 priority)
9262 {
9263 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9264 u8 status = RTW_AMSDU_TIMER_UNSET;
9265
9266 switch (priority) {
9267 case 1:
9268 case 2:
9269 status = pxmitpriv->amsdu_bk_timeout;
9270 break;
9271 case 4:
9272 case 5:
9273 status = pxmitpriv->amsdu_vi_timeout;
9274 break;
9275 case 6:
9276 case 7:
9277 status = pxmitpriv->amsdu_vo_timeout;
9278 break;
9279 case 0:
9280 case 3:
9281 default:
9282 status = pxmitpriv->amsdu_be_timeout;
9283 break;
9284 }
9285 return status;
9286 }
9287
9288 void rtw_amsdu_set_timer_status(_adapter *padapter, u8 priority, u8 status)
9289 {
9290 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9291
9292 switch (priority) {
9293 case 1:
9294 case 2:
9295 pxmitpriv->amsdu_bk_timeout = status;
9296 break;
9297 case 4:
9298 case 5:
9299 pxmitpriv->amsdu_vi_timeout = status;
9300 break;
9301 case 6:
9302 case 7:
9303 pxmitpriv->amsdu_vo_timeout = status;
9304 break;
9305 case 0:
9306 case 3:
9307 default:
9308 pxmitpriv->amsdu_be_timeout = status;
9309 break;
9310 }
9311 }
9312
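/*
 * Arm or cancel the per-AC A-MSDU flush timer. The timer is armed with the
 * minimum period of 1 (in the platform timer units, typically milliseconds),
 * so it effectively fires on the next timer tick.
 */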
9313 void rtw_amsdu_set_timer(_adapter *padapter, u8 priority)
9314 {
9315 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9316 _timer *amsdu_timer = NULL;
9317
9318 switch (priority) {
9319 case 1:
9320 case 2:
9321 amsdu_timer = &pxmitpriv->amsdu_bk_timer;
9322 break;
9323 case 4:
9324 case 5:
9325 amsdu_timer = &pxmitpriv->amsdu_vi_timer;
9326 break;
9327 case 6:
9328 case 7:
9329 amsdu_timer = &pxmitpriv->amsdu_vo_timer;
9330 break;
9331 case 0:
9332 case 3:
9333 default:
9334 amsdu_timer = &pxmitpriv->amsdu_be_timer;
9335 break;
9336 }
9337 _set_timer(amsdu_timer, 1);
9338 }
9339
9340 void rtw_amsdu_cancel_timer(_adapter *padapter, u8 priority)
9341 {
9342 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9343 _timer *amsdu_timer = NULL;
9344
9345 switch (priority) {
9346 case 1:
9347 case 2:
9348 amsdu_timer = &pxmitpriv->amsdu_bk_timer;
9349 break;
9350 case 4:
9351 case 5:
9352 amsdu_timer = &pxmitpriv->amsdu_vi_timer;
9353 break;
9354 case 6:
9355 case 7:
9356 amsdu_timer = &pxmitpriv->amsdu_vo_timer;
9357 break;
9358 case 0:
9359 case 3:
9360 default:
9361 amsdu_timer = &pxmitpriv->amsdu_be_timer;
9362 break;
9363 }
9364 _cancel_timer_ex(amsdu_timer);
9365 }
9366 #endif /* CONFIG_TX_AMSDU */
9367
9368 #if 0 /*def DBG_TXBD_DESC_DUMP*/
9369 static struct rtw_tx_desc_backup tx_backup[HW_QUEUE_ENTRY][TX_BAK_FRMAE_CNT];
9370 static u8 backup_idx[HW_QUEUE_ENTRY];
9371
9372 void rtw_tx_desc_backup(_adapter *padapter, struct xmit_frame *pxmitframe, u8 desc_size, u8 hwq)
9373 {
9374 u32 tmp32;
9375 u8 *pxmit_buf;
9376
9377 if (rtw_hw_get_init_completed(adapter_to_dvobj(padapter)) == _FALSE)
9378 return;
9379
9380 pxmit_buf = pxmitframe->pxmitbuf->pbuf;
9381
9382 _rtw_memcpy(tx_backup[hwq][backup_idx[hwq]].tx_bak_desc, pxmit_buf, desc_size);
9383 _rtw_memcpy(tx_backup[hwq][backup_idx[hwq]].tx_bak_data_hdr, pxmit_buf+desc_size, TX_BAK_DATA_LEN);
9384
9385 #if 0 /*GEORGIA_TODO_REDEFINE_IO*/
9386 tmp32 = rtw_read32(padapter, get_txbd_rw_reg(hwq));
9387 #else
9388 tmp32 = rtw_hal_get_txbd_rwreg(padapter);
9389 #endif
9390
9391 tx_backup[hwq][backup_idx[hwq]].tx_bak_rp = (tmp32>>16)&0xfff;
9392 tx_backup[hwq][backup_idx[hwq]].tx_bak_wp = tmp32&0xfff;
9393
9394 tx_backup[hwq][backup_idx[hwq]].tx_desc_size = desc_size;
9395
9396 backup_idx[hwq] = (backup_idx[hwq] + 1) % TX_BAK_FRMAE_CNT;
9397 }
9398
9399 void rtw_tx_desc_backup_reset(void)
9400 {
9401 int i, j;
9402
9403 for (i = 0; i < HW_QUEUE_ENTRY; i++) {
9404 for (j = 0; j < TX_BAK_FRMAE_CNT; j++)
9405 _rtw_memset(&tx_backup[i][j], 0, sizeof(struct rtw_tx_desc_backup));
9406
9407 backup_idx[i] = 0;
9408 }
9409 }
9410
9411 u8 rtw_get_tx_desc_backup(_adapter *padapter, u8 hwq, struct rtw_tx_desc_backup **pbak)
9412 {
9413 *pbak = &tx_backup[hwq][0];
9414
9415 return backup_idx[hwq];
9416 }
9417 #endif
9418
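/*
 * CONFIG_PCI_TX_POLLING: a self-rearming 1-tick timer on the primary adapter
 * that periodically invokes the HAL tx_poll_handler op, presumably as a
 * fallback for servicing the TX path when interrupts alone are insufficient.
 */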
9419 #ifdef CONFIG_PCI_TX_POLLING
9420 void rtw_tx_poll_init(_adapter *padapter)
9421 {
9422 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9423 _timer *timer = &pxmitpriv->tx_poll_timer;
9424
9425 if (!is_primary_adapter(padapter))
9426 return;
9427
9428 if (timer->function != NULL) {
9429 		RTW_INFO("tx polling timer has already been initialized.\n");
9430 return;
9431 }
9432
9433 rtw_init_timer(timer, rtw_tx_poll_timeout_handler, padapter);
9434 rtw_tx_poll_timer_set(padapter, 1);
9435 RTW_INFO("Tx poll timer init!\n");
9436 }
9437
9438 void rtw_tx_poll_timeout_handler(void *FunctionContext)
9439 {
9440 _adapter *adapter = (_adapter *)FunctionContext;
9441
9442 rtw_tx_poll_timer_set(adapter, 1);
9443
9444 if (adapter->dvobj->hal_func.tx_poll_handler)
9445 adapter->dvobj->hal_func.tx_poll_handler(adapter);
9446 else
9447 RTW_WARN("hal ops: tx_poll_handler is NULL\n");
9448 }
9449
9450 void rtw_tx_poll_timer_set(_adapter *padapter, u32 delay)
9451 {
9452 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9453 _timer *timer = NULL;
9454
9455 timer = &pxmitpriv->tx_poll_timer;
9456 _set_timer(timer, delay);
9457 }
9458
9459 void rtw_tx_poll_timer_cancel(_adapter *padapter)
9460 {
9461 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9462 _timer *timer = NULL;
9463
9464 if (!is_primary_adapter(padapter))
9465 return;
9466
9467 timer = &pxmitpriv->tx_poll_timer;
9468 _cancel_timer_ex(timer);
9469 timer->function = NULL;
9470 RTW_INFO("Tx poll timer cancel !\n");
9471 }
9472 #endif /* CONFIG_PCI_TX_POLLING */
9473
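/*
 * submit_ctx: a small completion wrapper used to wait for an asynchronous
 * submission to finish. A minimal usage sketch (the waiting and completing
 * sides usually run in different contexts):
 *
 *	struct submit_ctx sctx, *psctx = &sctx;
 *
 *	rtw_sctx_init(&sctx, 2000);		// 2000 ms timeout
 *	// ... hand psctx to the code that completes the operation ...
 *	if (rtw_sctx_wait(&sctx, __func__) != _SUCCESS)
 *		;	// timed out, or completed with an error status
 *
 *	// completing side:
 *	rtw_sctx_done(&psctx);			// or rtw_sctx_done_err(&psctx, status)
 */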
9474 void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
9475 {
9476 sctx->timeout_ms = timeout_ms;
9477 sctx->submit_time = rtw_get_current_time();
9478 _rtw_init_completion(&sctx->done);
9479 sctx->status = RTW_SCTX_SUBMITTED;
9480 }
9481
9482 int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
9483 {
9484 int ret = _FAIL;
9485 unsigned long expire;
9486 int status = 0;
9487
9488 #ifdef PLATFORM_LINUX
9489 expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
9490 if (!_rtw_wait_for_comp_timeout(&sctx->done, expire)) {
9491 		/* timed out; report RTW_SCTX_DONE_TIMEOUT to the caller */
9492 status = RTW_SCTX_DONE_TIMEOUT;
9493 RTW_INFO("%s timeout: %s\n", __func__, msg);
9494 } else
9495 status = sctx->status;
9496 #endif
9497
9498 if (status == RTW_SCTX_DONE_SUCCESS)
9499 ret = _SUCCESS;
9500
9501 return ret;
9502 }
9503
9504 bool rtw_sctx_chk_waring_status(int status)
9505 {
9506 switch (status) {
9507 case RTW_SCTX_DONE_UNKNOWN:
9508 case RTW_SCTX_DONE_BUF_ALLOC:
9509 case RTW_SCTX_DONE_BUF_FREE:
9510
9511 case RTW_SCTX_DONE_DRV_STOP:
9512 case RTW_SCTX_DONE_DEV_REMOVE:
9513 return _TRUE;
9514 default:
9515 return _FALSE;
9516 }
9517 }
9518
9519 void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
9520 {
9521 if (*sctx) {
9522 if (rtw_sctx_chk_waring_status(status))
9523 RTW_INFO("%s status:%d\n", __func__, status);
9524 (*sctx)->status = status;
9525 #ifdef PLATFORM_LINUX
9526 complete(&((*sctx)->done));
9527 #endif
9528 *sctx = NULL;
9529 }
9530 }
9531
9532 void rtw_sctx_done(struct submit_ctx **sctx)
9533 {
9534 rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
9535 }
9536
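/*
 * CONFIG_XMIT_ACK: rtw_ack_tx_wait() re-arms xmitpriv->ack_tx_ops and blocks
 * until rtw_ack_tx_done() reports the TX-ack status or the timeout elapses;
 * rtw_ack_tx_done() only completes the context when pxmitpriv->ack_tx is set.
 */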
9537 #ifdef CONFIG_XMIT_ACK
9538 int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
9539 {
9540 struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
9541
9542 pack_tx_ops->submit_time = rtw_get_current_time();
9543 pack_tx_ops->timeout_ms = timeout_ms;
9544 pack_tx_ops->status = RTW_SCTX_SUBMITTED;
9545
9546 return rtw_sctx_wait(pack_tx_ops, __func__);
9547 }
9548
9549 void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
9550 {
9551 struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
9552
9553 if (pxmitpriv->ack_tx)
9554 rtw_sctx_done_err(&pack_tx_ops, status);
9555 else
9556 RTW_INFO("%s ack_tx not set\n", __func__);
9557 }
9558 #endif /* CONFIG_XMIT_ACK */
9559