1 /******************************************************************************
2 *
3 * Copyright(c) 2007 - 2019 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 *****************************************************************************/
15 #define _RTW_XMIT_C_
16
17 #include <drv_types.h>
18
19 static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
20 static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
21
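/* Reset one per-AC tx service queue: initialize its tx_pending list entry,
 * its sta_pending frame queue and its queued-frame count. */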
22 static void _init_txservq(struct tx_servq *ptxservq)
23 {
24 _rtw_init_listhead(&ptxservq->tx_pending);
25 _rtw_init_queue(&ptxservq->sta_pending);
26 ptxservq->qcnt = 0;
27 }
28
29
30 void _rtw_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
31 {
32
33
34 _rtw_memset((unsigned char *)psta_xmitpriv, 0, sizeof(struct sta_xmit_priv));
35
36 _rtw_spinlock_init(&psta_xmitpriv->lock);
37
38 /* for(i = 0 ; i < MAX_NUMBLKS; i++) */
39 /* _init_txservq(&(psta_xmitpriv->blk_q[i])); */
40
41 _init_txservq(&psta_xmitpriv->be_q);
42 _init_txservq(&psta_xmitpriv->bk_q);
43 _init_txservq(&psta_xmitpriv->vi_q);
44 _init_txservq(&psta_xmitpriv->vo_q);
45 _rtw_init_listhead(&psta_xmitpriv->legacy_dz);
46 _rtw_init_listhead(&psta_xmitpriv->apsd);
47
48
49 }
50
51 void rtw_init_xmit_block(_adapter *padapter)
52 {
53 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
54
55 _rtw_spinlock_init(&dvobj->xmit_block_lock);
56 dvobj->xmit_block = XMIT_BLOCK_NONE;
57
58 }
59 void rtw_free_xmit_block(_adapter *padapter)
60 {
61 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
62
63 _rtw_spinlock_free(&dvobj->xmit_block_lock);
64 }
65
66 #ifdef RTW_PHL_TX
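/* PHL tx path: pre-allocate MAX_TX_RING_NUM tx rings. Each ring holds
 * RTW_MAX_FRAG_NUM rtw_xmit_req entries followed by the per-fragment head/tail
 * buffers and a pkt_list area (see offset_head/offset_tail/offset_list), and is
 * tracked by an xmit_txreq_buf descriptor linked into free_txreq_queue. */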
67 u8 alloc_txring(_adapter *padapter)
68 {
69 struct xmit_txreq_buf *ptxreq_buf = NULL;
70 u32 idx, alloc_sz = 0, alloc_sz_txreq = 0;
71 u8 res = _SUCCESS;
72
73 u32 offset_head = (sizeof(struct rtw_xmit_req) * RTW_MAX_FRAG_NUM);
74 u32 offset_tail = offset_head + (SZ_HEAD_BUF * RTW_MAX_FRAG_NUM);
75 u32 offset_list = offset_tail + (SZ_TAIL_BUF * RTW_MAX_FRAG_NUM);
76
77 PHLTX_ENTER;
78
79 alloc_sz = (SZ_TX_RING * RTW_MAX_FRAG_NUM);
80 alloc_sz_txreq = MAX_TX_RING_NUM * (sizeof(struct xmit_txreq_buf));
81
82 RTW_INFO("eric-tx [%s] alloc_sz = %d, alloc_sz_txreq = %d\n", __FUNCTION__, alloc_sz, alloc_sz_txreq);
83
84 padapter->pxmit_txreq_buf = rtw_vmalloc(alloc_sz_txreq);
85 ptxreq_buf = (struct xmit_txreq_buf *)padapter->pxmit_txreq_buf;
86
87 _rtw_init_queue(&padapter->free_txreq_queue);
88
89 for (idx = 0; idx < MAX_TX_RING_NUM; idx++) {
90
91 padapter->tx_pool_ring[idx] = rtw_zmalloc(alloc_sz);
92 if (!padapter->tx_pool_ring[idx]) {
93 			RTW_ERR("[core] alloc txring fail, please check.\n");
94 res = _FAIL;
95 break;
96 }
97 _rtw_init_listhead(&ptxreq_buf->list);
98 ptxreq_buf->txreq = padapter->tx_pool_ring[idx];
99 ptxreq_buf->head = padapter->tx_pool_ring[idx] + offset_head;
100 ptxreq_buf->tail = padapter->tx_pool_ring[idx] + offset_tail;
101 ptxreq_buf->pkt_list = padapter->tx_pool_ring[idx] + offset_list;
102
103 #ifdef USE_PREV_WLHDR_BUF /* CONFIG_CORE_TXSC */
104 ptxreq_buf->macid = 0xff;
105 ptxreq_buf->txsc_id = 0xff;
106 #endif
107
108 rtw_list_insert_tail(&(ptxreq_buf->list), &(padapter->free_txreq_queue.queue));
109
110 ptxreq_buf++;
111 }
112
113 padapter->free_txreq_cnt = MAX_TX_RING_NUM;
114
115 return res;
116 }
117
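/* Release the PHL tx rings: for TXSC-type txreqs first complete any skbs the
 * ring still references, then free each ring buffer, the free_txreq_queue lock
 * and the xmit_txreq_buf descriptor array. */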
118 void free_txring(_adapter *padapter)
119 {
120 u32 idx, alloc_sz = 0, alloc_sz_txreq = 0;
121 #ifdef CONFIG_CORE_TXSC
122 struct rtw_xmit_req *txreq = NULL;
123 struct xmit_txreq_buf *txreq_buf = NULL;
124 u8 j;
125 #endif
126
127 PHLTX_ENTER;
128
129 alloc_sz = (SZ_TX_RING * RTW_MAX_FRAG_NUM);
130 alloc_sz_txreq = MAX_TX_RING_NUM * (sizeof(struct xmit_txreq_buf));
131
132 RTW_INFO("eric-tx [%s] alloc_sz = %d, alloc_sz_txreq = %d\n", __func__, alloc_sz, alloc_sz_txreq);
133
134 for (idx = 0; idx < MAX_TX_RING_NUM; idx++) {
135 if (padapter->tx_pool_ring[idx]) {
136 #ifdef CONFIG_CORE_TXSC
137 txreq = (struct rtw_xmit_req *)padapter->tx_pool_ring[idx];
138 if (txreq->treq_type == RTW_PHL_TREQ_TYPE_CORE_TXSC) {
139 txreq_buf = (struct xmit_txreq_buf *)txreq->os_priv;
140 if (txreq_buf) {
141 					/* CONFIG_TXSC_AMSDU */
142 for (j = 0; j < txreq_buf->pkt_cnt; j++) {
143 if (txreq_buf->pkt[j])
144 rtw_os_pkt_complete(padapter, (void *)txreq_buf->pkt[j]);
145 }
146 }
147 }
148 #endif
149 rtw_mfree(padapter->tx_pool_ring[idx], alloc_sz);
150 }
151 }
152
153 _rtw_spinlock_free(&padapter->free_txreq_queue.lock);
154 rtw_vmfree(padapter->pxmit_txreq_buf, alloc_sz_txreq);
155 }
156
157 #endif
158
159
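/* Set up the core xmit context: spinlocks, per-AC pending queues, the
 * NR_XMITFRAME data xmit_frame pool, the NR_XMITFRAME_EXT management frame
 * pool (each frame with its own PHL txreq/pkt_list), hwxmit entries, optional
 * AMSDU timers and, for RTW_PHL_TX, the tx rings via alloc_txring(). */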
160 s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, _adapter *padapter)
161 {
162 int i;
163 #if 0 /*CONFIG_CORE_XMITBUF*/
164 struct xmit_buf *pxmitbuf;
165 #endif
166 struct xmit_frame *pxframe;
167 sint res = _SUCCESS;
168 /* MGT_TXREQ_MGT */
169 u8 *txreq = NULL, *pkt_list = NULL;
170
171 #if 0 /*CONFIG_CORE_XMITBUF*/
172 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
173
174 u8 xmitbuf_nr = GET_HAL_XMITBUF_NR(dvobj);
175 u16 xmitbuf_sz = GET_HAL_XMITBUF_SZ(dvobj);
176
177 u8 xmitbuf_ext_nr = GET_HAL_XMITBUF_EXT_NR(dvobj);
178 u16 xmitbuf_ext_sz = GET_HAL_XMITBUF_EXT_SZ(dvobj);
179 #endif
180
181 /* We don't need to memset padapter->XXX to zero, because adapter is allocated by rtw_zvmalloc(). */
182 /* _rtw_memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); */
183
184 _rtw_spinlock_init(&pxmitpriv->lock);
185 _rtw_spinlock_init(&pxmitpriv->lock_sctx);
186 #if 0 /*def CONFIG_XMIT_THREAD_MODE*/
187 _rtw_init_sema(&pxmitpriv->xmit_sema, 0);
188 #endif
189
190 /*
191 	Please insert all the queue initialization using _rtw_init_queue below
192 */
193
194 pxmitpriv->adapter = padapter;
195
196 /* for(i = 0 ; i < MAX_NUMBLKS; i++) */
197 /* _rtw_init_queue(&pxmitpriv->blk_strms[i]); */
198
199 _rtw_init_queue(&pxmitpriv->be_pending);
200 _rtw_init_queue(&pxmitpriv->bk_pending);
201 _rtw_init_queue(&pxmitpriv->vi_pending);
202 _rtw_init_queue(&pxmitpriv->vo_pending);
203 _rtw_init_queue(&pxmitpriv->bm_pending);
204
205 /* _rtw_init_queue(&pxmitpriv->legacy_dz_queue); */
206 /* _rtw_init_queue(&pxmitpriv->apsd_queue); */
207
208 _rtw_init_queue(&pxmitpriv->free_xmit_queue);
209
210 /*
211 	Please allocate memory with sz = sizeof(struct xmit_frame) * NR_XMITFRAME,
212 	and initialize free_xmit_frame below.
213 	Please also apply free_txobj to link up all the xmit_frames...
214 */
215
216 pxmitpriv->pallocated_frame_buf = rtw_zvmalloc(NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
217
218 if (pxmitpriv->pallocated_frame_buf == NULL) {
219 pxmitpriv->pxmit_frame_buf = NULL;
220 res = _FAIL;
221 goto exit;
222 }
223 pxmitpriv->pxmit_frame_buf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_frame_buf), 4);
224 /* pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 - */
225 /* ((SIZE_PTR) (pxmitpriv->pallocated_frame_buf) &3); */
226
227 pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
228
229 for (i = 0; i < NR_XMITFRAME; i++) {
230 _rtw_init_listhead(&(pxframe->list));
231
232 pxframe->padapter = padapter;
233 pxframe->frame_tag = NULL_FRAMETAG;
234
235 pxframe->pkt = NULL;
236
237 #if 0 /*CONFIG_CORE_XMITBUF*/
238 pxframe->buf_addr = NULL;
239 pxframe->pxmitbuf = NULL;
240 #else
241 /*alloc buf_addr*/
242 /*rtw_os_xmit_resource_alloc(padapter, pxframe);*/
243 #endif
244
245 rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xmit_queue.queue));
246
247 pxframe++;
248 }
249
250 pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
251
252 pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
253
254 #if 0 /*CONFIG_CORE_XMITBUF*/
255 /* init xmit_buf */
256 _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
257 _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
258
259 pxmitpriv->pallocated_xmitbuf = rtw_zvmalloc(xmitbuf_nr * sizeof(struct xmit_buf) + 4);
260
261 if (pxmitpriv->pallocated_xmitbuf == NULL) {
262 res = _FAIL;
263 goto exit;
264 }
265
266 pxmitpriv->pxmitbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmitbuf), 4);
267 /* pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - */
268 /* ((SIZE_PTR) (pxmitpriv->pallocated_xmitbuf) &3); */
269
270 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
271
272 for (i = 0; i < xmitbuf_nr; i++) {
273 _rtw_init_listhead(&pxmitbuf->list);
274
275 pxmitbuf->priv_data = NULL;
276 pxmitbuf->padapter = padapter;
277 pxmitbuf->buf_tag = XMITBUF_DATA;
278
279 /* Tx buf allocation may fail sometimes, so sleep and retry. */
280 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
281 (xmitbuf_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
282 if (res == _FAIL) {
283 rtw_msleep_os(10);
284 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
285 (xmitbuf_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
286 if (res == _FAIL)
287 goto exit;
288 }
289
290 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
291 pxmitbuf->phead = pxmitbuf->pbuf;
292 pxmitbuf->pend = pxmitbuf->pbuf + xmitbuf_sz;
293 pxmitbuf->len = 0;
294 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
295 #endif
296
297 pxmitbuf->flags = XMIT_VO_QUEUE;
298
299 rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue));
300 #ifdef DBG_XMIT_BUF
301 pxmitbuf->no = i;
302 #endif
303
304 pxmitbuf++;
305
306 }
307
308 pxmitpriv->free_xmitbuf_cnt = xmitbuf_nr;
309 #endif
310 /* init xframe_ext queue, the same count as extbuf */
311 _rtw_init_queue(&pxmitpriv->free_xframe_ext_queue);
312 #ifdef CONFIG_LAYER2_ROAMING
313 _rtw_init_queue(&pxmitpriv->rpkt_queue);
314 #endif
315
316 pxmitpriv->xframe_ext_alloc_addr = rtw_zvmalloc(NR_XMITFRAME_EXT * sizeof(struct xmit_frame) + 4);
317
318 if (pxmitpriv->xframe_ext_alloc_addr == NULL) {
319 pxmitpriv->xframe_ext = NULL;
320 res = _FAIL;
321 goto exit;
322 }
323 pxmitpriv->xframe_ext = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->xframe_ext_alloc_addr), 4);
324 pxframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
325
326 /* MGT_TXREQ_QMGT */
327 pxmitpriv->xframe_ext_txreq_alloc_addr = rtw_zmalloc(NR_XMITFRAME_EXT * SZ_MGT_RING);
328 if (pxmitpriv->xframe_ext_txreq_alloc_addr == NULL) {
329 pxmitpriv->xframe_ext_txreq = NULL;
330 res = _FAIL;
331 goto exit;
332 }
333 pxmitpriv->xframe_ext_txreq = pxmitpriv->xframe_ext_txreq_alloc_addr;
334 txreq = pxmitpriv->xframe_ext_txreq;
335 pkt_list = pxmitpriv->xframe_ext_txreq + sizeof(struct rtw_xmit_req);
336
337 for (i = 0; i < NR_XMITFRAME_EXT; i++) {
338 _rtw_init_listhead(&(pxframe->list));
339
340 pxframe->padapter = padapter;
341 pxframe->frame_tag = NULL_FRAMETAG;
342
343 pxframe->pkt = NULL;
344
345 #if 0 /*CONFIG_CORE_XMITBUF*/
346 pxframe->buf_addr = NULL;
347 pxframe->pxmitbuf = NULL;
348 #else
349 /*alloc buf_addr*/
350 rtw_os_xmit_resource_alloc(padapter, pxframe);
351 #endif
352
353 pxframe->ext_tag = 1;
354
355 /* MGT_TXREQ_QMGT */
356 pxframe->phl_txreq = (struct rtw_xmit_req *)txreq;
357 pxframe->phl_txreq->pkt_list = pkt_list;
358
359 rtw_list_insert_tail(&(pxframe->list), &(pxmitpriv->free_xframe_ext_queue.queue));
360
361 pxframe++;
362 /* MGT_TXREQ_QMGT */
363 txreq += SZ_MGT_RING;
364 pkt_list += SZ_MGT_RING;
365 }
366 pxmitpriv->free_xframe_ext_cnt = NR_XMITFRAME_EXT;
367
368 #if 0 /*CONFIG_CORE_XMITBUF*/
369 /* Init xmit extension buff */
370 _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
371
372 pxmitpriv->pallocated_xmit_extbuf = rtw_zvmalloc(xmitbuf_ext_nr * sizeof(struct xmit_buf) + 4);
373
374 if (pxmitpriv->pallocated_xmit_extbuf == NULL) {
375 res = _FAIL;
376 goto exit;
377 }
378
379 pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitpriv->pallocated_xmit_extbuf), 4);
380
381 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
382
383 for (i = 0; i < xmitbuf_ext_nr; i++) {
384 _rtw_init_listhead(&pxmitbuf->list);
385
386 pxmitbuf->priv_data = NULL;
387 pxmitbuf->padapter = padapter;
388 pxmitbuf->buf_tag = XMITBUF_MGNT;
389
390 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
391 xmitbuf_ext_sz + SZ_ALIGN_XMITFRAME_EXT, _TRUE);
392 if (res == _FAIL) {
393 res = _FAIL;
394 goto exit;
395 }
396
397 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
398 pxmitbuf->phead = pxmitbuf->pbuf;
399 pxmitbuf->pend = pxmitbuf->pbuf + xmitbuf_ext_sz;
400 pxmitbuf->len = 0;
401 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
402 #endif
403
404 rtw_list_insert_tail(&pxmitbuf->list, &(pxmitpriv->free_xmit_extbuf_queue.queue));
405 #ifdef DBG_XMIT_BUF_EXT
406 pxmitbuf->no = i;
407 #endif
408 pxmitbuf++;
409
410 }
411
412 pxmitpriv->free_xmit_extbuf_cnt = xmitbuf_ext_nr;
413
414 /*GEORGIA_TODO_FIXIT_IC_GEN_DEPENDENCE*/
415 for (i = 0; i < CMDBUF_MAX; i++) {
416 pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
417 if (pxmitbuf) {
418 _rtw_init_listhead(&pxmitbuf->list);
419
420 pxmitbuf->priv_data = NULL;
421 pxmitbuf->padapter = padapter;
422 pxmitbuf->buf_tag = XMITBUF_CMD;
423
424 res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf,
425 MAX_CMDBUF_SZ + SZ_ALIGN_XMITFRAME_EXT, _TRUE);
426 if (res == _FAIL) {
427 res = _FAIL;
428 goto exit;
429 }
430
431 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
432 pxmitbuf->phead = pxmitbuf->pbuf;
433 pxmitbuf->pend = pxmitbuf->pbuf + MAX_CMDBUF_SZ;
434 pxmitbuf->len = 0;
435 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
436 #endif
437 pxmitbuf->alloc_sz = MAX_CMDBUF_SZ + SZ_ALIGN_XMITFRAME_EXT;
438 }
439 }
440 #endif
441 rtw_alloc_hwxmits(padapter);
442 rtw_init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
443
444 for (i = 0; i < 4; i++)
445 pxmitpriv->wmm_para_seq[i] = i;
446
447 #ifdef CONFIG_USB_HCI
448 pxmitpriv->txirp_cnt = 1;
449
450 _rtw_init_sema(&(pxmitpriv->tx_retevt), 0);
451
452 /* per AC pending irp */
453 pxmitpriv->beq_cnt = 0;
454 pxmitpriv->bkq_cnt = 0;
455 pxmitpriv->viq_cnt = 0;
456 pxmitpriv->voq_cnt = 0;
457 #endif
458
459
460 #ifdef CONFIG_XMIT_ACK
461 pxmitpriv->ack_tx = _FALSE;
462 _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
463 rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
464 #endif
465
466 #ifdef CONFIG_TX_AMSDU
467 rtw_init_timer(&(pxmitpriv->amsdu_vo_timer),
468 rtw_amsdu_vo_timeout_handler, padapter);
469 pxmitpriv->amsdu_vo_timeout = RTW_AMSDU_TIMER_UNSET;
470
471 rtw_init_timer(&(pxmitpriv->amsdu_vi_timer),
472 rtw_amsdu_vi_timeout_handler, padapter);
473 pxmitpriv->amsdu_vi_timeout = RTW_AMSDU_TIMER_UNSET;
474
475 rtw_init_timer(&(pxmitpriv->amsdu_be_timer),
476 rtw_amsdu_be_timeout_handler, padapter);
477 pxmitpriv->amsdu_be_timeout = RTW_AMSDU_TIMER_UNSET;
478
479 rtw_init_timer(&(pxmitpriv->amsdu_bk_timer),
480 rtw_amsdu_bk_timeout_handler, padapter);
481 pxmitpriv->amsdu_bk_timeout = RTW_AMSDU_TIMER_UNSET;
482
483 pxmitpriv->amsdu_debug_set_timer = 0;
484 pxmitpriv->amsdu_debug_timeout = 0;
485 pxmitpriv->amsdu_debug_tasklet = 0;
486 pxmitpriv->amsdu_debug_enqueue = 0;
487 pxmitpriv->amsdu_debug_dequeue = 0;
488 for (i = 0; i < AMSDU_DEBUG_MAX_COUNT; i++)
489 pxmitpriv->amsdu_debug_coalesce[i] = 0;
490 #endif
491 #ifdef DBG_TXBD_DESC_DUMP
492 pxmitpriv->dump_txbd_desc = 0;
493 #endif
494 rtw_init_xmit_block(padapter);
495 rtw_intf_init_xmit_priv(padapter);
496
497 #ifdef RTW_PHL_TX //alloc xmit resource
498 printk("eric-tx CALL alloc_txring !!!!\n");
499 if (alloc_txring(padapter) == _FAIL) {
500 RTW_ERR("[core] alloc_txring fail !!!\n");
501 res = _FAIL;
502 goto exit;
503 }
504 #endif
505
506 #if defined(CONFIG_CORE_TXSC)
507 _rtw_spinlock_init(&pxmitpriv->txsc_lock);
508 #endif
509
510 exit:
511
512 return res;
513 }
514
515 void rtw_mfree_xmit_priv_lock(struct xmit_priv *pxmitpriv)
516 {
517 _rtw_spinlock_free(&pxmitpriv->lock);
518 #if 0 /*def CONFIG_XMIT_THREAD_MODE*/
519 _rtw_free_sema(&pxmitpriv->xmit_sema);
520 #endif
521
522 _rtw_spinlock_free(&pxmitpriv->be_pending.lock);
523 _rtw_spinlock_free(&pxmitpriv->bk_pending.lock);
524 _rtw_spinlock_free(&pxmitpriv->vi_pending.lock);
525 _rtw_spinlock_free(&pxmitpriv->vo_pending.lock);
526 _rtw_spinlock_free(&pxmitpriv->bm_pending.lock);
527
528 /* _rtw_spinlock_free(&pxmitpriv->legacy_dz_queue.lock); */
529 /* _rtw_spinlock_free(&pxmitpriv->apsd_queue.lock); */
530
531 _rtw_spinlock_free(&pxmitpriv->free_xmit_queue.lock);
532 #if 0 /*CONFIG_CORE_XMITBUF*/
533 _rtw_spinlock_free(&pxmitpriv->free_xmitbuf_queue.lock);
534 _rtw_spinlock_free(&pxmitpriv->pending_xmitbuf_queue.lock);
535 #endif
536 }
537
538
539 void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
540 {
541 int i;
542 _adapter *padapter = pxmitpriv->adapter;
543 struct xmit_frame *pxmitframe;
544 #if 0 /*CONFIG_CORE_XMITBUF*/
545 struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
546 #endif
547
548 #if 0 /*CONFIG_CORE_XMITBUF*/
549 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
550 u8 xmitbuf_nr = GET_HAL_XMITBUF_NR(dvobj);
551 u16 xmitbuf_sz = GET_HAL_XMITBUF_SZ(dvobj);
552
553 u8 xmitbuf_ext_nr = GET_HAL_XMITBUF_EXT_NR(dvobj);
554 u16 xmitbuf_ext_sz = GET_HAL_XMITBUF_EXT_SZ(dvobj);
555 #endif
556
557 rtw_intf_free_xmit_priv(padapter);
558
559 rtw_mfree_xmit_priv_lock(pxmitpriv);
560
561 if (pxmitpriv->pxmit_frame_buf == NULL)
562 goto out;
563
564 pxmitframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
565
566 for (i = 0; i < NR_XMITFRAME; i++) {
567 rtw_os_xmit_complete(padapter, pxmitframe);
568 /*alloc buf_addr*/
569 /*rtw_os_xmit_resource_free(padapter, pxmitframe);*/
570 pxmitframe++;
571 }
572
573 #if 0 /*CONFIG_CORE_XMITBUF*/
574 for (i = 0; i < xmitbuf_nr; i++) {
575 rtw_os_xmit_resource_free(padapter, pxmitbuf,
576 (xmitbuf_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
577
578 pxmitbuf++;
579 }
580 #endif
581 if (pxmitpriv->pallocated_frame_buf)
582 rtw_vmfree(pxmitpriv->pallocated_frame_buf,
583 NR_XMITFRAME * sizeof(struct xmit_frame) + 4);
584
585 #if 0 /*CONFIG_CORE_XMITBUF*/
586 if (pxmitpriv->pallocated_xmitbuf)
587 rtw_vmfree(pxmitpriv->pallocated_xmitbuf,
588 xmitbuf_nr * sizeof(struct xmit_buf) + 4);
589 #endif
590
591 /* free xframe_ext queue, the same count as extbuf */
592 if (pxmitpriv->xframe_ext == NULL)
593 goto out;
594
595 pxmitframe = (struct xmit_frame *)pxmitpriv->xframe_ext;
596 for (i = 0; i < NR_XMITFRAME_EXT; i++) {
597 rtw_os_xmit_complete(padapter, pxmitframe);
598 /*free buf_addr*/
599 rtw_os_xmit_resource_free(padapter, pxmitframe);
600 pxmitframe++;
601 }
602
603 if (pxmitpriv->xframe_ext_alloc_addr)
604 rtw_vmfree(pxmitpriv->xframe_ext_alloc_addr,
605 NR_XMITFRAME_EXT * sizeof(struct xmit_frame) + 4);
606 _rtw_spinlock_free(&pxmitpriv->free_xframe_ext_queue.lock);
607
608 if (pxmitpriv->xframe_ext_txreq_alloc_addr)
609 rtw_mfree(pxmitpriv->xframe_ext_txreq_alloc_addr, NR_XMITFRAME_EXT * SZ_MGT_RING);
610
611 #if 0 /*CONFIG_CORE_XMITBUF*/
612
613 /* free xmit extension buff */
614 _rtw_spinlock_free(&pxmitpriv->free_xmit_extbuf_queue.lock);
615
616 pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
617 for (i = 0; i < xmitbuf_ext_nr; i++) {
618 rtw_os_xmit_resource_free(padapter, pxmitbuf,
619 (xmitbuf_ext_sz + SZ_ALIGN_XMITFRAME_EXT), _TRUE);
620
621 pxmitbuf++;
622 }
623
624 if (pxmitpriv->pallocated_xmit_extbuf)
625 rtw_vmfree(pxmitpriv->pallocated_xmit_extbuf,
626 xmitbuf_ext_nr * sizeof(struct xmit_buf) + 4);
627
628 for (i = 0; i < CMDBUF_MAX; i++) {
629 pxmitbuf = &pxmitpriv->pcmd_xmitbuf[i];
630 if (pxmitbuf != NULL)
631 rtw_os_xmit_resource_free(padapter, pxmitbuf, MAX_CMDBUF_SZ + SZ_ALIGN_XMITFRAME_EXT, _TRUE);
632 }
633 #endif
634 rtw_free_hwxmits(padapter);
635
636 #ifdef CONFIG_LAYER2_ROAMING
637 _rtw_spinlock_free(&pxmitpriv->rpkt_queue.lock);
638 #endif
639
640 #ifdef RTW_PHL_TX
641 free_txring(padapter);
642 #endif
643
644 #ifdef CONFIG_CORE_TXSC
645 txsc_clear(padapter);
646 _rtw_spinlock_free(&pxmitpriv->txsc_lock);
647 #endif
648
649 #ifdef CONFIG_XMIT_ACK
650 _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
651 #endif
652 rtw_free_xmit_block(padapter);
653 out:
654 return;
655 }
656
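/* Allocate the "lite" xmit resources kept in dvobj: a pool of lite_data_buf
 * descriptors, a matching extension pool and, for USB, a pool of tx data_urb
 * entries; each pool is 4-byte aligned and linked into its free queue. */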
657 u8 rtw_init_lite_xmit_resource(struct dvobj_priv *dvobj)
658 {
659
660 u8 ret = _SUCCESS;
661 	/* YiWei_todo: need to use the correct litexmitbuf_nr / urb_nr */
662 u32 litexmitbuf_nr = RTW_LITEXMITBUF_NR;
663 u32 litexmitbuf_ext_nr = RTW_LITEXMITBUF_NR;
664 struct lite_data_buf *litexmitbuf;
665 struct trx_data_buf_q *litexmitbuf_q = &dvobj->litexmitbuf_q;
666 struct trx_data_buf_q *litexmit_extbuf_q = &dvobj->litexmit_extbuf_q;
667 int i;
668 #ifdef CONFIG_USB_HCI
669 struct trx_urb_buf_q *xmit_urb_q = &dvobj->xmit_urb_q;
670 struct data_urb *xmiturb;
671 u32 urb_nr = RTW_XMITURB_NR;
672 #endif
673
674 /* init lite_xmit_buf */
675 _rtw_init_queue(&litexmitbuf_q->free_data_buf_queue);
676
677 litexmitbuf_q->alloc_data_buf =
678 rtw_zvmalloc(litexmitbuf_nr * sizeof(struct lite_data_buf) + 4);
679
680 if (litexmitbuf_q->alloc_data_buf == NULL) {
681 ret = _FAIL;
682 goto exit;
683 }
684
685 litexmitbuf_q->data_buf =
686 (u8 *)N_BYTE_ALIGNMENT((SIZE_PTR)(litexmitbuf_q->alloc_data_buf), 4);
687
688 litexmitbuf = (struct lite_data_buf *)litexmitbuf_q->data_buf;
689
690 for (i = 0; i < litexmitbuf_nr; i++) {
691 _rtw_init_listhead(&litexmitbuf->list);
692 rtw_list_insert_tail(&litexmitbuf->list,
693 &(litexmitbuf_q->free_data_buf_queue.queue));
694 litexmitbuf++;
695 }
696 litexmitbuf_q->free_data_buf_cnt = litexmitbuf_nr;
697
698
699 /* Init lite xmit extension buff */
700 _rtw_init_queue(&litexmit_extbuf_q->free_data_buf_queue);
701
702 litexmit_extbuf_q->alloc_data_buf =
703 rtw_zvmalloc(litexmitbuf_ext_nr * sizeof(struct lite_data_buf) + 4);
704
705 if (litexmit_extbuf_q->alloc_data_buf == NULL) {
706 ret = _FAIL;
707 goto exit;
708 }
709
710 litexmit_extbuf_q->data_buf =
711 (u8 *)N_BYTE_ALIGNMENT((SIZE_PTR)(litexmit_extbuf_q->alloc_data_buf), 4);
712
713 litexmitbuf = (struct lite_data_buf *)litexmit_extbuf_q->data_buf;
714
715 for (i = 0; i < litexmitbuf_ext_nr; i++) {
716 _rtw_init_listhead(&litexmitbuf->list);
717 rtw_list_insert_tail(&litexmitbuf->list,
718 &(litexmit_extbuf_q->free_data_buf_queue.queue));
719 litexmitbuf++;
720 }
721 litexmit_extbuf_q->free_data_buf_cnt = litexmitbuf_ext_nr;
722
723 #ifdef CONFIG_USB_HCI
724 /* init xmit_urb */
725 _rtw_init_queue(&xmit_urb_q->free_urb_buf_queue);
726 xmit_urb_q->alloc_urb_buf =
727 rtw_zvmalloc(urb_nr * sizeof(struct data_urb) + 4);
728 if (xmit_urb_q->alloc_urb_buf == NULL) {
729 ret = _FAIL;
730 goto exit;
731 }
732
733 xmit_urb_q->urb_buf =
734 (u8 *)N_BYTE_ALIGNMENT((SIZE_PTR)(xmit_urb_q->alloc_urb_buf), 4);
735
736 xmiturb = (struct data_urb *)xmit_urb_q->urb_buf;
737 for (i = 0; i < urb_nr; i++) {
738 _rtw_init_listhead(&xmiturb->list);
739 ret = rtw_os_urb_resource_alloc(xmiturb);
740 rtw_list_insert_tail(&xmiturb->list,
741 &(xmit_urb_q->free_urb_buf_queue.queue));
742 xmiturb++;
743 }
744 xmit_urb_q->free_urb_buf_cnt = urb_nr;
745 #endif
746
747 exit:
748 return ret;
749 }
750
751 void rtw_free_lite_xmit_resource(struct dvobj_priv *dvobj)
752 {
753 u8 ret = _SUCCESS;
754 	/* YiWei_todo: need to use the correct litexmitbuf_nr / urb_nr */
755 u32 litexmitbuf_nr = RTW_LITEXMITBUF_NR;
756 u32 litexmitbuf_ext_nr = RTW_LITEXMITBUF_NR;
757 struct trx_data_buf_q *litexmitbuf_q = &dvobj->litexmitbuf_q;
758 struct trx_data_buf_q *litexmit_extbuf_q = &dvobj->litexmit_extbuf_q;
759 #ifdef CONFIG_USB_HCI
760 struct data_urb *xmiturb;
761 struct trx_urb_buf_q *xmit_urb_q = &dvobj->xmit_urb_q;
762 u32 urb_nr = RTW_XMITURB_NR;
763 int i;
764 #endif
765
766 if (litexmitbuf_q->alloc_data_buf)
767 rtw_vmfree(litexmitbuf_q->alloc_data_buf,
768 litexmitbuf_nr * sizeof(struct lite_data_buf) + 4);
769
770 if (litexmit_extbuf_q->alloc_data_buf)
771 rtw_vmfree(litexmit_extbuf_q->alloc_data_buf,
772 litexmitbuf_ext_nr * sizeof(struct lite_data_buf) + 4);
773
774 #ifdef CONFIG_USB_HCI
775 xmiturb = (struct data_urb *)xmit_urb_q->urb_buf;
776 for (i = 0; i < urb_nr; i++) {
777 rtw_os_urb_resource_free(xmiturb);
778 xmiturb++;
779 }
780
781 if (xmit_urb_q->alloc_urb_buf)
782 rtw_vmfree(xmit_urb_q->alloc_urb_buf,
783 urb_nr * sizeof(struct data_urb) + 4);
784 #endif
785
786 }
787
788
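/* Return the tx bandwidth to use toward this station: start from the
 * negotiated chandef bandwidth and, while associated, clamp it to the
 * adapter's tx bandwidth cap for the current band (2G vs 5G). */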
789 u8 rtw_get_tx_bw_mode(_adapter *adapter, struct sta_info *sta)
790 {
791 u8 bw;
792
793 bw = sta->phl_sta->chandef.bw;
794 if (MLME_STATE(adapter) & WIFI_ASOC_STATE) {
795 if (adapter->mlmeextpriv.chandef.chan <= 14)
796 bw = rtw_min(bw, ADAPTER_TX_BW_2G(adapter));
797 else
798 bw = rtw_min(bw, ADAPTER_TX_BW_5G(adapter));
799 }
800
801 return bw;
802 }
803
804 void rtw_get_adapter_tx_rate_bmp_by_bw(_adapter *adapter, u8 bw, u16 *r_bmp_cck_ofdm, u32 *r_bmp_ht, u64 *r_bmp_vht)
805 {
806 /* ToDo */
807 #if 0
808 struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
809 struct macid_ctl_t *macid_ctl = dvobj_to_macidctl(dvobj);
810 u8 fix_bw = 0xFF;
811 u16 bmp_cck_ofdm = 0;
812 u32 bmp_ht = 0;
813 u64 bmp_vht = 0;
814 int i;
815
816 if (adapter->fix_rate != NO_FIX_RATE && adapter->fix_bw != NO_FIX_BW)
817 fix_bw = adapter->fix_bw;
818
819 /* TODO: adapter->fix_rate */
820
821 for (i = 0; i < macid_ctl->num; i++) {
822 if (!rtw_macid_is_used(macid_ctl, i))
823 continue;
824 if (!rtw_macid_is_iface_specific(macid_ctl, i, adapter))
825 continue;
826
827 if (bw == CHANNEL_WIDTH_20) /* CCK, OFDM always 20MHz */
828 bmp_cck_ofdm |= macid_ctl->rate_bmp0[i] & 0x00000FFF;
829
830 /* bypass mismatch bandwidth for HT, VHT */
831 if ((fix_bw != 0xFF && fix_bw != bw) || (fix_bw == 0xFF && macid_ctl->bw[i] != bw))
832 continue;
833
834 if (macid_ctl->vht_en[i])
835 bmp_vht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
836 else
837 bmp_ht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
838 }
839
840 /* TODO: mlmeext->tx_rate*/
841
842 if (r_bmp_cck_ofdm)
843 *r_bmp_cck_ofdm = bmp_cck_ofdm;
844 if (r_bmp_ht)
845 *r_bmp_ht = bmp_ht;
846 if (r_bmp_vht)
847 *r_bmp_vht = bmp_vht;
848 #endif
849 }
850
851 void rtw_get_shared_macid_tx_rate_bmp_by_bw(struct dvobj_priv *dvobj, u8 bw, u16 *r_bmp_cck_ofdm, u32 *r_bmp_ht, u64 *r_bmp_vht)
852 {
853 /* ToDo */
854 #if 0
855 struct macid_ctl_t *macid_ctl = dvobj_to_macidctl(dvobj);
856 u16 bmp_cck_ofdm = 0;
857 u32 bmp_ht = 0;
858 u64 bmp_vht = 0;
859 int i;
860
861 for (i = 0; i < macid_ctl->num; i++) {
862 if (!rtw_macid_is_used(macid_ctl, i))
863 continue;
864 if (!rtw_macid_is_iface_shared(macid_ctl, i))
865 continue;
866
867 if (bw == CHANNEL_WIDTH_20) /* CCK, OFDM always 20MHz */
868 bmp_cck_ofdm |= macid_ctl->rate_bmp0[i] & 0x00000FFF;
869
870 /* bypass mismatch bandwidth for HT, VHT */
871 if (macid_ctl->bw[i] != bw)
872 continue;
873
874 if (macid_ctl->vht_en[i])
875 bmp_vht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
876 else
877 bmp_ht |= (macid_ctl->rate_bmp0[i] >> 12) | (macid_ctl->rate_bmp1[i] << 20);
878 }
879
880 if (r_bmp_cck_ofdm)
881 *r_bmp_cck_ofdm = bmp_cck_ofdm;
882 if (r_bmp_ht)
883 *r_bmp_ht = bmp_ht;
884 if (r_bmp_vht)
885 *r_bmp_vht = bmp_vht;
886 #endif
887 }
888
889 void rtw_update_tx_rate_bmp(struct dvobj_priv *dvobj)
890 {
891 #if 0 /*GEORGIA_TODO_FIXIT*/
892
893 struct rf_ctl_t *rf_ctl = dvobj_to_rfctl(dvobj);
894 _adapter *adapter = dvobj_get_primary_adapter(dvobj);
895 HAL_DATA_TYPE *hal_data = GET_PHL_COM(dvobj);
896 u8 bw;
897 u16 bmp_cck_ofdm, tmp_cck_ofdm;
898 u32 bmp_ht, tmp_ht, ori_bmp_ht[2];
899 u64 bmp_vht, tmp_vht, ori_bmp_vht[4];
900 int i;
901
902 for (bw = CHANNEL_WIDTH_20; bw <= CHANNEL_WIDTH_160; bw++) {
903 /* backup the original ht & vht bmp */
904 if (bw <= CHANNEL_WIDTH_40)
905 ori_bmp_ht[bw] = rf_ctl->rate_bmp_ht_by_bw[bw];
906 if (bw <= CHANNEL_WIDTH_160)
907 ori_bmp_vht[bw] = rf_ctl->rate_bmp_vht_by_bw[bw];
908
909 bmp_cck_ofdm = bmp_ht = bmp_vht = 0;
910 if (rtw_hw_is_bw_support(dvobj, bw)) {
911 for (i = 0; i < dvobj->iface_nums; i++) {
912 if (!dvobj->padapters[i])
913 continue;
914 rtw_get_adapter_tx_rate_bmp_by_bw(dvobj->padapters[i], bw, &tmp_cck_ofdm, &tmp_ht, &tmp_vht);
915 bmp_cck_ofdm |= tmp_cck_ofdm;
916 bmp_ht |= tmp_ht;
917 bmp_vht |= tmp_vht;
918 }
919 rtw_get_shared_macid_tx_rate_bmp_by_bw(dvobj, bw, &tmp_cck_ofdm, &tmp_ht, &tmp_vht);
920 bmp_cck_ofdm |= tmp_cck_ofdm;
921 bmp_ht |= tmp_ht;
922 bmp_vht |= tmp_vht;
923 }
924 if (bw == CHANNEL_WIDTH_20)
925 rf_ctl->rate_bmp_cck_ofdm = bmp_cck_ofdm;
926 if (bw <= CHANNEL_WIDTH_40)
927 rf_ctl->rate_bmp_ht_by_bw[bw] = bmp_ht;
928 if (bw <= CHANNEL_WIDTH_160)
929 rf_ctl->rate_bmp_vht_by_bw[bw] = bmp_vht;
930 }
931
932 #if CONFIG_TXPWR_LIMIT
933 #ifndef DBG_HIGHEST_RATE_BMP_BW_CHANGE
934 #define DBG_HIGHEST_RATE_BMP_BW_CHANGE 0
935 #endif
936
937 if (hal_data->txpwr_limit_loaded) {
938 u8 ori_highest_ht_rate_bw_bmp;
939 u8 ori_highest_vht_rate_bw_bmp;
940 u8 highest_rate_bw;
941 u8 highest_rate_bw_bmp;
942 u8 update_ht_rs = _FALSE;
943 u8 update_vht_rs = _FALSE;
944
945 /* backup the original ht & vht highest bw bmp */
946 ori_highest_ht_rate_bw_bmp = rf_ctl->highest_ht_rate_bw_bmp;
947 ori_highest_vht_rate_bw_bmp = rf_ctl->highest_vht_rate_bw_bmp;
948
949 highest_rate_bw_bmp = BW_CAP_20M;
950 highest_rate_bw = CHANNEL_WIDTH_20;
951 for (bw = CHANNEL_WIDTH_20; bw <= CHANNEL_WIDTH_40; bw++) {
952 if (rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw] < rf_ctl->rate_bmp_ht_by_bw[bw]) {
953 highest_rate_bw_bmp = ch_width_to_bw_cap(bw);
954 highest_rate_bw = bw;
955 } else if (rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw] == rf_ctl->rate_bmp_ht_by_bw[bw])
956 highest_rate_bw_bmp |= ch_width_to_bw_cap(bw);
957 }
958 rf_ctl->highest_ht_rate_bw_bmp = highest_rate_bw_bmp;
959
960 if (ori_highest_ht_rate_bw_bmp != rf_ctl->highest_ht_rate_bw_bmp
961 || largest_bit(ori_bmp_ht[highest_rate_bw]) != largest_bit(rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw])
962 ) {
963 if (DBG_HIGHEST_RATE_BMP_BW_CHANGE) {
964 RTW_INFO("highest_ht_rate_bw_bmp:0x%02x=>0x%02x\n", ori_highest_ht_rate_bw_bmp, rf_ctl->highest_ht_rate_bw_bmp);
965 RTW_INFO("rate_bmp_ht_by_bw[%u]:0x%08x=>0x%08x\n", highest_rate_bw, ori_bmp_ht[highest_rate_bw], rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw]);
966 }
967 if (rf_ctl->rate_bmp_ht_by_bw[highest_rate_bw])
968 update_ht_rs = _TRUE;
969 }
970
971 highest_rate_bw_bmp = BW_CAP_20M;
972 highest_rate_bw = CHANNEL_WIDTH_20;
973 for (bw = CHANNEL_WIDTH_20; bw <= CHANNEL_WIDTH_160; bw++) {
974 if (rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw] < rf_ctl->rate_bmp_vht_by_bw[bw]) {
975 highest_rate_bw_bmp = ch_width_to_bw_cap(bw);
976 highest_rate_bw = bw;
977 } else if (rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw] == rf_ctl->rate_bmp_vht_by_bw[bw])
978 highest_rate_bw_bmp |= ch_width_to_bw_cap(bw);
979 }
980 rf_ctl->highest_vht_rate_bw_bmp = highest_rate_bw_bmp;
981
982 if (ori_highest_vht_rate_bw_bmp != rf_ctl->highest_vht_rate_bw_bmp
983 || largest_bit_64(ori_bmp_vht[highest_rate_bw]) != largest_bit_64(rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw])
984 ) {
985 if (DBG_HIGHEST_RATE_BMP_BW_CHANGE) {
986 RTW_INFO("highest_vht_rate_bw_bmp:0x%02x=>0x%02x\n", ori_highest_vht_rate_bw_bmp, rf_ctl->highest_vht_rate_bw_bmp);
987 RTW_INFO("rate_bmp_vht_by_bw[%u]:0x%016llx=>0x%016llx\n", highest_rate_bw, ori_bmp_vht[highest_rate_bw], rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw]);
988 }
989 if (rf_ctl->rate_bmp_vht_by_bw[highest_rate_bw])
990 update_vht_rs = _TRUE;
991 }
992
993 /* TODO: per rfpath and rate section handling? */
994 if (update_ht_rs == _TRUE || update_vht_rs == _TRUE)
995 rtw_hal_set_tx_power_level(dvobj_get_primary_adapter(dvobj), hal_data->current_channel);
996 }
997 #endif /* CONFIG_TXPWR_LIMIT */
998 #endif
999 }
1000
1001 u8 rtw_get_tx_bw_bmp_of_ht_rate(struct dvobj_priv *dvobj, u8 rate, u8 max_bw)
1002 {
1003 struct rf_ctl_t *rf_ctl = dvobj_to_rfctl(dvobj);
1004 u8 bw;
1005 u8 bw_bmp = 0;
1006 u32 rate_bmp;
1007
1008 if (!IS_HT_RATE(rate)) {
1009 rtw_warn_on(1);
1010 goto exit;
1011 }
1012
1013 rate_bmp = 1 << (rate - MGN_MCS0);
1014
1015 if (max_bw > CHANNEL_WIDTH_40)
1016 max_bw = CHANNEL_WIDTH_40;
1017
1018 for (bw = CHANNEL_WIDTH_20; bw <= max_bw; bw++) {
1019 /* RA may use lower rate for retry */
1020 if (rf_ctl->rate_bmp_ht_by_bw[bw] >= rate_bmp)
1021 bw_bmp |= ch_width_to_bw_cap(bw);
1022 }
1023
1024 exit:
1025 return bw_bmp;
1026 }
1027
1028 u8 rtw_get_tx_bw_bmp_of_vht_rate(struct dvobj_priv *dvobj, u8 rate, u8 max_bw)
1029 {
1030 struct rf_ctl_t *rf_ctl = dvobj_to_rfctl(dvobj);
1031 u8 bw;
1032 u8 bw_bmp = 0;
1033 u64 rate_bmp;
1034
1035 if (!IS_VHT_RATE(rate)) {
1036 rtw_warn_on(1);
1037 goto exit;
1038 }
1039
1040 rate_bmp = BIT_ULL(rate - MGN_VHT1SS_MCS0);
1041
1042 if (max_bw > CHANNEL_WIDTH_160)
1043 max_bw = CHANNEL_WIDTH_160;
1044
1045 for (bw = CHANNEL_WIDTH_20; bw <= max_bw; bw++) {
1046 /* RA may use lower rate for retry */
1047 if (rf_ctl->rate_bmp_vht_by_bw[bw] >= rate_bmp)
1048 bw_bmp |= ch_width_to_bw_cap(bw);
1049 }
1050
1051 exit:
1052 return bw_bmp;
1053 }
1054
1055 s16 rtw_rfctl_get_oper_txpwr_max_mbm(struct rf_ctl_t *rfctl, u8 ch, u8 bw, u8 offset, u8 ifbmp_mod, u8 if_op, bool eirp)
1056 {
1057 /* TODO: get maximum txpower of current operating class & channel belongs to this radio */
1058 s16 mbm = 2000;
1059 return mbm;
1060 }
1061
1062 s16 rtw_rfctl_get_reg_max_txpwr_mbm(struct rf_ctl_t *rfctl, u8 ch, u8 bw, u8 offset, bool eirp)
1063 {
1064 /* TODO: get maximum txpower of current operating class & channel belongs to this radio allowed by regulatory */
1065 s16 mbm = 1300;
1066 return mbm;
1067 }
1068
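/* Report whether short GI may be used toward this station at the given
 * bandwidth, based on its HT (20/40MHz) and VHT (80MHz) SGI capabilities. */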
1069 u8 query_ra_short_GI(struct sta_info *psta, u8 bw)
1070 {
1071 u8 sgi = _FALSE, sgi_20m = _FALSE, sgi_40m = _FALSE, sgi_80m = _FALSE;
1072
1073 #ifdef CONFIG_80211N_HT
1074 #ifdef CONFIG_80211AC_VHT
1075 #ifdef CONFIG_80211AX_HE
1076 /* CONFIG_80211AX_HE_TODO */
1077 #endif /* CONFIG_80211AX_HE */
1078 if (psta->vhtpriv.vht_option)
1079 sgi_80m = psta->vhtpriv.sgi_80m;
1080 #endif
1081 sgi_20m = psta->htpriv.sgi_20m;
1082 sgi_40m = psta->htpriv.sgi_40m;
1083 #endif
1084
1085 switch (bw) {
1086 case CHANNEL_WIDTH_80:
1087 sgi = sgi_80m;
1088 break;
1089 case CHANNEL_WIDTH_40:
1090 sgi = sgi_40m;
1091 break;
1092 case CHANNEL_WIDTH_20:
1093 default:
1094 sgi = sgi_20m;
1095 break;
1096 }
1097
1098 return sgi;
1099 }
1100
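/* Choose the protection (virtual carrier sense) mode for this frame:
 * RTS/CTS, CTS-to-self or none, considering the fragment size vs the RTS
 * threshold, ERP protection flags, an Atheros AP + AMPDU + AES IOT
 * workaround, the HT protection mode, AMPDU usage and, finally, the
 * driver_vcs_en debug override. */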
1101 static void update_attrib_vcs_info(_adapter *padapter, struct xmit_frame *pxmitframe)
1102 {
1103 u32 sz;
1104 struct pkt_attrib *pattrib = &pxmitframe->attrib;
1105 /* struct sta_info *psta = pattrib->psta; */
1106 struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
1107 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
1108
1109 /*
1110 if(pattrib->psta)
1111 {
1112 psta = pattrib->psta;
1113 }
1114 else
1115 {
1116 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
1117 psta=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0] );
1118 }
1119
1120 if(psta==NULL)
1121 {
1122 RTW_INFO("%s, psta==NUL\n", __func__);
1123 return;
1124 }
1125
1126 if(!(psta->state &WIFI_ASOC_STATE))
1127 {
1128 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
1129 return;
1130 }
1131 */
1132
1133 if (pattrib->nr_frags != 1)
1134 sz = padapter->xmitpriv.frag_len;
1135 else /* no frag */
1136 sz = pattrib->last_txcmdsz;
1137
1138 /* (1) RTS_Threshold is compared to the MPDU, not MSDU. */
1139 	/* (2) If there is more than one frag in this MSDU, only the first frag uses a protection frame. */
1140 /* Other fragments are protected by previous fragment. */
1141 /* So we only need to check the length of first fragment. */
1142 if (pmlmeext->cur_wireless_mode < WLAN_MD_11N || padapter->registrypriv.wifi_spec) {
1143 if (sz > padapter->registrypriv.rts_thresh)
1144 pattrib->vcs_mode = RTS_CTS;
1145 else {
1146 if (pattrib->rtsen)
1147 pattrib->vcs_mode = RTS_CTS;
1148 else if (pattrib->cts2self)
1149 pattrib->vcs_mode = CTS_TO_SELF;
1150 else
1151 pattrib->vcs_mode = NONE_VCS;
1152 }
1153 } else {
1154 while (_TRUE) {
1155 #if 0 /* Todo */
1156 /* check IOT action */
1157 if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
1158 pattrib->vcs_mode = CTS_TO_SELF;
1159 pattrib->rts_rate = MGN_24M;
1160 break;
1161 } else if (pHTInfo->IOTAction & (HT_IOT_ACT_FORCED_RTS | HT_IOT_ACT_PURE_N_MODE)) {
1162 pattrib->vcs_mode = RTS_CTS;
1163 pattrib->rts_rate = MGN_24M;
1164 break;
1165 }
1166 #endif
1167
1168 /* IOT action */
1169 if ((pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_ATHEROS) && (pattrib->ampdu_en == _TRUE) &&
1170 (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)) {
1171 pattrib->vcs_mode = CTS_TO_SELF;
1172 break;
1173 }
1174
1175
1176 /* check ERP protection */
1177 if (pattrib->rtsen || pattrib->cts2self) {
1178 if (pattrib->rtsen)
1179 pattrib->vcs_mode = RTS_CTS;
1180 else if (pattrib->cts2self)
1181 pattrib->vcs_mode = CTS_TO_SELF;
1182
1183 break;
1184 }
1185
1186 /* check HT op mode */
1187 if (pattrib->ht_en) {
1188 u8 HTOpMode = pmlmeinfo->HT_protection;
1189 if ((pmlmeext->chandef.bw && (HTOpMode == 2 || HTOpMode == 3)) ||
1190 (!pmlmeext->chandef.bw && HTOpMode == 3)) {
1191 pattrib->vcs_mode = RTS_CTS;
1192 break;
1193 }
1194 }
1195
1196 /* check rts */
1197 if (sz > padapter->registrypriv.rts_thresh) {
1198 pattrib->vcs_mode = RTS_CTS;
1199 break;
1200 }
1201
1202 /* to do list: check MIMO power save condition. */
1203
1204 /* check AMPDU aggregation for TXOP */
1205 if (pattrib->ampdu_en == _TRUE) {
1206 pattrib->vcs_mode = RTS_CTS;
1207 break;
1208 }
1209
1210 pattrib->vcs_mode = NONE_VCS;
1211 break;
1212 }
1213 }
1214
1215 /* for debug : force driver control vrtl_carrier_sense. */
1216 if (padapter->driver_vcs_en == 1) {
1217 /* u8 driver_vcs_en; */ /* Enable=1, Disable=0 driver control vrtl_carrier_sense. */
1218 /* u8 driver_vcs_type; */ /* force 0:disable VCS, 1:RTS-CTS, 2:CTS-to-self when vcs_en=1. */
1219 pattrib->vcs_mode = padapter->driver_vcs_type;
1220 }
1221
1222 }
1223
1224 #ifdef CONFIG_WMMPS_STA
1225 /*
1226 * update_attrib_trigger_frame_info
1227  * In station mode, if the driver has configured a specific TID and the AP supports the U-APSD function, a data
1228  * frame with the corresponding TID is sent as a trigger frame while the driver is in WMM power-saving mode.
1229 *
1230 * Arguments:
1231 * @padapter: _adapter pointer.
1232 * @pattrib: pkt_attrib pointer.
1233 *
1234  * Author: Arvin Liu
1235 * Date: 2017/06/05
1236 */
1237 static void update_attrib_trigger_frame_info(_adapter *padapter, struct pkt_attrib *pattrib)
1238 {
1239 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1240 struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
1241 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
1242 u8 trigger_frame_en = 0;
1243
1244 if (MLME_IS_STA(padapter)) {
1245 if ((pwrpriv->pwr_mode == PM_PS_MODE_MIN) || (pwrpriv->pwr_mode == PM_PS_MODE_MAX)) {
1246 if ((pqospriv->uapsd_ap_supported) && ((pqospriv->uapsd_tid & BIT(pattrib->priority)) == _TRUE)) {
1247 trigger_frame_en = 1;
1248 RTW_INFO("[WMMPS]"FUNC_ADPT_FMT": This is a Trigger Frame\n", FUNC_ADPT_ARG(padapter));
1249 }
1250 }
1251 }
1252
1253 pattrib->trigger_frame = trigger_frame_en;
1254 }
1255 #endif /* CONFIG_WMMPS_STA */
1256
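/* Fill the PHY-related packet attributes from the station entry: protection
 * flags, bandwidth (clamped to the current chandef), short GI, LDPC/STBC
 * according to the negotiated wireless mode, HT AMPDU enable/density and the
 * TDLS direct-link override. */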
1257 static void update_attrib_phy_info(_adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta)
1258 {
1259 struct mlme_ext_priv *mlmeext = &padapter->mlmeextpriv;
1260 u8 bw;
1261
1262 pattrib->rtsen = psta->rtsen;
1263 pattrib->cts2self = psta->cts2self;
1264 pattrib->hw_rts_en = psta->hw_rts_en;
1265
1266 pattrib->mdata = 0;
1267 pattrib->eosp = 0;
1268 #ifdef CONFIG_80211AX_HE
1269 if (psta->hepriv.he_option == _TRUE)
1270 pattrib->eosp = 1;
1271 #endif
1272 pattrib->triggered = 0;
1273 pattrib->ampdu_spacing = 0;
1274
1275 /* ht_en, init rate, ,bw, ch_offset, sgi */
1276
1277 /* ToDo: Need API to inform hal_sta->ra_info.rate_id */
1278 /* pattrib->raid = psta->phl_sta->ra_info.rate_id; */
1279
1280 bw = rtw_get_tx_bw_mode(padapter, psta);
1281 pattrib->bwmode = rtw_min(bw, mlmeext->chandef.bw);
1282 pattrib->sgi = query_ra_short_GI(psta, pattrib->bwmode);
1283
1284 if (psta->phl_sta->wmode & WLAN_MD_11AX) {
1285 pattrib->ldpc = psta->phl_sta->asoc_cap.he_ldpc;
1286 pattrib->stbc = (psta->phl_sta->asoc_cap.stbc_he_rx > 0) ? 1:0;
1287 } else if (psta->phl_sta->wmode & WLAN_MD_11AC) {
1288 pattrib->ldpc = psta->phl_sta->asoc_cap.vht_ldpc;
1289 pattrib->stbc = (psta->phl_sta->asoc_cap.stbc_vht_rx > 0) ? 1:0;
1290 } else if (psta->phl_sta->wmode & WLAN_MD_11N) {
1291 pattrib->ldpc = psta->phl_sta->asoc_cap.ht_ldpc;
1292 pattrib->stbc = (psta->phl_sta->asoc_cap.stbc_ht_rx > 0) ? 1:0;
1293 } else {
1294 pattrib->ldpc = 0;
1295 pattrib->stbc = 0;
1296 }
1297
1298 #ifdef CONFIG_80211N_HT
1299 if (padapter->registrypriv.ht_enable &&
1300 is_supported_ht(padapter->registrypriv.wireless_mode)) {
1301 pattrib->ht_en = psta->htpriv.ht_option;
1302 pattrib->ch_offset = psta->htpriv.ch_offset;
1303 pattrib->ampdu_en = _FALSE;
1304
1305 if (padapter->driver_ampdu_spacing != 0xFF) /* driver control AMPDU Density for peer sta's rx */
1306 pattrib->ampdu_spacing = padapter->driver_ampdu_spacing;
1307 else
1308 pattrib->ampdu_spacing = psta->htpriv.rx_ampdu_min_spacing;
1309
1310 /* check if enable ampdu */
1311 if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
1312 if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) {
1313 pattrib->ampdu_en = _TRUE;
1314 if (psta->htpriv.tx_amsdu_enable == _TRUE)
1315 pattrib->amsdu_ampdu_en = _TRUE;
1316 else
1317 pattrib->amsdu_ampdu_en = _FALSE;
1318 }
1319 }
1320 }
1321 #endif /* CONFIG_80211N_HT */
1322 /* if(pattrib->ht_en && psta->htpriv.ampdu_enable) */
1323 /* { */
1324 /* if(psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority)) */
1325 /* pattrib->ampdu_en = _TRUE; */
1326 /* } */
1327
1328 #ifdef CONFIG_TDLS
1329 if (pattrib->direct_link == _TRUE) {
1330 psta = pattrib->ptdls_sta;
1331
1332 pattrib->raid = psta->phl_sta->ra_info.rate_id;
1333 #ifdef CONFIG_80211N_HT
1334 if (padapter->registrypriv.ht_enable &&
1335 is_supported_ht(padapter->registrypriv.wireless_mode)) {
1336 pattrib->bwmode = rtw_get_tx_bw_mode(padapter, psta);
1337 pattrib->ht_en = psta->htpriv.ht_option;
1338 pattrib->ch_offset = psta->htpriv.ch_offset;
1339 pattrib->sgi = query_ra_short_GI(psta, pattrib->bwmode);
1340 }
1341 #endif /* CONFIG_80211N_HT */
1342 }
1343 #endif /* CONFIG_TDLS */
1344
1345 pattrib->retry_ctrl = _FALSE;
1346 }
1347
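/* Build the per-frame IV/PN from the station's dot11txpn according to the
 * selected cipher, using the group key index for multicast frames. */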
1348 static s32 update_attrib_sec_iv_info(_adapter *padapter, struct pkt_attrib *pattrib)
1349 {
1350 struct sta_info *psta = pattrib->psta;
1351 sint bmcast = IS_MCAST(pattrib->ra);
1352
1353 if (!psta)
1354 return _FAIL;
1355
1356 switch (pattrib->encrypt) {
1357 case _WEP40_:
1358 case _WEP104_:
1359 WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1360 break;
1361
1362 case _TKIP_:
1363 if (bmcast)
1364 TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1365 else
1366 TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
1367 break;
1368
1369 case _AES_:
1370 if (bmcast)
1371 AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1372 else
1373 AES_IV(pattrib->iv, psta->dot11txpn, 0);
1374
1375 break;
1376
1377 case _GCMP_:
1378 case _GCMP_256_:
1379 if (bmcast)
1380 GCMP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1381 else
1382 GCMP_IV(pattrib->iv, psta->dot11txpn, 0);
1383
1384 break;
1385
1386 case _CCMP_256_:
1387 if (bmcast)
1388 GCMP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
1389 else
1390 GCMP_IV(pattrib->iv, psta->dot11txpn, 0);
1391
1392 break;
1393
1394 #ifdef CONFIG_WAPI_SUPPORT
1395 case _SMS4_:
1396 rtw_wapi_get_iv(padapter, pattrib->ra, pattrib->iv);
1397 break;
1398 #endif
1399 default:
1400 break;
1401 }
1402
1403 return _SUCCESS;
1404 }
1405
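/* Resolve the security attributes for this frame: pick the encryption
 * algorithm and key index, set iv_len/icv_len per cipher, copy the pairwise
 * key (and TKIP tx MIC key), and decide whether software encryption (bswenc)
 * is required. EAPOL 2/4 and 4/4 frames may be forced unencrypted shortly
 * after an unencrypted 1/4 or 3/4 was received (see the comment below). */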
1406 static s32 update_attrib_sec_info(_adapter *padapter, struct pkt_attrib *pattrib, struct sta_info *psta, enum eap_type eapol_type)
1407 {
1408 sint res = _SUCCESS;
1409 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1410 struct security_priv *psecuritypriv = &padapter->securitypriv;
1411 sint bmcast = IS_MCAST(pattrib->ra);
1412 s8 hw_decrypted = _FALSE;
1413
1414 _rtw_memset(pattrib->dot118021x_UncstKey.skey, 0, 16);
1415 _rtw_memset(pattrib->dot11tkiptxmickey.skey, 0, 16);
1416 pattrib->mac_id = psta->phl_sta->macid;
1417
1418 /* Comment by Owen at 2020/05/19
1419 	 * Issue: RTK STA sends an encrypted 4-way 4/4 while the AP still thinks the 4-way handshake is incomplete.
1420 	 * In the TCL pressure test, the AP may resend 4-way 3/4 with a new replay counter within 2 ms.
1421 	 * In this situation, the STA sends an unencrypted 4-way 4/4 with the old replay counter after more
1422 	 * than 2 ms, followed by the encrypted 4-way 4/4 with the new replay counter. Because the
1423 	 * AP only accepts an unencrypted 4-way 4/4 with a new replay counter, and the STA encrypts
1424 * each 4-way 4/4 at this time, the 4-way handshake cannot be completed.
1425 * So we modified that after STA receives unencrypted 4-way 1/4 and 4-way 3/4,
1426 * 4-way 2/4 and 4-way 4/4 sent by STA in the next 100 ms are not encrypted.
1427 */
1428 if (psta->ieee8021x_blocked == _TRUE ||
1429 ((eapol_type == EAPOL_2_4 || eapol_type == EAPOL_4_4) &&
1430 rtw_get_passing_time_ms(psta->resp_nonenc_eapol_key_starttime) <= 100)) {
1431
1432 if (eapol_type == EAPOL_2_4 || eapol_type == EAPOL_4_4)
1433 RTW_INFO("Respond unencrypted eapol key\n");
1434
1435 pattrib->encrypt = 0;
1436
1437 if ((pattrib->ether_type != 0x888e) && (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _FALSE)) {
1438 #ifdef DBG_TX_DROP_FRAME
1439 RTW_INFO("DBG_TX_DROP_FRAME %s psta->ieee8021x_blocked == _TRUE, pattrib->ether_type(%04x) != 0x888e\n", __FUNCTION__, pattrib->ether_type);
1440 #endif
1441 res = _FAIL;
1442 goto exit;
1443 }
1444 } else {
1445 GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
1446
1447 #ifdef CONFIG_WAPI_SUPPORT
1448 if (pattrib->ether_type == 0x88B4)
1449 pattrib->encrypt = _NO_PRIVACY_;
1450 #endif
1451
1452 switch (psecuritypriv->dot11AuthAlgrthm) {
1453 case dot11AuthAlgrthm_Open:
1454 case dot11AuthAlgrthm_Shared:
1455 case dot11AuthAlgrthm_Auto:
1456 pattrib->key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
1457 break;
1458 case dot11AuthAlgrthm_8021X:
1459 if (bmcast)
1460 pattrib->key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
1461 else
1462 pattrib->key_idx = 0;
1463 break;
1464 default:
1465 pattrib->key_idx = 0;
1466 break;
1467 }
1468
1469 /* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
1470 if (((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) && (pattrib->ether_type == 0x888e))
1471 pattrib->encrypt = _NO_PRIVACY_;
1472
1473 }
1474
1475 #ifdef CONFIG_TDLS
1476 if (pattrib->direct_link == _TRUE) {
1477 if (pattrib->encrypt > 0)
1478 pattrib->encrypt = _AES_;
1479 }
1480 #endif
1481
1482 switch (pattrib->encrypt) {
1483 case _WEP40_:
1484 case _WEP104_:
1485 pattrib->iv_len = 4;
1486 pattrib->icv_len = 4;
1487 break;
1488
1489 case _TKIP_:
1490 pattrib->iv_len = 8;
1491 pattrib->icv_len = 4;
1492
1493 if (psecuritypriv->busetkipkey == _FAIL) {
1494 #ifdef DBG_TX_DROP_FRAME
1495 RTW_INFO("DBG_TX_DROP_FRAME %s psecuritypriv->busetkipkey(%d)==_FAIL drop packet\n", __FUNCTION__, psecuritypriv->busetkipkey);
1496 #endif
1497 res = _FAIL;
1498 goto exit;
1499 }
1500
1501 _rtw_memcpy(pattrib->dot11tkiptxmickey.skey, psta->dot11tkiptxmickey.skey, 16);
1502
1503 break;
1504
1505 case _AES_:
1506
1507 pattrib->iv_len = 8;
1508 pattrib->icv_len = 8;
1509
1510 break;
1511
1512 case _GCMP_:
1513 case _GCMP_256_:
1514
1515 pattrib->iv_len = 8;
1516 pattrib->icv_len = 16;
1517
1518 break;
1519
1520 case _CCMP_256_:
1521
1522 pattrib->iv_len = 8;
1523 pattrib->icv_len = 16;
1524
1525 break;
1526
1527 #ifdef CONFIG_WAPI_SUPPORT
1528 case _SMS4_:
1529 pattrib->iv_len = 18;
1530 pattrib->icv_len = 16;
1531 break;
1532 #endif
1533 default:
1534 pattrib->iv_len = 0;
1535 pattrib->icv_len = 0;
1536 break;
1537 }
1538
1539 if (pattrib->encrypt > 0) {
1540 _rtw_memcpy(pattrib->dot118021x_UncstKey.skey
1541 , psta->dot118021x_UncstKey.skey
1542 , (pattrib->encrypt & _SEC_TYPE_256_) ? 32 : 16);
1543 }
1544
1545 if (!bmcast)
1546 hw_decrypted = psta->hw_decrypted;
1547 else
1548 hw_decrypted = psecuritypriv->hw_decrypted;
1549
1550 if (pattrib->encrypt &&
1551 (padapter->securitypriv.sw_encrypt == _TRUE || hw_decrypted == _FALSE || pattrib->encrypt == _TKIP_)) {
1552 pattrib->bswenc = _TRUE;
1553 } else {
1554 pattrib->bswenc = _FALSE;
1555 }
1556
1557 #if defined(CONFIG_CONCURRENT_MODE)
1558 pattrib->bmc_camid = padapter->securitypriv.dot118021x_bmc_cam_id;
1559 #endif
1560
1561 #ifdef CONFIG_WAPI_SUPPORT
1562 if (pattrib->encrypt == _SMS4_)
1563 pattrib->bswenc = _FALSE;
1564 #endif
1565
1566 if ((pattrib->encrypt) && (eapol_type == EAPOL_4_4))
1567 pattrib->bswenc = _TRUE;
1568
1569 exit:
1570
1571 return res;
1572
1573 }
1574
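/* WMM admission control: if the AP's ACM mask mandates admission control for
 * the AC of the given priority, downgrade it (BE 0/3 -> BK 1, VI 4/5 -> BE 0,
 * VO 6/7 -> VI 5); otherwise return the priority unchanged. */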
1575 u8 qos_acm(u8 acm_mask, u8 priority)
1576 {
1577 u8 change_priority = priority;
1578
1579 switch (priority) {
1580 case 0:
1581 case 3:
1582 if (acm_mask & BIT(1))
1583 change_priority = 1;
1584 break;
1585 case 1:
1586 case 2:
1587 break;
1588 case 4:
1589 case 5:
1590 if (acm_mask & BIT(2))
1591 change_priority = 0;
1592 break;
1593 case 6:
1594 case 7:
1595 if (acm_mask & BIT(3))
1596 change_priority = 5;
1597 break;
1598 default:
1599 RTW_INFO("qos_acm(): invalid pattrib->priority: %d!!!\n", priority);
1600 break;
1601 }
1602
1603 return change_priority;
1604 }
1605
1606 /* refer to IEEE802.11-2016 Table R-3; Comply with IETF RFC4594 */
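/* Resulting mapping when CONFIG_RTW_UP_MAPPING_RULE selects DSCP mapping:
 * DSCP 0 -> UP 0, 1-9 -> 1, 10-16 -> 2, 17-23 -> 3, 24-31 -> 4, 33-40 -> 5,
 * 32 (CS4) and 41-47 -> 6, 48-63 -> 7. Mode 0 uses the ToS precedence bits. */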
1607 u8 tos_to_up(u8 tos)
1608 {
1609 u8 up = 0;
1610 u8 dscp;
1611 u8 mode = CONFIG_RTW_UP_MAPPING_RULE;
1612
1613
1614 /* tos precedence mapping */
1615 if (mode == 0) {
1616 up = tos >> 5;
1617 return up;
1618 }
1619
1620 /* refer to IEEE802.11-2016 Table R-3;
1621 	 * DSCP 32 (CS4) complies with IETF RFC4594
1622 */
1623 dscp = (tos >> 2);
1624
1625 if (dscp == 0)
1626 up = 0;
1627 else if (dscp >= 1 && dscp <= 9)
1628 up = 1;
1629 else if (dscp >= 10 && dscp <= 16)
1630 up = 2;
1631 else if (dscp >= 17 && dscp <= 23)
1632 up = 3;
1633 else if (dscp >= 24 && dscp <= 31)
1634 up = 4;
1635 else if (dscp >= 33 && dscp <= 40)
1636 up = 5;
1637 else if ((dscp >= 41 && dscp <= 47) || (dscp == 32))
1638 up = 6;
1639 else if (dscp >= 48 && dscp <= 63)
1640 up = 7;
1641
1642 return up;
1643 }
1644
1645 #if 0 //RTW_PHL_TX: unfinished code, kept for reference
1646 static void set_qos_core(struct xmit_frame *pxframe)
1647 {
1648 s32 UserPriority = 0;
1649
1650 if (!pxframe->pkt)
1651 goto null_pkt;
1652
1653 /* get UserPriority from IP hdr */
1654 if (pxframe->attrib.ether_type == 0x0800) {
1655 struct pkt_file ppktfile;
1656 struct ethhdr etherhdr;
1657 struct iphdr ip_hdr;
1658
1659 _rtw_open_pktfile(pxframe->pkt, &ppktfile);
1660 		_rtw_pktfile_read(&ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
1661 _rtw_pktfile_read(&ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
1662 UserPriority = tos_to_up(ip_hdr.tos);
1663 }
1664
1665
1666 #ifdef CONFIG_ICMP_VOQ
1667 if (pxframe->attrib.icmp_pkt == 1)/*use VO queue to send icmp packet*/
1668 UserPriority = 7;
1669 #endif
1670 #ifdef CONFIG_IP_R_MONITOR
1671 if (pxframe->attrib.ether_type == ETH_P_ARP)
1672 UserPriority = 7;
1673 #endif/*CONFIG_IP_R_MONITOR*/
1674
1675 null_pkt:
1676 pxframe->attrib.priority = UserPriority;
1677 pxframe->attrib.hdrlen = WLAN_HDR_A3_QOS_LEN;
1678 pxframe->attrib.subtype = WIFI_QOS_DATA_TYPE;
1679 }
1680 #endif
1681
1682 static void set_qos(struct sk_buff *pkt, struct pkt_attrib *pattrib)
1683 {
1684 s32 UserPriority = 0;
1685
1686 if (!pkt)
1687 goto null_pkt;
1688
1689 /* get UserPriority from IP hdr */
1690 if (pattrib->ether_type == 0x0800) {
1691 struct pkt_file ppktfile;
1692 struct ethhdr etherhdr;
1693 struct iphdr ip_hdr;
1694
1695 _rtw_open_pktfile(pkt, &ppktfile);
1696 		_rtw_pktfile_read(&ppktfile, (unsigned char *)&etherhdr, ETH_HLEN);
1697 _rtw_pktfile_read(&ppktfile, (u8 *)&ip_hdr, sizeof(ip_hdr));
1698 /* UserPriority = (ntohs(ip_hdr.tos) >> 5) & 0x3; */
1699 UserPriority = tos_to_up(ip_hdr.tos);
1700 }
1701 /*
1702 else if (pattrib->ether_type == 0x888e) {
1703
1704
1705 UserPriority = 7;
1706 }
1707 */
1708
1709 #ifdef CONFIG_ICMP_VOQ
1710 if (pattrib->icmp_pkt == 1)/*use VO queue to send icmp packet*/
1711 UserPriority = 7;
1712 #endif
1713 #ifdef CONFIG_IP_R_MONITOR
1714 if (pattrib->ether_type == ETH_P_ARP)
1715 UserPriority = 7;
1716 #endif/*CONFIG_IP_R_MONITOR*/
1717
1718 null_pkt:
1719 pattrib->priority = UserPriority;
1720 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_QOS_LEN : WLAN_HDR_A3_QOS_LEN;
1721 pattrib->subtype = WIFI_QOS_DATA_TYPE;
1722 }
1723
1724 #ifdef CONFIG_TDLS
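/* Mark pattrib->direct_link when a TDLS link is established with the
 * destination, the peer is in TDLS_LINKED_STATE and the frame is not ARP
 * (ether type 0x0806); ARP frames still go through the AP path. */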
1725 u8 rtw_check_tdls_established(_adapter *padapter, struct pkt_attrib *pattrib)
1726 {
1727 pattrib->ptdls_sta = NULL;
1728
1729 pattrib->direct_link = _FALSE;
1730 if (padapter->tdlsinfo.link_established == _TRUE) {
1731 pattrib->ptdls_sta = rtw_get_stainfo(&padapter->stapriv, pattrib->dst);
1732 #if 1
1733 if ((pattrib->ptdls_sta != NULL) &&
1734 (pattrib->ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE) &&
1735 (pattrib->ether_type != 0x0806)) {
1736 pattrib->direct_link = _TRUE;
1737 /* RTW_INFO("send ptk to "MAC_FMT" using direct link\n", MAC_ARG(pattrib->dst)); */
1738 }
1739 #else
1740 if (pattrib->ptdls_sta != NULL &&
1741 pattrib->ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE) {
1742 pattrib->direct_link = _TRUE;
1743 #if 0
1744 RTW_INFO("send ptk to "MAC_FMT" using direct link\n", MAC_ARG(pattrib->dst));
1745 #endif
1746 }
1747
1748 /* ARP frame may be helped by AP*/
1749 if (pattrib->ether_type != 0x0806)
1750 pattrib->direct_link = _FALSE;
1751 #endif
1752 }
1753
1754 return pattrib->direct_link;
1755 }
1756
1757 s32 update_tdls_attrib(_adapter *padapter, struct pkt_attrib *pattrib)
1758 {
1759
1760 struct sta_info *psta = NULL;
1761 struct sta_priv *pstapriv = &padapter->stapriv;
1762 struct security_priv *psecuritypriv = &padapter->securitypriv;
1763 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1764 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
1765
1766 s32 res = _SUCCESS;
1767
1768 psta = rtw_get_stainfo(pstapriv, pattrib->ra);
1769 if (psta == NULL) {
1770 res = _FAIL;
1771 goto exit;
1772 }
1773
1774 pattrib->mac_id = psta->phl_sta->macid;
1775 pattrib->psta = psta;
1776 pattrib->ack_policy = 0;
1777 /* get ether_hdr_len */
1778 pattrib->pkt_hdrlen = ETH_HLEN;
1779
1780 pattrib->qos_en = psta->qos_option;
1781
1782 /* [TDLS] TODO: setup req/rsp should be AC_BK */
1783 if (pqospriv->qos_option && psta->qos_option) {
1784 pattrib->priority = 4; /* tdls management frame should be AC_VI */
1785 pattrib->hdrlen = WLAN_HDR_A3_QOS_LEN;
1786 pattrib->subtype = WIFI_QOS_DATA_TYPE;
1787 } else {
1788 pattrib->priority = 0;
1789 pattrib->hdrlen = WLAN_HDR_A3_LEN;
1790 pattrib->subtype = WIFI_DATA_TYPE;
1791 }
1792
1793 /* TODO:_lock */
1794 if (update_attrib_sec_info(padapter, pattrib, psta, NON_EAPOL) == _FAIL) {
1795 res = _FAIL;
1796 goto exit;
1797 }
1798
1799 update_attrib_phy_info(padapter, pattrib, psta);
1800
1801
1802 exit:
1803
1804 return res;
1805 }
1806
1807 #endif /* CONFIG_TDLS */
1808
1809
1810 #ifdef CONFIG_LPS
1811 #define LPS_PT_NORMAL 0
1812 #define LPS_PT_SP	1 /* only DHCP packets are treated as SPECIAL_PACKET */
1813 #define LPS_PT_ICMP 2
1814
1815 /* If EAPOL, ARP, or DHCP packet, the driver must be in active mode. */
1816 static u8 _rtw_lps_chk_packet_type(struct pkt_attrib *pattrib)
1817 {
1818 u8 pkt_type = LPS_PT_NORMAL; /*normal data frame*/
1819
1820 #ifdef CONFIG_WAPI_SUPPORT
1821 if ((pattrib->ether_type == 0x88B4) || (pattrib->ether_type == 0x0806) || (pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
1822 pkt_type = LPS_PT_SP;
1823 #else /* !CONFIG_WAPI_SUPPORT */
1824
1825 #ifndef CONFIG_LPS_NOT_LEAVE_FOR_ICMP
1826 if (pattrib->icmp_pkt == 1)
1827 pkt_type = LPS_PT_ICMP;
1828 else
1829 #endif
1830 if (pattrib->dhcp_pkt == 1)
1831 pkt_type = LPS_PT_SP;
1832 #endif
1833 return pkt_type;
1834 }
1835 #endif
1836
1837 #if 0 //RTW_PHL_TX: unfinished code, kept here for reference
1838 static s32 update_xmitframe_from_hdr(_adapter *padapter, struct xmit_frame *pxframe)
1839 {
1840 uint i;
1841 struct pkt_file pktfile;
1842 struct sta_info *psta = NULL;
1843 struct ethhdr etherhdr;
1844 struct sk_buff *pkt = NULL;
1845 sint bmcast;
1846
1847 struct sta_priv *pstapriv = &padapter->stapriv;
1848 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
1849 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
1850 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
1851
1852 PHLTX_LOG;
1853
1854 if (pxframe->pkt)
1855 pkt = pxframe->pkt;
1856 else
1857 return FAIL;
1858
1859 PHLTX_LOG;
1860
1861 _rtw_open_pktfile(pkt, &pktfile);
1862 i = _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
1863
1864 pxframe->attrib.ether_type = ntohs(etherhdr.h_proto);
1865
1866 if (MLME_IS_MESH(padapter)) /* address resolve is done for mesh */
1867 goto get_sta_info;
1868
1869 _rtw_memcpy(pxframe->attrib.dst, &etherhdr.h_dest, ETH_ALEN);
1870 _rtw_memcpy(pxframe->attrib.src, &etherhdr.h_source, ETH_ALEN);
1871
1872 if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
1873 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
1874 _rtw_memcpy(pxframe->attrib.ra, pxframe->attrib.dst, ETH_ALEN);
1875 _rtw_memcpy(pxframe->attrib.ta, adapter_mac_addr(padapter), ETH_ALEN);
1876 } else if (MLME_IS_STA(padapter)) {
1877
1878 #if 0//def CONFIG_TDLS //rtw_phl_tx
1879 if (rtw_check_tdls_established(padapter, pattrib) == _TRUE)
1880 _rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); /* For TDLS direct link Tx, set ra to be same to dst */
1881 else
1882 #endif
1883 _rtw_memcpy(pxframe->attrib.ra, get_bssid(pmlmepriv), ETH_ALEN);
1884 _rtw_memcpy(pxframe->attrib.ta, adapter_mac_addr(padapter), ETH_ALEN);
1885 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_sta);
1886 } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
1887 _rtw_memcpy(pxframe->attrib.ra, pxframe->attrib.dst, ETH_ALEN);
1888 _rtw_memcpy(pxframe->attrib.ta, get_bssid(pmlmepriv), ETH_ALEN);
1889 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_ap);
1890 } else
1891 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_unknown);
1892
1893 PHLTX_LOG;
1894
1895 get_sta_info:
1896 bmcast = IS_MCAST(pxframe->attrib.ra);
1897 if (bmcast) {
1898 PHLTX_LOG;
1899 psta = rtw_get_bcmc_stainfo(padapter);
1900 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
1901 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sta);
1902 #ifdef DBG_TX_DROP_FRAME
1903 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pxframe->attrib.ra));
1904 #endif
1905 return FAIL;
1906 }
1907 } else {
1908 PHLTX_LOG;
1909 psta = rtw_get_stainfo(pstapriv, pxframe->attrib.ra);
1910 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
1911 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_sta);
1912 #ifdef DBG_TX_DROP_FRAME
1913 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pxframe->attrib.ra));
1914 #endif
1915 return FAIL;
1916 } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE && !(psta->state & WIFI_ASOC_STATE)) {
1917 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_ap_link);
1918 return FAIL;
1919 }
1920 }
1921
1922 PHLTX_LOG;
1923
1924 if (!(psta->state & WIFI_ASOC_STATE)) {
1925 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_link);
1926 RTW_INFO("%s-"ADPT_FMT" psta("MAC_FMT")->state(0x%x) != WIFI_ASOC_STATE\n",
1927 __func__, ADPT_ARG(padapter), MAC_ARG(psta->phl_sta->mac_addr), psta->state);
1928 return FAIL;
1929 }
1930
1931 pxframe->attrib.psta = psta;
1932
1933 PHLTX_LOG;
1934
1935 pxframe->attrib.pktlen = pktfile.pkt_len;
1936
1937 /* TODO: 802.1Q VLAN header */
1938 /* TODO: IPV6 */
1939
1940 if (ETH_P_IP == pxframe->attrib.ether_type) {
1941 u8 ip[20];
1942
1943 _rtw_pktfile_read(&pktfile, ip, 20);
1944
1945 if (GET_IPV4_IHL(ip) * 4 > 20)
1946 _rtw_pktfile_read(&pktfile, NULL, GET_IPV4_IHL(ip) * 4 - 20);
1947
1948 pxframe->attrib.icmp_pkt = 0;
1949 pxframe->attrib.dhcp_pkt = 0;
1950 pxframe->attrib.hipriority_pkt = 0;
1951
1952 if (GET_IPV4_PROTOCOL(ip) == 0x01) { /* ICMP */
1953 pxframe->attrib.icmp_pkt = 1;
1954 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
1955
1956 } else if (GET_IPV4_PROTOCOL(ip) == 0x11) { /* UDP */
1957 u8 udp[24];
1958
1959 _rtw_pktfile_read(&pktfile, udp, 24);
1960
1961 if ((GET_UDP_SRC(udp) == 68 && GET_UDP_DST(udp) == 67)
1962 || (GET_UDP_SRC(udp) == 67 && GET_UDP_DST(udp) == 68)
1963 ) {
1964 /* 67 : UDP BOOTP server, 68 : UDP BOOTP client */
1965 if (pxframe->attrib.pktlen > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
1966 pxframe->attrib.dhcp_pkt = 1;
1967 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_dhcp);
1968 if (0)
1969 RTW_INFO("send DHCP packet\n");
1970 }
1971 }
1972
1973 /* WaveAgent packet, increase priority so that the system can read data in time */
1974 if (((GET_UDP_SIG1(udp) == 0xcc) || (GET_UDP_SIG1(udp) == 0xdd)) &&
1975 (GET_UDP_SIG2(udp) == 0xe2)) {
1976 pxframe->attrib.hipriority_pkt = 1;
1977 }
1978
1979 } else if (GET_IPV4_PROTOCOL(ip) == 0x06 /* TCP */
1980 && rtw_st_ctl_chk_reg_s_proto(&psta->st_ctl, 0x06) == _TRUE
1981 ) {
1982 u8 tcp[20];
1983
1984 _rtw_pktfile_read(&pktfile, tcp, 20);
1985
1986 if (rtw_st_ctl_chk_reg_rule(&psta->st_ctl, padapter, IPV4_SRC(ip), TCP_SRC(tcp), IPV4_DST(ip), TCP_DST(tcp)) == _TRUE) {
1987 if (GET_TCP_SYN(tcp) && GET_TCP_ACK(tcp)) {
1988 session_tracker_add_cmd(padapter, psta
1989 , IPV4_SRC(ip), TCP_SRC(tcp)
1990 , IPV4_DST(ip), TCP_DST(tcp));
1991 if (DBG_SESSION_TRACKER)
1992 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" SYN-ACK\n"
1993 , FUNC_ADPT_ARG(padapter)
1994 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
1995 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
1996 }
1997 if (GET_TCP_FIN(tcp)) {
1998 session_tracker_del_cmd(padapter, psta
1999 , IPV4_SRC(ip), TCP_SRC(tcp)
2000 , IPV4_DST(ip), TCP_DST(tcp));
2001 if (DBG_SESSION_TRACKER)
2002 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" FIN\n"
2003 , FUNC_ADPT_ARG(padapter)
2004 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2005 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2006 }
2007 }
2008 }
2009
2010 } else if (0x888e == pxframe->attrib.ether_type)
2011 parsing_eapol_packet(padapter, pktfile.cur_addr, psta, 1);
2012 #if defined (DBG_ARP_DUMP) || defined (DBG_IP_R_MONITOR)
2013 else if (pxframe->attrib.ether_type == ETH_P_ARP) {
2014 u8 arp[28] = {0};
2015
2016 _rtw_pktfile_read(&pktfile, arp, 28);
2017 dump_arp_pkt(RTW_DBGDUMP, etherhdr.h_dest, etherhdr.h_source, arp, 1);
2018 }
2019 #endif
2020
2021 PHLTX_LOG;
2022
2023 if ((pxframe->attrib.ether_type == 0x888e) || (pxframe->attrib.dhcp_pkt == 1))
2024 rtw_mi_set_scan_deny(padapter, 3000);
2025
2026 if (MLME_IS_STA(padapter) &&
2027 pxframe->attrib.ether_type == ETH_P_ARP &&
2028 !IS_MCAST(pxframe->attrib.dst)) {
2029 rtw_mi_set_scan_deny(padapter, 1000);
2030 rtw_mi_scan_abort(padapter, _FALSE); /*rtw_scan_abort_no_wait*/
2031 }
2032
2033
2034 PHLTX_LOG;
2035
2036 /* get ether_hdr_len */
2037 pxframe->attrib.pkt_hdrlen = ETH_HLEN;/* (pattrib->ether_type == 0x8100) ? (14 + 4 ): 14; */ /* vlan tag */
2038
2039 pxframe->attrib.hdrlen = WLAN_HDR_A3_LEN;
2040 pxframe->attrib.type = WIFI_DATA_TYPE;
2041 pxframe->attrib.subtype = WIFI_DATA_TYPE;
2042 pxframe->attrib.qos_en = pxframe->attrib.psta->qos_option;
2043 pxframe->attrib.priority = 0;
2044
2045 pxframe->attrib.frag_len = pxmitpriv->frag_len;
2046
2047
2048
2049 PHLTX_LOG;
2050
2051 return SUCCESS;
2052 }
2053
2054
2055 static s32 update_xmitframe_qos(_adapter *padapter, struct xmit_frame *pxframe)
2056 {
2057
2058 struct sta_priv *pstapriv = &padapter->stapriv;
2059 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2060 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2061 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2062
2063 if (!pxframe->attrib.qos_en)
2064 return SUCCESS;
2065
2066 if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_MESH_STATE
2067 | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)
2068 ) {
2069 set_qos_core(pxframe);
2070 #if 0//rtw_phl_tx def CONFIG_RTW_MESH
2071 if (MLME_IS_MESH(padapter))
2072 rtw_mesh_tx_set_whdr_mctrl_len(pattrib->mesh_frame_mode, pattrib);
2073 #endif
2074 } else {
2075 #if 0// def CONFIG_TDLS
2076 if (pattrib->direct_link == _TRUE) {
2077 if (pattrib->qos_en)
2078 set_qos(pkt, pattrib);
2079 } else
2080 #endif
2081 {
2082 if (pqospriv->qos_option) {
2083 set_qos_core(pxframe);
2084
2085 if (pmlmepriv->acm_mask != 0)
2086 pxframe->attrib.priority = qos_acm(pmlmepriv->acm_mask, pxframe->attrib.priority);
2087 }
2088 }
2089 }
2090
2091 return SUCCESS;
2092 }
2093
2094 static s32 update_xmitframe_security(_adapter *padapter, struct xmit_frame *pxframe) //rtw_phl_tx todo
2095 {
2096 sint res = _SUCCESS;
2097 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2098 struct security_priv *psecuritypriv = &padapter->securitypriv;
2099 sint bmcast = IS_MCAST(pxframe->attrib.ra);
2100
2101 #if 0
2102 _rtw_memset(pattrib->dot118021x_UncstKey.skey, 0, 16);
2103 _rtw_memset(pattrib->dot11tkiptxmickey.skey, 0, 16);
2104 pattrib->mac_id = psta->phl_sta->macid;
2105 #endif
2106
2107 if (pxframe->attrib.psta->ieee8021x_blocked == _TRUE) {
2108
2109 pxframe->attrib.encrypt = 0;
2110
2111 if ((pxframe->attrib.ether_type != 0x888e) && (check_fwstate(pmlmepriv, WIFI_MP_STATE) == _FALSE)) {
2112 #ifdef DBG_TX_DROP_FRAME
2113 RTW_INFO("DBG_TX_DROP_FRAME %s psta->ieee8021x_blocked == _TRUE, pattrib->ether_type(%04x) != 0x888e\n", __FUNCTION__, pxframe->attrib.ether_type);
2114 #endif
2115 res = _FAIL;
2116 goto exit;
2117 }
2118 } else {
2119 GET_ENCRY_ALGO(psecuritypriv, pxframe->attrib.psta, pxframe->attrib.encrypt, bmcast);
2120
2121 #ifdef CONFIG_WAPI_SUPPORT
2122 if (pxframe->attrib.ether_type == 0x88B4)
2123 pxframe->attrib.encrypt = _NO_PRIVACY_;
2124 #endif
2125
2126 switch (psecuritypriv->dot11AuthAlgrthm) {
2127 case dot11AuthAlgrthm_Open:
2128 case dot11AuthAlgrthm_Shared:
2129 case dot11AuthAlgrthm_Auto:
2130 pxframe->attrib.key_idx = (u8)psecuritypriv->dot11PrivacyKeyIndex;
2131 break;
2132 case dot11AuthAlgrthm_8021X:
2133 if (bmcast)
2134 pxframe->attrib.key_idx = (u8)psecuritypriv->dot118021XGrpKeyid;
2135 else
2136 pxframe->attrib.key_idx = 0;
2137 break;
2138 default:
2139 pxframe->attrib.key_idx = 0;
2140 break;
2141 }
2142
2143 /* For WPS 1.0 WEP, driver should not encrypt EAPOL Packet for WPS handshake. */
2144 if (((pxframe->attrib.encrypt == _WEP40_) || (pxframe->attrib.encrypt == _WEP104_)) && (pxframe->attrib.ether_type == 0x888e))
2145 pxframe->attrib.encrypt = _NO_PRIVACY_;
2146
2147 }
2148
2149 #if 0 //def CONFIG_TDLS
2150 if (pattrib->direct_link == _TRUE) {
2151 if (pxframe->attrib.encrypt > 0)
2152 pxframe->attrib.encrypt = _AES_;
2153 }
2154 #endif
2155
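/*
 * Per-cipher IV/ICV overhead applied by the switch below:
 *   WEP40/WEP104            : IV 4,  ICV 4
 *   TKIP                    : IV 8,  ICV 4
 *   CCMP-128 (_AES_)        : IV 8,  MIC 8
 *   GCMP/GCMP-256/CCMP-256  : IV 8,  MIC 16
 *   SMS4 (WAPI)             : IV 18, MIC 16
 */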
2156 switch (pxframe->attrib.encrypt) {
2157 case _WEP40_:
2158 case _WEP104_:
2159 pxframe->attrib.iv_len = 4;
2160 pxframe->attrib.icv_len = 4;
2161 WEP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2162 break;
2163
2164 case _TKIP_:
2165 pxframe->attrib.iv_len = 8;
2166 pxframe->attrib.icv_len = 4;
2167
2168 if (psecuritypriv->busetkipkey == _FAIL) {
2169 #ifdef DBG_TX_DROP_FRAME
2170 RTW_INFO("DBG_TX_DROP_FRAME %s psecuritypriv->busetkipkey(%d)==_FAIL drop packet\n", __FUNCTION__, psecuritypriv->busetkipkey);
2171 #endif
2172 res = _FAIL;
2173 goto exit;
2174 }
2175
2176 if (bmcast)
2177 TKIP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2178 else
2179 TKIP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2180
2181
2182 //_rtw_memcpy(pattrib->dot11tkiptxmickey.skey, pxframe->attrib.psta->dot11tkiptxmickey.skey, 16);
2183
2184 break;
2185
2186 case _AES_:
2187
2188 pxframe->attrib.iv_len = 8;
2189 pxframe->attrib.icv_len = 8;
2190
2191 if (bmcast)
2192 AES_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2193 else
2194 AES_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2195
2196 break;
2197
2198 case _GCMP_:
2199 case _GCMP_256_:
2200
2201 pxframe->attrib.iv_len = 8;
2202 pxframe->attrib.icv_len = 16;
2203
2204 if (bmcast)
2205 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2206 else
2207 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2208
2209 break;
2210
2211 case _CCMP_256_:
2212
2213 pxframe->attrib.iv_len = 8;
2214 pxframe->attrib.icv_len = 16;
2215
2216 if (bmcast)
2217 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, pxframe->attrib.key_idx);
2218 else
2219 GCMP_IV(pxframe->attrib.iv, pxframe->attrib.psta->dot11txpn, 0);
2220
2221 break;
2222
2223 #ifdef CONFIG_WAPI_SUPPORT
2224 case _SMS4_:
2225 pxframe->attrib.iv_len = 18;
2226 pxframe->attrib.icv_len = 16;
2227 rtw_wapi_get_iv(padapter, pxframe->attrib.ra, pxframe->attrib.iv);
2228 break;
2229 #endif
2230 default:
2231 pxframe->attrib.iv_len = 0;
2232 pxframe->attrib.icv_len = 0;
2233 break;
2234 }
2235
2236 #if 0
2237 if (pxframe->attrib.encrypt > 0) {
2238 _rtw_memcpy(pattrib->dot118021x_UncstKey.skey
2239 , pxframe->attrib.psta->dot118021x_UncstKey.skey
2240 , (pxframe->attrib.encrypt & _SEC_TYPE_256_) ? 32 : 16);
2241 }
2242 #endif
2243
2244 if (pxframe->attrib.encrypt &&
2245 ((padapter->securitypriv.sw_encrypt == _TRUE) || (psecuritypriv->hw_decrypted == _FALSE))) {
2246 pxframe->attrib.bswenc = _TRUE;
2247 } else {
2248 pxframe->attrib.bswenc = _FALSE;
2249 }
2250
2251 #if defined(CONFIG_CONCURRENT_MODE)
2252 //pattrib->bmc_camid = padapter->securitypriv.dot118021x_bmc_cam_id;
2253 #endif
2254
2255 #ifdef CONFIG_WAPI_SUPPORT
2256 if (pxframe->attrib.encrypt == _SMS4_)
2257 pxframe->attrib.bswenc = _FALSE;
2258 #endif
2259
2260 exit:
2261 return res;
2262
2263 }
2264
2265 static s32 update_xmitframe_hw(_adapter *padapter, struct xmit_frame *pxframe)
2266 {
2267 pxframe->phl_txreq->mdata.rts_en = pxframe->attrib.psta->rtsen;
2268 pxframe->phl_txreq->mdata.cts2self = pxframe->attrib.psta->cts2self;
2269 pxframe->phl_txreq->mdata.ampdu_density = 0;
2270 return 0;
2271 }
2272
2273 #if 0
2274 static s32 rtw_core_update_txattrib(_adapter *padapter, struct xmit_frame *pxframe)
2275 {
2276 uint i;
2277 struct pkt_file pktfile;
2278 struct sta_info *psta = NULL;
2279 struct ethhdr etherhdr;
2280 struct sk_buff *pkt = NULL;
2281
2282 struct sta_priv *pstapriv = &padapter->stapriv;
2283 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2284 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2285 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2286 sint res = _SUCCESS;
2287
2288 #if 0//rtw_phl_tx todo def CONFIG_LPS
2289 pkt_type = _rtw_lps_chk_packet_type(pattrib);
2290
2291 if (pkt_type == LPS_PT_SP) { /* packet is treated as SPECIAL_PACKET */
2292 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_active);
2293 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 0);
2294 } else if (pkt_type == LPS_PT_ICMP)
2295 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 0);
2296 #endif /* CONFIG_LPS */
2297
2298 #if 0//rtw_phl_tx todo def CONFIG_BEAMFORMING
2299 update_attrib_txbf_info(padapter, pattrib, psta);
2300 #endif
2301
2302 #if 0
2303 /* TODO:_lock */
2304 if (update_attrib_sec_info(padapter, pattrib, psta, NON_EAPOL) == _FAIL) {
2305 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
2306 res = _FAIL;
2307 goto exit;
2308 }
2309 #endif
2310
2311 update_attrib_phy_info(padapter, pattrib, psta);
2312
2313 /* RTW_INFO("%s ==> mac_id(%d)\n",__FUNCTION__,pattrib->mac_id ); */
2314 /* TODO:_unlock */
2315
2316 #ifdef CONFIG_AUTO_AP_MODE
2317 if (psta->isrc && psta->pid > 0)
2318 pattrib->pctrl = _TRUE;
2319 else
2320 #endif
2321 pattrib->pctrl = 0;
2322
2323 pattrib->ack_policy = 0;
2324
2325 if (bmcast)
2326 pattrib->rate = psta->init_rate;
2327
2328
2329 #ifdef CONFIG_WMMPS_STA
2330 update_attrib_trigger_frame_info(padapter, pattrib);
2331 #endif /* CONFIG_WMMPS_STA */
2332
2333 /* pattrib->priority = 5; */ /* force to use the VI queue, for testing */
2334 pattrib->hw_ssn_sel = pxmitpriv->hw_ssn_seq_no;
2335
2336 pattrib->wdinfo_en = 1;/*FPGA_test YiWei need modify*/
2337
2338 rtw_set_tx_chksum_offload(pkt, pattrib);
2339
2340 exit:
2341 return res;
2342 }
2343 #endif
2344 #endif
2345
2346 static u8 rtw_chk_htc_en(_adapter *padapter, struct sta_info *psta, struct pkt_attrib *pattrib)
2347 {
2348
2349 #ifdef CONFIG_80211AX_HE
2350 if (psta->hepriv.he_option == _TRUE) {
2351 /* By test, some HE APs do not accept EAPOL/ARP/DHCP packets with an HT Control field appended */
2352 if ((0x888e == pattrib->ether_type) || (0x0806 == pattrib->ether_type) || (pattrib->dhcp_pkt == 1))
2353 return 0;
2354 else
2355 return rtw_he_htc_en(padapter, psta);
2356 }
2357 #endif
2358
2359 return 0;
2360 }
2361
2362 static s32 update_attrib(_adapter *padapter, struct sk_buff *pkt, struct pkt_attrib *pattrib)
2363 {
2364 uint i;
2365 struct pkt_file pktfile;
2366 struct sta_info *psta = NULL;
2367 struct ethhdr etherhdr;
2368
2369 sint bmcast;
2370 struct sta_priv *pstapriv = &padapter->stapriv;
2371 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2372 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2373 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2374 sint res = _SUCCESS;
2375 enum eap_type eapol_type = NON_EAPOL;
2376 #ifdef CONFIG_LPS
2377 u8 pkt_type = 0;
2378 #endif
2379
2380 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib);
2381
2382 _rtw_open_pktfile(pkt, &pktfile);
2383 i = _rtw_pktfile_read(&pktfile, (u8 *)&etherhdr, ETH_HLEN);
2384
2385 pattrib->ether_type = ntohs(etherhdr.h_proto);
2386
2387 if (MLME_STATE(padapter) & (WIFI_AP_STATE | WIFI_MESH_STATE)) /* address resolve is done for ap/mesh */
2388 goto get_sta_info;
2389
2390 _rtw_memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
2391 _rtw_memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
2392 _rtw_memcpy(pattrib->ta, adapter_mac_addr(padapter), ETH_ALEN);
2393
2394 if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
2395 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
2396 _rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
2397 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_adhoc);
2398 } else if (MLME_IS_STA(padapter)) {
2399 #ifdef CONFIG_TDLS
2400 if (rtw_check_tdls_established(padapter, pattrib) == _TRUE)
2401 _rtw_memcpy(pattrib->ra, pattrib->dst, ETH_ALEN); /* For TDLS direct link Tx, set ra to be same to dst */
2402 else
2403 #endif
2404 {
2405 _rtw_memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
2406 #ifdef CONFIG_RTW_WDS
2407 if (adapter_use_wds(padapter)
2408 && _rtw_memcmp(pattrib->src, pattrib->ta, ETH_ALEN) == _FALSE
2409 ) {
2410 pattrib->wds = 1;
2411 if (IS_MCAST(pattrib->dst))
2412 rtw_tx_wds_gptr_update(padapter, pattrib->src);
2413 }
2414 #endif
2415 }
2416 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_sta);
2417 } else
2418 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_unknown);
2419
2420 get_sta_info:
2421 bmcast = IS_MCAST(pattrib->ra);
2422 if (bmcast) {
2423 psta = rtw_get_bcmc_stainfo(padapter);
2424 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
2425 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sta);
2426 #ifdef DBG_TX_DROP_FRAME
2427 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pattrib->ra));
2428 #endif
2429 res = _FAIL;
2430 goto exit;
2431 }
2432 } else {
2433 psta = rtw_get_stainfo(pstapriv, pattrib->ra);
2434 if (psta == NULL) { /* if we cannot get psta => drop the pkt */
2435 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_sta);
2436 #ifdef DBG_TX_DROP_FRAME
2437 RTW_INFO("DBG_TX_DROP_FRAME %s get sta_info fail, ra:" MAC_FMT"\n", __func__, MAC_ARG(pattrib->ra));
2438 #endif
2439 res = _FAIL;
2440 goto exit;
2441 } else if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE && !(psta->state & WIFI_ASOC_STATE)) {
2442 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_ucast_ap_link);
2443 res = _FAIL;
2444 goto exit;
2445 }
2446
2447 #ifdef CONFIG_RTW_WDS
2448 if (XATTRIB_GET_WDS(pattrib) && !(psta->flags & WLAN_STA_WDS))
2449 pattrib->wds = 0;
2450 #endif
2451 }
2452
2453 if (!(psta->state & WIFI_ASOC_STATE)) {
2454 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_link);
2455 RTW_INFO("%s-"ADPT_FMT" psta("MAC_FMT")->state(0x%x) != WIFI_ASOC_STATE\n",
2456 __func__, ADPT_ARG(padapter), MAC_ARG(psta->phl_sta->mac_addr), psta->state);
2457 res = _FAIL;
2458 goto exit;
2459 }
2460
2461 pattrib->pktlen = pktfile.pkt_len;
2462 #ifdef CONFIG_CORE_TXSC
2463 pattrib->frag_len = pxmitpriv->frag_len;
2464 #endif
2465
2466 /* TODO: 802.1Q VLAN header */
2467 /* TODO: IPV6 */
2468
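/*
 * Peek into the payload to classify the frame: ICMP and DHCP feed the
 * priority/LPS decisions below, EAPOL frames are parsed for key-exchange
 * state, TCP SYN-ACK/FIN updates the session tracker, and ARP may be
 * dumped for debugging.
 */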
2469 if (ETH_P_IP == pattrib->ether_type) {
2470 u8 ip[20];
2471
2472 _rtw_pktfile_read(&pktfile, ip, 20);
2473
2474 if (GET_IPV4_IHL(ip) * 4 > 20)
2475 _rtw_pktfile_read(&pktfile, NULL, GET_IPV4_IHL(ip) * 4 - 20);
2476
2477 pattrib->icmp_pkt = 0;
2478 pattrib->dhcp_pkt = 0;
2479 pattrib->hipriority_pkt = 0;
2480
2481 if (GET_IPV4_PROTOCOL(ip) == 0x01) { /* ICMP */
2482 pattrib->icmp_pkt = 1;
2483 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_icmp);
2484
2485 } else if (GET_IPV4_PROTOCOL(ip) == 0x11) { /* UDP */
2486 u8 udp[24];
2487
2488 _rtw_pktfile_read(&pktfile, udp, 24);
2489
2490 if ((GET_UDP_SRC(udp) == 68 && GET_UDP_DST(udp) == 67)
2491 || (GET_UDP_SRC(udp) == 67 && GET_UDP_DST(udp) == 68)
2492 ) {
2493 /* 67 : UDP BOOTP server, 68 : UDP BOOTP client */
2494 if (pattrib->pktlen > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
2495 pattrib->dhcp_pkt = 1;
2496 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_dhcp);
2497 if (0)
2498 RTW_INFO("send DHCP packet\n");
2499 }
2500 }
2501
2502 /* WaveAgent packet, increase priority so that the system can read data in time */
2503 if (((GET_UDP_SIG1(udp) == 0xcc) || (GET_UDP_SIG1(udp) == 0xdd)) &&
2504 (GET_UDP_SIG2(udp) == 0xe2)) {
2505 pattrib->hipriority_pkt = 1;
2506 }
2507
2508 } else if (GET_IPV4_PROTOCOL(ip) == 0x06 /* TCP */
2509 && rtw_st_ctl_chk_reg_s_proto(&psta->st_ctl, 0x06) == _TRUE
2510 ) {
2511 u8 tcp[20];
2512
2513 _rtw_pktfile_read(&pktfile, tcp, 20);
2514
2515 if (rtw_st_ctl_chk_reg_rule(&psta->st_ctl, padapter, IPV4_SRC(ip), TCP_SRC(tcp), IPV4_DST(ip), TCP_DST(tcp)) == _TRUE) {
2516 if (GET_TCP_SYN(tcp) && GET_TCP_ACK(tcp)) {
2517 session_tracker_add_cmd(padapter, psta
2518 , IPV4_SRC(ip), TCP_SRC(tcp)
2519 , IPV4_DST(ip), TCP_DST(tcp));
2520 if (DBG_SESSION_TRACKER)
2521 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" SYN-ACK\n"
2522 , FUNC_ADPT_ARG(padapter)
2523 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2524 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2525 }
2526 if (GET_TCP_FIN(tcp)) {
2527 session_tracker_del_cmd(padapter, psta
2528 , IPV4_SRC(ip), TCP_SRC(tcp)
2529 , IPV4_DST(ip), TCP_DST(tcp));
2530 if (DBG_SESSION_TRACKER)
2531 RTW_INFO(FUNC_ADPT_FMT" local:"IP_FMT":"PORT_FMT", remote:"IP_FMT":"PORT_FMT" FIN\n"
2532 , FUNC_ADPT_ARG(padapter)
2533 , IP_ARG(IPV4_SRC(ip)), PORT_ARG(TCP_SRC(tcp))
2534 , IP_ARG(IPV4_DST(ip)), PORT_ARG(TCP_DST(tcp)));
2535 }
2536 }
2537 }
2538
2539 } else if (0x888e == pattrib->ether_type)
2540 eapol_type = parsing_eapol_packet(padapter, pktfile.cur_addr, psta, 1);
2541 #if defined (DBG_ARP_DUMP) || defined (DBG_IP_R_MONITOR)
2542 else if (pattrib->ether_type == ETH_P_ARP) {
2543 u8 arp[28] = {0};
2544
2545 _rtw_pktfile_read(&pktfile, arp, 28);
2546 dump_arp_pkt(RTW_DBGDUMP, etherhdr.h_dest, etherhdr.h_source, arp, 1);
2547 }
2548 #endif
2549
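/*
 * While an EAPOL handshake or DHCP exchange is in flight, defer scanning
 * so a channel switch cannot break the negotiation; a unicast ARP sent by
 * a STA additionally aborts any ongoing scan to keep the peer reachable.
 */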
2550 if ((pattrib->ether_type == 0x888e) || (pattrib->dhcp_pkt == 1))
2551 rtw_mi_set_scan_deny(padapter, 3000);
2552
2553 if (MLME_IS_STA(padapter) &&
2554 pattrib->ether_type == ETH_P_ARP &&
2555 !IS_MCAST(pattrib->dst)) {
2556 rtw_mi_set_scan_deny(padapter, 1000);
2557 rtw_mi_scan_abort(padapter, _FALSE); /*rtw_scan_abort_no_wait*/
2558 }
2559
2560 #ifdef CONFIG_LPS
2561 pkt_type = _rtw_lps_chk_packet_type(pattrib);
2562
2563 if (pkt_type == LPS_PT_SP) { /* packet is treated as SPECIAL_PACKET */
2564 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_active);
2565 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SPECIAL_PACKET, 0);
2566 } else if (pkt_type == LPS_PT_ICMP)
2567 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_LEAVE, 0);
2568 #endif /* CONFIG_LPS */
2569
2570 /* TODO:_lock */
2571 if (update_attrib_sec_info(padapter, pattrib, psta, eapol_type) == _FAIL) {
2572 DBG_COUNTER(padapter->tx_logs.core_tx_upd_attrib_err_sec);
2573 res = _FAIL;
2574 goto exit;
2575 }
2576
2577 /* get ether_hdr_len */
2578 pattrib->pkt_hdrlen = ETH_HLEN;/* (pattrib->ether_type == 0x8100) ? (14 + 4 ): 14; */ /* vlan tag */
2579
2580 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_LEN : WLAN_HDR_A3_LEN;
2581 pattrib->type = WIFI_DATA_TYPE;
2582 pattrib->subtype = WIFI_DATA_TYPE;
2583 pattrib->qos_en = psta->qos_option;
2584 pattrib->priority = 0;
2585
2586 if (check_fwstate(pmlmepriv, WIFI_AP_STATE | WIFI_MESH_STATE
2587 | WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE)
2588 ) {
2589 if (pattrib->qos_en) {
2590 set_qos(pkt, pattrib);
2591 #ifdef CONFIG_RTW_MESH
2592 if (MLME_IS_MESH(padapter))
2593 rtw_mesh_tx_set_whdr_mctrl_len(pattrib->mesh_frame_mode, pattrib);
2594 #endif
2595 }
2596 } else {
2597 #ifdef CONFIG_TDLS
2598 if (pattrib->direct_link == _TRUE) {
2599 if (pattrib->qos_en)
2600 set_qos(pkt, pattrib);
2601 } else
2602 #endif
2603 {
2604 if (pqospriv->qos_option) {
2605 set_qos(pkt, pattrib);
2606
2607 if (pmlmepriv->acm_mask != 0)
2608 pattrib->priority = qos_acm(pmlmepriv->acm_mask, pattrib->priority);
2609 }
2610 }
2611 }
2612
2613 pattrib->order = rtw_chk_htc_en(padapter, psta, pattrib);
2614 if (pattrib->order) {
2615 if (pattrib->qos_en)
2616 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_QOS_HTC_LEN : WLAN_HDR_A3_QOS_HTC_LEN;
2617 else
2618 pattrib->hdrlen = XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_HTC_LEN : WLAN_HDR_A3_HTC_LEN;
2619 }
2620
2621 update_attrib_phy_info(padapter, pattrib, psta);
2622
2623 /* RTW_INFO("%s ==> mac_id(%d)\n",__FUNCTION__,pattrib->mac_id ); */
2624
2625 pattrib->psta = psta;
2626 /* TODO:_unlock */
2627
2628 #ifdef CONFIG_AUTO_AP_MODE
2629 if (psta->isrc && psta->pid > 0)
2630 pattrib->pctrl = _TRUE;
2631 else
2632 #endif
2633 pattrib->pctrl = 0;
2634
2635 pattrib->ack_policy = 0;
2636
2637 if (bmcast)
2638 pattrib->rate = psta->init_rate;
2639
2640
2641 #ifdef CONFIG_WMMPS_STA
2642 update_attrib_trigger_frame_info(padapter, pattrib);
2643 #endif /* CONFIG_WMMPS_STA */
2644
2645 /* pattrib->priority = 5; */ /* force to use the VI queue, for testing */
2646 pattrib->hw_ssn_sel = pxmitpriv->hw_ssn_seq_no;
2647
2648 pattrib->wdinfo_en = 1;/*FPGA_test YiWei need modify*/
2649
2650 rtw_set_tx_chksum_offload(pkt, pattrib);
2651
2652 exit:
2653
2654
2655 return res;
2656 }
2657
2658 static s32 xmitframe_addmic(_adapter *padapter, struct xmit_frame *pxmitframe)
2659 {
2660 sint curfragnum, length;
2661 u8 *pframe, *payload, mic[8];
2662 struct mic_data micdata;
2663 /* struct sta_info *stainfo; */
2664 struct pkt_attrib *pattrib = &pxmitframe->attrib;
2665 struct security_priv *psecuritypriv = &padapter->securitypriv;
2666 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2667 u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
2668 u8 hw_hdr_offset = 0;
2669 sint bmcst = IS_MCAST(pattrib->ra);
2670
2671 /*
2672 if(pattrib->psta)
2673 {
2674 stainfo = pattrib->psta;
2675 }
2676 else
2677 {
2678 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
2679 stainfo=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
2680 }
2681
2682 if(stainfo==NULL)
2683 {
2684 RTW_INFO("%s, psta==NUL\n", __func__);
2685 return _FAIL;
2686 }
2687
2688 if(!(stainfo->state &WIFI_ASOC_STATE))
2689 {
2690 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, stainfo->state);
2691 return _FAIL;
2692 }
2693 */
2694
2695
2696 #ifdef CONFIG_USB_TX_AGGREGATION
2697 hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
2698 #else
2699 #ifdef CONFIG_TX_EARLY_MODE
2700 hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE;
2701 #else
2702 hw_hdr_offset = TXDESC_OFFSET;
2703 #endif
2704 #endif
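/*
 * hw_hdr_offset reserves room in buf_addr for the TX descriptor (plus the
 * per-packet offset used by USB aggregation or early mode), so the frame
 * pointer taken below starts at the 802.11 header.
 */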
2705
2706 if (pattrib->encrypt == _TKIP_) { /* if(psecuritypriv->dot11PrivacyAlgrthm==_TKIP_PRIVACY_) */
2707 /* encode mic code */
2708 /* if(stainfo!= NULL) */
2709 {
2710 u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2711
2712 pframe = pxmitframe->buf_addr + hw_hdr_offset;
2713
2714 if (bmcst) {
2715 if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16) == _TRUE) {
2716 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2717 /* rtw_msleep_os(10); */
2718 return _FAIL;
2719 }
2720 /* start to calculate the mic code */
2721 rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
2722 } else {
2723 if (_rtw_memcmp(&pattrib->dot11tkiptxmickey.skey[0], null_key, 16) == _TRUE) {
2724 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2725 /* rtw_msleep_os(10); */
2726 return _FAIL;
2727 }
2728 /* start to calculate the mic code */
2729 rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
2730 }
2731
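/*
 * The TKIP (Michael) MIC is computed over DA, SA, the 4-byte priority
 * field and the MSDU payload of every fragment; the DA/SA offsets in the
 * 802.11 header depend on the ToDS/FromDS bits checked below.
 */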
2732 if (pframe[1] & 1) { /* ToDS==1 */
2733 rtw_secmicappend(&micdata, &pframe[16], 6); /* DA */
2734 if (pframe[1] & 2) /* From Ds==1 */
2735 rtw_secmicappend(&micdata, &pframe[24], 6);
2736 else
2737 rtw_secmicappend(&micdata, &pframe[10], 6);
2738 } else { /* ToDS==0 */
2739 rtw_secmicappend(&micdata, &pframe[4], 6); /* DA */
2740 if (pframe[1] & 2) /* From Ds==1 */
2741 rtw_secmicappend(&micdata, &pframe[16], 6);
2742 else
2743 rtw_secmicappend(&micdata, &pframe[10], 6);
2744
2745 }
2746
2747 if (pattrib->qos_en)
2748 priority[0] = (u8)pxmitframe->attrib.priority;
2749
2750
2751 rtw_secmicappend(&micdata, &priority[0], 4);
2752
2753 payload = pframe;
2754
2755 for (curfragnum = 0; curfragnum < pattrib->nr_frags; curfragnum++) {
2756 payload = (u8 *)RND4((SIZE_PTR)(payload));
2757
2758 payload = payload + pattrib->hdrlen + pattrib->iv_len;
2759 if ((curfragnum + 1) == pattrib->nr_frags) {
2760 length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - ((pattrib->bswenc) ? pattrib->icv_len : 0);
2761 rtw_secmicappend(&micdata, payload, length);
2762 payload = payload + length;
2763 } else {
2764 length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - ((pattrib->bswenc) ? pattrib->icv_len : 0);
2765 rtw_secmicappend(&micdata, payload, length);
2766 payload = payload + length + pattrib->icv_len;
2767 }
2768 }
2769 rtw_secgetmic(&micdata, &(mic[0]));
2770 /* add mic code and add the mic code length in last_txcmdsz */
2771
2772 _rtw_memcpy(payload, &(mic[0]), 8);
2773 pattrib->last_txcmdsz += 8;
2774
2775 payload = payload - pattrib->last_txcmdsz + 8;
2776 }
2777 }
2778
2779
2780 return _SUCCESS;
2781 }
2782
2783 /*#define DBG_TX_SW_ENCRYPTOR*/
2784
2785 static s32 xmitframe_swencrypt(_adapter *padapter, struct xmit_frame *pxmitframe)
2786 {
2787 struct pkt_attrib *pattrib = &pxmitframe->attrib;
2788
2789
2790 if (!pattrib->bswenc)
2791 return _SUCCESS;
2792
2793 #ifdef DBG_TX_SW_ENCRYPTOR
2794 RTW_INFO(ADPT_FMT" - sec_type:%s DO SW encryption\n",
2795 ADPT_ARG(padapter), security_type_str(pattrib->encrypt));
2796 #endif
2797
2798 switch (pattrib->encrypt) {
2799 case _WEP40_:
2800 case _WEP104_:
2801 rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
2802 break;
2803 case _TKIP_:
2804 rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
2805 break;
2806 case _AES_:
2807 case _CCMP_256_:
2808 rtw_aes_encrypt(padapter, (u8 *)pxmitframe);
2809 break;
2810 case _GCMP_:
2811 case _GCMP_256_:
2812 rtw_gcmp_encrypt(padapter, (u8 *)pxmitframe);
2813 break;
2814 #ifdef CONFIG_WAPI_SUPPORT
2815 case _SMS4_:
2816 rtw_sms4_encrypt(padapter, (u8 *)pxmitframe);
2817 #endif
2818 default:
2819 break;
2820 }
2821
2822 return _SUCCESS;
2823 }
2824
2825 #if 0 //RTW_PHL_TX: unfinished code, kept here for reference
2826 static s32 rtw_core_xmitframe_addmic(_adapter *padapter, struct xmit_frame *pxframe)
2827 {
2828 sint curfragnum, payload_length;
2829 u8 *pwlhdr, *payload, mic[8];
2830 struct mic_data micdata;
2831 /* struct sta_info *stainfo; */
2832 struct security_priv *psecuritypriv = &padapter->securitypriv;
2833 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
2834 u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
2835 u8 hw_hdr_offset = 0;
2836 sint bmcst = IS_MCAST(pxframe->attrib.ra);
2837
2838 if (pxframe->attrib.encrypt == _TKIP_) { /* if(psecuritypriv->dot11PrivacyAlgrthm==_TKIP_PRIVACY_) */
2839 /* encode mic code */
2840 /* if(stainfo!= NULL) */
2841 {
2842 u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0};
2843
2844 pwlhdr = pxframe->wlhdr[0];
2845 payload = pxframe->pkt->data + pxframe->attrib.pkt_hdrlen;
2846 payload_length = pxframe->pkt->len - pxframe->attrib.pkt_hdrlen;
2847
2848 if (bmcst) {
2849 if (_rtw_memcmp(psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey, null_key, 16) == _TRUE) {
2850 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2851 /* rtw_msleep_os(10); */
2852 return _FAIL;
2853 }
2854 /* start to calculate the mic code */
2855 rtw_secmicsetkey(&micdata, psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
2856 } else {
2857 if (_rtw_memcmp(&pxframe->attrib.psta->dot11tkiptxmickey.skey[0], null_key, 16) == _TRUE) {
2858 /* DbgPrint("\nxmitframe_addmic:stainfo->dot11tkiptxmickey==0\n"); */
2859 /* rtw_msleep_os(10); */
2860 return _FAIL;
2861 }
2862 /* start to calculate the mic code */
2863 rtw_secmicsetkey(&micdata, &pxframe->attrib.psta->dot11tkiptxmickey.skey[0]);
2864 }
2865
2866 if (pwlhdr[1] & 1) { /* ToDS==1 */
2867 rtw_secmicappend(&micdata, &pwlhdr[16], 6); /* DA */
2868 if (pwlhdr[1] & 2) /* From Ds==1 */
2869 rtw_secmicappend(&micdata, &pwlhdr[24], 6);
2870 else
2871 rtw_secmicappend(&micdata, &pwlhdr[10], 6);
2872 } else { /* ToDS==0 */
2873 rtw_secmicappend(&micdata, &pwlhdr[4], 6); /* DA */
2874 if (pwlhdr[1] & 2) /* From Ds==1 */
2875 rtw_secmicappend(&micdata, &pwlhdr[16], 6);
2876 else
2877 rtw_secmicappend(&micdata, &pwlhdr[10], 6);
2878
2879 }
2880
2881 if (pxframe->attrib.qos_en)
2882 priority[0] = (u8)pxframe->attrib.priority;
2883
2884 rtw_secmicappend(&micdata, &priority[0], 4);
2885
2886 payload = (u8 *)RND4((SIZE_PTR)(payload));
2887 rtw_secmicappend(&micdata, payload, payload_length);
2888
2889 rtw_secgetmic(&micdata, &(mic[0]));
2890 /* add mic code and add the mic code length in last_txcmdsz */
2891
2892 _rtw_memcpy(pxframe->wltail[0]+pxframe->attrib.icv_len, &(mic[0]), 8);
2893 }
2894 }
2895
2896 return _SUCCESS;
2897 }
2898
2899 /*#define DBG_TX_SW_ENCRYPTOR*/
2900
2901 static s32 rtw_core_xmitframe_swencrypt(_adapter *padapter, struct xmit_frame *pxframe)
2902 {
2903 if (pxframe->attrib.bswenc) {
2904 #ifdef DBG_TX_SW_ENCRYPTOR
2905 RTW_INFO(ADPT_FMT" - sec_type:%s DO SW encryption\n",
2906 ADPT_ARG(padapter), security_type_str(pxframe->attrib.encrypt));
2907 #endif
2908
2909 switch (pxframe->attrib.encrypt) {
2910 case _WEP40_:
2911 case _WEP104_:
2912 //rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
2913 break;
2914 case _TKIP_:
2915 //rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
2916 break;
2917 case _AES_:
2918 case _CCMP_256_:
2919 rtw_core_aes_encrypt(padapter, (u8 *)pxframe);
2920 break;
2921 case _GCMP_:
2922 case _GCMP_256_:
2923 //rtw_gcmp_encrypt(padapter, (u8 *)pxmitframe);
2924 break;
2925 #ifdef CONFIG_WAPI_SUPPORT
2926 case _SMS4_:
2927 //rtw_sms4_encrypt(padapter, (u8 *)pxmitframe);
2928 #endif
2929 default:
2930 break;
2931 }
2932
2933 }
2934 return _SUCCESS;
2935 }
2936
2937
2938 s32 rtw_core_make_wlanhdr(_adapter *padapter, u8 *hdr, struct xmit_frame *pxframe)
2939 {
2940 u16 *qc;
2941
2942 struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
2943 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
2944 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
2945 u8 qos_option = _FALSE;
2946 sint res = _SUCCESS;
2947 u16 *fctrl = &pwlanhdr->frame_ctl;
2948
2949 _rtw_memset(hdr, 0, WLANHDR_OFFSET);
2950
2951 set_frame_sub_type(fctrl, pxframe->attrib.subtype);
2952
2953 if (pxframe->attrib.subtype & WIFI_DATA_TYPE) {
2954 if (MLME_IS_STA(padapter)) {
2955 #ifdef CONFIG_TDLS
2956 if (pattrib->direct_link == _TRUE) {
2957 /* TDLS data transfer, ToDS=0, FrDs=0 */
2958 _rtw_memcpy(pwlanhdr->addr1, pxframe->attrib.dst, ETH_ALEN);
2959 _rtw_memcpy(pwlanhdr->addr2, pxframe->attrib.src, ETH_ALEN);
2960 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
2961
2962 if (pxframe->attrib.qos_en)
2963 qos_option = _TRUE;
2964 } else
2965 #endif /* CONFIG_TDLS */
2966 {
2967 /* to_ds = 1, fr_ds = 0; */
2968 /* 1.Data transfer to AP */
2969 /* 2. ARP packets will be relayed by the AP */
2970 SetToDs(fctrl);
2971 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
2972 _rtw_memcpy(pwlanhdr->addr2, pxframe->attrib.ta, ETH_ALEN);
2973 _rtw_memcpy(pwlanhdr->addr3, pxframe->attrib.dst, ETH_ALEN);
2974
2975 if (pqospriv->qos_option)
2976 qos_option = _TRUE;
2977 }
2978 } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)) {
2979 /* to_ds = 0, fr_ds = 1; */
2980 SetFrDs(fctrl);
2981 _rtw_memcpy(pwlanhdr->addr1, pxframe->attrib.dst, ETH_ALEN);
2982 _rtw_memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
2983 _rtw_memcpy(pwlanhdr->addr3, pxframe->attrib.src, ETH_ALEN);
2984
2985 if (pxframe->attrib.qos_en)
2986 qos_option = _TRUE;
2987 } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
2988 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
2989 _rtw_memcpy(pwlanhdr->addr1, pxframe->attrib.dst, ETH_ALEN);
2990 _rtw_memcpy(pwlanhdr->addr2, pxframe->attrib.ta, ETH_ALEN);
2991 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
2992
2993 if (pxframe->attrib.qos_en)
2994 qos_option = _TRUE;
2995 #ifdef CONFIG_RTW_MESH
2996 } else if (MLME_IS_MESH(padapter)) {
2997 rtw_mesh_tx_build_whdr(padapter, pattrib, fctrl, pwlanhdr);
2998 if (pxframe->attrib.qos_en)
2999 qos_option = _TRUE;
3000 else {
3001 RTW_WARN("[%s] !qos_en in Mesh\n", __FUNCTION__);
3002 res = _FAIL;
3003 goto exit;
3004 }
3005 #endif
3006 } else {
3007 res = _FAIL;
3008 goto exit;
3009 }
3010
3011 if (pxframe->attrib.mdata)
3012 SetMData(fctrl);
3013
3014 if (pxframe->attrib.encrypt)
3015 SetPrivacy(fctrl);
3016
3017 if (qos_option) {
3018 qc = (unsigned short *)(hdr + pxframe->attrib.hdrlen - 2);
3019
3020 if (pxframe->attrib.priority)
3021 SetPriority(qc, pxframe->attrib.priority);
3022
3023 SetEOSP(qc, pxframe->attrib.eosp);
3024
3025 SetAckpolicy(qc, pxframe->attrib.ack_policy);
3026
3027 if (pxframe->attrib.amsdu)
3028 SetAMsdu(qc, pxframe->attrib.amsdu);
3029 #ifdef CONFIG_RTW_MESH
3030 if (MLME_IS_MESH(padapter)) {
3031 /* active: don't care, light sleep: 0, deep sleep: 1*/
3032 set_mps_lv(qc, 0); //TBD
3033
3034 /* TBD: temporary set (rspi, eosp) = (0, 1) which means End MPSP */
3035 set_rspi(qc, 0);
3036 SetEOSP(qc, 1);
3037
3038 set_mctrl_present(qc, 1);
3039 }
3040 #endif
3041 }
3042
3043 /* TODO: fill HT Control Field */
3044
3045 /* Update Seq Num will be handled by f/w */
3046 {
3047 struct sta_info *psta;
3048 psta = pxframe->attrib.psta;
3049
3050 if (psta == NULL) {
3051 RTW_INFO("%s, psta==NUL\n", __func__);
3052 return _FAIL;
3053 }
3054
3055 if (!(psta->state & WIFI_ASOC_STATE)) {
3056 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3057 return _FAIL;
3058 }
3059
3060 if (psta) {
3061 psta->sta_xmitpriv.txseq_tid[pxframe->attrib.priority]++;
3062 psta->sta_xmitpriv.txseq_tid[pxframe->attrib.priority] &= 0xFFF;
3063 pxframe->attrib.seqnum = psta->sta_xmitpriv.txseq_tid[pxframe->attrib.priority];
3064
3065 SetSeqNum(hdr, pxframe->attrib.seqnum);
3066
3067 #ifdef CONFIG_80211N_HT
3068 #if 0 /* move into update_attrib_phy_info(). */
3069 /* check if enable ampdu */
3070 if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
3071 if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
3072 pattrib->ampdu_en = _TRUE;
3073 }
3074 #endif
3075 /* re-check if enable ampdu by BA_starting_seqctrl */
3076 if (pxframe->attrib.ampdu_en == _TRUE) {
3077 u16 tx_seq;
3078
3079 tx_seq = psta->BA_starting_seqctrl[pxframe->attrib.priority & 0x0f];
3080
3081 /* check BA_starting_seqctrl */
3082 if (SN_LESS(pxframe->attrib.seqnum, tx_seq)) {
3083 /* RTW_INFO("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
3084 pxframe->attrib.ampdu_en = _FALSE;/* AGG BK */
3085 } else if (SN_EQUAL(pxframe->attrib.seqnum, tx_seq)) {
3086 psta->BA_starting_seqctrl[pxframe->attrib.priority & 0x0f] = (tx_seq + 1) & 0xfff;
3087
3088 pxframe->attrib.ampdu_en = _TRUE;/* AGG EN */
3089 } else {
3090 /* RTW_INFO("tx ampdu over run\n"); */
3091 psta->BA_starting_seqctrl[pxframe->attrib.priority & 0x0f] = (pxframe->attrib.seqnum + 1) & 0xfff;
3092 pxframe->attrib.ampdu_en = _TRUE;/* AGG EN */
3093 }
3094
3095 }
3096 #endif /* CONFIG_80211N_HT */
3097 }
3098 }
3099
3100 } else {
3101
3102 }
3103
3104 exit:
3105
3106
3107 return res;
3108 }
3109
3110
3111
3112
3113
3114 #endif
3115
3116 static void rtw_fill_htc_in_wlanhdr(_adapter *padapter, struct pkt_attrib *pattrib, u32 *phtc_buf)
3117 {
3118 #ifdef CONFIG_80211AX_HE
3119 rtw_he_fill_htc(padapter, pattrib, phtc_buf);
3120 #endif
3121 }
3122
3123 s32 rtw_make_wlanhdr(_adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib)
3124 {
3125 u16 *qc;
3126 u32 *htc = NULL;
3127
3128 struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
3129 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3130 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
3131 u8 qos_option = _FALSE, htc_option = _FALSE;
3132 sint res = _SUCCESS;
3133 u16 *fctrl = &pwlanhdr->frame_ctl;
3134
3135 /* struct sta_info *psta; */
3136
3137 /* sint bmcst = IS_MCAST(pattrib->ra); */
3138
3139
3140 /*
3141 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3142 if(pattrib->psta != psta)
3143 {
3144 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
3145 return;
3146 }
3147
3148 if(psta==NULL)
3149 {
3150 RTW_INFO("%s, psta==NUL\n", __func__);
3151 return _FAIL;
3152 }
3153
3154 if(!(psta->state &WIFI_ASOC_STATE))
3155 {
3156 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3157 return _FAIL;
3158 }
3159 */
3160
3161 #ifdef RTW_PHL_TX
3162 _rtw_memset(hdr, 0, pattrib->hdrlen);
3163 #else
3164 _rtw_memset(hdr, 0, WLANHDR_OFFSET);
3165 #endif
3166
3167 set_frame_sub_type(fctrl, pattrib->subtype);
3168
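/*
 * Fill the address fields according to the operating role:
 *   STA -> AP          : ToDS=1, FrDs=0, A1=BSSID, A2=TA, A3=DA
 *   AP  -> STA         : ToDS=0, FrDs=1, A1=DA, A2=BSSID, A3=SA
 *   IBSS / TDLS direct : ToDS=0, FrDs=0, A1=DA, A2=TA/SA, A3=BSSID
 *   WDS (4-address)    : ToDS=1, FrDs=1, A1=RA, A2=TA, A3=DA, A4=SA
 */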
3169 if (pattrib->subtype & WIFI_DATA_TYPE) {
3170 if (MLME_IS_STA(padapter)) {
3171 #ifdef CONFIG_TDLS
3172 if (pattrib->direct_link == _TRUE) {
3173 /* TDLS data transfer, ToDS=0, FrDs=0 */
3174 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3175 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3176 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3177
3178 if (pattrib->qos_en)
3179 qos_option = _TRUE;
3180 } else
3181 #endif /* CONFIG_TDLS */
3182 {
3183 #ifdef CONFIG_RTW_WDS
3184 if (pattrib->wds) {
3185 SetToDs(fctrl);
3186 SetFrDs(fctrl);
3187 _rtw_memcpy(pwlanhdr->addr1, pattrib->ra, ETH_ALEN);
3188 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3189 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3190 _rtw_memcpy(pwlanhdr->addr4, pattrib->src, ETH_ALEN);
3191 } else
3192 #endif
3193 {
3194 /* to_ds = 1, fr_ds = 0; */
3195 /* 1.Data transfer to AP */
3196 /* 2. ARP packets will be relayed by the AP */
3197 SetToDs(fctrl);
3198 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
3199 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3200 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3201 }
3202
3203 if (pqospriv->qos_option)
3204 qos_option = _TRUE;
3205 }
3206 } else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)) {
3207 #ifdef CONFIG_RTW_WDS
3208 if (pattrib->wds) {
3209 SetToDs(fctrl);
3210 SetFrDs(fctrl);
3211 _rtw_memcpy(pwlanhdr->addr1, pattrib->ra, ETH_ALEN);
3212 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3213 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3214 _rtw_memcpy(pwlanhdr->addr4, pattrib->src, ETH_ALEN);
3215 } else
3216 #endif
3217 {
3218 /* to_ds = 0, fr_ds = 1; */
3219 SetFrDs(fctrl);
3220 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3221 _rtw_memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv), ETH_ALEN);
3222 _rtw_memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
3223 }
3224
3225 if (pattrib->qos_en)
3226 qos_option = _TRUE;
3227 } else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == _TRUE) ||
3228 (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == _TRUE)) {
3229 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3230 _rtw_memcpy(pwlanhdr->addr2, pattrib->ta, ETH_ALEN);
3231 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3232
3233 if (pattrib->qos_en)
3234 qos_option = _TRUE;
3235 #ifdef CONFIG_RTW_MESH
3236 } else if (check_fwstate(pmlmepriv, WIFI_MESH_STATE) == _TRUE) {
3237 rtw_mesh_tx_build_whdr(padapter, pattrib, fctrl, pwlanhdr);
3238 if (pattrib->qos_en)
3239 qos_option = _TRUE;
3240 else {
3241 RTW_WARN("[%s] !qos_en in Mesh\n", __FUNCTION__);
3242 res = _FAIL;
3243 goto exit;
3244 }
3245 #endif
3246 } else {
3247 res = _FAIL;
3248 goto exit;
3249 }
3250
3251 if (pattrib->mdata)
3252 SetMData(fctrl);
3253
3254 if (pattrib->encrypt)
3255 SetPrivacy(fctrl);
3256
3257 if (pattrib->order)
3258 htc_option = _TRUE;
3259
3260 if (qos_option) {
3261 qc = (unsigned short *)(hdr + (XATTRIB_GET_WDS(pattrib) ? WLAN_HDR_A4_LEN : WLAN_HDR_A3_LEN));
3262
3263 if (pattrib->priority)
3264 SetPriority(qc, pattrib->priority);
3265
3266 SetEOSP(qc, pattrib->eosp);
3267
3268 SetAckpolicy(qc, pattrib->ack_policy);
3269
3270 if (pattrib->amsdu)
3271 SetAMsdu(qc, pattrib->amsdu);
3272 #ifdef CONFIG_RTW_MESH
3273 if (MLME_IS_MESH(padapter)) {
3274 /* active: don't care, light sleep: 0, deep sleep: 1*/
3275 set_mps_lv(qc, 0); //TBD
3276
3277 /* TBD: temporary set (rspi, eosp) = (0, 1) which means End MPSP */
3278 set_rspi(qc, 0);
3279 SetEOSP(qc, 1);
3280
3281 set_mctrl_present(qc, 1);
3282 }
3283 #endif
3284 }
3285
3286 /* TODO: fill HT Control Field */
3287 if (htc_option == _TRUE) {
3288 set_htc_order_bit(fctrl);
3289
3290 htc = (u32 *)(hdr + pattrib->hdrlen - 4);
3291 rtw_fill_htc_in_wlanhdr(padapter, pattrib, htc);
3292 }
3293
3294 /* Update Seq Num will be handled by f/w */
3295 {
3296 struct sta_info *psta;
3297 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3298 if (pattrib->psta != psta) {
3299 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
3300 return _FAIL;
3301 }
3302
3303 if (psta == NULL) {
3304 RTW_INFO("%s, psta==NUL\n", __func__);
3305 return _FAIL;
3306 }
3307
3308 if (!(psta->state & WIFI_ASOC_STATE)) {
3309 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3310 return _FAIL;
3311 }
3312
3313
3314 if (psta) {
3315 psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
3316 psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
3317 pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
3318
3319 SetSeqNum(hdr, pattrib->seqnum);
3320
3321 #ifdef CONFIG_80211N_HT
3322 #if 0 /* move into update_attrib_phy_info(). */
3323 /* check if enable ampdu */
3324 if (pattrib->ht_en && psta->htpriv.ampdu_enable) {
3325 if (psta->htpriv.agg_enable_bitmap & BIT(pattrib->priority))
3326 pattrib->ampdu_en = _TRUE;
3327 }
3328 #endif
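/*
 * A frame may only join an A-MPDU if its sequence number is not behind
 * BA_starting_seqctrl (the block-ack window origin): frames behind the
 * window are sent unaggregated, while frames at or ahead of it advance
 * the window origin and keep aggregation enabled.
 */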
3329 /* re-check if enable ampdu by BA_starting_seqctrl */
3330 if (pattrib->ampdu_en == _TRUE) {
3331 u16 tx_seq;
3332
3333 tx_seq = psta->BA_starting_seqctrl[pattrib->priority & 0x0f];
3334
3335 /* check BA_starting_seqctrl */
3336 if (SN_LESS(pattrib->seqnum, tx_seq)) {
3337 /* RTW_INFO("tx ampdu seqnum(%d) < tx_seq(%d)\n", pattrib->seqnum, tx_seq); */
3338 pattrib->ampdu_en = _FALSE;/* AGG BK */
3339 } else if (SN_EQUAL(pattrib->seqnum, tx_seq)) {
3340 psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (tx_seq + 1) & 0xfff;
3341
3342 pattrib->ampdu_en = _TRUE;/* AGG EN */
3343 } else {
3344 /* RTW_INFO("tx ampdu over run\n"); */
3345 psta->BA_starting_seqctrl[pattrib->priority & 0x0f] = (pattrib->seqnum + 1) & 0xfff;
3346 pattrib->ampdu_en = _TRUE;/* AGG EN */
3347 }
3348
3349 }
3350 #endif /* CONFIG_80211N_HT */
3351 }
3352 }
3353
3354 } else {
3355
3356 }
3357
3358 exit:
3359
3360
3361 return res;
3362 }
3363
3364 s32 rtw_txframes_pending(_adapter *padapter)
3365 {
3366 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
3367
3368 return ((_rtw_queue_empty(&pxmitpriv->be_pending) == _FALSE) ||
3369 (_rtw_queue_empty(&pxmitpriv->bk_pending) == _FALSE) ||
3370 (_rtw_queue_empty(&pxmitpriv->vi_pending) == _FALSE) ||
3371 (_rtw_queue_empty(&pxmitpriv->vo_pending) == _FALSE));
3372 }
3373
3374 s32 rtw_txframes_sta_ac_pending(_adapter *padapter, struct pkt_attrib *pattrib)
3375 {
3376 struct sta_info *psta;
3377 struct tx_servq *ptxservq;
3378 int priority = pattrib->priority;
3379 /*
3380 if(pattrib->psta)
3381 {
3382 psta = pattrib->psta;
3383 }
3384 else
3385 {
3386 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
3387 psta=rtw_get_stainfo(&padapter->stapriv ,&pattrib->ra[0]);
3388 }
3389 */
3390 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3391 if (pattrib->psta != psta) {
3392 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
3393 return 0;
3394 }
3395
3396 if (psta == NULL) {
3397 RTW_INFO("%s, psta==NUL\n", __func__);
3398 return 0;
3399 }
3400
3401 if (!(psta->state & WIFI_ASOC_STATE)) {
3402 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
3403 return 0;
3404 }
3405
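/* WMM user-priority to AC queue mapping: 1,2 -> BK; 0,3 -> BE; 4,5 -> VI; 6,7 -> VO */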
3406 switch (priority) {
3407 case 1:
3408 case 2:
3409 ptxservq = &(psta->sta_xmitpriv.bk_q);
3410 break;
3411 case 4:
3412 case 5:
3413 ptxservq = &(psta->sta_xmitpriv.vi_q);
3414 break;
3415 case 6:
3416 case 7:
3417 ptxservq = &(psta->sta_xmitpriv.vo_q);
3418 break;
3419 case 0:
3420 case 3:
3421 default:
3422 ptxservq = &(psta->sta_xmitpriv.be_q);
3423 break;
3424
3425 }
3426
3427 return ptxservq->qcnt;
3428 }
3429
3430 #ifdef CONFIG_TDLS
3431
3432 int rtw_build_tdls_ies(_adapter *padapter, struct xmit_frame *pxmitframe, u8 *pframe, struct tdls_txmgmt *ptxmgmt)
3433 {
3434 struct pkt_attrib *pattrib = &pxmitframe->attrib;
3435 struct sta_info *ptdls_sta = NULL;
3436 int res = _SUCCESS;
3437
3438 ptdls_sta = rtw_get_stainfo((&padapter->stapriv), pattrib->dst);
3439 if (ptdls_sta == NULL) {
3440 switch (ptxmgmt->action_code) {
3441 case TDLS_DISCOVERY_REQUEST:
3442 case TUNNELED_PROBE_REQ:
3443 case TUNNELED_PROBE_RSP:
3444 break;
3445 default:
3446 RTW_INFO("[TDLS] %s - Direct Link Peer = "MAC_FMT" not found for action = %d\n", __func__, MAC_ARG(pattrib->dst), ptxmgmt->action_code);
3447 res = _FAIL;
3448 goto exit;
3449 }
3450 }
3451
3452 switch (ptxmgmt->action_code) {
3453 case TDLS_SETUP_REQUEST:
3454 rtw_build_tdls_setup_req_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3455 break;
3456 case TDLS_SETUP_RESPONSE:
3457 rtw_build_tdls_setup_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3458 break;
3459 case TDLS_SETUP_CONFIRM:
3460 rtw_build_tdls_setup_cfm_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3461 break;
3462 case TDLS_TEARDOWN:
3463 rtw_build_tdls_teardown_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3464 break;
3465 case TDLS_DISCOVERY_REQUEST:
3466 rtw_build_tdls_dis_req_ies(padapter, pxmitframe, pframe, ptxmgmt);
3467 break;
3468 case TDLS_PEER_TRAFFIC_INDICATION:
3469 rtw_build_tdls_peer_traffic_indication_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3470 break;
3471 #ifdef CONFIG_TDLS_CH_SW
3472 case TDLS_CHANNEL_SWITCH_REQUEST:
3473 rtw_build_tdls_ch_switch_req_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3474 break;
3475 case TDLS_CHANNEL_SWITCH_RESPONSE:
3476 rtw_build_tdls_ch_switch_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3477 break;
3478 #endif
3479 case TDLS_PEER_TRAFFIC_RESPONSE:
3480 rtw_build_tdls_peer_traffic_rsp_ies(padapter, pxmitframe, pframe, ptxmgmt, ptdls_sta);
3481 break;
3482 #ifdef CONFIG_WFD
3483 case TUNNELED_PROBE_REQ:
3484 rtw_build_tunneled_probe_req_ies(padapter, pxmitframe, pframe);
3485 break;
3486 case TUNNELED_PROBE_RSP:
3487 rtw_build_tunneled_probe_rsp_ies(padapter, pxmitframe, pframe);
3488 break;
3489 #endif /* CONFIG_WFD */
3490 default:
3491 res = _FAIL;
3492 break;
3493 }
3494
3495 exit:
3496 return res;
3497 }
3498
3499 s32 rtw_make_tdls_wlanhdr(_adapter *padapter, u8 *hdr, struct pkt_attrib *pattrib, struct tdls_txmgmt *ptxmgmt)
3500 {
3501 u16 *qc;
3502 struct rtw_ieee80211_hdr *pwlanhdr = (struct rtw_ieee80211_hdr *)hdr;
3503 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3504 struct qos_priv *pqospriv = &pmlmepriv->qospriv;
3505 struct sta_priv *pstapriv = &padapter->stapriv;
3506 struct sta_info *psta = NULL, *ptdls_sta = NULL;
3507 u8 tdls_seq = 0, baddr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
3508
3509 sint res = _SUCCESS;
3510 u16 *fctrl = &pwlanhdr->frame_ctl;
3511
3512
3513 _rtw_memset(hdr, 0, WLANHDR_OFFSET);
3514
3515 set_frame_sub_type(fctrl, pattrib->subtype);
3516
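/*
 * Setup, discovery, tunneled-probe, peer-traffic-indication and PSM-request
 * frames are encapsulated as data and relayed through the AP (ToDS=1,
 * A1=BSSID), while channel-switch, PSM-response and peer-traffic-response
 * frames go over the direct link (A1=peer, A3=BSSID) and use the TDLS
 * peer's own sequence-number space; teardown takes either path depending
 * on the reason code.
 */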
3517 switch (ptxmgmt->action_code) {
3518 case TDLS_SETUP_REQUEST:
3519 case TDLS_SETUP_RESPONSE:
3520 case TDLS_SETUP_CONFIRM:
3521 case TDLS_PEER_TRAFFIC_INDICATION:
3522 case TDLS_PEER_PSM_REQUEST:
3523 case TUNNELED_PROBE_REQ:
3524 case TUNNELED_PROBE_RSP:
3525 case TDLS_DISCOVERY_REQUEST:
3526 SetToDs(fctrl);
3527 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
3528 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3529 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3530 break;
3531 case TDLS_CHANNEL_SWITCH_REQUEST:
3532 case TDLS_CHANNEL_SWITCH_RESPONSE:
3533 case TDLS_PEER_PSM_RESPONSE:
3534 case TDLS_PEER_TRAFFIC_RESPONSE:
3535 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3536 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3537 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3538 tdls_seq = 1;
3539 break;
3540 case TDLS_TEARDOWN:
3541 if (ptxmgmt->status_code == _RSON_TDLS_TEAR_UN_RSN_) {
3542 _rtw_memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
3543 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3544 _rtw_memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv), ETH_ALEN);
3545 tdls_seq = 1;
3546 } else {
3547 SetToDs(fctrl);
3548 _rtw_memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv), ETH_ALEN);
3549 _rtw_memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
3550 _rtw_memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
3551 }
3552 break;
3553 }
3554
3555 if (pattrib->encrypt)
3556 SetPrivacy(fctrl);
3557
3558 if (ptxmgmt->action_code == TDLS_PEER_TRAFFIC_RESPONSE)
3559 SetPwrMgt(fctrl);
3560
3561 if (pqospriv->qos_option) {
3562 qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
3563 if (pattrib->priority)
3564 SetPriority(qc, pattrib->priority);
3565 SetAckpolicy(qc, pattrib->ack_policy);
3566 }
3567
3568 psta = pattrib->psta;
3569
3570 /* 1. update seq_num per link by sta_info */
3571 /* 2. rewrite encrypt to _AES_, also rewrite iv_len, icv_len */
3572 if (tdls_seq == 1) {
3573 ptdls_sta = rtw_get_stainfo(pstapriv, pattrib->dst);
3574 if (ptdls_sta) {
3575 ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
3576 ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
3577 pattrib->seqnum = ptdls_sta->sta_xmitpriv.txseq_tid[pattrib->priority];
3578 SetSeqNum(hdr, pattrib->seqnum);
3579
3580 if (pattrib->encrypt) {
3581 pattrib->encrypt = _AES_;
3582 pattrib->iv_len = 8;
3583 pattrib->icv_len = 8;
3584 pattrib->bswenc = _FALSE;
3585 }
3586 pattrib->mac_id = ptdls_sta->phl_sta->macid;
3587 } else {
3588 res = _FAIL;
3589 goto exit;
3590 }
3591 } else if (psta) {
3592 psta->sta_xmitpriv.txseq_tid[pattrib->priority]++;
3593 psta->sta_xmitpriv.txseq_tid[pattrib->priority] &= 0xFFF;
3594 pattrib->seqnum = psta->sta_xmitpriv.txseq_tid[pattrib->priority];
3595 SetSeqNum(hdr, pattrib->seqnum);
3596 }
3597
3598
3599 exit:
3600
3601
3602 return res;
3603 }
3604
3605 s32 rtw_xmit_tdls_coalesce(_adapter *padapter, struct xmit_frame *pxmitframe, struct tdls_txmgmt *ptxmgmt)
3606 {
3607 s32 llc_sz;
3608
3609 u8 *pframe, *mem_start;
3610
3611 struct sta_info *psta;
3612 struct sta_priv *pstapriv = &padapter->stapriv;
3613 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
3614 struct pkt_attrib *pattrib = &pxmitframe->attrib;
3615 u8 *pbuf_start;
3616 s32 bmcst = IS_MCAST(pattrib->ra);
3617 s32 res = _SUCCESS;
3618
3619
3620 if (pattrib->psta)
3621 psta = pattrib->psta;
3622 else {
3623 if (bmcst)
3624 psta = rtw_get_bcmc_stainfo(padapter);
3625 else
3626 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
3627 }
3628
3629 if (psta == NULL) {
3630 res = _FAIL;
3631 goto exit;
3632 }
3633
3634 if (pxmitframe->buf_addr == NULL) {
3635 res = _FAIL;
3636 goto exit;
3637 }
3638
3639 pbuf_start = pxmitframe->buf_addr;
3640 mem_start = pbuf_start + TXDESC_OFFSET;
3641
3642 if (rtw_make_tdls_wlanhdr(padapter, mem_start, pattrib, ptxmgmt) == _FAIL) {
3643 res = _FAIL;
3644 goto exit;
3645 }
3646
3647 pframe = mem_start;
3648 pframe += pattrib->hdrlen;
3649
3650 /* add the IV, if necessary... */
3651 if (pattrib->iv_len) {
3652 if (psta != NULL) {
3653 switch (pattrib->encrypt) {
3654 case _WEP40_:
3655 case _WEP104_:
3656 WEP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
3657 break;
3658 case _TKIP_:
3659 if (bmcst)
3660 TKIP_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
3661 else
3662 TKIP_IV(pattrib->iv, psta->dot11txpn, 0);
3663 break;
3664 case _AES_:
3665 if (bmcst)
3666 AES_IV(pattrib->iv, psta->dot11txpn, pattrib->key_idx);
3667 else
3668 AES_IV(pattrib->iv, psta->dot11txpn, 0);
3669 break;
3670 }
3671 }
3672
3673 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len);
3674 pframe += pattrib->iv_len;
3675
3676 }
3677
3678 llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
3679 pframe += llc_sz;
3680
3681 /* pattrib->pktlen will be counted in rtw_build_tdls_ies */
3682 pattrib->pktlen = 0;
3683
3684 rtw_build_tdls_ies(padapter, pxmitframe, pframe, ptxmgmt);
3685
3686 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
3687 pframe += pattrib->pktlen;
3688 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
3689 pframe += pattrib->icv_len;
3690 }
3691
3692 pattrib->nr_frags = 1;
3693 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len + llc_sz +
3694 ((pattrib->bswenc) ? pattrib->icv_len : 0) + pattrib->pktlen;
3695
3696 if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
3697 res = _FAIL;
3698 goto exit;
3699 }
3700
3701 xmitframe_swencrypt(padapter, pxmitframe);
3702
3703 update_attrib_vcs_info(padapter, pxmitframe);
3704
3705 exit:
3706
3707
3708 return res;
3709 }
3710 #endif /* CONFIG_TDLS */
3711
3712 /*
3713  * Calculate the maximum 802.11 packet size from pkt_attrib.
3714  * This function does not consider the fragmentation case.
3715 */
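/*
 * Illustrative example (assumed, typical values): for a QoS data frame using
 * CCMP with HW encryption and no mesh control, the sum below works out to
 *   hdrlen(26) + iv_len(8) + 0 + LLC(8) + pktlen + 0(MIC, non-TKIP) + 0(ICV, HW enc)
 *   = pktlen + 42 octets.
 */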
3716 u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
3717 {
3718 u32 len = 0;
3719
3720 len = pattrib->hdrlen /* WLAN Header */
3721 + pattrib->iv_len /* IV */
3722 + XATTRIB_GET_MCTRL_LEN(pattrib)
3723 + SNAP_SIZE + sizeof(u16) /* LLC */
3724 + pattrib->pktlen
3725 + (pattrib->encrypt == _TKIP_ ? 8 : 0) /* MIC */
3726 + (pattrib->bswenc ? pattrib->icv_len : 0) /* ICV */
3727 ;
3728
3729 return len;
3730 }
3731
3732 #ifdef CONFIG_TX_AMSDU
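/*
 * Summary of the eligibility checks implemented below: A-MSDU aggregation is
 * skipped for multicast RAs, for EAPOL (0x888e), ARP (0x0806), WAPI WAI
 * (0x88b4) and DHCP frames, for WEP/TKIP-protected traffic, for non-QoS
 * traffic, and for peers that disabled A-MSDU or fail the A-MSDU-in-A-MPDU
 * validity check.
 */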
3733 s32 check_amsdu(struct xmit_frame *pxmitframe)
3734 {
3735 struct pkt_attrib *pattrib;
3736 struct sta_info *psta = NULL;
3737 s32 ret = _TRUE;
3738
3739 if (!pxmitframe)
3740 ret = _FALSE;
3741
3742 pattrib = &pxmitframe->attrib;
3743
3744 psta = rtw_get_stainfo(&pxmitframe->padapter->stapriv, &pattrib->ra[0]);
3745 if (psta) {
3746 if (psta->flags & WLAN_STA_AMSDU_DISABLE)
3747 ret = _FALSE;
3748 }
3749
3750 if (IS_MCAST(pattrib->ra))
3751 ret = _FALSE;
3752
3753 if ((pattrib->ether_type == 0x888e) ||
3754 (pattrib->ether_type == 0x0806) ||
3755 (pattrib->ether_type == 0x88b4) ||
3756 (pattrib->dhcp_pkt == 1))
3757 ret = _FALSE;
3758
3759 if ((pattrib->encrypt == _WEP40_) ||
3760 (pattrib->encrypt == _WEP104_) ||
3761 (pattrib->encrypt == _TKIP_))
3762 ret = _FALSE;
3763
3764 if (!pattrib->qos_en)
3765 ret = _FALSE;
3766
3767 if (IS_AMSDU_AMPDU_NOT_VALID(pattrib))
3768 ret = _FALSE;
3769
3770 return ret;
3771 }
3772
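/*
 * tx_amsdu policy as implemented below: 1 enables A-MSDU unconditionally;
 * values >= 2 enable it only when tx_amsdu_rate is 0 or the current TX
 * throughput (cur_tx_tp) exceeds tx_amsdu_rate; anything else disables it.
 */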
3773 s32 check_amsdu_tx_support(_adapter *padapter)
3774 {
3775 struct dvobj_priv *pdvobjpriv;
3776 int tx_amsdu;
3777 int tx_amsdu_rate;
3778 int current_tx_rate;
3779 s32 ret = _FALSE;
3780
3781 pdvobjpriv = adapter_to_dvobj(padapter);
3782 tx_amsdu = padapter->tx_amsdu;
3783 tx_amsdu_rate = padapter->tx_amsdu_rate;
3784 current_tx_rate = pdvobjpriv->traffic_stat.cur_tx_tp;
3785
3786 if (tx_amsdu == 1)
3787 ret = _TRUE;
3788 else if (tx_amsdu >= 2 && (tx_amsdu_rate == 0 || current_tx_rate > tx_amsdu_rate))
3789 ret = _TRUE;
3790 else
3791 ret = _FALSE;
3792
3793 return ret;
3794 }
3795
3796 s32 rtw_xmitframe_coalesce_amsdu(_adapter *padapter, struct xmit_frame *pxmitframe, struct xmit_frame *pxmitframe_queue)
3797 {
3798
3799 struct pkt_file pktfile;
3800 struct pkt_attrib *pattrib;
3801 struct sk_buff *pkt;
3802
3803 struct pkt_file pktfile_queue;
3804 struct pkt_attrib *pattrib_queue;
3805 struct sk_buff *pkt_queue;
3806
3807 s32 llc_sz, mem_sz;
3808
3809 s32 padding = 0;
3810
3811 u8 *pframe, *mem_start;
3812 u8 hw_hdr_offset;
3813
3814 u16 *len;
3815 u8 *pbuf_start;
3816 s32 res = _SUCCESS;
3817
3818 if (pxmitframe->buf_addr == NULL) {
3819 RTW_INFO("==> %s buf_addr==NULL\n", __FUNCTION__);
3820 return _FAIL;
3821 }
3822
3823
3824 pbuf_start = pxmitframe->buf_addr;
3825
3826 #ifdef CONFIG_USB_TX_AGGREGATION
3827 hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
3828 #else
3829 #ifdef CONFIG_TX_EARLY_MODE /* for SDIO && Tx Agg */
3830 hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE;
3831 #else
3832 hw_hdr_offset = TXDESC_OFFSET;
3833 #endif
3834 #endif
3835
3836 mem_start = pbuf_start + hw_hdr_offset; //for DMA
3837
3838 pattrib = &pxmitframe->attrib;
3839
3840 pattrib->amsdu = 1;
3841
3842 if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
3843 RTW_INFO("%s: rtw_make_wlanhdr fail; drop pkt\n", __func__);
3844 res = _FAIL;
3845 goto exit;
3846 }
3847
3848 llc_sz = 0;
3849
3850 pframe = mem_start;
3851
3852 //SetMFrag(mem_start);
3853 ClearMFrag(mem_start);
3854
3855 pframe += pattrib->hdrlen;
3856
3857 /* add the IV, if necessary... */
3858 if (pattrib->iv_len) {
3859 update_attrib_sec_iv_info(padapter, pattrib);
3860 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len); // queue or new?
3861
3862 RTW_DBG("%s: keyid=%d pattrib->iv[3]=%.2x pframe=%.2x %.2x %.2x %.2x\n",
3863 __func__, padapter->securitypriv.dot11PrivacyKeyIndex,
3864 pattrib->iv[3], *pframe, *(pframe + 1), *(pframe + 2), *(pframe + 3));
3865
3866 pframe += pattrib->iv_len;
3867 }
3868
3869 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len;
3870
3871 if (pxmitframe_queue) {
3872 pattrib_queue = &pxmitframe_queue->attrib;
3873 pkt_queue = pxmitframe_queue->pkt;
3874
3875 _rtw_open_pktfile(pkt_queue, &pktfile_queue);
3876 _rtw_pktfile_read(&pktfile_queue, NULL, pattrib_queue->pkt_hdrlen);
3877
3878 #ifdef CONFIG_RTW_MESH
3879 if (MLME_IS_MESH(padapter)) {
3880 /* mDA(6), mSA(6), len(2), mctrl */
3881 _rtw_memcpy(pframe, pattrib_queue->mda, ETH_ALEN);
3882 pframe += ETH_ALEN;
3883 _rtw_memcpy(pframe, pattrib_queue->msa, ETH_ALEN);
3884 pframe += ETH_ALEN;
3885 len = (u16 *)pframe;
3886 pframe += 2;
3887 rtw_mesh_tx_build_mctrl(padapter, pattrib_queue, pframe);
3888 pframe += XATTRIB_GET_MCTRL_LEN(pattrib_queue);
3889 } else
3890 #endif
3891 {
3892 /* 802.3 MAC Header DA(6) SA(6) Len(2)*/
3893 _rtw_memcpy(pframe, pattrib_queue->dst, ETH_ALEN);
3894 pframe += ETH_ALEN;
3895 _rtw_memcpy(pframe, pattrib_queue->src, ETH_ALEN);
3896 pframe += ETH_ALEN;
3897 len = (u16 *)pframe;
3898 pframe += 2;
3899 }
3900
3901 llc_sz = rtw_put_snap(pframe, pattrib_queue->ether_type);
3902 pframe += llc_sz;
3903
3904 mem_sz = _rtw_pktfile_read(&pktfile_queue, pframe, pattrib_queue->pktlen);
3905 pframe += mem_sz;
3906
3907 *len = htons(XATTRIB_GET_MCTRL_LEN(pattrib_queue) + llc_sz + mem_sz);
3908
3909 //pad this A-MSDU subframe to a 4-byte boundary (the last subframe is not padded)
3910 padding = 4 - ((ETH_HLEN + XATTRIB_GET_MCTRL_LEN(pattrib_queue) + llc_sz + mem_sz) & (4-1));
3911 if (padding == 4)
3912 padding = 0;
3913
3914 //_rtw_memset(pframe,0xaa, padding);
3915 pframe += padding;
3916
3917 pattrib->last_txcmdsz += ETH_HLEN + XATTRIB_GET_MCTRL_LEN(pattrib_queue) + llc_sz + mem_sz + padding;
3918 }
3919
3920 //second A-MSDU subframe: the current frame's own payload
3921
3922 pkt = pxmitframe->pkt;
3923 _rtw_open_pktfile(pkt, &pktfile);
3924 _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
3925
3926 #ifdef CONFIG_RTW_MESH
3927 if (MLME_IS_MESH(padapter)) {
3928 /* mDA(6), mSA(6), len(2), mctrl */
3929 _rtw_memcpy(pframe, pattrib->mda, ETH_ALEN);
3930 pframe += ETH_ALEN;
3931 _rtw_memcpy(pframe, pattrib->msa, ETH_ALEN);
3932 pframe += ETH_ALEN;
3933 len = (u16 *)pframe;
3934 pframe += 2;
3935 rtw_mesh_tx_build_mctrl(padapter, pattrib, pframe);
3936 pframe += XATTRIB_GET_MCTRL_LEN(pattrib);
3937 } else
3938 #endif
3939 {
3940 /* 802.3 MAC Header DA(6) SA(6) Len(2) */
3941 _rtw_memcpy(pframe, pattrib->dst, ETH_ALEN);
3942 pframe += ETH_ALEN;
3943 _rtw_memcpy(pframe, pattrib->src, ETH_ALEN);
3944 pframe += ETH_ALEN;
3945 len = (u16 *)pframe;
3946 pframe += 2;
3947 }
3948
3949 llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
3950 pframe += llc_sz;
3951
3952 mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
3953
3954 pframe += mem_sz;
3955
3956 *len = htons(XATTRIB_GET_MCTRL_LEN(pattrib) + llc_sz + mem_sz);
3957
3958 //the last A-MSDU subframe needs no padding
3959 padding = 0;
3960
3961 pattrib->nr_frags = 1;
3962
3963 pattrib->last_txcmdsz += ETH_HLEN + XATTRIB_GET_MCTRL_LEN(pattrib) + llc_sz + mem_sz + padding +
3964 ((pattrib->bswenc) ? pattrib->icv_len : 0);
3965
3966 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
3967 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
3968 pframe += pattrib->icv_len;
3969 }
3970
3971 if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
3972 RTW_INFO("xmitframe_addmic(padapter, pxmitframe)==_FAIL\n");
3973 res = _FAIL;
3974 goto exit;
3975 }
3976
3977 xmitframe_swencrypt(padapter, pxmitframe);
3978
3979 update_attrib_vcs_info(padapter, pxmitframe);
3980
3981 exit:
3982 return res;
3983 }
3984 #endif /* CONFIG_TX_AMSDU */
3985
3986 /*
3987
3988 This sub-routine will perform all the following:
3989
3990 1. remove 802.3 header.
3991 2. create wlan_header, based on the info in pxmitframe
3992 3. append sta's iv/ext-iv
3993 4. append LLC
3994 5. move frag chunk from pframe to pxmitframe->mem
3995 6. apply sw-encrypt, if necessary.
3996
3997 */
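/*
 * Resulting per-fragment layout in the xmit buffer (sketch; the descriptor
 * size is assumed to come from rtw_hal_get_txdesc_len()):
 *   [tx descriptor][802.11 header][IV, if encrypt][mesh ctrl + LLC/SNAP,
 *   first fragment only][payload chunk][ICV, only when SW encrypt]
 * Subsequent fragments start at the next 4-byte boundary (RND4).
 */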
3998 s32 rtw_xmitframe_coalesce(_adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
3999 {
4000 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
4001 struct pkt_file pktfile;
4002
4003 s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
4004
4005 SIZE_PTR addr;
4006
4007 u8 *pframe, *mem_start;
4008 u8 hw_hdr_offset;
4009
4010 /* struct sta_info *psta; */
4011 /* struct sta_priv *pstapriv = &padapter->stapriv; */
4012 /* struct mlme_priv *pmlmepriv = &padapter->mlmepriv; */
4013 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4014
4015 struct pkt_attrib *pattrib = &pxmitframe->attrib;
4016
4017 u8 *pbuf_start;
4018
4019 s32 bmcst = IS_MCAST(pattrib->ra);
4020 s32 res = _SUCCESS;
4021
4022
4023 /*
4024 if (pattrib->psta)
4025 {
4026 psta = pattrib->psta;
4027 } else
4028 {
4029 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
4030 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
4031 }
4032
4033 if(psta==NULL)
4034 {
4035
4036 RTW_INFO("%s, psta==NUL\n", __func__);
4037 return _FAIL;
4038 }
4039
4040
4041 if(!(psta->state &WIFI_ASOC_STATE))
4042 {
4043 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
4044 return _FAIL;
4045 }
4046 */
4047 if (pxmitframe->buf_addr == NULL) {
4048 RTW_INFO("==> %s buf_addr==NULL\n", __FUNCTION__);
4049 return _FAIL;
4050 }
4051
4052 pbuf_start = pxmitframe->buf_addr;
4053
4054 #if 0
4055 #ifdef CONFIG_USB_TX_AGGREGATION
4056 hw_hdr_offset = TXDESC_SIZE + (pxmitframe->pkt_offset * PACKET_OFFSET_SZ);
4057 #else
4058 #ifdef CONFIG_TX_EARLY_MODE /* for SDIO && Tx Agg */
4059 hw_hdr_offset = TXDESC_OFFSET + EARLY_MODE_INFO_SIZE;
4060 #else
4061 hw_hdr_offset = TXDESC_OFFSET;
4062 #endif
4063 #endif
4064 #endif
4065 hw_hdr_offset = rtw_hal_get_txdesc_len(GET_PHL_COM(dvobj), pattrib); /*FPGA_test*/
4066
4067 mem_start = pbuf_start + hw_hdr_offset;
4068
4069 if (rtw_make_wlanhdr(padapter, mem_start, pattrib) == _FAIL) {
4070 RTW_INFO("%s: rtw_make_wlanhdr fail; drop pkt\n", __func__);
4071 res = _FAIL;
4072 goto exit;
4073 }
4074
4075 _rtw_open_pktfile(pkt, &pktfile);
4076 _rtw_pktfile_read(&pktfile, NULL, pattrib->pkt_hdrlen);
4077
4078 frg_inx = 0;
4079 frg_len = pxmitpriv->frag_len - 4;/* 2346-4 = 2342 */
4080
4081 while (1) {
4082 llc_sz = 0;
4083
4084 mpdu_len = frg_len;
4085
4086 pframe = mem_start;
4087
4088 SetMFrag(mem_start);
4089
4090 pframe += pattrib->hdrlen;
4091 mpdu_len -= pattrib->hdrlen;
4092
4093 /* add the IV, if necessary... */
4094 if (pattrib->iv_len) {
4095 update_attrib_sec_iv_info(padapter, pattrib);
4096 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len);
4097
4098
4099 pframe += pattrib->iv_len;
4100
4101 mpdu_len -= pattrib->iv_len;
4102 }
4103
4104 if (frg_inx == 0) {
4105 #ifdef CONFIG_RTW_MESH
4106 if (MLME_IS_MESH(padapter)) {
4107 rtw_mesh_tx_build_mctrl(padapter, pattrib, pframe);
4108 pframe += XATTRIB_GET_MCTRL_LEN(pattrib);
4109 mpdu_len -= XATTRIB_GET_MCTRL_LEN(pattrib);
4110 }
4111 #endif
4112
4113 llc_sz = rtw_put_snap(pframe, pattrib->ether_type);
4114 pframe += llc_sz;
4115 mpdu_len -= llc_sz;
4116 }
4117
4118 if ((pattrib->icv_len > 0) && (pattrib->bswenc))
4119 mpdu_len -= pattrib->icv_len;
4120
4121
4122 if (bmcst) {
4123 /* don't fragment broadcast/multicast packets */
4124 mem_sz = _rtw_pktfile_read(&pktfile, pframe, pattrib->pktlen);
4125 } else
4126 mem_sz = _rtw_pktfile_read(&pktfile, pframe, mpdu_len);
4127
4128 pframe += mem_sz;
4129
4130 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
4131 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
4132 pframe += pattrib->icv_len;
4133 }
4134
4135 frg_inx++;
4136
4137 if (bmcst || (rtw_endofpktfile(&pktfile) == _TRUE)) {
4138 pattrib->nr_frags = frg_inx;
4139
4140 pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->iv_len +
4141 ((pattrib->nr_frags == 1) ? (XATTRIB_GET_MCTRL_LEN(pattrib) + llc_sz) : 0) +
4142 ((pattrib->bswenc) ? pattrib->icv_len : 0) + mem_sz;
4143
4144 ClearMFrag(mem_start);
4145
4146 break;
4147 }
4148
4149 addr = (SIZE_PTR)(pframe);
4150
4151 mem_start = (unsigned char *)RND4(addr) + hw_hdr_offset;
4152 _rtw_memcpy(mem_start, pbuf_start + hw_hdr_offset, pattrib->hdrlen);
4153
4154 }
4155
4156 if (xmitframe_addmic(padapter, pxmitframe) == _FAIL) {
4157 RTW_INFO("xmitframe_addmic(padapter, pxmitframe)==_FAIL\n");
4158 res = _FAIL;
4159 goto exit;
4160 }
4161
4162 xmitframe_swencrypt(padapter, pxmitframe);
4163
4164 if (bmcst == _FALSE)
4165 update_attrib_vcs_info(padapter, pxmitframe);
4166 else
4167 pattrib->vcs_mode = NONE_VCS;
4168
4169 exit:
4170
4171
4172 return res;
4173 }
4174
4175 #if defined(CONFIG_IEEE80211W) || defined(CONFIG_RTW_MESH)
4176 /*
4177  * CCMP encryption for unicast robust mgmt frame and broadcast group privacy action
4178 * BIP for broadcast robust mgmt frame
4179 */
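/*
 * Three paths, as handled below:
 *  - unicast robust mgmt frame: CCMP/GCMP with the peer's pairwise key
 *  - broadcast group privacy action frame: group cipher with the GTK
 *  - other broadcast robust mgmt frames: BIP using the IGTK, appending an MME
 */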
4180 s32 rtw_mgmt_xmitframe_coalesce(_adapter *padapter, struct sk_buff *pkt, struct xmit_frame *pxmitframe)
4181 {
4182 #define DBG_MGMT_XMIT_COALESEC_DUMP 0
4183 #define DBG_MGMT_XMIT_BIP_DUMP 0
4184 #define DBG_MGMT_XMIT_ENC_DUMP 0
4185
4186 struct pkt_file pktfile;
4187 s32 frg_inx, frg_len, mpdu_len, llc_sz, mem_sz;
4188 SIZE_PTR addr;
4189 u8 *pframe, *mem_start = NULL, *tmp_buf = NULL;
4190 u8 hw_hdr_offset, subtype ;
4191 u8 category = 0xFF;
4192 struct sta_info *psta = NULL;
4193 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4194 struct pkt_attrib *pattrib = &pxmitframe->attrib;
4195 u8 *pbuf_start;
4196 s32 bmcst = IS_MCAST(pattrib->ra);
4197 s32 res = _FAIL;
4198 u8 *BIP_AAD = NULL;
4199 u8 *MGMT_body = NULL;
4200
4201 struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
4202 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
4203 struct rtw_ieee80211_hdr *pwlanhdr;
4204 u8 mme_cont[_MME_IE_LENGTH_ - 2];
4205 u8 mme_clen;
4206
4207 u32 ori_len;
4208 union pn48 *pn = NULL;
4209 enum security_type cipher = _NO_PRIVACY_;
4210 u8 kid;
4211
4212 if (pxmitframe->buf_addr == NULL) {
4213 RTW_WARN(FUNC_ADPT_FMT" pxmitframe->buf_addr == NULL\n"
4214 , FUNC_ADPT_ARG(padapter));
4215 return _FAIL;
4216 }
4217
4218 mem_start = pframe = (u8 *)(pxmitframe->buf_addr) + TXDESC_OFFSET;
4219 pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
4220 subtype = get_frame_sub_type(pframe); /* bit(7)~bit(2) */
4221
4222 /* check if robust mgmt frame */
4223 if (subtype != WIFI_DEAUTH && subtype != WIFI_DISASSOC && subtype != WIFI_ACTION)
4224 return _SUCCESS;
4225 if (subtype == WIFI_ACTION) {
4226 category = *(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
4227 if (CATEGORY_IS_NON_ROBUST(category))
4228 return _SUCCESS;
4229 }
4230 if (!bmcst) {
4231 if (pattrib->psta)
4232 psta = pattrib->psta;
4233 else
4234 pattrib->psta = psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
4235 if (psta == NULL) {
4236 RTW_INFO(FUNC_ADPT_FMT" unicast sta == NULL\n", FUNC_ADPT_ARG(padapter));
4237 return _FAIL;
4238 }
4239 if (!(psta->flags & WLAN_STA_MFP)) {
4240 /* peer is not MFP capable, no need to encrypt */
4241 return _SUCCESS;
4242 }
4243 if (psta->bpairwise_key_installed != _TRUE) {
4244 RTW_INFO(FUNC_ADPT_FMT" PTK is not installed\n"
4245 , FUNC_ADPT_ARG(padapter));
4246 return _FAIL;
4247 }
4248 }
4249
4250 ori_len = BIP_AAD_SIZE + pattrib->pktlen + _MME_IE_LENGTH_;
4251 tmp_buf = BIP_AAD = rtw_zmalloc(ori_len);
4252 if (BIP_AAD == NULL)
4253 return _FAIL;
4254
4255 _rtw_spinlock_bh(&padapter->security_key_mutex);
4256
4257 if (bmcst) {
4258 if (subtype == WIFI_ACTION && CATEGORY_IS_GROUP_PRIVACY(category)) {
4259 /* broadcast group privacy action frame */
4260 #if DBG_MGMT_XMIT_COALESEC_DUMP
4261 RTW_INFO(FUNC_ADPT_FMT" broadcast gp action(%u)\n"
4262 , FUNC_ADPT_ARG(padapter), category);
4263 #endif
4264
4265 if (pattrib->psta)
4266 psta = pattrib->psta;
4267 else
4268 pattrib->psta = psta = rtw_get_bcmc_stainfo(padapter);
4269 if (psta == NULL) {
4270 RTW_INFO(FUNC_ADPT_FMT" broadcast sta == NULL\n"
4271 , FUNC_ADPT_ARG(padapter));
4272 goto xmitframe_coalesce_fail;
4273 }
4274 if (padapter->securitypriv.binstallGrpkey != _TRUE) {
4275 RTW_INFO(FUNC_ADPT_FMT" GTK is not installed\n"
4276 , FUNC_ADPT_ARG(padapter));
4277 goto xmitframe_coalesce_fail;
4278 }
4279
4280 pn = &psta->dot11txpn;
4281 cipher = padapter->securitypriv.dot118021XGrpPrivacy;
4282 kid = padapter->securitypriv.dot118021XGrpKeyid;
4283 } else {
4284 #ifdef CONFIG_IEEE80211W
4285 /* broadcast robust mgmt frame, using BIP */
4286 int frame_body_len;
4287 u8 mic[16];
4288
4289 /* IGTK is not installed, e.g. mesh MFP without IGTK */
4290 if (SEC_IS_BIP_KEY_INSTALLED(&padapter->securitypriv) != _TRUE)
4291 goto xmitframe_coalesce_success;
4292
4293 #if DBG_MGMT_XMIT_COALESEC_DUMP
4294 if (subtype == WIFI_DEAUTH)
4295 RTW_INFO(FUNC_ADPT_FMT" broadcast deauth\n", FUNC_ADPT_ARG(padapter));
4296 else if (subtype == WIFI_DISASSOC)
4297 RTW_INFO(FUNC_ADPT_FMT" broadcast disassoc\n", FUNC_ADPT_ARG(padapter));
4298 else if (subtype == WIFI_ACTION) {
4299 RTW_INFO(FUNC_ADPT_FMT" broadcast action(%u)\n"
4300 , FUNC_ADPT_ARG(padapter), category);
4301 }
4302 #endif
4303
4304 /* HW encryption needs the encrypt type recorded */
4305 pattrib->encrypt = padapter->securitypriv.dot11wCipher;
4306
4307 _rtw_memset(mme_cont, 0, _MME_IE_LENGTH_ - 2);
4308 mme_clen = padapter->securitypriv.dot11wCipher == _BIP_CMAC_128_ ? 16 : 24;
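/* MME content = 2-octet KeyID + 6-octet IPN + MIC
 * (8 octets for BIP-CMAC-128, 16 octets for the other BIP ciphers) */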
4309
4310 MGMT_body = pframe + sizeof(struct rtw_ieee80211_hdr_3addr);
4311 pframe += pattrib->pktlen;
4312
4313 /* octets 0 and 1 carry the key index; the BIP keyid is 4 or 5, so only octet 0 (the LSB) is needed */
4314 mme_cont[0] = padapter->securitypriv.dot11wBIPKeyid;
4315 /* increase PN and apply to packet */
4316 padapter->securitypriv.dot11wBIPtxpn.val++;
4317 RTW_PUT_LE64(&mme_cont[2], padapter->securitypriv.dot11wBIPtxpn.val);
4318
4319 /* add the MME IE with an all-zero MIC; mme_cont does not include the element id and length */
4320 pframe = rtw_set_ie(pframe, _MME_IE_, mme_clen, mme_cont, &(pattrib->pktlen));
4321 pattrib->last_txcmdsz = pattrib->pktlen;
4322
4323 if (pattrib->encrypt &&
4324 (padapter->securitypriv.sw_encrypt == _TRUE || padapter->securitypriv.hw_decrypted == _FALSE)) {
4325 pattrib->bswenc = _TRUE;
4326 } else {
4327 /* currently HW only support _BIP_CMAC_128_ */
4328 if (pattrib->encrypt == _BIP_CMAC_128_)
4329 pattrib->bswenc = _FALSE;
4330 else
4331 pattrib->bswenc = _TRUE;
4332 }
4333
4334 if (!pattrib->bswenc) {
4335 pattrib->key_idx = padapter->securitypriv.dot11wBIPKeyid;
4336 /* no need to append the MIC part of the MME */
4337 pattrib->pktlen -= (mme_clen == 16 ? 8 : 16);
4338 pattrib->last_txcmdsz = pattrib->pktlen;
4339 goto xmitframe_coalesce_success;
4340 }
4341
4342 /* total frame length - header length */
4343 frame_body_len = pattrib->pktlen - sizeof(struct rtw_ieee80211_hdr_3addr);
4344
4345 /* construct AAD: copy the frame control field */
4346 _rtw_memcpy(BIP_AAD, &pwlanhdr->frame_ctl, 2);
4347 ClearRetry(BIP_AAD);
4348 ClearPwrMgt(BIP_AAD);
4349 ClearMData(BIP_AAD);
4350 /* construct AAD: copy address 1 through address 3 */
4351 _rtw_memcpy(BIP_AAD + 2, pwlanhdr->addr1, 18);
4352 /* copy the management frame body */
4353 _rtw_memcpy(BIP_AAD + BIP_AAD_SIZE, MGMT_body, frame_body_len);
4354
4355 #if DBG_MGMT_XMIT_BIP_DUMP
4356 /* dump total packet include MME with zero MIC */
4357 {
4358 int i;
4359 printk("Total packet: ");
4360 for (i = 0; i < BIP_AAD_SIZE + frame_body_len; i++)
4361 printk(" %02x ", BIP_AAD[i]);
4362 printk("\n");
4363 }
4364 #endif
4365
4366 /* calculate mic */
4367 if (rtw_calculate_bip_mic(padapter->securitypriv.dot11wCipher,
4368 (u8 *)pwlanhdr, pattrib->pktlen,
4369 padapter->securitypriv.dot11wBIPKey[padapter->securitypriv.dot11wBIPKeyid].skey,
4370 BIP_AAD, (BIP_AAD_SIZE + frame_body_len), mic) == _FAIL)
4371 goto xmitframe_coalesce_fail;
4372
4373 #if DBG_MGMT_XMIT_BIP_DUMP
4374 /* dump calculated mic result */
4375 {
4376 int i;
4377 printk("Calculated mic result: ");
4378 for (i = 0; i < 16; i++)
4379 printk(" %02x ", mic[i]);
4380 printk("\n");
4381 }
4382 #endif
4383
4384 /* copy the computed BIP MIC: 8 octets (bits 0~63) for BIP-CMAC-128, 16 octets otherwise */
4385 if (padapter->securitypriv.dot11wCipher == _BIP_CMAC_128_)
4386 _rtw_memcpy(pframe - 8, mic, 8);
4387 else
4388 _rtw_memcpy(pframe - 16, mic, 16);
4389
4390 #if DBG_MGMT_XMIT_BIP_DUMP
4391 /*dump all packet after mic ok */
4392 {
4393 int pp;
4394 printk("pattrib->pktlen = %d\n", pattrib->pktlen);
4395 for (pp = 0; pp < pattrib->pktlen; pp++)
4396 printk(" %02x ", mem_start[pp]);
4397 printk("\n");
4398 }
4399 #endif
4400
4401 #endif /* CONFIG_IEEE80211W */
4402
4403 goto xmitframe_coalesce_success;
4404 }
4405 } else {
4406 /* unicast robust mgmt frame */
4407 #if DBG_MGMT_XMIT_COALESEC_DUMP
4408 if (subtype == WIFI_DEAUTH) {
4409 RTW_INFO(FUNC_ADPT_FMT" unicast deauth to "MAC_FMT"\n"
4410 , FUNC_ADPT_ARG(padapter), MAC_ARG(pattrib->ra));
4411 } else if (subtype == WIFI_DISASSOC) {
4412 RTW_INFO(FUNC_ADPT_FMT" unicast disassoc to "MAC_FMT"\n"
4413 , FUNC_ADPT_ARG(padapter), MAC_ARG(pattrib->ra));
4414 } else if (subtype == WIFI_ACTION) {
4415 RTW_INFO(FUNC_ADPT_FMT" unicast action(%u) to "MAC_FMT"\n"
4416 , FUNC_ADPT_ARG(padapter), category, MAC_ARG(pattrib->ra));
4417 }
4418 #endif
4419
4420 pn = &psta->dot11txpn;
4421 cipher = psta->dot118021XPrivacy;
4422 kid = 0;
4423
4424 _rtw_memcpy(pattrib->dot118021x_UncstKey.skey
4425 , psta->dot118021x_UncstKey.skey
4426 , (cipher & _SEC_TYPE_256_) ? 32 : 16);
4427
4428 /* deliberately corrupt the key for the IEEE80211W_WRONG_KEY test case */
4429 if (pattrib->key_type == IEEE80211W_WRONG_KEY) {
4430 RTW_INFO("use wrong key\n");
4431 pattrib->dot118021x_UncstKey.skey[0] = 0xff;
4432 }
4433 }
4434
4435 #if DBG_MGMT_XMIT_ENC_DUMP
4436 /* before encrypt dump the management packet content */
4437 {
4438 int i;
4439 printk("Management pkt: ");
4440 for (i = 0; i < pattrib->pktlen; i++)
4441 printk(" %02x ", pframe[i]);
4442 printk("=======\n");
4443 }
4444 #endif
4445
4446 /* back up the original management packet */
4447 _rtw_memcpy(tmp_buf, pframe, pattrib->pktlen);
4448 /* move to data portion */
4449 pframe += pattrib->hdrlen;
4450
4451 if (pattrib->key_type != IEEE80211W_NO_KEY) {
4452 pattrib->encrypt = cipher;
4453 pattrib->bswenc = _TRUE;
4454 }
4455
4456 /*
4457 * 802.11w encrypted management packet must be:
4458 * _AES_, _CCMP_256_, _GCMP_, _GCMP_256_
4459 */
4460 switch (pattrib->encrypt) {
4461 case _AES_:
4462 pattrib->iv_len = 8;
4463 pattrib->icv_len = 8;
4464 AES_IV(pattrib->iv, (*pn), kid);
4465 break;
4466 case _CCMP_256_:
4467 pattrib->iv_len = 8;
4468 pattrib->icv_len = 16;
4469 AES_IV(pattrib->iv, (*pn), kid);
4470 break;
4471 case _GCMP_:
4472 case _GCMP_256_:
4473 pattrib->iv_len = 8;
4474 pattrib->icv_len = 16;
4475 GCMP_IV(pattrib->iv, (*pn), kid);
4476 break;
4477 default:
4478 goto xmitframe_coalesce_fail;
4479 }
4480
4481 /* insert iv header into management frame */
4482 _rtw_memcpy(pframe, pattrib->iv, pattrib->iv_len);
4483 pframe += pattrib->iv_len;
4484 /* copy mgmt data portion after CCMP header */
4485 _rtw_memcpy(pframe, tmp_buf + pattrib->hdrlen, pattrib->pktlen - pattrib->hdrlen);
4486 /* move pframe to end of mgmt pkt */
4487 pframe += pattrib->pktlen - pattrib->hdrlen;
4488 /* add 8 bytes CCMP IV header to length */
4489 pattrib->pktlen += pattrib->iv_len;
4490
4491 #if DBG_MGMT_XMIT_ENC_DUMP
4492 /* dump management packet include AES IV header */
4493 {
4494 int i;
4495 printk("Management pkt + IV: ");
4496 /* for(i=0; i<pattrib->pktlen; i++) */
4497
4498 printk("@@@@@@@@@@@@@\n");
4499 }
4500 #endif
4501
4502 if (pattrib->encrypt &&
4503 (padapter->securitypriv.sw_encrypt == _TRUE || psta->hw_decrypted == _FALSE)) {
4504 pattrib->bswenc = _TRUE;
4505 } else {
4506 /* only right key can use HW encrypt */
4507 if (pattrib->key_type == IEEE80211W_RIGHT_KEY)
4508 pattrib->bswenc = _FALSE;
4509 else
4510 pattrib->bswenc = _TRUE;
4511 }
4512
4513 /* the security CAM may already have been cleared at this point --> use SW encryption */
4514 if (subtype == WIFI_DEAUTH || subtype == WIFI_DISASSOC)
4515 pattrib->bswenc = _TRUE;
4516 if (!pattrib->bswenc) {
4517 pattrib->key_idx = kid;
4518 pattrib->last_txcmdsz = pattrib->pktlen;
4519 SetPrivacy(mem_start);
4520 goto xmitframe_coalesce_success;
4521 }
4522
4523 if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
4524 _rtw_memcpy(pframe, pattrib->icv, pattrib->icv_len);
4525 pframe += pattrib->icv_len;
4526 }
4527 /* account for the MIC (icv_len bytes) */
4528 pattrib->pktlen += pattrib->icv_len;
4529 /* set final tx command size */
4530 pattrib->last_txcmdsz = pattrib->pktlen;
4531
4532 /* the Protected bit must be set before SW encryption */
4533 SetPrivacy(mem_start);
4534
4535 #if DBG_MGMT_XMIT_ENC_DUMP
4536 /* dump management packet include AES header */
4537 {
4538 int i;
4539 printk("prepare to enc Management pkt + IV: ");
4540 for (i = 0; i < pattrib->pktlen; i++)
4541 printk(" %02x ", mem_start[i]);
4542 printk("@@@@@@@@@@@@@\n");
4543 }
4544 #endif
4545
4546 /* software encryption */
4547 /* moved to core_wlan_sw_encrypt() because of the new txreq architecture */
4548
4549 xmitframe_coalesce_success:
4550 _rtw_spinunlock_bh(&padapter->security_key_mutex);
4551 rtw_mfree(BIP_AAD, ori_len);
4552 return _SUCCESS;
4553
4554 xmitframe_coalesce_fail:
4555 _rtw_spinunlock_bh(&padapter->security_key_mutex);
4556 rtw_mfree(BIP_AAD, ori_len);
4557
4558 return _FAIL;
4559 }
4560 #endif /* defined(CONFIG_IEEE80211W) || defined(CONFIG_RTW_MESH) */
4561
4562 /* Logical Link Control(LLC) SubNetwork Attachment Point(SNAP) header
4563 * IEEE LLC/SNAP header contains 8 octets
4564 * First 3 octets comprise the LLC portion
4565 * SNAP portion, 5 octets, is divided into two fields:
4566 * Organizationally Unique Identifier(OUI), 3 octets,
4567 * type, defined by that organization, 2 octets.
4568 */
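/*
 * Example (illustrative): for IPv4 (h_proto 0x0800) the emitted 8 octets are
 * AA AA 03 00 00 00 08 00 (RFC 1042 OUI); for IPX (0x8137) or AppleTalk AARP
 * (0x80f3) the 802.1H bridge-tunnel OUI 00 00 F8 is used instead.
 */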
4569 s32 rtw_put_snap(u8 *data, u16 h_proto)
4570 {
4571 struct ieee80211_snap_hdr *snap;
4572 u8 *oui;
4573
4574
4575 snap = (struct ieee80211_snap_hdr *)data;
4576 snap->dsap = 0xaa;
4577 snap->ssap = 0xaa;
4578 snap->ctrl = 0x03;
4579
4580 if (h_proto == 0x8137 || h_proto == 0x80f3)
4581 oui = P802_1H_OUI;
4582 else
4583 oui = RFC1042_OUI;
4584
4585 snap->oui[0] = oui[0];
4586 snap->oui[1] = oui[1];
4587 snap->oui[2] = oui[2];
4588
4589 *(u16 *)(data + SNAP_SIZE) = htons(h_proto);
4590
4591
4592 return SNAP_SIZE + sizeof(u16);
4593 }
4594
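/*
 * AUTO_VCS behavior implemented below: parse the ERP Information IE from the
 * given IEs; if its Use_Protection bit (BIT(1) of the ERP info octet) is set,
 * choose RTS/CTS or CTS-to-self according to registry vcs_type, else no VCS.
 */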
4595 void rtw_update_protection(_adapter *padapter, u8 *ie, uint ie_len)
4596 {
4597
4598 uint protection;
4599 u8 *perp;
4600 sint erp_len;
4601 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4602 struct registry_priv *pregistrypriv = &padapter->registrypriv;
4603
4604
4605 switch (pxmitpriv->vcs_setting) {
4606 case DISABLE_VCS:
4607 pxmitpriv->vcs = NONE_VCS;
4608 break;
4609
4610 case ENABLE_VCS:
4611 break;
4612
4613 case AUTO_VCS:
4614 default:
4615 perp = rtw_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
4616 if (perp == NULL)
4617 pxmitpriv->vcs = NONE_VCS;
4618 else {
4619 protection = (*(perp + 2)) & BIT(1);
4620 if (protection) {
4621 if (pregistrypriv->vcs_type == RTS_CTS)
4622 pxmitpriv->vcs = RTS_CTS;
4623 else
4624 pxmitpriv->vcs = CTS_TO_SELF;
4625 } else
4626 pxmitpriv->vcs = NONE_VCS;
4627 }
4628
4629 break;
4630
4631 }
4632
4633
4634 }
4635
4636 #ifdef CONFIG_CORE_TXSC
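/*
 * Accounts TX statistics per txreq: for data packets, sz is the payload size,
 * i.e. mdata.pktlen minus the LLC header, the 802.11 header and the per-cipher
 * security overhead subtracted in the switch below (4 for WEP, 8 for
 * TKIP/CCMP/GCMP variants, 18 for WAPI).
 */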
4637 void rtw_count_tx_stats_tx_req(_adapter *padapter, struct rtw_xmit_req *txreq, struct sta_info *psta)
4638 {
4639 struct stainfo_stats *pstats = NULL;
4640 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4641 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
4642 u32 sz = 0;
4643
4644 if (txreq->mdata.type == RTW_PHL_PKT_TYPE_DATA) {
4645 pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
4646 pxmitpriv->tx_pkts++;
4647 sz = txreq->mdata.pktlen - RTW_SZ_LLC - txreq->mdata.hdr_len;
4648 switch (txreq->mdata.sec_type) {
4649 case RTW_ENC_WEP104:
4650 case RTW_ENC_WEP40:
4651 sz -= 4;
4652 break;
4653 case RTW_ENC_TKIP:
4654 sz -= 8;
4655 break;
4656 case RTW_ENC_CCMP:
4657 sz -= 8;
4658 break;
4659 case RTW_ENC_WAPI:
4660 sz -= 18;
4661 break;
4662 case RTW_ENC_GCMP256:
4663 case RTW_ENC_GCMP:
4664 case RTW_ENC_CCMP256:
4665 sz -= 8;
4666 break;
4667 default:
4668 break;
4669 }
4670 pxmitpriv->tx_bytes += sz;
4671 if (psta) {
4672 pstats = &psta->sta_stats;
4673 pstats->tx_pkts++;
4674 pstats->tx_bytes += sz;
4675 #if 0
4676 if (is_multicast_mac_addr(psta->phl_sta->mac_addr))
4677 pxmitpriv->tx_mc_pkts++;
4678 else if (is_broadcast_mac_addr(psta->phl_sta->mac_addr))
4679 pxmitpriv->tx_bc_pkts++;
4680 else
4681 pxmitpriv->tx_uc_pkts++;
4682 #endif
4683 }
4684 }
4685 }
4686 #endif
4687
4688 void rtw_count_tx_stats(_adapter *padapter, struct xmit_frame *pxmitframe, int sz)
4689 {
4690 struct sta_info *psta = NULL;
4691 struct stainfo_stats *pstats = NULL;
4692 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
4693 struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
4694
4695
4696 if (pxmitframe->xftype == RTW_TX_OS) {
4697 pmlmepriv->LinkDetectInfo.NumTxOkInPeriod++;
4698 pxmitpriv->tx_pkts++;
4699 pxmitpriv->tx_bytes += sz;
4700
4701 psta = pxmitframe->attrib.psta;
4702 if (psta) {
4703 pstats = &psta->sta_stats;
4704
4705 pstats->tx_pkts++;
4706 pstats->tx_bytes += sz;
4707 #if defined(CONFIG_CHECK_LEAVE_LPS) && defined(CONFIG_LPS_CHK_BY_TP)
4708 if (adapter_to_pwrctl(padapter)->lps_chk_by_tp)
4709 traffic_check_for_leave_lps_by_tp(padapter, _TRUE, psta);
4710 #endif /* CONFIG_LPS */
4711 }
4712
4713 #ifdef CONFIG_CHECK_LEAVE_LPS
4714 /* traffic_check_for_leave_lps(padapter, _TRUE); */
4715 #endif /* CONFIG_CHECK_LEAVE_LPS */
4716 }
4717 }
4718
4719 #if 0 /*CONFIG_CORE_XMITBUF*/
4720 static struct xmit_buf *__rtw_alloc_cmd_xmitbuf(struct xmit_priv *pxmitpriv,
4721 enum cmdbuf_type buf_type)
4722 {
4723 struct xmit_buf *pxmitbuf = NULL;
4724
4725
4726 pxmitbuf = &pxmitpriv->pcmd_xmitbuf[buf_type];
4727 if (pxmitbuf != NULL) {
4728 pxmitbuf->priv_data = NULL;
4729
4730 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
4731 pxmitbuf->len = 0;
4732 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
4733 pxmitbuf->agg_num = 0;
4734 pxmitbuf->pg_num = 0;
4735 #endif
4736 #ifdef CONFIG_PCI_HCI
4737 pxmitbuf->len = 0;
4738 #ifdef CONFIG_TRX_BD_ARCH
4739 /*pxmitbuf->buf_desc = NULL;*/
4740 #else
4741 pxmitbuf->desc = NULL;
4742 #endif
4743 #endif
4744
4745 if (pxmitbuf->sctx) {
4746 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4747 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
4748 }
4749 } else
4750 RTW_INFO("%s fail, no xmitbuf available !!!\n", __func__);
4751
4752 return pxmitbuf;
4753 }
4754
4755 struct xmit_frame *__rtw_alloc_cmdxmitframe(struct xmit_priv *pxmitpriv,
4756 enum cmdbuf_type buf_type)
4757 {
4758 struct xmit_frame *pcmdframe;
4759 struct xmit_buf *pxmitbuf;
4760
4761 pcmdframe = rtw_alloc_xmitframe(pxmitpriv);
4762 if (pcmdframe == NULL) {
4763 RTW_INFO("%s, alloc xmitframe fail\n", __FUNCTION__);
4764 return NULL;
4765 }
4766
4767 pxmitbuf = __rtw_alloc_cmd_xmitbuf(pxmitpriv, buf_type);
4768 if (pxmitbuf == NULL) {
4769 RTW_INFO("%s, alloc xmitbuf fail\n", __FUNCTION__);
4770 rtw_free_xmitframe(pxmitpriv, pcmdframe);
4771 return NULL;
4772 }
4773
4774 pcmdframe->frame_tag = MGNT_FRAMETAG;
4775
4776 pcmdframe->pxmitbuf = pxmitbuf;
4777
4778 pcmdframe->buf_addr = pxmitbuf->pbuf;
4779
4780 /* initialize the command buffer to zero */
4781 _rtw_memset(pcmdframe->buf_addr, 0, MAX_CMDBUF_SZ);
4782
4783 pxmitbuf->priv_data = pcmdframe;
4784
4785 return pcmdframe;
4786
4787 }
4788
4789 struct xmit_buf *rtw_alloc_xmitbuf_ext(struct xmit_priv *pxmitpriv)
4790 {
4791 struct xmit_buf *pxmitbuf = NULL;
4792 _list *plist, *phead;
4793 _queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
4794 unsigned long sp_flags;
4795
4796 _rtw_spinlock_irq(&pfree_queue->lock, &sp_flags);
4797
4798 if (_rtw_queue_empty(pfree_queue) == _TRUE)
4799 pxmitbuf = NULL;
4800 else {
4801
4802 phead = get_list_head(pfree_queue);
4803
4804 plist = get_next(phead);
4805
4806 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
4807
4808 rtw_list_delete(&(pxmitbuf->list));
4809 }
4810
4811 if (pxmitbuf != NULL) {
4812 pxmitpriv->free_xmit_extbuf_cnt--;
4813 #ifdef DBG_XMIT_BUF_EXT
4814 RTW_INFO("DBG_XMIT_BUF_EXT ALLOC no=%d, free_xmit_extbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
4815 #endif
4816
4817
4818 pxmitbuf->priv_data = NULL;
4819
4820 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
4821 pxmitbuf->len = 0;
4822 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
4823 pxmitbuf->agg_num = 1;
4824 #endif
4825 #ifdef CONFIG_PCI_HCI
4826 pxmitbuf->len = 0;
4827 #ifdef CONFIG_TRX_BD_ARCH
4828 /*pxmitbuf->buf_desc = NULL;*/
4829 #else
4830 pxmitbuf->desc = NULL;
4831 #endif
4832 #endif
4833
4834 if (pxmitbuf->sctx) {
4835 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4836 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
4837 }
4838
4839 }
4840
4841 _rtw_spinunlock_irq(&pfree_queue->lock, &sp_flags);
4842
4843
4844 return pxmitbuf;
4845 }
4846
4847 s32 rtw_free_xmitbuf_ext(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
4848 {
4849 _queue *pfree_queue = &pxmitpriv->free_xmit_extbuf_queue;
4850 unsigned long sp_flags;
4851
4852 if (pxmitbuf == NULL)
4853 return _FAIL;
4854
4855 _rtw_spinlock_irq(&pfree_queue->lock, &sp_flags);
4856
4857 rtw_list_delete(&pxmitbuf->list);
4858
4859 rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_queue));
4860 pxmitpriv->free_xmit_extbuf_cnt++;
4861 #ifdef DBG_XMIT_BUF_EXT
4862 RTW_INFO("DBG_XMIT_BUF_EXT FREE no=%d, free_xmit_extbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmit_extbuf_cnt);
4863 #endif
4864
4865 _rtw_spinunlock_irq(&pfree_queue->lock, &sp_flags);
4866
4867
4868 return _SUCCESS;
4869 }
4870
4871 struct xmit_buf *rtw_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
4872 {
4873 struct xmit_buf *pxmitbuf = NULL;
4874 _list *plist, *phead;
4875 _queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
4876 unsigned long sp_flags;
4877
4878 /* RTW_INFO("+rtw_alloc_xmitbuf\n"); */
4879
4880 _rtw_spinlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4881
4882 if (_rtw_queue_empty(pfree_xmitbuf_queue) == _TRUE)
4883 pxmitbuf = NULL;
4884 else {
4885
4886 phead = get_list_head(pfree_xmitbuf_queue);
4887
4888 plist = get_next(phead);
4889
4890 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
4891
4892 rtw_list_delete(&(pxmitbuf->list));
4893 }
4894
4895 if (pxmitbuf != NULL) {
4896 pxmitpriv->free_xmitbuf_cnt--;
4897 #ifdef DBG_XMIT_BUF
4898 RTW_INFO("DBG_XMIT_BUF ALLOC no=%d, free_xmitbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
4899 #endif
4900 /* RTW_INFO("alloc, free_xmitbuf_cnt=%d\n", pxmitpriv->free_xmitbuf_cnt); */
4901
4902 pxmitbuf->priv_data = NULL;
4903
4904 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
4905 pxmitbuf->len = 0;
4906 pxmitbuf->pdata = pxmitbuf->ptail = pxmitbuf->phead;
4907 pxmitbuf->agg_num = 0;
4908 pxmitbuf->pg_num = 0;
4909 #endif
4910 #ifdef CONFIG_PCI_HCI
4911 pxmitbuf->len = 0;
4912 #ifdef CONFIG_TRX_BD_ARCH
4913 /*pxmitbuf->buf_desc = NULL;*/
4914 #else
4915 pxmitbuf->desc = NULL;
4916 #endif
4917 #endif
4918
4919 if (pxmitbuf->sctx) {
4920 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4921 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_ALLOC);
4922 }
4923 }
4924 #ifdef DBG_XMIT_BUF
4925 else
4926 RTW_INFO("DBG_XMIT_BUF rtw_alloc_xmitbuf return NULL\n");
4927 #endif
4928
4929 _rtw_spinunlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4930
4931
4932 return pxmitbuf;
4933 }
4934
4935 s32 rtw_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
4936 {
4937 _queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
4938 unsigned long sp_flags;
4939
4940 /* RTW_INFO("+rtw_free_xmitbuf\n"); */
4941
4942 if (pxmitbuf == NULL)
4943 return _FAIL;
4944
4945 if (pxmitbuf->sctx) {
4946 RTW_INFO("%s pxmitbuf->sctx is not NULL\n", __func__);
4947 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_BUF_FREE);
4948 }
4949
4950 if (pxmitbuf->buf_tag == XMITBUF_CMD) {
4951 } else if (pxmitbuf->buf_tag == XMITBUF_MGNT)
4952 rtw_free_xmitbuf_ext(pxmitpriv, pxmitbuf);
4953 else {
4954 _rtw_spinlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4955
4956 rtw_list_delete(&pxmitbuf->list);
4957
4958 rtw_list_insert_tail(&(pxmitbuf->list), get_list_head(pfree_xmitbuf_queue));
4959
4960 pxmitpriv->free_xmitbuf_cnt++;
4961 /* RTW_INFO("FREE, free_xmitbuf_cnt=%d\n", pxmitpriv->free_xmitbuf_cnt); */
4962 #ifdef DBG_XMIT_BUF
4963 RTW_INFO("DBG_XMIT_BUF FREE no=%d, free_xmitbuf_cnt=%d\n", pxmitbuf->no, pxmitpriv->free_xmitbuf_cnt);
4964 #endif
4965 _rtw_spinunlock_irq(&pfree_xmitbuf_queue->lock, &sp_flags);
4966 }
4967
4968
4969 return _SUCCESS;
4970 }
4971 #endif
4972
4973 void rtw_init_xmitframe(struct xmit_frame *pxframe)
4974 {
4975 if (pxframe != NULL) { /* default value setting */
4976 #if 0 /*CONFIG_CORE_XMITBUF*/
4977 pxframe->buf_addr = NULL;
4978 pxframe->pxmitbuf = NULL;
4979 #endif
4980
4981 _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
4982 /* pxframe->attrib.psta = NULL; */
4983
4984 pxframe->frame_tag = DATA_FRAMETAG;
4985
4986 #ifdef CONFIG_USB_HCI
4987 pxframe->pkt = NULL;
4988 #ifdef USB_PACKET_OFFSET_SZ
4989 pxframe->pkt_offset = (PACKET_OFFSET_SZ / 8);
4990 #else
4991 pxframe->pkt_offset = 1;/* default use pkt_offset to fill tx desc */
4992 #endif
4993
4994 #ifdef CONFIG_USB_TX_AGGREGATION
4995 pxframe->agg_num = 1;
4996 #endif
4997
4998 #endif /* #ifdef CONFIG_USB_HCI */
4999
5000 #if defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
5001 pxframe->pg_num = 1;
5002 pxframe->agg_num = 1;
5003 #endif
5004
5005 #ifdef CONFIG_XMIT_ACK
5006 pxframe->ack_report = 0;
5007 #endif
5008 pxframe->txfree_cnt = 0;
5009 }
5010 }
5011
5012 /*
5013 Calling context:
5014 1. OS_TXENTRY
5015 2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
5016
5017 If USE_RXTHREAD is enabled, no critical section is needed.
5018 Otherwise, _enter/_exit critical sections must be used to protect free_xmit_queue...
5019 
5020 Be very cautious...
5021
5022 */
5023
5024 #ifdef RTW_PHL_TX
5025 void core_tx_init_xmitframe(struct xmit_frame *pxframe)
5026 {
5027 if (!pxframe)
5028 return;
5029 #if 0 /*CONFIG_CORE_XMITBUF*/
5030 pxframe->pxmitbuf = NULL;
5031 #endif
5032 _rtw_memset(&pxframe->attrib, 0, sizeof(struct pkt_attrib));
5033 /* TXREQ_QMGT */
5034 pxframe->ptxreq_buf = NULL;
5035 pxframe->phl_txreq = NULL;
5036
5037 pxframe->txreq_cnt = 0;
5038 pxframe->txfree_cnt = 0;
5039 }
5040
5041 s32 core_tx_alloc_xmitframe(_adapter *padapter, struct xmit_frame **pxmitframe, u16 os_qid)
5042 {
5043 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5044 struct xmit_frame *pxframe = NULL;
5045 _queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
5046 _list *plist, *phead;
5047
5048 PHLTX_LOG;
5049
5050 _rtw_spinlock_bh(&pfree_xmit_queue->lock);
5051
5052 if (_rtw_queue_empty(pfree_xmit_queue) == _TRUE) {
5053 _rtw_spinunlock_bh(&pfree_xmit_queue->lock);
5054 return FAIL;
5055 } else {
5056 phead = get_list_head(pfree_xmit_queue);
5057
5058 plist = get_next(phead);
5059
5060 pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5061
5062 rtw_list_delete(&pxframe->list);
5063 pxmitpriv->free_xmitframe_cnt--;
5064 pxframe->os_qid = os_qid;
5065 }
5066
5067 _rtw_spinunlock_bh(&pfree_xmit_queue->lock);
5068 rtw_os_check_stop_queue(pxmitpriv->adapter, os_qid);
5069 core_tx_init_xmitframe(pxframe);
5070
5071 *pxmitframe = pxframe;
5072 return SUCCESS;
5073 }
5074
5075 s32 core_tx_free_xmitframe(_adapter *padapter, struct xmit_frame *pxframe)
5076 {
5077 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5078 _queue *queue = NULL;
5079 /* TXREQ_QMGT */
5080 struct xmit_txreq_buf *ptxreq_buf = NULL;
5081 int i;
5082 struct rtw_xmit_req *txreq = NULL;
5083 struct rtw_pkt_buf_list *pkt_list = NULL;
5084
5085 PHLTX_LOG;
5086
5087 if (pxframe == NULL)
5088 goto exit;
5089
5090 /* TXREQ_QMGT */
5091 ptxreq_buf = pxframe->ptxreq_buf;
5092
5093 pxframe->txfree_cnt++;
5094
5095 /* TODO: should verify each txreq is freed exactly once (e.g. 1,2,3 and not 2,2,3) */
5096 /* TODO: rtw_alloc_xmitframe_once case; appears to be unused */
5097
5098 if (pxframe->txfree_cnt < pxframe->txreq_cnt)
5099 goto exit;
5100
5101 #if 0 /*CONFIG_CORE_XMITBUF*/
5102 if (pxframe->pxmitbuf)
5103 rtw_free_xmitbuf(pxmitpriv, pxframe->pxmitbuf);
5104 #endif
5105
5106 for (i = 0; i < pxframe->txreq_cnt; i++) {
5107 if (!pxframe->buf_need_free)
5108 break;
5109 if (!(pxframe->buf_need_free & BIT(i)))
5110 continue;
5111 pxframe->buf_need_free &= ~BIT(i);
5112
5113 txreq = &pxframe->phl_txreq[i];
5114 rtw_warn_on(txreq->pkt_cnt != 1);
5115 pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
5116 if (pkt_list->vir_addr && pkt_list->length)
5117 rtw_mfree(pkt_list->vir_addr, pkt_list->length);
5118 }
5119
5120 if (ptxreq_buf) {
5121 queue = &padapter->free_txreq_queue;
5122 _rtw_spinlock_bh(&queue->lock);
5123
5124 rtw_list_delete(&ptxreq_buf->list);
5125 rtw_list_insert_tail(&ptxreq_buf->list, get_list_head(queue));
5126
5127 padapter->free_txreq_cnt++;
5128 _rtw_spinunlock_bh(&queue->lock);
5129 } else {
5130 if (pxframe->ext_tag == 0)
5131 ;//printk("%s:tx recycle: ptxreq_buf=NULL\n", __FUNCTION__);
5132 }
5133
5134 rtw_os_xmit_complete(padapter, pxframe);
5135
5136 if (pxframe->ext_tag == 0)
5137 queue = &pxmitpriv->free_xmit_queue;
5138 else if (pxframe->ext_tag == 1)
5139 queue = &pxmitpriv->free_xframe_ext_queue;
5140 else
5141 rtw_warn_on(1);
5142
5143 _rtw_spinlock_bh(&queue->lock);
5144
5145 rtw_list_delete(&pxframe->list);
5146 rtw_list_insert_tail(&pxframe->list, get_list_head(queue));
5147
5148 if (pxframe->ext_tag == 0)
5149 pxmitpriv->free_xmitframe_cnt++;
5150 else if (pxframe->ext_tag == 1)
5151 pxmitpriv->free_xframe_ext_cnt++;
5152
5153 _rtw_spinunlock_bh(&queue->lock);
5154
5155 if (queue == &pxmitpriv->free_xmit_queue)
5156 rtw_os_check_wakup_queue(padapter, pxframe->os_qid);
5157
5158 exit:
5159 return _SUCCESS;
5160 }
5161
5162 #endif
5163
5164 struct xmit_frame *rtw_alloc_xmitframe(struct xmit_priv *pxmitpriv, u16 os_qid)/* (_queue *pfree_xmit_queue) */
5165 {
5166 /*
5167 Remember to use the osdep_service APIs and lock/unlock or
5168 _enter/_exit critical sections to protect
5169 pfree_xmit_queue
5170 */
5171
5172 struct xmit_frame *pxframe = NULL;
5173 _list *plist, *phead;
5174 _queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
5175
5176
5177 _rtw_spinlock_bh(&pfree_xmit_queue->lock);
5178
5179 if (_rtw_queue_empty(pfree_xmit_queue) == _TRUE) {
5180 pxframe = NULL;
5181 } else {
5182 phead = get_list_head(pfree_xmit_queue);
5183
5184 plist = get_next(phead);
5185
5186 pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5187
5188 rtw_list_delete(&(pxframe->list));
5189 pxmitpriv->free_xmitframe_cnt--;
5190 pxframe->os_qid = os_qid;
5191 }
5192
5193 _rtw_spinunlock_bh(&pfree_xmit_queue->lock);
5194
5195 if (pxframe)
5196 rtw_os_check_stop_queue(pxmitpriv->adapter, os_qid);
5197
5198 rtw_init_xmitframe(pxframe);
5199
5200
5201 return pxframe;
5202 }
5203
5204 struct xmit_frame *rtw_alloc_xmitframe_ext(struct xmit_priv *pxmitpriv)
5205 {
5206 struct xmit_frame *pxframe = NULL;
5207 _list *plist, *phead;
5208 _queue *queue = &pxmitpriv->free_xframe_ext_queue;
5209
5210
5211 _rtw_spinlock_bh(&queue->lock);
5212
5213 if (_rtw_queue_empty(queue) == _TRUE) {
5214 pxframe = NULL;
5215 } else {
5216 phead = get_list_head(queue);
5217 plist = get_next(phead);
5218 pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5219
5220 rtw_list_delete(&(pxframe->list));
5221 pxmitpriv->free_xframe_ext_cnt--;
5222 }
5223
5224 _rtw_spinunlock_bh(&queue->lock);
5225
5226 rtw_init_xmitframe(pxframe);
5227
5228
5229 return pxframe;
5230 }
5231
5232 struct xmit_frame *rtw_alloc_xmitframe_once(struct xmit_priv *pxmitpriv)
5233 {
5234 struct xmit_frame *pxframe = NULL;
5235 u8 *alloc_addr;
5236
5237 alloc_addr = rtw_zmalloc(sizeof(struct xmit_frame) + 4);
5238
5239 if (alloc_addr == NULL)
5240 goto exit;
5241
5242 pxframe = (struct xmit_frame *)N_BYTE_ALIGMENT((SIZE_PTR)(alloc_addr), 4);
5243 pxframe->alloc_addr = alloc_addr;
5244
5245 pxframe->padapter = pxmitpriv->adapter;
5246 pxframe->frame_tag = NULL_FRAMETAG;
5247
5248 pxframe->pkt = NULL;
5249 #if 0 /*CONFIG_CORE_XMITBUF*/
5250 pxframe->buf_addr = NULL;
5251 pxframe->pxmitbuf = NULL;
5252 #endif
5253
5254 rtw_init_xmitframe(pxframe);
5255
5256 RTW_INFO("################## %s ##################\n", __func__);
5257
5258 exit:
5259 return pxframe;
5260 }
5261
5262 s32 rtw_free_xmitframe(struct xmit_priv *pxmitpriv, struct xmit_frame *pxmitframe)
5263 {
5264 _queue *queue = NULL;
5265 _adapter *padapter = pxmitpriv->adapter;
5266 struct sk_buff *pndis_pkt = NULL;
5267
5268
5269 if (pxmitframe == NULL) {
5270 goto exit;
5271 }
5272
5273 if (pxmitframe->pkt) {
5274 pndis_pkt = pxmitframe->pkt;
5275 pxmitframe->pkt = NULL;
5276 }
5277
5278 if (pxmitframe->alloc_addr) {
5279 RTW_INFO("################## %s with alloc_addr ##################\n", __func__);
5280 rtw_mfree(pxmitframe->alloc_addr, sizeof(struct xmit_frame) + 4);
5281 goto check_pkt_complete;
5282 }
5283
5284 if (pxmitframe->ext_tag == 0)
5285 queue = &pxmitpriv->free_xmit_queue;
5286 else if (pxmitframe->ext_tag == 1)
5287 queue = &pxmitpriv->free_xframe_ext_queue;
5288 else
5289 rtw_warn_on(1);
5290
5291 _rtw_spinlock_bh(&queue->lock);
5292
5293 rtw_list_delete(&pxmitframe->list);
5294 rtw_list_insert_tail(&pxmitframe->list, get_list_head(queue));
5295 if (pxmitframe->ext_tag == 0) {
5296 pxmitpriv->free_xmitframe_cnt++;
5297 } else if (pxmitframe->ext_tag == 1) {
5298 pxmitpriv->free_xframe_ext_cnt++;
5299 } else {
5300 }
5301
5302 _rtw_spinunlock_bh(&queue->lock);
5303 if (queue == &pxmitpriv->free_xmit_queue)
5304 rtw_os_check_wakup_queue(padapter, pxmitframe->os_qid);
5305
5306 check_pkt_complete:
5307
5308 if (pndis_pkt)
5309 rtw_os_pkt_complete(padapter, pndis_pkt);
5310
5311 exit:
5312
5313
5314 return _SUCCESS;
5315 }
5316
5317 void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, _queue *pframequeue)
5318 {
5319 _list *plist, *phead;
5320 struct xmit_frame *pxmitframe;
5321
5322
5323 _rtw_spinlock_bh(&(pframequeue->lock));
5324
5325 phead = get_list_head(pframequeue);
5326 plist = get_next(phead);
5327
5328 while (rtw_end_of_queue_search(phead, plist) == _FALSE) {
5329
5330 pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
5331
5332 plist = get_next(plist);
5333
5334 rtw_free_xmitframe(pxmitpriv, pxmitframe);
5335
5336 }
5337 _rtw_spinunlock_bh(&(pframequeue->lock));
5338
5339 }
5340
5341 s32 rtw_xmitframe_enqueue(_adapter *padapter, struct xmit_frame *pxmitframe)
5342 {
5343 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue);
5344 if (rtw_xmit_classifier(padapter, pxmitframe) == _FAIL) {
5345 /* pxmitframe->pkt = NULL; */
5346 return _FAIL;
5347 }
5348
5349 return _SUCCESS;
5350 }
5351
5352 static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, _queue *pframe_queue)
5353 {
5354 _list *xmitframe_plist, *xmitframe_phead;
5355 struct xmit_frame *pxmitframe = NULL;
5356
5357 xmitframe_phead = get_list_head(pframe_queue);
5358 xmitframe_plist = get_next(xmitframe_phead);
5359
5360 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
5361 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
5362
5363 /* xmitframe_plist = get_next(xmitframe_plist); */
5364
5365 /*#ifdef RTK_DMP_PLATFORM
5366 #ifdef CONFIG_USB_TX_AGGREGATION
5367 if((ptxservq->qcnt>0) && (ptxservq->qcnt<=2))
5368 {
5369 pxmitframe = NULL;
5370
5371 rtw_tasklet_schedule(&pxmitpriv->xmit_tasklet);
5372
5373 break;
5374 }
5375 #endif
5376 #endif*/
5377 rtw_list_delete(&pxmitframe->list);
5378
5379 ptxservq->qcnt--;
5380
5381 /* rtw_list_insert_tail(&pxmitframe->list, &phwxmit->pending); */
5382
5383 /* ptxservq->qcnt--; */
5384
5385 break;
5386
5387 /* pxmitframe = NULL; */
5388
5389 }
5390
5391 return pxmitframe;
5392 }
5393
5394 static struct xmit_frame *get_one_xmitframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit, struct tx_servq *ptxservq, _queue *pframe_queue)
5395 {
5396 _list *xmitframe_plist, *xmitframe_phead;
5397 struct xmit_frame *pxmitframe = NULL;
5398
5399 xmitframe_phead = get_list_head(pframe_queue);
5400 xmitframe_plist = get_next(xmitframe_phead);
5401
5402 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
5403 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
5404 break;
5405 }
5406
5407 return pxmitframe;
5408 }
5409
5410 struct xmit_frame *rtw_get_xframe(struct xmit_priv *pxmitpriv, int *num_frame)
5411 {
5412 _list *sta_plist, *sta_phead;
5413 struct hw_xmit *phwxmit_i = pxmitpriv->hwxmits;
5414 sint entry = pxmitpriv->hwxmit_entry;
5415
5416 struct hw_xmit *phwxmit;
5417 struct tx_servq *ptxservq = NULL;
5418 _queue *pframe_queue = NULL;
5419 struct xmit_frame *pxmitframe = NULL;
5420 _adapter *padapter = pxmitpriv->adapter;
5421 int i, inx[4];
5422
5423 inx[0] = 0;
5424 inx[1] = 1;
5425 inx[2] = 2;
5426 inx[3] = 3;
5427
5428 *num_frame = 0;
5429
5430 _rtw_spinlock_bh(&pxmitpriv->lock);
5431
5432 for (i = 0; i < entry; i++) {
5433 phwxmit = phwxmit_i + inx[i];
5434
5435 sta_phead = get_list_head(phwxmit->sta_queue);
5436 sta_plist = get_next(sta_phead);
5437
5438 while ((rtw_end_of_queue_search(sta_phead, sta_plist)) == _FALSE) {
5439
5440 ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
5441 pframe_queue = &ptxservq->sta_pending;
5442
5443 if (ptxservq->qcnt) {
5444 *num_frame = ptxservq->qcnt;
5445 pxmitframe = get_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
5446 goto exit;
5447 }
5448 sta_plist = get_next(sta_plist);
5449 }
5450 }
5451
5452 exit:
5453
5454 _rtw_spinunlock_bh(&pxmitpriv->lock);
5455
5456 return pxmitframe;
5457 }
5458
5459
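/*
 * Dequeue order sketch: hw_xmit entries are scanned in inx[] order, which is
 * 0..3 by default; in WMM certification mode (wifi_spec == 1) the order is
 * taken from pxmitpriv->wmm_para_seq[] instead.
 */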
5460 struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv, struct hw_xmit *phwxmit_i, sint entry)
5461 {
5462 _list *sta_plist, *sta_phead;
5463 struct hw_xmit *phwxmit;
5464 struct tx_servq *ptxservq = NULL;
5465 _queue *pframe_queue = NULL;
5466 struct xmit_frame *pxmitframe = NULL;
5467 _adapter *padapter = pxmitpriv->adapter;
5468 struct registry_priv *pregpriv = &padapter->registrypriv;
5469 int i, inx[4];
5470
5471 inx[0] = 0;
5472 inx[1] = 1;
5473 inx[2] = 2;
5474 inx[3] = 3;
5475
5476 if (pregpriv->wifi_spec == 1) {
5477 int j;
5478 #if 0
5479 if (flags < XMIT_QUEUE_ENTRY) {
5480 /* priority exchange according to the completed xmitbuf flags. */
5481 inx[flags] = 0;
5482 inx[0] = flags;
5483 }
5484 #endif
5485
5486 #if defined(CONFIG_USB_HCI) || defined(CONFIG_SDIO_HCI) || defined(CONFIG_PCI_HCI)
5487 for (j = 0; j < 4; j++)
5488 inx[j] = pxmitpriv->wmm_para_seq[j];
5489 #endif
5490 }
5491
5492 _rtw_spinlock_bh(&pxmitpriv->lock);
5493
5494 for (i = 0; i < entry; i++) {
5495 phwxmit = phwxmit_i + inx[i];
5496
5497 /* _rtw_spinlock_irq(&phwxmit->sta_queue->lock, &sp_flags); */
5498
5499 sta_phead = get_list_head(phwxmit->sta_queue);
5500 sta_plist = get_next(sta_phead);
5501
5502 while ((rtw_end_of_queue_search(sta_phead, sta_plist)) == _FALSE) {
5503
5504 ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq, tx_pending);
5505
5506 pframe_queue = &ptxservq->sta_pending;
5507
5508 pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit, ptxservq, pframe_queue);
5509
5510 if (pxmitframe) {
5511 phwxmit->accnt--;
5512
5513 /* Remove the sta node when there are no pending packets. */
5514 if (_rtw_queue_empty(pframe_queue)) /* must be done after get_next and before break */
5515 rtw_list_delete(&ptxservq->tx_pending);
5516
5517 /* _rtw_spinunlock_irq(&phwxmit->sta_queue->lock, sp_flags); */
5518
5519 goto exit;
5520 }
5521
5522 sta_plist = get_next(sta_plist);
5523
5524 }
5525
5526 /* _rtw_spinunlock_irq(&phwxmit->sta_queue->lock, sp_flags); */
5527
5528 }
5529
5530 exit:
5531
5532 _rtw_spinunlock_bh(&pxmitpriv->lock);
5533
5534 return pxmitframe;
5535 }
5536
5537 #if 1
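/*
 * 802.1D user priority to WMM queue mapping implemented below:
 *   UP 1,2          -> bk_q, ac index 3
 *   UP 4,5          -> vi_q, ac index 1
 *   UP 6,7          -> vo_q, ac index 0
 *   UP 0,3, default -> be_q, ac index 2
 */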
5538 struct tx_servq *rtw_get_sta_pending(_adapter *padapter, struct sta_info *psta, sint up, u8 *ac)
5539 {
5540 struct tx_servq *ptxservq = NULL;
5541
5542
5543 switch (up) {
5544 case 1:
5545 case 2:
5546 ptxservq = &(psta->sta_xmitpriv.bk_q);
5547 *(ac) = 3;
5548 break;
5549
5550 case 4:
5551 case 5:
5552 ptxservq = &(psta->sta_xmitpriv.vi_q);
5553 *(ac) = 1;
5554 break;
5555
5556 case 6:
5557 case 7:
5558 ptxservq = &(psta->sta_xmitpriv.vo_q);
5559 *(ac) = 0;
5560 break;
5561
5562 case 0:
5563 case 3:
5564 default:
5565 ptxservq = &(psta->sta_xmitpriv.be_q);
5566 *(ac) = 2;
5567 break;
5568
5569 }
5570
5571
5572 return ptxservq;
5573 }
5574 #else
5575 __inline static struct tx_servq *rtw_get_sta_pending
5576 (_adapter *padapter, _queue **ppstapending, struct sta_info *psta, sint up) {
5577 struct tx_servq *ptxservq;
5578 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
5579
5580
5581 #ifdef CONFIG_RTL8711
5582
5583 if (IS_MCAST(psta->phl_sta->mac_addr)) {
5584 ptxservq = &(psta->sta_xmitpriv.be_q); /* we will use be_q to queue bc/mc frames in BCMC_stainfo */
5585 *ppstapending = &padapter->xmitpriv.bm_pending;
5586 } else
5587 #endif
5588 {
5589 switch (up) {
5590 case 1:
5591 case 2:
5592 ptxservq = &(psta->sta_xmitpriv.bk_q);
5593 *ppstapending = &padapter->xmitpriv.bk_pending;
5594 (phwxmits + 3)->accnt++;
5595 break;
5596
5597 case 4:
5598 case 5:
5599 ptxservq = &(psta->sta_xmitpriv.vi_q);
5600 *ppstapending = &padapter->xmitpriv.vi_pending;
5601 (phwxmits + 1)->accnt++;
5602 break;
5603
5604 case 6:
5605 case 7:
5606 ptxservq = &(psta->sta_xmitpriv.vo_q);
5607 *ppstapending = &padapter->xmitpriv.vo_pending;
5608 (phwxmits + 0)->accnt++;
5609 break;
5610
5611 case 0:
5612 case 3:
5613 default:
5614 ptxservq = &(psta->sta_xmitpriv.be_q);
5615 *ppstapending = &padapter->xmitpriv.be_pending;
5616 (phwxmits + 2)->accnt++;
5617 break;
5618
5619 }
5620
5621 }
5622
5623
5624 return ptxservq;
5625 }
5626 #endif
5627
5628 /*
5629 * Enqueue pxmitframe to the proper per-STA service queue,
5630 * and link that service queue onto the corresponding xx_pending list.
5631 */
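/*
 * Queueing layout used here:
 *   phwxmits[ac].sta_queue --tx_pending--> tx_servq --sta_pending--> xmit_frame
 * ptxservq->qcnt counts frames per service queue, phwxmit->accnt per AC.
 */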
5632 s32 rtw_xmit_classifier(_adapter *padapter, struct xmit_frame *pxmitframe)
5633 {
5634 u8 ac_index;
5635 struct sta_info *psta;
5636 struct tx_servq *ptxservq;
5637 struct pkt_attrib *pattrib = &pxmitframe->attrib;
5638 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
5639 sint res = _SUCCESS;
5640
5641
5642 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class);
5643
5644 /*
5645 if (pattrib->psta) {
5646 psta = pattrib->psta;
5647 } else {
5648 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
5649 psta = rtw_get_stainfo(pstapriv, pattrib->ra);
5650 }
5651 */
5652
5653 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
5654 if (pattrib->psta != psta) {
5655 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_sta);
5656 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
5657 return _FAIL;
5658 }
5659
5660 if (psta == NULL) {
5661 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_nosta);
5662 res = _FAIL;
5663 RTW_INFO("rtw_xmit_classifier: psta == NULL\n");
5664 goto exit;
5665 }
5666
5667 if (!(psta->state & WIFI_ASOC_STATE)) {
5668 DBG_COUNTER(padapter->tx_logs.core_tx_enqueue_class_err_fwlink);
5669 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
5670 return _FAIL;
5671 }
5672
5673 ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
5674
5675 /* _rtw_spinlock_irq(&pstapending->lock, &flags); */
5676
5677 if (rtw_is_list_empty(&ptxservq->tx_pending))
5678 rtw_list_insert_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
5679
5680 /* _rtw_spinlock_irq(&ptxservq->sta_pending.lock, &sp_flags); */
5681
5682 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
5683 ptxservq->qcnt++;
5684 phwxmits[ac_index].accnt++;
5685
5686 /* _rtw_spinunlock_irq(&ptxservq->sta_pending.lock, &sp_flags); */
5687
5688 /* _rtw_spinunlock_irq(&pstapending->lock, &flags); */
5689
5690 exit:
5691
5692
5693 return res;
5694 }
5695
5696 void rtw_alloc_hwxmits(_adapter *padapter)
5697 {
5698 struct hw_xmit *hwxmits;
5699 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5700
5701 pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
5702
5703 pxmitpriv->hwxmits = NULL;
5704
5705 pxmitpriv->hwxmits = (struct hw_xmit *)rtw_zmalloc(sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry);
5706
5707 if (pxmitpriv->hwxmits == NULL) {
5708 RTW_INFO("alloc hwxmits fail!...\n");
5709 return;
5710 }
5711
5712 hwxmits = pxmitpriv->hwxmits;
5713
5714 if (pxmitpriv->hwxmit_entry == 5) {
5715 /* pxmitpriv->bmc_txqueue.head = 0; */
5716 /* hwxmits[0] .phwtxqueue = &pxmitpriv->bmc_txqueue; */
5717 hwxmits[0].sta_queue = &pxmitpriv->bm_pending;
5718
5719 /* pxmitpriv->vo_txqueue.head = 0; */
5720 /* hwxmits[1] .phwtxqueue = &pxmitpriv->vo_txqueue; */
5721 hwxmits[1].sta_queue = &pxmitpriv->vo_pending;
5722
5723 /* pxmitpriv->vi_txqueue.head = 0; */
5724 /* hwxmits[2] .phwtxqueue = &pxmitpriv->vi_txqueue; */
5725 hwxmits[2].sta_queue = &pxmitpriv->vi_pending;
5726
5727 /* pxmitpriv->bk_txqueue.head = 0; */
5728 /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
5729 hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
5730
5731 /* pxmitpriv->be_txqueue.head = 0; */
5732 /* hwxmits[4] .phwtxqueue = &pxmitpriv->be_txqueue; */
5733 hwxmits[4].sta_queue = &pxmitpriv->be_pending;
5734
5735 } else if (pxmitpriv->hwxmit_entry == 4) {
5736
5737 /* pxmitpriv->vo_txqueue.head = 0; */
5738 /* hwxmits[0] .phwtxqueue = &pxmitpriv->vo_txqueue; */
5739 hwxmits[0].sta_queue = &pxmitpriv->vo_pending;
5740
5741 /* pxmitpriv->vi_txqueue.head = 0; */
5742 /* hwxmits[1] .phwtxqueue = &pxmitpriv->vi_txqueue; */
5743 hwxmits[1].sta_queue = &pxmitpriv->vi_pending;
5744
5745 /* pxmitpriv->be_txqueue.head = 0; */
5746 /* hwxmits[2] .phwtxqueue = &pxmitpriv->be_txqueue; */
5747 hwxmits[2].sta_queue = &pxmitpriv->be_pending;
5748
5749 /* pxmitpriv->bk_txqueue.head = 0; */
5750 /* hwxmits[3] .phwtxqueue = &pxmitpriv->bk_txqueue; */
5751 hwxmits[3].sta_queue = &pxmitpriv->bk_pending;
5752 } else {
5753
5754
5755 }
5756
5757
5758 }
5759
5760 void rtw_free_hwxmits(_adapter *padapter)
5761 {
5762 struct hw_xmit *hwxmits;
5763 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
5764
5765 hwxmits = pxmitpriv->hwxmits;
5766 if (hwxmits)
5767 rtw_mfree((u8 *)hwxmits, (sizeof(struct hw_xmit) * pxmitpriv->hwxmit_entry));
5768 }
5769
5770 void rtw_init_hwxmits(struct hw_xmit *phwxmit, sint entry)
5771 {
5772 sint i;
5773 for (i = 0; i < entry; i++, phwxmit++) {
5774 /* _rtw_spinlock_init(&phwxmit->xmit_lock); */
5775 /* _rtw_init_listhead(&phwxmit->pending); */
5776 /* phwxmit->txcmdcnt = 0; */
5777 phwxmit->accnt = 0;
5778 }
5779 }
5780
5781 #ifdef CONFIG_BR_EXT
5782 int rtw_br_client_tx(_adapter *padapter, struct sk_buff **pskb)
5783 {
5784 struct sk_buff *skb = *pskb;
5785 /* if(MLME_IS_STA(adapter) */
5786 {
5787 void dhcp_flag_bcast(_adapter *priv, struct sk_buff *skb);
5788 int res, is_vlan_tag = 0, i, do_nat25 = 1;
5789 unsigned short vlan_hdr = 0;
5790 void *br_port = NULL;
5791
5792 /* mac_clone_handle_frame(priv, skb); */
5793
5794 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
5795 br_port = padapter->pnetdev->br_port;
5796 #else /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */
5797 rcu_read_lock();
5798 br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
5799 rcu_read_unlock();
5800 #endif /* (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35)) */
5801 _rtw_spinlock_bh(&padapter->br_ext_lock);
5802 if (!(skb->data[0] & 1) &&
5803 br_port &&
5804 _rtw_memcmp(skb->data + MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
5805 *((unsigned short *)(skb->data + MACADDRLEN * 2)) != __constant_htons(ETH_P_8021Q) &&
5806 *((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP) &&
5807 !_rtw_memcmp(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
5808 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5809 padapter->scdb_entry->ageing_timer = jiffies;
5810 _rtw_spinunlock_bh(&padapter->br_ext_lock);
5811 } else
5812 /* if (!priv->pmib->ethBrExtInfo.nat25_disable) */
5813 {
5814 /* if (priv->dev->br_port &&
5815 * !_rtw_memcmp(skb->data+MACADDRLEN, priv->br_mac, MACADDRLEN)) { */
5816 #if 1
5817 if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q)) {
5818 is_vlan_tag = 1;
5819 vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
5820 for (i = 0; i < 6; i++)
5821 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
5822 skb_pull(skb, 4);
5823 }
5824 /* if SA == br_mac && skb== IP => copy SIP to br_ip ?? why */
5825 if (!_rtw_memcmp(skb->data + MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
5826 (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)))
5827 _rtw_memcpy(padapter->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
5828
5829 if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)) {
5830 if (_rtw_memcmp(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN)) {
5831 void *scdb_findEntry(_adapter *priv, unsigned char *macAddr, unsigned char *ipAddr);
5832
5833 padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter,
5834 skb->data + MACADDRLEN, skb->data + WLAN_ETHHDR_LEN + 12);
5835 if (padapter->scdb_entry != NULL) {
5836 _rtw_memcpy(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN);
5837 _rtw_memcpy(padapter->scdb_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
5838 padapter->scdb_entry->ageing_timer = jiffies;
5839 do_nat25 = 0;
5840 }
5841 } else {
5842 if (padapter->scdb_entry) {
5843 padapter->scdb_entry->ageing_timer = jiffies;
5844 do_nat25 = 0;
5845 } else {
5846 _rtw_memset(padapter->scdb_mac, 0, MACADDRLEN);
5847 _rtw_memset(padapter->scdb_ip, 0, 4);
5848 }
5849 }
5850 }
5851 _rtw_spinunlock_bh(&padapter->br_ext_lock);
5852 #endif /* 1 */
5853 if (do_nat25) {
5854 int nat25_db_handle(_adapter *priv, struct sk_buff *skb, int method);
5855 if (nat25_db_handle(padapter, skb, NAT25_CHECK) == 0) {
5856 struct sk_buff *newskb;
5857
5858 if (is_vlan_tag) {
5859 skb_push(skb, 4);
5860 for (i = 0; i < 6; i++)
5861 *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
5862 *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
5863 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
5864 }
5865
5866 newskb = rtw_skb_copy(skb);
5867 if (newskb == NULL) {
5868 /* priv->ext_stats.tx_drops++; */
5869 DEBUG_ERR("TX DROP: rtw_skb_copy fail!\n");
5870 /* goto stop_proc; */
5871 return -1;
5872 }
5873 rtw_skb_free(skb);
5874
5875 *pskb = skb = newskb;
5876 if (is_vlan_tag) {
5877 vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
5878 for (i = 0; i < 6; i++)
5879 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
5880 skb_pull(skb, 4);
5881 }
5882 }
5883
5884 if (skb_is_nonlinear(skb))
5885 DEBUG_ERR("%s(): skb_is_nonlinear!!\n", __FUNCTION__);
5886
5887
5888 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18))
5889 res = skb_linearize(skb, GFP_ATOMIC);
5890 #else /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */
5891 res = skb_linearize(skb);
5892 #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18)) */
5893 if (res < 0) {
5894 DEBUG_ERR("TX DROP: skb_linearize fail!\n");
5895 /* goto free_and_stop; */
5896 return -1;
5897 }
5898
5899 res = nat25_db_handle(padapter, skb, NAT25_INSERT);
5900 if (res < 0) {
5901 if (res == -2) {
5902 /* priv->ext_stats.tx_drops++; */
5903 DEBUG_ERR("TX DROP: nat25_db_handle fail!\n");
5904 /* goto free_and_stop; */
5905 return -1;
5906
5907 }
5908 /* we just print warning message and let it go */
5909 /* DEBUG_WARN("%s()-%d: nat25_db_handle INSERT Warning!\n", __FUNCTION__, __LINE__); */
5910 /* return -1; */ /* return -1 will cause system crash on 2011/08/30! */
5911 return 0;
5912 }
5913 }
5914
5915 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5916
5917 dhcp_flag_bcast(padapter, skb);
5918
5919 if (is_vlan_tag) {
5920 skb_push(skb, 4);
5921 for (i = 0; i < 6; i++)
5922 *((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
5923 *((unsigned short *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
5924 *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
5925 }
5926 }
5927 #if 0
5928 else {
5929 if (*((unsigned short *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q))
5930 is_vlan_tag = 1;
5931
5932 if (is_vlan_tag) {
5933 if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A_VALN(skb->data))
5934 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5935 } else {
5936 if (ICMPV6_MCAST_MAC(skb->data) && ICMPV6_PROTO1A(skb->data))
5937 _rtw_memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
5938 }
5939 }
5940 #endif /* 0 */
5941
5942 /* check if SA is equal to our MAC */
5943 if (_rtw_memcmp(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN)) {
5944 /* priv->ext_stats.tx_drops++; */
5945 DEBUG_ERR("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n",
5946 skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]);
5947 /* goto free_and_stop; */
5948 return -1;
5949 }
5950 }
5951 return 0;
5952 }
5953 #endif /* CONFIG_BR_EXT */
5954
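/* Packets flagged hipriority_pkt are forced into the VO hardware queue. */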
5955 static void do_queue_select(_adapter *padapter, struct pkt_attrib *pattrib)
5956 {
5957 u8 qsel;
5958
5959 qsel = pattrib->priority;
5960
5961 /* high priority packet */
5962 if (pattrib->hipriority_pkt) {
5963 pattrib->qsel = rtw_hal_get_qsel(padapter, QSLT_VO_ID);
5964 pattrib->priority = rtw_hal_get_qsel(padapter, QSLT_VO_ID);
5965 }
5966 }
5967
5968 /*
5969 * The monitor mode transmit(tx) entry
5970 *
5971 * Return
5972 * 0 always; the skb is consumed and freed here whether or not
5973 * the frame was actually handed to dump_mgntframe()
5974 *
5975 */
5976 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
5977 s32 rtw_monitor_xmit_entry(struct sk_buff *skb, struct net_device *ndev)
5978 {
5979 u16 frame_ctl;
5980 struct ieee80211_radiotap_header rtap_hdr;
5981 _adapter *padapter = (_adapter *)rtw_netdev_priv(ndev);
5982 struct pkt_file pktfile;
5983 struct rtw_ieee80211_hdr *pwlanhdr;
5984 struct pkt_attrib *pattrib;
5985 struct xmit_frame *pmgntframe;
5986 struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
5987 struct xmit_priv *pxmitpriv = &(padapter->xmitpriv);
5988 unsigned char *pframe;
5989 u8 dummybuf[32];
5990 int len = skb->len, rtap_len;
5991
5992
5993 rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, skb->truesize);
5994
5995 #ifndef CONFIG_CUSTOMER_ALIBABA_GENERAL
5996 if (unlikely(skb->len < sizeof(struct ieee80211_radiotap_header)))
5997 goto fail;
5998
5999 _rtw_open_pktfile((struct sk_buff *)skb, &pktfile);
6000 _rtw_pktfile_read(&pktfile, (u8 *)(&rtap_hdr), sizeof(struct ieee80211_radiotap_header));
6001 rtap_len = ieee80211_get_radiotap_len((u8 *)(&rtap_hdr));
6002 if (unlikely(rtap_hdr.it_version))
6003 goto fail;
6004
6005 if (unlikely(skb->len < rtap_len))
6006 goto fail;
6007
6008 if (rtap_len != 12) {
6009 RTW_INFO("radiotap len (should be 12): %d\n", rtap_len);
6010 goto fail;
6011 }
6012 _rtw_pktfile_read(&pktfile, dummybuf, rtap_len-sizeof(struct ieee80211_radiotap_header));
6013 len = len - rtap_len;
6014 #endif
6015 pmgntframe = alloc_mgtxmitframe(pxmitpriv);
6016 if (pmgntframe == NULL) {
6017 rtw_udelay_os(500);
6018 goto fail;
6019 }
6020
6021 _rtw_memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
6022 pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
6023 // _rtw_memcpy(pframe, (void *)checking, len);
6024 _rtw_pktfile_read(&pktfile, pframe, len);
6025
6026
6027 /* Check DATA/MGNT frames */
6028 pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
6029 frame_ctl = le16_to_cpu(pwlanhdr->frame_ctl);
6030 if ((frame_ctl & RTW_IEEE80211_FCTL_FTYPE) == RTW_IEEE80211_FTYPE_DATA) {
6031
6032 pattrib = &pmgntframe->attrib;
6033 update_monitor_frame_attrib(padapter, pattrib);
6034
6035 if (is_broadcast_mac_addr(pwlanhdr->addr3) || is_broadcast_mac_addr(pwlanhdr->addr1))
6036 pattrib->rate = MGN_24M;
6037
6038 } else {
6039
6040 pattrib = &pmgntframe->attrib;
6041 update_mgntframe_attrib(padapter, pattrib);
6042
6043 }
6044 pattrib->retry_ctrl = _FALSE;
6045 pattrib->pktlen = len;
6046 pmlmeext->mgnt_seq = GetSequence(pwlanhdr);
6047 pattrib->seqnum = pmlmeext->mgnt_seq;
6048 pmlmeext->mgnt_seq++;
6049 pattrib->last_txcmdsz = pattrib->pktlen;
6050
6051 dump_mgntframe(padapter, pmgntframe);
6052
6053 fail:
6054 rtw_skb_free(skb);
6055 return 0;
6056 }
6057 #endif
6058
6059 /*
6060 * Enqueue the frame to the per-STA tx queue when queue TX is enabled.
6061 * Return _TRUE when the frame has been put to the queue, otherwise return _FALSE.
6062 */
6063 static u8 xmit_enqueue(_adapter *a, struct xmit_frame *frame)
6064 {
6065 struct sta_info *sta = NULL;
6066 struct pkt_attrib *attrib = NULL;
6067 _list *head;
6068 u8 ret = _TRUE;
6069
6070
6071 attrib = &frame->attrib;
6072 sta = attrib->psta;
6073 if (!sta)
6074 return _FALSE;
6075
6076 _rtw_spinlock_bh(&sta->tx_queue.lock);
6077
6078 head = get_list_head(&sta->tx_queue);
6079
6080 if ((rtw_is_list_empty(head) == _TRUE) && (!sta->tx_q_enable)) {
6081 ret = _FALSE;
6082 goto exit;
6083 }
6084
6085 rtw_list_insert_tail(&frame->list, head);
6086 RTW_INFO(FUNC_ADPT_FMT ": en-queue tx pkt for macid=%d\n",
6087 FUNC_ADPT_ARG(a), sta->phl_sta->macid);
6088
6089 exit:
6090 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6091
6092 return ret;
6093 }
6094
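/*
 * Drain the per-STA tx_queue and hand each frame to rtw_intf_data_xmit().
 * Runs from the tx_q_work workitem (see rtw_xmit_dequeue_callback below).
 */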
6095 static void xmit_dequeue(struct sta_info *sta)
6096 {
6097 _adapter *a;
6098 _list *head, *list;
6099 struct xmit_frame *frame;
6100
6101
6102 a = sta->padapter;
6103
6104 _rtw_spinlock_bh(&sta->tx_queue.lock);
6105
6106 head = get_list_head(&sta->tx_queue);
6107
6108 do {
6109 if (rtw_is_list_empty(head) == _TRUE)
6110 break;
6111
6112 list = get_next(head);
6113 rtw_list_delete(list);
6114 frame = LIST_CONTAINOR(list, struct xmit_frame, list);
6115 RTW_INFO(FUNC_ADPT_FMT ": de-queue tx frame of macid=%d\n",
6116 FUNC_ADPT_ARG(a), sta->phl_sta->macid);
6117
6118 /*rtw_hal_xmit(a, frame);*/
6119 rtw_intf_data_xmit(a, frame);
6120 } while (1);
6121
6122 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6123 }
6124
6125 void rtw_xmit_dequeue_callback(_workitem *work)
6126 {
6127 struct sta_info *sta;
6128
6129
6130 sta = container_of(work, struct sta_info, tx_q_work);
6131 xmit_dequeue(sta);
6132 }
6133
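/*
 * rtw_xmit_queue_set()/rtw_xmit_queue_clear() pause and resume TX for one
 * station: while tx_q_enable is set, xmit_enqueue() buffers frames on
 * sta->tx_queue; clearing the flag schedules tx_q_work to flush them.
 */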
6134 void rtw_xmit_queue_set(struct sta_info *sta)
6135 {
6136 _rtw_spinlock_bh(&sta->tx_queue.lock);
6137
6138 if (sta->tx_q_enable) {
6139 RTW_WARN(FUNC_ADPT_FMT ": duplicated set!\n",
6140 FUNC_ADPT_ARG(sta->padapter));
6141 goto exit;
6142 }
6143 sta->tx_q_enable = 1;
6144 RTW_INFO(FUNC_ADPT_FMT ": enable queue TX for macid=%d\n",
6145 FUNC_ADPT_ARG(sta->padapter), sta->phl_sta->macid);
6146
6147 exit:
6148 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6149 }
6150
6151 void rtw_xmit_queue_clear(struct sta_info *sta)
6152 {
6153 _rtw_spinlock_bh(&sta->tx_queue.lock);
6154
6155 if (!sta->tx_q_enable) {
6156 RTW_WARN(FUNC_ADPT_FMT ": tx queue for macid=%d "
6157 "has not been enabled!\n",
6158 FUNC_ADPT_ARG(sta->padapter), sta->phl_sta->macid);
6159 goto exit;
6160 }
6161
6162 sta->tx_q_enable = 0;
6163 RTW_INFO(FUNC_ADPT_FMT ": disable queue TX for macid=%d\n",
6164 FUNC_ADPT_ARG(sta->padapter), sta->phl_sta->macid);
6165
6166 _set_workitem(&sta->tx_q_work);
6167
6168 exit:
6169 _rtw_spinunlock_bh(&sta->tx_queue.lock);
6170 }
6171
6172 /*
6173 * The main transmit(tx) entry post handle
6174 *
6175 * Return
6176 * 1 enqueue
6177 * 0 success, hardware will handle this xmit frame(packet)
6178 * <0 fail
6179 */
6180 s32 rtw_xmit_posthandle(_adapter *padapter, struct xmit_frame *pxmitframe,
6181 struct sk_buff *pkt)
6182 {
6183 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6184 s32 res;
6185
6186 res = update_attrib(padapter, pkt, &pxmitframe->attrib);
6187
6188 #ifdef CONFIG_WAPI_SUPPORT
6189 if (pxmitframe->attrib.ether_type != 0x88B4) {
6190 if (rtw_wapi_drop_for_key_absent(padapter, pxmitframe->attrib.ra)) {
6191 WAPI_TRACE(WAPI_RX, "drop for key absent when tx\n");
6192 res = _FAIL;
6193 }
6194 }
6195 #endif
6196 if (res == _FAIL) {
6197 /*RTW_INFO("%s-"ADPT_FMT" update attrib fail\n", __func__, ADPT_ARG(padapter));*/
6198 #ifdef DBG_TX_DROP_FRAME
6199 RTW_INFO("DBG_TX_DROP_FRAME %s update attrib fail\n", __FUNCTION__);
6200 #endif
6201 rtw_free_xmitframe(pxmitpriv, pxmitframe);
6202 return -1;
6203 }
6204 pxmitframe->pkt = pkt;
6205
6206 rtw_led_tx_control(padapter, pxmitframe->attrib.dst);
6207
6208 do_queue_select(padapter, &pxmitframe->attrib);
6209
6210 #ifdef CONFIG_AP_MODE
6211 _rtw_spinlock_bh(&pxmitpriv->lock);
6212 if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe) == _TRUE) {
6213 _rtw_spinunlock_bh(&pxmitpriv->lock);
6214 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue);
6215 return 1;
6216 }
6217 _rtw_spinunlock_bh(&pxmitpriv->lock);
6218 #endif
6219
6220 /*if (xmit_enqueue(padapter, pxmitframe) == _TRUE)*/
6221 /* return 1;*/
6222
6223 /* pre_xmitframe */
6224 /*if (rtw_hal_xmit(padapter, pxmitframe) == _FALSE)*/
6225 if (rtw_intf_data_xmit(padapter, pxmitframe) == _FALSE)
6226 return 1;
6227
6228 return 0;
6229 }
6230
6231 /*
6232 * The main transmit(tx) entry
6233 *
6234 * Return
6235 * 1 enqueue
6236 * 0 success, hardware will handle this xmit frame(packet)
6237 * <0 fail
6238 */
6239 s32 rtw_xmit(_adapter *padapter, struct sk_buff **ppkt, u16 os_qid)
6240 {
6241 static systime start = 0;
6242 static u32 drop_cnt = 0;
6243 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6244 struct xmit_frame *pxmitframe = NULL;
6245 s32 res;
6246
6247 DBG_COUNTER(padapter->tx_logs.core_tx);
6248
6249 if (IS_CH_WAITING(adapter_to_rfctl(padapter)))
6250 return -1;
6251
6252 if (rtw_linked_check(padapter) == _FALSE)
6253 return -1;
6254
6255 if (start == 0)
6256 start = rtw_get_current_time();
6257
6258 pxmitframe = rtw_alloc_xmitframe(pxmitpriv, os_qid);
6259
6260 if (rtw_get_passing_time_ms(start) > 2000) {
6261 if (drop_cnt)
6262 RTW_INFO("DBG_TX_DROP_FRAME %s no more pxmitframe, drop_cnt:%u\n", __FUNCTION__, drop_cnt);
6263 start = rtw_get_current_time();
6264 drop_cnt = 0;
6265 }
6266
6267 if (pxmitframe == NULL) {
6268 drop_cnt++;
6269 /*RTW_INFO("%s-"ADPT_FMT" no more xmitframe\n", __func__, ADPT_ARG(padapter));*/
6270 DBG_COUNTER(padapter->tx_logs.core_tx_err_pxmitframe);
6271 return -1;
6272 }
6273
6274 #ifdef CONFIG_BR_EXT
6275 if (MLME_IS_STA(padapter) || MLME_IS_ADHOC(padapter)) {
6276 void *br_port = NULL;
6277
6278 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
6279 br_port = padapter->pnetdev->br_port;
6280 #else
6281 rcu_read_lock();
6282 br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
6283 rcu_read_unlock();
6284 #endif
6285
6286 if (br_port) {
6287 res = rtw_br_client_tx(padapter, ppkt);
6288 if (res == -1) {
6289 rtw_free_xmitframe(pxmitpriv, pxmitframe);
6290 DBG_COUNTER(padapter->tx_logs.core_tx_err_brtx);
6291 return -1;
6292 }
6293 }
6294 }
6295 #endif /* CONFIG_BR_EXT */
6296
6297 #ifdef CONFIG_RTW_MESH
6298 if (MLME_IS_MESH(padapter)) {
6299 _list f_list;
6300
6301 res = rtw_mesh_addr_resolve(padapter, pxmitframe, *ppkt, &f_list);
6302 if (res == RTW_RA_RESOLVING)
6303 return 1;
6304 if (res == _FAIL)
6305 return -1;
6306
6307 #if CONFIG_RTW_MESH_DATA_BMC_TO_UC
6308 if (!rtw_is_list_empty(&f_list)) {
6309 _list *list = get_next(&f_list);
6310 struct xmit_frame *fframe;
6311
6312 while ((rtw_end_of_queue_search(&f_list, list)) == _FALSE) {
6313 fframe = LIST_CONTAINOR(list, struct xmit_frame, list);
6314 list = get_next(list);
6315 rtw_list_delete(&fframe->list);
6316
6317 fframe->pkt = rtw_skb_copy(*ppkt);
6318 if (!fframe->pkt) {
6319 if (res == RTW_ORI_NO_NEED)
6320 res = _SUCCESS;
6321 rtw_free_xmitframe(pxmitpriv, fframe);
6322 continue;
6323 }
6324
6325 rtw_xmit_posthandle(padapter, fframe, fframe->pkt);
6326 }
6327 }
6328 #endif /* CONFIG_RTW_MESH_DATA_BMC_TO_UC */
6329
6330 if (res == RTW_ORI_NO_NEED) {
6331 rtw_free_xmitframe(&padapter->xmitpriv, pxmitframe);
6332 return 0;
6333 }
6334 }
6335 #endif /* CONFIG_RTW_MESH */
6336
6337 pxmitframe->pkt = NULL; /* let rtw_xmit_posthandle not to free pkt inside */
6338 res = rtw_xmit_posthandle(padapter, pxmitframe, *ppkt);
6339
6340 return res;
6341 }
6342
6343 #ifdef RTW_PHL_TX
6344
6345 #ifdef RTW_PHL_TEST_FPGA
6346 u32 test_seq;
6347 #endif
6348
6349 u8 *get_head_from_txreq(_adapter *padapter, struct xmit_frame *pxframe, u8 frag_idx)
6350 {
6351 return 0;
6352 }
6353
6354 u8 *get_tail_from_txreq(_adapter *padapter, struct xmit_frame *pxframe, u8 frag_idx)
6355 {
6356 return 0;
6357 }
6358
6359 void dump_pkt(u8 *start, u32 len)
6360 {
6361 u32 idx = 0;
6362 for (idx = 0; idx < len; idx++) {
6363 printk("%02x ", start[idx]);
6364 if ((idx % 20) == 19)
6365 printk("\n");
6366 }
6367 printk("\n");
6368 }
6369
6370 /* TXREQ_QMGT */
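/*
 * Pop one xmit_txreq_buf from free_txreq_queue and hand back pointers into
 * its preallocated regions (txreq array, head/tail buffers, pkt_list).
 * Returns NULL and bumps txreq_full_cnt when the pool is exhausted.
 */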
6371 u8 *get_txreq_buffer(_adapter *padapter, u8 **txreq, u8 **pkt_list, u8 **head, u8 **tail)
6372 {
6373 struct xmit_txreq_buf *ptxreq_buf = NULL;
6374 _list *plist, *phead;
6375 _queue *pfree_txreq_queue = &padapter->free_txreq_queue;
6376 #ifdef CONFIG_CORE_TXSC
6377 u8 i = 0;
6378 #endif
6379
6380 _rtw_spinlock_bh(&pfree_txreq_queue->lock);
6381 if (_rtw_queue_empty(pfree_txreq_queue) == _TRUE) {
6382 padapter->txreq_full_cnt++;
6383 } else {
6384 phead = get_list_head(pfree_txreq_queue);
6385 plist = get_next(phead);
6386 ptxreq_buf = LIST_CONTAINOR(plist, struct xmit_txreq_buf, list);
6387 rtw_list_delete(&ptxreq_buf->list);
6388
6389 padapter->free_txreq_cnt--;
6390 }
6391 _rtw_spinunlock_bh(&pfree_txreq_queue->lock);
6392
6393 if (ptxreq_buf) {
6394
6395 if (txreq)
6396 *txreq = ptxreq_buf->txreq;
6397
6398 if (head)
6399 *head = ptxreq_buf->head;
6400
6401 if (tail)
6402 *tail = ptxreq_buf->tail;
6403
6404 if (pkt_list)
6405 *pkt_list = ptxreq_buf->pkt_list;
6406
6407 #ifdef CONFIG_CORE_TXSC
6408 for (i = 0; i < MAX_TXSC_SKB_NUM; i++)
6409 ptxreq_buf->pkt[i] = NULL;
6410 ptxreq_buf->pkt_cnt = 0;
6411 #endif
6412 }
6413
6414 return (u8 *)ptxreq_buf;
6415 }
6416
6417 void get_txreq_resources(_adapter *padapter, struct xmit_frame *pxframe,
6418 u8 **txreq, u8 **pkt_list, u8 **head, u8 **tail)
6419 {
6420 u32 offset_head = (sizeof(struct rtw_xmit_req) * RTW_MAX_FRAG_NUM);
6421 u32 offset_tail = offset_head + (SZ_HEAD_BUF * RTW_MAX_FRAG_NUM);
6422 u32 offset_list = offset_tail + (SZ_TAIL_BUF * RTW_MAX_FRAG_NUM);
6423 u8 *pbuf = NULL;
6424
6425 PHLTX_ENTER;
6426
6427 //rtw_phl_tx todo: error handling, max tx req limit
6428 padapter->tx_ring_idx++;
6429 padapter->tx_ring_idx = (padapter->tx_ring_idx % MAX_TX_RING_NUM);
6430
6431 pbuf = padapter->tx_pool_ring[padapter->tx_ring_idx];
6432 //memset(pbuf, 0, (SZ_TX_RING*RTW_MAX_FRAG_NUM));
6433
6434 if (txreq)
6435 *txreq = pbuf;
6436
6437 if (head)
6438 *head = pbuf + offset_head;
6439
6440 if (tail)
6441 *tail = pbuf + offset_tail;
6442
6443 if (pkt_list)
6444 *pkt_list = pbuf + offset_list;
6445 }
6446
6447 void dump_xmitframe_txreq(_adapter *padapter, struct xmit_frame *pxframe)
6448 {
6449 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
6450 u32 idx, idx1 = 0;
6451
6452 PHLTX_ENTER;
6453 printk("total txreq=%d \n", pxframe->txreq_cnt);
6454
6455 for (idx = 0; idx < pxframe->txreq_cnt; idx++) {
6456 struct rtw_pkt_buf_list *pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
6457 printk("txreq[%d] with %d pkts =====\n", idx, txreq->pkt_cnt);
6458 for (idx1 = 0; idx1 < txreq->pkt_cnt; idx1++) {
6459 printk("pkt[%d] 0x%p len=%d\n", idx1, (void *)pkt_list->vir_addr, pkt_list->length);
6460 dump_pkt(pkt_list->vir_addr, pkt_list->length);
6461 pkt_list++;
6462 }
6463 txreq++;
6464 }
6465 printk("\n");
6466 }
6467
6468 #ifdef CONFIG_PCI_HCI
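/*
 * PCI path: fill_txreq_phyaddr() DMA-maps every pkt_list buffer and splits
 * the bus address into phy_addr_l/phy_addr_h; core_recycle_txreq_phyaddr()
 * undoes the mapping afterwards.
 */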
6469 void core_recycle_txreq_phyaddr(_adapter *padapter, struct rtw_xmit_req *txreq)
6470 {
6471 PPCI_DATA pci_data = dvobj_to_pci(padapter->dvobj);
6472 struct pci_dev *pdev = pci_data->ppcidev;
6473 struct rtw_pkt_buf_list *pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
6474 u32 idx = 0;
6475
6476 for (idx = 0; idx < txreq->pkt_cnt; idx++) {
6477 dma_addr_t phy_addr = (pkt_list->phy_addr_l);
6478
6479 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6480 {
6481 u64 phy_addr_h = pkt_list->phy_addr_h;
6482 phy_addr |= (phy_addr_h << 32);
6483 }
6484 #endif
6485 pci_unmap_bus_addr(pdev, &phy_addr, pkt_list->length, PCI_DMA_TODEVICE);
6486 pkt_list++;
6487 }
6488 }
6489
6490 void fill_txreq_phyaddr(_adapter *padapter, struct xmit_frame *pxframe)
6491 {
6492 PPCI_DATA pci_data = dvobj_to_pci(padapter->dvobj);
6493 struct pci_dev *pdev = pci_data->ppcidev;
6494 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
6495 u32 idx, idx1 = 0;
6496
6497 PHLTX_ENTER;
6498
6499 for (idx = 0; idx < pxframe->txreq_cnt; idx++) {
6500 struct rtw_pkt_buf_list *pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
6501
6502 for (idx1 = 0; idx1 < txreq->pkt_cnt; idx1++) {
6503 dma_addr_t phy_addr = 0;
6504 pci_get_bus_addr(pdev, pkt_list->vir_addr, &phy_addr, pkt_list->length, PCI_DMA_TODEVICE);
6505 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
6506 pkt_list->phy_addr_h = phy_addr >> 32;
6507 #else
6508 pkt_list->phy_addr_h = 0x0;
6509 #endif
6510 pkt_list->phy_addr_l = phy_addr & 0xFFFFFFFF;
6511 pkt_list++;
6512 }
6513 txreq++;
6514 }
6515 }
6516 #endif
6517
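/*
 * Append the part of one skb that falls inside [*req_offset,
 * *req_offset + *req_sz) to the txreq's pkt_list: first the linear head,
 * then each page frag. On return, *req_sz is the size still to map and
 * *req_offset the offset left over for the next skb in the frag list.
 */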
6518 static void _fill_txreq_list_skb(_adapter *padapter,
6519 struct rtw_xmit_req *txreq, struct rtw_pkt_buf_list **pkt_list,
6520 struct sk_buff *skb, u32 *req_sz, s32 *req_offset)
6521 {
6522 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
6523 #define skb_frag_off(f) ((f)->page_offset)
6524 #endif
6525 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
6526 #define skb_frag_page(f) ((f)->page)
6527 #define skb_frag_size(f) ((f)->size)
6528 #endif
6529 #define PKT_LIST_APPEND(_addr, _len) do { \
6530 u32 __len = _len; \
6531 if (__len == 0) \
6532 break; \
6533 list->vir_addr = _addr; \
6534 list->length = __len; \
6535 txreq->pkt_cnt++; \
6536 txreq->total_len += __len; \
6537 list++; \
6538 *pkt_list = list; \
6539 } while (0)
6540
6541 struct rtw_pkt_buf_list *list = *pkt_list;
6542 u8 nr_frags = skb_shinfo(skb)->nr_frags;
6543 s32 offset = *req_offset;
6544 u32 rem_sz = *req_sz;
6545 u32 cur_frag_total, cur_frag_rem;
6546 int i;
6547
6548 /* skb head frag */
6549 cur_frag_total = skb_headlen(skb);
6550
6551 if (cur_frag_total > offset) {
6552 cur_frag_rem = rtw_min(cur_frag_total - offset, rem_sz);
6553 PKT_LIST_APPEND(skb->data + offset, cur_frag_rem);
6554 rem_sz -= cur_frag_rem;
6555 offset = 0;
6556 } else {
6557 offset -= cur_frag_total;
6558 }
6559
6560 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6561 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6562 u8 *addr;
6563
6564 addr = ((void *)page_address(skb_frag_page(frag))) + skb_frag_off(frag);
6565 cur_frag_total = skb_frag_size(frag);
6566
6567 if (offset < cur_frag_total) {
6568 cur_frag_rem = cur_frag_total - offset;
6569
6570 if (rem_sz < cur_frag_rem) {
6571 PKT_LIST_APPEND(addr + offset, rem_sz);
6572 RTW_WARN("%s:%d, size(rem_sz)=%d cur_frag_rem=%d txreq->total_len = %d\n",
6573 __func__, __LINE__, rem_sz, cur_frag_rem, txreq->total_len);
6574 rem_sz = 0;
6575 break;
6576 } else {
6577 PKT_LIST_APPEND(addr + offset, cur_frag_rem);
6578 RTW_DBG("%s:%d, size=%d txreq->total_len = %d\n",
6579 __func__, __LINE__, cur_frag_rem, txreq->total_len);
6580 rem_sz -= cur_frag_rem;
6581 }
6582
6583 offset = 0;
6584 } else {
6585 offset -= cur_frag_total;
6586 }
6587 }
6588
6589 *req_sz = rem_sz;
6590 *req_offset = offset;
6591
6592 #undef PKT_LIST_APPEND
6593 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 4, 0))
6594 #undef skb_frag_off
6595 #endif
6596 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0))
6597 #undef skb_frag_page
6598 #undef skb_frag_size
6599 #endif
6600 }
6601
6602 static int skb_total_frag_nr(struct sk_buff *head_skb)
6603 {
6604 struct sk_buff *skb;
6605 int nr;
6606
6607 nr = 1 + skb_shinfo(head_skb)->nr_frags;
6608
6609 skb_walk_frags(head_skb, skb)
6610 nr += 1 + skb_shinfo(skb)->nr_frags;
6611
6612 return nr;
6613 }
6614
6615 static void fill_txreq_list_skb(_adapter *padapter,
6616 struct rtw_xmit_req *txreq, struct rtw_pkt_buf_list **pkt_list,
6617 struct sk_buff *head_skb, u32 req_sz, s32 offset)
6618 {
6619 struct sk_buff *skb;
6620
6621 if (skb_total_frag_nr(head_skb) > NUM_PKT_LIST_PER_TXREQ - 2) {
6622 rtw_skb_linearize(head_skb);
6623 RTW_WARN("skb total frag nr over %d\n", NUM_PKT_LIST_PER_TXREQ - 2);
6624 }
6625
6626 _fill_txreq_list_skb(padapter, txreq, pkt_list, head_skb, &req_sz, &offset);
6627
6628 skb_walk_frags(head_skb, skb)
6629 _fill_txreq_list_skb(padapter, txreq, pkt_list, skb, &req_sz, &offset);
6630
6631 if (req_sz != 0)
6632 RTW_WARN("remaining req_sz=%d should be zero\n", req_sz);
6633 }
6634
6635 s32 rtw_core_replace_skb(struct sk_buff **pskb, u32 need_head, u32 need_tail)
6636 {
6637 struct sk_buff *newskb;
6638 struct sk_buff *skb = *pskb;
6639
6640 newskb = rtw_skb_copy(skb);
6641
6642 if (newskb == NULL)
6643 return FAIL;
6644
6645 rtw_skb_free(skb);
6646 *pskb = newskb;
6647
6648 return SUCCESS;
6649 }
6650
6651 #ifdef CONFIG_BR_EXT
6652 s32 core_br_client_tx(_adapter *padapter, struct xmit_frame *pxframe, struct sk_buff **pskb)
6653 {
6654 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6655
6656 if (!adapter_use_wds(padapter) && check_fwstate(&padapter->mlmepriv, WIFI_STATION_STATE | WIFI_ADHOC_STATE) == _TRUE) {
6657 void *br_port = NULL;
6658
6659 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
6660 br_port = padapter->pnetdev->br_port;
6661 #else
6662 rcu_read_lock();
6663 br_port = rcu_dereference(padapter->pnetdev->rx_handler_data);
6664 rcu_read_unlock();
6665 #endif
6666
6667 if (br_port) {
6668 if (rtw_br_client_tx(padapter, pskb) == FAIL) {
6669 core_tx_free_xmitframe(padapter, pxframe);
6670 DBG_COUNTER(padapter->tx_logs.core_tx_err_brtx);
6671 return FAIL;
6672 }
6673 }
6674 }
6675 return SUCCESS;
6676 }
6677 #endif
6678
6679 s32 core_tx_update_pkt(_adapter *padapter, struct xmit_frame *pxframe, struct sk_buff **pskb)
6680 {
6681 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6682 struct sk_buff *skb_orig = *pskb;
6683
6684 PHLTX_LOG;
6685
6686 //rtw_phl_tx todo, BR EXT
6687 if (core_br_client_tx(padapter, pxframe, pskb) == FAIL)
6688 return FAIL;
6689
6690 return SUCCESS;
6691 }
6692
6693 s32 core_tx_update_xmitframe(_adapter *padapter,
6694 struct xmit_frame *pxframe, struct sk_buff **pskb, struct sta_info *psta, u8 type)
6695 {
6696 pxframe->xftype = type;
6697 pxframe->pkt = *pskb;
6698
6699 PHLTX_LOG;
6700
6701 #if 1
6702 if (pxframe->xftype == RTW_TX_OS) {
6703 if (update_attrib(padapter, *pskb, &pxframe->attrib) != _SUCCESS)
6704 return FAIL;
6705 }
6706 #else
6707 pxframe->pkt = *pskb;
6708
6709 if (update_xmitframe_from_hdr(padapter, pxframe) == FAIL)
6710 return FAIL;
6711
6712 PHLTX_LOG;
6713
6714 if (update_xmitframe_qos(padapter, pxframe) == FAIL)
6715 return FAIL;
6716
6717 PHLTX_LOG;
6718
6719 if (update_xmitframe_security(padapter, pxframe) == FAIL)
6720 return FAIL;
6721
6722 PHLTX_LOG;
6723
6724 //if (update_xmitframe_hw(padapter, pxframe) == FAIL)
6725 //return FAIL;
6726
6727 PHLTX_LOG;
6728
6729 if (pxframe->xftype == RTW_TX_OS) {
6730 if (pxframe->attrib.bswenc
6731 && (skb_shared(*pskb) || skb_cloned(*pskb))
6732 && (rtw_core_replace_skb(pskb, RTW_MAX_WL_HEAD, RTW_MAX_WL_TAIL) == FAIL))
6733 return FAIL;
6734 }
6735 #endif
6736
6737 PHLTX_LOG;
6738
6739 return SUCCESS;
6740 }
6741
6742
6743
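/*
 * Work out 802.11 fragmentation parameters:
 *   payload_fragsz = frag_len - wl_head(hdr + IV) - wl_tail(FCS + ICV)
 *   wl_frag_num    = ceil(payload_totalsz / payload_fragsz), 1 for A-MSDU
 * where payload_totalsz adds LLC for RTW_TX_OS frames and the TKIP MIC.
 */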
6744 void get_wl_frag_paras(_adapter *padapter, struct xmit_frame *pxframe,
6745 u32 *frag_perfr, u32 *wl_frags)
6746 {
6747 u32 wl_head, wl_tail, payload_totalsz, payload_fragsz, wl_frag_num;
6748 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
6749
6750 wl_head = wl_tail = payload_totalsz = 0;
6751
6752 wl_head += pxframe->attrib.hdrlen;
6753 wl_tail += RTW_SZ_FCS;
6754 if (pxframe->attrib.encrypt) {
6755 wl_head += pxframe->attrib.iv_len;
6756 wl_tail += pxframe->attrib.icv_len;
6757 }
6758
6759 payload_fragsz = pxmitpriv->frag_len - wl_head - wl_tail;
6760
6761 payload_totalsz = pxframe->attrib.pktlen;
6762 if (pxframe->xftype == RTW_TX_OS)
6763 payload_totalsz += RTW_SZ_LLC;
6764 if (pxframe->attrib.encrypt == _TKIP_)
6765 payload_totalsz += RTW_TKIP_MIC_LEN;
6766
6767 if (pxframe->attrib.amsdu)
6768 wl_frag_num = 1;
6769 else if (payload_fragsz < payload_totalsz)
6770 wl_frag_num = RTW_DIV_ROUND_UP(payload_totalsz, payload_fragsz);
6771 else
6772 wl_frag_num = 1;
6773
6774 pxframe->attrib.frag_datalen = *frag_perfr = payload_fragsz;
6775 pxframe->attrib.nr_frags = *wl_frags = wl_frag_num;
6776 #ifdef CONFIG_CORE_TXSC
6777 pxframe->attrib.frag_len_txsc = payload_fragsz - (payload_totalsz - pxframe->attrib.pktlen);
6778 #endif
6779 }
6780
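/*
 * Build one rtw_xmit_req per 802.11 fragment. Each fragment's pkt_list is:
 * wlan header (+IV, with LLC only on the first fragment), payload slices
 * taken from the skb via fill_txreq_list_skb(), and an optional tail entry
 * (SW ICV / TKIP MIC, where the MIC is carried only on the last fragment).
 */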
6781 u8 fill_txreq_pkt_perfrag_txos(struct _ADAPTER *padapter,
6782 struct xmit_frame *pxframe,
6783 u32 frag_perfr, u32 wl_frags)
6784 {
6785 struct rtw_xmit_req *xf_txreq = NULL;
6786 struct rtw_pkt_buf_list *pkt_list = NULL;
6787 struct sk_buff *skb = pxframe->pkt;
6788 u8 *txreq, *head, *tail, *list;
6789 u32 head_sz, tail_sz, wlan_tail;
6790 u32 payload_sz, payload_offset;
6791 u8 idx;
6792 u8 *wlhdr[RTW_MAX_FRAG_NUM] = {NULL};
6793 u8 *wltail[RTW_MAX_FRAG_NUM] = {NULL};
6794 /* TXREQ_QMGT */
6795 struct xmit_txreq_buf *txreq_buf = NULL;
6796
6797 PHLTX_ENTER;
6798
6799 //printk("pxframe->attrib.pkt_hdrlen=%d pxframe->attrib.hdrlen=%d pxframe->attrib.iv_len=%d \n", pxframe->attrib.pkt_hdrlen, pxframe->attrib.hdrlen, pxframe->attrib.iv_len);
6800
6801 pxframe->txreq_cnt = wl_frags;
6802
6803 head_sz = pxframe->attrib.hdrlen + (pxframe->attrib.amsdu ? 0 : RTW_SZ_LLC);
6804 tail_sz = 0;
6805 if (pxframe->attrib.encrypt) {
6806 head_sz += pxframe->attrib.iv_len;
6807 if (pxframe->attrib.encrypt == _TKIP_)
6808 tail_sz += RTW_TKIP_MIC_LEN;
6809 if (pxframe->attrib.bswenc)
6810 tail_sz += pxframe->attrib.icv_len;
6811 }
6812
6813 PHLTX_LOG;
6814
6815 //get_txreq_resources(padapter, pxframe, &txreq, &list, &head, &tail);
6816 /* TXREQ_QMGT */
6817 txreq_buf = (struct xmit_txreq_buf *)get_txreq_buffer(padapter, &txreq, &list, &head, &tail);
6818 if (txreq_buf == NULL) {
6819 //do this in core_tx_init_xmitframe
6820 //pxframe->phl_txreq = NULL;
6821 //pxframe->ptxreq_buf = NULL;
6822
6823 //free in rtw_core_tx
6824 //pxframe->pkt = NULL;//for not recycle in abort_core_tx
6825 goto fail;
6826 }
6827 #ifdef USE_PREV_WLHDR_BUF /* CONFIG_CORE_TXSC */
6828 txreq_buf->macid = 0xff;
6829 txreq_buf->txsc_id = 0xff;
6830 #endif
6831 pxframe->ptxreq_buf = txreq_buf;
6832
6833 PHLTX_LOG;
6834
6835 #if 0
6836 payload = skb->data+pxframe->attrib.pkt_hdrlen;
6837 printk("num_txreq=%d, hw_head=%d, hw_tail=%d, list=0x%p\n",
6838 num_txreq, hw_head, hw_tail, (void *)list);
6839
6840 printk("p:txreq=0x%p, head=0x%p, tail=0x%p, payload=0x%p\n",
6841 (void *)txreq, (void *)head, (void *)tail, (void *)payload);
6842 #endif
6843
6844 pxframe->phl_txreq = xf_txreq = (struct rtw_xmit_req *)txreq;
6845 pkt_list = (struct rtw_pkt_buf_list *)list;
6846 #ifdef CONFIG_CORE_TXSC
6847 xf_txreq->shortcut_id = 0;
6848 xf_txreq->treq_type = RTW_PHL_TREQ_TYPE_NORMAL;
6849 #endif
6850
6851 PHLTX_LOG;
6852
6853 /* move to first payload position */
6854 payload_offset = pxframe->attrib.pkt_hdrlen;
6855
6856 for (idx = 0; idx < wl_frags; idx++) {
6857 /* for no memset */
6858 xf_txreq->pkt_cnt = 0;
6859 xf_txreq->total_len = 0;
6860 xf_txreq->pkt_list = (u8 *)pkt_list;
6861
6862 /* fill head into txreq */
6863 wlhdr[idx] = head;
6864 pkt_list->vir_addr = head;
6865 pkt_list->length = head_sz;
6866 if (idx) {
6867 /* deduct LLC size if not first fragment */
6868 pkt_list->length -= RTW_SZ_LLC;
6869 }
6870 head += pkt_list->length;
6871 xf_txreq->pkt_cnt++;
6872 xf_txreq->total_len += pkt_list->length;
6873 pkt_list++;
6874
6875 /* fill payload into txreq */
6876 if (idx == (wl_frags - 1)) {
6877 /* last payload size */
6878 payload_sz = skb->len - payload_offset;
6879 } else if (idx == 0) {
6880 /* first payload size should deduct LLC size */
6881 payload_sz = frag_perfr - RTW_SZ_LLC;
6882 } else {
6883 payload_sz = frag_perfr;
6884 }
6885 /* xf_txreq is updated and pkt_list is advanced inside */
6886 fill_txreq_list_skb(padapter, xf_txreq, &pkt_list, skb,
6887 payload_sz, payload_offset);
6888 payload_offset += payload_sz;
6889
6890 /* fill tail(if alloc) into txreq */
6891 if (tail_sz) {
6892 wlan_tail = tail_sz;
6893 if ((pxframe->attrib.encrypt == _TKIP_) && (idx != (wl_frags - 1))) {
6894 /* deduct MIC size if not last fragment with TKIP */
6895 wlan_tail -= RTW_TKIP_MIC_LEN;
6896 }
6897 if (wlan_tail) {
6898 wltail[idx] = tail;
6899 pkt_list->vir_addr = tail;
6900 pkt_list->length = wlan_tail;
6901 tail += pkt_list->length;
6902 xf_txreq->pkt_cnt++;
6903 xf_txreq->total_len += pkt_list->length;
6904 pkt_list++;
6905 }
6906 }
6907
6908 if (xf_txreq->pkt_cnt > NUM_PKT_LIST_PER_TXREQ)
6909 RTW_WARN("xf_txreq->pkt_cnt=%d > NUM_PKT_LIST_PER_TXREQ\n",
6910 xf_txreq->pkt_cnt);
6911
6912 xf_txreq++;
6913 }
6914
6915 _rtw_memcpy(pxframe->wlhdr, wlhdr, sizeof(wlhdr));
6916 _rtw_memcpy(pxframe->wltail, wltail, sizeof(wltail));
6917 PHLTX_EXIT;
6918 return _SUCCESS;
6919
6920 fail:
6921 return _FAIL;
6922 }
6923
6924 /* TXREQ_QMGT, MGT_TXREQ_QMGT */
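/*
 * Management frames are already linear in pxframe->buf_addr, so a single
 * pkt_list entry covering attrib.pktlen is enough here.
 */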
6925 u8 fill_txreq_pkt_mgmt(_adapter *padapter, struct xmit_frame *pxframe)
6926 {
6927 struct rtw_xmit_req *xf_txreq = NULL;
6928 struct rtw_pkt_buf_list *pkt_list = NULL;
6929 //u8 *txreq, *head, *tail, *list, *mgmt = NULL;
6930
6931 PHLTX_ENTER;
6932
6933 if (!pxframe->phl_txreq)
6934 goto fail;
6935
6936 xf_txreq = pxframe->phl_txreq;
6937 pkt_list = (struct rtw_pkt_buf_list *)xf_txreq->pkt_list;
6938
6939 //get_txreq_resources(padapter, pxframe,
6940 // (u8 **)&xf_txreq, (u8 **)&pkt_list, NULL, NULL);
6941 //printk("p:txreq=0x%p, pkt_list=0x%p \n", (void *)xf_txreq, (void *)pkt_list);
6942
6943 //for no memset
6944 xf_txreq->pkt_cnt = 0;
6945 xf_txreq->total_len = 0;
6946 #ifdef CONFIG_CORE_TXSC
6947 xf_txreq->shortcut_id = 0;
6948 #endif
6949
6950 pkt_list->vir_addr = pxframe->buf_addr;
6951 pkt_list->length = pxframe->attrib.pktlen;
6952
6953 xf_txreq->pkt_cnt = 1;
6954 //xf_txreq->pkt_list = (u8 *)pkt_list;
6955 xf_txreq->treq_type = RTW_PHL_TREQ_TYPE_NORMAL;
6956
6957 pxframe->txreq_cnt = 1;
6958 //pxframe->phl_txreq = xf_txreq;
6959
6960 xf_txreq->total_len = xf_txreq->total_len + pxframe->attrib.pktlen;
6961 //RTW_INFO("%s,%d, xf_txreq->total_length = %d\n", __func__, __LINE__, xf_txreq->total_len);
6962
6963 #ifdef RTW_PHL_TEST_FPGA
6964 {
6965 struct rtw_ieee80211_hdr *p = (struct rtw_ieee80211_hdr *)pxframe->buf_addr;
6966
6967 test_seq++;
6968 test_seq = test_seq%0xFFF;
6969 SetSeqNum(p, test_seq);
6970 }
6971 #endif
6972
6973 exit:
6974 return _SUCCESS;
6975
6976 fail:
6977 return _FAIL;
6978 }
6979
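/*
 * Copy the scattered pkt_list buffers of each txreq into one contiguous
 * rtw_zmalloc() buffer and collapse pkt_cnt to 1; buf_need_free records
 * which txreqs own a merged buffer (presumably freed when the frame is
 * recycled).
 */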
6980 static u8 merge_txreq_to_one_piece(struct _ADAPTER *a,
6981 struct xmit_frame *xf)
6982 {
6983 struct rtw_xmit_req *txreq = NULL;
6984 struct rtw_pkt_buf_list *pkt_list = NULL;
6985 int i, j;
6986 u32 total_sz;
6987 u8 *buf, *ptr;
6988
6989
6990 for (i = 0; i < xf->txreq_cnt; i++) {
6991 txreq = &xf->phl_txreq[i];
6992 total_sz = txreq->total_len;
6993 buf = rtw_zmalloc(total_sz);
6994 if (!buf)
6995 return _FAIL;
6996 xf->buf_need_free |= BIT(i);
6997
6998 ptr = buf;
6999 for (j = 0; j < txreq->pkt_cnt; j++) {
7000 pkt_list = &((struct rtw_pkt_buf_list *)txreq->pkt_list)[j];
7001 _rtw_memcpy(ptr, pkt_list->vir_addr, pkt_list->length);
7002 ptr += pkt_list->length;
7003 }
7004 txreq->pkt_cnt = 1;
7005 pkt_list = (struct rtw_pkt_buf_list *)txreq->pkt_list;
7006 pkt_list->vir_addr = buf;
7007 pkt_list->length = total_sz;
7008 }
7009
7010 return _SUCCESS;
7011 }
7012
7013 #ifdef RTW_PHL_TEST_FPGA
7014 #define F_TX_MACID (0)
7015 #define F_TX_TID (1)
7016 #define F_TX_TYPE RTW_PHL_PKT_TYPE_DATA
7017 #define F_TX_RATE (0x8F) //HRATE_MCS15
7018 #define F_TX_BW (1)
7019 #define F_TX_DMACH (0)
7020 #endif
7021
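/*
 * Resolve the HW security CAM index for this frame's key: multicast frames
 * use the BSS stainfo (except WEP, which is forced to the unicast key type),
 * and the index is then queried from PHL via rtw_phl_get_sec_cam_idx().
 */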
7022 static u8 get_security_cam_id(struct _ADAPTER *padapter, struct xmit_frame *pxframe, u8 keyid)
7023 {
7024 struct dvobj_priv *d;
7025 void *phl;
7026 u8 sec_cam_id = 0;
7027 struct sta_priv *pstapriv = &padapter->stapriv;
7028 struct sta_info *sta;
7029 sint bmcast = IS_MCAST(pxframe->attrib.ra);
7030 struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
7031 WLAN_BSSID_EX *pbss_network = (WLAN_BSSID_EX *)&pmlmepriv->cur_network.network;
7032
7033 if (bmcast == _TRUE) {
7034 /* WEP: use unicast key type to match halmac rule (see: setkey_hdl) */
7035 if (pxframe->attrib.encrypt == _WEP40_ || pxframe->attrib.encrypt == _WEP104_)
7036 bmcast = _FALSE;
7037
7038 sta = rtw_get_stainfo(pstapriv, pbss_network->MacAddress);
7039 } else {
7040 sta = rtw_get_stainfo(pstapriv, pxframe->attrib.ra);
7041 }
7042
7043 if (!sta) {
7044 RTW_ERR("%s sta not found\n", __func__);
7045 rtw_warn_on(1);
7046 return sec_cam_id;
7047 }
7048
7049 d = adapter_to_dvobj(padapter);
7050 phl = GET_PHL_INFO(d);
7051
7052 if (keyid >= 4)
7053 sec_cam_id = rtw_phl_get_sec_cam_idx(phl, sta->phl_sta, keyid,
7054 RTW_SEC_KEY_BIP);
7055 else
7056 sec_cam_id = rtw_phl_get_sec_cam_idx(phl, sta->phl_sta, keyid,
7057 bmcast ? RTW_SEC_KEY_MULTICAST : RTW_SEC_KEY_UNICAST);
7058
7059 return sec_cam_id;
7060 }
7061
7062 /* Todo: HE rate mapping not ready */
7063 static const enum rtw_data_rate mrate2phlrate_tbl[] = {
7064 [MGN_1M] = RTW_DATA_RATE_CCK1,
7065 [MGN_2M] = RTW_DATA_RATE_CCK2,
7066 [MGN_5_5M] = RTW_DATA_RATE_CCK5_5,
7067 [MGN_11M] = RTW_DATA_RATE_CCK11,
7068 [MGN_6M] = RTW_DATA_RATE_OFDM6,
7069 [MGN_9M] = RTW_DATA_RATE_OFDM9,
7070 [MGN_12M] = RTW_DATA_RATE_OFDM12,
7071 [MGN_18M] = RTW_DATA_RATE_OFDM18,
7072 [MGN_24M] = RTW_DATA_RATE_OFDM24,
7073 [MGN_36M] = RTW_DATA_RATE_OFDM36,
7074 [MGN_48M] = RTW_DATA_RATE_OFDM48,
7075 [MGN_54M] = RTW_DATA_RATE_OFDM54,
7076 [MGN_MCS0] = RTW_DATA_RATE_MCS0,
7077 [MGN_MCS1] = RTW_DATA_RATE_MCS1,
7078 [MGN_MCS2] = RTW_DATA_RATE_MCS2,
7079 [MGN_MCS3] = RTW_DATA_RATE_MCS3,
7080 [MGN_MCS4] = RTW_DATA_RATE_MCS4,
7081 [MGN_MCS5] = RTW_DATA_RATE_MCS5,
7082 [MGN_MCS6] = RTW_DATA_RATE_MCS6,
7083 [MGN_MCS7] = RTW_DATA_RATE_MCS7,
7084 [MGN_MCS8] = RTW_DATA_RATE_MCS8,
7085 [MGN_MCS9] = RTW_DATA_RATE_MCS9,
7086 [MGN_MCS10] = RTW_DATA_RATE_MCS10,
7087 [MGN_MCS11] = RTW_DATA_RATE_MCS11,
7088 [MGN_MCS12] = RTW_DATA_RATE_MCS12,
7089 [MGN_MCS13] = RTW_DATA_RATE_MCS13,
7090 [MGN_MCS14] = RTW_DATA_RATE_MCS14,
7091 [MGN_MCS15] = RTW_DATA_RATE_MCS15,
7092 [MGN_MCS16] = RTW_DATA_RATE_MCS16,
7093 [MGN_MCS17] = RTW_DATA_RATE_MCS17,
7094 [MGN_MCS18] = RTW_DATA_RATE_MCS18,
7095 [MGN_MCS19] = RTW_DATA_RATE_MCS19,
7096 [MGN_MCS20] = RTW_DATA_RATE_MCS20,
7097 [MGN_MCS21] = RTW_DATA_RATE_MCS21,
7098 [MGN_MCS22] = RTW_DATA_RATE_MCS22,
7099 [MGN_MCS23] = RTW_DATA_RATE_MCS23,
7100 [MGN_MCS24] = RTW_DATA_RATE_MCS24,
7101 [MGN_MCS25] = RTW_DATA_RATE_MCS25,
7102 [MGN_MCS26] = RTW_DATA_RATE_MCS26,
7103 [MGN_MCS27] = RTW_DATA_RATE_MCS27,
7104 [MGN_MCS28] = RTW_DATA_RATE_MCS28,
7105 [MGN_MCS29] = RTW_DATA_RATE_MCS29,
7106 [MGN_MCS30] = RTW_DATA_RATE_MCS30,
7107 [MGN_MCS31] = RTW_DATA_RATE_MCS31,
7108 [MGN_VHT1SS_MCS0] = RTW_DATA_RATE_VHT_NSS1_MCS0,
7109 [MGN_VHT1SS_MCS1] = RTW_DATA_RATE_VHT_NSS1_MCS1,
7110 [MGN_VHT1SS_MCS2] = RTW_DATA_RATE_VHT_NSS1_MCS2,
7111 [MGN_VHT1SS_MCS3] = RTW_DATA_RATE_VHT_NSS1_MCS3,
7112 [MGN_VHT1SS_MCS4] = RTW_DATA_RATE_VHT_NSS1_MCS4,
7113 [MGN_VHT1SS_MCS5] = RTW_DATA_RATE_VHT_NSS1_MCS5,
7114 [MGN_VHT1SS_MCS6] = RTW_DATA_RATE_VHT_NSS1_MCS6,
7115 [MGN_VHT1SS_MCS7] = RTW_DATA_RATE_VHT_NSS1_MCS7,
7116 [MGN_VHT1SS_MCS8] = RTW_DATA_RATE_VHT_NSS1_MCS8,
7117 [MGN_VHT1SS_MCS9] = RTW_DATA_RATE_VHT_NSS1_MCS9,
7118 [MGN_VHT2SS_MCS0] = RTW_DATA_RATE_VHT_NSS2_MCS0,
7119 [MGN_VHT2SS_MCS1] = RTW_DATA_RATE_VHT_NSS2_MCS1,
7120 [MGN_VHT2SS_MCS2] = RTW_DATA_RATE_VHT_NSS2_MCS2,
7121 [MGN_VHT2SS_MCS3] = RTW_DATA_RATE_VHT_NSS2_MCS3,
7122 [MGN_VHT2SS_MCS4] = RTW_DATA_RATE_VHT_NSS2_MCS4,
7123 [MGN_VHT2SS_MCS5] = RTW_DATA_RATE_VHT_NSS2_MCS5,
7124 [MGN_VHT2SS_MCS6] = RTW_DATA_RATE_VHT_NSS2_MCS6,
7125 [MGN_VHT2SS_MCS7] = RTW_DATA_RATE_VHT_NSS2_MCS7,
7126 [MGN_VHT2SS_MCS8] = RTW_DATA_RATE_VHT_NSS2_MCS8,
7127 [MGN_VHT2SS_MCS9] = RTW_DATA_RATE_VHT_NSS2_MCS9,
7128 [MGN_VHT3SS_MCS0] = RTW_DATA_RATE_VHT_NSS3_MCS0,
7129 [MGN_VHT3SS_MCS1] = RTW_DATA_RATE_VHT_NSS3_MCS1,
7130 [MGN_VHT3SS_MCS2] = RTW_DATA_RATE_VHT_NSS3_MCS2,
7131 [MGN_VHT3SS_MCS3] = RTW_DATA_RATE_VHT_NSS3_MCS3,
7132 [MGN_VHT3SS_MCS4] = RTW_DATA_RATE_VHT_NSS3_MCS4,
7133 [MGN_VHT3SS_MCS5] = RTW_DATA_RATE_VHT_NSS3_MCS5,
7134 [MGN_VHT3SS_MCS6] = RTW_DATA_RATE_VHT_NSS3_MCS6,
7135 [MGN_VHT3SS_MCS7] = RTW_DATA_RATE_VHT_NSS3_MCS7,
7136 [MGN_VHT3SS_MCS8] = RTW_DATA_RATE_VHT_NSS3_MCS8,
7137 [MGN_VHT3SS_MCS9] = RTW_DATA_RATE_VHT_NSS3_MCS9,
7138 [MGN_VHT4SS_MCS0] = RTW_DATA_RATE_VHT_NSS4_MCS0,
7139 [MGN_VHT4SS_MCS1] = RTW_DATA_RATE_VHT_NSS4_MCS1,
7140 [MGN_VHT4SS_MCS2] = RTW_DATA_RATE_VHT_NSS4_MCS2,
7141 [MGN_VHT4SS_MCS3] = RTW_DATA_RATE_VHT_NSS4_MCS3,
7142 [MGN_VHT4SS_MCS4] = RTW_DATA_RATE_VHT_NSS4_MCS4,
7143 [MGN_VHT4SS_MCS5] = RTW_DATA_RATE_VHT_NSS4_MCS5,
7144 [MGN_VHT4SS_MCS6] = RTW_DATA_RATE_VHT_NSS4_MCS6,
7145 [MGN_VHT4SS_MCS7] = RTW_DATA_RATE_VHT_NSS4_MCS7,
7146 [MGN_VHT4SS_MCS8] = RTW_DATA_RATE_VHT_NSS4_MCS8,
7147 [MGN_VHT4SS_MCS9] = RTW_DATA_RATE_VHT_NSS4_MCS9,
7148 [MGN_HE1SS_MCS0] = RTW_DATA_RATE_HE_NSS1_MCS0,
7149 [MGN_HE1SS_MCS1] = RTW_DATA_RATE_HE_NSS1_MCS1,
7150 [MGN_HE1SS_MCS2] = RTW_DATA_RATE_HE_NSS1_MCS2,
7151 [MGN_HE1SS_MCS3] = RTW_DATA_RATE_HE_NSS1_MCS3,
7152 [MGN_HE1SS_MCS4] = RTW_DATA_RATE_HE_NSS1_MCS4,
7153 [MGN_HE1SS_MCS5] = RTW_DATA_RATE_HE_NSS1_MCS5,
7154 [MGN_HE1SS_MCS6] = RTW_DATA_RATE_HE_NSS1_MCS6,
7155 [MGN_HE1SS_MCS7] = RTW_DATA_RATE_HE_NSS1_MCS7,
7156 [MGN_HE1SS_MCS8] = RTW_DATA_RATE_HE_NSS1_MCS8,
7157 [MGN_HE1SS_MCS9] = RTW_DATA_RATE_HE_NSS1_MCS9,
7158 [MGN_HE1SS_MCS10] = RTW_DATA_RATE_HE_NSS1_MCS10,
7159 [MGN_HE1SS_MCS11] = RTW_DATA_RATE_HE_NSS1_MCS11,
7160 [MGN_HE2SS_MCS0] = RTW_DATA_RATE_HE_NSS2_MCS0,
7161 [MGN_HE2SS_MCS1] = RTW_DATA_RATE_HE_NSS2_MCS1,
7162 [MGN_HE2SS_MCS2] = RTW_DATA_RATE_HE_NSS2_MCS2,
7163 [MGN_HE2SS_MCS3] = RTW_DATA_RATE_HE_NSS2_MCS3,
7164 [MGN_HE2SS_MCS4] = RTW_DATA_RATE_HE_NSS2_MCS4,
7165 [MGN_HE2SS_MCS5] = RTW_DATA_RATE_HE_NSS2_MCS5,
7166 [MGN_HE2SS_MCS6] = RTW_DATA_RATE_HE_NSS2_MCS6,
7167 [MGN_HE2SS_MCS7] = RTW_DATA_RATE_HE_NSS2_MCS7,
7168 [MGN_HE2SS_MCS8] = RTW_DATA_RATE_HE_NSS2_MCS8,
7169 [MGN_HE2SS_MCS9] = RTW_DATA_RATE_HE_NSS2_MCS9,
7170 [MGN_HE2SS_MCS10] = RTW_DATA_RATE_HE_NSS2_MCS10,
7171 [MGN_HE2SS_MCS11] = RTW_DATA_RATE_HE_NSS2_MCS11,
7172 [MGN_HE3SS_MCS0] = RTW_DATA_RATE_HE_NSS3_MCS0,
7173 [MGN_HE3SS_MCS1] = RTW_DATA_RATE_HE_NSS3_MCS1,
7174 [MGN_HE3SS_MCS2] = RTW_DATA_RATE_HE_NSS3_MCS2,
7175 [MGN_HE3SS_MCS3] = RTW_DATA_RATE_HE_NSS3_MCS3,
7176 [MGN_HE3SS_MCS4] = RTW_DATA_RATE_HE_NSS3_MCS4,
7177 [MGN_HE3SS_MCS5] = RTW_DATA_RATE_HE_NSS3_MCS5,
7178 [MGN_HE3SS_MCS6] = RTW_DATA_RATE_HE_NSS3_MCS6,
7179 [MGN_HE3SS_MCS7] = RTW_DATA_RATE_HE_NSS3_MCS7,
7180 [MGN_HE3SS_MCS8] = RTW_DATA_RATE_HE_NSS3_MCS8,
7181 [MGN_HE3SS_MCS9] = RTW_DATA_RATE_HE_NSS3_MCS9,
7182 [MGN_HE3SS_MCS10] = RTW_DATA_RATE_HE_NSS3_MCS10,
7183 [MGN_HE3SS_MCS11] = RTW_DATA_RATE_HE_NSS3_MCS11,
7184 [MGN_HE4SS_MCS0] = RTW_DATA_RATE_HE_NSS4_MCS0,
7185 [MGN_HE4SS_MCS1] = RTW_DATA_RATE_HE_NSS4_MCS1,
7186 [MGN_HE4SS_MCS2] = RTW_DATA_RATE_HE_NSS4_MCS2,
7187 [MGN_HE4SS_MCS3] = RTW_DATA_RATE_HE_NSS4_MCS3,
7188 [MGN_HE4SS_MCS4] = RTW_DATA_RATE_HE_NSS4_MCS4,
7189 [MGN_HE4SS_MCS5] = RTW_DATA_RATE_HE_NSS4_MCS5,
7190 [MGN_HE4SS_MCS6] = RTW_DATA_RATE_HE_NSS4_MCS6,
7191 [MGN_HE4SS_MCS7] = RTW_DATA_RATE_HE_NSS4_MCS7,
7192 [MGN_HE4SS_MCS8] = RTW_DATA_RATE_HE_NSS4_MCS8,
7193 [MGN_HE4SS_MCS9] = RTW_DATA_RATE_HE_NSS4_MCS9,
7194 [MGN_HE4SS_MCS10] = RTW_DATA_RATE_HE_NSS4_MCS10,
7195 [MGN_HE4SS_MCS11] = RTW_DATA_RATE_HE_NSS4_MCS11,
7196 };
7197
7198 /*
7199 * _rate_mrate2phl() - convert data rate from mrate to PHL(MAC)
7200 * @sta: struct sta_info *
7201 * @mrate: data rate of mrate type, enum MGN_RATE
7202 *
7203 * Convert data rate from MGN_RATE definition to PHL's definition.
7204 *
7205 * Return PHL's data rate definition "enum rtw_data_rate".
7206 * 0x0~0xB: CCK 1M ~ OFDM 54M
7207 * 0x80~0x9F: HT MCS0~MCS31
7208 * 0x100~0x109: VHT 1SS MCS0~MCS9
7209 * 0x110~0x119: VHT 2SS MCS0~MCS9
7210 * 0x120~0x129: VHT 3SS MCS0~MCS9
7211 * 0x130~0x139: VHT 4SS MCS0~MCS9
7212 * 0x180~0x18B: HE 1SS MCS0~MCS11
7213 * 0x190~0x19B: HE 2SS MCS0~MCS11
7214 * 0x1A0~0x1AB: HE 3SS MCS0~MCS11
7215 * 0x1B0~0x1BB: HE 4SS MCS0~MCS11
7216 */
7217 static enum rtw_data_rate _rate_mrate2phl(enum MGN_RATE mrate)
7218 {
7219 enum rtw_data_rate phl = RTW_DATA_RATE_CCK1;
7220
7221
7222 if (mrate < ARRAY_SIZE(mrate2phlrate_tbl))
7223 phl = mrate2phlrate_tbl[mrate];
7224
7225 if ((mrate != MGN_1M) && (phl == RTW_DATA_RATE_CCK1))
7226 RTW_WARN("%s: Invalid rate 0x%x\n", __func__, mrate);
7227
7228 return phl;
7229 }
7230
7231 /*
7232 * _rate_drv2phl() - convert data rate from driver to PHL(MAC)
7233 * @sta: struct sta_info *
7234 * @rate: data rate of driver
7235 * 0x0~0xB: CCK 1M ~ OFDM 54M
7236 * >0xB: HT/VHT/HE share the same bit field to represent each
7237 * data rate, so the real meaning of these bits depends on
7238 * the sta's wireless mode.
7239 *
7240 * Convert driver's data rate definition to PHL's definition.
7241 *
7242 * Return PHL's data rate definition "enum rtw_data_rate".
7243 */
7244 static enum rtw_data_rate _rate_drv2phl(struct sta_info *sta, u8 rate)
7245 {
7246 enum rtw_data_rate phl = RTW_DATA_RATE_CCK1;
7247 u8 ht_support = 0, vht_support = 0, he_support = 0;
7248
7249
7250 if (rate < 12) {
7251 /* B/G mode, CCK/OFDM rate */
7252 return (enum rtw_data_rate)rate;
7253 }
7254
7255 #ifdef CONFIG_80211N_HT
7256 if (sta->htpriv.ht_option == _TRUE)
7257 ht_support = 1;
7258 #ifdef CONFIG_80211AC_VHT
7259 if (sta->vhtpriv.vht_option == _TRUE)
7260 vht_support = 1;
7261 #ifdef CONFIG_80211AX_HE
7262 if (sta->hepriv.he_option == _TRUE)
7263 he_support = 1;
7264 #endif /* CONFIG_80211AX_HE */
7265 #endif /* CONFIG_80211AC_VHT */
7266 #endif /* CONFIG_80211N_HT */
7267
7268 rate -= 12;
7269 if (he_support) {
7270 if (rate < 12)
7271 phl = RTW_DATA_RATE_HE_NSS1_MCS0 + rate;
7272 else if (rate < 24)
7273 phl = RTW_DATA_RATE_HE_NSS2_MCS0 + (rate - 12);
7274 else if (rate < 36)
7275 phl = RTW_DATA_RATE_HE_NSS3_MCS0 + (rate - 24);
7276 else
7277 phl = RTW_DATA_RATE_HE_NSS4_MCS0 + (rate - 36);
7278 } else if (vht_support) {
7279 if (rate < 10)
7280 phl = RTW_DATA_RATE_VHT_NSS1_MCS0 + rate;
7281 else if (rate < 20)
7282 phl = RTW_DATA_RATE_VHT_NSS2_MCS0 + (rate - 10);
7283 else if (rate < 30)
7284 phl = RTW_DATA_RATE_VHT_NSS3_MCS0 + (rate - 20);
7285 else
7286 phl = RTW_DATA_RATE_VHT_NSS4_MCS0 + (rate - 30);
7287 } else if (ht_support) {
7288 phl = RTW_DATA_RATE_MCS0 + rate;
7289 }
7290
7291 return phl;
7292 }
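/* Illustrative sketch (not part of the driver, never compiled): worked
 * examples of the index arithmetic in _rate_drv2phl() above. The driver-rate
 * values and the assumed sta capabilities are for illustration only.
 */
#if 0
static void example_rate_drv2phl(struct sta_info *sta)
{
	/* rate < 12 is a legacy CCK/OFDM index and is returned unchanged */
	enum rtw_data_rate legacy = _rate_drv2phl(sta, 4);

	/* HE-capable sta: 25 - 12 = 13 falls in the second 12-entry block,
	 * i.e. RTW_DATA_RATE_HE_NSS2_MCS0 + 1 -> HE 2SS MCS1 */
	enum rtw_data_rate he = _rate_drv2phl(sta, 25);

	/* VHT-only sta: the same 13 falls in the second 10-entry block,
	 * i.e. RTW_DATA_RATE_VHT_NSS2_MCS0 + 3 -> VHT 2SS MCS3 */
	enum rtw_data_rate vht = _rate_drv2phl(sta, 25);

	(void)legacy; (void)he; (void)vht;
}
#endif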
7293
7294 void fill_txreq_mdata(_adapter *padapter, struct xmit_frame *pxframe)
7295 {
7296 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
7297 struct sta_info *psta = pxframe->attrib.psta;
7298 struct rtw_phl_stainfo_t *phl_sta = NULL;
7299 struct rtw_t_meta_data *mdata = &(txreq->mdata);
7300 #ifdef BMC_ON_HIQ
7301 struct sta_priv *pstapriv = &padapter->stapriv;
7302 #endif
7303 u32 idx = 0;
7304 u8 htc_option = _FALSE;
7305 #ifdef CONFIG_XMIT_ACK
7306 struct xmit_priv *pxmitpriv = &(GET_PRIMARY_ADAPTER(padapter))->xmitpriv;
7307 #endif
7308
7309 PHLTX_LOG;
7310
7311 if (pxframe->attrib.order)
7312 htc_option = _TRUE;
7313
7314 /* packet identify */
7315 if (pxframe->xftype == RTW_TX_DRV_MGMT)
7316 mdata->type = RTW_PHL_PKT_TYPE_MGNT;
7317 else
7318 mdata->type = RTW_PHL_PKT_TYPE_DATA;
7319
7320 mdata->macid = pxframe->attrib.mac_id;
7321
7322 /* enable wd info by default */
7323 mdata->wdinfo_en = 1;
7324
7325 /* packet content */
7326 mdata->hdr_len = pxframe->attrib.hdrlen;
7327 mdata->hw_seq_mode = 0;
7328 mdata->sw_seq = pxframe->attrib.seqnum;
7329 mdata->hw_sec_iv = 0;
7330 mdata->nav_use_hdr = 0;
7331
7332 /* packet security */
7333 if (pxframe->attrib.encrypt == _NO_PRIVACY_ || pxframe->attrib.bswenc == _TRUE) {
7334 mdata->sec_hw_enc = _FALSE;
7335 mdata->sec_type = RTW_ENC_NONE;
7336 } else {
7337 mdata->sec_hw_enc = _TRUE;
7338 mdata->sec_type = rtw_sec_algo_drv2phl(pxframe->attrib.encrypt);
7339 mdata->sec_cam_idx = get_security_cam_id(padapter, pxframe, pxframe->attrib.key_idx);
7340 }
7341 /* Currently dump security settings for dbg */
7342 RTW_DBG("sec_type= %d sec_hw_enc= %d sec_cam_idx= %d \n",
7343 mdata->sec_type, mdata->sec_hw_enc, mdata->sec_cam_idx);
7344
7345 /* packet capability */
7346 if (pxframe->attrib.ampdu_en == _TRUE) {
7347 mdata->ampdu_en = 1;
7348 mdata->bk = 0;
7349 mdata->ampdu_density = pxframe->attrib.ampdu_spacing;
7350 mdata->max_agg_num = 0x3F; /* temporarily fixed to 64 */
7351 } else {
7352 mdata->ampdu_en = 0;
7353 mdata->bk = 1;
7354 }
7355 mdata->dis_data_rate_fb = 0;
7356 mdata->dis_rts_rate_fb = 0;
7357 mdata->data_tx_cnt_lmt_en = 0;
7358 mdata->data_tx_cnt_lmt = 0;
7359
7360 /* Set DATA_RTY_LOWEST_RATE: 2.4G to CCK1M & 5G to OFDM6M */
7361 if (rtw_get_oper_band(padapter) == BAND_ON_24G)
7362 mdata->data_rty_lowest_rate = RTW_DATA_RATE_CCK1;
7363 else if (rtw_get_oper_band(padapter) == BAND_ON_5G)
7364 mdata->data_rty_lowest_rate = RTW_DATA_RATE_OFDM6;
7365 else
7366 RTW_WARN("%s: mdata->data_rty_lowest_rate is not set.\n", __func__);
7367
7368 mdata->life_time_sel = 0;
7369 mdata->rts_en = pxframe->attrib.rtsen;
7370 mdata->cts2self = pxframe->attrib.cts2self;
7371 mdata->hw_rts_en = pxframe->attrib.hw_rts_en;
7372 mdata->rts_cca_mode = 0;
7373
7374 mdata->f_bw = pxframe->attrib.bwmode;
7375 /* TODO: GI and LTF are not ready for HE */
7376 mdata->f_gi_ltf = pxframe->attrib.sgi;
7377
7378 mdata->mc = IS_MCAST(pxframe->attrib.ra) ? 1 : 0;
7379 mdata->bc = MacAddr_isBcst(pxframe->attrib.ra) ? 1 : 0;
7380
7381 #ifdef CONFIG_80211AX_HE
7382 if (psta && htc_option)
7383 mdata->a_ctrl_bsr = 1;
7384 #endif
7385 mdata->raw = 0;
7386
7387 #ifdef BMC_ON_HIQ
7388 if ((pxframe->xftype != RTW_TX_DRV_MGMT)
7389 && (mdata->mc || mdata->bc)
7390 && (rtw_tim_map_anyone_be_set(padapter, pstapriv->sta_dz_bitmap))) {
7391 mdata->tid = RTW_PHL_RING_CAT_HIQ; /* HIQ */
7392 mdata->mbssid = 0; /* ToDo: Consider MBSSID */
7393 mdata->hal_port = padapter->phl_role->hw_port;
7394 } else
7395 #endif
7396 {
7397 mdata->tid = pxframe->attrib.priority;
7398 }
7399
7400 #ifdef CONFIG_CORE_TXSC
7401 mdata->ampdu_density = 0;
7402 mdata->userate_sel = 0;
7403 #endif
7404
7405 if (pxframe->xftype == RTW_TX_DRV_MGMT) {
7406 mdata->userate_sel = 1;
7407 mdata->f_rate = _rate_mrate2phl(pxframe->attrib.rate);
7408 } else {
7409 /* low rate for EAPOL/ARP/DHCP */
7410 if ((pxframe->attrib.ether_type == 0x888e) ||
7411 (pxframe->attrib.ether_type == 0x0806) ||
7412 (pxframe->attrib.dhcp_pkt == 1)) {
7413
7414 mdata->userate_sel = 1;
7415 if (IS_CCK_RATE(padapter->mlmeextpriv.tx_rate))
7416 mdata->f_rate = RTW_DATA_RATE_CCK1;
7417 else
7418 mdata->f_rate = RTW_DATA_RATE_OFDM6;
7419 } else {
7420 /* fixed rate for non-special packets */
7421 if (padapter->fix_rate != NO_FIX_RATE) {
7422 mdata->userate_sel = 1;
7423 mdata->f_rate = GET_FIX_RATE(padapter->fix_rate);
7424 mdata->f_gi_ltf = GET_FIX_RATE_SGI(padapter->fix_rate);
7425 if (!padapter->data_fb)
7426 mdata->dis_data_rate_fb = 1;
7427 } else {
7428 mdata->userate_sel = 0;
7429 }
7430
7431 if (padapter->fix_bw != NO_FIX_BW)
7432 mdata->f_bw = padapter->fix_bw;
7433 }
7434 }
7435 mdata->f_er = 0;
7436 mdata->f_dcm = 0;
7437 mdata->f_stbc = pxframe->attrib.stbc;
7438 mdata->f_ldpc = pxframe->attrib.ldpc;
7439
7440 mdata->band = 0;
7441 mdata->dma_ch = 0;
7442 mdata->spe_rpt = 0;
7443 mdata->sw_define = 0;
7444
7445 #ifdef CONFIG_XMIT_ACK
7446 if (pxframe->ack_report) {
7447 mdata->spe_rpt = 1;
7448 mdata->sw_define = pxmitpriv->seq_no;
7449 #ifdef RTW_WKARD_CCX_RPT_LIMIT_CTRL
7450 mdata->data_tx_cnt_lmt_en = 1;
7451 mdata->data_tx_cnt_lmt = 8;
7452 #endif
7453 }
7454 #endif
7455
7456 #ifdef CONFIG_CORE_TXSC
7457 mdata->pktlen = txreq->total_len;
7458 #endif
7459
7460 #ifdef RTW_PHL_TEST_FPGA
7461 mdata->type = F_TX_TYPE;
7462 mdata->macid = F_TX_MACID;
7463 mdata->tid = F_TX_TID;
7464 mdata->dma_ch = F_TX_DMACH;
7465 //mdata->band = cap->band;
7466 mdata->f_rate = F_TX_RATE;
7467 mdata->f_bw = F_TX_BW;
7468 mdata->f_gi_ltf = 0;
7469 mdata->f_stbc = 0;
7470 mdata->f_ldpc = 0;
7471
7472 mdata->userate_sel = 1;
7473 mdata->dis_data_rate_fb = 1;
7474 mdata->dis_rts_rate_fb = 1;
7475 #endif
7476
7477 #ifdef RTW_PHL_DBG_CMD
7478 if (pxframe->xftype != RTW_TX_DRV_MGMT) {
7479 if (padapter->txForce_enable) {
7480 if (padapter->txForce_rate != INV_TXFORCE_VAL)
7481 mdata->f_rate = padapter->txForce_rate;
7482 if (padapter->txForce_agg != INV_TXFORCE_VAL)
7483 mdata->ampdu_en = padapter->txForce_agg;
7484 if (padapter->txForce_aggnum != INV_TXFORCE_VAL)
7485 mdata->max_agg_num = padapter->txForce_aggnum;
7486 if (padapter->txForce_gi != INV_TXFORCE_VAL)
7487 mdata->f_gi_ltf = padapter->txForce_gi;
7488 }
7489 }
7490 #endif
7491
7492 #ifdef CONFIG_CORE_TXSC
7493 _print_txreq_mdata(mdata, __func__);
7494 #endif
7495
7496 if (pxframe->txreq_cnt > 1) {
7497 struct rtw_t_meta_data *mdata_tmp;
7498 txreq++;
7499 for (idx = 1; idx < pxframe->txreq_cnt; idx++) {
7500 #ifdef CONFIG_CORE_TXSC
7501 mdata->pktlen = txreq->total_len;
7502 #endif
7503 mdata_tmp = &(txreq->mdata);
7504 memcpy(mdata_tmp, mdata, sizeof(struct rtw_t_meta_data));
7505 txreq++;
7506 }
7507 }
7508
7509 }
7510
7511
7512 void fill_txreq_others(_adapter *padapter, struct xmit_frame *pxframe)
7513 {
7514 struct rtw_xmit_req *txreq = pxframe->phl_txreq;
7515 u32 idx = 0;
7516
7517 PHLTX_ENTER;
7518
7519 for (idx = 0; idx < pxframe->txreq_cnt; idx++) {
7520 txreq->os_priv = pxframe;
7521 txreq++;
7522 }
7523 }
7524
7525 u8 core_wlan_fill_txreq_pre(_adapter *padapter, struct xmit_frame *pxframe)
7526 {
7527 u32 frag_perfr, wl_frags = 0;
7528
7529 if (pxframe->xftype == RTW_TX_OS) {
7530 get_wl_frag_paras(padapter, pxframe, &frag_perfr, &wl_frags);
7531 if (fill_txreq_pkt_perfrag_txos(padapter, pxframe, frag_perfr, wl_frags) == _FAIL)
7532 return _FAIL;
7533 } else if (pxframe->xftype == RTW_TX_DRV_MGMT) {
7534 if (fill_txreq_pkt_mgmt(padapter, pxframe) == _FAIL)
7535 return _FAIL;
7536 }
7537
7538 return _SUCCESS;
7539 }
7540
7541 void core_wlan_fill_txreq_post(_adapter *padapter, struct xmit_frame *pxframe)
7542 {
7543 fill_txreq_mdata(padapter, pxframe);
7544 fill_txreq_others(padapter, pxframe);
7545
7546 #ifdef CONFIG_PCI_HCI
7547 /* must be called after all pkt contents are modified (cache sync) */
7548 fill_txreq_phyaddr(padapter, pxframe);
7549 #endif
7550
7551 }
7552
7553 void core_wlan_fill_head(_adapter *padapter, struct xmit_frame *pxframe)
7554 {
7555 u32 idx = 0;
7556 if (pxframe->xftype == RTW_TX_OS) {
7557 for (idx = 0; idx < pxframe->attrib.nr_frags; idx++) {
7558 u8 *pwlanhdr = pxframe->wlhdr[idx];
7559
7560 if (!pwlanhdr) {
7561 PHLTX_ERR;
7562 continue;
7563 }
7564
7565 rtw_make_wlanhdr(padapter, pwlanhdr, &pxframe->attrib); //rtw_core_make_wlanhdr(padapter, pwlanhdr, pxframe);
7566
7567 if (idx == (pxframe->attrib.nr_frags - 1))
7568 ClearMFrag(pwlanhdr);
7569 else
7570 SetMFrag(pwlanhdr);
7571
7572 if (pxframe->attrib.iv_len) {
7573 update_attrib_sec_iv_info(padapter, &pxframe->attrib);
7574 _rtw_memcpy((pwlanhdr+pxframe->attrib.hdrlen), pxframe->attrib.iv, pxframe->attrib.iv_len);
7575 }
7576
7577 if (idx == 0 && !pxframe->attrib.amsdu) {
7578 /* Add LLC/SNAP to first fragment */
7579 rtw_put_snap(pwlanhdr+pxframe->attrib.hdrlen+pxframe->attrib.iv_len,
7580 pxframe->attrib.ether_type);
7581 }
7582
7583 #ifdef RTW_PHL_TEST_FPGA
7584 {
7585 struct rtw_ieee80211_hdr *p = (struct rtw_ieee80211_hdr *)pwlanhdr;
7586 unsigned short *fctrl;
7587 unsigned int pktlen = 0;
7588 u16 *qc;
7589
7590 test_seq++;
7591 test_seq = test_seq%0xFFF;
7592 SetSeqNum(p, test_seq);
7593 }
7594 #endif
7595
7596 }
7597 }
7598 }
7599
7600
7601 void core_wlan_fill_tail(_adapter *padapter, struct xmit_frame *pxframe)
7602 {
7603 ;
7604
7605 }
7606
7607
7608 u8 core_wlan_fill_tkip_mic(_adapter *padapter, struct xmit_frame *pxframe)
7609 {
7610 u8 *llc = NULL;
7611 u8 *payload = NULL;
7612 u8 mic[8] = {0x0};
7613 struct mic_data micdata;
7614 struct pkt_attrib *pattrib = &pxframe->attrib;
7615 struct security_priv *psecuritypriv = &padapter->securitypriv;
7616 s8 bmcst = IS_MCAST(pattrib->ra);
7617 u8 priority[4] = {0x0};
7618 int i = 0;
7619 struct rtw_xmit_req *xf_txreq = pxframe->phl_txreq;
7620 struct rtw_pkt_buf_list *pkt_list = NULL;
7621
7622 if (pattrib->encrypt == _TKIP_) {
7623 u8 null_key[16] = {0x0};
7624
7625 /* set TKIP MIC key */
7626 if (bmcst) {
7627 if (_rtw_memcmp(
7628 psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey,
7629 null_key, 16) == _TRUE)
7630 return _FAIL;
7631
7632 rtw_secmicsetkey(&micdata,
7633 psecuritypriv->dot118021XGrptxmickey[psecuritypriv->dot118021XGrpKeyid].skey);
7634 } else {
7635 if (_rtw_memcmp(
7636 &pattrib->dot11tkiptxmickey.skey[0],
7637 null_key, 16) == _TRUE)
7638 return _FAIL;
7639
7640 rtw_secmicsetkey(&micdata, &pattrib->dot11tkiptxmickey.skey[0]);
7641 }
7642
7643 /* set DA, SA */
7644 rtw_secmicappend(&micdata, &pattrib->dst[0], 6);
7645 rtw_secmicappend(&micdata, &pattrib->src[0], 6);
7646
7647 if (pattrib->qos_en)
7648 priority[0] = pattrib->priority;
7649
7650 /* set priority */
7651 rtw_secmicappend(&micdata, &priority[0], 4);
7652
7653 /* set LLC; TBD: should check whether the LLC header exists */
7654 llc = pxframe->wlhdr[0] + pxframe->attrib.hdrlen + pxframe->attrib.iv_len;
7655 rtw_secmicappend(&micdata, llc, SNAP_SIZE + sizeof(u16));
7656
7657 /* set MSDU payload */
7658 pkt_list = (struct rtw_pkt_buf_list *) xf_txreq->pkt_list;
7659
7660 /* skip the header entry and move to the payload */
7661 pkt_list ++;
7662 /* loop over the payload entries, skipping the tail */
7663 for (i = 1; i < xf_txreq->pkt_cnt - 1; i++) {
7664 rtw_secmicappend(&micdata, pkt_list->vir_addr, pkt_list->length);
7665 pkt_list ++;
7666 }
7667
7668 /* calculate MIC */
7669 rtw_secgetmic(&micdata, &mic[0]);
7670
7671 /* append MIC to the last tail */
7672 _rtw_memcpy(pxframe->wltail[pxframe->attrib.nr_frags-1], &(mic[0]), 8);
7673 }
7674
7675 return _SUCCESS;
7676 }
7677
7678
7679 static void core_wlan_sw_encrypt(_adapter *padapter, struct xmit_frame *pxframe)
7680 {
7681 struct pkt_attrib *attrib;
7682 u8 res;
7683
7684
7685 attrib = &pxframe->attrib;
7686 if (!attrib->encrypt)
7687 return;
7688 if (!attrib->bswenc)
7689 return;
7690
7691 /* convert txreq to one piece */
7692 res = merge_txreq_to_one_piece(padapter, pxframe);
7693 if (res != _SUCCESS) {
7694 RTW_ERR("%s: fail alloc buffer for sw enc!\n", __func__);
7695 return;
7696 }
7697 xmitframe_swencrypt(padapter, pxframe);
7698 }
7699
7700 #ifdef CONFIG_TX_AMSDU_SW_MODE
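/*
 * Per-AC A-MSDU aggregation timing (software mode): the per-priority timer
 * status cycles RTW_AMSDU_TIMER_UNSET -> RTW_AMSDU_TIMER_SETTING (timer armed,
 * keep collecting MSDUs) -> RTW_AMSDU_TIMER_TIMEOUT (set by the
 * rtw_amsdu_*_timeout_handler() callbacks near the end of this file).
 * core_tx_amsdu_timeout() below returns true only in the TIMEOUT case, which
 * lets core_tx_amsdu_dequeue() flush an undersized aggregate.
 */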
7701 static bool core_tx_amsdu_timeout(_adapter *padapter, struct pkt_attrib *pattrib)
7702 {
7703 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7704 u8 amsdu_timeout;
7705
7706 amsdu_timeout = rtw_amsdu_get_timer_status(padapter, pattrib->priority);
7707
7708 if (amsdu_timeout == RTW_AMSDU_TIMER_UNSET) {
7709 rtw_amsdu_set_timer_status(padapter,
7710 pattrib->priority, RTW_AMSDU_TIMER_SETTING);
7711 rtw_amsdu_set_timer(padapter, pattrib->priority);
7712 pxmitpriv->amsdu_debug_set_timer++;
7713 return false;
7714 } else if (amsdu_timeout == RTW_AMSDU_TIMER_SETTING) {
7715 return false;
7716 } else if (amsdu_timeout == RTW_AMSDU_TIMER_TIMEOUT) {
7717 rtw_amsdu_set_timer_status(padapter,
7718 pattrib->priority, RTW_AMSDU_TIMER_UNSET);
7719 pxmitpriv->amsdu_debug_timeout++;
7720 return true;
7721 }
7722
7723 return false;
7724 }
7725
7726 /* 'pxframes[]' is an array of size 'max_xf_nr' used to store the xmitframes to
7727 * coalesce into one A-MSDU; the return value is the number of entries actually used.
7728 * When the return value is 1, 'amsdu' tells the caller whether to send a normal frame or an A-MSDU.
7729 */
7730 static int core_tx_amsdu_dequeue(_adapter *padapter, struct xmit_frame *pxframes[],
7731 int max_xf_nr, bool *amsdu)
7732 {
7733 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
7734 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7735 struct registry_priv *pregpriv = &padapter->registrypriv;
7736 struct xmit_frame *pxframe;
7737 int tx_amsdu = rtw_min(padapter->tx_amsdu, max_xf_nr);
7738 int tx_amsdu_rate = padapter->tx_amsdu_rate;
7739 int current_tx_rate = dvobj->traffic_stat.cur_tx_tp;
7740 int num_frame;
7741 int nr_xf;
7742
7743 if (tx_amsdu == 0)
7744 goto dequeue_normal_pkt;
7745
7746 if (!MLME_IS_STA(padapter))
7747 goto dequeue_normal_pkt;
7748
7749 if (tx_amsdu >= 2 && tx_amsdu_rate && current_tx_rate < tx_amsdu_rate)
7750 goto dequeue_normal_pkt;
7751
7752 /* No A-MSDU when wifi_spec is on */
7753 if (pregpriv->wifi_spec == 1)
7754 goto dequeue_normal_pkt;
7755
7756 pxframe = rtw_get_xframe(pxmitpriv, &num_frame);
7757 if (num_frame == 0 || !pxframe)
7758 return 0;
7759
7760 if (num_frame < tx_amsdu) { /* Not enough MSDU for specific A-MSDU */
7761 if (!core_tx_amsdu_timeout(padapter, &pxframe->attrib))
7762 return 0; /* Not timeout yet */
7763 }
7764
7765 for (nr_xf = 0; nr_xf < tx_amsdu; nr_xf++) {
7766 pxframe = rtw_get_xframe(pxmitpriv, &num_frame);
7767
7768 if (num_frame == 0 || !pxframe)
7769 break;
7770
7771 if (!check_amsdu(pxframe))
7772 break;
7773
7774 /* TODO: check if size is over peer's capability */
7775
7776 pxframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits,
7777 pxmitpriv->hwxmit_entry);
7778
7779 pxframes[nr_xf] = pxframe;
7780 }
7781
7782 if (nr_xf == 0) {
7783 if (num_frame > 0)
7784 goto dequeue_normal_pkt;
7785 RTW_WARN("%s: nr_xf=0, num_frame=%d\n", __func__, num_frame);
7786 return 0;
7787 }
7788
7789 if (nr_xf < AMSDU_DEBUG_MAX_COUNT)
7790 pxmitpriv->amsdu_debug_coalesce[nr_xf-1]++;
7791 else
7792 pxmitpriv->amsdu_debug_coalesce[AMSDU_DEBUG_MAX_COUNT-1]++;
7793
7794 *amsdu = (nr_xf == 1 && tx_amsdu >= 2) ? false : true;
7795
7796 return nr_xf;
7797
7798 dequeue_normal_pkt:
7799 pxframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits,
7800 pxmitpriv->hwxmit_entry);
7801 if (!pxframe)
7802 return 0;
7803
7804 pxframes[0] = pxframe;
7805 *amsdu = false;
7806
7807 return 1;
7808 }
7809
7810 static bool core_tx_amsdu_dump(_adapter *padapter, struct xmit_frame *pxframes[],
7811 int xf_nr, bool amsdu)
7812 {
7813 struct xmit_frame *head_xframe;
7814 struct pkt_attrib *head_attrib;
7815 u32 pktlen;
7816
7817 /* prepare head xmitframe */
7818 head_xframe = pxframes[0];
7819 head_attrib = &head_xframe->attrib;
7820
7821 if (xf_nr == 1 && !amsdu)
7822 goto dump_pkt;
7823
7824 rtw_coalesce_tx_amsdu(padapter, pxframes, xf_nr, amsdu, &pktlen);
7825
7826 /* update proper attribute */
7827 head_attrib->amsdu = 1;
7828 head_attrib->pkt_hdrlen = 0;
7829 head_attrib->pktlen = pktlen;
7830
7831 dump_pkt:
7832 if (core_tx_prepare_phl(padapter, head_xframe) == FAIL)
7833 goto abort_core_tx;
7834
7835 if (core_tx_call_phl(padapter, head_xframe, NULL) == FAIL)
7836 goto abort_core_tx;
7837
7838 return true;
7839
7840 abort_core_tx:
7841 core_tx_free_xmitframe(padapter, head_xframe);
7842
7843 return true;
7844 }
7845
7846 void core_tx_amsdu_tasklet(_adapter *padapter)
7847 {
7848 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7849 struct xmit_frame *pxframes[5];
7850 int xf_nr;
7851 bool amsdu;
7852
7853 pxmitpriv->amsdu_debug_tasklet++;
7854
7855 while (1) {
7856 xf_nr = core_tx_amsdu_dequeue(padapter, pxframes, ARRAY_SIZE(pxframes),
7857 &amsdu);
7858 if (xf_nr == 0)
7859 break;
7860
7861 pxmitpriv->amsdu_debug_dequeue++;
7862
7863 core_tx_amsdu_dump(padapter, pxframes, xf_nr, amsdu);
7864 }
7865 }
7866
7867 static s32 core_tx_amsdu_enqueue(_adapter *padapter, struct xmit_frame *pxframe)
7868 {
7869 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7870 struct pkt_attrib *pattrib = &pxframe->attrib;
7871 int tx_amsdu = padapter->tx_amsdu;
7872 u8 amsdu_timeout;
7873 s32 res;
7874
7875 if (MLME_IS_STA(padapter) && check_amsdu_tx_support(padapter)) {
7876 if (IS_AMSDU_AMPDU_VALID(pattrib))
7877 goto enqueue;
7878 }
7879
7880 return FAIL;
7881
7882 enqueue:
7883 _rtw_spinlock_bh(&pxmitpriv->lock);
7884
7885 res = rtw_xmitframe_enqueue(padapter, pxframe);
7886 if (res == _FAIL) {
7887 _rtw_spinunlock_bh(&pxmitpriv->lock);
7888 return FAIL;
7889 }
7890
7891 pxmitpriv->amsdu_debug_enqueue++;
7892
7893 if (tx_amsdu >= 2) {
7894 amsdu_timeout = rtw_amsdu_get_timer_status(padapter, pattrib->priority);
7895 if (amsdu_timeout == RTW_AMSDU_TIMER_SETTING) {
7896 rtw_amsdu_cancel_timer(padapter, pattrib->priority);
7897 rtw_amsdu_set_timer_status(padapter, pattrib->priority,
7898 RTW_AMSDU_TIMER_UNSET);
7899 }
7900 }
7901
7902 _rtw_spinunlock_bh(&pxmitpriv->lock);
7903
7904 rtw_tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);
7905
7906 return _TRUE;
7907 }
7908 #endif /* CONFIG_TX_AMSDU_SW_MODE */
7909
7910 s32 core_tx_prepare_phl(_adapter *padapter, struct xmit_frame *pxframe)
7911 {
7912 if (core_wlan_fill_txreq_pre(padapter, pxframe) == _FAIL)
7913 return FAIL;
7914
7915 if (pxframe->xftype == RTW_TX_OS) {
7916 core_wlan_fill_head(padapter, pxframe);
7917 if (core_wlan_fill_tkip_mic(padapter, pxframe) == _FAIL) {
7918 RTW_ERR("core_wlan_fill_tkip_mic FAIL\n");
7919 return FAIL;
7920 }
7921 }
7922 core_wlan_fill_tail(padapter, pxframe);
7923 core_wlan_sw_encrypt(padapter, pxframe);
7924
7925 core_wlan_fill_txreq_post(padapter, pxframe);
7926
7927 return SUCCESS;
7928 }
7929
7930
7931 s32 core_tx_call_phl(_adapter *padapter, struct xmit_frame *pxframe, void *txsc_pkt)
7932 {
7933 struct rtw_xmit_req *txreq = NULL;
7934 void *phl = padapter->dvobj->phl;
7935 u32 idx = 0;
7936 u8 txreq_cnt = 0;
7937 #ifdef CONFIG_CORE_TXSC
7938 struct rtw_xmit_req *ptxsc_txreq = NULL;
7939 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7940 #endif
7941
7942 #ifdef CONFIG_CORE_TXSC
7943 struct txsc_pkt_entry *ptxsc_pkt = (struct txsc_pkt_entry *)txsc_pkt;
7944 if (ptxsc_pkt)
7945 ptxsc_txreq = ptxsc_pkt->ptxreq;
7946
7947 txreq = pxframe ? pxframe->phl_txreq : ptxsc_txreq;
7948 txreq_cnt = pxframe ? pxframe->txreq_cnt : 1;
7949 #else
7950 txreq = pxframe->phl_txreq;
7951 txreq_cnt = pxframe->txreq_cnt;
7952 #endif
7953
7954 for (idx = 0; idx < txreq_cnt; idx++) {
7955
7956 #ifdef RTW_PHL_TEST_FPGA
7957 core_add_record(padapter, REC_TX_PHL, txreq);
7958 #endif
7959
7960 if (rtw_phl_add_tx_req(phl, txreq) != RTW_PHL_STATUS_SUCCESS)
7961 return FAIL;
7962
7963 rtw_phl_tx_req_notify(phl);
7964
7965
7966 txreq++;
7967 }
7968
7969 /* tx stats should only be counted after the tx req has been added successfully */
7970 #ifdef CONFIG_CORE_TXSC
7971 if (ptxsc_txreq != NULL)
7972 rtw_count_tx_stats_tx_req(padapter, ptxsc_txreq, ptxsc_pkt->psta);
7973 else
7974 #endif
7975 rtw_count_tx_stats(padapter, pxframe, pxframe->attrib.pktlen);
7976
7977 return SUCCESS;
7978 }
7979
7980 s32 core_tx_per_packet(_adapter *padapter, struct xmit_frame *pxframe,
7981 struct sk_buff **pskb, struct sta_info *psta)
7982 {
7983 #if defined(CONFIG_AP_MODE) || defined(CONFIG_CORE_TXSC)
7984 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
7985 #endif
7986
7987 if (core_tx_update_xmitframe(padapter, pxframe, pskb, psta, RTW_TX_OS) == FAIL)
7988 goto abort_tx_per_packet;
7989
7990 #ifdef CONFIG_80211N_HT
7991 if ((pxframe->attrib.ether_type != 0x0806)
7992 && (pxframe->attrib.ether_type != 0x888e)
7993 && (pxframe->attrib.dhcp_pkt != 1))
7994 rtw_issue_addbareq_cmd(padapter, pxframe, _TRUE);
7995 #endif /* CONFIG_80211N_HT */
7996
7997 #ifdef CONFIG_TX_AMSDU_SW_MODE
7998 if (core_tx_amsdu_enqueue(padapter, pxframe) == _TRUE)
7999 return SUCCESS; /* queued to do AMSDU */
8000 #endif
8001
8002 if (core_tx_prepare_phl(padapter, pxframe) == FAIL)
8003 goto abort_tx_per_packet;
8004
8005 #ifdef CONFIG_AP_MODE
8006 _rtw_spinlock_bh(&pxmitpriv->lock);
8007 if (xmitframe_enqueue_for_sleeping_sta(padapter, pxframe) == _TRUE) {
8008 _rtw_spinunlock_bh(&pxmitpriv->lock);
8009 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue);
8010 return SUCCESS;
8011 }
8012 _rtw_spinunlock_bh(&pxmitpriv->lock);
8013 #endif
8014
8015 #if !defined(CONFIG_CORE_TXSC) || defined(CONFIG_RTW_DATA_BMC_TO_UC)
8016 if (core_tx_call_phl(padapter, pxframe, NULL) == SUCCESS)
8017 #endif
8018 return SUCCESS;
8019
8020
8021 abort_tx_per_packet:
8022 if (pxframe == NULL) {
8023 rtw_os_pkt_complete(padapter, *pskb);
8024 } else {
8025 if (pxframe->pkt == NULL)
8026 rtw_os_pkt_complete(padapter, *pskb);
8027 core_tx_free_xmitframe(padapter, pxframe);
8028 }
8029
8030 return FAIL;
8031 }
8032
8033 s32 rtw_core_tx(_adapter *padapter, struct sk_buff **pskb, struct sta_info *psta, u16 os_qid)
8034 {
8035 struct xmit_frame *pxframe = NULL;
8036 #if defined(CONFIG_AP_MODE) || defined(CONFIG_CORE_TXSC)
8037 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8038 #endif
8039 s32 res = 0;
8040 #ifdef CONFIG_CORE_TXSC
8041 struct txsc_pkt_entry txsc_pkt;
8042 #endif
8043
8044 #ifdef CONFIG_CORE_TXSC
8045 if (txsc_get_sc_cached_entry(padapter, *pskb, &txsc_pkt) == _SUCCESS)
8046 goto core_txsc;
8047 #endif
8048
8049 if (core_tx_alloc_xmitframe(padapter, &pxframe, os_qid) == FAIL)
8050 goto abort_core_tx;
8051
8052 if (core_tx_update_pkt(padapter, pxframe, pskb) == FAIL)
8053 goto abort_core_tx;
8054
8055 #if defined(CONFIG_AP_MODE)
8056 if (MLME_STATE(padapter) & WIFI_AP_STATE) {
8057 _list f_list;
8058
8059 res = rtw_ap_addr_resolve(padapter, os_qid, pxframe, *pskb, &f_list);
8060 if (res == _FAIL)
8061 goto abort_core_tx;
8062
8063 #if defined(CONFIG_RTW_WDS) || CONFIG_RTW_DATA_BMC_TO_UC
8064 if (!rtw_is_list_empty(&f_list)) {
8065 _list *list = get_next(&f_list);
8066 struct xmit_frame *fframe;
8067
8068 while ((rtw_end_of_queue_search(&f_list, list)) == _FALSE) {
8069 fframe = LIST_CONTAINOR(list, struct xmit_frame, list);
8070 list = get_next(list);
8071 rtw_list_delete(&fframe->list);
8072
8073 if (res == RTW_ORI_NO_NEED && rtw_is_list_empty(&f_list)) {
8074 fframe->pkt = pxframe->pkt; /*last frame */
8075 pxframe->pkt = NULL;
8076 } else {
8077 fframe->pkt = rtw_skb_copy(*pskb);
8078 }
8079
8080 if (!fframe->pkt) {
8081 if (res == RTW_ORI_NO_NEED && IS_MCAST(pxframe->attrib.dst))
8082 res = _SUCCESS;
8083
8084 core_tx_free_xmitframe(padapter, fframe);
8085 continue;
8086 }
8087
8088 core_tx_per_packet(padapter, fframe, &fframe->pkt, NULL);
8089 }
8090 }
8091 #endif
8092
8093 if (res == RTW_ORI_NO_NEED) {
8094 core_tx_free_xmitframe(padapter, pxframe);
8095 return SUCCESS;
8096 }
8097 }
8098 #endif /* defined(CONFIG_AP_MODE) */
8099 #ifdef CONFIG_LAYER2_ROAMING
8100 if ((padapter->mlmepriv.roam_network) && ((*pskb)->protocol != htons(0x888e))) { /* never enqueue EAPOL frames */
8101 pxframe->pkt = *pskb;
8102 rtw_list_delete(&pxframe->list);
8103 _rtw_spinlock_bh(&pxmitpriv->rpkt_queue.lock);
8104 rtw_list_insert_tail(&(pxframe->list), get_list_head(&(pxmitpriv->rpkt_queue)));
8105 _rtw_spinunlock_bh(&pxmitpriv->rpkt_queue.lock);
8106 return SUCCESS;
8107 }
8108 #endif
8109
8110 res = core_tx_per_packet(padapter, pxframe, pskb, psta);
8111 if (res == FAIL)
8112 return FAIL;
8113
8114 #ifdef CONFIG_CORE_TXSC
8115 txsc_add_sc_cache_entry(padapter, pxframe, &txsc_pkt);
8116
8117 core_txsc:
8118
8119 if (txsc_apply_sc_cached_entry(padapter, &txsc_pkt) == _FAIL)
8120 goto abort_core_tx;
8121
8122 if (core_tx_call_phl(padapter, pxframe, &txsc_pkt) == FAIL)
8123 goto abort_core_tx;
8124 #endif
8125
8126 return SUCCESS;
8127
8128 abort_core_tx:
8129 if (pxframe == NULL) {
8130 #ifdef CONFIG_CORE_TXSC
8131 if (txsc_pkt.ptxreq)
8132 txsc_free_txreq(padapter, txsc_pkt.ptxreq);
8133 else
8134 #endif
8135 rtw_os_pkt_complete(padapter, *pskb);
8136 } else {
8137 if (pxframe->pkt == NULL)
8138 rtw_os_pkt_complete(padapter, *pskb);
8139
8140 core_tx_free_xmitframe(padapter, pxframe);
8141 }
8142
8143 return FAIL;
8144 }
8145
8146 enum rtw_phl_status
8147 rtw_core_tx_recycle(void *drv_priv, struct rtw_xmit_req *txreq)
8148 {
8149 _adapter *padapter = NULL;
8150 struct xmit_frame *pxframe = NULL;
8151 #ifdef CONFIG_CORE_TXSC
8152 struct xmit_txreq_buf *ptxreq_buf = NULL;
8153 #endif
8154
8155 if (txreq->os_priv == NULL) {
8156 RTW_ERR("NULL txreq!\n");
8157 return RTW_PHL_STATUS_FAILURE;
8158 }
8159
8160 #ifdef CONFIG_CORE_TXSC
8161 if (txreq->treq_type == RTW_PHL_TREQ_TYPE_CORE_TXSC) {
8162 ptxreq_buf = (struct xmit_txreq_buf *)txreq->os_priv;
8163 padapter = ptxreq_buf->adapter;
8164 #ifdef RTW_PHL_DBG_CMD
8165 core_add_record(padapter, REC_TX_PHL_RCC, txreq);
8166 #endif
8167 txsc_free_txreq(padapter, txreq);
8168 return RTW_PHL_STATUS_SUCCESS;
8169 }
8170 #endif /* CONFIG_CORE_TXSC */
8171
8172 pxframe = (struct xmit_frame *)txreq->os_priv;
8173 if (pxframe == NULL) {
8174 RTW_ERR("%s: NULL xmitframe !!\n", __func__);
8175 rtw_warn_on(1);
8176 return RTW_PHL_STATUS_FAILURE;
8177 }
8178
8179 padapter = pxframe->padapter;
8180
8181 #ifdef RTW_PHL_DBG_CMD
8182 core_add_record(padapter, REC_TX_PHL_RCC, txreq);
8183 #endif
8184
8185 #ifdef CONFIG_PCI_HCI
8186 core_recycle_txreq_phyaddr(padapter, txreq);
8187 #endif
8188 core_tx_free_xmitframe(padapter, pxframe);
8189
8190 return RTW_PHL_STATUS_SUCCESS;
8191 }
8192 #endif
8193
8194
8195 #ifdef CONFIG_TDLS
8196 sint xmitframe_enqueue_for_tdls_sleeping_sta(_adapter *padapter, struct xmit_frame *pxmitframe)
8197 {
8198 sint ret = _FALSE;
8199
8200 struct sta_info *ptdls_sta = NULL;
8201 struct sta_priv *pstapriv = &padapter->stapriv;
8202 struct pkt_attrib *pattrib = &pxmitframe->attrib;
8203 struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv);
8204 int i;
8205
8206 ptdls_sta = rtw_get_stainfo(pstapriv, pattrib->dst);
8207 if (ptdls_sta == NULL)
8208 return ret;
8209 else if (ptdls_sta->tdls_sta_state & TDLS_LINKED_STATE) {
8210
8211 if (pattrib->triggered == 1) {
8212 ret = _TRUE;
8213 return ret;
8214 }
8215
8216 _rtw_spinlock_bh(&ptdls_sta->sleep_q.lock);
8217
8218 if (ptdls_sta->state & WIFI_SLEEP_STATE) {
8219 rtw_list_delete(&pxmitframe->list);
8220
8221 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8222
8223 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&ptdls_sta->sleep_q));
8224
8225 ptdls_sta->sleepq_len++;
8226 ptdls_sta->sleepq_ac_len++;
8227
8228 /* indicate 4-AC queue bit in TDLS peer traffic indication */
8229 switch (pattrib->priority) {
8230 case 1:
8231 case 2:
8232 ptdls_sta->uapsd_bk |= BIT(1);
8233 break;
8234 case 4:
8235 case 5:
8236 ptdls_sta->uapsd_vi |= BIT(1);
8237 break;
8238 case 6:
8239 case 7:
8240 ptdls_sta->uapsd_vo |= BIT(1);
8241 break;
8242 case 0:
8243 case 3:
8244 default:
8245 ptdls_sta->uapsd_be |= BIT(1);
8246 break;
8247 }
8248
8249 /* Transmit TDLS PTI via AP */
8250 if (ptdls_sta->sleepq_len == 1)
8251 rtw_tdls_cmd(padapter, ptdls_sta->phl_sta->mac_addr, TDLS_ISSUE_PTI);
8252
8253 ret = _TRUE;
8254 }
8255
8256 _rtw_spinunlock_bh(&ptdls_sta->sleep_q.lock);
8257 }
8258
8259 return ret;
8260
8261 }
8262 #endif /* CONFIG_TDLS */
8263
8264 #define RTW_HIQ_FILTER_ALLOW_ALL 0
8265 #define RTW_HIQ_FILTER_ALLOW_SPECIAL 1
8266 #define RTW_HIQ_FILTER_DENY_ALL 2
8267
8268 inline bool xmitframe_hiq_filter(struct xmit_frame *xmitframe)
8269 {
8270 bool allow = _FALSE;
8271 _adapter *adapter = xmitframe->padapter;
8272 struct registry_priv *registry = &adapter->registrypriv;
8273
8274 if (adapter->registrypriv.wifi_spec == 1)
8275 allow = _TRUE;
8276 else if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_SPECIAL) {
8277
8278 struct pkt_attrib *attrib = &xmitframe->attrib;
8279
8280 if (attrib->ether_type == 0x0806
8281 || attrib->ether_type == 0x888e
8282 #ifdef CONFIG_WAPI_SUPPORT
8283 || attrib->ether_type == 0x88B4
8284 #endif
8285 || attrib->dhcp_pkt
8286 ) {
8287 if (0)
8288 RTW_INFO(FUNC_ADPT_FMT" ether_type:0x%04x%s\n", FUNC_ADPT_ARG(xmitframe->padapter)
8289 , attrib->ether_type, attrib->dhcp_pkt ? " DHCP" : "");
8290 allow = _TRUE;
8291 }
8292 } else if (registry->hiq_filter == RTW_HIQ_FILTER_ALLOW_ALL)
8293 allow = _TRUE;
8294 else if (registry->hiq_filter == RTW_HIQ_FILTER_DENY_ALL)
8295 allow = _FALSE;
8296 else
8297 rtw_warn_on(1);
8298
8299 return allow;
8300 }
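/*
 * Quick reference for the hiq_filter modes handled above (values defined just
 * before xmitframe_hiq_filter()):
 *   RTW_HIQ_FILTER_ALLOW_ALL     - any BC/MC frame may use the high queue
 *   RTW_HIQ_FILTER_ALLOW_SPECIAL - only ARP (0x0806), EAPOL (0x888e),
 *                                  WAPI (0x88B4, when enabled) and DHCP frames
 *   RTW_HIQ_FILTER_DENY_ALL      - nothing is allowed
 * wifi_spec == 1 bypasses the filter and allows everything.
 */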
8301
8302 #if defined(CONFIG_AP_MODE) || defined(CONFIG_TDLS)
8303
8304 sint xmitframe_enqueue_for_sleeping_sta(_adapter *padapter, struct xmit_frame *pxmitframe)
8305 {
8306 sint ret = _FALSE;
8307 struct sta_info *psta = NULL;
8308 struct sta_priv *pstapriv = &padapter->stapriv;
8309 struct pkt_attrib *pattrib = &pxmitframe->attrib;
8310 sint bmcst = IS_MCAST(pattrib->ra);
8311 bool update_tim = _FALSE;
8312 #ifdef CONFIG_TDLS
8313
8314 if (padapter->tdlsinfo.link_established == _TRUE)
8315 ret = xmitframe_enqueue_for_tdls_sleeping_sta(padapter, pxmitframe);
8316 #endif /* CONFIG_TDLS */
8317
8318 if (!MLME_IS_AP(padapter) && !MLME_IS_MESH(padapter)) {
8319 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_fwstate);
8320 return ret;
8321 }
8322 /*
8323 if(pattrib->psta)
8324 {
8325 psta = pattrib->psta;
8326 }
8327 else
8328 {
8329 RTW_INFO("%s, call rtw_get_stainfo()\n", __func__);
8330 psta=rtw_get_stainfo(pstapriv, pattrib->ra);
8331 }
8332 */
8333 psta = rtw_get_stainfo(&padapter->stapriv, pattrib->ra);
8334 if (pattrib->psta != psta) {
8335 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_sta);
8336 RTW_INFO("%s, pattrib->psta(%p) != psta(%p)\n", __func__, pattrib->psta, psta);
8337 return _FALSE;
8338 }
8339
8340 if (psta == NULL) {
8341 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_nosta);
8342 RTW_INFO("%s, psta==NUL\n", __func__);
8343 return _FALSE;
8344 }
8345
8346 if (!(psta->state & WIFI_ASOC_STATE)) {
8347 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_link);
8348 RTW_INFO("%s, psta->state(0x%x) != WIFI_ASOC_STATE\n", __func__, psta->state);
8349 return _FALSE;
8350 }
8351
8352 if (pattrib->triggered == 1) {
8353 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_warn_trigger);
8354 /* RTW_INFO("directly xmit pspoll_triggered packet\n"); */
8355
8356 /* pattrib->triggered=0; */
8357 if (bmcst && xmitframe_hiq_filter(pxmitframe) == _TRUE)
8358 pattrib->qsel = rtw_hal_get_qsel(padapter, QSLT_HIGH_ID);/* HIQ */
8359
8360 return ret;
8361 }
8362
8363
8364 if (bmcst) {
8365 #ifndef BMC_ON_HIQ
8366 _rtw_spinlock_bh(&psta->sleep_q.lock);
8367
8368 if (rtw_tim_map_anyone_be_set(padapter, pstapriv->sta_dz_bitmap)) { /* if any sta is in ps mode */
8369 /* pattrib->qsel = rtw_hal_get_qsel(padapter,QSLT_HIGH_ID);*/ /* HIQ */
8370
8371 rtw_list_delete(&pxmitframe->list);
8372
8373 /*_rtw_spinlock_bh(&psta->sleep_q.lock);*/
8374
8375 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
8376
8377 psta->sleepq_len++;
8378
8379 if (!(rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, 0)))
8380 update_tim = _TRUE;
8381
8382 rtw_tim_map_set(padapter, pstapriv->tim_bitmap, 0);
8383 rtw_tim_map_set(padapter, pstapriv->sta_dz_bitmap, 0);
8384
8385 /* RTW_INFO("enqueue, sq_len=%d\n", psta->sleepq_len); */
8386 /* RTW_INFO_DUMP("enqueue, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8387 if (update_tim == _TRUE) {
8388 if (is_broadcast_mac_addr(pattrib->ra))
8389 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "buffer BC");
8390 else
8391 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "buffer MC");
8392 } else
8393 chk_bmc_sleepq_cmd(padapter);
8394
8395 /*_rtw_spinunlock_bh(&psta->sleep_q.lock);*/
8396
8397 ret = _TRUE;
8398
8399 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_mcast);
8400 }
8401
8402 _rtw_spinunlock_bh(&psta->sleep_q.lock);
8403 #endif
8404 return ret;
8405
8406 }
8407
8408
8409 _rtw_spinlock_bh(&psta->sleep_q.lock);
8410
8411 if (psta->state & WIFI_SLEEP_STATE) {
8412 u8 wmmps_ac = 0;
8413
8414 if (rtw_tim_map_is_set(padapter, pstapriv->sta_dz_bitmap, psta->phl_sta->aid)) {
8415 rtw_list_delete(&pxmitframe->list);
8416
8417 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8418
8419 rtw_list_insert_tail(&pxmitframe->list, get_list_head(&psta->sleep_q));
8420
8421 psta->sleepq_len++;
8422
8423 switch (pattrib->priority) {
8424 case 1:
8425 case 2:
8426 wmmps_ac = psta->uapsd_bk & BIT(0);
8427 break;
8428 case 4:
8429 case 5:
8430 wmmps_ac = psta->uapsd_vi & BIT(0);
8431 break;
8432 case 6:
8433 case 7:
8434 wmmps_ac = psta->uapsd_vo & BIT(0);
8435 break;
8436 case 0:
8437 case 3:
8438 default:
8439 wmmps_ac = psta->uapsd_be & BIT(0);
8440 break;
8441 }
8442
8443 if (wmmps_ac)
8444 psta->sleepq_ac_len++;
8445
8446 if (((psta->has_legacy_ac) && (!wmmps_ac)) || ((!psta->has_legacy_ac) && (wmmps_ac))) {
8447 if (!(rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid)))
8448 update_tim = _TRUE;
8449
8450 rtw_tim_map_set(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid);
8451
8452 /* RTW_INFO("enqueue, sq_len=%d\n", psta->sleepq_len); */
8453 /* RTW_INFO_DUMP("enqueue, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8454
8455 if (update_tim == _TRUE) {
8456 /* RTW_INFO("sleepq_len==1, update BCNTIM\n"); */
8457 /* update BCN for TIM IE */
8458 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "buffer UC");
8459 }
8460 }
8461
8462 /* _rtw_spinunlock_bh(&psta->sleep_q.lock); */
8463
8464 /* if(psta->sleepq_len > (NR_XMITFRAME>>3)) */
8465 /* { */
8466 /* wakeup_sta_to_xmit(padapter, psta); */
8467 /* } */
8468
8469 ret = _TRUE;
8470
8471 DBG_COUNTER(padapter->tx_logs.core_tx_ap_enqueue_ucast);
8472 }
8473
8474 }
8475
8476 _rtw_spinunlock_bh(&psta->sleep_q.lock);
8477
8478 return ret;
8479
8480 }
8481
8482 static void dequeue_xmitframes_to_sleeping_queue(_adapter *padapter, struct sta_info *psta, _queue *pframequeue)
8483 {
8484 sint ret;
8485 _list *plist, *phead;
8486 u8 ac_index;
8487 struct tx_servq *ptxservq;
8488 struct pkt_attrib *pattrib;
8489 struct xmit_frame *pxmitframe;
8490 struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
8491
8492 phead = get_list_head(pframequeue);
8493 plist = get_next(phead);
8494
8495 while (rtw_end_of_queue_search(phead, plist) == _FALSE) {
8496 pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
8497
8498 plist = get_next(plist);
8499
8500 pattrib = &pxmitframe->attrib;
8501
8502 pattrib->triggered = 0;
8503
8504 ret = xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe);
8505
8506 if (_TRUE == ret) {
8507 ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
8508
8509 ptxservq->qcnt--;
8510 phwxmits[ac_index].accnt--;
8511 } else {
8512 /* RTW_INFO("xmitframe_enqueue_for_sleeping_sta return _FALSE\n"); */
8513 }
8514
8515 }
8516
8517 }
8518
8519 void stop_sta_xmit(_adapter *padapter, struct sta_info *psta)
8520 {
8521 struct sta_info *psta_bmc;
8522 struct sta_xmit_priv *pstaxmitpriv;
8523 struct sta_priv *pstapriv = &padapter->stapriv;
8524 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8525
8526 pstaxmitpriv = &psta->sta_xmitpriv;
8527
8528 /* for BC/MC Frames */
8529 psta_bmc = rtw_get_bcmc_stainfo(padapter);
8530 if (!psta_bmc)
8531 rtw_warn_on(1);
8532
8533 _rtw_spinlock_bh(&pxmitpriv->lock);
8534
8535 psta->state |= WIFI_SLEEP_STATE;
8536
8537 #ifdef CONFIG_TDLS
8538 if (!(psta->tdls_sta_state & TDLS_LINKED_STATE))
8539 #endif /* CONFIG_TDLS */
8540 rtw_tim_map_set(padapter, pstapriv->sta_dz_bitmap, psta->phl_sta->aid);
8541
8542 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vo_q.sta_pending);
8543 rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
8544 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->vi_q.sta_pending);
8545 rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
8546 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->be_q.sta_pending);
8547 rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
8548 dequeue_xmitframes_to_sleeping_queue(padapter, psta, &pstaxmitpriv->bk_q.sta_pending);
8549 rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
8550
8551 if (psta_bmc != NULL
8552 #ifdef CONFIG_TDLS
8553 && !(psta->tdls_sta_state & TDLS_LINKED_STATE)
8554 #endif
8555 ) {
8556 /* for BC/MC Frames */
8557 #ifndef BMC_ON_HIQ
8558 pstaxmitpriv = &psta_bmc->sta_xmitpriv;
8559 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->vo_q.sta_pending);
8560 rtw_list_delete(&(pstaxmitpriv->vo_q.tx_pending));
8561 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->vi_q.sta_pending);
8562 rtw_list_delete(&(pstaxmitpriv->vi_q.tx_pending));
8563 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->be_q.sta_pending);
8564 rtw_list_delete(&(pstaxmitpriv->be_q.tx_pending));
8565 dequeue_xmitframes_to_sleeping_queue(padapter, psta_bmc, &pstaxmitpriv->bk_q.sta_pending);
8566 rtw_list_delete(&(pstaxmitpriv->bk_q.tx_pending));
8567 #endif
8568 }
8569 _rtw_spinunlock_bh(&pxmitpriv->lock);
8570
8571
8572 }
8573
8574 void wakeup_sta_to_xmit(_adapter *padapter, struct sta_info *psta)
8575 {
8576 u8 update_mask = 0, wmmps_ac = 0;
8577 struct sta_info *psta_bmc;
8578 _list *xmitframe_plist, *xmitframe_phead;
8579 struct xmit_frame *pxmitframe = NULL;
8580 struct sta_priv *pstapriv = &padapter->stapriv;
8581 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8582
8583 psta_bmc = rtw_get_bcmc_stainfo(padapter);
8584
8585
8586 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8587 _rtw_spinlock_bh(&pxmitpriv->lock);
8588
8589 xmitframe_phead = get_list_head(&psta->sleep_q);
8590 xmitframe_plist = get_next(xmitframe_phead);
8591
8592 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
8593 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
8594
8595 xmitframe_plist = get_next(xmitframe_plist);
8596
8597 rtw_list_delete(&pxmitframe->list);
8598
8599 switch (pxmitframe->attrib.priority) {
8600 case 1:
8601 case 2:
8602 wmmps_ac = psta->uapsd_bk & BIT(1);
8603 break;
8604 case 4:
8605 case 5:
8606 wmmps_ac = psta->uapsd_vi & BIT(1);
8607 break;
8608 case 6:
8609 case 7:
8610 wmmps_ac = psta->uapsd_vo & BIT(1);
8611 break;
8612 case 0:
8613 case 3:
8614 default:
8615 wmmps_ac = psta->uapsd_be & BIT(1);
8616 break;
8617 }
8618
8619 psta->sleepq_len--;
8620 if (psta->sleepq_len > 0)
8621 pxmitframe->attrib.mdata = 1;
8622 else
8623 pxmitframe->attrib.mdata = 0;
8624
8625 if (wmmps_ac) {
8626 psta->sleepq_ac_len--;
8627 if (psta->sleepq_ac_len > 0) {
8628 pxmitframe->attrib.mdata = 1;
8629 pxmitframe->attrib.eosp = 0;
8630 } else {
8631 pxmitframe->attrib.mdata = 0;
8632 pxmitframe->attrib.eosp = 1;
8633 }
8634 }
8635
8636 pxmitframe->attrib.triggered = 1;
8637
8638 /*
8639 _rtw_spinunlock_bh(&psta->sleep_q.lock);
8640 //rtw_intf_data_xmit
8641 if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE)
8642 {
8643 rtw_os_xmit_complete(padapter, pxmitframe);
8644 }
8645 _rtw_spinlock_bh(&psta->sleep_q.lock);
8646 */
8647 rtw_intf_xmitframe_enqueue(padapter, pxmitframe);
8648
8649
8650 }
8651
8652 if (psta->sleepq_len == 0) {
8653 #ifdef CONFIG_TDLS
8654 if (psta->tdls_sta_state & TDLS_LINKED_STATE) {
8655 if (psta->state & WIFI_SLEEP_STATE)
8656 psta->state ^= WIFI_SLEEP_STATE;
8657
8658 _rtw_spinunlock_bh(&pxmitpriv->lock);
8659 return;
8660 }
8661 #endif /* CONFIG_TDLS */
8662
8663 if (rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid)) {
8664 /* RTW_INFO("wakeup to xmit, qlen==0\n"); */
8665 /* RTW_INFO_DUMP("update_BCNTIM, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8666 /* update BCN for TIM IE */
8667 /* update_BCNTIM(padapter); */
8668 update_mask = BIT(0);
8669 }
8670
8671 rtw_tim_map_clear(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid);
8672
8673 if (psta->state & WIFI_SLEEP_STATE)
8674 psta->state ^= WIFI_SLEEP_STATE;
8675
8676 if (psta->state & WIFI_STA_ALIVE_CHK_STATE) {
8677 RTW_INFO("%s alive check\n", __func__);
8678 psta->expire_to = pstapriv->expire_to;
8679 psta->state ^= WIFI_STA_ALIVE_CHK_STATE;
8680 }
8681
8682 rtw_tim_map_clear(padapter, pstapriv->sta_dz_bitmap, psta->phl_sta->aid);
8683 }
8684
8685 /* for BC/MC Frames */
8686 if (!psta_bmc)
8687 goto _exit;
8688
8689 if (!(rtw_tim_map_anyone_be_set_exclude_aid0(padapter, pstapriv->sta_dz_bitmap))) { /* no sta is in ps mode */
8690 xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
8691 xmitframe_plist = get_next(xmitframe_phead);
8692
8693 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
8694 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
8695
8696 xmitframe_plist = get_next(xmitframe_plist);
8697
8698 rtw_list_delete(&pxmitframe->list);
8699
8700 psta_bmc->sleepq_len--;
8701 if (psta_bmc->sleepq_len > 0)
8702 pxmitframe->attrib.mdata = 1;
8703 else
8704 pxmitframe->attrib.mdata = 0;
8705
8706
8707 pxmitframe->attrib.triggered = 1;
8708 /*
8709 _rtw_spinunlock_bh(&psta_bmc->sleep_q.lock);
8710 //rtw_intf_data_xmit
8711 if(rtw_hal_xmit(padapter, pxmitframe) == _TRUE)
8712 {
8713 rtw_os_xmit_complete(padapter, pxmitframe);
8714 }
8715 _rtw_spinlock_bh(&psta_bmc->sleep_q.lock);
8716
8717 */
8718 rtw_intf_xmitframe_enqueue(padapter, pxmitframe);
8719
8720 }
8721
8722 if (psta_bmc->sleepq_len == 0) {
8723 if (rtw_tim_map_is_set(padapter, pstapriv->tim_bitmap, 0)) {
8724 /* RTW_INFO("wakeup to xmit, qlen==0\n"); */
8725 /* RTW_INFO_DUMP("update_BCNTIM, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8726 /* update BCN for TIM IE */
8727 /* update_BCNTIM(padapter); */
8728 update_mask |= BIT(1);
8729 }
8730 rtw_tim_map_clear(padapter, pstapriv->tim_bitmap, 0);
8731 rtw_tim_map_clear(padapter, pstapriv->sta_dz_bitmap, 0);
8732 }
8733
8734 }
8735
8736 _exit:
8737
8738 /* _rtw_spinunlock_bh(&psta_bmc->sleep_q.lock); */
8739 _rtw_spinunlock_bh(&pxmitpriv->lock);
8740
8741 if (update_mask) {
8742 /* update_BCNTIM(padapter); */
8743 if ((update_mask & (BIT(0) | BIT(1))) == (BIT(0) | BIT(1)))
8744 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "clear UC&BMC");
8745 else if ((update_mask & BIT(1)) == BIT(1))
8746 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "clear BMC");
8747 else
8748 _update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0, "clear UC");
8749 }
8750
8751 }
8752
8753 void xmit_delivery_enabled_frames(_adapter *padapter, struct sta_info *psta)
8754 {
8755 u8 wmmps_ac = 0;
8756 _list *xmitframe_plist, *xmitframe_phead;
8757 struct xmit_frame *pxmitframe = NULL;
8758 struct sta_priv *pstapriv = &padapter->stapriv;
8759 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8760
8761
8762 /* _rtw_spinlock_bh(&psta->sleep_q.lock); */
8763 _rtw_spinlock_bh(&pxmitpriv->lock);
8764
8765 xmitframe_phead = get_list_head(&psta->sleep_q);
8766 xmitframe_plist = get_next(xmitframe_phead);
8767
8768 while ((rtw_end_of_queue_search(xmitframe_phead, xmitframe_plist)) == _FALSE) {
8769 pxmitframe = LIST_CONTAINOR(xmitframe_plist, struct xmit_frame, list);
8770
8771 xmitframe_plist = get_next(xmitframe_plist);
8772
8773 switch (pxmitframe->attrib.priority) {
8774 case 1:
8775 case 2:
8776 wmmps_ac = psta->uapsd_bk & BIT(1);
8777 break;
8778 case 4:
8779 case 5:
8780 wmmps_ac = psta->uapsd_vi & BIT(1);
8781 break;
8782 case 6:
8783 case 7:
8784 wmmps_ac = psta->uapsd_vo & BIT(1);
8785 break;
8786 case 0:
8787 case 3:
8788 default:
8789 wmmps_ac = psta->uapsd_be & BIT(1);
8790 break;
8791 }
8792
8793 if (!wmmps_ac)
8794 continue;
8795
8796 rtw_list_delete(&pxmitframe->list);
8797
8798 psta->sleepq_len--;
8799 psta->sleepq_ac_len--;
8800
8801 if (psta->sleepq_ac_len > 0) {
8802 pxmitframe->attrib.mdata = 1;
8803 pxmitframe->attrib.eosp = 0;
8804 } else {
8805 pxmitframe->attrib.mdata = 0;
8806 pxmitframe->attrib.eosp = 1;
8807 }
8808
8809 pxmitframe->attrib.triggered = 1;
8810 rtw_intf_xmitframe_enqueue(padapter, pxmitframe);
8811
8812 if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
8813 #ifdef CONFIG_TDLS
8814 if (psta->tdls_sta_state & TDLS_LINKED_STATE) {
8815 /* _rtw_spinunlock_bh(&psta->sleep_q.lock); */
8816 goto exit;
8817 }
8818 #endif /* CONFIG_TDLS */
8819 rtw_tim_map_clear(padapter, pstapriv->tim_bitmap, psta->phl_sta->aid);
8820
8821 /* RTW_INFO("wakeup to xmit, qlen==0\n"); */
8822 /* RTW_INFO_DUMP("update_BCNTIM, tim=", pstapriv->tim_bitmap, pstapriv->aid_bmp_len); */
8823 /* update BCN for TIM IE */
8824 /* update_BCNTIM(padapter); */
8825 rtw_update_beacon(padapter, _TIM_IE_, NULL, _TRUE, 0);
8826 /* update_mask = BIT(0); */
8827 }
8828
8829 }
8830
8831 #ifdef CONFIG_TDLS
8832 exit:
8833 #endif
8834 /* _rtw_spinunlock_bh(&psta->sleep_q.lock); */
8835 _rtw_spinunlock_bh(&pxmitpriv->lock);
8836
8837 return;
8838 }
8839
8840 #endif /* defined(CONFIG_AP_MODE) || defined(CONFIG_TDLS) */
8841
8842 #if 0 /*#ifdef CONFIG_XMIT_THREAD_MODE*/
8843 void enqueue_pending_xmitbuf(
8844 struct xmit_priv *pxmitpriv,
8845 struct xmit_buf *pxmitbuf)
8846 {
8847 _queue *pqueue;
8848 _adapter *pri_adapter = pxmitpriv->adapter;
8849
8850 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8851
8852 _rtw_spinlock_bh(&pqueue->lock);
8853 rtw_list_delete(&pxmitbuf->list);
8854 rtw_list_insert_tail(&pxmitbuf->list, get_list_head(pqueue));
8855 _rtw_spinunlock_bh(&pqueue->lock);
8856
8857 #if defined(CONFIG_SDIO_HCI) && defined(CONFIG_CONCURRENT_MODE)
8858 pri_adapter = GET_PRIMARY_ADAPTER(pri_adapter);
8859 #endif /*SDIO_HCI + CONCURRENT*/
8860 _rtw_up_sema(&(pri_adapter->xmitpriv.xmit_sema));
8861 }
8862
8863 void enqueue_pending_xmitbuf_to_head(
8864 struct xmit_priv *pxmitpriv,
8865 struct xmit_buf *pxmitbuf)
8866 {
8867 _queue *pqueue = &pxmitpriv->pending_xmitbuf_queue;
8868
8869 _rtw_spinlock_bh(&pqueue->lock);
8870 rtw_list_delete(&pxmitbuf->list);
8871 rtw_list_insert_head(&pxmitbuf->list, get_list_head(pqueue));
8872 _rtw_spinunlock_bh(&pqueue->lock);
8873 }
8874
8875 struct xmit_buf *dequeue_pending_xmitbuf(
8876 struct xmit_priv *pxmitpriv)
8877 {
8878 struct xmit_buf *pxmitbuf;
8879 _queue *pqueue;
8880
8881
8882 pxmitbuf = NULL;
8883 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8884
8885 _rtw_spinlock_bh(&pqueue->lock);
8886
8887 if (_rtw_queue_empty(pqueue) == _FALSE) {
8888 _list *plist, *phead;
8889
8890 phead = get_list_head(pqueue);
8891 plist = get_next(phead);
8892 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
8893 rtw_list_delete(&pxmitbuf->list);
8894 }
8895
8896 _rtw_spinunlock_bh(&pqueue->lock);
8897
8898 return pxmitbuf;
8899 }
8900
8901 static struct xmit_buf *dequeue_pending_xmitbuf_ext(
8902 struct xmit_priv *pxmitpriv)
8903 {
8904 struct xmit_buf *pxmitbuf;
8905 _queue *pqueue;
8906
8907 pxmitbuf = NULL;
8908 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8909
8910 _rtw_spinlock_bh(&pqueue->lock);
8911
8912 if (_rtw_queue_empty(pqueue) == _FALSE) {
8913 _list *plist, *phead;
8914
8915 phead = get_list_head(pqueue);
8916 plist = phead;
8917 do {
8918 plist = get_next(plist);
8919 if (plist == phead)
8920 break;
8921
8922 pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
8923
8924 if (pxmitbuf->buf_tag == XMITBUF_MGNT) {
8925 rtw_list_delete(&pxmitbuf->list);
8926 break;
8927 }
8928 pxmitbuf = NULL;
8929 } while (1);
8930 }
8931
8932 _rtw_spinunlock_bh(&pqueue->lock);
8933
8934 return pxmitbuf;
8935 }
8936
8937 struct xmit_buf *select_and_dequeue_pending_xmitbuf(_adapter *padapter)
8938 {
8939 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
8940 struct xmit_buf *pxmitbuf = NULL;
8941
8942 if (_TRUE == rtw_is_xmit_blocked(padapter))
8943 return pxmitbuf;
8944
8945 pxmitbuf = dequeue_pending_xmitbuf_ext(pxmitpriv);
8946 if (pxmitbuf == NULL && rtw_xmit_ac_blocked(padapter) != _TRUE)
8947 pxmitbuf = dequeue_pending_xmitbuf(pxmitpriv);
8948
8949 return pxmitbuf;
8950 }
8951
8952 sint check_pending_xmitbuf(
8953 struct xmit_priv *pxmitpriv)
8954 {
8955 _queue *pqueue;
8956 sint ret = _FALSE;
8957
8958 pqueue = &pxmitpriv->pending_xmitbuf_queue;
8959
8960 _rtw_spinlock_bh(&pqueue->lock);
8961
8962 if (_rtw_queue_empty(pqueue) == _FALSE)
8963 ret = _TRUE;
8964
8965 _rtw_spinunlock_bh(&pqueue->lock);
8966
8967 return ret;
8968 }
8969
8970 thread_return rtw_xmit_thread(thread_context context)
8971 {
8972 s32 err;
8973 _adapter *adapter;
8974 #ifdef RTW_XMIT_THREAD_HIGH_PRIORITY
8975 #ifdef PLATFORM_LINUX
8976 struct sched_param param = { .sched_priority = 1 };
8977
8978 sched_setscheduler(current, SCHED_FIFO, &param);
8979 #endif /* PLATFORM_LINUX */
8980 #endif /* RTW_XMIT_THREAD_HIGH_PRIORITY */
8981
8982 err = _SUCCESS;
8983 adapter = (_adapter *)context;
8984
8985 rtw_thread_enter("RTW_XMIT_THREAD");
8986
8987 do {
8988 err = rtw_intf_xmit_buf_handler(adapter);
8989 flush_signals_thread();
8990 } while (_SUCCESS == err);
8991
8992 RTW_INFO(FUNC_ADPT_FMT " Exit\n", FUNC_ADPT_ARG(adapter));
8993
8994 rtw_thread_wait_stop();
8995
8996 return 0;
8997 }
8998 #endif
8999
9000 #ifdef DBG_XMIT_BLOCK
9001 void dump_xmit_block(void *sel, _adapter *padapter)
9002 {
9003 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9004
9005 RTW_PRINT_SEL(sel, "[XMIT-BLOCK] xmit_block :0x%02x\n", dvobj->xmit_block);
9006 if (dvobj->xmit_block & XMIT_BLOCK_REDLMEM)
9007 RTW_PRINT_SEL(sel, "Reason:%s\n", "XMIT_BLOCK_REDLMEM");
9008 if (dvobj->xmit_block & XMIT_BLOCK_SUSPEND)
9009 RTW_PRINT_SEL(sel, "Reason:%s\n", "XMIT_BLOCK_SUSPEND");
9010 if (dvobj->xmit_block == XMIT_BLOCK_NONE)
9011 RTW_PRINT_SEL(sel, "Reason:%s\n", "XMIT_BLOCK_NONE");
9012 }
9013 void dump_xmit_block_info(void *sel, const char *fun_name, _adapter *padapter)
9014 {
9015 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9016
9017 RTW_INFO("\n"ADPT_FMT" call %s\n", ADPT_ARG(padapter), fun_name);
9018 dump_xmit_block(sel, padapter);
9019 }
9020 #define DBG_XMIT_BLOCK_DUMP(adapter) dump_xmit_block_info(RTW_DBGDUMP, __func__, adapter)
9021 #endif
9022
9023 void rtw_set_xmit_block(_adapter *padapter, enum XMIT_BLOCK_REASON reason)
9024 {
9025 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9026
9027 _rtw_spinlock_bh(&dvobj->xmit_block_lock);
9028 dvobj->xmit_block |= reason;
9029 _rtw_spinunlock_bh(&dvobj->xmit_block_lock);
9030
9031 #ifdef DBG_XMIT_BLOCK
9032 DBG_XMIT_BLOCK_DUMP(padapter);
9033 #endif
9034 }
9035
9036 void rtw_clr_xmit_block(_adapter *padapter, enum XMIT_BLOCK_REASON reason)
9037 {
9038 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9039
9040 _rtw_spinlock_bh(&dvobj->xmit_block_lock);
9041 dvobj->xmit_block &= ~reason;
9042 _rtw_spinunlock_bh(&dvobj->xmit_block_lock);
9043
9044 #ifdef DBG_XMIT_BLOCK
9045 DBG_XMIT_BLOCK_DUMP(padapter);
9046 #endif
9047 }
9048 bool rtw_is_xmit_blocked(_adapter *padapter)
9049 {
9050 struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
9051
9052 #ifdef DBG_XMIT_BLOCK
9053 DBG_XMIT_BLOCK_DUMP(padapter);
9054 #endif
9055 return ((dvobj->xmit_block) ? _TRUE : _FALSE);
9056 }
9057
9058 bool rtw_xmit_ac_blocked(_adapter *adapter)
9059 {
9060 struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
9061 struct rf_ctl_t *rfctl = adapter_to_rfctl(adapter);
9062 _adapter *iface;
9063 struct mlme_ext_priv *mlmeext;
9064 bool blocked = _FALSE;
9065 int i;
9066 #ifdef DBG_CONFIG_ERROR_DETECT
9067 #ifdef DBG_CONFIG_ERROR_RESET
9068 #ifdef CONFIG_USB_HCI
9069 if (rtw_hal_sreset_inprogress(adapter) == _TRUE) {
9070 blocked = _TRUE;
9071 goto exit;
9072 }
9073 #endif/* #ifdef CONFIG_USB_HCI */
9074 #endif/* #ifdef DBG_CONFIG_ERROR_RESET */
9075 #endif/* #ifdef DBG_CONFIG_ERROR_DETECT */
9076
9077 if (rfctl->offch_state != OFFCHS_NONE
9078 #if CONFIG_DFS
9079 || IS_RADAR_DETECTED(rfctl) || rfctl->csa_chandef.chan
9080 #endif
9081 ) {
9082 blocked = _TRUE;
9083 goto exit;
9084 }
9085
9086 for (i = 0; i < dvobj->iface_nums; i++) {
9087 iface = dvobj->padapters[i];
9088 mlmeext = &iface->mlmeextpriv;
9089
9090 /* check scan state */
9091 if (mlmeext_scan_state(mlmeext) != SCAN_DISABLE
9092 && mlmeext_scan_state(mlmeext) != SCAN_BACK_OP
9093 ) {
9094 blocked = _TRUE;
9095 goto exit;
9096 }
9097
9098 if (mlmeext_scan_state(mlmeext) == SCAN_BACK_OP
9099 && !mlmeext_chk_scan_backop_flags(mlmeext, SS_BACKOP_TX_RESUME)
9100 ) {
9101 blocked = _TRUE;
9102 goto exit;
9103 }
9104 }
9105
9106 exit:
9107 return blocked;
9108 }
9109
9110 #ifdef CONFIG_LAYER2_ROAMING
9111 /* dequeue and xmit the cached skbs during the roam procedure */
9112 void dequeuq_roam_pkt(_adapter *padapter)
9113 {
9114 struct xmit_frame *rframe;
9115 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
9116 _list *plist = NULL, *phead = NULL;
9117
9118 if (padapter->mlmepriv.roam_network) {
9119 padapter->mlmepriv.roam_network = NULL;
9120 _rtw_spinlock_bh(&pxmitpriv->rpkt_queue.lock);
9121 phead = get_list_head(&pxmitpriv->rpkt_queue);
9122 plist = get_next(phead);
9123 while ((rtw_end_of_queue_search(phead, plist)) == _FALSE) {
9124 rframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
9125 plist = get_next(plist);
9126 rtw_list_delete(&rframe->list);
9127 core_tx_per_packet(padapter, rframe, &rframe->pkt, NULL);
9128 }
9129 _rtw_spinunlock_bh(&pxmitpriv->rpkt_queue.lock);
9130 }
9131 }
9132 #endif
9133
9134 #ifdef CONFIG_TX_AMSDU
9135 void rtw_amsdu_vo_timeout_handler(void *FunctionContext)
9136 {
9137 _adapter *adapter = (_adapter *)FunctionContext;
9138
9139 adapter->xmitpriv.amsdu_vo_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9140
9141 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9142 }
9143
9144 void rtw_amsdu_vi_timeout_handler(void *FunctionContext)
9145 {
9146 _adapter *adapter = (_adapter *)FunctionContext;
9147
9148 adapter->xmitpriv.amsdu_vi_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9149
9150 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9151 }
9152
9153 void rtw_amsdu_be_timeout_handler(void *FunctionContext)
9154 {
9155 _adapter *adapter = (_adapter *)FunctionContext;
9156
9157 adapter->xmitpriv.amsdu_be_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9158
9159 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9160 }
9161
rtw_amsdu_bk_timeout_handler(void * FunctionContext)9162 void rtw_amsdu_bk_timeout_handler(void *FunctionContext)
9163 {
9164 _adapter *adapter = (_adapter *)FunctionContext;
9165
9166 adapter->xmitpriv.amsdu_bk_timeout = RTW_AMSDU_TIMER_TIMEOUT;
9167
9168 rtw_tasklet_hi_schedule(&adapter->xmitpriv.xmit_tasklet);
9169 }
9170
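/*
 * Added note: the switch statements below map a frame's user priority
 * (TID 0-7) onto the four A-MSDU timer slots following the usual 802.11
 * UP-to-AC mapping: UP 1/2 -> BK, UP 0/3 -> BE, UP 4/5 -> VI, UP 6/7 -> VO.
 */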
u8 rtw_amsdu_get_timer_status(_adapter *padapter, u8 priority)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u8 status = RTW_AMSDU_TIMER_UNSET;

	switch (priority) {
	case 1:
	case 2:
		status = pxmitpriv->amsdu_bk_timeout;
		break;
	case 4:
	case 5:
		status = pxmitpriv->amsdu_vi_timeout;
		break;
	case 6:
	case 7:
		status = pxmitpriv->amsdu_vo_timeout;
		break;
	case 0:
	case 3:
	default:
		status = pxmitpriv->amsdu_be_timeout;
		break;
	}
	return status;
}

void rtw_amsdu_set_timer_status(_adapter *padapter, u8 priority, u8 status)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	switch (priority) {
	case 1:
	case 2:
		pxmitpriv->amsdu_bk_timeout = status;
		break;
	case 4:
	case 5:
		pxmitpriv->amsdu_vi_timeout = status;
		break;
	case 6:
	case 7:
		pxmitpriv->amsdu_vo_timeout = status;
		break;
	case 0:
	case 3:
	default:
		pxmitpriv->amsdu_be_timeout = status;
		break;
	}
}

void rtw_amsdu_set_timer(_adapter *padapter, u8 priority)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	_timer *amsdu_timer = NULL;

	switch (priority) {
	case 1:
	case 2:
		amsdu_timer = &pxmitpriv->amsdu_bk_timer;
		break;
	case 4:
	case 5:
		amsdu_timer = &pxmitpriv->amsdu_vi_timer;
		break;
	case 6:
	case 7:
		amsdu_timer = &pxmitpriv->amsdu_vo_timer;
		break;
	case 0:
	case 3:
	default:
		amsdu_timer = &pxmitpriv->amsdu_be_timer;
		break;
	}
	_set_timer(amsdu_timer, 1);
}

void rtw_amsdu_cancel_timer(_adapter *padapter, u8 priority)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	_timer *amsdu_timer = NULL;

	switch (priority) {
	case 1:
	case 2:
		amsdu_timer = &pxmitpriv->amsdu_bk_timer;
		break;
	case 4:
	case 5:
		amsdu_timer = &pxmitpriv->amsdu_vi_timer;
		break;
	case 6:
	case 7:
		amsdu_timer = &pxmitpriv->amsdu_vo_timer;
		break;
	case 0:
	case 3:
	default:
		amsdu_timer = &pxmitpriv->amsdu_be_timer;
		break;
	}
	_cancel_timer_ex(amsdu_timer);
}
#endif /* CONFIG_TX_AMSDU */
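
/*
 * Illustrative only: the A-MSDU timer helpers above act as a per-AC flush
 * deadline - arm the timer when an aggregate is left pending, let the timeout
 * handler mark the AC and kick xmit_tasklet, and cancel the timer once the
 * aggregate has actually been sent. A minimal sketch, assuming a hypothetical
 * queue_for_amsdu() helper:
 */
#if 0	/* example sketch, not compiled */
static void example_amsdu_enqueue(_adapter *adapter, struct xmit_frame *frame, u8 up)
{
	queue_for_amsdu(adapter, frame, up);	/* hypothetical queueing helper */

	/* arm the per-AC flush timer if it is not already pending */
	if (rtw_amsdu_get_timer_status(adapter, up) == RTW_AMSDU_TIMER_UNSET)
		rtw_amsdu_set_timer(adapter, up);	/* ~1 ms, see _set_timer() above */
}
#endif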

#if 0 /*def DBG_TXBD_DESC_DUMP*/
static struct rtw_tx_desc_backup tx_backup[HW_QUEUE_ENTRY][TX_BAK_FRMAE_CNT];
static u8 backup_idx[HW_QUEUE_ENTRY];

void rtw_tx_desc_backup(_adapter *padapter, struct xmit_frame *pxmitframe, u8 desc_size, u8 hwq)
{
	u32 tmp32;
	u8 *pxmit_buf;

	if (rtw_hw_get_init_completed(adapter_to_dvobj(padapter)) == _FALSE)
		return;

	pxmit_buf = pxmitframe->pxmitbuf->pbuf;

	_rtw_memcpy(tx_backup[hwq][backup_idx[hwq]].tx_bak_desc, pxmit_buf, desc_size);
	_rtw_memcpy(tx_backup[hwq][backup_idx[hwq]].tx_bak_data_hdr, pxmit_buf+desc_size, TX_BAK_DATA_LEN);

#if 0 /*GEORGIA_TODO_REDEFINE_IO*/
	tmp32 = rtw_read32(padapter, get_txbd_rw_reg(hwq));
#else
	tmp32 = rtw_hal_get_txbd_rwreg(padapter);
#endif

	tx_backup[hwq][backup_idx[hwq]].tx_bak_rp = (tmp32>>16)&0xfff;
	tx_backup[hwq][backup_idx[hwq]].tx_bak_wp = tmp32&0xfff;

	tx_backup[hwq][backup_idx[hwq]].tx_desc_size = desc_size;

	backup_idx[hwq] = (backup_idx[hwq] + 1) % TX_BAK_FRMAE_CNT;
}

void rtw_tx_desc_backup_reset(void)
{
	int i, j;

	for (i = 0; i < HW_QUEUE_ENTRY; i++) {
		for (j = 0; j < TX_BAK_FRMAE_CNT; j++)
			_rtw_memset(&tx_backup[i][j], 0, sizeof(struct rtw_tx_desc_backup));

		backup_idx[i] = 0;
	}
}

u8 rtw_get_tx_desc_backup(_adapter *padapter, u8 hwq, struct rtw_tx_desc_backup **pbak)
{
	*pbak = &tx_backup[hwq][0];

	return backup_idx[hwq];
}
#endif

#ifdef CONFIG_PCI_TX_POLLING
void rtw_tx_poll_init(_adapter *padapter)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	_timer *timer = &pxmitpriv->tx_poll_timer;

	if (!is_primary_adapter(padapter))
		return;

	if (timer->function != NULL) {
		RTW_INFO("tx polling timer has been init.\n");
		return;
	}

	rtw_init_timer(timer, rtw_tx_poll_timeout_handler, padapter);
	rtw_tx_poll_timer_set(padapter, 1);
	RTW_INFO("Tx poll timer init!\n");
}

void rtw_tx_poll_timeout_handler(void *FunctionContext)
{
	_adapter *adapter = (_adapter *)FunctionContext;

	rtw_tx_poll_timer_set(adapter, 1);

	if (adapter->dvobj->hal_func.tx_poll_handler)
		adapter->dvobj->hal_func.tx_poll_handler(adapter);
	else
		RTW_WARN("hal ops: tx_poll_handler is NULL\n");
}

void rtw_tx_poll_timer_set(_adapter *padapter, u32 delay)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	_timer *timer = NULL;

	timer = &pxmitpriv->tx_poll_timer;
	_set_timer(timer, delay);
}

void rtw_tx_poll_timer_cancel(_adapter *padapter)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	_timer *timer = NULL;

	if (!is_primary_adapter(padapter))
		return;

	timer = &pxmitpriv->tx_poll_timer;
	_cancel_timer_ex(timer);
	timer->function = NULL;
	RTW_INFO("Tx poll timer cancel !\n");
}
#endif /* CONFIG_PCI_TX_POLLING */
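
/*
 * Added note on the polling timer above: rtw_tx_poll_init() arms a 1 ms
 * self-re-arming timer on the primary adapter only, each expiry re-arms the
 * timer and invokes the HAL's tx_poll_handler, and rtw_tx_poll_timer_cancel()
 * both stops the timer and clears timer->function so a later init can detect
 * the "not armed" state.
 */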

void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
{
	sctx->timeout_ms = timeout_ms;
	sctx->submit_time = rtw_get_current_time();
	_rtw_init_completion(&sctx->done);
	sctx->status = RTW_SCTX_SUBMITTED;
}

int rtw_sctx_wait(struct submit_ctx *sctx, const char *msg)
{
	int ret = _FAIL;
	unsigned long expire;
	int status = 0;

#ifdef PLATFORM_LINUX
	expire = sctx->timeout_ms ? msecs_to_jiffies(sctx->timeout_ms) : MAX_SCHEDULE_TIMEOUT;
	if (!_rtw_wait_for_comp_timeout(&sctx->done, expire)) {
		/* timeout, do something?? */
		status = RTW_SCTX_DONE_TIMEOUT;
		RTW_INFO("%s timeout: %s\n", __func__, msg);
	} else
		status = sctx->status;
#endif

	if (status == RTW_SCTX_DONE_SUCCESS)
		ret = _SUCCESS;

	return ret;
}

bool rtw_sctx_chk_waring_status(int status)
{
	switch (status) {
	case RTW_SCTX_DONE_UNKNOWN:
	case RTW_SCTX_DONE_BUF_ALLOC:
	case RTW_SCTX_DONE_BUF_FREE:

	case RTW_SCTX_DONE_DRV_STOP:
	case RTW_SCTX_DONE_DEV_REMOVE:
		return _TRUE;
	default:
		return _FALSE;
	}
}

void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
{
	if (*sctx) {
		if (rtw_sctx_chk_waring_status(status))
			RTW_INFO("%s status:%d\n", __func__, status);
		(*sctx)->status = status;
#ifdef PLATFORM_LINUX
		complete(&((*sctx)->done));
#endif
		*sctx = NULL;
	}
}

void rtw_sctx_done(struct submit_ctx **sctx)
{
	rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
}

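/*
 * Illustrative only: submit_ctx is the generic "submit and wait for
 * completion" helper - the submitter initializes it with a timeout and waits,
 * while the completing context reports the result through rtw_sctx_done_err()
 * or rtw_sctx_done(). A minimal sketch, assuming a hypothetical
 * example_issue_io() helper that registers the sctx with the completion side:
 */
#if 0	/* example sketch, not compiled */
static int example_submit_and_wait(_adapter *padapter)
{
	struct submit_ctx sctx;

	rtw_sctx_init(&sctx, 2000);	/* wait up to 2000 ms */

	if (example_issue_io(padapter, &sctx) != _SUCCESS)	/* hypothetical */
		return _FAIL;

	/* the completion side later calls rtw_sctx_done_err() on its stored pointer */
	return rtw_sctx_wait(&sctx, __func__);
}
#endif
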
#ifdef CONFIG_XMIT_ACK
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
	struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;

	pack_tx_ops->submit_time = rtw_get_current_time();
	pack_tx_ops->timeout_ms = timeout_ms;
	pack_tx_ops->status = RTW_SCTX_SUBMITTED;

	return rtw_sctx_wait(pack_tx_ops, __func__);
}

void rtw_ack_tx_done(struct xmit_priv *pxmitpriv, int status)
{
	struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;

	if (pxmitpriv->ack_tx)
		rtw_sctx_done_err(&pack_tx_ops, status);
	else
		RTW_INFO("%s ack_tx not set\n", __func__);
}
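
/*
 * Added note: rtw_ack_tx_wait()/rtw_ack_tx_done() reuse the submit_ctx
 * embedded in xmit_priv (ack_tx_ops) so a sender can block until the
 * completion side reports the frame's TX status or the wait times out;
 * rtw_ack_tx_done() is a no-op unless ack_tx is currently armed.
 */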
#endif /* CONFIG_XMIT_ACK */