/******************************************************************************
 *
 * Copyright(c) 2007 - 2019 Realtek Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 *****************************************************************************/
15 #define _XMIT_OSDEP_C_
16
17 #include <drv_types.h>
18
19 #define DBG_DUMP_OS_QUEUE_CTL 0
20
rtw_remainder_len(struct pkt_file * pfile)21 uint rtw_remainder_len(struct pkt_file *pfile)
22 {
23 return pfile->buf_len - ((SIZE_PTR)(pfile->cur_addr) - (SIZE_PTR)(pfile->buf_start));
24 }
25
_rtw_open_pktfile(struct sk_buff * pktptr,struct pkt_file * pfile)26 void _rtw_open_pktfile(struct sk_buff *pktptr, struct pkt_file *pfile)
27 {
28
29 pfile->pkt = pktptr;
30 pfile->cur_addr = pfile->buf_start = pktptr->data;
31 pfile->pkt_len = pfile->buf_len = pktptr->len;
32
33 pfile->cur_buffer = pfile->buf_start ;
34
35 }
36
/*
 * Read up to @rlen bytes from the pkt_file cursor into @rmem (skipped when
 * @rmem is NULL) and advance the cursor. Returns the number of bytes consumed.
 */
uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen)
{
	uint remaining = rtw_remainder_len(pfile);
	uint copy_len = (rlen < remaining) ? rlen : remaining;

	if (rmem)
		/* offset into the skb = bytes already consumed */
		skb_copy_bits(pfile->pkt, pfile->buf_len - pfile->pkt_len, rmem, copy_len);

	pfile->cur_addr += copy_len;
	pfile->pkt_len -= copy_len;

	return copy_len;
}
54
rtw_endofpktfile(struct pkt_file * pfile)55 sint rtw_endofpktfile(struct pkt_file *pfile)
56 {
57
58 if (pfile->pkt_len == 0) {
59 return _TRUE;
60 }
61
62
63 return _FALSE;
64 }
65
/*
 * Prepare a TX skb for hardware checksum offload.
 *
 * When CONFIG_TCP_CSUM_OFFLOAD_TX is enabled: for IPv4/IPv6 TCP or UDP
 * frames, zero the transport (and IPv4 header) checksum fields so HW can
 * fill them, and flag the frame via pattrib->hw_csum. Frames that will be
 * SW-encrypted get a software checksum instead. No-op otherwise.
 */
void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib)
{
#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	struct sk_buff *skb = (struct sk_buff *)pkt;
	struct iphdr *iph = NULL;
	struct ipv6hdr *i6ph = NULL;
	struct udphdr *uh = NULL;
	struct tcphdr *th = NULL;
	u8 protocol = 0xFF; /* 0xFF = not IPv4/IPv6, falls to switch default */

	if (skb->protocol == htons(ETH_P_IP)) {
		iph = (struct iphdr *)skb_network_header(skb);
		protocol = iph->protocol;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		i6ph = (struct ipv6hdr *)skb_network_header(skb);
		protocol = i6ph->nexthdr;
	} else
	{}

	/* HW unable to compute CSUM if header & payload was be encrypted by SW(cause TXDMA error) */
	if (pattrib->bswenc == _TRUE) {
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			skb_checksum_help(skb);
		return;
	}

	/* For HW rule, clear ipv4_csum & UDP/TCP_csum if it is UDP/TCP packet */
	switch (protocol) {
	case IPPROTO_UDP:
		uh = (struct udphdr *)skb_transport_header(skb);
		uh->check = 0;
		if (iph) /* iph is non-NULL only for IPv4 */
			iph->check = 0;
		pattrib->hw_csum = _TRUE;
		break;
	case IPPROTO_TCP:
		th = (struct tcphdr *)skb_transport_header(skb);
		th->check = 0;
		if (iph)
			iph->check = 0;
		pattrib->hw_csum = _TRUE;
		break;
	default:
		break;
	}
#endif

}
114 #if 0 /*CONFIG_CORE_XMITBUF*/
/*
 * Legacy CONFIG_CORE_XMITBUF variant — compiled out by the enclosing
 * "#if 0", kept for reference. Allocates the xmit_buf data area (USB
 * coherent buffer or aligned heap buffer) and, when @flag is set, the
 * eight USB TX URBs. Returns _SUCCESS or _FAIL.
 */
int rtw_os_xmit_resource_alloc(_adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz, u8 flag)
{
	if (alloc_sz > 0) {
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX
		struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
		struct usb_device *pusbd = dvobj_to_usb(pdvobjpriv)->pusbdev;

		/* USB coherent buffer needs no extra alignment adjustment */
		pxmitbuf->pallocated_buf = rtw_usb_buffer_alloc(pusbd, (size_t)alloc_sz, &pxmitbuf->dma_transfer_addr);
		pxmitbuf->pbuf = pxmitbuf->pallocated_buf;
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;
#else /* CONFIG_USE_USB_BUFFER_ALLOC_TX */

		pxmitbuf->pallocated_buf = rtw_zmalloc(alloc_sz);
		if (pxmitbuf->pallocated_buf == NULL)
			return _FAIL;

		/* pbuf = pallocated_buf rounded up to the required alignment */
		pxmitbuf->pbuf = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxmitbuf->pallocated_buf), SZ_ALIGN_XMITFRAME_EXT);

#endif /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
	}

	if (flag) {
#ifdef CONFIG_USB_HCI
		int i;
		for (i = 0; i < 8; i++) {
			pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
			if (pxmitbuf->pxmit_urb[i] == NULL) {
				RTW_INFO("pxmitbuf->pxmit_urb[i]==NULL");
				return _FAIL;
			}
		}
#endif
	}

	return _SUCCESS;
}
152
/*
 * Legacy CONFIG_CORE_XMITBUF variant — compiled out by the enclosing
 * "#if 0". Counterpart of the legacy alloc: releases the URBs (when @flag)
 * and the data buffer.
 */
void rtw_os_xmit_resource_free(_adapter *padapter, struct xmit_buf *pxmitbuf, u32 free_sz, u8 flag)
{
	if (flag) {
#ifdef CONFIG_USB_HCI
		int i;

		for (i = 0; i < 8; i++) {
			if (pxmitbuf->pxmit_urb[i]) {
				/* usb_kill_urb(pxmitbuf->pxmit_urb[i]); */
				usb_free_urb(pxmitbuf->pxmit_urb[i]);
			}
		}
#endif
	}

	if (free_sz > 0) {
#ifdef CONFIG_USE_USB_BUFFER_ALLOC_TX
		struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter);
		struct usb_device *pusbd = dvobj_to_usb(pdvobjpriv)->pusbdev;

		rtw_usb_buffer_free(pusbd, (size_t)free_sz, pxmitbuf->pallocated_buf, pxmitbuf->dma_transfer_addr);
		pxmitbuf->pallocated_buf = NULL;
		pxmitbuf->dma_transfer_addr = 0;
#else /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
		if (pxmitbuf->pallocated_buf)
			rtw_mfree(pxmitbuf->pallocated_buf, free_sz);
#endif /* CONFIG_USE_USB_BUFFER_ALLOC_TX */
	}
}
182 #else
/*
 * Allocate the per-xmit_frame TX buffer: SZ_XMITFRAME_EXT bytes plus
 * SZ_ALIGN_XMITFRAME_EXT of alignment slack. Stores the raw allocation in
 * pxframe->prealloc_buf_addr and the aligned pointer in pxframe->buf_addr.
 * Returns _SUCCESS or _FAIL.
 */
u8 rtw_os_xmit_resource_alloc(_adapter *padapter, struct xmit_frame *pxframe)
{
	u32 alloc_sz = SZ_XMITFRAME_EXT + SZ_ALIGN_XMITFRAME_EXT;

#if 0 /*def CONFIG_USE_USB_BUFFER_ALLOC_TX*/
	/* disabled USB coherent-buffer variant, kept for reference */
	struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
	struct usb_device *pusbd = dvobj_to_usb(dvobj)->pusbdev;

	pxframe->prealloc_buf_addr = rtw_usb_buffer_alloc(pusbd, (size_t)alloc_sz, &pxframe->dma_transfer_addr);
	if (pxframe->prealloc_buf_addr == NULL) {
		RTW_ERR("%s prealloc_buf_addr failed\n", __func__);
		rtw_warn_on(1);
		return _FAIL;
	}
	pxframe->buf_addr = pxframe->prealloc_buf_addr;
#else
	pxframe->prealloc_buf_addr = rtw_zmalloc(alloc_sz);
	if (pxframe->prealloc_buf_addr == NULL) {
		RTW_ERR("%s prealloc_buf_addr failed\n", __func__);
		rtw_warn_on(1);
		return _FAIL;
	}
	/* buf_addr = prealloc_buf_addr rounded up to the alignment boundary */
	pxframe->buf_addr = (u8 *)N_BYTE_ALIGMENT((SIZE_PTR)(pxframe->prealloc_buf_addr), SZ_ALIGN_XMITFRAME_EXT);
#endif
	return _SUCCESS;
}
209
/*
 * Counterpart of rtw_os_xmit_resource_alloc(): free the per-xmit_frame TX
 * buffer and clear both pointers. Safe to call when nothing was allocated.
 */
void rtw_os_xmit_resource_free(_adapter *padapter, struct xmit_frame *pxframe)
{
	/* must match the size used at allocation time */
	u32 free_sz = SZ_XMITFRAME_EXT + SZ_ALIGN_XMITFRAME_EXT;

#if 0 /*def CONFIG_USE_USB_BUFFER_ALLOC_TX*/
	/* disabled USB coherent-buffer variant, kept for reference */
	struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
	struct usb_device *pusbd = dvobj_to_usb(dvobj)->pusbdev;

	if (pxframe->prealloc_buf_addr) {
		rtw_usb_buffer_free(pusbd, (size_t)free_sz, pxframe->prealloc_buf_addr, pxframe->dma_transfer_addr);
		pxframe->prealloc_buf_addr = NULL;
		pxframe->buf_addr = NULL;
		pxframe->dma_transfer_addr = 0;
	}
#else
	if (pxframe->prealloc_buf_addr) {
		rtw_mfree(pxframe->prealloc_buf_addr, free_sz);
		pxframe->prealloc_buf_addr = NULL;
		pxframe->buf_addr = NULL;
	}
#endif
}
232 #endif
233
dump_os_queue(void * sel,_adapter * padapter)234 void dump_os_queue(void *sel, _adapter *padapter)
235 {
236 struct net_device *ndev = padapter->pnetdev;
237
238 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
239 int i;
240
241 for (i = 0; i < 4; i++) {
242 RTW_PRINT_SEL(sel, "os_queue[%d]:%s\n"
243 , i, __netif_subqueue_stopped(ndev, i) ? "stopped" : "waked");
244 }
245 #else
246 RTW_PRINT_SEL(sel, "os_queue:%s\n"
247 , netif_queue_stopped(ndev) ? "stopped" : "waked");
248 #endif
249 }
250
251 #define WMM_XMIT_THRESHOLD (NR_XMITFRAME*2/5)
252
/*
 * Decide whether the OS TX queue for @os_qid may be woken again.
 * wifi_spec (WMM test) mode wakes only while the per-AC hw backlog is below
 * WMM_XMIT_THRESHOLD; normal mode always wakes. On pre-2.6.35 kernels
 * (single queue) the answer is always _TRUE.
 */
static inline bool rtw_os_need_wake_queue(_adapter *padapter, u16 os_qid)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	if (padapter->registrypriv.wifi_spec) {
		if (pxmitpriv->hwxmits[os_qid].accnt < WMM_XMIT_THRESHOLD)
			return _TRUE;
#ifdef DBG_CONFIG_ERROR_DETECT
#ifdef DBG_CONFIG_ERROR_RESET
	/* NOTE: this "else if" is spliced into the if-chain by the #ifdefs
	 * above — keep queues stopped while a silent reset is in progress */
	} else if (rtw_hal_sreset_inprogress(padapter) == _TRUE) {
		return _FALSE;
#endif/* #ifdef DBG_CONFIG_ERROR_RESET */
#endif/* #ifdef DBG_CONFIG_ERROR_DETECT */
	} else {
		return _TRUE;
	}
	return _FALSE;
#else
	return _TRUE;
#endif
}
275
rtw_os_need_stop_queue(_adapter * padapter,u16 os_qid)276 static inline bool rtw_os_need_stop_queue(_adapter *padapter, u16 os_qid)
277 {
278 struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
279 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
280 if (padapter->registrypriv.wifi_spec) {
281 /* No free space for Tx, tx_worker is too slow */
282 if (pxmitpriv->hwxmits[os_qid].accnt > WMM_XMIT_THRESHOLD)
283 return _TRUE;
284 } else {
285 if (pxmitpriv->free_xmitframe_cnt <= 4)
286 return _TRUE;
287 }
288 #else
289 if (pxmitpriv->free_xmitframe_cnt <= 4)
290 return _TRUE;
291 #endif
292 return _FALSE;
293 }
294
/* Release an OS packet the driver has finished with (frees the skb). */
void rtw_os_pkt_complete(_adapter *padapter, struct sk_buff *pkt)
{
	rtw_skb_free(pkt);
}
299
/* Detach and release the OS packet attached to @pxframe, if any. */
void rtw_os_xmit_complete(_adapter *padapter, struct xmit_frame *pxframe)
{
	struct sk_buff *pkt = pxframe->pkt;

	pxframe->pkt = NULL;
	if (pkt)
		rtw_os_pkt_complete(padapter, pkt);
}
307
/*
 * Kick the TX path: if frames are pending, (re)schedule the xmit tasklet
 * under the xmit_priv lock. The SDIO/GSPI semaphore variant and the
 * PCI thread-mode wakeup are both compiled out (#if 0).
 */
void rtw_os_xmit_schedule(_adapter *padapter)
{
#if 0 /*defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)*/
	_adapter *pri_adapter;

	if (!padapter)
		return;
	pri_adapter = GET_PRIMARY_ADAPTER(padapter);

	if (_rtw_queue_empty(&padapter->xmitpriv.pending_xmitbuf_queue) == _FALSE)
		_rtw_up_sema(&pri_adapter->xmitpriv.xmit_sema);


#elif defined(CONFIG_PCI_HCI) || defined(CONFIG_USB_HCI)
	struct xmit_priv *pxmitpriv;

	if (!padapter)
		return;

	pxmitpriv = &padapter->xmitpriv;

	/* hold the xmit lock so the pending check and the schedule are atomic */
	_rtw_spinlock_bh(&pxmitpriv->lock);

	if (rtw_txframes_pending(padapter))
		rtw_tasklet_hi_schedule(&pxmitpriv->xmit_tasklet);

	_rtw_spinunlock_bh(&pxmitpriv->lock);

#if 0 /*defined(CONFIG_PCI_HCI) && defined(CONFIG_XMIT_THREAD_MODE)*/
	if (_rtw_queue_empty(&padapter->xmitpriv.pending_xmitbuf_queue) == _FALSE)
		_rtw_up_sema(&padapter->xmitpriv.xmit_sema);
#endif


#endif
}
344
rtw_os_check_wakup_queue(_adapter * padapter,u16 os_qid)345 void rtw_os_check_wakup_queue(_adapter *padapter, u16 os_qid)
346 {
347 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
348 if (rtw_os_need_wake_queue(padapter, os_qid)) {
349 if (DBG_DUMP_OS_QUEUE_CTL)
350 RTW_INFO(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), os_qid);
351 netif_wake_subqueue(padapter->pnetdev, os_qid);
352 }
353 #else
354 if (rtw_os_need_wake_queue(padapter, 0)) {
355 if (DBG_DUMP_OS_QUEUE_CTL)
356 RTW_INFO(FUNC_ADPT_FMT": netif_wake_queue\n", FUNC_ADPT_ARG(padapter));
357 netif_wake_queue(padapter->pnetdev);
358 }
359 #endif
360 }
361
rtw_os_check_stop_queue(_adapter * padapter,u16 os_qid)362 bool rtw_os_check_stop_queue(_adapter *padapter, u16 os_qid)
363 {
364 bool busy = _FALSE;
365
366 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
367 if (rtw_os_need_stop_queue(padapter, os_qid)) {
368 if (DBG_DUMP_OS_QUEUE_CTL)
369 RTW_INFO(FUNC_ADPT_FMT": netif_stop_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), os_qid);
370 netif_stop_subqueue(padapter->pnetdev, os_qid);
371 busy = _TRUE;
372 }
373 #else
374 if (rtw_os_need_stop_queue(padapter, 0)) {
375 if (DBG_DUMP_OS_QUEUE_CTL)
376 RTW_INFO(FUNC_ADPT_FMT": netif_stop_queue\n", FUNC_ADPT_ARG(padapter));
377 rtw_netif_stop_queue(padapter->pnetdev);
378 busy = _TRUE;
379 }
380 #endif
381 return busy;
382 }
383
rtw_os_wake_queue_at_free_stainfo(_adapter * padapter,int * qcnt_freed)384 void rtw_os_wake_queue_at_free_stainfo(_adapter *padapter, int *qcnt_freed)
385 {
386 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
387 int i;
388
389 for (i = 0; i < 4; i++) {
390 if (qcnt_freed[i] == 0)
391 continue;
392
393 if (rtw_os_need_wake_queue(padapter, i)) {
394 if (DBG_DUMP_OS_QUEUE_CTL)
395 RTW_INFO(FUNC_ADPT_FMT": netif_wake_subqueue[%d]\n", FUNC_ADPT_ARG(padapter), i);
396 netif_wake_subqueue(padapter->pnetdev, i);
397 }
398 }
399 #else
400 if (qcnt_freed[0] || qcnt_freed[1] || qcnt_freed[2] || qcnt_freed[3]) {
401 if (rtw_os_need_wake_queue(padapter, 0)) {
402 if (DBG_DUMP_OS_QUEUE_CTL)
403 RTW_INFO(FUNC_ADPT_FMT": netif_wake_queue\n", FUNC_ADPT_ARG(padapter));
404 netif_wake_queue(padapter->pnetdev);
405 }
406 }
407 #endif
408 }
409
/*
 * Core OS TX entry (non-PHL path): hand one skb from the network stack to
 * rtw_xmit(), software-segmenting GSO packets first when checksum offload
 * is enabled. Always returns 0; frames that cannot be sent are dropped and
 * counted in pxmitpriv->tx_drop.
 */
int _rtw_xmit_entry(struct sk_buff *pkt, _nic_hdl pnetdev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	struct sk_buff *skb = pkt;
	struct sk_buff *segs, *nskb;
	netdev_features_t features = padapter->pnetdev->features;
#endif
	u16 os_qid = 0;
	s32 res = 0;

	/* MP (manufacturing/test) mode: never transmit normal OS frames */
	if (padapter->registrypriv.mp_mode) {
		RTW_INFO("MP_TX_DROP_OS_FRAME\n");
		goto drop_packet;
	}
	DBG_COUNTER(padapter->tx_logs.os_tx);

	if (rtw_if_up(padapter) == _FALSE) {
		DBG_COUNTER(padapter->tx_logs.os_tx_err_up);
#ifdef DBG_TX_DROP_FRAME
		RTW_INFO("DBG_TX_DROP_FRAME %s if_up fail\n", __FUNCTION__);
#endif
		goto drop_packet;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	os_qid = skb_get_queue_mapping(pkt);
#endif

#ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
	if (skb_shinfo(skb)->gso_size) {
		/* split a big(65k) skb into several small(1.5k) skbs */
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
		segs = skb_gso_segment(skb, features);
		if (IS_ERR(segs) || !segs)
			goto drop_packet;

		/* submit each segment independently; a failed segment is
		 * dropped and freed without aborting the rest */
		do {
			nskb = segs;
			segs = segs->next;
			nskb->next = NULL;
			/* account the new segment so the later free balances it */
			rtw_mstat_update( MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, nskb->truesize);
			res = rtw_xmit(padapter, &nskb, os_qid);
			if (res < 0) {
#ifdef DBG_TX_DROP_FRAME
				RTW_INFO("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __FUNCTION__);
#endif
				pxmitpriv->tx_drop++;
				rtw_os_pkt_complete(padapter, nskb);
			}
		} while (segs);
		/* the original GSO skb is no longer needed after segmentation */
		rtw_os_pkt_complete(padapter, skb);
		goto exit;
	}
#endif

	res = rtw_xmit(padapter, &pkt, os_qid);
	if (res < 0) {
#ifdef DBG_TX_DROP_FRAME
		RTW_INFO("DBG_TX_DROP_FRAME %s rtw_xmit fail\n", __FUNCTION__);
#endif
		goto drop_packet;
	}

	goto exit;

drop_packet:
	pxmitpriv->tx_drop++;
	rtw_os_pkt_complete(padapter, pkt);

exit:


	return 0;
}
486
487 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
488 /* copy from skbuff.c to be compatible with old kernel */
kfree_skb_list(struct sk_buff * segs)489 static void kfree_skb_list(struct sk_buff *segs)
490 {
491 while (segs) {
492 struct sk_buff *next = segs->next;
493
494 kfree_skb(segs);
495 segs = next;
496 }
497 }
498 #endif
499
500
/*
 * ndo_start_xmit hook: route each skb to the monitor-mode TX path or the
 * normal data path (rtw_os_tx). Return type is netdev_tx_t on >= 2.6.32
 * kernels, plain int before that.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
netdev_tx_t rtw_xmit_entry(struct sk_buff *pkt, _nic_hdl pnetdev)
#else
int rtw_xmit_entry(struct sk_buff *pkt, _nic_hdl pnetdev)
#endif
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	int ret = 0;

	if (pkt) {
		if (check_fwstate(pmlmepriv, WIFI_MONITOR_STATE) == _TRUE) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24))
			/* monitor mode: raw-injection path, return value ignored */
			rtw_monitor_xmit_entry((struct sk_buff *)pkt, pnetdev);
#endif
		}
		else {
#ifdef CONFIG_RTW_NETIF_SG
			/* After turning on SG, net stack may (0.0025%) TX
			 * strange skb that is skb_has_frag_list() but linear
			 * (i.e. skb_is_nonlinear() is false). This is out of
			 * our expectation, so I free fragment list to be
			 * compatible with our design.
			 */
			if (skb_has_frag_list(pkt)) {
				if (!skb_is_nonlinear(pkt)) {
					kfree_skb_list(skb_shinfo(pkt)->frag_list);
					skb_shinfo(pkt)->frag_list = NULL;
					RTW_DBG("%s:%d free frag list\n", __func__, __LINE__);
				} else {
					RTW_DBG("%s:%d nonlinear frag list\n", __func__, __LINE__);
				}
			}
#endif
			/* account the skb so rtw_os_pkt_complete() balances it */
			rtw_mstat_update(MSTAT_TYPE_SKB, MSTAT_ALLOC_SUCCESS, pkt->truesize);
#ifdef CONFIG_TX_SKB_ORPHAN
			skb_orphan(pkt);
#endif
			ret = rtw_os_tx(pkt, pnetdev);
		}

	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32))
	return (ret == 0) ? NETDEV_TX_OK : NETDEV_TX_BUSY;
#else
	return ret;
#endif
}
550
551
552
553 #ifdef RTW_PHL_TX
rtw_os_is_adapter_ready(_adapter * padapter,struct sk_buff * pkt)554 int rtw_os_is_adapter_ready(_adapter *padapter, struct sk_buff *pkt)
555 {
556
557 if (padapter->registrypriv.mp_mode) {
558 RTW_INFO("MP_TX_DROP_OS_FRAME\n");
559 return _FALSE;
560 }
561
562 DBG_COUNTER(padapter->tx_logs.os_tx);
563
564 if (rtw_if_up(padapter) == _FALSE) {
565 PHLTX_LOG;
566 DBG_COUNTER(padapter->tx_logs.os_tx_err_up);
567 #ifdef DBG_TX_DROP_FRAME
568 RTW_INFO("DBG_TX_DROP_FRAME %s if_up fail\n", __FUNCTION__);
569 #endif
570 return _FALSE;
571 }
572
573 if (IS_CH_WAITING(adapter_to_rfctl(padapter))){
574 PHLTX_LOG;
575 return _FALSE;
576 }
577
578 if (rtw_linked_check(padapter) == _FALSE){
579 PHLTX_LOG;
580 return _FALSE;
581 }
582
583 return _TRUE;
584 }
585
/*
 * PHL-path OS TX: hand one skb to rtw_core_tx(). Zero-length skbs are
 * ignored; frames the adapter cannot take are dropped (unless layer-2
 * roaming is holding them) and counted in pxmitpriv->tx_drop.
 * Always returns 0.
 */
int rtw_os_tx(struct sk_buff *pkt, _nic_hdl pnetdev)
{
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u16 os_qid = 0;
	s32 res = 0;

#ifdef RTW_PHL_DBG_CMD
	core_add_record(padapter, REC_TX_DATA, pkt);
#endif

	PHLTX_LOG;

	if (pkt->len == 0)
		return 0;

	if ((rtw_os_is_adapter_ready(padapter, pkt) == _FALSE)
#ifdef CONFIG_LAYER2_ROAMING
		/* while roaming, keep queuing frames even if not "ready" */
		&& (!padapter->mlmepriv.roam_network)
#endif
	)
		goto drop_packet;

	PHLTX_LOG;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 35))
	os_qid = skb_get_queue_mapping(pkt);
#endif

	PHLTX_LOG;
	/* on FAIL, rtw_core_tx has already consumed/freed the skb:
	 * only bump the drop counter, do not free it again */
	if (rtw_core_tx(padapter, &pkt, NULL, os_qid) == FAIL)
		goto inc_drop_cnt;

	PHLTX_LOG;

	goto exit;

drop_packet:
	rtw_os_pkt_complete(padapter, pkt);
	/* falls through: every dropped packet is also counted */

inc_drop_cnt:
	pxmitpriv->tx_drop++;

exit:
	return 0;
}
632 #endif
633
634 #ifdef CONFIG_TX_AMSDU_SW_MODE
ieee8023_header_to_rfc1042(struct sk_buff * skb,int pads)635 static void ieee8023_header_to_rfc1042(struct sk_buff *skb, int pads)
636 {
637 void *data;
638 int pad;
639 __be16 len;
640 const int headroom = SNAP_SIZE + 2 + pads;
641
642 if (!skb)
643 return;
644
645 if (skb_headroom(skb) < headroom) {
646 RTW_WARN("%s: headroom=%d isn't enough\n", __func__, skb_headroom(skb));
647 if (pskb_expand_head(skb, headroom, 0, GFP_ATOMIC)) {
648 RTW_ERR("%s: no headroom=%d for skb\n",
649 __func__, headroom);
650 return;
651 }
652 }
653
654 data = skb_push(skb, headroom);
655 memset(data, 0, pads);
656 data += pads;
657 memmove(data, data + SNAP_SIZE + 2, 2 * ETH_ALEN);
658 data += 2 * ETH_ALEN;
659 len = cpu_to_be16(skb->len - pads - 2 * ETH_ALEN - 2);
660 memcpy(data, &len, 2);
661 memcpy(data + 2, rtw_rfc1042_header, SNAP_SIZE);
662 }
663
/*
 * Merge @xf_nr xmit_frames into one A-MSDU: the first frame's skb becomes
 * the head; every other frame's skb is converted to an RFC1042 subframe
 * (4-byte aligned via pads) and appended to the head skb's frag_list.
 * The non-head xmit_frames are freed (their skb ownership moves to the
 * head). *pktlen receives the total coalesced length.
 *
 * NOTE(review): head_skb->truesize is not adjusted for the appended
 * fragments, and @amsdu is unused here — presumably handled by the caller;
 * confirm before relying on either.
 */
void rtw_coalesce_tx_amsdu(_adapter *padapter, struct xmit_frame *pxframes[],
			   int xf_nr, bool amsdu, u32 *pktlen)
{
	struct xmit_frame *head_xframe;
	struct xmit_frame *pxframe;
	struct sk_buff *skb;
	struct sk_buff *head_skb;
	struct sk_buff **frag_tail;
	int pads;
	int i;

	/* prepare head xmitframe */
	head_xframe = pxframes[0];
	head_skb = head_xframe->pkt;

	/* head subframe starts at offset 0: no alignment padding */
	ieee8023_header_to_rfc1042(head_skb, 0);

	/* find the current tail of the head skb's fragment list */
	frag_tail = &skb_shinfo(head_skb)->frag_list;
	while (*frag_tail)
		frag_tail = &(*frag_tail)->next;

	for (i = 1; i < xf_nr; i++) {
		pxframe = pxframes[i];
		skb = pxframe->pkt;

		/* pad so this subframe starts on a 4-byte boundary
		 * (head_skb->len already includes previously appended skbs) */
		if (head_skb->len & 0x03)
			pads = 4 - (head_skb->len & 0x03);
		else
			pads = 0;

		ieee8023_header_to_rfc1042(skb, pads);

		/* free sk accounting to have TP like doing skb_linearize() */
		if (skb->destructor)
			skb_orphan(skb);

		/* add this skb to head_skb */
		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		*frag_tail = skb;
		while (*frag_tail)
			frag_tail = &(*frag_tail)->next;

		/* free this xframe */
		pxframe->pkt = NULL; /* head xframe own */
		core_tx_free_xmitframe(padapter, pxframe);
	}

	/* total skb length (includes all fragments) */
	*pktlen = head_skb->len;
}
715 #endif /* CONFIG_TX_AMSDU_SW_MODE */
716