/*
 * Linux Packet (skb) interface
 *
 * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: linux_pkt.c 691183 2017-03-21 05:49:14Z $
 */

#include <typedefs.h>
#include <bcmendian.h>
#include <linuxver.h>
#include <bcmdefs.h>

#include <linux/random.h>

#include <osl.h>
#include <bcmutils.h>
#include <pcicfg.h>

#if defined(BCMASSERT_LOG) && !defined(OEM_ANDROID)
#include <bcm_assert_log.h>
#endif // endif
#include <linux/fs.h>
#include "linux_osl_priv.h"

#ifdef CONFIG_DHD_USE_STATIC_BUF

bcm_static_buf_t *bcm_static_buf = 0;
bcm_static_pkt_t *bcm_static_skb = 0;

void* wifi_platform_prealloc(void *adapter, int section, unsigned long size);
#endif /* CONFIG_DHD_USE_STATIC_BUF */

#ifdef BCM_OBJECT_TRACE
/* don't clear the first 4 bytes, which hold the pkt sn */
#define OSL_PKTTAG_CLEAR(p) \
	do { \
		struct sk_buff *s = (struct sk_buff *)(p); \
		uint tagsz = sizeof(s->cb); \
		ASSERT(OSL_PKTTAG_SZ <= tagsz); \
		memset(s->cb + 4, 0, tagsz - 4); \
	} while (0)
#else
#define OSL_PKTTAG_CLEAR(p) \
	do { \
		struct sk_buff *s = (struct sk_buff *)(p); \
		uint tagsz = sizeof(s->cb); \
		ASSERT(OSL_PKTTAG_SZ <= tagsz); \
		memset(s->cb, 0, tagsz); \
	} while (0)
#endif /* BCM_OBJECT_TRACE */
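
/*
 * Illustrative note (not part of the original source): OSL_PKTTAG_CLEAR()
 * wipes the driver pkttag, which lives in the skb control buffer (skb->cb).
 * A minimal sketch of a call site, with 'skb' as a hypothetical packet:
 *
 *	struct sk_buff *skb = ...;
 *	OSL_PKTTAG_CLEAR(skb);	// zeroes skb->cb; with BCM_OBJECT_TRACE the
 *				// first 4 bytes (the packet serial number)
 *				// are preserved
 */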

int osl_static_mem_init(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (!bcm_static_buf && adapter) {
		if (!(bcm_static_buf = (bcm_static_buf_t *)wifi_platform_prealloc(adapter,
			3, STATIC_BUF_SIZE + STATIC_BUF_TOTAL_LEN))) {
			printk("cannot alloc static buf!\n");
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		} else {
			printk("succeeded to alloc static buf\n");
		}

		spin_lock_init(&bcm_static_buf->static_lock);

		bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE;
	}

#if defined(BCMSDIO) || defined(DHD_USE_STATIC_CTRLBUF)
	if (!bcm_static_skb && adapter) {
		int i;
		void *skb_buff_ptr = 0;
		bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048);
		skb_buff_ptr = wifi_platform_prealloc(adapter, 4, 0);
		if (!skb_buff_ptr) {
			printk("cannot alloc static buf!\n");
			bcm_static_buf = NULL;
			bcm_static_skb = NULL;
			ASSERT(osh->magic == OS_HANDLE_MAGIC);
			return -ENOMEM;
		}

		bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *) *
			(STATIC_PKT_MAX_NUM));
		for (i = 0; i < STATIC_PKT_MAX_NUM; i++) {
			bcm_static_skb->pkt_use[i] = 0;
		}

#ifdef DHD_USE_STATIC_CTRLBUF
		spin_lock_init(&bcm_static_skb->osl_pkt_lock);
		bcm_static_skb->last_allocated_index = 0;
#else
		sema_init(&bcm_static_skb->osl_pkt_sem, 1);
#endif /* DHD_USE_STATIC_CTRLBUF */
	}
#endif /* BCMSDIO || DHD_USE_STATIC_CTRLBUF */
#endif /* CONFIG_DHD_USE_STATIC_BUF */

	return 0;
}
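
/*
 * Illustrative sketch (not from the original source) of the preallocated
 * layout set up above; the exact sizes come from STATIC_BUF_SIZE,
 * STATIC_BUF_TOTAL_LEN and the platform prealloc sections:
 *
 *	wifi_platform_prealloc(adapter, 3, ...)    wifi_platform_prealloc(adapter, 4, 0)
 *	+------------------------------------+     +----------------------------------+
 *	| bcm_static_buf_t header / lock     |     | array of preallocated            |
 *	| (bcm_static_skb aliases offset 2048|     | struct sk_buff * pointers,       |
 *	|  within this region)               |     | copied into bcm_static_skb       |
 *	+-- offset STATIC_BUF_SIZE ----------+     | (STATIC_PKT_MAX_NUM entries)     |
 *	| buf_ptr: static memory pool        |     +----------------------------------+
 *	+------------------------------------+
 */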

int osl_static_mem_deinit(osl_t *osh, void *adapter)
{
#ifdef CONFIG_DHD_USE_STATIC_BUF
	if (bcm_static_buf) {
		bcm_static_buf = 0;
	}
#ifdef BCMSDIO
	if (bcm_static_skb) {
		bcm_static_skb = 0;
	}
#endif /* BCMSDIO */
#endif /* CONFIG_DHD_USE_STATIC_BUF */
	return 0;
}

/*
 * To avoid ACP latency, a fwder buf will be sent directly to DDR using
 * DDR aliasing into non-ACP address space. Such Fwder buffers must be
 * explicitly managed from a coherency perspective.
 */
static inline void BCMFASTPATH
osl_fwderbuf_reset(osl_t *osh, struct sk_buff *skb)
{
}

static struct sk_buff * BCMFASTPATH
osl_alloc_skb(osl_t *osh, unsigned int len)
{
	struct sk_buff *skb;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
	gfp_t flags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
#ifdef DHD_USE_ATOMIC_PKTGET
	flags = GFP_ATOMIC;
#endif /* DHD_USE_ATOMIC_PKTGET */
	skb = __dev_alloc_skb(len, flags);
#else
	skb = dev_alloc_skb(len);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) */

	return skb;
}

/* Convert a driver packet to a native (OS) packet.
 * In the process, the pkttag is zeroed out before the packet is sent up:
 * the IP code depends on skb->cb being set up correctly for various options,
 * which in our case means it must be 0.
 */
struct sk_buff * BCMFASTPATH
osl_pkt_tonative(osl_t *osh, void *pkt)
{
	struct sk_buff *nskb;

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* Decrement the packet counter */
	for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
		atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->cmn->pktalloced);
	}
	return (struct sk_buff *)pkt;
}

/* Convert a native (OS) packet to a driver packet.
 * In the process, the native packet is consumed (there is no copying),
 * and the pkttag is zeroed out.
 */
void * BCMFASTPATH
osl_pkt_frmnative(osl_t *osh, void *pkt)
{
	struct sk_buff *cskb;
	struct sk_buff *nskb;
	unsigned long pktalloced = 0;

	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(pkt);

	/* walk the PKTCLINK() list */
	for (cskb = (struct sk_buff *)pkt;
	     cskb != NULL;
	     cskb = PKTISCHAINED(cskb) ? PKTCLINK(cskb) : NULL) {

		/* walk the pkt buffer list */
		for (nskb = cskb; nskb; nskb = nskb->next) {

			/* Increment the packet counter */
			pktalloced++;

			/* clear the 'prev' pointer;
			 * kernel 3.18 leaves skb->prev pointing to the skb itself
			 * to indicate a non-fragmented skb
			 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0))
			nskb->prev = NULL;
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) */

		}
	}

	/* Increment the packet counter */
	atomic_add(pktalloced, &osh->cmn->pktalloced);

	return (void *)pkt;
}
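
/*
 * Illustrative usage (not part of the original source), assuming a caller
 * that shuttles an skb across the OS/driver boundary; the PKTFRMNATIVE and
 * PKTTONATIVE macros in the OSL headers typically map onto these helpers:
 *
 *	void *pkt = osl_pkt_frmnative(osh, skb);  // skb->cb cleared, pktalloced++
 *	...					  // driver owns 'pkt' here
 *	skb = osl_pkt_tonative(osh, pkt);	  // pktalloced--, hand back to stack
 */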

/* Return a new packet. zero out pkttag */
#ifdef BCM_OBJECT_TRACE
void * BCMFASTPATH
linux_pktget(osl_t *osh, uint len, int line, const char *caller)
#else
void * BCMFASTPATH
linux_pktget(osl_t *osh, uint len)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb;
	uchar num = 0;
	if (lmtest != FALSE) {
		get_random_bytes(&num, sizeof(uchar));
		if ((num + 1) <= (256 * lmtest / 100))
			return NULL;
	}

	if ((skb = osl_alloc_skb(osh, len))) {
		skb->tail += len;
		skb->len += len;
		skb->priority = 0;

		atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */
	}

	return ((void*) skb);
}
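
/*
 * Illustrative allocation/free pairing (not part of the original source),
 * assuming BCM_OBJECT_TRACE is disabled so the short prototypes apply;
 * 'osh' and 'buflen' are hypothetical caller-side names:
 *
 *	void *p = linux_pktget(osh, buflen);	// pktalloced++, NULL on failure
 *	if (p != NULL) {
 *		// ... fill the buffer via the usual OSL accessors ...
 *		linux_pktfree(osh, p, FALSE);	// pktalloced--, rx callback path
 *	}
 */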

/* Free the driver packet. Free the tag if present */
#ifdef BCM_OBJECT_TRACE
void BCMFASTPATH
linux_pktfree(osl_t *osh, void *p, bool send, int line, const char *caller)
#else
void BCMFASTPATH
linux_pktfree(osl_t *osh, void *p, bool send)
#endif /* BCM_OBJECT_TRACE */
{
	struct sk_buff *skb, *nskb;
	if (osh == NULL)
		return;

	skb = (struct sk_buff*) p;

	if (send) {
		if (osh->pub.tx_fn) {
			osh->pub.tx_fn(osh->pub.tx_ctx, p, 0);
		}
	} else {
		if (osh->pub.rx_fn) {
			osh->pub.rx_fn(osh->pub.rx_ctx, p);
		}
	}

	PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE);

#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_CTRLBUF)
	if (skb && (skb->mac_len == PREALLOC_USED_MAGIC)) {
		printk("%s: pkt %p is from static pool\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}

	if (skb && (skb->mac_len == PREALLOC_FREE_MAGIC)) {
		printk("%s: pkt %p is from static pool and not in use\n",
			__FUNCTION__, p);
		dump_stack();
		return;
	}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_CTRLBUF */

	/* perversion: we use skb->next to chain multi-skb packets */
	while (skb) {
		nskb = skb->next;
		skb->next = NULL;

#ifdef BCM_OBJECT_TRACE
		bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, caller, line);
#endif /* BCM_OBJECT_TRACE */

		{
			if (skb->destructor) {
				/* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
				 * destructor exists
				 */
				dev_kfree_skb_any(skb);
			} else {
				/* can free immediately (even in_irq()) if destructor
				 * does not exist
				 */
				dev_kfree_skb(skb);
			}
		}
		atomic_dec(&osh->cmn->pktalloced);
		skb = nskb;
	}
}

#ifdef CONFIG_DHD_USE_STATIC_BUF
void*
osl_pktget_static(osl_t *osh, uint len)
{
	int i = 0;
	struct sk_buff *skb;
#ifdef DHD_USE_STATIC_CTRLBUF
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!bcm_static_skb)
		return linux_pktget(osh, len);

	if (len > DHD_SKB_MAX_BUFSIZE) {
		printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len);
		return linux_pktget(osh, len);
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		uint32 index;
		for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
			index = bcm_static_skb->last_allocated_index % STATIC_PKT_2PAGE_NUM;
			bcm_static_skb->last_allocated_index++;
			if (bcm_static_skb->skb_8k[index] &&
				bcm_static_skb->pkt_use[index] == 0) {
				break;
			}
		}

		if (i < STATIC_PKT_2PAGE_NUM) {
			bcm_static_skb->pkt_use[index] = 1;
			skb = bcm_static_skb->skb_8k[index];
			skb->data = skb->head;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, PKT_HEADROOM_DEFAULT);
#else
			skb->tail = skb->data + PKT_HEADROOM_DEFAULT;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->data += PKT_HEADROOM_DEFAULT;
			skb->cloned = 0;
			skb->priority = 0;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;
			skb->mac_len = PREALLOC_USED_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return skb;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return NULL;
#else
	down(&bcm_static_skb->osl_pkt_sem);

	if (len <= DHD_SKB_1PAGE_BUFSIZE) {
		for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
			if (bcm_static_skb->skb_4k[i] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if (i != STATIC_PKT_1PAGE_NUM) {
			bcm_static_skb->pkt_use[i] = 1;

			skb = bcm_static_skb->skb_4k[i];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

	if (len <= DHD_SKB_2PAGE_BUFSIZE) {
		for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
			if (bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM] &&
				bcm_static_skb->pkt_use[i] == 0) {
				break;
			}
		}

		if ((i >= STATIC_PKT_1PAGE_NUM) && (i < STATIC_PKT_1_2PAGE_NUM)) {
			bcm_static_skb->pkt_use[i] = 1;
			skb = bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM];
#ifdef NET_SKBUFF_DATA_USES_OFFSET
			skb_set_tail_pointer(skb, len);
#else
			skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
			skb->len = len;

			up(&bcm_static_skb->osl_pkt_sem);
			return skb;
		}
	}

#if defined(ENHANCED_STATIC_BUF)
	if (bcm_static_skb->skb_16k &&
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] == 0) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 1;

		skb = bcm_static_skb->skb_16k;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
		skb_set_tail_pointer(skb, len);
#else
		skb->tail = skb->data + len;
#endif /* NET_SKBUFF_DATA_USES_OFFSET */
		skb->len = len;

		up(&bcm_static_skb->osl_pkt_sem);
		return skb;
	}
#endif /* ENHANCED_STATIC_BUF */

	up(&bcm_static_skb->osl_pkt_sem);
	printk("%s: all static pkt in use!\n", __FUNCTION__);
	return linux_pktget(osh, len);
#endif /* DHD_USE_STATIC_CTRLBUF */
}
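
/*
 * Illustrative summary (not part of the original source) of how the
 * non-CTRLBUF path above picks a preallocated skb by requested length;
 * the bucket sizes are whatever DHD_SKB_{1,2}PAGE_BUFSIZE resolve to:
 *
 *	len <= DHD_SKB_1PAGE_BUFSIZE  -> first free skb_4k[] slot
 *	len <= DHD_SKB_2PAGE_BUFSIZE  -> first free skb_8k[] slot
 *	ENHANCED_STATIC_BUF           -> single skb_16k slot as a last resort
 *	otherwise / pool exhausted    -> fall back to linux_pktget(osh, len)
 */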

void
osl_pktfree_static(osl_t *osh, void *p, bool send)
{
	int i;
#ifdef DHD_USE_STATIC_CTRLBUF
	struct sk_buff *skb = (struct sk_buff *)p;
	unsigned long flags;
#endif /* DHD_USE_STATIC_CTRLBUF */

	if (!p) {
		return;
	}

	if (!bcm_static_skb) {
		linux_pktfree(osh, p, send);
		return;
	}

#ifdef DHD_USE_STATIC_CTRLBUF
	spin_lock_irqsave(&bcm_static_skb->osl_pkt_lock, flags);

	for (i = 0; i < STATIC_PKT_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i]) {
			if (bcm_static_skb->pkt_use[i] == 0) {
				printk("%s: static pkt idx %d(%p) is double freed\n",
					__FUNCTION__, i, p);
			} else {
				bcm_static_skb->pkt_use[i] = 0;
			}

			if (skb->mac_len != PREALLOC_USED_MAGIC) {
				printk("%s: static pkt idx %d(%p) is not in use\n",
					__FUNCTION__, i, p);
			}

			skb->mac_len = PREALLOC_FREE_MAGIC;
			spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
			return;
		}
	}

	spin_unlock_irqrestore(&bcm_static_skb->osl_pkt_lock, flags);
	printk("%s: packet %p does not exist in the pool\n", __FUNCTION__, p);
#else
	down(&bcm_static_skb->osl_pkt_sem);
	for (i = 0; i < STATIC_PKT_1PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_4k[i]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}

	for (i = STATIC_PKT_1PAGE_NUM; i < STATIC_PKT_1_2PAGE_NUM; i++) {
		if (p == bcm_static_skb->skb_8k[i - STATIC_PKT_1PAGE_NUM]) {
			bcm_static_skb->pkt_use[i] = 0;
			up(&bcm_static_skb->osl_pkt_sem);
			return;
		}
	}
#ifdef ENHANCED_STATIC_BUF
	if (p == bcm_static_skb->skb_16k) {
		bcm_static_skb->pkt_use[STATIC_PKT_MAX_NUM - 1] = 0;
		up(&bcm_static_skb->osl_pkt_sem);
		return;
	}
#endif // endif
	up(&bcm_static_skb->osl_pkt_sem);
#endif /* DHD_USE_STATIC_CTRLBUF */
	linux_pktfree(osh, p, send);
}
#endif /* CONFIG_DHD_USE_STATIC_BUF */

/* Clone a packet.
 * The pkttag contents are NOT cloned.
 */
#ifdef BCM_OBJECT_TRACE
void *
osl_pktdup(osl_t *osh, void *skb, int line, const char *caller)
#else
void *
osl_pktdup(osl_t *osh, void *skb)
#endif /* BCM_OBJECT_TRACE */
{
	void * p;

	ASSERT(!PKTISCHAINED(skb));

	/* clear the CTFBUF flag if set and map the rest of the buffer
	 * before cloning.
	 */
	PKTCTFMAP(osh, skb);

	if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL)
		return NULL;

	/* skb_clone copies skb->cb; we don't want that */
	if (osh->pub.pkttag)
		OSL_PKTTAG_CLEAR(p);

	/* Increment the packet counter */
	atomic_inc(&osh->cmn->pktalloced);
#ifdef BCM_OBJECT_TRACE
	bcm_object_trace_opr(p, BCM_OBJDBG_ADD_PKT, caller, line);
#endif /* BCM_OBJECT_TRACE */

	return (p);
}
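
/*
 * Illustrative usage (not part of the original source); 'osh' and 'pkt' are
 * hypothetical caller-side names. Because skb_clone() shares the data buffer,
 * the clone must not be modified without first making a private copy:
 *
 *	void *dup = osl_pktdup(osh, pkt);   // NULL on clone failure
 *	if (dup != NULL) {
 *		// pktalloced was incremented and dup's pkttag cleared
 *	}
 */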

/*
 * BINOSL selects the slightly slower function-call-based binary compatible osl.
 */

uint
osl_pktalloced(osl_t *osh)
{
	if (atomic_read(&osh->cmn->refcount) == 1)
		return (atomic_read(&osh->cmn->pktalloced));
	else
		return 0;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) && defined(TSQ_MULTIPLIER)
#include <linux/kallsyms.h>
#include <net/sock.h>
void
osl_pkt_orphan_partial(struct sk_buff *skb)
{
	uint32 fraction;
	static void *p_tcp_wfree = NULL;

	if (!skb->destructor || skb->destructor == sock_wfree)
		return;

	if (unlikely(!p_tcp_wfree)) {
		char sym[KSYM_SYMBOL_LEN];
		sprint_symbol(sym, (unsigned long)skb->destructor);
		sym[9] = 0;
		if (!strcmp(sym, "tcp_wfree"))
			p_tcp_wfree = skb->destructor;
		else
			return;
	}

	if (unlikely(skb->destructor != p_tcp_wfree || !skb->sk))
		return;

	/* subtract a portion of the skb truesize from the socket's
	 * sk_wmem_alloc so that more skbs can be allocated for this
	 * socket, giving a better cushion to meet WiFi device requirements
	 */
	fraction = skb->truesize * (TSQ_MULTIPLIER - 1) / TSQ_MULTIPLIER;
	skb->truesize -= fraction;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 13, 0)
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc.refs);
#else
	atomic_sub(fraction, &skb->sk->sk_wmem_alloc);
#endif // endif
}
#endif /* LINUX_VERSION >= 3.6.0 && TSQ_MULTIPLIER */
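
/*
 * Worked example (not part of the original source), assuming a hypothetical
 * TSQ_MULTIPLIER of 10 and an skb with truesize 4096:
 *
 *	fraction = 4096 * (10 - 1) / 10 = 3686
 *
 * i.e. roughly 90% of the skb's accounted size is released from the socket's
 * sk_wmem_alloc budget, leaving only ~10% charged while the packet sits in
 * the WiFi driver/firmware queues.
 */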