Lines matching references to `skb` (a cross-reference listing, apparently over the Linux kernel's net/core/skbuff.c). Each entry shows the original source line number, the matching line, the enclosing function, and whether `skb` appears there as an argument or a local variable.
103 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
107 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
108 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
109 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
113 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
115 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
118 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
120 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
188 struct sk_buff *skb; in __alloc_skb() local
199 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb()
200 if (!skb) in __alloc_skb()
202 prefetchw(skb); in __alloc_skb()
226 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
228 skb->truesize = SKB_TRUESIZE(size); in __alloc_skb()
229 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
230 refcount_set(&skb->users, 1); in __alloc_skb()
231 skb->head = data; in __alloc_skb()
232 skb->data = data; in __alloc_skb()
233 skb_reset_tail_pointer(skb); in __alloc_skb()
234 skb->end = skb->tail + size; in __alloc_skb()
235 skb->mac_header = (typeof(skb->mac_header))~0U; in __alloc_skb()
236 skb->transport_header = (typeof(skb->transport_header))~0U; in __alloc_skb()
239 shinfo = skb_shinfo(skb); in __alloc_skb()
246 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
248 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
254 skb_set_kcov_handle(skb, kcov_common_handle()); in __alloc_skb()
257 return skb; in __alloc_skb()
259 kmem_cache_free(cache, skb); in __alloc_skb()
260 skb = NULL; in __alloc_skb()
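For context, a minimal sketch of how callers typically reach __alloc_skb() above: alloc_skb() is the usual wrapper, the caller reserves headroom and then appends payload. The sizes and the helper name example_build_packet() are illustrative, not taken from the listing.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Editorial sketch: typical use of the allocator listed above.
 * 128 bytes of headroom is an illustrative value. */
static struct sk_buff *example_build_packet(const void *payload, unsigned int plen)
{
        struct sk_buff *skb;

        skb = alloc_skb(128 + plen, GFP_KERNEL);        /* wraps __alloc_skb() */
        if (!skb)
                return NULL;

        skb_reserve(skb, 128);                          /* headroom for headers pushed later */
        memcpy(skb_put(skb, plen), payload, plen);      /* append payload, tail advances */

        return skb;
}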
266 static struct sk_buff *__build_skb_around(struct sk_buff *skb, in __build_skb_around() argument
275 skb->truesize = SKB_TRUESIZE(size); in __build_skb_around()
276 refcount_set(&skb->users, 1); in __build_skb_around()
277 skb->head = data; in __build_skb_around()
278 skb->data = data; in __build_skb_around()
279 skb_reset_tail_pointer(skb); in __build_skb_around()
280 skb->end = skb->tail + size; in __build_skb_around()
281 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb_around()
282 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb_around()
285 shinfo = skb_shinfo(skb); in __build_skb_around()
289 skb_set_kcov_handle(skb, kcov_common_handle()); in __build_skb_around()
291 return skb; in __build_skb_around()
315 struct sk_buff *skb; in __build_skb() local
317 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
318 if (unlikely(!skb)) in __build_skb()
321 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
323 return __build_skb_around(skb, data, frag_size); in __build_skb()
333 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
335 if (skb && frag_size) { in build_skb()
336 skb->head_frag = 1; in build_skb()
338 skb->pfmemalloc = 1; in build_skb()
340 return skb; in build_skb()
350 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
353 if (unlikely(!skb)) in build_skb_around()
356 skb = __build_skb_around(skb, data, frag_size); in build_skb_around()
358 if (skb && frag_size) { in build_skb_around()
359 skb->head_frag = 1; in build_skb_around()
361 skb->pfmemalloc = 1; in build_skb_around()
363 return skb; in build_skb_around()
435 struct sk_buff *skb; in __netdev_alloc_skb() local
447 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
448 if (!skb) in __netdev_alloc_skb()
474 skb = __build_skb(data, len); in __netdev_alloc_skb()
475 if (unlikely(!skb)) { in __netdev_alloc_skb()
481 skb->pfmemalloc = 1; in __netdev_alloc_skb()
482 skb->head_frag = 1; in __netdev_alloc_skb()
485 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
486 skb->dev = dev; in __netdev_alloc_skb()
489 return skb; in __netdev_alloc_skb()
510 struct sk_buff *skb; in __napi_alloc_skb() local
521 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __napi_alloc_skb()
522 if (!skb) in __napi_alloc_skb()
538 skb = __build_skb(data, len); in __napi_alloc_skb()
539 if (unlikely(!skb)) { in __napi_alloc_skb()
545 skb->pfmemalloc = 1; in __napi_alloc_skb()
546 skb->head_frag = 1; in __napi_alloc_skb()
549 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in __napi_alloc_skb()
550 skb->dev = napi->dev; in __napi_alloc_skb()
553 return skb; in __napi_alloc_skb()
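A hedged sketch of the driver receive pattern that __netdev_alloc_skb()/__napi_alloc_skb() above serve: netdev_alloc_skb() already reserves NET_SKB_PAD (as the listing shows), the driver copies the frame, sets the protocol and hands the buffer to the stack. The function name, hw_buf and len are hypothetical; the other helpers are standard sk_buff API, not part of the matched lines.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Editorial sketch of a simple, copying RX path. */
static void example_rx(struct net_device *dev, const void *hw_buf, unsigned int len)
{
        struct sk_buff *skb;

        skb = netdev_alloc_skb(dev, len);       /* reserves NET_SKB_PAD internally */
        if (unlikely(!skb)) {
                dev->stats.rx_dropped++;
                return;
        }

        skb_put_data(skb, hw_buf, len);                 /* copy frame into the linear area */
        skb->protocol = eth_type_trans(skb, dev);       /* sets mac header, pulls ETH_HLEN */
        netif_rx(skb);                                  /* hand the packet to the stack */
}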
557 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
560 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
561 skb->len += size; in skb_add_rx_frag()
562 skb->data_len += size; in skb_add_rx_frag()
563 skb->truesize += truesize; in skb_add_rx_frag()
567 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
570 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
573 skb->len += size; in skb_coalesce_rx_frag()
574 skb->data_len += size; in skb_coalesce_rx_frag()
575 skb->truesize += truesize; in skb_coalesce_rx_frag()
585 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
587 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
590 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
594 skb_walk_frags(skb, list) in skb_clone_fraglist()
598 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
600 unsigned char *head = skb->head; in skb_free_head()
602 if (skb->head_frag) in skb_free_head()
608 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
610 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
613 if (skb->cloned && in skb_release_data()
614 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
624 skb_zcopy_clear(skb, true); in skb_release_data()
625 skb_free_head(skb); in skb_release_data()
631 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
635 switch (skb->fclone) { in kfree_skbmem()
637 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
641 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
652 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
661 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
663 nf_reset_ct(skb); in skb_release_head_state()
664 skb_dst_drop(skb); in skb_release_head_state()
665 if (skb->destructor) { in skb_release_head_state()
667 skb->destructor(skb); in skb_release_head_state()
670 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
672 skb_ext_put(skb); in skb_release_head_state()
676 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
678 skb_release_head_state(skb); in skb_release_all()
679 if (likely(skb->head)) in skb_release_all()
680 skb_release_data(skb); in skb_release_all()
692 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
694 skb_release_all(skb); in __kfree_skb()
695 kfree_skbmem(skb); in __kfree_skb()
706 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
708 if (!skb_unref(skb)) in kfree_skb()
711 trace_android_vh_kfree_skb(skb); in kfree_skb()
712 trace_kfree_skb(skb, __builtin_return_address(0)); in kfree_skb()
713 __kfree_skb(skb); in kfree_skb()
734 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
736 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
737 struct net_device *dev = skb->dev; in skb_dump()
738 struct sock *sk = skb->sk; in skb_dump()
745 len = skb->len; in skb_dump()
747 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
749 headroom = skb_headroom(skb); in skb_dump()
750 tailroom = skb_tailroom(skb); in skb_dump()
752 has_mac = skb_mac_header_was_set(skb); in skb_dump()
753 has_trans = skb_transport_header_was_set(skb); in skb_dump()
760 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
761 has_mac ? skb->mac_header : -1, in skb_dump()
762 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
763 skb->network_header, in skb_dump()
764 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
765 has_trans ? skb->transport_header : -1, in skb_dump()
768 skb->csum, skb->ip_summed, skb->csum_complete_sw, in skb_dump()
769 skb->csum_valid, skb->csum_level, in skb_dump()
770 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
771 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); in skb_dump()
782 16, 1, skb->head, headroom, false); in skb_dump()
784 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
787 16, 1, skb->data, seg_len, false); in skb_dump()
792 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
794 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
795 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
815 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
817 skb_walk_frags(skb, list_skb) in skb_dump()
830 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
832 skb_zcopy_clear(skb, true); in skb_tx_error()
845 void consume_skb(struct sk_buff *skb) in consume_skb() argument
847 if (!skb_unref(skb)) in consume_skb()
850 trace_consume_skb(skb); in consume_skb()
851 __kfree_skb(skb); in consume_skb()
863 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
865 trace_consume_skb(skb); in __consume_stateless_skb()
866 skb_release_data(skb); in __consume_stateless_skb()
867 kfree_skbmem(skb); in __consume_stateless_skb()
882 static inline void _kfree_skb_defer(struct sk_buff *skb) in _kfree_skb_defer() argument
887 skb_release_all(skb); in _kfree_skb_defer()
890 nc->skb_cache[nc->skb_count++] = skb; in _kfree_skb_defer()
894 prefetchw(skb); in _kfree_skb_defer()
904 void __kfree_skb_defer(struct sk_buff *skb) in __kfree_skb_defer() argument
906 _kfree_skb_defer(skb); in __kfree_skb_defer()
909 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
913 dev_consume_skb_any(skb); in napi_consume_skb()
917 if (!skb_unref(skb)) in napi_consume_skb()
921 trace_consume_skb(skb); in napi_consume_skb()
924 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
925 __kfree_skb(skb); in napi_consume_skb()
929 _kfree_skb_defer(skb); in napi_consume_skb()
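The two free paths above differ mainly in tracing semantics; a short editorial sketch of the convention, with example_finish() as a hypothetical caller:

#include <linux/skbuff.h>

/* Editorial sketch: kfree_skb() is for drops (fires the kfree_skb tracepoint,
 * visible to drop monitors); consume_skb() is for normal, successful
 * consumption of the packet. */
static void example_finish(struct sk_buff *skb, bool delivered)
{
        if (delivered)
                consume_skb(skb);       /* packet was processed successfully */
        else
                kfree_skb(skb);         /* packet is being dropped */
}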
992 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
994 #define C(x) n->x = skb->x in __skb_clone()
998 __copy_skb_header(n, skb); in __skb_clone()
1003 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1017 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1018 skb->cloned = 1; in __skb_clone()
1109 struct sk_buff *skb; in sock_zerocopy_alloc() local
1113 skb = sock_omalloc(sk, 0, GFP_KERNEL); in sock_zerocopy_alloc()
1114 if (!skb) in sock_zerocopy_alloc()
1117 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in sock_zerocopy_alloc()
1118 uarg = (void *)skb->cb; in sock_zerocopy_alloc()
1122 kfree_skb(skb); in sock_zerocopy_alloc()
1187 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1189 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1209 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in sock_zerocopy_callback() local
1211 struct sock *sk = skb->sk; in sock_zerocopy_callback()
1229 serr = SKB_EXT_ERR(skb); in sock_zerocopy_callback()
1243 __skb_queue_tail(q, skb); in sock_zerocopy_callback()
1244 skb = NULL; in sock_zerocopy_callback()
1251 consume_skb(skb); in sock_zerocopy_callback()
1281 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) in skb_zerocopy_iter_dgram() argument
1283 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1287 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1291 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1293 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1301 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1302 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1303 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1307 skb->sk = sk; in skb_zerocopy_iter_stream()
1308 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1309 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1313 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1314 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1353 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1355 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1360 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1366 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; in skb_copy_ubufs()
1384 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1411 skb_frag_unref(skb, i); in skb_copy_ubufs()
1415 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); in skb_copy_ubufs()
1418 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); in skb_copy_ubufs()
1419 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1422 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
1441 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
1443 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
1448 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
1451 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1456 if (skb_pfmemalloc(skb)) in skb_clone()
1466 return __skb_clone(n, skb); in skb_clone()
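skb_clone() above shares the data buffer with the original (dataref is bumped and skb->cloned is set), so a writer must unshare before modifying the payload. A minimal sketch of that rule; skb_unshare() is standard sk_buff API used here for illustration, and the 4-byte edit is arbitrary.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Editorial sketch: get a private, writable copy before touching the data. */
static struct sk_buff *example_modify(struct sk_buff *skb)
{
        skb = skb_unshare(skb, GFP_ATOMIC);     /* copies if shared/cloned, frees original */
        if (!skb)
                return NULL;

        /* Illustrative in-place edit of the linear data. */
        memset(skb->data, 0, min_t(unsigned int, skb_headlen(skb), 4));
        return skb;
}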
1470 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
1473 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1474 skb->csum_start += off; in skb_headers_offset_update()
1476 skb->transport_header += off; in skb_headers_offset_update()
1477 skb->network_header += off; in skb_headers_offset_update()
1478 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
1479 skb->mac_header += off; in skb_headers_offset_update()
1480 skb->inner_transport_header += off; in skb_headers_offset_update()
1481 skb->inner_network_header += off; in skb_headers_offset_update()
1482 skb->inner_mac_header += off; in skb_headers_offset_update()
1496 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
1498 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
1520 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
1522 int headerlen = skb_headroom(skb); in skb_copy()
1523 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
1525 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
1533 skb_put(n, skb->len); in skb_copy()
1535 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
1537 skb_copy_header(n, skb); in skb_copy()
1559 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1562 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1563 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1572 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1574 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1576 n->truesize += skb->data_len; in __pskb_copy_fclone()
1577 n->data_len = skb->data_len; in __pskb_copy_fclone()
1578 n->len = skb->len; in __pskb_copy_fclone()
1580 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1583 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
1584 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
1589 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1590 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1591 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1596 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1597 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1601 skb_copy_header(n, skb); in __pskb_copy_fclone()
1623 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1626 int i, osize = skb_end_offset(skb); in pskb_expand_head()
1633 BUG_ON(skb_shared(skb)); in pskb_expand_head()
1637 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1648 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1651 skb_shinfo(skb), in pskb_expand_head()
1652 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1659 if (skb_cloned(skb)) { in pskb_expand_head()
1660 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1662 if (skb_zcopy(skb)) in pskb_expand_head()
1663 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
1664 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1665 skb_frag_ref(skb, i); in pskb_expand_head()
1667 if (skb_has_frag_list(skb)) in pskb_expand_head()
1668 skb_clone_fraglist(skb); in pskb_expand_head()
1670 skb_release_data(skb); in pskb_expand_head()
1672 skb_free_head(skb); in pskb_expand_head()
1674 off = (data + nhead) - skb->head; in pskb_expand_head()
1676 skb->head = data; in pskb_expand_head()
1677 skb->head_frag = 0; in pskb_expand_head()
1678 skb->data += off; in pskb_expand_head()
1680 skb_set_end_offset(skb, size); in pskb_expand_head()
1684 skb->tail += off; in pskb_expand_head()
1685 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1686 skb->cloned = 0; in pskb_expand_head()
1687 skb->hdr_len = 0; in pskb_expand_head()
1688 skb->nohdr = 0; in pskb_expand_head()
1689 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1691 skb_metadata_clear(skb); in pskb_expand_head()
1697 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
1698 skb->truesize += size - osize; in pskb_expand_head()
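pskb_expand_head() above is usually reached through wrappers such as skb_cow_head(); a hedged sketch of the common "make headroom, then push" pattern. The 8-byte header size and example_prepend() are illustrative.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Editorial sketch: ensure headroom before prepending a header.
 * skb_cow_head() reallocates the head (via pskb_expand_head()) only when the
 * skb is cloned or lacks the requested headroom. */
static int example_prepend(struct sk_buff *skb)
{
        int err = skb_cow_head(skb, 8);         /* 8 bytes of headroom, illustrative */

        if (err)
                return err;

        memset(skb_push(skb, 8), 0, 8);         /* now safe to write the new header */
        return 0;
}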
1711 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1714 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1717 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1719 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1730 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
1736 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
1737 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
1739 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
1743 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
1745 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
1748 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
1753 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
1757 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
1780 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1787 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1788 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1790 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1799 skb_put(n, skb->len); in skb_copy_expand()
1809 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1810 skb->len + head_copy_len)); in skb_copy_expand()
1812 skb_copy_header(n, skb); in skb_copy_expand()
1834 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
1840 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
1841 memset(skb->data+skb->len, 0, pad); in __skb_pad()
1845 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
1846 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
1847 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
1855 err = skb_linearize(skb); in __skb_pad()
1859 memset(skb->data + skb->len, 0, pad); in __skb_pad()
1864 kfree_skb(skb); in __skb_pad()
1882 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
1884 if (tail != skb) { in pskb_put()
1885 skb->data_len += len; in pskb_put()
1886 skb->len += len; in pskb_put()
1901 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
1903 void *tmp = skb_tail_pointer(skb); in skb_put()
1904 SKB_LINEAR_ASSERT(skb); in skb_put()
1905 skb->tail += len; in skb_put()
1906 skb->len += len; in skb_put()
1907 if (unlikely(skb->tail > skb->end)) in skb_put()
1908 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
1922 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
1924 skb->data -= len; in skb_push()
1925 skb->len += len; in skb_push()
1926 if (unlikely(skb->data < skb->head)) in skb_push()
1927 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
1928 return skb->data; in skb_push()
1942 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
1944 return skb_pull_inline(skb, len); in skb_pull()
1957 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
1959 if (skb->len > len) in skb_trim()
1960 __skb_trim(skb, len); in skb_trim()
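A sketch of how the pointer helpers listed above (skb_push()/skb_pull() together with pskb_may_pull()) are used when parsing a header on receive. struct example_hdr and example_parse() are hypothetical; pskb_may_pull() is standard sk_buff API, not one of the matched lines.

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Editorial sketch: consume a header, process the payload, restore the header. */
struct example_hdr {
        __be32 id;
        __be32 len;
};

static int example_parse(struct sk_buff *skb)
{
        struct example_hdr *hdr;

        if (!pskb_may_pull(skb, sizeof(*hdr)))  /* make sure the header is linear */
                return -EINVAL;

        hdr = (struct example_hdr *)skb->data;
        skb_pull(skb, sizeof(*hdr));            /* skb->data now points at the payload */

        /* ... process payload based on hdr->id ... */

        skb_push(skb, sizeof(*hdr));            /* undo the pull if the header is needed again */
        return 0;
}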
1967 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
1971 int offset = skb_headlen(skb); in ___pskb_trim()
1972 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
1976 if (skb_cloned(skb) && in ___pskb_trim()
1977 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
1985 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
1992 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
1995 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
1998 skb_frag_unref(skb, i); in ___pskb_trim()
2000 if (skb_has_frag_list(skb)) in ___pskb_trim()
2001 skb_drop_fraglist(skb); in ___pskb_trim()
2005 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2037 if (len > skb_headlen(skb)) { in ___pskb_trim()
2038 skb->data_len -= skb->len - len; in ___pskb_trim()
2039 skb->len = len; in ___pskb_trim()
2041 skb->len = len; in ___pskb_trim()
2042 skb->data_len = 0; in ___pskb_trim()
2043 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2046 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2047 skb_condense(skb); in ___pskb_trim()
2054 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2056 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2057 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2059 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2060 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2062 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2063 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2064 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2069 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2098 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2104 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2106 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2107 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2112 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2113 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2118 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2123 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2124 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2139 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2173 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2174 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2180 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2188 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2189 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2192 skb_frag_unref(skb, i); in __pskb_pull_tail()
2195 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2197 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2208 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2211 skb->tail += delta; in __pskb_pull_tail()
2212 skb->data_len -= delta; in __pskb_pull_tail()
2214 if (!skb->data_len) in __pskb_pull_tail()
2215 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2217 return skb_tail_pointer(skb); in __pskb_pull_tail()
2236 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2238 int start = skb_headlen(skb); in skb_copy_bits()
2242 if (offset > (int)skb->len - len) in skb_copy_bits()
2249 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2256 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2258 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2287 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
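skb_copy_bits() above is the safe way to read data that may live in paged frags or the frag_list rather than the linear area; a minimal sketch, with example_peek() and the zero offset as illustrative choices:

#include <linux/skbuff.h>

/* Editorial sketch: read bytes regardless of where they sit in the skb.
 * Returns 0 on success or -EFAULT if the range runs past skb->len. */
static int example_peek(const struct sk_buff *skb, void *buf, unsigned int len)
{
        return skb_copy_bits(skb, 0, buf, len);
}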
2421 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
2433 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
2434 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
2435 skb_headlen(skb), in __skb_splice_bits()
2437 skb_head_is_locked(skb), in __skb_splice_bits()
2444 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
2445 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
2453 skb_walk_frags(skb, iter) { in __skb_splice_bits()
2473 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
2488 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
2498 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
2502 struct sk_buff *head = skb; in skb_send_sock_locked()
2509 while (offset < skb_headlen(skb) && len) { in skb_send_sock_locked()
2513 slen = min_t(int, len, skb_headlen(skb) - offset); in skb_send_sock_locked()
2514 kv.iov_base = skb->data + offset; in skb_send_sock_locked()
2532 offset -= skb_headlen(skb); in skb_send_sock_locked()
2535 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in skb_send_sock_locked()
2536 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in skb_send_sock_locked()
2544 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in skb_send_sock_locked()
2545 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in skb_send_sock_locked()
2567 if (skb == head) { in skb_send_sock_locked()
2568 if (skb_has_frag_list(skb)) { in skb_send_sock_locked()
2569 skb = skb_shinfo(skb)->frag_list; in skb_send_sock_locked()
2572 } else if (skb->next) { in skb_send_sock_locked()
2573 skb = skb->next; in skb_send_sock_locked()
2598 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
2600 int start = skb_headlen(skb); in skb_store_bits()
2604 if (offset > (int)skb->len - len) in skb_store_bits()
2610 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
2617 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
2618 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
2648 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
2676 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
2679 int start = skb_headlen(skb); in __skb_checksum()
2689 skb->data + offset, copy, csum); in __skb_checksum()
2696 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
2698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
2733 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
2760 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2768 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
2774 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2777 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2787 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2796 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2801 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2803 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
2832 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
2859 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
2863 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
2866 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
2867 !skb->csum_complete_sw) in __skb_checksum_complete_head()
2868 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
2870 if (!skb_shared(skb)) in __skb_checksum_complete_head()
2871 skb->csum_valid = !sum; in __skb_checksum_complete_head()
2885 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
2890 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
2892 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
2901 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
2902 !skb->csum_complete_sw) in __skb_checksum_complete()
2903 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
2906 if (!skb_shared(skb)) { in __skb_checksum_complete()
2908 skb->csum = csum; in __skb_checksum_complete()
2909 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
2910 skb->csum_complete_sw = 1; in __skb_checksum_complete()
2911 skb->csum_valid = !sum; in __skb_checksum_complete()
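A sketch of computing a software checksum over part of an skb with the helpers above: skb_checksum() walks the linear area, frags and frag_list, and csum_fold() reduces the partial sum to 16 bits. example_csum() is illustrative; in practice offset/len would span the transport header and payload.

#include <linux/skbuff.h>
#include <net/checksum.h>

/* Editorial sketch: fold a partial checksum over an skb range. */
static __sum16 example_csum(const struct sk_buff *skb, int offset, int len)
{
        __wsum csum = skb_checksum(skb, offset, len, 0);

        return csum_fold(csum);         /* fold the 32-bit partial sum to 16 bits */
}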
3050 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3055 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3056 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3058 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3060 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3062 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3065 if (csstart != skb->len) in skb_copy_and_csum_dev()
3066 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3067 skb->len - csstart); in skb_copy_and_csum_dev()
3069 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3070 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3128 struct sk_buff *skb; in skb_queue_purge() local
3129 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
3130 kfree_skb(skb); in skb_queue_purge()
3150 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3153 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3154 sum += skb->truesize; in skb_rbtree_purge()
3155 kfree_skb(skb); in skb_rbtree_purge()
3212 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
3217 __skb_unlink(skb, list); in skb_unlink()
3242 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
3248 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3251 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3252 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
3254 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
3255 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
3256 skb1->data_len = skb->data_len; in skb_split_inside_header()
3258 skb->data_len = 0; in skb_split_inside_header()
3259 skb->len = len; in skb_split_inside_header()
3260 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
3263 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
3268 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
3270 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
3271 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
3272 skb->len = len; in skb_split_no_header()
3273 skb->data_len = len - pos; in skb_split_no_header()
3276 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
3279 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
3290 skb_frag_ref(skb, i); in skb_split_no_header()
3293 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
3294 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3298 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3310 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
3312 int pos = skb_headlen(skb); in skb_split()
3314 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & in skb_split()
3316 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
3318 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
3320 skb_split_no_header(skb, skb1, len, pos); in skb_split()
3328 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
3330 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
3351 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
3356 BUG_ON(shiftlen > skb->len); in skb_shift()
3358 if (skb_headlen(skb)) in skb_shift()
3360 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
3366 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3380 if (skb_prepare_for_shift(skb) || in skb_shift()
3385 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3399 if ((shiftlen == skb->len) && in skb_shift()
3400 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
3403 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
3406 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
3410 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3438 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
3447 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
3448 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
3449 skb_shinfo(skb)->nr_frags = to; in skb_shift()
3451 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
3458 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
3461 skb->len -= shiftlen; in skb_shift()
3462 skb->data_len -= shiftlen; in skb_shift()
3463 skb->truesize -= shiftlen; in skb_shift()
3481 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
3486 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
3624 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
3633 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
3640 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
3643 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
3645 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
3646 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
3649 skb_fill_page_desc(skb, i, page, offset, size); in skb_append_pagefrags()
3669 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
3671 unsigned char *data = skb->data; in skb_pull_rcsum()
3673 BUG_ON(len > skb->len); in skb_pull_rcsum()
3674 __skb_pull(skb, len); in skb_pull_rcsum()
3675 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
3676 return skb->data; in skb_pull_rcsum()
3693 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
3697 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
3698 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
3705 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
3707 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
3727 skb->next = nskb; in skb_segment_list()
3743 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
3744 __copy_skb_header(nskb, skb); in skb_segment_list()
3746 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
3748 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
3758 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
3759 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
3760 skb->len = skb->len - delta_len; in skb_segment_list()
3762 skb_gso_reset(skb); in skb_segment_list()
3764 skb->prev = tail; in skb_segment_list()
3766 if (skb_needs_linearize(skb, features) && in skb_segment_list()
3767 __skb_linearize(skb)) in skb_segment_list()
3770 skb_get(skb); in skb_segment_list()
3772 return skb; in skb_segment_list()
3775 kfree_skb_list(skb->next); in skb_segment_list()
3776 skb->next = NULL; in skb_segment_list()
3781 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive_list() argument
3783 if (unlikely(p->len + skb->len >= 65536)) in skb_gro_receive_list()
3787 skb_shinfo(p)->frag_list = skb; in skb_gro_receive_list()
3789 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive_list()
3791 skb_pull(skb, skb_gro_offset(skb)); in skb_gro_receive_list()
3793 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive_list()
3795 p->data_len += skb->len; in skb_gro_receive_list()
3796 p->truesize += skb->truesize; in skb_gro_receive_list()
3797 p->len += skb->len; in skb_gro_receive_list()
3799 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive_list()
4156 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive() argument
4158 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
4159 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
4160 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
4161 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
4165 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) in skb_gro_receive()
4194 delta_truesize = skb->truesize - in skb_gro_receive()
4195 SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
4197 skb->truesize -= skb->data_len; in skb_gro_receive()
4198 skb->len -= skb->data_len; in skb_gro_receive()
4199 skb->data_len = 0; in skb_gro_receive()
4201 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
4203 } else if (skb->head_frag) { in skb_gro_receive()
4206 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
4213 first_offset = skb->data - in skb_gro_receive()
4226 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_gro_receive()
4227 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
4232 delta_truesize = skb->truesize; in skb_gro_receive()
4238 skb->data_len -= eat; in skb_gro_receive()
4239 skb->len -= eat; in skb_gro_receive()
4243 __skb_pull(skb, offset); in skb_gro_receive()
4246 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
4248 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
4249 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
4250 __skb_header_release(skb); in skb_gro_receive()
4263 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
4343 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
4346 int start = skb_headlen(skb); in __skb_to_sgvec()
4357 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
4364 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
4369 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
4371 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
4387 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
4426 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
4428 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
4458 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
4461 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
4484 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
4494 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
4495 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
4499 if (!skb_has_frag_list(skb)) { in skb_cow_data()
4505 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
4506 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
4510 *trailer = skb; in skb_cow_data()
4517 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
4577 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
4579 struct sock *sk = skb->sk; in sock_rmem_free()
4581 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
4584 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
4589 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
4596 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
4598 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
4602 skb_orphan(skb); in sock_queue_err_skb()
4603 skb->sk = sk; in sock_queue_err_skb()
4604 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
4605 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
4606 skb_set_err_queue(skb); in sock_queue_err_skb()
4609 skb_dst_force(skb); in sock_queue_err_skb()
4611 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
4618 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
4620 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
4621 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
4627 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
4632 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
4633 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
4640 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
4646 return skb; in sock_dequeue_err_skb()
4663 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
4665 struct sock *sk = skb->sk; in skb_clone_sk()
4671 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
4684 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
4692 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
4694 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
4700 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
4702 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
4708 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
4711 kfree_skb(skb); in __skb_complete_tx_timestamp()
4728 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
4731 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
4740 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
4741 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
4747 kfree_skb(skb); in skb_complete_tx_timestamp()
4755 struct sk_buff *skb; in __skb_tstamp_tx() local
4774 skb = tcp_get_timestamping_opt_stats(sk, orig_skb); in __skb_tstamp_tx()
4778 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
4780 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
4782 if (!skb) in __skb_tstamp_tx()
4786 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
4788 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
4792 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
4794 skb->tstamp = ktime_get_real(); in __skb_tstamp_tx()
4796 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
4808 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
4810 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
4814 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
4815 skb->wifi_acked = acked; in skb_complete_wifi_ack()
4817 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
4826 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
4830 kfree_skb(skb); in skb_complete_wifi_ack()
4846 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
4849 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
4851 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
4853 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
4856 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
4857 skb->csum_start = csum_start; in skb_partial_csum_set()
4858 skb->csum_offset = off; in skb_partial_csum_set()
4859 skb_set_transport_header(skb, start); in skb_partial_csum_set()
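skb_partial_csum_set() above is how a virtual device typically marks a packet for deferred (CHECKSUM_PARTIAL) checksumming after parsing offload metadata; a hedged sketch, with example_mark_partial() and the parameter source being assumptions:

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Editorial sketch: mark a packet CHECKSUM_PARTIAL from caller-supplied
 * offsets (e.g. taken from virtio-style metadata). */
static int example_mark_partial(struct sk_buff *skb, u16 csum_start, u16 csum_offset)
{
        /* Validates the offsets against headroom/headlen and sets
         * skb->ip_summed, skb->csum_start and skb->csum_offset. */
        if (!skb_partial_csum_set(skb, csum_start, csum_offset))
                return -EINVAL;

        return 0;
}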
4864 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
4867 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
4873 if (max > skb->len) in skb_maybe_pull_tail()
4874 max = skb->len; in skb_maybe_pull_tail()
4876 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
4879 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
4887 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
4895 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
4897 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
4901 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
4904 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
4906 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
4910 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
4921 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
4930 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
4936 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
4939 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
4946 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
4951 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
4952 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
4953 skb->len - off, in skb_checksum_setup_ipv4()
4954 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
4966 #define OPT_HDR(type, skb, off) \ argument
4967 (type *)(skb_network_header(skb) + (off))
4969 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
4984 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
4988 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
4990 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
4998 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5005 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5013 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5020 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5028 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5035 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5055 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5060 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5061 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5062 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5074 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5078 switch (skb->protocol) { in skb_checksum_setup()
5080 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5084 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
5109 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5113 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5116 if (skb->len < len) in skb_checksum_maybe_trim()
5118 else if (skb->len == len) in skb_checksum_maybe_trim()
5119 return skb; in skb_checksum_maybe_trim()
5121 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5149 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
5151 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
5154 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
5157 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
5174 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
5182 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
5185 skb->dev->name); in __skb_warn_lro_forwarding()
5189 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
5192 skb_release_head_state(skb); in kfree_skb_partial()
5193 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
5195 __kfree_skb(skb); in kfree_skb_partial()
5297 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
5299 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
5300 skb->skb_iif = 0; in skb_scrub_packet()
5301 skb->ignore_df = 0; in skb_scrub_packet()
5302 skb_dst_drop(skb); in skb_scrub_packet()
5303 skb_ext_reset(skb); in skb_scrub_packet()
5304 nf_reset_ct(skb); in skb_scrub_packet()
5305 nf_reset_trace(skb); in skb_scrub_packet()
5308 skb->offload_fwd_mark = 0; in skb_scrub_packet()
5309 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
5315 ipvs_reset(skb); in skb_scrub_packet()
5316 skb->mark = 0; in skb_scrub_packet()
5317 skb->tstamp = 0; in skb_scrub_packet()
5331 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
5333 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
5336 if (skb->encapsulation) { in skb_gso_transport_seglen()
5337 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
5338 skb_transport_header(skb); in skb_gso_transport_seglen()
5341 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
5343 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
5344 } else if (unlikely(skb_is_gso_sctp(skb))) { in skb_gso_transport_seglen()
5366 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) in skb_gso_network_seglen() argument
5368 unsigned int hdr_len = skb_transport_header(skb) - in skb_gso_network_seglen()
5369 skb_network_header(skb); in skb_gso_network_seglen()
5371 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_network_seglen()
5383 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) in skb_gso_mac_seglen() argument
5385 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in skb_gso_mac_seglen()
5387 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_mac_seglen()
5411 static inline bool skb_gso_size_check(const struct sk_buff *skb, in skb_gso_size_check() argument
5414 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_size_check()
5423 skb_walk_frags(skb, iter) { in skb_gso_size_check()
5441 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) in skb_gso_validate_network_len() argument
5443 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); in skb_gso_validate_network_len()
5456 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) in skb_gso_validate_mac_len() argument
5458 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); in skb_gso_validate_mac_len()
5462 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
5467 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
5468 kfree_skb(skb); in skb_reorder_vlan_header()
5472 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
5474 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
5478 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
5480 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
5484 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
5485 return skb; in skb_reorder_vlan_header()
5488 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
5493 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
5495 return skb; in skb_vlan_untag()
5498 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
5499 if (unlikely(!skb)) in skb_vlan_untag()
5502 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
5505 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
5507 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
5509 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
5510 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
5512 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
5513 if (unlikely(!skb)) in skb_vlan_untag()
5516 skb_reset_network_header(skb); in skb_vlan_untag()
5517 skb_reset_transport_header(skb); in skb_vlan_untag()
5518 skb_reset_mac_len(skb); in skb_vlan_untag()
5520 return skb; in skb_vlan_untag()
5523 kfree_skb(skb); in skb_vlan_untag()
5528 int skb_ensure_writable(struct sk_buff *skb, int write_len) in skb_ensure_writable() argument
5530 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
5533 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
5536 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
5543 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
5546 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
5555 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
5559 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
5561 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in __skb_vlan_pop()
5564 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); in __skb_vlan_pop()
5565 __skb_pull(skb, VLAN_HLEN); in __skb_vlan_pop()
5567 vlan_set_encap_proto(skb, vhdr); in __skb_vlan_pop()
5568 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
5570 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
5571 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
5573 skb_reset_mac_len(skb); in __skb_vlan_pop()
5582 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
5588 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
5589 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
5591 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5594 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5599 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5602 vlan_proto = skb->protocol; in skb_vlan_pop()
5603 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5607 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
5615 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
5617 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
5618 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
5627 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
5628 skb_vlan_tag_get(skb)); in skb_vlan_push()
5632 skb->protocol = skb->vlan_proto; in skb_vlan_push()
5633 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
5635 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
5637 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
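
Note: skb_vlan_push(skb, vlan_proto, vlan_tci) adds a tag; if one is already present in the accelerated field it is first inserted into the packet (__vlan_insert_tag() above), and the new tag ends up in skb->vlan_tci. Sketch with an illustrative wrapper:

	/* Hedged sketch: push an 802.1Q tag carrying VLAN ID 100. */
	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>

	static int push_vlan_100_example(struct sk_buff *skb)
	{
		return skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
	}
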
5654 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
5656 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
5657 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
5660 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
5661 skb_reset_mac_header(skb); in skb_eth_pop()
5662 skb_reset_mac_len(skb); in skb_eth_pop()
5681 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
5687 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
5690 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
5694 skb_push(skb, sizeof(*eth)); in skb_eth_push()
5695 skb_reset_mac_header(skb); in skb_eth_push()
5696 skb_reset_mac_len(skb); in skb_eth_push()
5698 eth = eth_hdr(skb); in skb_eth_push()
5701 eth->h_proto = skb->protocol; in skb_eth_push()
5703 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
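
Note: skb_eth_pop() strips the Ethernet header from an untagged frame and skb_eth_push() puts a fresh one back in front of the network header. Only the first parameters of skb_eth_push() are visible above; the sketch assumes the remaining parameter is the source MAC address, and the wrapper name is illustrative.

	/* Hedged sketch: replace the Ethernet header by popping it and
	 * pushing a new one with different addresses.
	 */
	#include <linux/skbuff.h>

	static int swap_eth_header_example(struct sk_buff *skb,
					   const unsigned char *dst,
					   const unsigned char *src)
	{
		int err = skb_eth_pop(skb);

		if (err)
			return err;
		return skb_eth_push(skb, dst, src);
	}
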
5710 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
5713 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
5716 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
5737 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
5747 if (skb->encapsulation) in skb_mpls_push()
5750 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
5754 if (!skb->inner_protocol) { in skb_mpls_push()
5755 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
5756 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
5759 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
5760 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
5762 skb_reset_mac_header(skb); in skb_mpls_push()
5763 skb_set_network_header(skb, mac_len); in skb_mpls_push()
5764 skb_reset_mac_len(skb); in skb_mpls_push()
5766 lse = mpls_hdr(skb); in skb_mpls_push()
5768 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
5771 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
5772 skb->protocol = mpls_proto; in skb_mpls_push()
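
Note: skb_mpls_push() inserts an MPLS label stack entry in front of the network header and, on Ethernet, rewrites the EtherType through skb_mod_eth_type(). The prototype is only partly visible above; the sketch assumes the trailing parameters are the mac length and an "is Ethernet" flag.

	/* Hedged sketch: push a unicast MPLS LSE (label 1000, bottom of
	 * stack, TTL 64). The (mac_len, ethernet) tail of the call is an
	 * assumption based on the partial prototype above.
	 */
	#include <linux/skbuff.h>
	#include <linux/if_ether.h>
	#include <linux/mpls.h>

	static int push_mpls_example(struct sk_buff *skb)
	{
		u32 lse = (1000 << MPLS_LS_LABEL_SHIFT) |	/* label   */
			  (1 << MPLS_LS_S_SHIFT) |		/* BoS bit */
			  (64 << MPLS_LS_TTL_SHIFT);		/* TTL     */

		return skb_mpls_push(skb, cpu_to_be32(lse),
				     htons(ETH_P_MPLS_UC),
				     skb->mac_len, true);
	}
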
5790 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
5795 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
5798 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
5802 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
5803 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
5806 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
5807 skb_reset_mac_header(skb); in skb_mpls_pop()
5808 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
5814 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
5815 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
5817 skb->protocol = next_proto; in skb_mpls_pop()
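
Note: skb_mpls_pop() removes the outermost LSE and sets skb->protocol (and, on Ethernet, the EtherType) to the caller-supplied next protocol. As with the push side, the trailing parameters are assumed from the partial prototype above.

	/* Hedged sketch: pop the outer MPLS LSE and declare the payload
	 * as IPv4.
	 */
	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	static int pop_mpls_to_ipv4_example(struct sk_buff *skb)
	{
		return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
	}
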
5833 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
5837 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
5840 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
5844 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
5845 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
5847 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
5850 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
5865 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
5870 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
5873 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
5876 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
5884 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
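
Note: skb_mpls_update_lse() rewrites the top label stack entry while patching a CHECKSUM_COMPLETE checksum, and skb_mpls_dec_ttl() uses it to write back an LSE with the TTL decremented. A hypothetical forwarding-step sketch; the exact error codes are not shown in the listing above.

	/* Hedged sketch: decrement the MPLS TTL and drop the packet when
	 * the helper reports an error (e.g. TTL exhausted).
	 */
	#include <linux/skbuff.h>

	static int mpls_forward_step_example(struct sk_buff *skb)
	{
		int err = skb_mpls_dec_ttl(skb);

		if (err) {
			kfree_skb(skb);
			return err;
		}
		return 0;
	}
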
5907 struct sk_buff *skb; in alloc_skb_with_frags() local
5919 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
5920 if (!skb) in alloc_skb_with_frags()
5923 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
5948 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
5952 return skb; in alloc_skb_with_frags()
5955 kfree_skb(skb); in alloc_skb_with_frags()
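
Note: alloc_skb_with_frags() allocates an skb with a linear header area plus page fragments for the payload, adjusting truesize for the attached pages as seen above. Its full parameter list is not visible in these matches; the sketch assumes the conventional (header_len, data_len, max_page_order, &errcode, gfp_mask) form.

	/* Hedged sketch: allocate an skb with a small linear header and a
	 * paged payload. The parameter order is an assumption.
	 */
	#include <linux/skbuff.h>

	static struct sk_buff *alloc_paged_skb_example(unsigned long payload)
	{
		int errcode;

		return alloc_skb_with_frags(128, payload,
					    PAGE_ALLOC_COSTLY_ORDER,
					    &errcode, GFP_KERNEL);
	}
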
5961 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
5965 int size = skb_end_offset(skb); in pskb_carve_inside_header()
5971 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
5982 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
5983 skb->len -= off; in pskb_carve_inside_header()
5986 skb_shinfo(skb), in pskb_carve_inside_header()
5988 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
5989 if (skb_cloned(skb)) { in pskb_carve_inside_header()
5991 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
5995 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
5996 skb_frag_ref(skb, i); in pskb_carve_inside_header()
5997 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
5998 skb_clone_fraglist(skb); in pskb_carve_inside_header()
5999 skb_release_data(skb); in pskb_carve_inside_header()
6004 skb_free_head(skb); in pskb_carve_inside_header()
6007 skb->head = data; in pskb_carve_inside_header()
6008 skb->data = data; in pskb_carve_inside_header()
6009 skb->head_frag = 0; in pskb_carve_inside_header()
6010 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6011 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6012 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6013 skb->cloned = 0; in pskb_carve_inside_header()
6014 skb->hdr_len = 0; in pskb_carve_inside_header()
6015 skb->nohdr = 0; in pskb_carve_inside_header()
6016 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6021 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6026 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
6080 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6084 int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6086 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6091 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6102 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6103 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6109 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6112 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6126 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6132 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6133 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6136 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6138 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6139 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6143 skb_release_data(skb); in pskb_carve_inside_nonlinear()
6145 skb->head = data; in pskb_carve_inside_nonlinear()
6146 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6147 skb->data = data; in pskb_carve_inside_nonlinear()
6148 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6149 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6150 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6151 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6152 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6153 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6154 skb->len -= off; in pskb_carve_inside_nonlinear()
6155 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6156 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6161 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6163 int headlen = skb_headlen(skb); in pskb_carve()
6166 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6168 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
6174 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6177 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
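
Note: pskb_extract() clones the skb and then uses the pskb_carve*() helpers above to trim the clone down to a sub-range of the original, without copying paged data. Only the (skb, off, ...) part of the prototype is visible; the sketch assumes the remaining parameters are the length to keep and a gfp mask.

	/* Hedged sketch: extract a sub-record from a larger skb. On
	 * failure the original skb is left untouched.
	 */
	#include <linux/skbuff.h>

	static struct sk_buff *extract_record_example(struct sk_buff *skb,
						      int off, int len)
	{
		return pskb_extract(skb, off, len, GFP_ATOMIC);
	}
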
6203 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6205 if (skb->data_len) { in skb_condense()
6206 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6207 skb_cloned(skb)) in skb_condense()
6211 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6220 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
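
Note: skb_condense() pulls small amounts of paged data back into the linear area when that is cheap and the skb is not cloned, then recomputes truesize so packets parked on receive queues account less memory. A caller sketch with an illustrative wrapper:

	/* Hedged sketch: shrink an skb that is about to sit on a socket
	 * queue for a while, then enqueue it.
	 */
	#include <linux/skbuff.h>

	static void queue_for_later_example(struct sk_buff_head *queue,
					    struct sk_buff *skb)
	{
		skb_condense(skb);	/* may fold tiny frags into the linear area */
		skb_queue_tail(queue, skb);
	}
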
6288 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
6293 skb_ext_put(skb); in __skb_ext_set()
6297 skb->extensions = ext; in __skb_ext_set()
6298 skb->active_extensions = 1 << id; in __skb_ext_set()
6316 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
6321 if (skb->active_extensions) { in skb_ext_add()
6322 old = skb->extensions; in skb_ext_add()
6324 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6344 skb->extensions = new; in skb_ext_add()
6345 skb->active_extensions |= 1 << id; in skb_ext_add()
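
Note: skb_ext_add(skb, id) attaches (or copy-on-writes) the extension area and returns a pointer to the slot for the given id; the __skb_ext_del() matches just below show the removal path, which clears the id bit and drops the area when no extensions remain. A sketch assuming CONFIG_SKB_EXTENSIONS; the use of SKB_EXT_SEC_PATH as the id is illustrative and depends on CONFIG_XFRM.

	/* Hedged sketch: attach an extension, use it, then remove it. */
	#include <linux/errno.h>
	#include <linux/skbuff.h>

	static int ext_add_del_example(struct sk_buff *skb)
	{
		void *ext = skb_ext_add(skb, SKB_EXT_SEC_PATH);

		if (!ext)
			return -ENOMEM;	/* allocation or COW failed */

		/* ... fill in the extension data behind ext ... */

		__skb_ext_del(skb, SKB_EXT_SEC_PATH);
		return 0;
	}
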
6360 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
6362 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6364 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6365 if (skb->active_extensions == 0) { in __skb_ext_del()
6366 skb->extensions = NULL; in __skb_ext_del()