xref: /OK3568_Linux_fs/kernel/net/core/skbuff.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>
#include <linux/mpls.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>
#include <net/mpls.h>
#include <net/mptcp.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>
#include <linux/indirect_call_wrapper.h>
#include <trace/hooks/net.h>

#include "datagram.h"

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
#ifdef CONFIG_SKB_EXTENSIONS
static struct kmem_cache *skbuff_ext_cache __ro_after_init;
#endif
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%px len:%d put:%d head:%px data:%px tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation, when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/*	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}

	skb_set_kcov_handle(skb, kcov_common_handle());

out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
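
/* Editor's usage sketch (not part of the original file): callers normally
 * reach this function through the alloc_skb() wrapper and then carve out
 * headroom themselves. NET_IP_ALIGN and the 128-byte payload are purely
 * illustrative choices.
 *
 *	struct sk_buff *skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, NET_IP_ALIGN);	// headroom: returned skb has none
 *	// ... fill payload with skb_put(), then hand off to the stack ...
 */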

/* Caller must provide SKB that is memset cleared */
static struct sk_buff *__build_skb_around(struct sk_buff *skb,
					  void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	unsigned int size = frag_size ? : ksize(data);

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	/* Assumes caller memset cleared SKB */
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	skb_set_kcov_handle(skb, kcov_common_handle());

	return skb;
}

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, driver calls build_skb() to allocate the sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb;

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (unlikely(!skb))
		return NULL;

	memset(skb, 0, offsetof(struct sk_buff, tail));

	return __build_skb_around(skb, data, frag_size);
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
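
/* Editor's sketch of the RX pattern described in the __build_skb() note
 * above (illustrative, not from this file): the driver sizes the buffer
 * for headroom plus tailroom, lets the NIC DMA into it, then wraps it.
 * "rx_frag_size" and the 1536-byte MTU budget are assumed names/values.
 *
 *	unsigned int rx_frag_size =
 *		SKB_DATA_ALIGN(NET_SKB_PAD + 1536) +
 *		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = napi_alloc_frag(rx_frag_size);
 *	// ... NIC writes the frame at buf + NET_SKB_PAD ...
 *	struct sk_buff *skb = build_skb(buf, rx_frag_size);
 *
 *	if (skb)
 *		skb_reserve(skb, NET_SKB_PAD);
 */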

/**
 * build_skb_around - build a network buffer around provided skb
 * @skb: sk_buff provided by caller, must be memset cleared
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 */
struct sk_buff *build_skb_around(struct sk_buff *skb,
				 void *data, unsigned int frag_size)
{
	if (unlikely(!skb))
		return NULL;

	skb = __build_skb_around(skb, data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb_around);

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	fragsz = SKB_DATA_ALIGN(fragsz);

	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);
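
/* Editor's sketch (illustrative): napi_alloc_frag() is meant for driver
 * NAPI poll callbacks, where BH is already disabled and the per-CPU
 * napi_alloc_cache is therefore safe to touch without extra locking.
 *
 *	void *frag = napi_alloc_frag(2048);	// size is illustrative
 *
 *	if (!frag)
 *		break;	// out of memory: retry the ring refill next poll
 */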

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	struct page_frag_cache *nc;
	void *data;

	fragsz = SKB_DATA_ALIGN(fragsz);
	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, fragsz, GFP_ATOMIC);
	} else {
		local_bh_disable();
		data = __napi_alloc_frag(fragsz, GFP_ATOMIC);
		local_bh_enable();
	}
	return data;
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	if (in_irq() || irqs_disabled()) {
		nc = this_cpu_ptr(&netdev_alloc_cache);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
	} else {
		local_bh_disable();
		nc = this_cpu_ptr(&napi_alloc_cache.page);
		data = page_frag_alloc(nc, len, gfp_mask);
		pfmemalloc = nc->pfmemalloc;
		local_bh_enable();
	}

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
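
/* Editor's usage sketch (illustrative): drivers usually go through the
 * netdev_alloc_skb() wrapper, which supplies GFP_ATOMIC. Headroom asked
 * for here (NET_IP_ALIGN) is on top of the built-in NET_SKB_PAD;
 * "pkt_len" is an assumed name.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *
 *	if (unlikely(!skb))
 *		return NULL;
 *	skb_reserve(skb, NET_IP_ALIGN);	// align the IP header
 */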

/**
 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 *	@napi: napi instance this buffer was allocated for
 *	@len: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 *	attempt to allocate the head from a special reserved region used
 *	only for NAPI Rx allocation.  By doing this we can save several
 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc;
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	/* If requested length is either too small or too big,
	 * we use kmalloc() for skb->head allocation.
	 */
	if (len <= SKB_WITH_OVERHEAD(1024) ||
	    len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	nc = this_cpu_ptr(&napi_alloc_cache);
	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
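
/* Editor's sketch (illustrative, inside a NAPI poll loop where the
 * napi_alloc_skb() wrapper supplies GFP_ATOMIC). "priv", "rx_buf" and
 * "frame_len" are assumed driver-local names.
 *
 *	struct sk_buff *skb = napi_alloc_skb(&priv->napi, frame_len);
 *
 *	if (skb) {
 *		skb_put_data(skb, rx_buf, frame_len);
 *		skb->protocol = eth_type_trans(skb, priv->netdev);
 *		napi_gro_receive(&priv->napi, skb);
 *	}
 */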

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);
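
/* Editor's sketch (hypothetical caller): appending a received page as the
 * next fragment of a paged skb. Note truesize covers the whole backing
 * allocation (here a full page), not just the bytes used.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 *			offset, frag_len, PAGE_SIZE);
 */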

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}

/*
 *	Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before original skb
		 * This test would have no chance to be true for the clone,
		 * while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	nf_reset_ct(skb);
	skb_dst_drop(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
	skb_ext_put(skb);
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_android_vh_kfree_skb(skb);
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);
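
/* Editor's note with sketch (illustrative): kfree_skb() is the drop path
 * and fires the kfree_skb tracepoint (plus the Android vendor hook above);
 * consume_skb() further down is for normal, successful completion. A
 * hypothetical validation failure would look like:
 *
 *	if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr)))) {
 *		kfree_skb(skb);		// malformed frame: count as a drop
 *		return NET_RX_DROP;
 *	}
 */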

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/* Dump skb information and contents.
 *
 * Must only be called from net_ratelimit()-ed paths.
 *
 * Dumps whole packets if full_pkt, only headers otherwise.
 */
void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
{
	struct skb_shared_info *sh = skb_shinfo(skb);
	struct net_device *dev = skb->dev;
	struct sock *sk = skb->sk;
	struct sk_buff *list_skb;
	bool has_mac, has_trans;
	int headroom, tailroom;
	int i, len, seg_len;

	if (full_pkt)
		len = skb->len;
	else
		len = min_t(int, skb->len, MAX_HEADER + 128);

	headroom = skb_headroom(skb);
	tailroom = skb_tailroom(skb);

	has_mac = skb_mac_header_was_set(skb);
	has_trans = skb_transport_header_was_set(skb);

	printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
	       "mac=(%d,%d) net=(%d,%d) trans=%d\n"
	       "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
	       "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
	       "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
	       level, skb->len, headroom, skb_headlen(skb), tailroom,
	       has_mac ? skb->mac_header : -1,
	       has_mac ? skb_mac_header_len(skb) : -1,
	       skb->network_header,
	       has_trans ? skb_network_header_len(skb) : -1,
	       has_trans ? skb->transport_header : -1,
	       sh->tx_flags, sh->nr_frags,
	       sh->gso_size, sh->gso_type, sh->gso_segs,
	       skb->csum, skb->ip_summed, skb->csum_complete_sw,
	       skb->csum_valid, skb->csum_level,
	       skb->hash, skb->sw_hash, skb->l4_hash,
	       ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);

	if (dev)
		printk("%sdev name=%s feat=%pNF\n",
		       level, dev->name, &dev->features);
	if (sk)
		printk("%ssk family=%hu type=%u proto=%u\n",
		       level, sk->sk_family, sk->sk_type, sk->sk_protocol);

	if (full_pkt && headroom)
		print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->head, headroom, false);

	seg_len = min_t(int, skb_headlen(skb), len);
	if (seg_len)
		print_hex_dump(level, "skb linear:   ", DUMP_PREFIX_OFFSET,
			       16, 1, skb->data, seg_len, false);
	len -= seg_len;

	if (full_pkt && tailroom)
		print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
			       16, 1, skb_tail_pointer(skb), tailroom, false);

	for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(frag, skb_frag_off(frag),
				      skb_frag_size(frag), p, p_off, p_len,
				      copied) {
			seg_len = min_t(int, p_len, len);
			vaddr = kmap_atomic(p);
			print_hex_dump(level, "skb frag:     ",
				       DUMP_PREFIX_OFFSET,
				       16, 1, vaddr + p_off, seg_len, false);
			kunmap_atomic(vaddr);
			len -= seg_len;
			if (!len)
				break;
		}
	}

	if (full_pkt && skb_has_frag_list(skb)) {
		printk("skb fraglist:\n");
		skb_walk_frags(skb, list_skb)
			skb_dump(level, list_skb, true);
	}
}
EXPORT_SYMBOL(skb_dump);
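
/* Editor's usage sketch (illustrative): skb_dump() is verbose by design,
 * so the comment above requires callers to rate-limit, e.g. in a driver
 * debug path:
 *
 *	if (net_ratelimit())
 *		skb_dump(KERN_ERR, skb, false);	// headers only
 */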

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

#ifdef CONFIG_TRACEPOINTS
/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 *	is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
#endif

/**
 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 *	@skb: buffer to free
 *
 *	Like consume_skb(), but this variant assumes that this is the last
 *	skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if containing objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);
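
/* Editor's sketch (assumed TX-completion context; "foo_poll" and "tx_buf"
 * are illustrative names): the poll budget is forwarded so that netpoll
 * callers, which pass budget == 0, fall back to dev_consume_skb_any().
 *
 *	static int foo_poll(struct napi_struct *napi, int budget)
 *	{
 *		// ... for each completed TX descriptor ...
 *		napi_consume_skb(tx_buf->skb, budget);
 *		// ... then process RX up to budget ...
 *	}
 */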

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
	__skb_ext_copy(new, old);
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif

}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	C(pfmemalloc);
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
 * @first: first sk_buff of the msg
 */
struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
{
	struct sk_buff *n;

	n = alloc_skb(0, GFP_ATOMIC);
	if (!n)
		return NULL;

	n->len = first->len;
	n->data_len = first->len;
	n->truesize = first->truesize;

	skb_shinfo(n)->frag_list = first;

	__copy_skb_header(n, first);
	n->destructor = NULL;

	return n;
}
EXPORT_SYMBOL_GPL(alloc_skb_for_msg);

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);
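
/* Editor's usage sketch (illustrative only): morph a freshly allocated
 * shell so it shares @src's data, then drop the caller's original
 * reference; both skbs now reference the same head via dataref.
 *
 *	struct sk_buff *shell = alloc_skb(0, GFP_ATOMIC);
 *
 *	if (shell) {
 *		skb_morph(shell, src);	// shell becomes a clone of src
 *		consume_skb(src);
 *	}
 */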
1065*4882a593Smuzhiyun 
mm_account_pinned_pages(struct mmpin * mmp,size_t size)1066*4882a593Smuzhiyun int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun 	unsigned long max_pg, num_pg, new_pg, old_pg;
1069*4882a593Smuzhiyun 	struct user_struct *user;
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 	if (capable(CAP_IPC_LOCK) || !size)
1072*4882a593Smuzhiyun 		return 0;
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
1075*4882a593Smuzhiyun 	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1076*4882a593Smuzhiyun 	user = mmp->user ? : current_user();
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	do {
1079*4882a593Smuzhiyun 		old_pg = atomic_long_read(&user->locked_vm);
1080*4882a593Smuzhiyun 		new_pg = old_pg + num_pg;
1081*4882a593Smuzhiyun 		if (new_pg > max_pg)
1082*4882a593Smuzhiyun 			return -ENOBUFS;
1083*4882a593Smuzhiyun 	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
1084*4882a593Smuzhiyun 		 old_pg);
1085*4882a593Smuzhiyun 
1086*4882a593Smuzhiyun 	if (!mmp->user) {
1087*4882a593Smuzhiyun 		mmp->user = get_uid(user);
1088*4882a593Smuzhiyun 		mmp->num_pg = num_pg;
1089*4882a593Smuzhiyun 	} else {
1090*4882a593Smuzhiyun 		mmp->num_pg += num_pg;
1091*4882a593Smuzhiyun 	}
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	return 0;
1094*4882a593Smuzhiyun }
1095*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1096*4882a593Smuzhiyun 
mm_unaccount_pinned_pages(struct mmpin * mmp)1097*4882a593Smuzhiyun void mm_unaccount_pinned_pages(struct mmpin *mmp)
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun 	if (mmp->user) {
1100*4882a593Smuzhiyun 		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1101*4882a593Smuzhiyun 		free_uid(mmp->user);
1102*4882a593Smuzhiyun 	}
1103*4882a593Smuzhiyun }
1104*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1105*4882a593Smuzhiyun 
sock_zerocopy_alloc(struct sock * sk,size_t size)1106*4882a593Smuzhiyun struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
1107*4882a593Smuzhiyun {
1108*4882a593Smuzhiyun 	struct ubuf_info *uarg;
1109*4882a593Smuzhiyun 	struct sk_buff *skb;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	WARN_ON_ONCE(!in_task());
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	skb = sock_omalloc(sk, 0, GFP_KERNEL);
1114*4882a593Smuzhiyun 	if (!skb)
1115*4882a593Smuzhiyun 		return NULL;
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun 	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1118*4882a593Smuzhiyun 	uarg = (void *)skb->cb;
1119*4882a593Smuzhiyun 	uarg->mmp.user = NULL;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	if (mm_account_pinned_pages(&uarg->mmp, size)) {
1122*4882a593Smuzhiyun 		kfree_skb(skb);
1123*4882a593Smuzhiyun 		return NULL;
1124*4882a593Smuzhiyun 	}
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun 	uarg->callback = sock_zerocopy_callback;
1127*4882a593Smuzhiyun 	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1128*4882a593Smuzhiyun 	uarg->len = 1;
1129*4882a593Smuzhiyun 	uarg->bytelen = size;
1130*4882a593Smuzhiyun 	uarg->zerocopy = 1;
1131*4882a593Smuzhiyun 	refcount_set(&uarg->refcnt, 1);
1132*4882a593Smuzhiyun 	sock_hold(sk);
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	return uarg;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
1137*4882a593Smuzhiyun 
skb_from_uarg(struct ubuf_info * uarg)1138*4882a593Smuzhiyun static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
1139*4882a593Smuzhiyun {
1140*4882a593Smuzhiyun 	return container_of((void *)uarg, struct sk_buff, cb);
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
1144*4882a593Smuzhiyun 					struct ubuf_info *uarg)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun 	if (uarg) {
1147*4882a593Smuzhiyun 		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
1148*4882a593Smuzhiyun 		u32 bytelen, next;
1149*4882a593Smuzhiyun 
1150*4882a593Smuzhiyun 		/* realloc only when socket is locked (TCP, UDP cork),
1151*4882a593Smuzhiyun 		 * so uarg->len and sk_zckey access is serialized
1152*4882a593Smuzhiyun 		 */
1153*4882a593Smuzhiyun 		if (!sock_owned_by_user(sk)) {
1154*4882a593Smuzhiyun 			WARN_ON_ONCE(1);
1155*4882a593Smuzhiyun 			return NULL;
1156*4882a593Smuzhiyun 		}
1157*4882a593Smuzhiyun 
1158*4882a593Smuzhiyun 		bytelen = uarg->bytelen + size;
1159*4882a593Smuzhiyun 		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1160*4882a593Smuzhiyun 			/* TCP can create new skb to attach new uarg */
1161*4882a593Smuzhiyun 			if (sk->sk_type == SOCK_STREAM)
1162*4882a593Smuzhiyun 				goto new_alloc;
1163*4882a593Smuzhiyun 			return NULL;
1164*4882a593Smuzhiyun 		}
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 		next = (u32)atomic_read(&sk->sk_zckey);
1167*4882a593Smuzhiyun 		if ((u32)(uarg->id + uarg->len) == next) {
1168*4882a593Smuzhiyun 			if (mm_account_pinned_pages(&uarg->mmp, size))
1169*4882a593Smuzhiyun 				return NULL;
1170*4882a593Smuzhiyun 			uarg->len++;
1171*4882a593Smuzhiyun 			uarg->bytelen = bytelen;
1172*4882a593Smuzhiyun 			atomic_set(&sk->sk_zckey, ++next);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 			/* no extra ref when appending to datagram (MSG_MORE) */
1175*4882a593Smuzhiyun 			if (sk->sk_type == SOCK_STREAM)
1176*4882a593Smuzhiyun 				sock_zerocopy_get(uarg);
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 			return uarg;
1179*4882a593Smuzhiyun 		}
1180*4882a593Smuzhiyun 	}
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun new_alloc:
1183*4882a593Smuzhiyun 	return sock_zerocopy_alloc(sk, size);
1184*4882a593Smuzhiyun }
1185*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
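
/* A minimal sketch of how a locked sendmsg path might obtain a
 * ubuf_info, preferring to extend the notification already attached to
 * the queue tail. zc_get_uarg_example() is a hypothetical helper; the
 * call shape mirrors what stream protocols do with this function.
 */
static struct ubuf_info *zc_get_uarg_example(struct sock *sk,
					     struct sk_buff *tail_skb,
					     size_t size)
{
	/* Extends tail_skb's uarg when the id range is contiguous in
	 * sk_zckey space, otherwise falls back to sock_zerocopy_alloc().
	 */
	return sock_zerocopy_realloc(sk, size,
				     tail_skb ? skb_zcopy(tail_skb) : NULL);
}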
1186*4882a593Smuzhiyun 
1187*4882a593Smuzhiyun static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1188*4882a593Smuzhiyun {
1189*4882a593Smuzhiyun 	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1190*4882a593Smuzhiyun 	u32 old_lo, old_hi;
1191*4882a593Smuzhiyun 	u64 sum_len;
1192*4882a593Smuzhiyun 
1193*4882a593Smuzhiyun 	old_lo = serr->ee.ee_info;
1194*4882a593Smuzhiyun 	old_hi = serr->ee.ee_data;
1195*4882a593Smuzhiyun 	sum_len = old_hi - old_lo + 1ULL + len;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	if (sum_len >= (1ULL << 32))
1198*4882a593Smuzhiyun 		return false;
1199*4882a593Smuzhiyun 
1200*4882a593Smuzhiyun 	if (lo != old_hi + 1)
1201*4882a593Smuzhiyun 		return false;
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	serr->ee.ee_data += len;
1204*4882a593Smuzhiyun 	return true;
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1210*4882a593Smuzhiyun 	struct sock_exterr_skb *serr;
1211*4882a593Smuzhiyun 	struct sock *sk = skb->sk;
1212*4882a593Smuzhiyun 	struct sk_buff_head *q;
1213*4882a593Smuzhiyun 	unsigned long flags;
1214*4882a593Smuzhiyun 	u32 lo, hi;
1215*4882a593Smuzhiyun 	u16 len;
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	mm_unaccount_pinned_pages(&uarg->mmp);
1218*4882a593Smuzhiyun 
1219*4882a593Smuzhiyun 	/* if !len, there was only 1 call, and it was aborted
1220*4882a593Smuzhiyun 	 * so do not queue a completion notification
1221*4882a593Smuzhiyun 	 */
1222*4882a593Smuzhiyun 	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1223*4882a593Smuzhiyun 		goto release;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	len = uarg->len;
1226*4882a593Smuzhiyun 	lo = uarg->id;
1227*4882a593Smuzhiyun 	hi = uarg->id + len - 1;
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	serr = SKB_EXT_ERR(skb);
1230*4882a593Smuzhiyun 	memset(serr, 0, sizeof(*serr));
1231*4882a593Smuzhiyun 	serr->ee.ee_errno = 0;
1232*4882a593Smuzhiyun 	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1233*4882a593Smuzhiyun 	serr->ee.ee_data = hi;
1234*4882a593Smuzhiyun 	serr->ee.ee_info = lo;
1235*4882a593Smuzhiyun 	if (!success)
1236*4882a593Smuzhiyun 		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1237*4882a593Smuzhiyun 
1238*4882a593Smuzhiyun 	q = &sk->sk_error_queue;
1239*4882a593Smuzhiyun 	spin_lock_irqsave(&q->lock, flags);
1240*4882a593Smuzhiyun 	tail = skb_peek_tail(q);
1241*4882a593Smuzhiyun 	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1242*4882a593Smuzhiyun 	    !skb_zerocopy_notify_extend(tail, lo, len)) {
1243*4882a593Smuzhiyun 		__skb_queue_tail(q, skb);
1244*4882a593Smuzhiyun 		skb = NULL;
1245*4882a593Smuzhiyun 	}
1246*4882a593Smuzhiyun 	spin_unlock_irqrestore(&q->lock, flags);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 	sk->sk_error_report(sk);
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun release:
1251*4882a593Smuzhiyun 	consume_skb(skb);
1252*4882a593Smuzhiyun 	sock_put(sk);
1253*4882a593Smuzhiyun }
1254*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
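
/* Userspace side, as a sketch: the notification queued above is read
 * from the socket error queue; [ee_info, ee_data] is the inclusive
 * range of completed sendmsg() call ids, and SO_EE_CODE_ZEROCOPY_COPIED
 * in ee_code means the kernel fell back to copying. process_range() is
 * hypothetical.
 *
 *	char buf[CMSG_SPACE(sizeof(struct sock_extended_err))];
 *	struct msghdr msg = { .msg_control = buf,
 *			      .msg_controllen = sizeof(buf) };
 *	struct cmsghdr *cm;
 *	struct sock_extended_err *serr;
 *
 *	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
 *		return;
 *	cm = CMSG_FIRSTHDR(&msg);
 *	serr = (struct sock_extended_err *)CMSG_DATA(cm);
 *	if (serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY)
 *		process_range(serr->ee_info, serr->ee_data);
 */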
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun void sock_zerocopy_put(struct ubuf_info *uarg)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun 	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1259*4882a593Smuzhiyun 		if (uarg->callback)
1260*4882a593Smuzhiyun 			uarg->callback(uarg, uarg->zerocopy);
1261*4882a593Smuzhiyun 		else
1262*4882a593Smuzhiyun 			consume_skb(skb_from_uarg(uarg));
1263*4882a593Smuzhiyun 	}
1264*4882a593Smuzhiyun }
1265*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun void sock_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1268*4882a593Smuzhiyun {
1269*4882a593Smuzhiyun 	if (uarg) {
1270*4882a593Smuzhiyun 		struct sock *sk = skb_from_uarg(uarg)->sk;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 		atomic_dec(&sk->sk_zckey);
1273*4882a593Smuzhiyun 		uarg->len--;
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 		if (have_uref)
1276*4882a593Smuzhiyun 			sock_zerocopy_put(uarg);
1277*4882a593Smuzhiyun 	}
1278*4882a593Smuzhiyun }
1279*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1280*4882a593Smuzhiyun 
1281*4882a593Smuzhiyun int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1282*4882a593Smuzhiyun {
1283*4882a593Smuzhiyun 	return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1284*4882a593Smuzhiyun }
1285*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1288*4882a593Smuzhiyun 			     struct msghdr *msg, int len,
1289*4882a593Smuzhiyun 			     struct ubuf_info *uarg)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun 	struct ubuf_info *orig_uarg = skb_zcopy(skb);
1292*4882a593Smuzhiyun 	struct iov_iter orig_iter = msg->msg_iter;
1293*4882a593Smuzhiyun 	int err, orig_len = skb->len;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	/* An skb can only point to one uarg. This edge case happens when
1296*4882a593Smuzhiyun 	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1297*4882a593Smuzhiyun 	 */
1298*4882a593Smuzhiyun 	if (orig_uarg && uarg != orig_uarg)
1299*4882a593Smuzhiyun 		return -EEXIST;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1302*4882a593Smuzhiyun 	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1303*4882a593Smuzhiyun 		struct sock *save_sk = skb->sk;
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 		/* Streams do not free skb on error. Reset to prev state. */
1306*4882a593Smuzhiyun 		msg->msg_iter = orig_iter;
1307*4882a593Smuzhiyun 		skb->sk = sk;
1308*4882a593Smuzhiyun 		___pskb_trim(skb, orig_len);
1309*4882a593Smuzhiyun 		skb->sk = save_sk;
1310*4882a593Smuzhiyun 		return err;
1311*4882a593Smuzhiyun 	}
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 	skb_zcopy_set(skb, uarg, NULL);
1314*4882a593Smuzhiyun 	return skb->len - orig_len;
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
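
/* A condensed sketch of a stream zerocopy sendmsg path built on the
 * helper above. zc_sendmsg_example() and its error policy are
 * hypothetical; real callers also handle -EEXIST by pushing the skb
 * and retrying with a fresh one.
 */
static int zc_sendmsg_example(struct sock *sk, struct sk_buff *skb,
			      struct msghdr *msg, int len)
{
	struct ubuf_info *uarg;
	int copied;

	uarg = sock_zerocopy_realloc(sk, len, skb_zcopy(skb));
	if (!uarg)
		return -ENOBUFS;

	copied = skb_zerocopy_iter_stream(sk, skb, msg, len, uarg);
	if (copied < 0)
		sock_zerocopy_put_abort(uarg, true);
	return copied;
}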
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1319*4882a593Smuzhiyun 			      gfp_t gfp_mask)
1320*4882a593Smuzhiyun {
1321*4882a593Smuzhiyun 	if (skb_zcopy(orig)) {
1322*4882a593Smuzhiyun 		if (skb_zcopy(nskb)) {
1323*4882a593Smuzhiyun 			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1324*4882a593Smuzhiyun 			if (!gfp_mask) {
1325*4882a593Smuzhiyun 				WARN_ON_ONCE(1);
1326*4882a593Smuzhiyun 				return -ENOMEM;
1327*4882a593Smuzhiyun 			}
1328*4882a593Smuzhiyun 			if (skb_uarg(nskb) == skb_uarg(orig))
1329*4882a593Smuzhiyun 				return 0;
1330*4882a593Smuzhiyun 			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1331*4882a593Smuzhiyun 				return -EIO;
1332*4882a593Smuzhiyun 		}
1333*4882a593Smuzhiyun 		skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1334*4882a593Smuzhiyun 	}
1335*4882a593Smuzhiyun 	return 0;
1336*4882a593Smuzhiyun }
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun /**
1339*4882a593Smuzhiyun  *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
1340*4882a593Smuzhiyun  *	@skb: the skb to modify
1341*4882a593Smuzhiyun  *	@gfp_mask: allocation priority
1342*4882a593Smuzhiyun  *
1343*4882a593Smuzhiyun  *	This must be called on SKBTX_DEV_ZEROCOPY skb.
1344*4882a593Smuzhiyun  *	It will copy all frags into kernel and drop the reference
1345*4882a593Smuzhiyun  *	to userspace pages.
1346*4882a593Smuzhiyun  *
1347*4882a593Smuzhiyun  *	If this function is called from an interrupt, @gfp_mask must be
1348*4882a593Smuzhiyun  *	%GFP_ATOMIC.
1349*4882a593Smuzhiyun  *
1350*4882a593Smuzhiyun  *	Returns 0 on success or a negative error code on failure
1351*4882a593Smuzhiyun  *	to allocate kernel memory to copy to.
1352*4882a593Smuzhiyun  */
1353*4882a593Smuzhiyun int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1354*4882a593Smuzhiyun {
1355*4882a593Smuzhiyun 	int num_frags = skb_shinfo(skb)->nr_frags;
1356*4882a593Smuzhiyun 	struct page *page, *head = NULL;
1357*4882a593Smuzhiyun 	int i, new_frags;
1358*4882a593Smuzhiyun 	u32 d_off;
1359*4882a593Smuzhiyun 
1360*4882a593Smuzhiyun 	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1361*4882a593Smuzhiyun 		return -EINVAL;
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	if (!num_frags)
1364*4882a593Smuzhiyun 		goto release;
1365*4882a593Smuzhiyun 
1366*4882a593Smuzhiyun 	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1367*4882a593Smuzhiyun 	for (i = 0; i < new_frags; i++) {
1368*4882a593Smuzhiyun 		page = alloc_page(gfp_mask);
1369*4882a593Smuzhiyun 		if (!page) {
1370*4882a593Smuzhiyun 			while (head) {
1371*4882a593Smuzhiyun 				struct page *next = (struct page *)page_private(head);
1372*4882a593Smuzhiyun 				put_page(head);
1373*4882a593Smuzhiyun 				head = next;
1374*4882a593Smuzhiyun 			}
1375*4882a593Smuzhiyun 			return -ENOMEM;
1376*4882a593Smuzhiyun 		}
1377*4882a593Smuzhiyun 		set_page_private(page, (unsigned long)head);
1378*4882a593Smuzhiyun 		head = page;
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	page = head;
1382*4882a593Smuzhiyun 	d_off = 0;
1383*4882a593Smuzhiyun 	for (i = 0; i < num_frags; i++) {
1384*4882a593Smuzhiyun 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1385*4882a593Smuzhiyun 		u32 p_off, p_len, copied;
1386*4882a593Smuzhiyun 		struct page *p;
1387*4882a593Smuzhiyun 		u8 *vaddr;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 		skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1390*4882a593Smuzhiyun 				      p, p_off, p_len, copied) {
1391*4882a593Smuzhiyun 			u32 copy, done = 0;
1392*4882a593Smuzhiyun 			vaddr = kmap_atomic(p);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 			while (done < p_len) {
1395*4882a593Smuzhiyun 				if (d_off == PAGE_SIZE) {
1396*4882a593Smuzhiyun 					d_off = 0;
1397*4882a593Smuzhiyun 					page = (struct page *)page_private(page);
1398*4882a593Smuzhiyun 				}
1399*4882a593Smuzhiyun 				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1400*4882a593Smuzhiyun 				memcpy(page_address(page) + d_off,
1401*4882a593Smuzhiyun 				       vaddr + p_off + done, copy);
1402*4882a593Smuzhiyun 				done += copy;
1403*4882a593Smuzhiyun 				d_off += copy;
1404*4882a593Smuzhiyun 			}
1405*4882a593Smuzhiyun 			kunmap_atomic(vaddr);
1406*4882a593Smuzhiyun 		}
1407*4882a593Smuzhiyun 	}
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	/* skb frags release userspace buffers */
1410*4882a593Smuzhiyun 	for (i = 0; i < num_frags; i++)
1411*4882a593Smuzhiyun 		skb_frag_unref(skb, i);
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	/* skb frags point to kernel buffers */
1414*4882a593Smuzhiyun 	for (i = 0; i < new_frags - 1; i++) {
1415*4882a593Smuzhiyun 		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1416*4882a593Smuzhiyun 		head = (struct page *)page_private(head);
1417*4882a593Smuzhiyun 	}
1418*4882a593Smuzhiyun 	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1419*4882a593Smuzhiyun 	skb_shinfo(skb)->nr_frags = new_frags;
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun release:
1422*4882a593Smuzhiyun 	skb_zcopy_clear(skb, false);
1423*4882a593Smuzhiyun 	return 0;
1424*4882a593Smuzhiyun }
1425*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_copy_ubufs);
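
/* A sketch of the typical guard around the function above: before a
 * zerocopy skb is handed to a consumer that may hold it indefinitely,
 * its userspace frags must be privatized (this is essentially what
 * skb_orphan_frags() does). orphan_frags_example() is hypothetical.
 */
static int orphan_frags_example(struct sk_buff *skb)
{
	if (skb_zcopy(skb) && skb_copy_ubufs(skb, GFP_ATOMIC))
		return -ENOMEM;	/* caller should drop the packet */
	return 0;
}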
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun /**
1428*4882a593Smuzhiyun  *	skb_clone	-	duplicate an sk_buff
1429*4882a593Smuzhiyun  *	@skb: buffer to clone
1430*4882a593Smuzhiyun  *	@gfp_mask: allocation priority
1431*4882a593Smuzhiyun  *
1432*4882a593Smuzhiyun  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
1433*4882a593Smuzhiyun  *	copies share the same packet data but not structure. The new
1434*4882a593Smuzhiyun  *	buffer has a reference count of 1. If the allocation fails the
1435*4882a593Smuzhiyun  *	function returns %NULL otherwise the new buffer is returned.
1436*4882a593Smuzhiyun  *
1437*4882a593Smuzhiyun  *	If this function is called from an interrupt, @gfp_mask must be
1438*4882a593Smuzhiyun  *	%GFP_ATOMIC.
1439*4882a593Smuzhiyun  */
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1442*4882a593Smuzhiyun {
1443*4882a593Smuzhiyun 	struct sk_buff_fclones *fclones = container_of(skb,
1444*4882a593Smuzhiyun 						       struct sk_buff_fclones,
1445*4882a593Smuzhiyun 						       skb1);
1446*4882a593Smuzhiyun 	struct sk_buff *n;
1447*4882a593Smuzhiyun 
1448*4882a593Smuzhiyun 	if (skb_orphan_frags(skb, gfp_mask))
1449*4882a593Smuzhiyun 		return NULL;
1450*4882a593Smuzhiyun 
1451*4882a593Smuzhiyun 	if (skb->fclone == SKB_FCLONE_ORIG &&
1452*4882a593Smuzhiyun 	    refcount_read(&fclones->fclone_ref) == 1) {
1453*4882a593Smuzhiyun 		n = &fclones->skb2;
1454*4882a593Smuzhiyun 		refcount_set(&fclones->fclone_ref, 2);
1455*4882a593Smuzhiyun 	} else {
1456*4882a593Smuzhiyun 		if (skb_pfmemalloc(skb))
1457*4882a593Smuzhiyun 			gfp_mask |= __GFP_MEMALLOC;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1460*4882a593Smuzhiyun 		if (!n)
1461*4882a593Smuzhiyun 			return NULL;
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 		n->fclone = SKB_FCLONE_UNAVAILABLE;
1464*4882a593Smuzhiyun 	}
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 	return __skb_clone(n, skb);
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun EXPORT_SYMBOL(skb_clone);
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun void skb_headers_offset_update(struct sk_buff *skb, int off)
1471*4882a593Smuzhiyun {
1472*4882a593Smuzhiyun 	/* Only adjust this if it actually is csum_start rather than csum */
1473*4882a593Smuzhiyun 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1474*4882a593Smuzhiyun 		skb->csum_start += off;
1475*4882a593Smuzhiyun 	/* {transport,network,mac}_header and tail are relative to skb->head */
1476*4882a593Smuzhiyun 	skb->transport_header += off;
1477*4882a593Smuzhiyun 	skb->network_header   += off;
1478*4882a593Smuzhiyun 	if (skb_mac_header_was_set(skb))
1479*4882a593Smuzhiyun 		skb->mac_header += off;
1480*4882a593Smuzhiyun 	skb->inner_transport_header += off;
1481*4882a593Smuzhiyun 	skb->inner_network_header += off;
1482*4882a593Smuzhiyun 	skb->inner_mac_header += off;
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun EXPORT_SYMBOL(skb_headers_offset_update);
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1487*4882a593Smuzhiyun {
1488*4882a593Smuzhiyun 	__copy_skb_header(new, old);
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1491*4882a593Smuzhiyun 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1492*4882a593Smuzhiyun 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun EXPORT_SYMBOL(skb_copy_header);
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	if (skb_pfmemalloc(skb))
1499*4882a593Smuzhiyun 		return SKB_ALLOC_RX;
1500*4882a593Smuzhiyun 	return 0;
1501*4882a593Smuzhiyun }
1502*4882a593Smuzhiyun 
1503*4882a593Smuzhiyun /**
1504*4882a593Smuzhiyun  *	skb_copy	-	create private copy of an sk_buff
1505*4882a593Smuzhiyun  *	@skb: buffer to copy
1506*4882a593Smuzhiyun  *	@gfp_mask: allocation priority
1507*4882a593Smuzhiyun  *
1508*4882a593Smuzhiyun  *	Make a copy of both an &sk_buff and its data. This is used when the
1509*4882a593Smuzhiyun  *	caller wishes to modify the data and needs a private copy of the
1510*4882a593Smuzhiyun  *	data to alter. Returns %NULL on failure or the pointer to the buffer
1511*4882a593Smuzhiyun  *	on success. The returned buffer has a reference count of 1.
1512*4882a593Smuzhiyun  *
1513*4882a593Smuzhiyun  *	As a by-product, this function converts a non-linear &sk_buff into a
1514*4882a593Smuzhiyun  *	linear one, so the &sk_buff becomes completely private and the caller
1515*4882a593Smuzhiyun  *	may modify all the data of the returned buffer. This means the
1516*4882a593Smuzhiyun  *	function is not recommended for use when only the header is going
1517*4882a593Smuzhiyun  *	to be modified. Use pskb_copy() instead.
1518*4882a593Smuzhiyun  */
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	int headerlen = skb_headroom(skb);
1523*4882a593Smuzhiyun 	unsigned int size = skb_end_offset(skb) + skb->data_len;
1524*4882a593Smuzhiyun 	struct sk_buff *n = __alloc_skb(size, gfp_mask,
1525*4882a593Smuzhiyun 					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	if (!n)
1528*4882a593Smuzhiyun 		return NULL;
1529*4882a593Smuzhiyun 
1530*4882a593Smuzhiyun 	/* Set the data pointer */
1531*4882a593Smuzhiyun 	skb_reserve(n, headerlen);
1532*4882a593Smuzhiyun 	/* Set the tail pointer and length */
1533*4882a593Smuzhiyun 	skb_put(n, skb->len);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	skb_copy_header(n, skb);
1538*4882a593Smuzhiyun 	return n;
1539*4882a593Smuzhiyun }
1540*4882a593Smuzhiyun EXPORT_SYMBOL(skb_copy);
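
/* A sketch of the rule of thumb for choosing among the variants:
 * clone when the data will only be read, copy when it will be written.
 * rx_tap_example() is hypothetical.
 */
static struct sk_buff *rx_tap_example(struct sk_buff *skb, bool will_write)
{
	if (!will_write)
		return skb_clone(skb, GFP_ATOMIC);	/* shares all data */

	/* A writer needs private data; skb_copy() linearizes the whole
	 * buffer. Prefer pskb_copy() when only headers will be touched.
	 */
	return skb_copy(skb, GFP_ATOMIC);
}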
1541*4882a593Smuzhiyun 
1542*4882a593Smuzhiyun /**
1543*4882a593Smuzhiyun  *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
1544*4882a593Smuzhiyun  *	@skb: buffer to copy
1545*4882a593Smuzhiyun  *	@headroom: headroom of new skb
1546*4882a593Smuzhiyun  *	@gfp_mask: allocation priority
1547*4882a593Smuzhiyun  *	@fclone: if true allocate the copy of the skb from the fclone
1548*4882a593Smuzhiyun  *	cache instead of the head cache; it is recommended to set this
1549*4882a593Smuzhiyun  *	to true for the cases where the copy will likely be cloned
1550*4882a593Smuzhiyun  *
1551*4882a593Smuzhiyun  *	Make a copy of both an &sk_buff and part of its data, located
1552*4882a593Smuzhiyun  *	in the header. Fragmented data remains shared. This is used when
1553*4882a593Smuzhiyun  *	the caller wishes to modify only the header of the &sk_buff and needs
1554*4882a593Smuzhiyun  *	a private copy of the header to alter. Returns %NULL on failure
1555*4882a593Smuzhiyun  *	or the pointer to the buffer on success.
1556*4882a593Smuzhiyun  *	The returned buffer has a reference count of 1.
1557*4882a593Smuzhiyun  */
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1560*4882a593Smuzhiyun 				   gfp_t gfp_mask, bool fclone)
1561*4882a593Smuzhiyun {
1562*4882a593Smuzhiyun 	unsigned int size = skb_headlen(skb) + headroom;
1563*4882a593Smuzhiyun 	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1564*4882a593Smuzhiyun 	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	if (!n)
1567*4882a593Smuzhiyun 		goto out;
1568*4882a593Smuzhiyun 
1569*4882a593Smuzhiyun 	/* Set the data pointer */
1570*4882a593Smuzhiyun 	skb_reserve(n, headroom);
1571*4882a593Smuzhiyun 	/* Set the tail pointer and length */
1572*4882a593Smuzhiyun 	skb_put(n, skb_headlen(skb));
1573*4882a593Smuzhiyun 	/* Copy the bytes */
1574*4882a593Smuzhiyun 	skb_copy_from_linear_data(skb, n->data, n->len);
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 	n->truesize += skb->data_len;
1577*4882a593Smuzhiyun 	n->data_len  = skb->data_len;
1578*4882a593Smuzhiyun 	n->len	     = skb->len;
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun 	if (skb_shinfo(skb)->nr_frags) {
1581*4882a593Smuzhiyun 		int i;
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 		if (skb_orphan_frags(skb, gfp_mask) ||
1584*4882a593Smuzhiyun 		    skb_zerocopy_clone(n, skb, gfp_mask)) {
1585*4882a593Smuzhiyun 			kfree_skb(n);
1586*4882a593Smuzhiyun 			n = NULL;
1587*4882a593Smuzhiyun 			goto out;
1588*4882a593Smuzhiyun 		}
1589*4882a593Smuzhiyun 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1590*4882a593Smuzhiyun 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1591*4882a593Smuzhiyun 			skb_frag_ref(skb, i);
1592*4882a593Smuzhiyun 		}
1593*4882a593Smuzhiyun 		skb_shinfo(n)->nr_frags = i;
1594*4882a593Smuzhiyun 	}
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 	if (skb_has_frag_list(skb)) {
1597*4882a593Smuzhiyun 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1598*4882a593Smuzhiyun 		skb_clone_fraglist(n);
1599*4882a593Smuzhiyun 	}
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun 	skb_copy_header(n, skb);
1602*4882a593Smuzhiyun out:
1603*4882a593Smuzhiyun 	return n;
1604*4882a593Smuzhiyun }
1605*4882a593Smuzhiyun EXPORT_SYMBOL(__pskb_copy_fclone);
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun /**
1608*4882a593Smuzhiyun  *	pskb_expand_head - reallocate header of &sk_buff
1609*4882a593Smuzhiyun  *	@skb: buffer to reallocate
1610*4882a593Smuzhiyun  *	@nhead: room to add at head
1611*4882a593Smuzhiyun  *	@ntail: room to add at tail
1612*4882a593Smuzhiyun  *	@gfp_mask: allocation priority
1613*4882a593Smuzhiyun  *
1614*4882a593Smuzhiyun  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
1615*4882a593Smuzhiyun  *	the header of @skb. The &sk_buff itself is not changed and MUST have
1616*4882a593Smuzhiyun  *	a reference count of 1. Returns zero on success or a negative error
1617*4882a593Smuzhiyun  *	code if expansion failed, in which case the &sk_buff is left unchanged.
1618*4882a593Smuzhiyun  *
1619*4882a593Smuzhiyun  *	All the pointers pointing into skb header may change and must be
1620*4882a593Smuzhiyun  *	reloaded after call to this function.
1621*4882a593Smuzhiyun  */
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1624*4882a593Smuzhiyun 		     gfp_t gfp_mask)
1625*4882a593Smuzhiyun {
1626*4882a593Smuzhiyun 	int i, osize = skb_end_offset(skb);
1627*4882a593Smuzhiyun 	int size = osize + nhead + ntail;
1628*4882a593Smuzhiyun 	long off;
1629*4882a593Smuzhiyun 	u8 *data;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	BUG_ON(nhead < 0);
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	BUG_ON(skb_shared(skb));
1634*4882a593Smuzhiyun 
1635*4882a593Smuzhiyun 	size = SKB_DATA_ALIGN(size);
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	if (skb_pfmemalloc(skb))
1638*4882a593Smuzhiyun 		gfp_mask |= __GFP_MEMALLOC;
1639*4882a593Smuzhiyun 	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1640*4882a593Smuzhiyun 			       gfp_mask, NUMA_NO_NODE, NULL);
1641*4882a593Smuzhiyun 	if (!data)
1642*4882a593Smuzhiyun 		goto nodata;
1643*4882a593Smuzhiyun 	size = SKB_WITH_OVERHEAD(ksize(data));
1644*4882a593Smuzhiyun 
1645*4882a593Smuzhiyun 	/* Copy only real data... and, alas, header. This should be
1646*4882a593Smuzhiyun 	 * optimized for the cases when the header is void.
1647*4882a593Smuzhiyun 	 */
1648*4882a593Smuzhiyun 	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 	memcpy((struct skb_shared_info *)(data + size),
1651*4882a593Smuzhiyun 	       skb_shinfo(skb),
1652*4882a593Smuzhiyun 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 	/*
1655*4882a593Smuzhiyun 	 * if shinfo is shared we must drop the old head gracefully, but if it
1656*4882a593Smuzhiyun 	 * is not we can just drop the old head and let the existing refcount
1657*4882a593Smuzhiyun 	 * be since all we did is relocate the values
1658*4882a593Smuzhiyun 	 */
1659*4882a593Smuzhiyun 	if (skb_cloned(skb)) {
1660*4882a593Smuzhiyun 		if (skb_orphan_frags(skb, gfp_mask))
1661*4882a593Smuzhiyun 			goto nofrags;
1662*4882a593Smuzhiyun 		if (skb_zcopy(skb))
1663*4882a593Smuzhiyun 			refcount_inc(&skb_uarg(skb)->refcnt);
1664*4882a593Smuzhiyun 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1665*4882a593Smuzhiyun 			skb_frag_ref(skb, i);
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 		if (skb_has_frag_list(skb))
1668*4882a593Smuzhiyun 			skb_clone_fraglist(skb);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 		skb_release_data(skb);
1671*4882a593Smuzhiyun 	} else {
1672*4882a593Smuzhiyun 		skb_free_head(skb);
1673*4882a593Smuzhiyun 	}
1674*4882a593Smuzhiyun 	off = (data + nhead) - skb->head;
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	skb->head     = data;
1677*4882a593Smuzhiyun 	skb->head_frag = 0;
1678*4882a593Smuzhiyun 	skb->data    += off;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	skb_set_end_offset(skb, size);
1681*4882a593Smuzhiyun #ifdef NET_SKBUFF_DATA_USES_OFFSET
1682*4882a593Smuzhiyun 	off           = nhead;
1683*4882a593Smuzhiyun #endif
1684*4882a593Smuzhiyun 	skb->tail	      += off;
1685*4882a593Smuzhiyun 	skb_headers_offset_update(skb, nhead);
1686*4882a593Smuzhiyun 	skb->cloned   = 0;
1687*4882a593Smuzhiyun 	skb->hdr_len  = 0;
1688*4882a593Smuzhiyun 	skb->nohdr    = 0;
1689*4882a593Smuzhiyun 	atomic_set(&skb_shinfo(skb)->dataref, 1);
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	skb_metadata_clear(skb);
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	/* It is not generally safe to change skb->truesize.
1694*4882a593Smuzhiyun 	 * For the moment, we really care of rx path, or
1695*4882a593Smuzhiyun 	 * when skb is orphaned (not attached to a socket).
1696*4882a593Smuzhiyun 	 */
1697*4882a593Smuzhiyun 	if (!skb->sk || skb->destructor == sock_edemux)
1698*4882a593Smuzhiyun 		skb->truesize += size - osize;
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun 	return 0;
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun nofrags:
1703*4882a593Smuzhiyun 	kfree(data);
1704*4882a593Smuzhiyun nodata:
1705*4882a593Smuzhiyun 	return -ENOMEM;
1706*4882a593Smuzhiyun }
1707*4882a593Smuzhiyun EXPORT_SYMBOL(pskb_expand_head);
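
/* A sketch of the canonical use: make room to push a small header,
 * remembering that every cached pointer into skb->head is stale after
 * pskb_expand_head() returns. The 4-byte tag is hypothetical;
 * skb_cow_head() wraps this same pattern.
 */
static int push_tag_example(struct sk_buff *skb)
{
	if (skb_cloned(skb) || skb_headroom(skb) < 4) {
		if (pskb_expand_head(skb, SKB_DATA_ALIGN(4), 0, GFP_ATOMIC))
			return -ENOMEM;
	}
	memset(skb_push(skb, 4), 0, 4);
	return 0;
}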
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun /* Make private copy of skb with writable head and some headroom */
1710*4882a593Smuzhiyun 
1711*4882a593Smuzhiyun struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1712*4882a593Smuzhiyun {
1713*4882a593Smuzhiyun 	struct sk_buff *skb2;
1714*4882a593Smuzhiyun 	int delta = headroom - skb_headroom(skb);
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun 	if (delta <= 0)
1717*4882a593Smuzhiyun 		skb2 = pskb_copy(skb, GFP_ATOMIC);
1718*4882a593Smuzhiyun 	else {
1719*4882a593Smuzhiyun 		skb2 = skb_clone(skb, GFP_ATOMIC);
1720*4882a593Smuzhiyun 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1721*4882a593Smuzhiyun 					     GFP_ATOMIC)) {
1722*4882a593Smuzhiyun 			kfree_skb(skb2);
1723*4882a593Smuzhiyun 			skb2 = NULL;
1724*4882a593Smuzhiyun 		}
1725*4882a593Smuzhiyun 	}
1726*4882a593Smuzhiyun 	return skb2;
1727*4882a593Smuzhiyun }
1728*4882a593Smuzhiyun EXPORT_SYMBOL(skb_realloc_headroom);
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri)
1731*4882a593Smuzhiyun {
1732*4882a593Smuzhiyun 	unsigned int saved_end_offset, saved_truesize;
1733*4882a593Smuzhiyun 	struct skb_shared_info *shinfo;
1734*4882a593Smuzhiyun 	int res;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	saved_end_offset = skb_end_offset(skb);
1737*4882a593Smuzhiyun 	saved_truesize = skb->truesize;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	res = pskb_expand_head(skb, 0, 0, pri);
1740*4882a593Smuzhiyun 	if (res)
1741*4882a593Smuzhiyun 		return res;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	skb->truesize = saved_truesize;
1744*4882a593Smuzhiyun 
1745*4882a593Smuzhiyun 	if (likely(skb_end_offset(skb) == saved_end_offset))
1746*4882a593Smuzhiyun 		return 0;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	shinfo = skb_shinfo(skb);
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	/* We are about to change back skb->end,
1751*4882a593Smuzhiyun 	 * we need to move skb_shinfo() to its new location.
1752*4882a593Smuzhiyun 	 */
1753*4882a593Smuzhiyun 	memmove(skb->head + saved_end_offset,
1754*4882a593Smuzhiyun 		shinfo,
1755*4882a593Smuzhiyun 		offsetof(struct skb_shared_info, frags[shinfo->nr_frags]));
1756*4882a593Smuzhiyun 
1757*4882a593Smuzhiyun 	skb_set_end_offset(skb, saved_end_offset);
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	return 0;
1760*4882a593Smuzhiyun }
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun /**
1763*4882a593Smuzhiyun  *	skb_copy_expand	-	copy and expand sk_buff
1764*4882a593Smuzhiyun  *	@skb: buffer to copy
1765*4882a593Smuzhiyun  *	@newheadroom: new free bytes at head
1766*4882a593Smuzhiyun  *	@newtailroom: new free bytes at tail
1767*4882a593Smuzhiyun  *	@gfp_mask: allocation priority
1768*4882a593Smuzhiyun  *
1769*4882a593Smuzhiyun  *	Make a copy of both an &sk_buff and its data and while doing so
1770*4882a593Smuzhiyun  *	allocate additional space.
1771*4882a593Smuzhiyun  *
1772*4882a593Smuzhiyun  *	This is used when the caller wishes to modify the data and needs a
1773*4882a593Smuzhiyun  *	private copy of the data to alter as well as more space for new fields.
1774*4882a593Smuzhiyun  *	Returns %NULL on failure or the pointer to the buffer
1775*4882a593Smuzhiyun  *	on success. The returned buffer has a reference count of 1.
1776*4882a593Smuzhiyun  *
1777*4882a593Smuzhiyun  *	You must pass %GFP_ATOMIC as the allocation priority if this function
1778*4882a593Smuzhiyun  *	is called from an interrupt.
1779*4882a593Smuzhiyun  */
1780*4882a593Smuzhiyun struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1781*4882a593Smuzhiyun 				int newheadroom, int newtailroom,
1782*4882a593Smuzhiyun 				gfp_t gfp_mask)
1783*4882a593Smuzhiyun {
1784*4882a593Smuzhiyun 	/*
1785*4882a593Smuzhiyun 	 *	Allocate the copy buffer
1786*4882a593Smuzhiyun 	 */
1787*4882a593Smuzhiyun 	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1788*4882a593Smuzhiyun 					gfp_mask, skb_alloc_rx_flag(skb),
1789*4882a593Smuzhiyun 					NUMA_NO_NODE);
1790*4882a593Smuzhiyun 	int oldheadroom = skb_headroom(skb);
1791*4882a593Smuzhiyun 	int head_copy_len, head_copy_off;
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun 	if (!n)
1794*4882a593Smuzhiyun 		return NULL;
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	skb_reserve(n, newheadroom);
1797*4882a593Smuzhiyun 
1798*4882a593Smuzhiyun 	/* Set the tail pointer and length */
1799*4882a593Smuzhiyun 	skb_put(n, skb->len);
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 	head_copy_len = oldheadroom;
1802*4882a593Smuzhiyun 	head_copy_off = 0;
1803*4882a593Smuzhiyun 	if (newheadroom <= head_copy_len)
1804*4882a593Smuzhiyun 		head_copy_len = newheadroom;
1805*4882a593Smuzhiyun 	else
1806*4882a593Smuzhiyun 		head_copy_off = newheadroom - head_copy_len;
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 	/* Copy the linear header and data. */
1809*4882a593Smuzhiyun 	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1810*4882a593Smuzhiyun 			     skb->len + head_copy_len));
1811*4882a593Smuzhiyun 
1812*4882a593Smuzhiyun 	skb_copy_header(n, skb);
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	skb_headers_offset_update(n, newheadroom - oldheadroom);
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	return n;
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun EXPORT_SYMBOL(skb_copy_expand);
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun /**
1821*4882a593Smuzhiyun  *	__skb_pad		-	zero pad the tail of an skb
1822*4882a593Smuzhiyun  *	@skb: buffer to pad
1823*4882a593Smuzhiyun  *	@pad: space to pad
1824*4882a593Smuzhiyun  *	@free_on_error: free buffer on error
1825*4882a593Smuzhiyun  *
1826*4882a593Smuzhiyun  *	Ensure that a buffer is followed by a padding area that is zero
1827*4882a593Smuzhiyun  *	filled. Used by network drivers which may DMA or transfer data
1828*4882a593Smuzhiyun  *	beyond the buffer end onto the wire.
1829*4882a593Smuzhiyun  *
1830*4882a593Smuzhiyun  *	May return error in out of memory cases. The skb is freed on error
1831*4882a593Smuzhiyun  *	if @free_on_error is true.
1832*4882a593Smuzhiyun  */
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1835*4882a593Smuzhiyun {
1836*4882a593Smuzhiyun 	int err;
1837*4882a593Smuzhiyun 	int ntail;
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	/* If the skbuff is non-linear, tailroom is always zero. */
1840*4882a593Smuzhiyun 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1841*4882a593Smuzhiyun 		memset(skb->data+skb->len, 0, pad);
1842*4882a593Smuzhiyun 		return 0;
1843*4882a593Smuzhiyun 	}
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	ntail = skb->data_len + pad - (skb->end - skb->tail);
1846*4882a593Smuzhiyun 	if (likely(skb_cloned(skb) || ntail > 0)) {
1847*4882a593Smuzhiyun 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1848*4882a593Smuzhiyun 		if (unlikely(err))
1849*4882a593Smuzhiyun 			goto free_skb;
1850*4882a593Smuzhiyun 	}
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	/* FIXME: The use of this function with non-linear skb's really needs
1853*4882a593Smuzhiyun 	 * to be audited.
1854*4882a593Smuzhiyun 	 */
1855*4882a593Smuzhiyun 	err = skb_linearize(skb);
1856*4882a593Smuzhiyun 	if (unlikely(err))
1857*4882a593Smuzhiyun 		goto free_skb;
1858*4882a593Smuzhiyun 
1859*4882a593Smuzhiyun 	memset(skb->data + skb->len, 0, pad);
1860*4882a593Smuzhiyun 	return 0;
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun free_skb:
1863*4882a593Smuzhiyun 	if (free_on_error)
1864*4882a593Smuzhiyun 		kfree_skb(skb);
1865*4882a593Smuzhiyun 	return err;
1866*4882a593Smuzhiyun }
1867*4882a593Smuzhiyun EXPORT_SYMBOL(__skb_pad);
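
/* A sketch of a driver padding short Ethernet frames on transmit.
 * skb_put_padto() builds on __skb_pad() and frees the skb on failure,
 * so the error path must not touch it again. xmit_pad_example() is
 * hypothetical.
 */
static int xmit_pad_example(struct sk_buff *skb)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return -ENOMEM;		/* skb has already been freed */
	/* ... hand the now >= ETH_ZLEN byte frame to the hardware ... */
	return 0;
}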
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun /**
1870*4882a593Smuzhiyun  *	pskb_put - add data to the tail of a potentially fragmented buffer
1871*4882a593Smuzhiyun  *	@skb: start of the buffer to use
1872*4882a593Smuzhiyun  *	@tail: tail fragment of the buffer to use
1873*4882a593Smuzhiyun  *	@len: amount of data to add
1874*4882a593Smuzhiyun  *
1875*4882a593Smuzhiyun  *	This function extends the used data area of the potentially
1876*4882a593Smuzhiyun  *	fragmented buffer. @tail must be the last fragment of @skb -- or
1877*4882a593Smuzhiyun  *	@skb itself. If this would exceed the total buffer size the kernel
1878*4882a593Smuzhiyun  *	will panic. A pointer to the first byte of the extra data is
1879*4882a593Smuzhiyun  *	returned.
1880*4882a593Smuzhiyun  */
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1883*4882a593Smuzhiyun {
1884*4882a593Smuzhiyun 	if (tail != skb) {
1885*4882a593Smuzhiyun 		skb->data_len += len;
1886*4882a593Smuzhiyun 		skb->len += len;
1887*4882a593Smuzhiyun 	}
1888*4882a593Smuzhiyun 	return skb_put(tail, len);
1889*4882a593Smuzhiyun }
1890*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(pskb_put);
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun /**
1893*4882a593Smuzhiyun  *	skb_put - add data to a buffer
1894*4882a593Smuzhiyun  *	@skb: buffer to use
1895*4882a593Smuzhiyun  *	@len: amount of data to add
1896*4882a593Smuzhiyun  *
1897*4882a593Smuzhiyun  *	This function extends the used data area of the buffer. If this would
1898*4882a593Smuzhiyun  *	exceed the total buffer size the kernel will panic. A pointer to the
1899*4882a593Smuzhiyun  *	first byte of the extra data is returned.
1900*4882a593Smuzhiyun  */
1901*4882a593Smuzhiyun void *skb_put(struct sk_buff *skb, unsigned int len)
1902*4882a593Smuzhiyun {
1903*4882a593Smuzhiyun 	void *tmp = skb_tail_pointer(skb);
1904*4882a593Smuzhiyun 	SKB_LINEAR_ASSERT(skb);
1905*4882a593Smuzhiyun 	skb->tail += len;
1906*4882a593Smuzhiyun 	skb->len  += len;
1907*4882a593Smuzhiyun 	if (unlikely(skb->tail > skb->end))
1908*4882a593Smuzhiyun 		skb_over_panic(skb, len, __builtin_return_address(0));
1909*4882a593Smuzhiyun 	return tmp;
1910*4882a593Smuzhiyun }
1911*4882a593Smuzhiyun EXPORT_SYMBOL(skb_put);
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun /**
1914*4882a593Smuzhiyun  *	skb_push - add data to the start of a buffer
1915*4882a593Smuzhiyun  *	@skb: buffer to use
1916*4882a593Smuzhiyun  *	@len: amount of data to add
1917*4882a593Smuzhiyun  *
1918*4882a593Smuzhiyun  *	This function extends the used data area of the buffer at the buffer
1919*4882a593Smuzhiyun  *	start. If this would exceed the total buffer headroom the kernel will
1920*4882a593Smuzhiyun  *	panic. A pointer to the first byte of the extra data is returned.
1921*4882a593Smuzhiyun  */
1922*4882a593Smuzhiyun void *skb_push(struct sk_buff *skb, unsigned int len)
1923*4882a593Smuzhiyun {
1924*4882a593Smuzhiyun 	skb->data -= len;
1925*4882a593Smuzhiyun 	skb->len  += len;
1926*4882a593Smuzhiyun 	if (unlikely(skb->data < skb->head))
1927*4882a593Smuzhiyun 		skb_under_panic(skb, len, __builtin_return_address(0));
1928*4882a593Smuzhiyun 	return skb->data;
1929*4882a593Smuzhiyun }
1930*4882a593Smuzhiyun EXPORT_SYMBOL(skb_push);
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun /**
1933*4882a593Smuzhiyun  *	skb_pull - remove data from the start of a buffer
1934*4882a593Smuzhiyun  *	@skb: buffer to use
1935*4882a593Smuzhiyun  *	@len: amount of data to remove
1936*4882a593Smuzhiyun  *
1937*4882a593Smuzhiyun  *	This function removes data from the start of a buffer, returning
1938*4882a593Smuzhiyun  *	the memory to the headroom. A pointer to the next data in the buffer
1939*4882a593Smuzhiyun  *	is returned. Once the data has been pulled future pushes will overwrite
1940*4882a593Smuzhiyun  *	the old data.
1941*4882a593Smuzhiyun  */
1942*4882a593Smuzhiyun void *skb_pull(struct sk_buff *skb, unsigned int len)
1943*4882a593Smuzhiyun {
1944*4882a593Smuzhiyun 	return skb_pull_inline(skb, len);
1945*4882a593Smuzhiyun }
1946*4882a593Smuzhiyun EXPORT_SYMBOL(skb_pull);
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun /**
1949*4882a593Smuzhiyun  *	skb_trim - remove end from a buffer
1950*4882a593Smuzhiyun  *	@skb: buffer to alter
1951*4882a593Smuzhiyun  *	@len: new length
1952*4882a593Smuzhiyun  *
1953*4882a593Smuzhiyun  *	Cut the length of a buffer down by removing data from the tail. If
1954*4882a593Smuzhiyun  *	the buffer is already under the length specified it is not modified.
1955*4882a593Smuzhiyun  *	The skb must be linear.
1956*4882a593Smuzhiyun  */
1957*4882a593Smuzhiyun void skb_trim(struct sk_buff *skb, unsigned int len)
1958*4882a593Smuzhiyun {
1959*4882a593Smuzhiyun 	if (skb->len > len)
1960*4882a593Smuzhiyun 		__skb_trim(skb, len);
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun EXPORT_SYMBOL(skb_trim);
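
/* A sketch of the four basic data-area operations on a linear skb,
 * assuming sufficient headroom and tailroom have been reserved.
 * data_ops_example() is hypothetical.
 */
static void data_ops_example(struct sk_buff *skb)
{
	u8 *p;

	p = skb_put(skb, 8);		/* extend the tail by 8 bytes */
	memset(p, 0, 8);		/* ... fill the new payload ... */
	skb_push(skb, 2);		/* prepend a 2-byte header */
	skb_pull(skb, 2);		/* consume that header again */
	skb_trim(skb, skb->len - 8);	/* drop the 8 payload bytes */
}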
1963*4882a593Smuzhiyun 
1964*4882a593Smuzhiyun /* Trims skb to length len. It can change skb pointers.
1965*4882a593Smuzhiyun  */
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1968*4882a593Smuzhiyun {
1969*4882a593Smuzhiyun 	struct sk_buff **fragp;
1970*4882a593Smuzhiyun 	struct sk_buff *frag;
1971*4882a593Smuzhiyun 	int offset = skb_headlen(skb);
1972*4882a593Smuzhiyun 	int nfrags = skb_shinfo(skb)->nr_frags;
1973*4882a593Smuzhiyun 	int i;
1974*4882a593Smuzhiyun 	int err;
1975*4882a593Smuzhiyun 
1976*4882a593Smuzhiyun 	if (skb_cloned(skb) &&
1977*4882a593Smuzhiyun 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1978*4882a593Smuzhiyun 		return err;
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	i = 0;
1981*4882a593Smuzhiyun 	if (offset >= len)
1982*4882a593Smuzhiyun 		goto drop_pages;
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	for (; i < nfrags; i++) {
1985*4882a593Smuzhiyun 		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1986*4882a593Smuzhiyun 
1987*4882a593Smuzhiyun 		if (end < len) {
1988*4882a593Smuzhiyun 			offset = end;
1989*4882a593Smuzhiyun 			continue;
1990*4882a593Smuzhiyun 		}
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun drop_pages:
1995*4882a593Smuzhiyun 		skb_shinfo(skb)->nr_frags = i;
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun 		for (; i < nfrags; i++)
1998*4882a593Smuzhiyun 			skb_frag_unref(skb, i);
1999*4882a593Smuzhiyun 
2000*4882a593Smuzhiyun 		if (skb_has_frag_list(skb))
2001*4882a593Smuzhiyun 			skb_drop_fraglist(skb);
2002*4882a593Smuzhiyun 		goto done;
2003*4882a593Smuzhiyun 	}
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2006*4882a593Smuzhiyun 	     fragp = &frag->next) {
2007*4882a593Smuzhiyun 		int end = offset + frag->len;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 		if (skb_shared(frag)) {
2010*4882a593Smuzhiyun 			struct sk_buff *nfrag;
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun 			nfrag = skb_clone(frag, GFP_ATOMIC);
2013*4882a593Smuzhiyun 			if (unlikely(!nfrag))
2014*4882a593Smuzhiyun 				return -ENOMEM;
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 			nfrag->next = frag->next;
2017*4882a593Smuzhiyun 			consume_skb(frag);
2018*4882a593Smuzhiyun 			frag = nfrag;
2019*4882a593Smuzhiyun 			*fragp = frag;
2020*4882a593Smuzhiyun 		}
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun 		if (end < len) {
2023*4882a593Smuzhiyun 			offset = end;
2024*4882a593Smuzhiyun 			continue;
2025*4882a593Smuzhiyun 		}
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 		if (end > len &&
2028*4882a593Smuzhiyun 		    unlikely((err = pskb_trim(frag, len - offset))))
2029*4882a593Smuzhiyun 			return err;
2030*4882a593Smuzhiyun 
2031*4882a593Smuzhiyun 		if (frag->next)
2032*4882a593Smuzhiyun 			skb_drop_list(&frag->next);
2033*4882a593Smuzhiyun 		break;
2034*4882a593Smuzhiyun 	}
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun done:
2037*4882a593Smuzhiyun 	if (len > skb_headlen(skb)) {
2038*4882a593Smuzhiyun 		skb->data_len -= skb->len - len;
2039*4882a593Smuzhiyun 		skb->len       = len;
2040*4882a593Smuzhiyun 	} else {
2041*4882a593Smuzhiyun 		skb->len       = len;
2042*4882a593Smuzhiyun 		skb->data_len  = 0;
2043*4882a593Smuzhiyun 		skb_set_tail_pointer(skb, len);
2044*4882a593Smuzhiyun 	}
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	if (!skb->sk || skb->destructor == sock_edemux)
2047*4882a593Smuzhiyun 		skb_condense(skb);
2048*4882a593Smuzhiyun 	return 0;
2049*4882a593Smuzhiyun }
2050*4882a593Smuzhiyun EXPORT_SYMBOL(___pskb_trim);
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun /* Note : use pskb_trim_rcsum() instead of calling this directly
2053*4882a593Smuzhiyun  */
2054*4882a593Smuzhiyun int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2055*4882a593Smuzhiyun {
2056*4882a593Smuzhiyun 	if (skb->ip_summed == CHECKSUM_COMPLETE) {
2057*4882a593Smuzhiyun 		int delta = skb->len - len;
2058*4882a593Smuzhiyun 
2059*4882a593Smuzhiyun 		skb->csum = csum_block_sub(skb->csum,
2060*4882a593Smuzhiyun 					   skb_checksum(skb, len, delta, 0),
2061*4882a593Smuzhiyun 					   len);
2062*4882a593Smuzhiyun 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2063*4882a593Smuzhiyun 		int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2064*4882a593Smuzhiyun 		int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 		if (offset + sizeof(__sum16) > hdlen)
2067*4882a593Smuzhiyun 			return -EINVAL;
2068*4882a593Smuzhiyun 	}
2069*4882a593Smuzhiyun 	return __pskb_trim(skb, len);
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun EXPORT_SYMBOL(pskb_trim_rcsum_slow);
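
/* A sketch of stripping a 4-byte trailer while keeping a
 * CHECKSUM_COMPLETE value consistent; pskb_trim_rcsum() takes the slow
 * path above only when the checksum must be adjusted.
 * strip_trailer_example() is hypothetical.
 */
static int strip_trailer_example(struct sk_buff *skb)
{
	if (skb->len < 4)
		return -EINVAL;
	return pskb_trim_rcsum(skb, skb->len - 4);
}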
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun /**
2074*4882a593Smuzhiyun  *	__pskb_pull_tail - advance tail of skb header
2075*4882a593Smuzhiyun  *	@skb: buffer to reallocate
2076*4882a593Smuzhiyun  *	@delta: number of bytes to advance tail
2077*4882a593Smuzhiyun  *
2078*4882a593Smuzhiyun  *	This function makes sense only on a fragmented &sk_buff;
2079*4882a593Smuzhiyun  *	it expands the header, moving its tail forward and copying the
2080*4882a593Smuzhiyun  *	necessary data from the fragmented part.
2081*4882a593Smuzhiyun  *
2082*4882a593Smuzhiyun  *	&sk_buff MUST have reference count of 1.
2083*4882a593Smuzhiyun  *
2084*4882a593Smuzhiyun  *	Returns %NULL (and &sk_buff does not change) if pull failed
2085*4882a593Smuzhiyun  *	or value of new tail of skb in the case of success.
2086*4882a593Smuzhiyun  *
2087*4882a593Smuzhiyun  *	All the pointers pointing into skb header may change and must be
2088*4882a593Smuzhiyun  *	reloaded after call to this function.
2089*4882a593Smuzhiyun  */
2090*4882a593Smuzhiyun 
2091*4882a593Smuzhiyun /* Moves tail of skb head forward, copying data from fragmented part,
2092*4882a593Smuzhiyun  * when it is necessary.
2093*4882a593Smuzhiyun  * 1. It may fail due to malloc failure.
2094*4882a593Smuzhiyun  * 2. It may change skb pointers.
2095*4882a593Smuzhiyun  *
2096*4882a593Smuzhiyun  * It is pretty complicated. Luckily, it is called only in exceptional cases.
2097*4882a593Smuzhiyun  */
2098*4882a593Smuzhiyun void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2099*4882a593Smuzhiyun {
2100*4882a593Smuzhiyun 	/* If the skb does not have enough free space at the tail, get a new
2101*4882a593Smuzhiyun 	 * one plus 128 bytes for future expansions. If we have enough room
2102*4882a593Smuzhiyun 	 * at the tail, reallocate without expansion only if the skb is cloned.
2103*4882a593Smuzhiyun 	 */
2104*4882a593Smuzhiyun 	int i, k, eat = (skb->tail + delta) - skb->end;
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	if (eat > 0 || skb_cloned(skb)) {
2107*4882a593Smuzhiyun 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2108*4882a593Smuzhiyun 				     GFP_ATOMIC))
2109*4882a593Smuzhiyun 			return NULL;
2110*4882a593Smuzhiyun 	}
2111*4882a593Smuzhiyun 
2112*4882a593Smuzhiyun 	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2113*4882a593Smuzhiyun 			     skb_tail_pointer(skb), delta));
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	/* Optimization: no fragments, no reason to pre-estimate the
2116*4882a593Smuzhiyun 	 * size of pulled pages. Superb.
2117*4882a593Smuzhiyun 	 */
2118*4882a593Smuzhiyun 	if (!skb_has_frag_list(skb))
2119*4882a593Smuzhiyun 		goto pull_pages;
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	/* Estimate size of pulled pages. */
2122*4882a593Smuzhiyun 	eat = delta;
2123*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2124*4882a593Smuzhiyun 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 		if (size >= eat)
2127*4882a593Smuzhiyun 			goto pull_pages;
2128*4882a593Smuzhiyun 		eat -= size;
2129*4882a593Smuzhiyun 	}
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	/* If we need to update the frag list, we are in trouble.
2132*4882a593Smuzhiyun 	 * Certainly, it is possible to add an offset to the skb data,
2133*4882a593Smuzhiyun 	 * but taking into account that pulling is expected to
2134*4882a593Smuzhiyun 	 * be a very rare operation, it is worth fighting against
2135*4882a593Smuzhiyun 	 * further bloating of the skb head and crucifying ourselves here
2136*4882a593Smuzhiyun 	 * instead. Pure masochism, indeed. 8)8)
2137*4882a593Smuzhiyun 	 */
2138*4882a593Smuzhiyun 	if (eat) {
2139*4882a593Smuzhiyun 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
2140*4882a593Smuzhiyun 		struct sk_buff *clone = NULL;
2141*4882a593Smuzhiyun 		struct sk_buff *insp = NULL;
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 		do {
2144*4882a593Smuzhiyun 			if (list->len <= eat) {
2145*4882a593Smuzhiyun 				/* Eaten as whole. */
2146*4882a593Smuzhiyun 				eat -= list->len;
2147*4882a593Smuzhiyun 				list = list->next;
2148*4882a593Smuzhiyun 				insp = list;
2149*4882a593Smuzhiyun 			} else {
2150*4882a593Smuzhiyun 				/* Eaten partially. */
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 				if (skb_shared(list)) {
2153*4882a593Smuzhiyun 					/* Sucks! We need to fork list. :-( */
2154*4882a593Smuzhiyun 					clone = skb_clone(list, GFP_ATOMIC);
2155*4882a593Smuzhiyun 					if (!clone)
2156*4882a593Smuzhiyun 						return NULL;
2157*4882a593Smuzhiyun 					insp = list->next;
2158*4882a593Smuzhiyun 					list = clone;
2159*4882a593Smuzhiyun 				} else {
2160*4882a593Smuzhiyun 					/* This may be pulled without
2161*4882a593Smuzhiyun 					 * problems. */
2162*4882a593Smuzhiyun 					insp = list;
2163*4882a593Smuzhiyun 				}
2164*4882a593Smuzhiyun 				if (!pskb_pull(list, eat)) {
2165*4882a593Smuzhiyun 					kfree_skb(clone);
2166*4882a593Smuzhiyun 					return NULL;
2167*4882a593Smuzhiyun 				}
2168*4882a593Smuzhiyun 				break;
2169*4882a593Smuzhiyun 			}
2170*4882a593Smuzhiyun 		} while (eat);
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun 		/* Free pulled out fragments. */
2173*4882a593Smuzhiyun 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
2174*4882a593Smuzhiyun 			skb_shinfo(skb)->frag_list = list->next;
2175*4882a593Smuzhiyun 			consume_skb(list);
2176*4882a593Smuzhiyun 		}
2177*4882a593Smuzhiyun 		/* And insert new clone at head. */
2178*4882a593Smuzhiyun 		if (clone) {
2179*4882a593Smuzhiyun 			clone->next = list;
2180*4882a593Smuzhiyun 			skb_shinfo(skb)->frag_list = clone;
2181*4882a593Smuzhiyun 		}
2182*4882a593Smuzhiyun 	}
2183*4882a593Smuzhiyun 	/* Success! Now we may commit changes to skb data. */
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun pull_pages:
2186*4882a593Smuzhiyun 	eat = delta;
2187*4882a593Smuzhiyun 	k = 0;
2188*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2189*4882a593Smuzhiyun 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 		if (size <= eat) {
2192*4882a593Smuzhiyun 			skb_frag_unref(skb, i);
2193*4882a593Smuzhiyun 			eat -= size;
2194*4882a593Smuzhiyun 		} else {
2195*4882a593Smuzhiyun 			skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 			*frag = skb_shinfo(skb)->frags[i];
2198*4882a593Smuzhiyun 			if (eat) {
2199*4882a593Smuzhiyun 				skb_frag_off_add(frag, eat);
2200*4882a593Smuzhiyun 				skb_frag_size_sub(frag, eat);
2201*4882a593Smuzhiyun 				if (!i)
2202*4882a593Smuzhiyun 					goto end;
2203*4882a593Smuzhiyun 				eat = 0;
2204*4882a593Smuzhiyun 			}
2205*4882a593Smuzhiyun 			k++;
2206*4882a593Smuzhiyun 		}
2207*4882a593Smuzhiyun 	}
2208*4882a593Smuzhiyun 	skb_shinfo(skb)->nr_frags = k;
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun end:
2211*4882a593Smuzhiyun 	skb->tail     += delta;
2212*4882a593Smuzhiyun 	skb->data_len -= delta;
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	if (!skb->data_len)
2215*4882a593Smuzhiyun 		skb_zcopy_clear(skb, false);
2216*4882a593Smuzhiyun 
2217*4882a593Smuzhiyun 	return skb_tail_pointer(skb);
2218*4882a593Smuzhiyun }
2219*4882a593Smuzhiyun EXPORT_SYMBOL(__pskb_pull_tail);
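
/* A sketch of the usual consumer: pskb_may_pull() calls
 * __pskb_pull_tail() to linearize just enough bytes to read a header
 * from a possibly fragmented skb. parse_udp_example() is hypothetical
 * and assumes the transport header offset has already been set.
 */
static int parse_udp_example(struct sk_buff *skb)
{
	struct udphdr *uh;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		return -EINVAL;		/* runt frame or allocation failure */
	uh = udp_hdr(skb);		/* header bytes are now linear */
	return ntohs(uh->len);
}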
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun /**
2222*4882a593Smuzhiyun  *	skb_copy_bits - copy bits from skb to kernel buffer
2223*4882a593Smuzhiyun  *	@skb: source skb
2224*4882a593Smuzhiyun  *	@offset: offset in source
2225*4882a593Smuzhiyun  *	@to: destination buffer
2226*4882a593Smuzhiyun  *	@len: number of bytes to copy
2227*4882a593Smuzhiyun  *
2228*4882a593Smuzhiyun  *	Copy the specified number of bytes from the source skb to the
2229*4882a593Smuzhiyun  *	destination buffer.
2230*4882a593Smuzhiyun  *
2231*4882a593Smuzhiyun  *	CAUTION ! :
2232*4882a593Smuzhiyun  *		If its prototype is ever changed,
2233*4882a593Smuzhiyun  *		check arch/{*}/net/{*}.S files,
2234*4882a593Smuzhiyun  *		since it is called from BPF assembly code.
2235*4882a593Smuzhiyun  */
2236*4882a593Smuzhiyun int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2237*4882a593Smuzhiyun {
2238*4882a593Smuzhiyun 	int start = skb_headlen(skb);
2239*4882a593Smuzhiyun 	struct sk_buff *frag_iter;
2240*4882a593Smuzhiyun 	int i, copy;
2241*4882a593Smuzhiyun 
2242*4882a593Smuzhiyun 	if (offset > (int)skb->len - len)
2243*4882a593Smuzhiyun 		goto fault;
2244*4882a593Smuzhiyun 
2245*4882a593Smuzhiyun 	/* Copy header. */
2246*4882a593Smuzhiyun 	if ((copy = start - offset) > 0) {
2247*4882a593Smuzhiyun 		if (copy > len)
2248*4882a593Smuzhiyun 			copy = len;
2249*4882a593Smuzhiyun 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
2250*4882a593Smuzhiyun 		if ((len -= copy) == 0)
2251*4882a593Smuzhiyun 			return 0;
2252*4882a593Smuzhiyun 		offset += copy;
2253*4882a593Smuzhiyun 		to     += copy;
2254*4882a593Smuzhiyun 	}
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2257*4882a593Smuzhiyun 		int end;
2258*4882a593Smuzhiyun 		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 		end = start + skb_frag_size(f);
2263*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2264*4882a593Smuzhiyun 			u32 p_off, p_len, copied;
2265*4882a593Smuzhiyun 			struct page *p;
2266*4882a593Smuzhiyun 			u8 *vaddr;
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 			if (copy > len)
2269*4882a593Smuzhiyun 				copy = len;
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 			skb_frag_foreach_page(f,
2272*4882a593Smuzhiyun 					      skb_frag_off(f) + offset - start,
2273*4882a593Smuzhiyun 					      copy, p, p_off, p_len, copied) {
2274*4882a593Smuzhiyun 				vaddr = kmap_atomic(p);
2275*4882a593Smuzhiyun 				memcpy(to + copied, vaddr + p_off, p_len);
2276*4882a593Smuzhiyun 				kunmap_atomic(vaddr);
2277*4882a593Smuzhiyun 			}
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 			if ((len -= copy) == 0)
2280*4882a593Smuzhiyun 				return 0;
2281*4882a593Smuzhiyun 			offset += copy;
2282*4882a593Smuzhiyun 			to     += copy;
2283*4882a593Smuzhiyun 		}
2284*4882a593Smuzhiyun 		start = end;
2285*4882a593Smuzhiyun 	}
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	skb_walk_frags(skb, frag_iter) {
2288*4882a593Smuzhiyun 		int end;
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2291*4882a593Smuzhiyun 
2292*4882a593Smuzhiyun 		end = start + frag_iter->len;
2293*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2294*4882a593Smuzhiyun 			if (copy > len)
2295*4882a593Smuzhiyun 				copy = len;
2296*4882a593Smuzhiyun 			if (skb_copy_bits(frag_iter, offset - start, to, copy))
2297*4882a593Smuzhiyun 				goto fault;
2298*4882a593Smuzhiyun 			if ((len -= copy) == 0)
2299*4882a593Smuzhiyun 				return 0;
2300*4882a593Smuzhiyun 			offset += copy;
2301*4882a593Smuzhiyun 			to     += copy;
2302*4882a593Smuzhiyun 		}
2303*4882a593Smuzhiyun 		start = end;
2304*4882a593Smuzhiyun 	}
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun 	if (!len)
2307*4882a593Smuzhiyun 		return 0;
2308*4882a593Smuzhiyun 
2309*4882a593Smuzhiyun fault:
2310*4882a593Smuzhiyun 	return -EFAULT;
2311*4882a593Smuzhiyun }
2312*4882a593Smuzhiyun EXPORT_SYMBOL(skb_copy_bits);
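
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * caller reads a range that may span the linear area, page frags and the
 * frag_list into a flat buffer. The helper name below is hypothetical.
 */
static int __maybe_unused example_peek_bytes(const struct sk_buff *skb,
					     int offset, void *buf, int len)
{
	/* Returns -EFAULT if [offset, offset + len) exceeds skb->len. */
	return skb_copy_bits(skb, offset, buf, len);
}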
2313*4882a593Smuzhiyun 
2314*4882a593Smuzhiyun /*
2315*4882a593Smuzhiyun  * Callback from splice_to_pipe(), if we need to release some pages
2316*4882a593Smuzhiyun  * at the end of the spd in case we error'ed out in filling the pipe.
2317*4882a593Smuzhiyun  */
2318*4882a593Smuzhiyun static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2319*4882a593Smuzhiyun {
2320*4882a593Smuzhiyun 	put_page(spd->pages[i]);
2321*4882a593Smuzhiyun }
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun static struct page *linear_to_page(struct page *page, unsigned int *len,
2324*4882a593Smuzhiyun 				   unsigned int *offset,
2325*4882a593Smuzhiyun 				   struct sock *sk)
2326*4882a593Smuzhiyun {
2327*4882a593Smuzhiyun 	struct page_frag *pfrag = sk_page_frag(sk);
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	if (!sk_page_frag_refill(sk, pfrag))
2330*4882a593Smuzhiyun 		return NULL;
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 	memcpy(page_address(pfrag->page) + pfrag->offset,
2335*4882a593Smuzhiyun 	       page_address(page) + *offset, *len);
2336*4882a593Smuzhiyun 	*offset = pfrag->offset;
2337*4882a593Smuzhiyun 	pfrag->offset += *len;
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	return pfrag->page;
2340*4882a593Smuzhiyun }
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2343*4882a593Smuzhiyun 			     struct page *page,
2344*4882a593Smuzhiyun 			     unsigned int offset)
2345*4882a593Smuzhiyun {
2346*4882a593Smuzhiyun 	return	spd->nr_pages &&
2347*4882a593Smuzhiyun 		spd->pages[spd->nr_pages - 1] == page &&
2348*4882a593Smuzhiyun 		(spd->partial[spd->nr_pages - 1].offset +
2349*4882a593Smuzhiyun 		 spd->partial[spd->nr_pages - 1].len == offset);
2350*4882a593Smuzhiyun }
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun /*
2353*4882a593Smuzhiyun  * Fill page/offset/length into spd, if it can hold more pages.
2354*4882a593Smuzhiyun  */
2355*4882a593Smuzhiyun static bool spd_fill_page(struct splice_pipe_desc *spd,
2356*4882a593Smuzhiyun 			  struct pipe_inode_info *pipe, struct page *page,
2357*4882a593Smuzhiyun 			  unsigned int *len, unsigned int offset,
2358*4882a593Smuzhiyun 			  bool linear,
2359*4882a593Smuzhiyun 			  struct sock *sk)
2360*4882a593Smuzhiyun {
2361*4882a593Smuzhiyun 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2362*4882a593Smuzhiyun 		return true;
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	if (linear) {
2365*4882a593Smuzhiyun 		page = linear_to_page(page, len, &offset, sk);
2366*4882a593Smuzhiyun 		if (!page)
2367*4882a593Smuzhiyun 			return true;
2368*4882a593Smuzhiyun 	}
2369*4882a593Smuzhiyun 	if (spd_can_coalesce(spd, page, offset)) {
2370*4882a593Smuzhiyun 		spd->partial[spd->nr_pages - 1].len += *len;
2371*4882a593Smuzhiyun 		return false;
2372*4882a593Smuzhiyun 	}
2373*4882a593Smuzhiyun 	get_page(page);
2374*4882a593Smuzhiyun 	spd->pages[spd->nr_pages] = page;
2375*4882a593Smuzhiyun 	spd->partial[spd->nr_pages].len = *len;
2376*4882a593Smuzhiyun 	spd->partial[spd->nr_pages].offset = offset;
2377*4882a593Smuzhiyun 	spd->nr_pages++;
2378*4882a593Smuzhiyun 
2379*4882a593Smuzhiyun 	return false;
2380*4882a593Smuzhiyun }
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun static bool __splice_segment(struct page *page, unsigned int poff,
2383*4882a593Smuzhiyun 			     unsigned int plen, unsigned int *off,
2384*4882a593Smuzhiyun 			     unsigned int *len,
2385*4882a593Smuzhiyun 			     struct splice_pipe_desc *spd, bool linear,
2386*4882a593Smuzhiyun 			     struct sock *sk,
2387*4882a593Smuzhiyun 			     struct pipe_inode_info *pipe)
2388*4882a593Smuzhiyun {
2389*4882a593Smuzhiyun 	if (!*len)
2390*4882a593Smuzhiyun 		return true;
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 	/* skip this segment if already processed */
2393*4882a593Smuzhiyun 	if (*off >= plen) {
2394*4882a593Smuzhiyun 		*off -= plen;
2395*4882a593Smuzhiyun 		return false;
2396*4882a593Smuzhiyun 	}
2397*4882a593Smuzhiyun 
2398*4882a593Smuzhiyun 	/* ignore any bits we already processed */
2399*4882a593Smuzhiyun 	poff += *off;
2400*4882a593Smuzhiyun 	plen -= *off;
2401*4882a593Smuzhiyun 	*off = 0;
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	do {
2404*4882a593Smuzhiyun 		unsigned int flen = min(*len, plen);
2405*4882a593Smuzhiyun 
2406*4882a593Smuzhiyun 		if (spd_fill_page(spd, pipe, page, &flen, poff,
2407*4882a593Smuzhiyun 				  linear, sk))
2408*4882a593Smuzhiyun 			return true;
2409*4882a593Smuzhiyun 		poff += flen;
2410*4882a593Smuzhiyun 		plen -= flen;
2411*4882a593Smuzhiyun 		*len -= flen;
2412*4882a593Smuzhiyun 	} while (*len && plen);
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	return false;
2415*4882a593Smuzhiyun }
2416*4882a593Smuzhiyun 
2417*4882a593Smuzhiyun /*
2418*4882a593Smuzhiyun  * Map linear and fragment data from the skb to spd. It reports true if the
2419*4882a593Smuzhiyun  * pipe is full or if we already spliced the requested length.
2420*4882a593Smuzhiyun  */
2421*4882a593Smuzhiyun static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2422*4882a593Smuzhiyun 			      unsigned int *offset, unsigned int *len,
2423*4882a593Smuzhiyun 			      struct splice_pipe_desc *spd, struct sock *sk)
2424*4882a593Smuzhiyun {
2425*4882a593Smuzhiyun 	int seg;
2426*4882a593Smuzhiyun 	struct sk_buff *iter;
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun 	/* map the linear part:
2429*4882a593Smuzhiyun 	 * If skb->head_frag is set, this 'linear' part is backed by a
2430*4882a593Smuzhiyun 	 * fragment, and if the head is not shared with any clones then
2431*4882a593Smuzhiyun 	 * we can avoid a copy since we own the head portion of this page.
2432*4882a593Smuzhiyun 	 */
2433*4882a593Smuzhiyun 	if (__splice_segment(virt_to_page(skb->data),
2434*4882a593Smuzhiyun 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
2435*4882a593Smuzhiyun 			     skb_headlen(skb),
2436*4882a593Smuzhiyun 			     offset, len, spd,
2437*4882a593Smuzhiyun 			     skb_head_is_locked(skb),
2438*4882a593Smuzhiyun 			     sk, pipe))
2439*4882a593Smuzhiyun 		return true;
2440*4882a593Smuzhiyun 
2441*4882a593Smuzhiyun 	/*
2442*4882a593Smuzhiyun 	 * then map the fragments
2443*4882a593Smuzhiyun 	 */
2444*4882a593Smuzhiyun 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2445*4882a593Smuzhiyun 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2446*4882a593Smuzhiyun 
2447*4882a593Smuzhiyun 		if (__splice_segment(skb_frag_page(f),
2448*4882a593Smuzhiyun 				     skb_frag_off(f), skb_frag_size(f),
2449*4882a593Smuzhiyun 				     offset, len, spd, false, sk, pipe))
2450*4882a593Smuzhiyun 			return true;
2451*4882a593Smuzhiyun 	}
2452*4882a593Smuzhiyun 
2453*4882a593Smuzhiyun 	skb_walk_frags(skb, iter) {
2454*4882a593Smuzhiyun 		if (*offset >= iter->len) {
2455*4882a593Smuzhiyun 			*offset -= iter->len;
2456*4882a593Smuzhiyun 			continue;
2457*4882a593Smuzhiyun 		}
2458*4882a593Smuzhiyun 		/* __skb_splice_bits() only fails if the output has no room
2459*4882a593Smuzhiyun 		 * left, so no point in going over the frag_list for the error
2460*4882a593Smuzhiyun 		 * case.
2461*4882a593Smuzhiyun 		 */
2462*4882a593Smuzhiyun 		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2463*4882a593Smuzhiyun 			return true;
2464*4882a593Smuzhiyun 	}
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun 	return false;
2467*4882a593Smuzhiyun }
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun /*
2470*4882a593Smuzhiyun  * Map data from the skb to a pipe. Should handle the linear part,
2471*4882a593Smuzhiyun  * the fragments, and the frag list.
2472*4882a593Smuzhiyun  */
2473*4882a593Smuzhiyun int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2474*4882a593Smuzhiyun 		    struct pipe_inode_info *pipe, unsigned int tlen,
2475*4882a593Smuzhiyun 		    unsigned int flags)
2476*4882a593Smuzhiyun {
2477*4882a593Smuzhiyun 	struct partial_page partial[MAX_SKB_FRAGS];
2478*4882a593Smuzhiyun 	struct page *pages[MAX_SKB_FRAGS];
2479*4882a593Smuzhiyun 	struct splice_pipe_desc spd = {
2480*4882a593Smuzhiyun 		.pages = pages,
2481*4882a593Smuzhiyun 		.partial = partial,
2482*4882a593Smuzhiyun 		.nr_pages_max = MAX_SKB_FRAGS,
2483*4882a593Smuzhiyun 		.ops = &nosteal_pipe_buf_ops,
2484*4882a593Smuzhiyun 		.spd_release = sock_spd_release,
2485*4882a593Smuzhiyun 	};
2486*4882a593Smuzhiyun 	int ret = 0;
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2489*4882a593Smuzhiyun 
2490*4882a593Smuzhiyun 	if (spd.nr_pages)
2491*4882a593Smuzhiyun 		ret = splice_to_pipe(pipe, &spd);
2492*4882a593Smuzhiyun 
2493*4882a593Smuzhiyun 	return ret;
2494*4882a593Smuzhiyun }
2495*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_splice_bits);
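
/*
 * Usage sketch (illustrative): how a protocol's splice_read actor might
 * feed one skb's payload into a pipe, in the spirit of tcp_splice_read().
 * The helper name is hypothetical.
 */
static int __maybe_unused example_splice_skb(struct sock *sk,
					     struct sk_buff *skb,
					     struct pipe_inode_info *pipe,
					     unsigned int want)
{
	/* Returns the number of bytes spliced; a single call maps at
	 * most MAX_SKB_FRAGS pages.
	 */
	return skb_splice_bits(skb, sk, 0, pipe, min(want, skb->len), 0);
}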
2496*4882a593Smuzhiyun 
2497*4882a593Smuzhiyun /* Send skb data on a socket. Socket must be locked. */
2498*4882a593Smuzhiyun int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2499*4882a593Smuzhiyun 			 int len)
2500*4882a593Smuzhiyun {
2501*4882a593Smuzhiyun 	unsigned int orig_len = len;
2502*4882a593Smuzhiyun 	struct sk_buff *head = skb;
2503*4882a593Smuzhiyun 	unsigned short fragidx;
2504*4882a593Smuzhiyun 	int slen, ret;
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun do_frag_list:
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	/* Deal with head data */
2509*4882a593Smuzhiyun 	while (offset < skb_headlen(skb) && len) {
2510*4882a593Smuzhiyun 		struct kvec kv;
2511*4882a593Smuzhiyun 		struct msghdr msg;
2512*4882a593Smuzhiyun 
2513*4882a593Smuzhiyun 		slen = min_t(int, len, skb_headlen(skb) - offset);
2514*4882a593Smuzhiyun 		kv.iov_base = skb->data + offset;
2515*4882a593Smuzhiyun 		kv.iov_len = slen;
2516*4882a593Smuzhiyun 		memset(&msg, 0, sizeof(msg));
2517*4882a593Smuzhiyun 		msg.msg_flags = MSG_DONTWAIT;
2518*4882a593Smuzhiyun 
2519*4882a593Smuzhiyun 		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2520*4882a593Smuzhiyun 		if (ret <= 0)
2521*4882a593Smuzhiyun 			goto error;
2522*4882a593Smuzhiyun 
2523*4882a593Smuzhiyun 		offset += ret;
2524*4882a593Smuzhiyun 		len -= ret;
2525*4882a593Smuzhiyun 	}
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	/* All the data was skb head? */
2528*4882a593Smuzhiyun 	if (!len)
2529*4882a593Smuzhiyun 		goto out;
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	/* Make offset relative to start of frags */
2532*4882a593Smuzhiyun 	offset -= skb_headlen(skb);
2533*4882a593Smuzhiyun 
2534*4882a593Smuzhiyun 	/* Find where we are in frag list */
2535*4882a593Smuzhiyun 	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2536*4882a593Smuzhiyun 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2537*4882a593Smuzhiyun 
2538*4882a593Smuzhiyun 		if (offset < skb_frag_size(frag))
2539*4882a593Smuzhiyun 			break;
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 		offset -= skb_frag_size(frag);
2542*4882a593Smuzhiyun 	}
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2545*4882a593Smuzhiyun 		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2546*4882a593Smuzhiyun 
2547*4882a593Smuzhiyun 		slen = min_t(size_t, len, skb_frag_size(frag) - offset);
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 		while (slen) {
2550*4882a593Smuzhiyun 			ret = kernel_sendpage_locked(sk, skb_frag_page(frag),
2551*4882a593Smuzhiyun 						     skb_frag_off(frag) + offset,
2552*4882a593Smuzhiyun 						     slen, MSG_DONTWAIT);
2553*4882a593Smuzhiyun 			if (ret <= 0)
2554*4882a593Smuzhiyun 				goto error;
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun 			len -= ret;
2557*4882a593Smuzhiyun 			offset += ret;
2558*4882a593Smuzhiyun 			slen -= ret;
2559*4882a593Smuzhiyun 		}
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun 		offset = 0;
2562*4882a593Smuzhiyun 	}
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	if (len) {
2565*4882a593Smuzhiyun 		/* Process any frag lists */
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 		if (skb == head) {
2568*4882a593Smuzhiyun 			if (skb_has_frag_list(skb)) {
2569*4882a593Smuzhiyun 				skb = skb_shinfo(skb)->frag_list;
2570*4882a593Smuzhiyun 				goto do_frag_list;
2571*4882a593Smuzhiyun 			}
2572*4882a593Smuzhiyun 		} else if (skb->next) {
2573*4882a593Smuzhiyun 			skb = skb->next;
2574*4882a593Smuzhiyun 			goto do_frag_list;
2575*4882a593Smuzhiyun 		}
2576*4882a593Smuzhiyun 	}
2577*4882a593Smuzhiyun 
2578*4882a593Smuzhiyun out:
2579*4882a593Smuzhiyun 	return orig_len - len;
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun error:
2582*4882a593Smuzhiyun 	return orig_len == len ? ret : orig_len - len;
2583*4882a593Smuzhiyun }
2584*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_send_sock_locked);
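
/*
 * Usage sketch (illustrative): callers such as kTLS and sockmap hold the
 * socket lock around this helper. The wrapper name is hypothetical.
 */
static int __maybe_unused example_send_whole_skb(struct sock *sk,
						 struct sk_buff *skb)
{
	int sent;

	lock_sock(sk);
	/* Returns bytes sent, or a negative errno if nothing went out. */
	sent = skb_send_sock_locked(sk, skb, 0, skb->len);
	release_sock(sk);

	return sent;
}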
2585*4882a593Smuzhiyun 
2586*4882a593Smuzhiyun /**
2587*4882a593Smuzhiyun  *	skb_store_bits - store bits from kernel buffer to skb
2588*4882a593Smuzhiyun  *	@skb: destination buffer
2589*4882a593Smuzhiyun  *	@offset: offset in destination
2590*4882a593Smuzhiyun  *	@from: source buffer
2591*4882a593Smuzhiyun  *	@len: number of bytes to copy
2592*4882a593Smuzhiyun  *
2593*4882a593Smuzhiyun  *	Copy the specified number of bytes from the source buffer to the
2594*4882a593Smuzhiyun  *	destination skb.  This function handles all the messy bits of
2595*4882a593Smuzhiyun  *	traversing fragment lists and such.
2596*4882a593Smuzhiyun  */
2597*4882a593Smuzhiyun 
2598*4882a593Smuzhiyun int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun 	int start = skb_headlen(skb);
2601*4882a593Smuzhiyun 	struct sk_buff *frag_iter;
2602*4882a593Smuzhiyun 	int i, copy;
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun 	if (offset > (int)skb->len - len)
2605*4882a593Smuzhiyun 		goto fault;
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun 	if ((copy = start - offset) > 0) {
2608*4882a593Smuzhiyun 		if (copy > len)
2609*4882a593Smuzhiyun 			copy = len;
2610*4882a593Smuzhiyun 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
2611*4882a593Smuzhiyun 		if ((len -= copy) == 0)
2612*4882a593Smuzhiyun 			return 0;
2613*4882a593Smuzhiyun 		offset += copy;
2614*4882a593Smuzhiyun 		from += copy;
2615*4882a593Smuzhiyun 	}
2616*4882a593Smuzhiyun 
2617*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2618*4882a593Smuzhiyun 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2619*4882a593Smuzhiyun 		int end;
2620*4882a593Smuzhiyun 
2621*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 		end = start + skb_frag_size(frag);
2624*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2625*4882a593Smuzhiyun 			u32 p_off, p_len, copied;
2626*4882a593Smuzhiyun 			struct page *p;
2627*4882a593Smuzhiyun 			u8 *vaddr;
2628*4882a593Smuzhiyun 
2629*4882a593Smuzhiyun 			if (copy > len)
2630*4882a593Smuzhiyun 				copy = len;
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 			skb_frag_foreach_page(frag,
2633*4882a593Smuzhiyun 					      skb_frag_off(frag) + offset - start,
2634*4882a593Smuzhiyun 					      copy, p, p_off, p_len, copied) {
2635*4882a593Smuzhiyun 				vaddr = kmap_atomic(p);
2636*4882a593Smuzhiyun 				memcpy(vaddr + p_off, from + copied, p_len);
2637*4882a593Smuzhiyun 				kunmap_atomic(vaddr);
2638*4882a593Smuzhiyun 			}
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun 			if ((len -= copy) == 0)
2641*4882a593Smuzhiyun 				return 0;
2642*4882a593Smuzhiyun 			offset += copy;
2643*4882a593Smuzhiyun 			from += copy;
2644*4882a593Smuzhiyun 		}
2645*4882a593Smuzhiyun 		start = end;
2646*4882a593Smuzhiyun 	}
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	skb_walk_frags(skb, frag_iter) {
2649*4882a593Smuzhiyun 		int end;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2652*4882a593Smuzhiyun 
2653*4882a593Smuzhiyun 		end = start + frag_iter->len;
2654*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2655*4882a593Smuzhiyun 			if (copy > len)
2656*4882a593Smuzhiyun 				copy = len;
2657*4882a593Smuzhiyun 			if (skb_store_bits(frag_iter, offset - start,
2658*4882a593Smuzhiyun 					   from, copy))
2659*4882a593Smuzhiyun 				goto fault;
2660*4882a593Smuzhiyun 			if ((len -= copy) == 0)
2661*4882a593Smuzhiyun 				return 0;
2662*4882a593Smuzhiyun 			offset += copy;
2663*4882a593Smuzhiyun 			from += copy;
2664*4882a593Smuzhiyun 		}
2665*4882a593Smuzhiyun 		start = end;
2666*4882a593Smuzhiyun 	}
2667*4882a593Smuzhiyun 	if (!len)
2668*4882a593Smuzhiyun 		return 0;
2669*4882a593Smuzhiyun 
2670*4882a593Smuzhiyun fault:
2671*4882a593Smuzhiyun 	return -EFAULT;
2672*4882a593Smuzhiyun }
2673*4882a593Smuzhiyun EXPORT_SYMBOL(skb_store_bits);
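
/*
 * Usage sketch (illustrative): overwrite payload bytes in place, e.g.
 * when mangling a field of a possibly non-linear packet. The helper
 * name is hypothetical.
 */
static int __maybe_unused example_overwrite(struct sk_buff *skb, int offset,
					    const void *val, int len)
{
	/* The destination range must already exist inside the skb;
	 * skb_store_bits() never grows the buffer.
	 */
	return skb_store_bits(skb, offset, val, len);
}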
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun /* Checksum skb data. */
2676*4882a593Smuzhiyun __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2677*4882a593Smuzhiyun 		      __wsum csum, const struct skb_checksum_ops *ops)
2678*4882a593Smuzhiyun {
2679*4882a593Smuzhiyun 	int start = skb_headlen(skb);
2680*4882a593Smuzhiyun 	int i, copy = start - offset;
2681*4882a593Smuzhiyun 	struct sk_buff *frag_iter;
2682*4882a593Smuzhiyun 	int pos = 0;
2683*4882a593Smuzhiyun 
2684*4882a593Smuzhiyun 	/* Checksum header. */
2685*4882a593Smuzhiyun 	if (copy > 0) {
2686*4882a593Smuzhiyun 		if (copy > len)
2687*4882a593Smuzhiyun 			copy = len;
2688*4882a593Smuzhiyun 		csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
2689*4882a593Smuzhiyun 				       skb->data + offset, copy, csum);
2690*4882a593Smuzhiyun 		if ((len -= copy) == 0)
2691*4882a593Smuzhiyun 			return csum;
2692*4882a593Smuzhiyun 		offset += copy;
2693*4882a593Smuzhiyun 		pos	= copy;
2694*4882a593Smuzhiyun 	}
2695*4882a593Smuzhiyun 
2696*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2697*4882a593Smuzhiyun 		int end;
2698*4882a593Smuzhiyun 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2699*4882a593Smuzhiyun 
2700*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 		end = start + skb_frag_size(frag);
2703*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2704*4882a593Smuzhiyun 			u32 p_off, p_len, copied;
2705*4882a593Smuzhiyun 			struct page *p;
2706*4882a593Smuzhiyun 			__wsum csum2;
2707*4882a593Smuzhiyun 			u8 *vaddr;
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 			if (copy > len)
2710*4882a593Smuzhiyun 				copy = len;
2711*4882a593Smuzhiyun 
2712*4882a593Smuzhiyun 			skb_frag_foreach_page(frag,
2713*4882a593Smuzhiyun 					      skb_frag_off(frag) + offset - start,
2714*4882a593Smuzhiyun 					      copy, p, p_off, p_len, copied) {
2715*4882a593Smuzhiyun 				vaddr = kmap_atomic(p);
2716*4882a593Smuzhiyun 				csum2 = INDIRECT_CALL_1(ops->update,
2717*4882a593Smuzhiyun 							csum_partial_ext,
2718*4882a593Smuzhiyun 							vaddr + p_off, p_len, 0);
2719*4882a593Smuzhiyun 				kunmap_atomic(vaddr);
2720*4882a593Smuzhiyun 				csum = INDIRECT_CALL_1(ops->combine,
2721*4882a593Smuzhiyun 						       csum_block_add_ext, csum,
2722*4882a593Smuzhiyun 						       csum2, pos, p_len);
2723*4882a593Smuzhiyun 				pos += p_len;
2724*4882a593Smuzhiyun 			}
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun 			if (!(len -= copy))
2727*4882a593Smuzhiyun 				return csum;
2728*4882a593Smuzhiyun 			offset += copy;
2729*4882a593Smuzhiyun 		}
2730*4882a593Smuzhiyun 		start = end;
2731*4882a593Smuzhiyun 	}
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 	skb_walk_frags(skb, frag_iter) {
2734*4882a593Smuzhiyun 		int end;
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 		end = start + frag_iter->len;
2739*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2740*4882a593Smuzhiyun 			__wsum csum2;
2741*4882a593Smuzhiyun 			if (copy > len)
2742*4882a593Smuzhiyun 				copy = len;
2743*4882a593Smuzhiyun 			csum2 = __skb_checksum(frag_iter, offset - start,
2744*4882a593Smuzhiyun 					       copy, 0, ops);
2745*4882a593Smuzhiyun 			csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
2746*4882a593Smuzhiyun 					       csum, csum2, pos, copy);
2747*4882a593Smuzhiyun 			if ((len -= copy) == 0)
2748*4882a593Smuzhiyun 				return csum;
2749*4882a593Smuzhiyun 			offset += copy;
2750*4882a593Smuzhiyun 			pos    += copy;
2751*4882a593Smuzhiyun 		}
2752*4882a593Smuzhiyun 		start = end;
2753*4882a593Smuzhiyun 	}
2754*4882a593Smuzhiyun 	BUG_ON(len);
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun 	return csum;
2757*4882a593Smuzhiyun }
2758*4882a593Smuzhiyun EXPORT_SYMBOL(__skb_checksum);
2759*4882a593Smuzhiyun 
2760*4882a593Smuzhiyun __wsum skb_checksum(const struct sk_buff *skb, int offset,
2761*4882a593Smuzhiyun 		    int len, __wsum csum)
2762*4882a593Smuzhiyun {
2763*4882a593Smuzhiyun 	const struct skb_checksum_ops ops = {
2764*4882a593Smuzhiyun 		.update  = csum_partial_ext,
2765*4882a593Smuzhiyun 		.combine = csum_block_add_ext,
2766*4882a593Smuzhiyun 	};
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun 	return __skb_checksum(skb, offset, len, csum, &ops);
2769*4882a593Smuzhiyun }
2770*4882a593Smuzhiyun EXPORT_SYMBOL(skb_checksum);
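
/*
 * Usage sketch (illustrative): fold a checksum over the bytes from a
 * transport-header offset to the end of the packet, as receive paths do
 * when software verification is needed. Assumes thoff <= skb->len; the
 * helper name is hypothetical.
 */
static __sum16 __maybe_unused example_l4_csum(const struct sk_buff *skb,
					      int thoff)
{
	return csum_fold(skb_checksum(skb, thoff, skb->len - thoff, 0));
}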
2771*4882a593Smuzhiyun 
2772*4882a593Smuzhiyun /* Both of above in one bottle. */
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2775*4882a593Smuzhiyun 				    u8 *to, int len)
2776*4882a593Smuzhiyun {
2777*4882a593Smuzhiyun 	int start = skb_headlen(skb);
2778*4882a593Smuzhiyun 	int i, copy = start - offset;
2779*4882a593Smuzhiyun 	struct sk_buff *frag_iter;
2780*4882a593Smuzhiyun 	int pos = 0;
2781*4882a593Smuzhiyun 	__wsum csum = 0;
2782*4882a593Smuzhiyun 
2783*4882a593Smuzhiyun 	/* Copy header. */
2784*4882a593Smuzhiyun 	if (copy > 0) {
2785*4882a593Smuzhiyun 		if (copy > len)
2786*4882a593Smuzhiyun 			copy = len;
2787*4882a593Smuzhiyun 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2788*4882a593Smuzhiyun 						 copy);
2789*4882a593Smuzhiyun 		if ((len -= copy) == 0)
2790*4882a593Smuzhiyun 			return csum;
2791*4882a593Smuzhiyun 		offset += copy;
2792*4882a593Smuzhiyun 		to     += copy;
2793*4882a593Smuzhiyun 		pos	= copy;
2794*4882a593Smuzhiyun 	}
2795*4882a593Smuzhiyun 
2796*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2797*4882a593Smuzhiyun 		int end;
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2802*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2803*4882a593Smuzhiyun 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2804*4882a593Smuzhiyun 			u32 p_off, p_len, copied;
2805*4882a593Smuzhiyun 			struct page *p;
2806*4882a593Smuzhiyun 			__wsum csum2;
2807*4882a593Smuzhiyun 			u8 *vaddr;
2808*4882a593Smuzhiyun 
2809*4882a593Smuzhiyun 			if (copy > len)
2810*4882a593Smuzhiyun 				copy = len;
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun 			skb_frag_foreach_page(frag,
2813*4882a593Smuzhiyun 					      skb_frag_off(frag) + offset - start,
2814*4882a593Smuzhiyun 					      copy, p, p_off, p_len, copied) {
2815*4882a593Smuzhiyun 				vaddr = kmap_atomic(p);
2816*4882a593Smuzhiyun 				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2817*4882a593Smuzhiyun 								  to + copied,
2818*4882a593Smuzhiyun 								  p_len);
2819*4882a593Smuzhiyun 				kunmap_atomic(vaddr);
2820*4882a593Smuzhiyun 				csum = csum_block_add(csum, csum2, pos);
2821*4882a593Smuzhiyun 				pos += p_len;
2822*4882a593Smuzhiyun 			}
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 			if (!(len -= copy))
2825*4882a593Smuzhiyun 				return csum;
2826*4882a593Smuzhiyun 			offset += copy;
2827*4882a593Smuzhiyun 			to     += copy;
2828*4882a593Smuzhiyun 		}
2829*4882a593Smuzhiyun 		start = end;
2830*4882a593Smuzhiyun 	}
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun 	skb_walk_frags(skb, frag_iter) {
2833*4882a593Smuzhiyun 		__wsum csum2;
2834*4882a593Smuzhiyun 		int end;
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
2837*4882a593Smuzhiyun 
2838*4882a593Smuzhiyun 		end = start + frag_iter->len;
2839*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
2840*4882a593Smuzhiyun 			if (copy > len)
2841*4882a593Smuzhiyun 				copy = len;
2842*4882a593Smuzhiyun 			csum2 = skb_copy_and_csum_bits(frag_iter,
2843*4882a593Smuzhiyun 						       offset - start,
2844*4882a593Smuzhiyun 						       to, copy);
2845*4882a593Smuzhiyun 			csum = csum_block_add(csum, csum2, pos);
2846*4882a593Smuzhiyun 			if ((len -= copy) == 0)
2847*4882a593Smuzhiyun 				return csum;
2848*4882a593Smuzhiyun 			offset += copy;
2849*4882a593Smuzhiyun 			to     += copy;
2850*4882a593Smuzhiyun 			pos    += copy;
2851*4882a593Smuzhiyun 		}
2852*4882a593Smuzhiyun 		start = end;
2853*4882a593Smuzhiyun 	}
2854*4882a593Smuzhiyun 	BUG_ON(len);
2855*4882a593Smuzhiyun 	return csum;
2856*4882a593Smuzhiyun }
2857*4882a593Smuzhiyun EXPORT_SYMBOL(skb_copy_and_csum_bits);
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
2860*4882a593Smuzhiyun {
2861*4882a593Smuzhiyun 	__sum16 sum;
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
2864*4882a593Smuzhiyun 	/* See comments in __skb_checksum_complete(). */
2865*4882a593Smuzhiyun 	if (likely(!sum)) {
2866*4882a593Smuzhiyun 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2867*4882a593Smuzhiyun 		    !skb->csum_complete_sw)
2868*4882a593Smuzhiyun 			netdev_rx_csum_fault(skb->dev, skb);
2869*4882a593Smuzhiyun 	}
2870*4882a593Smuzhiyun 	if (!skb_shared(skb))
2871*4882a593Smuzhiyun 		skb->csum_valid = !sum;
2872*4882a593Smuzhiyun 	return sum;
2873*4882a593Smuzhiyun }
2874*4882a593Smuzhiyun EXPORT_SYMBOL(__skb_checksum_complete_head);
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun /* This function assumes skb->csum already holds pseudo header's checksum,
2877*4882a593Smuzhiyun  * which has been changed from the hardware checksum, for example, by
2878*4882a593Smuzhiyun  * __skb_checksum_validate_complete(). And, the original skb->csum must
2879*4882a593Smuzhiyun  * have been validated unsuccessfully for the CHECKSUM_COMPLETE case.
2880*4882a593Smuzhiyun  *
2881*4882a593Smuzhiyun  * It returns non-zero if the recomputed checksum is still invalid, otherwise
2882*4882a593Smuzhiyun  * zero. The new checksum is stored back into skb->csum unless the skb is
2883*4882a593Smuzhiyun  * shared.
2884*4882a593Smuzhiyun  */
2885*4882a593Smuzhiyun __sum16 __skb_checksum_complete(struct sk_buff *skb)
2886*4882a593Smuzhiyun {
2887*4882a593Smuzhiyun 	__wsum csum;
2888*4882a593Smuzhiyun 	__sum16 sum;
2889*4882a593Smuzhiyun 
2890*4882a593Smuzhiyun 	csum = skb_checksum(skb, 0, skb->len, 0);
2891*4882a593Smuzhiyun 
2892*4882a593Smuzhiyun 	sum = csum_fold(csum_add(skb->csum, csum));
2893*4882a593Smuzhiyun 	/* This check is inverted, because we already knew the hardware
2894*4882a593Smuzhiyun 	 * checksum is invalid before calling this function. So, if the
2895*4882a593Smuzhiyun 	 * re-computed checksum is valid instead, then we have a mismatch
2896*4882a593Smuzhiyun 	 * between the original skb->csum and skb_checksum(). This means either
2897*4882a593Smuzhiyun 	 * the original hardware checksum is incorrect or we screw up skb->csum
2898*4882a593Smuzhiyun 	 * when moving skb->data around.
2899*4882a593Smuzhiyun 	 */
2900*4882a593Smuzhiyun 	if (likely(!sum)) {
2901*4882a593Smuzhiyun 		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
2902*4882a593Smuzhiyun 		    !skb->csum_complete_sw)
2903*4882a593Smuzhiyun 			netdev_rx_csum_fault(skb->dev, skb);
2904*4882a593Smuzhiyun 	}
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 	if (!skb_shared(skb)) {
2907*4882a593Smuzhiyun 		/* Save full packet checksum */
2908*4882a593Smuzhiyun 		skb->csum = csum;
2909*4882a593Smuzhiyun 		skb->ip_summed = CHECKSUM_COMPLETE;
2910*4882a593Smuzhiyun 		skb->csum_complete_sw = 1;
2911*4882a593Smuzhiyun 		skb->csum_valid = !sum;
2912*4882a593Smuzhiyun 	}
2913*4882a593Smuzhiyun 
2914*4882a593Smuzhiyun 	return sum;
2915*4882a593Smuzhiyun }
2916*4882a593Smuzhiyun EXPORT_SYMBOL(__skb_checksum_complete);
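
/*
 * Usage sketch (illustrative): receive paths normally reach this through
 * the skb_checksum_complete() wrapper once the pseudo-header has been
 * folded into skb->csum. The helper name is hypothetical.
 */
static bool __maybe_unused example_csum_is_valid(struct sk_buff *skb)
{
	/* A zero folded sum means the recomputed checksum verified OK. */
	return __skb_checksum_complete(skb) == 0;
}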
2917*4882a593Smuzhiyun 
2918*4882a593Smuzhiyun static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2919*4882a593Smuzhiyun {
2920*4882a593Smuzhiyun 	net_warn_ratelimited(
2921*4882a593Smuzhiyun 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
2922*4882a593Smuzhiyun 		__func__);
2923*4882a593Smuzhiyun 	return 0;
2924*4882a593Smuzhiyun }
2925*4882a593Smuzhiyun 
2926*4882a593Smuzhiyun static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2927*4882a593Smuzhiyun 				       int offset, int len)
2928*4882a593Smuzhiyun {
2929*4882a593Smuzhiyun 	net_warn_ratelimited(
2930*4882a593Smuzhiyun 		"%s: attempt to compute crc32c without libcrc32c.ko\n",
2931*4882a593Smuzhiyun 		__func__);
2932*4882a593Smuzhiyun 	return 0;
2933*4882a593Smuzhiyun }
2934*4882a593Smuzhiyun 
2935*4882a593Smuzhiyun static const struct skb_checksum_ops default_crc32c_ops = {
2936*4882a593Smuzhiyun 	.update  = warn_crc32c_csum_update,
2937*4882a593Smuzhiyun 	.combine = warn_crc32c_csum_combine,
2938*4882a593Smuzhiyun };
2939*4882a593Smuzhiyun 
2940*4882a593Smuzhiyun const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2941*4882a593Smuzhiyun 	&default_crc32c_ops;
2942*4882a593Smuzhiyun EXPORT_SYMBOL(crc32c_csum_stub);
2943*4882a593Smuzhiyun 
2944*4882a593Smuzhiyun /**
2945*4882a593Smuzhiyun  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2946*4882a593Smuzhiyun  *	@from: source buffer
2947*4882a593Smuzhiyun  *
2948*4882a593Smuzhiyun  *	Calculates the amount of linear headroom needed in the 'to' skb passed
2949*4882a593Smuzhiyun  *	into skb_zerocopy().
2950*4882a593Smuzhiyun  */
2951*4882a593Smuzhiyun unsigned int
2952*4882a593Smuzhiyun skb_zerocopy_headlen(const struct sk_buff *from)
2953*4882a593Smuzhiyun {
2954*4882a593Smuzhiyun 	unsigned int hlen = 0;
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun 	if (!from->head_frag ||
2957*4882a593Smuzhiyun 	    skb_headlen(from) < L1_CACHE_BYTES ||
2958*4882a593Smuzhiyun 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
2959*4882a593Smuzhiyun 		hlen = skb_headlen(from);
2960*4882a593Smuzhiyun 		if (!hlen)
2961*4882a593Smuzhiyun 			hlen = from->len;
2962*4882a593Smuzhiyun 	}
2963*4882a593Smuzhiyun 
2964*4882a593Smuzhiyun 	if (skb_has_frag_list(from))
2965*4882a593Smuzhiyun 		hlen = from->len;
2966*4882a593Smuzhiyun 
2967*4882a593Smuzhiyun 	return hlen;
2968*4882a593Smuzhiyun }
2969*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun /**
2972*4882a593Smuzhiyun  *	skb_zerocopy - Zero copy skb to skb
2973*4882a593Smuzhiyun  *	@to: destination buffer
2974*4882a593Smuzhiyun  *	@from: source buffer
2975*4882a593Smuzhiyun  *	@len: number of bytes to copy from source buffer
2976*4882a593Smuzhiyun  *	@hlen: size of linear headroom in destination buffer
2977*4882a593Smuzhiyun  *
2978*4882a593Smuzhiyun  *	Copies up to `len` bytes from `from` to `to` by creating references
2979*4882a593Smuzhiyun  *	to the frags in the source buffer.
2980*4882a593Smuzhiyun  *
2981*4882a593Smuzhiyun  *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2982*4882a593Smuzhiyun  *	headroom in the `to` buffer.
2983*4882a593Smuzhiyun  *
2984*4882a593Smuzhiyun  *	Return value:
2985*4882a593Smuzhiyun  *	0: everything is OK
2986*4882a593Smuzhiyun  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
2987*4882a593Smuzhiyun  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
2988*4882a593Smuzhiyun  */
2989*4882a593Smuzhiyun int
2990*4882a593Smuzhiyun skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2991*4882a593Smuzhiyun {
2992*4882a593Smuzhiyun 	int i, j = 0;
2993*4882a593Smuzhiyun 	int plen = 0; /* length of skb->head fragment */
2994*4882a593Smuzhiyun 	int ret;
2995*4882a593Smuzhiyun 	struct page *page;
2996*4882a593Smuzhiyun 	unsigned int offset;
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 	BUG_ON(!from->head_frag && !hlen);
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	/* don't bother with small payloads */
3001*4882a593Smuzhiyun 	if (len <= skb_tailroom(to))
3002*4882a593Smuzhiyun 		return skb_copy_bits(from, 0, skb_put(to, len), len);
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 	if (hlen) {
3005*4882a593Smuzhiyun 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
3006*4882a593Smuzhiyun 		if (unlikely(ret))
3007*4882a593Smuzhiyun 			return ret;
3008*4882a593Smuzhiyun 		len -= hlen;
3009*4882a593Smuzhiyun 	} else {
3010*4882a593Smuzhiyun 		plen = min_t(int, skb_headlen(from), len);
3011*4882a593Smuzhiyun 		if (plen) {
3012*4882a593Smuzhiyun 			page = virt_to_head_page(from->head);
3013*4882a593Smuzhiyun 			offset = from->data - (unsigned char *)page_address(page);
3014*4882a593Smuzhiyun 			__skb_fill_page_desc(to, 0, page, offset, plen);
3015*4882a593Smuzhiyun 			get_page(page);
3016*4882a593Smuzhiyun 			j = 1;
3017*4882a593Smuzhiyun 			len -= plen;
3018*4882a593Smuzhiyun 		}
3019*4882a593Smuzhiyun 	}
3020*4882a593Smuzhiyun 
3021*4882a593Smuzhiyun 	to->truesize += len + plen;
3022*4882a593Smuzhiyun 	to->len += len + plen;
3023*4882a593Smuzhiyun 	to->data_len += len + plen;
3024*4882a593Smuzhiyun 
3025*4882a593Smuzhiyun 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
3026*4882a593Smuzhiyun 		skb_tx_error(from);
3027*4882a593Smuzhiyun 		return -ENOMEM;
3028*4882a593Smuzhiyun 	}
3029*4882a593Smuzhiyun 	skb_zerocopy_clone(to, from, GFP_ATOMIC);
3030*4882a593Smuzhiyun 
3031*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
3032*4882a593Smuzhiyun 		int size;
3033*4882a593Smuzhiyun 
3034*4882a593Smuzhiyun 		if (!len)
3035*4882a593Smuzhiyun 			break;
3036*4882a593Smuzhiyun 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
3037*4882a593Smuzhiyun 		size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
3038*4882a593Smuzhiyun 					len);
3039*4882a593Smuzhiyun 		skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
3040*4882a593Smuzhiyun 		len -= size;
3041*4882a593Smuzhiyun 		skb_frag_ref(to, j);
3042*4882a593Smuzhiyun 		j++;
3043*4882a593Smuzhiyun 	}
3044*4882a593Smuzhiyun 	skb_shinfo(to)->nr_frags = j;
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 	return 0;
3047*4882a593Smuzhiyun }
3048*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_zerocopy);
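
/*
 * Usage sketch (illustrative): pair skb_zerocopy_headlen() with
 * skb_zerocopy(), as nfnetlink_queue does. The destination must offer
 * at least that much linear space; the helper name and the -ENOBUFS
 * choice are hypothetical.
 */
static int __maybe_unused example_zcopy(struct sk_buff *to,
					struct sk_buff *from, int len)
{
	unsigned int hlen = skb_zerocopy_headlen(from);

	if (skb_tailroom(to) < (int)hlen)
		return -ENOBUFS;

	return skb_zerocopy(to, from, len, hlen);
}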
3049*4882a593Smuzhiyun 
3050*4882a593Smuzhiyun void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
3051*4882a593Smuzhiyun {
3052*4882a593Smuzhiyun 	__wsum csum;
3053*4882a593Smuzhiyun 	long csstart;
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 	if (skb->ip_summed == CHECKSUM_PARTIAL)
3056*4882a593Smuzhiyun 		csstart = skb_checksum_start_offset(skb);
3057*4882a593Smuzhiyun 	else
3058*4882a593Smuzhiyun 		csstart = skb_headlen(skb);
3059*4882a593Smuzhiyun 
3060*4882a593Smuzhiyun 	BUG_ON(csstart > skb_headlen(skb));
3061*4882a593Smuzhiyun 
3062*4882a593Smuzhiyun 	skb_copy_from_linear_data(skb, to, csstart);
3063*4882a593Smuzhiyun 
3064*4882a593Smuzhiyun 	csum = 0;
3065*4882a593Smuzhiyun 	if (csstart != skb->len)
3066*4882a593Smuzhiyun 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
3067*4882a593Smuzhiyun 					      skb->len - csstart);
3068*4882a593Smuzhiyun 
3069*4882a593Smuzhiyun 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
3070*4882a593Smuzhiyun 		long csstuff = csstart + skb->csum_offset;
3071*4882a593Smuzhiyun 
3072*4882a593Smuzhiyun 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
3073*4882a593Smuzhiyun 	}
3074*4882a593Smuzhiyun }
3075*4882a593Smuzhiyun EXPORT_SYMBOL(skb_copy_and_csum_dev);
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun /**
3078*4882a593Smuzhiyun  *	skb_dequeue - remove from the head of the queue
3079*4882a593Smuzhiyun  *	@list: list to dequeue from
3080*4882a593Smuzhiyun  *
3081*4882a593Smuzhiyun  *	Remove the head of the list. The list lock is taken so the function
3082*4882a593Smuzhiyun  *	may be used safely with other locking list functions. The head item is
3083*4882a593Smuzhiyun  *	returned or %NULL if the list is empty.
3084*4882a593Smuzhiyun  */
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun struct sk_buff *skb_dequeue(struct sk_buff_head *list)
3087*4882a593Smuzhiyun {
3088*4882a593Smuzhiyun 	unsigned long flags;
3089*4882a593Smuzhiyun 	struct sk_buff *result;
3090*4882a593Smuzhiyun 
3091*4882a593Smuzhiyun 	spin_lock_irqsave(&list->lock, flags);
3092*4882a593Smuzhiyun 	result = __skb_dequeue(list);
3093*4882a593Smuzhiyun 	spin_unlock_irqrestore(&list->lock, flags);
3094*4882a593Smuzhiyun 	return result;
3095*4882a593Smuzhiyun }
3096*4882a593Smuzhiyun EXPORT_SYMBOL(skb_dequeue);
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun /**
3099*4882a593Smuzhiyun  *	skb_dequeue_tail - remove from the tail of the queue
3100*4882a593Smuzhiyun  *	@list: list to dequeue from
3101*4882a593Smuzhiyun  *
3102*4882a593Smuzhiyun  *	Remove the tail of the list. The list lock is taken so the function
3103*4882a593Smuzhiyun  *	may be used safely with other locking list functions. The tail item is
3104*4882a593Smuzhiyun  *	returned or %NULL if the list is empty.
3105*4882a593Smuzhiyun  */
3106*4882a593Smuzhiyun struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
3107*4882a593Smuzhiyun {
3108*4882a593Smuzhiyun 	unsigned long flags;
3109*4882a593Smuzhiyun 	struct sk_buff *result;
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 	spin_lock_irqsave(&list->lock, flags);
3112*4882a593Smuzhiyun 	result = __skb_dequeue_tail(list);
3113*4882a593Smuzhiyun 	spin_unlock_irqrestore(&list->lock, flags);
3114*4882a593Smuzhiyun 	return result;
3115*4882a593Smuzhiyun }
3116*4882a593Smuzhiyun EXPORT_SYMBOL(skb_dequeue_tail);
3117*4882a593Smuzhiyun 
3118*4882a593Smuzhiyun /**
3119*4882a593Smuzhiyun  *	skb_queue_purge - empty a list
3120*4882a593Smuzhiyun  *	@list: list to empty
3121*4882a593Smuzhiyun  *
3122*4882a593Smuzhiyun  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
3123*4882a593Smuzhiyun  *	the list and one reference dropped. This function takes the list
3124*4882a593Smuzhiyun  *	lock and is atomic with respect to other list locking functions.
3125*4882a593Smuzhiyun  */
3126*4882a593Smuzhiyun void skb_queue_purge(struct sk_buff_head *list)
3127*4882a593Smuzhiyun {
3128*4882a593Smuzhiyun 	struct sk_buff *skb;
3129*4882a593Smuzhiyun 	while ((skb = skb_dequeue(list)) != NULL)
3130*4882a593Smuzhiyun 		kfree_skb(skb);
3131*4882a593Smuzhiyun }
3132*4882a593Smuzhiyun EXPORT_SYMBOL(skb_queue_purge);
3133*4882a593Smuzhiyun 
3134*4882a593Smuzhiyun /**
3135*4882a593Smuzhiyun  *	skb_rbtree_purge - empty a skb rbtree
3136*4882a593Smuzhiyun  *	@root: root of the rbtree to empty
3137*4882a593Smuzhiyun  *	Return value: the sum of truesizes of all purged skbs.
3138*4882a593Smuzhiyun  *
3139*4882a593Smuzhiyun  *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
3140*4882a593Smuzhiyun  *	the list and one reference dropped. This function does not take
3141*4882a593Smuzhiyun  *	the rbtree and one reference dropped. This function does not take
3142*4882a593Smuzhiyun  *	out-of-order queue is protected by the socket lock).
3143*4882a593Smuzhiyun  */
3144*4882a593Smuzhiyun unsigned int skb_rbtree_purge(struct rb_root *root)
3145*4882a593Smuzhiyun {
3146*4882a593Smuzhiyun 	struct rb_node *p = rb_first(root);
3147*4882a593Smuzhiyun 	unsigned int sum = 0;
3148*4882a593Smuzhiyun 
3149*4882a593Smuzhiyun 	while (p) {
3150*4882a593Smuzhiyun 		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
3151*4882a593Smuzhiyun 
3152*4882a593Smuzhiyun 		p = rb_next(p);
3153*4882a593Smuzhiyun 		rb_erase(&skb->rbnode, root);
3154*4882a593Smuzhiyun 		sum += skb->truesize;
3155*4882a593Smuzhiyun 		kfree_skb(skb);
3156*4882a593Smuzhiyun 	}
3157*4882a593Smuzhiyun 	return sum;
3158*4882a593Smuzhiyun }
3159*4882a593Smuzhiyun 
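
/*
 * Usage sketch (illustrative): flush an out-of-order queue under the
 * socket lock, which provides the synchronization this helper leaves to
 * the caller. The helper name is hypothetical.
 */
static unsigned int __maybe_unused example_flush_ooo(struct sock *sk,
						     struct rb_root *root)
{
	unsigned int freed;

	lock_sock(sk);
	freed = skb_rbtree_purge(root);	/* sum of purged truesizes */
	release_sock(sk);

	return freed;
}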
3160*4882a593Smuzhiyun /**
3161*4882a593Smuzhiyun  *	skb_queue_head - queue a buffer at the list head
3162*4882a593Smuzhiyun  *	@list: list to use
3163*4882a593Smuzhiyun  *	@newsk: buffer to queue
3164*4882a593Smuzhiyun  *
3165*4882a593Smuzhiyun  *	Queue a buffer at the start of the list. This function takes the
3166*4882a593Smuzhiyun  *	list lock and can be used safely with other locking &sk_buff
3167*4882a593Smuzhiyun  *	functions.
3168*4882a593Smuzhiyun  *
3169*4882a593Smuzhiyun  *	A buffer cannot be placed on two lists at the same time.
3170*4882a593Smuzhiyun  */
3171*4882a593Smuzhiyun void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
3172*4882a593Smuzhiyun {
3173*4882a593Smuzhiyun 	unsigned long flags;
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	spin_lock_irqsave(&list->lock, flags);
3176*4882a593Smuzhiyun 	__skb_queue_head(list, newsk);
3177*4882a593Smuzhiyun 	spin_unlock_irqrestore(&list->lock, flags);
3178*4882a593Smuzhiyun }
3179*4882a593Smuzhiyun EXPORT_SYMBOL(skb_queue_head);
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun /**
3182*4882a593Smuzhiyun  *	skb_queue_tail - queue a buffer at the list tail
3183*4882a593Smuzhiyun  *	@list: list to use
3184*4882a593Smuzhiyun  *	@newsk: buffer to queue
3185*4882a593Smuzhiyun  *
3186*4882a593Smuzhiyun  *	Queue a buffer at the tail of the list. This function takes the
3187*4882a593Smuzhiyun  *	list lock and can be used safely with other locking &sk_buff
3188*4882a593Smuzhiyun  *	functions.
3189*4882a593Smuzhiyun  *
3190*4882a593Smuzhiyun  *	A buffer cannot be placed on two lists at the same time.
3191*4882a593Smuzhiyun  */
3192*4882a593Smuzhiyun void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
3193*4882a593Smuzhiyun {
3194*4882a593Smuzhiyun 	unsigned long flags;
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun 	spin_lock_irqsave(&list->lock, flags);
3197*4882a593Smuzhiyun 	__skb_queue_tail(list, newsk);
3198*4882a593Smuzhiyun 	spin_unlock_irqrestore(&list->lock, flags);
3199*4882a593Smuzhiyun }
3200*4882a593Smuzhiyun EXPORT_SYMBOL(skb_queue_tail);
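
/*
 * Usage sketch (illustrative): FIFO producer/consumer use of the locked
 * queue helpers. Both take the list lock with IRQs disabled, so they are
 * safe from any context. The helper name is hypothetical.
 */
static void __maybe_unused example_fifo(struct sk_buff_head *list,
					struct sk_buff *skb)
{
	struct sk_buff *oldest;

	skb_queue_tail(list, skb);	/* producer side */
	oldest = skb_dequeue(list);	/* consumer side, may return NULL */
	if (oldest)
		kfree_skb(oldest);
}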
3201*4882a593Smuzhiyun 
3202*4882a593Smuzhiyun /**
3203*4882a593Smuzhiyun  *	skb_unlink	-	remove a buffer from a list
3204*4882a593Smuzhiyun  *	@skb: buffer to remove
3205*4882a593Smuzhiyun  *	@list: list to use
3206*4882a593Smuzhiyun  *
3207*4882a593Smuzhiyun  *	Remove a packet from a list. The list locks are taken and this
3208*4882a593Smuzhiyun  *	function is atomic with respect to other list locked calls.
3209*4882a593Smuzhiyun  *
3210*4882a593Smuzhiyun  *	You must know what list the SKB is on.
3211*4882a593Smuzhiyun  */
3212*4882a593Smuzhiyun void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
3213*4882a593Smuzhiyun {
3214*4882a593Smuzhiyun 	unsigned long flags;
3215*4882a593Smuzhiyun 
3216*4882a593Smuzhiyun 	spin_lock_irqsave(&list->lock, flags);
3217*4882a593Smuzhiyun 	__skb_unlink(skb, list);
3218*4882a593Smuzhiyun 	spin_unlock_irqrestore(&list->lock, flags);
3219*4882a593Smuzhiyun }
3220*4882a593Smuzhiyun EXPORT_SYMBOL(skb_unlink);
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun /**
3223*4882a593Smuzhiyun  *	skb_append	-	append a buffer
3224*4882a593Smuzhiyun  *	@old: buffer to insert after
3225*4882a593Smuzhiyun  *	@newsk: buffer to insert
3226*4882a593Smuzhiyun  *	@list: list to use
3227*4882a593Smuzhiyun  *
3228*4882a593Smuzhiyun  *	Place a packet after a given packet in a list. The list locks are taken
3229*4882a593Smuzhiyun  *	and this function is atomic with respect to other list locked calls.
3230*4882a593Smuzhiyun  *	A buffer cannot be placed on two lists at the same time.
3231*4882a593Smuzhiyun  */
3232*4882a593Smuzhiyun void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
3233*4882a593Smuzhiyun {
3234*4882a593Smuzhiyun 	unsigned long flags;
3235*4882a593Smuzhiyun 
3236*4882a593Smuzhiyun 	spin_lock_irqsave(&list->lock, flags);
3237*4882a593Smuzhiyun 	__skb_queue_after(list, old, newsk);
3238*4882a593Smuzhiyun 	spin_unlock_irqrestore(&list->lock, flags);
3239*4882a593Smuzhiyun }
3240*4882a593Smuzhiyun EXPORT_SYMBOL(skb_append);
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun static inline void skb_split_inside_header(struct sk_buff *skb,
3243*4882a593Smuzhiyun 					   struct sk_buff* skb1,
3244*4882a593Smuzhiyun 					   const u32 len, const int pos)
3245*4882a593Smuzhiyun {
3246*4882a593Smuzhiyun 	int i;
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
3249*4882a593Smuzhiyun 					 pos - len);
3250*4882a593Smuzhiyun 	/* And move data appendix as is. */
3251*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
3252*4882a593Smuzhiyun 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
3253*4882a593Smuzhiyun 
3254*4882a593Smuzhiyun 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
3255*4882a593Smuzhiyun 	skb_shinfo(skb)->nr_frags  = 0;
3256*4882a593Smuzhiyun 	skb1->data_len		   = skb->data_len;
3257*4882a593Smuzhiyun 	skb1->len		   += skb1->data_len;
3258*4882a593Smuzhiyun 	skb->data_len		   = 0;
3259*4882a593Smuzhiyun 	skb->len		   = len;
3260*4882a593Smuzhiyun 	skb_set_tail_pointer(skb, len);
3261*4882a593Smuzhiyun }
3262*4882a593Smuzhiyun 
3263*4882a593Smuzhiyun static inline void skb_split_no_header(struct sk_buff *skb,
3264*4882a593Smuzhiyun 				       struct sk_buff* skb1,
3265*4882a593Smuzhiyun 				       const u32 len, int pos)
3266*4882a593Smuzhiyun {
3267*4882a593Smuzhiyun 	int i, k = 0;
3268*4882a593Smuzhiyun 	const int nfrags = skb_shinfo(skb)->nr_frags;
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	skb_shinfo(skb)->nr_frags = 0;
3271*4882a593Smuzhiyun 	skb1->len		  = skb1->data_len = skb->len - len;
3272*4882a593Smuzhiyun 	skb->len		  = len;
3273*4882a593Smuzhiyun 	skb->data_len		  = len - pos;
3274*4882a593Smuzhiyun 
3275*4882a593Smuzhiyun 	for (i = 0; i < nfrags; i++) {
3276*4882a593Smuzhiyun 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun 		if (pos + size > len) {
3279*4882a593Smuzhiyun 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3280*4882a593Smuzhiyun 
3281*4882a593Smuzhiyun 			if (pos < len) {
3282*4882a593Smuzhiyun 				/* Split frag.
3283*4882a593Smuzhiyun 				 * We have two options in this case:
3284*4882a593Smuzhiyun 				 * 1. Move the whole frag to the second
3285*4882a593Smuzhiyun 				 *    part, if it is possible. E.g. this
3286*4882a593Smuzhiyun 				 *    approach is mandatory for TUX,
3287*4882a593Smuzhiyun 				 *    where splitting is expensive.
3288*4882a593Smuzhiyun 				 * 2. Split the frag accurately at the
3289*4882a593Smuzhiyun 				 *    boundary, which is what we do here.
3290*4882a593Smuzhiyun 				 */
3290*4882a593Smuzhiyun 				skb_frag_ref(skb, i);
3291*4882a593Smuzhiyun 				skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
3292*4882a593Smuzhiyun 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3293*4882a593Smuzhiyun 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3294*4882a593Smuzhiyun 				skb_shinfo(skb)->nr_frags++;
3295*4882a593Smuzhiyun 			}
3296*4882a593Smuzhiyun 			k++;
3297*4882a593Smuzhiyun 		} else
3298*4882a593Smuzhiyun 			skb_shinfo(skb)->nr_frags++;
3299*4882a593Smuzhiyun 		pos += size;
3300*4882a593Smuzhiyun 	}
3301*4882a593Smuzhiyun 	skb_shinfo(skb1)->nr_frags = k;
3302*4882a593Smuzhiyun }
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun /**
3305*4882a593Smuzhiyun  * skb_split - Split fragmented skb to two parts at length len.
3306*4882a593Smuzhiyun  * @skb: the buffer to split
3307*4882a593Smuzhiyun  * @skb1: the buffer to receive the second part
3308*4882a593Smuzhiyun  * @len: new length for skb
3309*4882a593Smuzhiyun  */
3310*4882a593Smuzhiyun void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3311*4882a593Smuzhiyun {
3312*4882a593Smuzhiyun 	int pos = skb_headlen(skb);
3313*4882a593Smuzhiyun 
3314*4882a593Smuzhiyun 	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3315*4882a593Smuzhiyun 				      SKBTX_SHARED_FRAG;
3316*4882a593Smuzhiyun 	skb_zerocopy_clone(skb1, skb, 0);
3317*4882a593Smuzhiyun 	if (len < pos)	/* Split line is inside header. */
3318*4882a593Smuzhiyun 		skb_split_inside_header(skb, skb1, len, pos);
3319*4882a593Smuzhiyun 	else		/* Second chunk has no header, nothing to copy. */
3320*4882a593Smuzhiyun 		skb_split_no_header(skb, skb1, len, pos);
3321*4882a593Smuzhiyun }
3322*4882a593Smuzhiyun EXPORT_SYMBOL(skb_split);
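
/*
 * Usage sketch (illustrative): split a packet at @len in the style of
 * TCP segmentation helpers. skb1 needs room for any linear bytes that
 * move when the split point falls inside the header, so allocating
 * skb_headlen(skb) covers the worst case. Assumes len < skb->len; the
 * helper name is hypothetical.
 */
static struct sk_buff *__maybe_unused example_split(struct sk_buff *skb,
						    u32 len, gfp_t gfp)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), gfp);

	if (!skb1)
		return NULL;

	/* Afterwards skb covers bytes [0, len) and skb1 the remainder. */
	skb_split(skb, skb1, len);
	return skb1;
}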
3323*4882a593Smuzhiyun 
3324*4882a593Smuzhiyun /* Shifting from/to a cloned skb is a no-go.
3325*4882a593Smuzhiyun  *
3326*4882a593Smuzhiyun  * Caller cannot keep skb_shinfo related pointers past calling here!
3327*4882a593Smuzhiyun  */
3328*4882a593Smuzhiyun static int skb_prepare_for_shift(struct sk_buff *skb)
3329*4882a593Smuzhiyun {
3330*4882a593Smuzhiyun 	return skb_unclone_keeptruesize(skb, GFP_ATOMIC);
3331*4882a593Smuzhiyun }
3332*4882a593Smuzhiyun 
3333*4882a593Smuzhiyun /**
3334*4882a593Smuzhiyun  * skb_shift - Shifts paged data partially from skb to another
3335*4882a593Smuzhiyun  * @tgt: buffer into which tail data gets added
3336*4882a593Smuzhiyun  * @skb: buffer from which the paged data comes from
3337*4882a593Smuzhiyun  * @shiftlen: shift up to this many bytes
3338*4882a593Smuzhiyun  *
3339*4882a593Smuzhiyun  * Attempts to shift up to shiftlen worth of bytes, which may be less than
3340*4882a593Smuzhiyun  * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3341*4882a593Smuzhiyun  * It's up to caller to free skb if everything was shifted.
3342*4882a593Smuzhiyun  *
3343*4882a593Smuzhiyun  * If @tgt runs out of frags, the whole operation is aborted.
3344*4882a593Smuzhiyun  *
3345*4882a593Smuzhiyun  * The skb must contain nothing but paged data, while tgt may also
3346*4882a593Smuzhiyun  * hold non-paged data.
3347*4882a593Smuzhiyun  *
3348*4882a593Smuzhiyun  * TODO: full sized shift could be optimized but that would need
3349*4882a593Smuzhiyun  * specialized skb free'er to handle frags without up-to-date nr_frags.
3350*4882a593Smuzhiyun  */
3351*4882a593Smuzhiyun int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3352*4882a593Smuzhiyun {
3353*4882a593Smuzhiyun 	int from, to, merge, todo;
3354*4882a593Smuzhiyun 	skb_frag_t *fragfrom, *fragto;
3355*4882a593Smuzhiyun 
3356*4882a593Smuzhiyun 	BUG_ON(shiftlen > skb->len);
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun 	if (skb_headlen(skb))
3359*4882a593Smuzhiyun 		return 0;
3360*4882a593Smuzhiyun 	if (skb_zcopy(tgt) || skb_zcopy(skb))
3361*4882a593Smuzhiyun 		return 0;
3362*4882a593Smuzhiyun 
3363*4882a593Smuzhiyun 	todo = shiftlen;
3364*4882a593Smuzhiyun 	from = 0;
3365*4882a593Smuzhiyun 	to = skb_shinfo(tgt)->nr_frags;
3366*4882a593Smuzhiyun 	fragfrom = &skb_shinfo(skb)->frags[from];
3367*4882a593Smuzhiyun 
3368*4882a593Smuzhiyun 	/* Actual merge is delayed until the point when we know we can
3369*4882a593Smuzhiyun 	 * commit all, so that we don't have to undo partial changes
3370*4882a593Smuzhiyun 	 */
3371*4882a593Smuzhiyun 	if (!to ||
3372*4882a593Smuzhiyun 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3373*4882a593Smuzhiyun 			      skb_frag_off(fragfrom))) {
3374*4882a593Smuzhiyun 		merge = -1;
3375*4882a593Smuzhiyun 	} else {
3376*4882a593Smuzhiyun 		merge = to - 1;
3377*4882a593Smuzhiyun 
3378*4882a593Smuzhiyun 		todo -= skb_frag_size(fragfrom);
3379*4882a593Smuzhiyun 		if (todo < 0) {
3380*4882a593Smuzhiyun 			if (skb_prepare_for_shift(skb) ||
3381*4882a593Smuzhiyun 			    skb_prepare_for_shift(tgt))
3382*4882a593Smuzhiyun 				return 0;
3383*4882a593Smuzhiyun 
3384*4882a593Smuzhiyun 			/* All previous frag pointers might be stale! */
3385*4882a593Smuzhiyun 			fragfrom = &skb_shinfo(skb)->frags[from];
3386*4882a593Smuzhiyun 			fragto = &skb_shinfo(tgt)->frags[merge];
3387*4882a593Smuzhiyun 
3388*4882a593Smuzhiyun 			skb_frag_size_add(fragto, shiftlen);
3389*4882a593Smuzhiyun 			skb_frag_size_sub(fragfrom, shiftlen);
3390*4882a593Smuzhiyun 			skb_frag_off_add(fragfrom, shiftlen);
3391*4882a593Smuzhiyun 
3392*4882a593Smuzhiyun 			goto onlymerged;
3393*4882a593Smuzhiyun 		}
3394*4882a593Smuzhiyun 
3395*4882a593Smuzhiyun 		from++;
3396*4882a593Smuzhiyun 	}
3397*4882a593Smuzhiyun 
3398*4882a593Smuzhiyun 	/* Skip full, not-fitting skb to avoid expensive operations */
3399*4882a593Smuzhiyun 	if ((shiftlen == skb->len) &&
3400*4882a593Smuzhiyun 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3401*4882a593Smuzhiyun 		return 0;
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3404*4882a593Smuzhiyun 		return 0;
3405*4882a593Smuzhiyun 
3406*4882a593Smuzhiyun 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3407*4882a593Smuzhiyun 		if (to == MAX_SKB_FRAGS)
3408*4882a593Smuzhiyun 			return 0;
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun 		fragfrom = &skb_shinfo(skb)->frags[from];
3411*4882a593Smuzhiyun 		fragto = &skb_shinfo(tgt)->frags[to];
3412*4882a593Smuzhiyun 
3413*4882a593Smuzhiyun 		if (todo >= skb_frag_size(fragfrom)) {
3414*4882a593Smuzhiyun 			*fragto = *fragfrom;
3415*4882a593Smuzhiyun 			todo -= skb_frag_size(fragfrom);
3416*4882a593Smuzhiyun 			from++;
3417*4882a593Smuzhiyun 			to++;
3418*4882a593Smuzhiyun 
3419*4882a593Smuzhiyun 		} else {
3420*4882a593Smuzhiyun 			__skb_frag_ref(fragfrom);
3421*4882a593Smuzhiyun 			skb_frag_page_copy(fragto, fragfrom);
3422*4882a593Smuzhiyun 			skb_frag_off_copy(fragto, fragfrom);
3423*4882a593Smuzhiyun 			skb_frag_size_set(fragto, todo);
3424*4882a593Smuzhiyun 
3425*4882a593Smuzhiyun 			skb_frag_off_add(fragfrom, todo);
3426*4882a593Smuzhiyun 			skb_frag_size_sub(fragfrom, todo);
3427*4882a593Smuzhiyun 			todo = 0;
3428*4882a593Smuzhiyun 
3429*4882a593Smuzhiyun 			to++;
3430*4882a593Smuzhiyun 			break;
3431*4882a593Smuzhiyun 		}
3432*4882a593Smuzhiyun 	}
3433*4882a593Smuzhiyun 
3434*4882a593Smuzhiyun 	/* Ready to "commit" this state change to tgt */
3435*4882a593Smuzhiyun 	skb_shinfo(tgt)->nr_frags = to;
3436*4882a593Smuzhiyun 
3437*4882a593Smuzhiyun 	if (merge >= 0) {
3438*4882a593Smuzhiyun 		fragfrom = &skb_shinfo(skb)->frags[0];
3439*4882a593Smuzhiyun 		fragto = &skb_shinfo(tgt)->frags[merge];
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3442*4882a593Smuzhiyun 		__skb_frag_unref(fragfrom);
3443*4882a593Smuzhiyun 	}
3444*4882a593Smuzhiyun 
3445*4882a593Smuzhiyun 	/* Reposition in the original skb */
3446*4882a593Smuzhiyun 	to = 0;
3447*4882a593Smuzhiyun 	while (from < skb_shinfo(skb)->nr_frags)
3448*4882a593Smuzhiyun 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3449*4882a593Smuzhiyun 	skb_shinfo(skb)->nr_frags = to;
3450*4882a593Smuzhiyun 
3451*4882a593Smuzhiyun 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3452*4882a593Smuzhiyun 
3453*4882a593Smuzhiyun onlymerged:
3454*4882a593Smuzhiyun 	/* Most likely the tgt won't ever need its checksum anymore, skb on
3455*4882a593Smuzhiyun 	 * the other hand might need it if it needs to be resent
3456*4882a593Smuzhiyun 	 */
3457*4882a593Smuzhiyun 	tgt->ip_summed = CHECKSUM_PARTIAL;
3458*4882a593Smuzhiyun 	skb->ip_summed = CHECKSUM_PARTIAL;
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun 	/* Yak, is it really working this way? Some helper please? */
3461*4882a593Smuzhiyun 	skb->len -= shiftlen;
3462*4882a593Smuzhiyun 	skb->data_len -= shiftlen;
3463*4882a593Smuzhiyun 	skb->truesize -= shiftlen;
3464*4882a593Smuzhiyun 	tgt->len += shiftlen;
3465*4882a593Smuzhiyun 	tgt->data_len += shiftlen;
3466*4882a593Smuzhiyun 	tgt->truesize += shiftlen;
3467*4882a593Smuzhiyun 
3468*4882a593Smuzhiyun 	return shiftlen;
3469*4882a593Smuzhiyun }
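
/* Illustrative sketch (hypothetical caller, modelled on the TCP collapse
 * use case): try to move @len paged bytes from @skb into the preceding
 * buffer. demo_try_shift() and its policy are assumptions, shown only to
 * demonstrate how skb_shift() is meant to be driven.
 */
static bool demo_try_shift(struct sk_buff *prev, struct sk_buff *skb, int len)
{
	if (len > skb->len)		/* skb_shift() BUG_ONs otherwise */
		return false;
	/* skb_shift() returns the number of bytes moved, or 0 when nothing
	 * could be shifted (linear data present, zerocopy pages, no frag
	 * slot available in @prev).
	 */
	return skb_shift(prev, skb, len) == len;
}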
3470*4882a593Smuzhiyun 
3471*4882a593Smuzhiyun /**
3472*4882a593Smuzhiyun  * skb_prepare_seq_read - Prepare a sequential read of skb data
3473*4882a593Smuzhiyun  * @skb: the buffer to read
3474*4882a593Smuzhiyun  * @from: lower offset of data to be read
3475*4882a593Smuzhiyun  * @to: upper offset of data to be read
3476*4882a593Smuzhiyun  * @st: state variable
3477*4882a593Smuzhiyun  *
3478*4882a593Smuzhiyun  * Initializes the specified state variable. Must be called before
3479*4882a593Smuzhiyun  * invoking skb_seq_read() for the first time.
3480*4882a593Smuzhiyun  */
3481*4882a593Smuzhiyun void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3482*4882a593Smuzhiyun 			  unsigned int to, struct skb_seq_state *st)
3483*4882a593Smuzhiyun {
3484*4882a593Smuzhiyun 	st->lower_offset = from;
3485*4882a593Smuzhiyun 	st->upper_offset = to;
3486*4882a593Smuzhiyun 	st->root_skb = st->cur_skb = skb;
3487*4882a593Smuzhiyun 	st->frag_idx = st->stepped_offset = 0;
3488*4882a593Smuzhiyun 	st->frag_data = NULL;
3489*4882a593Smuzhiyun }
3490*4882a593Smuzhiyun EXPORT_SYMBOL(skb_prepare_seq_read);
3491*4882a593Smuzhiyun 
3492*4882a593Smuzhiyun /**
3493*4882a593Smuzhiyun  * skb_seq_read - Sequentially read skb data
3494*4882a593Smuzhiyun  * @consumed: number of bytes consumed by the caller so far
3495*4882a593Smuzhiyun  * @data: destination pointer for data to be returned
3496*4882a593Smuzhiyun  * @st: state variable
3497*4882a593Smuzhiyun  *
3498*4882a593Smuzhiyun  * Reads a block of skb data at @consumed relative to the
3499*4882a593Smuzhiyun  * lower offset specified to skb_prepare_seq_read(). Assigns
3500*4882a593Smuzhiyun  * the head of the data block to @data and returns the length
3501*4882a593Smuzhiyun  * of the block or 0 if the end of the skb data or the upper
3502*4882a593Smuzhiyun  * offset has been reached.
3503*4882a593Smuzhiyun  *
3504*4882a593Smuzhiyun  * The caller is not required to consume all of the data
3505*4882a593Smuzhiyun  * returned, i.e. @consumed is typically set to the number
3506*4882a593Smuzhiyun  * of bytes already consumed and the next call to
3507*4882a593Smuzhiyun  * skb_seq_read() will return the remaining part of the block.
3508*4882a593Smuzhiyun  *
3509*4882a593Smuzhiyun  * Note 1: The size of each block of data returned can be arbitrary;
3510*4882a593Smuzhiyun  *       this limitation is the cost of zerocopy sequential
3511*4882a593Smuzhiyun  *       reads of potentially non-linear data.
3512*4882a593Smuzhiyun  *
3513*4882a593Smuzhiyun  * Note 2: Fragment lists within fragments are not implemented
3514*4882a593Smuzhiyun  *       at the moment, state->root_skb could be replaced with
3515*4882a593Smuzhiyun  *       a stack for this purpose.
3516*4882a593Smuzhiyun  */
3517*4882a593Smuzhiyun unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3518*4882a593Smuzhiyun 			  struct skb_seq_state *st)
3519*4882a593Smuzhiyun {
3520*4882a593Smuzhiyun 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3521*4882a593Smuzhiyun 	skb_frag_t *frag;
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 	if (unlikely(abs_offset >= st->upper_offset)) {
3524*4882a593Smuzhiyun 		if (st->frag_data) {
3525*4882a593Smuzhiyun 			kunmap_atomic(st->frag_data);
3526*4882a593Smuzhiyun 			st->frag_data = NULL;
3527*4882a593Smuzhiyun 		}
3528*4882a593Smuzhiyun 		return 0;
3529*4882a593Smuzhiyun 	}
3530*4882a593Smuzhiyun 
3531*4882a593Smuzhiyun next_skb:
3532*4882a593Smuzhiyun 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3533*4882a593Smuzhiyun 
3534*4882a593Smuzhiyun 	if (abs_offset < block_limit && !st->frag_data) {
3535*4882a593Smuzhiyun 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3536*4882a593Smuzhiyun 		return block_limit - abs_offset;
3537*4882a593Smuzhiyun 	}
3538*4882a593Smuzhiyun 
3539*4882a593Smuzhiyun 	if (st->frag_idx == 0 && !st->frag_data)
3540*4882a593Smuzhiyun 		st->stepped_offset += skb_headlen(st->cur_skb);
3541*4882a593Smuzhiyun 
3542*4882a593Smuzhiyun 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3543*4882a593Smuzhiyun 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3544*4882a593Smuzhiyun 		block_limit = skb_frag_size(frag) + st->stepped_offset;
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 		if (abs_offset < block_limit) {
3547*4882a593Smuzhiyun 			if (!st->frag_data)
3548*4882a593Smuzhiyun 				st->frag_data = kmap_atomic(skb_frag_page(frag));
3549*4882a593Smuzhiyun 
3550*4882a593Smuzhiyun 			*data = (u8 *) st->frag_data + skb_frag_off(frag) +
3551*4882a593Smuzhiyun 				(abs_offset - st->stepped_offset);
3552*4882a593Smuzhiyun 
3553*4882a593Smuzhiyun 			return block_limit - abs_offset;
3554*4882a593Smuzhiyun 		}
3555*4882a593Smuzhiyun 
3556*4882a593Smuzhiyun 		if (st->frag_data) {
3557*4882a593Smuzhiyun 			kunmap_atomic(st->frag_data);
3558*4882a593Smuzhiyun 			st->frag_data = NULL;
3559*4882a593Smuzhiyun 		}
3560*4882a593Smuzhiyun 
3561*4882a593Smuzhiyun 		st->frag_idx++;
3562*4882a593Smuzhiyun 		st->stepped_offset += skb_frag_size(frag);
3563*4882a593Smuzhiyun 	}
3564*4882a593Smuzhiyun 
3565*4882a593Smuzhiyun 	if (st->frag_data) {
3566*4882a593Smuzhiyun 		kunmap_atomic(st->frag_data);
3567*4882a593Smuzhiyun 		st->frag_data = NULL;
3568*4882a593Smuzhiyun 	}
3569*4882a593Smuzhiyun 
3570*4882a593Smuzhiyun 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3571*4882a593Smuzhiyun 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3572*4882a593Smuzhiyun 		st->frag_idx = 0;
3573*4882a593Smuzhiyun 		goto next_skb;
3574*4882a593Smuzhiyun 	} else if (st->cur_skb->next) {
3575*4882a593Smuzhiyun 		st->cur_skb = st->cur_skb->next;
3576*4882a593Smuzhiyun 		st->frag_idx = 0;
3577*4882a593Smuzhiyun 		goto next_skb;
3578*4882a593Smuzhiyun 	}
3579*4882a593Smuzhiyun 
3580*4882a593Smuzhiyun 	return 0;
3581*4882a593Smuzhiyun }
3582*4882a593Smuzhiyun EXPORT_SYMBOL(skb_seq_read);
3583*4882a593Smuzhiyun 
3584*4882a593Smuzhiyun /**
3585*4882a593Smuzhiyun  * skb_abort_seq_read - Abort a sequential read of skb data
3586*4882a593Smuzhiyun  * @st: state variable
3587*4882a593Smuzhiyun  *
3588*4882a593Smuzhiyun  * Must be called if the caller stops reading before skb_seq_read()
3589*4882a593Smuzhiyun  * has returned 0.
3590*4882a593Smuzhiyun  */
3591*4882a593Smuzhiyun void skb_abort_seq_read(struct skb_seq_state *st)
3592*4882a593Smuzhiyun {
3593*4882a593Smuzhiyun 	if (st->frag_data)
3594*4882a593Smuzhiyun 		kunmap_atomic(st->frag_data);
3595*4882a593Smuzhiyun }
3596*4882a593Smuzhiyun EXPORT_SYMBOL(skb_abort_seq_read);
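
/* Illustrative sketch: the canonical prepare/read loop over a possibly
 * non-linear skb. seq_sum_bytes() is a hypothetical helper; the skb_seq_*
 * calls match the APIs defined above.
 */
static unsigned int seq_sum_bytes(struct sk_buff *skb, unsigned int from,
				  unsigned int to)
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len, i, sum = 0;
	const u8 *data;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		for (i = 0; i < len; i++)
			sum += data[i];
		consumed += len;
	}
	/* skb_seq_read() returned 0, so the state is already torn down;
	 * breaking out of the loop early would require skb_abort_seq_read().
	 */
	return sum;
}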
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
3599*4882a593Smuzhiyun 
3600*4882a593Smuzhiyun static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3601*4882a593Smuzhiyun 					  struct ts_config *conf,
3602*4882a593Smuzhiyun 					  struct ts_state *state)
3603*4882a593Smuzhiyun {
3604*4882a593Smuzhiyun 	return skb_seq_read(offset, text, TS_SKB_CB(state));
3605*4882a593Smuzhiyun }
3606*4882a593Smuzhiyun 
3607*4882a593Smuzhiyun static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3608*4882a593Smuzhiyun {
3609*4882a593Smuzhiyun 	skb_abort_seq_read(TS_SKB_CB(state));
3610*4882a593Smuzhiyun }
3611*4882a593Smuzhiyun 
3612*4882a593Smuzhiyun /**
3613*4882a593Smuzhiyun  * skb_find_text - Find a text pattern in skb data
3614*4882a593Smuzhiyun  * @skb: the buffer to look in
3615*4882a593Smuzhiyun  * @from: search offset
3616*4882a593Smuzhiyun  * @to: search limit
3617*4882a593Smuzhiyun  * @config: textsearch configuration
3618*4882a593Smuzhiyun  *
3619*4882a593Smuzhiyun  * Finds a pattern in the skb data according to the specified
3620*4882a593Smuzhiyun  * textsearch configuration. Use textsearch_next() to retrieve
3621*4882a593Smuzhiyun  * subsequent occurrences of the pattern. Returns the offset
3622*4882a593Smuzhiyun  * to the first occurrence or UINT_MAX if no match was found.
3623*4882a593Smuzhiyun  */
3624*4882a593Smuzhiyun unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3625*4882a593Smuzhiyun 			   unsigned int to, struct ts_config *config)
3626*4882a593Smuzhiyun {
3627*4882a593Smuzhiyun 	struct ts_state state;
3628*4882a593Smuzhiyun 	unsigned int ret;
3629*4882a593Smuzhiyun 
3630*4882a593Smuzhiyun 	config->get_next_block = skb_ts_get_next_block;
3631*4882a593Smuzhiyun 	config->finish = skb_ts_finish;
3632*4882a593Smuzhiyun 
3633*4882a593Smuzhiyun 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3634*4882a593Smuzhiyun 
3635*4882a593Smuzhiyun 	ret = textsearch_find(config, &state);
3636*4882a593Smuzhiyun 	return (ret <= to - from ? ret : UINT_MAX);
3637*4882a593Smuzhiyun }
3638*4882a593Smuzhiyun EXPORT_SYMBOL(skb_find_text);
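
/* Illustrative sketch: a hypothetical predicate built on skb_find_text().
 * textsearch_prepare()/textsearch_destroy() are the standard
 * <linux/textsearch.h> entry points.
 */
static bool demo_skb_contains(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}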
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3641*4882a593Smuzhiyun 			 int offset, size_t size)
3642*4882a593Smuzhiyun {
3643*4882a593Smuzhiyun 	int i = skb_shinfo(skb)->nr_frags;
3644*4882a593Smuzhiyun 
3645*4882a593Smuzhiyun 	if (skb_can_coalesce(skb, i, page, offset)) {
3646*4882a593Smuzhiyun 		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3647*4882a593Smuzhiyun 	} else if (i < MAX_SKB_FRAGS) {
3648*4882a593Smuzhiyun 		get_page(page);
3649*4882a593Smuzhiyun 		skb_fill_page_desc(skb, i, page, offset, size);
3650*4882a593Smuzhiyun 	} else {
3651*4882a593Smuzhiyun 		return -EMSGSIZE;
3652*4882a593Smuzhiyun 	}
3653*4882a593Smuzhiyun 
3654*4882a593Smuzhiyun 	return 0;
3655*4882a593Smuzhiyun }
3656*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_append_pagefrags);
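
/* Illustrative sketch (hypothetical caller): appending page data and doing
 * the length accounting that skb_append_pagefrags() deliberately leaves to
 * its caller. The function takes its own page reference on success.
 */
static int demo_append_page(struct sk_buff *skb, struct page *page,
			    int offset, size_t size)
{
	int err = skb_append_pagefrags(skb, page, offset, size);

	if (err)			/* -EMSGSIZE: no frag slot left */
		return err;

	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
	return 0;
}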
3657*4882a593Smuzhiyun 
3658*4882a593Smuzhiyun /**
3659*4882a593Smuzhiyun  *	skb_pull_rcsum - pull skb and update receive checksum
3660*4882a593Smuzhiyun  *	@skb: buffer to update
3661*4882a593Smuzhiyun  *	@len: length of data pulled
3662*4882a593Smuzhiyun  *
3663*4882a593Smuzhiyun  *	This function performs an skb_pull on the packet and updates
3664*4882a593Smuzhiyun  *	the CHECKSUM_COMPLETE checksum.  It should be used on
3665*4882a593Smuzhiyun  *	receive path processing instead of skb_pull unless you know
3666*4882a593Smuzhiyun  *	that the checksum difference is zero (e.g., a valid IP header)
3667*4882a593Smuzhiyun  *	or you are setting ip_summed to CHECKSUM_NONE.
3668*4882a593Smuzhiyun  */
3669*4882a593Smuzhiyun void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3670*4882a593Smuzhiyun {
3671*4882a593Smuzhiyun 	unsigned char *data = skb->data;
3672*4882a593Smuzhiyun 
3673*4882a593Smuzhiyun 	BUG_ON(len > skb->len);
3674*4882a593Smuzhiyun 	__skb_pull(skb, len);
3675*4882a593Smuzhiyun 	skb_postpull_rcsum(skb, data, len);
3676*4882a593Smuzhiyun 	return skb->data;
3677*4882a593Smuzhiyun }
3678*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_pull_rcsum);
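
/* Illustrative sketch: stripping a fixed-size encapsulation header on the
 * receive path while keeping a CHECKSUM_COMPLETE value consistent.
 * DEMO_HDR_LEN and the function are hypothetical.
 */
#define DEMO_HDR_LEN 8

static int demo_strip_encap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, DEMO_HDR_LEN))
		return -EINVAL;
	skb_pull_rcsum(skb, DEMO_HDR_LEN);	/* pull + checksum update */
	return 0;
}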
3679*4882a593Smuzhiyun 
3680*4882a593Smuzhiyun static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3681*4882a593Smuzhiyun {
3682*4882a593Smuzhiyun 	skb_frag_t head_frag;
3683*4882a593Smuzhiyun 	struct page *page;
3684*4882a593Smuzhiyun 
3685*4882a593Smuzhiyun 	page = virt_to_head_page(frag_skb->head);
3686*4882a593Smuzhiyun 	__skb_frag_set_page(&head_frag, page);
3687*4882a593Smuzhiyun 	skb_frag_off_set(&head_frag, frag_skb->data -
3688*4882a593Smuzhiyun 			 (unsigned char *)page_address(page));
3689*4882a593Smuzhiyun 	skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
3690*4882a593Smuzhiyun 	return head_frag;
3691*4882a593Smuzhiyun }
3692*4882a593Smuzhiyun 
3693*4882a593Smuzhiyun struct sk_buff *skb_segment_list(struct sk_buff *skb,
3694*4882a593Smuzhiyun 				 netdev_features_t features,
3695*4882a593Smuzhiyun 				 unsigned int offset)
3696*4882a593Smuzhiyun {
3697*4882a593Smuzhiyun 	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
3698*4882a593Smuzhiyun 	unsigned int tnl_hlen = skb_tnl_header_len(skb);
3699*4882a593Smuzhiyun 	unsigned int delta_truesize = 0;
3700*4882a593Smuzhiyun 	unsigned int delta_len = 0;
3701*4882a593Smuzhiyun 	struct sk_buff *tail = NULL;
3702*4882a593Smuzhiyun 	struct sk_buff *nskb, *tmp;
3703*4882a593Smuzhiyun 	int len_diff, err;
3704*4882a593Smuzhiyun 
3705*4882a593Smuzhiyun 	skb_push(skb, -skb_network_offset(skb) + offset);
3706*4882a593Smuzhiyun 
3707*4882a593Smuzhiyun 	skb_shinfo(skb)->frag_list = NULL;
3708*4882a593Smuzhiyun 
3709*4882a593Smuzhiyun 	do {
3710*4882a593Smuzhiyun 		nskb = list_skb;
3711*4882a593Smuzhiyun 		list_skb = list_skb->next;
3712*4882a593Smuzhiyun 
3713*4882a593Smuzhiyun 		err = 0;
3714*4882a593Smuzhiyun 		delta_truesize += nskb->truesize;
3715*4882a593Smuzhiyun 		if (skb_shared(nskb)) {
3716*4882a593Smuzhiyun 			tmp = skb_clone(nskb, GFP_ATOMIC);
3717*4882a593Smuzhiyun 			if (tmp) {
3718*4882a593Smuzhiyun 				consume_skb(nskb);
3719*4882a593Smuzhiyun 				nskb = tmp;
3720*4882a593Smuzhiyun 				err = skb_unclone(nskb, GFP_ATOMIC);
3721*4882a593Smuzhiyun 			} else {
3722*4882a593Smuzhiyun 				err = -ENOMEM;
3723*4882a593Smuzhiyun 			}
3724*4882a593Smuzhiyun 		}
3725*4882a593Smuzhiyun 
3726*4882a593Smuzhiyun 		if (!tail)
3727*4882a593Smuzhiyun 			skb->next = nskb;
3728*4882a593Smuzhiyun 		else
3729*4882a593Smuzhiyun 			tail->next = nskb;
3730*4882a593Smuzhiyun 
3731*4882a593Smuzhiyun 		if (unlikely(err)) {
3732*4882a593Smuzhiyun 			nskb->next = list_skb;
3733*4882a593Smuzhiyun 			goto err_linearize;
3734*4882a593Smuzhiyun 		}
3735*4882a593Smuzhiyun 
3736*4882a593Smuzhiyun 		tail = nskb;
3737*4882a593Smuzhiyun 
3738*4882a593Smuzhiyun 		delta_len += nskb->len;
3739*4882a593Smuzhiyun 
3740*4882a593Smuzhiyun 		skb_push(nskb, -skb_network_offset(nskb) + offset);
3741*4882a593Smuzhiyun 
3742*4882a593Smuzhiyun 		skb_release_head_state(nskb);
3743*4882a593Smuzhiyun 		len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb);
3744*4882a593Smuzhiyun 		__copy_skb_header(nskb, skb);
3745*4882a593Smuzhiyun 
3746*4882a593Smuzhiyun 		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
3747*4882a593Smuzhiyun 		nskb->transport_header += len_diff;
3748*4882a593Smuzhiyun 		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
3749*4882a593Smuzhiyun 						 nskb->data - tnl_hlen,
3750*4882a593Smuzhiyun 						 offset + tnl_hlen);
3751*4882a593Smuzhiyun 
3752*4882a593Smuzhiyun 		if (skb_needs_linearize(nskb, features) &&
3753*4882a593Smuzhiyun 		    __skb_linearize(nskb))
3754*4882a593Smuzhiyun 			goto err_linearize;
3755*4882a593Smuzhiyun 
3756*4882a593Smuzhiyun 	} while (list_skb);
3757*4882a593Smuzhiyun 
3758*4882a593Smuzhiyun 	skb->truesize = skb->truesize - delta_truesize;
3759*4882a593Smuzhiyun 	skb->data_len = skb->data_len - delta_len;
3760*4882a593Smuzhiyun 	skb->len = skb->len - delta_len;
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun 	skb_gso_reset(skb);
3763*4882a593Smuzhiyun 
3764*4882a593Smuzhiyun 	skb->prev = tail;
3765*4882a593Smuzhiyun 
3766*4882a593Smuzhiyun 	if (skb_needs_linearize(skb, features) &&
3767*4882a593Smuzhiyun 	    __skb_linearize(skb))
3768*4882a593Smuzhiyun 		goto err_linearize;
3769*4882a593Smuzhiyun 
3770*4882a593Smuzhiyun 	skb_get(skb);
3771*4882a593Smuzhiyun 
3772*4882a593Smuzhiyun 	return skb;
3773*4882a593Smuzhiyun 
3774*4882a593Smuzhiyun err_linearize:
3775*4882a593Smuzhiyun 	kfree_skb_list(skb->next);
3776*4882a593Smuzhiyun 	skb->next = NULL;
3777*4882a593Smuzhiyun 	return ERR_PTR(-ENOMEM);
3778*4882a593Smuzhiyun }
3779*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_segment_list);
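
/* Illustrative sketch (hypothetical wrapper): splitting a fraglist GRO
 * packet back into its original members. Passing the MAC header length as
 * @offset mirrors the UDP fraglist GSO path, but is an assumption here.
 */
static struct sk_buff *demo_segment_list(struct sk_buff *skb,
					 netdev_features_t features)
{
	/* Returns the head of the segment list or ERR_PTR(-ENOMEM). */
	return skb_segment_list(skb, features, skb_mac_header_len(skb));
}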
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
3782*4882a593Smuzhiyun {
3783*4882a593Smuzhiyun 	if (unlikely(p->len + skb->len >= 65536))
3784*4882a593Smuzhiyun 		return -E2BIG;
3785*4882a593Smuzhiyun 
3786*4882a593Smuzhiyun 	if (NAPI_GRO_CB(p)->last == p)
3787*4882a593Smuzhiyun 		skb_shinfo(p)->frag_list = skb;
3788*4882a593Smuzhiyun 	else
3789*4882a593Smuzhiyun 		NAPI_GRO_CB(p)->last->next = skb;
3790*4882a593Smuzhiyun 
3791*4882a593Smuzhiyun 	skb_pull(skb, skb_gro_offset(skb));
3792*4882a593Smuzhiyun 
3793*4882a593Smuzhiyun 	NAPI_GRO_CB(p)->last = skb;
3794*4882a593Smuzhiyun 	NAPI_GRO_CB(p)->count++;
3795*4882a593Smuzhiyun 	p->data_len += skb->len;
3796*4882a593Smuzhiyun 	p->truesize += skb->truesize;
3797*4882a593Smuzhiyun 	p->len += skb->len;
3798*4882a593Smuzhiyun 
3799*4882a593Smuzhiyun 	NAPI_GRO_CB(skb)->same_flow = 1;
3800*4882a593Smuzhiyun 
3801*4882a593Smuzhiyun 	return 0;
3802*4882a593Smuzhiyun }
3803*4882a593Smuzhiyun 
3804*4882a593Smuzhiyun /**
3805*4882a593Smuzhiyun  *	skb_segment - Perform protocol segmentation on skb.
3806*4882a593Smuzhiyun  *	@head_skb: buffer to segment
3807*4882a593Smuzhiyun  *	@features: features for the output path (see dev->features)
3808*4882a593Smuzhiyun  *
3809*4882a593Smuzhiyun  *	This function performs segmentation on the given skb.  It returns
3810*4882a593Smuzhiyun  *	a pointer to the first in a list of new skbs for the segments.
3811*4882a593Smuzhiyun  *	In case of error it returns ERR_PTR(err).
3812*4882a593Smuzhiyun  */
3813*4882a593Smuzhiyun struct sk_buff *skb_segment(struct sk_buff *head_skb,
3814*4882a593Smuzhiyun 			    netdev_features_t features)
3815*4882a593Smuzhiyun {
3816*4882a593Smuzhiyun 	struct sk_buff *segs = NULL;
3817*4882a593Smuzhiyun 	struct sk_buff *tail = NULL;
3818*4882a593Smuzhiyun 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3819*4882a593Smuzhiyun 	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3820*4882a593Smuzhiyun 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
3821*4882a593Smuzhiyun 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3822*4882a593Smuzhiyun 	struct sk_buff *frag_skb = head_skb;
3823*4882a593Smuzhiyun 	unsigned int offset = doffset;
3824*4882a593Smuzhiyun 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3825*4882a593Smuzhiyun 	unsigned int partial_segs = 0;
3826*4882a593Smuzhiyun 	unsigned int headroom;
3827*4882a593Smuzhiyun 	unsigned int len = head_skb->len;
3828*4882a593Smuzhiyun 	__be16 proto;
3829*4882a593Smuzhiyun 	bool csum, sg;
3830*4882a593Smuzhiyun 	int nfrags = skb_shinfo(head_skb)->nr_frags;
3831*4882a593Smuzhiyun 	int err = -ENOMEM;
3832*4882a593Smuzhiyun 	int i = 0;
3833*4882a593Smuzhiyun 	int pos;
3834*4882a593Smuzhiyun 
3835*4882a593Smuzhiyun 	if ((skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY) &&
3836*4882a593Smuzhiyun 	    mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {
3837*4882a593Smuzhiyun 		struct sk_buff *check_skb;
3838*4882a593Smuzhiyun 
3839*4882a593Smuzhiyun 		for (check_skb = list_skb; check_skb; check_skb = check_skb->next) {
3840*4882a593Smuzhiyun 			if (skb_headlen(check_skb) && !check_skb->head_frag) {
3841*4882a593Smuzhiyun 				/* gso_size is untrusted, and we have a frag_list with
3842*4882a593Smuzhiyun 				 * a linear non head_frag item.
3843*4882a593Smuzhiyun 				 *
3844*4882a593Smuzhiyun 				 * If head_skb's headlen does not fit requested gso_size,
3845*4882a593Smuzhiyun 				 * it means that the frag_list members do NOT terminate
3846*4882a593Smuzhiyun 				 * on exact gso_size boundaries. Hence we cannot perform
3847*4882a593Smuzhiyun 				 * skb_frag_t page sharing. Therefore we must fallback to
3848*4882a593Smuzhiyun 				 * copying the frag_list skbs; we do so by disabling SG.
3849*4882a593Smuzhiyun 				 */
3850*4882a593Smuzhiyun 				features &= ~NETIF_F_SG;
3851*4882a593Smuzhiyun 				break;
3852*4882a593Smuzhiyun 			}
3853*4882a593Smuzhiyun 		}
3854*4882a593Smuzhiyun 	}
3855*4882a593Smuzhiyun 
3856*4882a593Smuzhiyun 	__skb_push(head_skb, doffset);
3857*4882a593Smuzhiyun 	proto = skb_network_protocol(head_skb, NULL);
3858*4882a593Smuzhiyun 	if (unlikely(!proto))
3859*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
3860*4882a593Smuzhiyun 
3861*4882a593Smuzhiyun 	sg = !!(features & NETIF_F_SG);
3862*4882a593Smuzhiyun 	csum = !!can_checksum_protocol(features, proto);
3863*4882a593Smuzhiyun 
3864*4882a593Smuzhiyun 	if (sg && csum && (mss != GSO_BY_FRAGS))  {
3865*4882a593Smuzhiyun 		if (!(features & NETIF_F_GSO_PARTIAL)) {
3866*4882a593Smuzhiyun 			struct sk_buff *iter;
3867*4882a593Smuzhiyun 			unsigned int frag_len;
3868*4882a593Smuzhiyun 
3869*4882a593Smuzhiyun 			if (!list_skb ||
3870*4882a593Smuzhiyun 			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3871*4882a593Smuzhiyun 				goto normal;
3872*4882a593Smuzhiyun 
3873*4882a593Smuzhiyun 			/* If we get here then all the required
3874*4882a593Smuzhiyun 			 * GSO features except frag_list are supported.
3875*4882a593Smuzhiyun 			 * Try to split the SKB to multiple GSO SKBs
3876*4882a593Smuzhiyun 			 * with no frag_list.
3877*4882a593Smuzhiyun 			 * Currently we can do that only when the buffers don't
3878*4882a593Smuzhiyun 			 * have a linear part and all the buffers except
3879*4882a593Smuzhiyun 			 * the last are of the same length.
3880*4882a593Smuzhiyun 			 */
3881*4882a593Smuzhiyun 			frag_len = list_skb->len;
3882*4882a593Smuzhiyun 			skb_walk_frags(head_skb, iter) {
3883*4882a593Smuzhiyun 				if (frag_len != iter->len && iter->next)
3884*4882a593Smuzhiyun 					goto normal;
3885*4882a593Smuzhiyun 				if (skb_headlen(iter) && !iter->head_frag)
3886*4882a593Smuzhiyun 					goto normal;
3887*4882a593Smuzhiyun 
3888*4882a593Smuzhiyun 				len -= iter->len;
3889*4882a593Smuzhiyun 			}
3890*4882a593Smuzhiyun 
3891*4882a593Smuzhiyun 			if (len != frag_len)
3892*4882a593Smuzhiyun 				goto normal;
3893*4882a593Smuzhiyun 		}
3894*4882a593Smuzhiyun 
3895*4882a593Smuzhiyun 		/* GSO partial only requires that we trim off any excess that
3896*4882a593Smuzhiyun 		 * doesn't fit into an MSS sized block, so take care of that
3897*4882a593Smuzhiyun 		 * now.
3898*4882a593Smuzhiyun 		 */
3899*4882a593Smuzhiyun 		partial_segs = len / mss;
3900*4882a593Smuzhiyun 		if (partial_segs > 1)
3901*4882a593Smuzhiyun 			mss *= partial_segs;
3902*4882a593Smuzhiyun 		else
3903*4882a593Smuzhiyun 			partial_segs = 0;
3904*4882a593Smuzhiyun 	}
3905*4882a593Smuzhiyun 
3906*4882a593Smuzhiyun normal:
3907*4882a593Smuzhiyun 	headroom = skb_headroom(head_skb);
3908*4882a593Smuzhiyun 	pos = skb_headlen(head_skb);
3909*4882a593Smuzhiyun 
3910*4882a593Smuzhiyun 	do {
3911*4882a593Smuzhiyun 		struct sk_buff *nskb;
3912*4882a593Smuzhiyun 		skb_frag_t *nskb_frag;
3913*4882a593Smuzhiyun 		int hsize;
3914*4882a593Smuzhiyun 		int size;
3915*4882a593Smuzhiyun 
3916*4882a593Smuzhiyun 		if (unlikely(mss == GSO_BY_FRAGS)) {
3917*4882a593Smuzhiyun 			len = list_skb->len;
3918*4882a593Smuzhiyun 		} else {
3919*4882a593Smuzhiyun 			len = head_skb->len - offset;
3920*4882a593Smuzhiyun 			if (len > mss)
3921*4882a593Smuzhiyun 				len = mss;
3922*4882a593Smuzhiyun 		}
3923*4882a593Smuzhiyun 
3924*4882a593Smuzhiyun 		hsize = skb_headlen(head_skb) - offset;
3925*4882a593Smuzhiyun 		if (hsize < 0)
3926*4882a593Smuzhiyun 			hsize = 0;
3927*4882a593Smuzhiyun 		if (hsize > len || !sg)
3928*4882a593Smuzhiyun 			hsize = len;
3929*4882a593Smuzhiyun 
3930*4882a593Smuzhiyun 		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3931*4882a593Smuzhiyun 		    (skb_headlen(list_skb) == len || sg)) {
3932*4882a593Smuzhiyun 			BUG_ON(skb_headlen(list_skb) > len);
3933*4882a593Smuzhiyun 
3934*4882a593Smuzhiyun 			i = 0;
3935*4882a593Smuzhiyun 			nfrags = skb_shinfo(list_skb)->nr_frags;
3936*4882a593Smuzhiyun 			frag = skb_shinfo(list_skb)->frags;
3937*4882a593Smuzhiyun 			frag_skb = list_skb;
3938*4882a593Smuzhiyun 			pos += skb_headlen(list_skb);
3939*4882a593Smuzhiyun 
3940*4882a593Smuzhiyun 			while (pos < offset + len) {
3941*4882a593Smuzhiyun 				BUG_ON(i >= nfrags);
3942*4882a593Smuzhiyun 
3943*4882a593Smuzhiyun 				size = skb_frag_size(frag);
3944*4882a593Smuzhiyun 				if (pos + size > offset + len)
3945*4882a593Smuzhiyun 					break;
3946*4882a593Smuzhiyun 
3947*4882a593Smuzhiyun 				i++;
3948*4882a593Smuzhiyun 				pos += size;
3949*4882a593Smuzhiyun 				frag++;
3950*4882a593Smuzhiyun 			}
3951*4882a593Smuzhiyun 
3952*4882a593Smuzhiyun 			nskb = skb_clone(list_skb, GFP_ATOMIC);
3953*4882a593Smuzhiyun 			list_skb = list_skb->next;
3954*4882a593Smuzhiyun 
3955*4882a593Smuzhiyun 			if (unlikely(!nskb))
3956*4882a593Smuzhiyun 				goto err;
3957*4882a593Smuzhiyun 
3958*4882a593Smuzhiyun 			if (unlikely(pskb_trim(nskb, len))) {
3959*4882a593Smuzhiyun 				kfree_skb(nskb);
3960*4882a593Smuzhiyun 				goto err;
3961*4882a593Smuzhiyun 			}
3962*4882a593Smuzhiyun 
3963*4882a593Smuzhiyun 			hsize = skb_end_offset(nskb);
3964*4882a593Smuzhiyun 			if (skb_cow_head(nskb, doffset + headroom)) {
3965*4882a593Smuzhiyun 				kfree_skb(nskb);
3966*4882a593Smuzhiyun 				goto err;
3967*4882a593Smuzhiyun 			}
3968*4882a593Smuzhiyun 
3969*4882a593Smuzhiyun 			nskb->truesize += skb_end_offset(nskb) - hsize;
3970*4882a593Smuzhiyun 			skb_release_head_state(nskb);
3971*4882a593Smuzhiyun 			__skb_push(nskb, doffset);
3972*4882a593Smuzhiyun 		} else {
3973*4882a593Smuzhiyun 			nskb = __alloc_skb(hsize + doffset + headroom,
3974*4882a593Smuzhiyun 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3975*4882a593Smuzhiyun 					   NUMA_NO_NODE);
3976*4882a593Smuzhiyun 
3977*4882a593Smuzhiyun 			if (unlikely(!nskb))
3978*4882a593Smuzhiyun 				goto err;
3979*4882a593Smuzhiyun 
3980*4882a593Smuzhiyun 			skb_reserve(nskb, headroom);
3981*4882a593Smuzhiyun 			__skb_put(nskb, doffset);
3982*4882a593Smuzhiyun 		}
3983*4882a593Smuzhiyun 
3984*4882a593Smuzhiyun 		if (segs)
3985*4882a593Smuzhiyun 			tail->next = nskb;
3986*4882a593Smuzhiyun 		else
3987*4882a593Smuzhiyun 			segs = nskb;
3988*4882a593Smuzhiyun 		tail = nskb;
3989*4882a593Smuzhiyun 
3990*4882a593Smuzhiyun 		__copy_skb_header(nskb, head_skb);
3991*4882a593Smuzhiyun 
3992*4882a593Smuzhiyun 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3993*4882a593Smuzhiyun 		skb_reset_mac_len(nskb);
3994*4882a593Smuzhiyun 
3995*4882a593Smuzhiyun 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3996*4882a593Smuzhiyun 						 nskb->data - tnl_hlen,
3997*4882a593Smuzhiyun 						 doffset + tnl_hlen);
3998*4882a593Smuzhiyun 
3999*4882a593Smuzhiyun 		if (nskb->len == len + doffset)
4000*4882a593Smuzhiyun 			goto perform_csum_check;
4001*4882a593Smuzhiyun 
4002*4882a593Smuzhiyun 		if (!sg) {
4003*4882a593Smuzhiyun 			if (!csum) {
4004*4882a593Smuzhiyun 				if (!nskb->remcsum_offload)
4005*4882a593Smuzhiyun 					nskb->ip_summed = CHECKSUM_NONE;
4006*4882a593Smuzhiyun 				SKB_GSO_CB(nskb)->csum =
4007*4882a593Smuzhiyun 					skb_copy_and_csum_bits(head_skb, offset,
4008*4882a593Smuzhiyun 							       skb_put(nskb,
4009*4882a593Smuzhiyun 								       len),
4010*4882a593Smuzhiyun 							       len);
4011*4882a593Smuzhiyun 				SKB_GSO_CB(nskb)->csum_start =
4012*4882a593Smuzhiyun 					skb_headroom(nskb) + doffset;
4013*4882a593Smuzhiyun 			} else {
4014*4882a593Smuzhiyun 				if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len))
4015*4882a593Smuzhiyun 					goto err;
4016*4882a593Smuzhiyun 			}
4017*4882a593Smuzhiyun 			continue;
4018*4882a593Smuzhiyun 		}
4019*4882a593Smuzhiyun 
4020*4882a593Smuzhiyun 		nskb_frag = skb_shinfo(nskb)->frags;
4021*4882a593Smuzhiyun 
4022*4882a593Smuzhiyun 		skb_copy_from_linear_data_offset(head_skb, offset,
4023*4882a593Smuzhiyun 						 skb_put(nskb, hsize), hsize);
4024*4882a593Smuzhiyun 
4025*4882a593Smuzhiyun 		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
4026*4882a593Smuzhiyun 					      SKBTX_SHARED_FRAG;
4027*4882a593Smuzhiyun 
4028*4882a593Smuzhiyun 		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4029*4882a593Smuzhiyun 		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
4030*4882a593Smuzhiyun 			goto err;
4031*4882a593Smuzhiyun 
4032*4882a593Smuzhiyun 		while (pos < offset + len) {
4033*4882a593Smuzhiyun 			if (i >= nfrags) {
4034*4882a593Smuzhiyun 				i = 0;
4035*4882a593Smuzhiyun 				nfrags = skb_shinfo(list_skb)->nr_frags;
4036*4882a593Smuzhiyun 				frag = skb_shinfo(list_skb)->frags;
4037*4882a593Smuzhiyun 				frag_skb = list_skb;
4038*4882a593Smuzhiyun 				if (!skb_headlen(list_skb)) {
4039*4882a593Smuzhiyun 					BUG_ON(!nfrags);
4040*4882a593Smuzhiyun 				} else {
4041*4882a593Smuzhiyun 					BUG_ON(!list_skb->head_frag);
4042*4882a593Smuzhiyun 
4043*4882a593Smuzhiyun 					/* to make room for head_frag. */
4044*4882a593Smuzhiyun 					i--;
4045*4882a593Smuzhiyun 					frag--;
4046*4882a593Smuzhiyun 				}
4047*4882a593Smuzhiyun 				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
4048*4882a593Smuzhiyun 				    skb_zerocopy_clone(nskb, frag_skb,
4049*4882a593Smuzhiyun 						       GFP_ATOMIC))
4050*4882a593Smuzhiyun 					goto err;
4051*4882a593Smuzhiyun 
4052*4882a593Smuzhiyun 				list_skb = list_skb->next;
4053*4882a593Smuzhiyun 			}
4054*4882a593Smuzhiyun 
4055*4882a593Smuzhiyun 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
4056*4882a593Smuzhiyun 				     MAX_SKB_FRAGS)) {
4057*4882a593Smuzhiyun 				net_warn_ratelimited(
4058*4882a593Smuzhiyun 					"skb_segment: too many frags: %u %u\n",
4059*4882a593Smuzhiyun 					pos, mss);
4060*4882a593Smuzhiyun 				err = -EINVAL;
4061*4882a593Smuzhiyun 				goto err;
4062*4882a593Smuzhiyun 			}
4063*4882a593Smuzhiyun 
4064*4882a593Smuzhiyun 			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
4065*4882a593Smuzhiyun 			__skb_frag_ref(nskb_frag);
4066*4882a593Smuzhiyun 			size = skb_frag_size(nskb_frag);
4067*4882a593Smuzhiyun 
4068*4882a593Smuzhiyun 			if (pos < offset) {
4069*4882a593Smuzhiyun 				skb_frag_off_add(nskb_frag, offset - pos);
4070*4882a593Smuzhiyun 				skb_frag_size_sub(nskb_frag, offset - pos);
4071*4882a593Smuzhiyun 			}
4072*4882a593Smuzhiyun 
4073*4882a593Smuzhiyun 			skb_shinfo(nskb)->nr_frags++;
4074*4882a593Smuzhiyun 
4075*4882a593Smuzhiyun 			if (pos + size <= offset + len) {
4076*4882a593Smuzhiyun 				i++;
4077*4882a593Smuzhiyun 				frag++;
4078*4882a593Smuzhiyun 				pos += size;
4079*4882a593Smuzhiyun 			} else {
4080*4882a593Smuzhiyun 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
4081*4882a593Smuzhiyun 				goto skip_fraglist;
4082*4882a593Smuzhiyun 			}
4083*4882a593Smuzhiyun 
4084*4882a593Smuzhiyun 			nskb_frag++;
4085*4882a593Smuzhiyun 		}
4086*4882a593Smuzhiyun 
4087*4882a593Smuzhiyun skip_fraglist:
4088*4882a593Smuzhiyun 		nskb->data_len = len - hsize;
4089*4882a593Smuzhiyun 		nskb->len += nskb->data_len;
4090*4882a593Smuzhiyun 		nskb->truesize += nskb->data_len;
4091*4882a593Smuzhiyun 
4092*4882a593Smuzhiyun perform_csum_check:
4093*4882a593Smuzhiyun 		if (!csum) {
4094*4882a593Smuzhiyun 			if (skb_has_shared_frag(nskb) &&
4095*4882a593Smuzhiyun 			    __skb_linearize(nskb))
4096*4882a593Smuzhiyun 				goto err;
4097*4882a593Smuzhiyun 
4098*4882a593Smuzhiyun 			if (!nskb->remcsum_offload)
4099*4882a593Smuzhiyun 				nskb->ip_summed = CHECKSUM_NONE;
4100*4882a593Smuzhiyun 			SKB_GSO_CB(nskb)->csum =
4101*4882a593Smuzhiyun 				skb_checksum(nskb, doffset,
4102*4882a593Smuzhiyun 					     nskb->len - doffset, 0);
4103*4882a593Smuzhiyun 			SKB_GSO_CB(nskb)->csum_start =
4104*4882a593Smuzhiyun 				skb_headroom(nskb) + doffset;
4105*4882a593Smuzhiyun 		}
4106*4882a593Smuzhiyun 	} while ((offset += len) < head_skb->len);
4107*4882a593Smuzhiyun 
4108*4882a593Smuzhiyun 	/* Some callers want to get the end of the list.
4109*4882a593Smuzhiyun 	 * Put it in segs->prev to avoid walking the list.
4110*4882a593Smuzhiyun 	 * (see validate_xmit_skb_list() for example)
4111*4882a593Smuzhiyun 	 */
4112*4882a593Smuzhiyun 	segs->prev = tail;
4113*4882a593Smuzhiyun 
4114*4882a593Smuzhiyun 	if (partial_segs) {
4115*4882a593Smuzhiyun 		struct sk_buff *iter;
4116*4882a593Smuzhiyun 		int type = skb_shinfo(head_skb)->gso_type;
4117*4882a593Smuzhiyun 		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
4118*4882a593Smuzhiyun 
4119*4882a593Smuzhiyun 		/* Update type to add partial and then remove dodgy if set */
4120*4882a593Smuzhiyun 		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
4121*4882a593Smuzhiyun 		type &= ~SKB_GSO_DODGY;
4122*4882a593Smuzhiyun 
4123*4882a593Smuzhiyun 		/* Update GSO info and prepare to start updating headers on
4124*4882a593Smuzhiyun 		 * our way back down the stack of protocols.
4125*4882a593Smuzhiyun 		 */
4126*4882a593Smuzhiyun 		for (iter = segs; iter; iter = iter->next) {
4127*4882a593Smuzhiyun 			skb_shinfo(iter)->gso_size = gso_size;
4128*4882a593Smuzhiyun 			skb_shinfo(iter)->gso_segs = partial_segs;
4129*4882a593Smuzhiyun 			skb_shinfo(iter)->gso_type = type;
4130*4882a593Smuzhiyun 			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
4131*4882a593Smuzhiyun 		}
4132*4882a593Smuzhiyun 
4133*4882a593Smuzhiyun 		if (tail->len - doffset <= gso_size)
4134*4882a593Smuzhiyun 			skb_shinfo(tail)->gso_size = 0;
4135*4882a593Smuzhiyun 		else if (tail != segs)
4136*4882a593Smuzhiyun 			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
4137*4882a593Smuzhiyun 	}
4138*4882a593Smuzhiyun 
4139*4882a593Smuzhiyun 	/* The following permits correct backpressure for protocols
4140*4882a593Smuzhiyun 	 * using skb_set_owner_w().
4141*4882a593Smuzhiyun 	 * The idea is to transfer ownership from head_skb to the last segment.
4142*4882a593Smuzhiyun 	 */
4143*4882a593Smuzhiyun 	if (head_skb->destructor == sock_wfree) {
4144*4882a593Smuzhiyun 		swap(tail->truesize, head_skb->truesize);
4145*4882a593Smuzhiyun 		swap(tail->destructor, head_skb->destructor);
4146*4882a593Smuzhiyun 		swap(tail->sk, head_skb->sk);
4147*4882a593Smuzhiyun 	}
4148*4882a593Smuzhiyun 	return segs;
4149*4882a593Smuzhiyun 
4150*4882a593Smuzhiyun err:
4151*4882a593Smuzhiyun 	kfree_skb_list(segs);
4152*4882a593Smuzhiyun 	return ERR_PTR(err);
4153*4882a593Smuzhiyun }
4154*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_segment);
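
/* Illustrative sketch (hypothetical caller): a software segmentation
 * fallback. Real callers derive @features from the egress device and reach
 * this function through the per-protocol gso_segment() callbacks.
 */
static struct sk_buff *demo_sw_gso(struct sk_buff *skb,
				   netdev_features_t features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return NULL;

	consume_skb(skb);	/* the original skb is replaced by the list */
	return segs;		/* walk via ->next; the tail is segs->prev */
}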
4155*4882a593Smuzhiyun 
4156*4882a593Smuzhiyun int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
4157*4882a593Smuzhiyun {
4158*4882a593Smuzhiyun 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
4159*4882a593Smuzhiyun 	unsigned int offset = skb_gro_offset(skb);
4160*4882a593Smuzhiyun 	unsigned int headlen = skb_headlen(skb);
4161*4882a593Smuzhiyun 	unsigned int len = skb_gro_len(skb);
4162*4882a593Smuzhiyun 	unsigned int delta_truesize;
4163*4882a593Smuzhiyun 	struct sk_buff *lp;
4164*4882a593Smuzhiyun 
4165*4882a593Smuzhiyun 	if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
4166*4882a593Smuzhiyun 		return -E2BIG;
4167*4882a593Smuzhiyun 
4168*4882a593Smuzhiyun 	lp = NAPI_GRO_CB(p)->last;
4169*4882a593Smuzhiyun 	pinfo = skb_shinfo(lp);
4170*4882a593Smuzhiyun 
4171*4882a593Smuzhiyun 	if (headlen <= offset) {
4172*4882a593Smuzhiyun 		skb_frag_t *frag;
4173*4882a593Smuzhiyun 		skb_frag_t *frag2;
4174*4882a593Smuzhiyun 		int i = skbinfo->nr_frags;
4175*4882a593Smuzhiyun 		int nr_frags = pinfo->nr_frags + i;
4176*4882a593Smuzhiyun 
4177*4882a593Smuzhiyun 		if (nr_frags > MAX_SKB_FRAGS)
4178*4882a593Smuzhiyun 			goto merge;
4179*4882a593Smuzhiyun 
4180*4882a593Smuzhiyun 		offset -= headlen;
4181*4882a593Smuzhiyun 		pinfo->nr_frags = nr_frags;
4182*4882a593Smuzhiyun 		skbinfo->nr_frags = 0;
4183*4882a593Smuzhiyun 
4184*4882a593Smuzhiyun 		frag = pinfo->frags + nr_frags;
4185*4882a593Smuzhiyun 		frag2 = skbinfo->frags + i;
4186*4882a593Smuzhiyun 		do {
4187*4882a593Smuzhiyun 			*--frag = *--frag2;
4188*4882a593Smuzhiyun 		} while (--i);
4189*4882a593Smuzhiyun 
4190*4882a593Smuzhiyun 		skb_frag_off_add(frag, offset);
4191*4882a593Smuzhiyun 		skb_frag_size_sub(frag, offset);
4192*4882a593Smuzhiyun 
4193*4882a593Smuzhiyun 		/* all fragments' truesize: remove (head size + sk_buff) */
4194*4882a593Smuzhiyun 		delta_truesize = skb->truesize -
4195*4882a593Smuzhiyun 				 SKB_TRUESIZE(skb_end_offset(skb));
4196*4882a593Smuzhiyun 
4197*4882a593Smuzhiyun 		skb->truesize -= skb->data_len;
4198*4882a593Smuzhiyun 		skb->len -= skb->data_len;
4199*4882a593Smuzhiyun 		skb->data_len = 0;
4200*4882a593Smuzhiyun 
4201*4882a593Smuzhiyun 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
4202*4882a593Smuzhiyun 		goto done;
4203*4882a593Smuzhiyun 	} else if (skb->head_frag) {
4204*4882a593Smuzhiyun 		int nr_frags = pinfo->nr_frags;
4205*4882a593Smuzhiyun 		skb_frag_t *frag = pinfo->frags + nr_frags;
4206*4882a593Smuzhiyun 		struct page *page = virt_to_head_page(skb->head);
4207*4882a593Smuzhiyun 		unsigned int first_size = headlen - offset;
4208*4882a593Smuzhiyun 		unsigned int first_offset;
4209*4882a593Smuzhiyun 
4210*4882a593Smuzhiyun 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
4211*4882a593Smuzhiyun 			goto merge;
4212*4882a593Smuzhiyun 
4213*4882a593Smuzhiyun 		first_offset = skb->data -
4214*4882a593Smuzhiyun 			       (unsigned char *)page_address(page) +
4215*4882a593Smuzhiyun 			       offset;
4216*4882a593Smuzhiyun 
4217*4882a593Smuzhiyun 		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
4218*4882a593Smuzhiyun 
4219*4882a593Smuzhiyun 		__skb_frag_set_page(frag, page);
4220*4882a593Smuzhiyun 		skb_frag_off_set(frag, first_offset);
4221*4882a593Smuzhiyun 		skb_frag_size_set(frag, first_size);
4222*4882a593Smuzhiyun 
4223*4882a593Smuzhiyun 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
4224*4882a593Smuzhiyun 		/* We don't need to clear skbinfo->nr_frags here */
4225*4882a593Smuzhiyun 
4226*4882a593Smuzhiyun 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4227*4882a593Smuzhiyun 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
4228*4882a593Smuzhiyun 		goto done;
4229*4882a593Smuzhiyun 	}
4230*4882a593Smuzhiyun 
4231*4882a593Smuzhiyun merge:
4232*4882a593Smuzhiyun 	delta_truesize = skb->truesize;
4233*4882a593Smuzhiyun 	if (offset > headlen) {
4234*4882a593Smuzhiyun 		unsigned int eat = offset - headlen;
4235*4882a593Smuzhiyun 
4236*4882a593Smuzhiyun 		skb_frag_off_add(&skbinfo->frags[0], eat);
4237*4882a593Smuzhiyun 		skb_frag_size_sub(&skbinfo->frags[0], eat);
4238*4882a593Smuzhiyun 		skb->data_len -= eat;
4239*4882a593Smuzhiyun 		skb->len -= eat;
4240*4882a593Smuzhiyun 		offset = headlen;
4241*4882a593Smuzhiyun 	}
4242*4882a593Smuzhiyun 
4243*4882a593Smuzhiyun 	__skb_pull(skb, offset);
4244*4882a593Smuzhiyun 
4245*4882a593Smuzhiyun 	if (NAPI_GRO_CB(p)->last == p)
4246*4882a593Smuzhiyun 		skb_shinfo(p)->frag_list = skb;
4247*4882a593Smuzhiyun 	else
4248*4882a593Smuzhiyun 		NAPI_GRO_CB(p)->last->next = skb;
4249*4882a593Smuzhiyun 	NAPI_GRO_CB(p)->last = skb;
4250*4882a593Smuzhiyun 	__skb_header_release(skb);
4251*4882a593Smuzhiyun 	lp = p;
4252*4882a593Smuzhiyun 
4253*4882a593Smuzhiyun done:
4254*4882a593Smuzhiyun 	NAPI_GRO_CB(p)->count++;
4255*4882a593Smuzhiyun 	p->data_len += len;
4256*4882a593Smuzhiyun 	p->truesize += delta_truesize;
4257*4882a593Smuzhiyun 	p->len += len;
4258*4882a593Smuzhiyun 	if (lp != p) {
4259*4882a593Smuzhiyun 		lp->data_len += len;
4260*4882a593Smuzhiyun 		lp->truesize += delta_truesize;
4261*4882a593Smuzhiyun 		lp->len += len;
4262*4882a593Smuzhiyun 	}
4263*4882a593Smuzhiyun 	NAPI_GRO_CB(skb)->same_flow = 1;
4264*4882a593Smuzhiyun 	return 0;
4265*4882a593Smuzhiyun }
4266*4882a593Smuzhiyun 
4267*4882a593Smuzhiyun #ifdef CONFIG_SKB_EXTENSIONS
4268*4882a593Smuzhiyun #define SKB_EXT_ALIGN_VALUE	8
4269*4882a593Smuzhiyun #define SKB_EXT_CHUNKSIZEOF(x)	(ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4270*4882a593Smuzhiyun 
4271*4882a593Smuzhiyun static const u8 skb_ext_type_len[] = {
4272*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4273*4882a593Smuzhiyun 	[SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4274*4882a593Smuzhiyun #endif
4275*4882a593Smuzhiyun #ifdef CONFIG_XFRM
4276*4882a593Smuzhiyun 	[SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4277*4882a593Smuzhiyun #endif
4278*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4279*4882a593Smuzhiyun 	[TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4280*4882a593Smuzhiyun #endif
4281*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_MPTCP)
4282*4882a593Smuzhiyun 	[SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4283*4882a593Smuzhiyun #endif
4284*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_KCOV)
4285*4882a593Smuzhiyun 	[SKB_EXT_KCOV_HANDLE] = SKB_EXT_CHUNKSIZEOF(u64),
4286*4882a593Smuzhiyun #endif
4287*4882a593Smuzhiyun };
4288*4882a593Smuzhiyun 
4289*4882a593Smuzhiyun static __always_inline unsigned int skb_ext_total_length(void)
4290*4882a593Smuzhiyun {
4291*4882a593Smuzhiyun 	return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4292*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4293*4882a593Smuzhiyun 		skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4294*4882a593Smuzhiyun #endif
4295*4882a593Smuzhiyun #ifdef CONFIG_XFRM
4296*4882a593Smuzhiyun 		skb_ext_type_len[SKB_EXT_SEC_PATH] +
4297*4882a593Smuzhiyun #endif
4298*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4299*4882a593Smuzhiyun 		skb_ext_type_len[TC_SKB_EXT] +
4300*4882a593Smuzhiyun #endif
4301*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_MPTCP)
4302*4882a593Smuzhiyun 		skb_ext_type_len[SKB_EXT_MPTCP] +
4303*4882a593Smuzhiyun #endif
4304*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_KCOV)
4305*4882a593Smuzhiyun 		skb_ext_type_len[SKB_EXT_KCOV_HANDLE] +
4306*4882a593Smuzhiyun #endif
4307*4882a593Smuzhiyun 		0;
4308*4882a593Smuzhiyun }
4309*4882a593Smuzhiyun 
4310*4882a593Smuzhiyun static void skb_extensions_init(void)
4311*4882a593Smuzhiyun {
4312*4882a593Smuzhiyun 	BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4313*4882a593Smuzhiyun 	BUILD_BUG_ON(skb_ext_total_length() > 255);
4314*4882a593Smuzhiyun 
4315*4882a593Smuzhiyun 	skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4316*4882a593Smuzhiyun 					     SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4317*4882a593Smuzhiyun 					     0,
4318*4882a593Smuzhiyun 					     SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4319*4882a593Smuzhiyun 					     NULL);
4320*4882a593Smuzhiyun }
4321*4882a593Smuzhiyun #else
4322*4882a593Smuzhiyun static void skb_extensions_init(void) {}
4323*4882a593Smuzhiyun #endif
4324*4882a593Smuzhiyun 
4325*4882a593Smuzhiyun void __init skb_init(void)
4326*4882a593Smuzhiyun {
4327*4882a593Smuzhiyun 	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4328*4882a593Smuzhiyun 					      sizeof(struct sk_buff),
4329*4882a593Smuzhiyun 					      0,
4330*4882a593Smuzhiyun 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4331*4882a593Smuzhiyun 					      offsetof(struct sk_buff, cb),
4332*4882a593Smuzhiyun 					      sizeof_field(struct sk_buff, cb),
4333*4882a593Smuzhiyun 					      NULL);
4334*4882a593Smuzhiyun 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4335*4882a593Smuzhiyun 						sizeof(struct sk_buff_fclones),
4336*4882a593Smuzhiyun 						0,
4337*4882a593Smuzhiyun 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4338*4882a593Smuzhiyun 						NULL);
4339*4882a593Smuzhiyun 	skb_extensions_init();
4340*4882a593Smuzhiyun }
4341*4882a593Smuzhiyun 
4342*4882a593Smuzhiyun static int
4343*4882a593Smuzhiyun __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4344*4882a593Smuzhiyun 	       unsigned int recursion_level)
4345*4882a593Smuzhiyun {
4346*4882a593Smuzhiyun 	int start = skb_headlen(skb);
4347*4882a593Smuzhiyun 	int i, copy = start - offset;
4348*4882a593Smuzhiyun 	struct sk_buff *frag_iter;
4349*4882a593Smuzhiyun 	int elt = 0;
4350*4882a593Smuzhiyun 
4351*4882a593Smuzhiyun 	if (unlikely(recursion_level >= 24))
4352*4882a593Smuzhiyun 		return -EMSGSIZE;
4353*4882a593Smuzhiyun 
4354*4882a593Smuzhiyun 	if (copy > 0) {
4355*4882a593Smuzhiyun 		if (copy > len)
4356*4882a593Smuzhiyun 			copy = len;
4357*4882a593Smuzhiyun 		sg_set_buf(sg, skb->data + offset, copy);
4358*4882a593Smuzhiyun 		elt++;
4359*4882a593Smuzhiyun 		if ((len -= copy) == 0)
4360*4882a593Smuzhiyun 			return elt;
4361*4882a593Smuzhiyun 		offset += copy;
4362*4882a593Smuzhiyun 	}
4363*4882a593Smuzhiyun 
4364*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4365*4882a593Smuzhiyun 		int end;
4366*4882a593Smuzhiyun 
4367*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
4368*4882a593Smuzhiyun 
4369*4882a593Smuzhiyun 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4370*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
4371*4882a593Smuzhiyun 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4372*4882a593Smuzhiyun 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4373*4882a593Smuzhiyun 				return -EMSGSIZE;
4374*4882a593Smuzhiyun 
4375*4882a593Smuzhiyun 			if (copy > len)
4376*4882a593Smuzhiyun 				copy = len;
4377*4882a593Smuzhiyun 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4378*4882a593Smuzhiyun 				    skb_frag_off(frag) + offset - start);
4379*4882a593Smuzhiyun 			elt++;
4380*4882a593Smuzhiyun 			if (!(len -= copy))
4381*4882a593Smuzhiyun 				return elt;
4382*4882a593Smuzhiyun 			offset += copy;
4383*4882a593Smuzhiyun 		}
4384*4882a593Smuzhiyun 		start = end;
4385*4882a593Smuzhiyun 	}
4386*4882a593Smuzhiyun 
4387*4882a593Smuzhiyun 	skb_walk_frags(skb, frag_iter) {
4388*4882a593Smuzhiyun 		int end, ret;
4389*4882a593Smuzhiyun 
4390*4882a593Smuzhiyun 		WARN_ON(start > offset + len);
4391*4882a593Smuzhiyun 
4392*4882a593Smuzhiyun 		end = start + frag_iter->len;
4393*4882a593Smuzhiyun 		if ((copy = end - offset) > 0) {
4394*4882a593Smuzhiyun 			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4395*4882a593Smuzhiyun 				return -EMSGSIZE;
4396*4882a593Smuzhiyun 
4397*4882a593Smuzhiyun 			if (copy > len)
4398*4882a593Smuzhiyun 				copy = len;
4399*4882a593Smuzhiyun 			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4400*4882a593Smuzhiyun 					      copy, recursion_level + 1);
4401*4882a593Smuzhiyun 			if (unlikely(ret < 0))
4402*4882a593Smuzhiyun 				return ret;
4403*4882a593Smuzhiyun 			elt += ret;
4404*4882a593Smuzhiyun 			if ((len -= copy) == 0)
4405*4882a593Smuzhiyun 				return elt;
4406*4882a593Smuzhiyun 			offset += copy;
4407*4882a593Smuzhiyun 		}
4408*4882a593Smuzhiyun 		start = end;
4409*4882a593Smuzhiyun 	}
4410*4882a593Smuzhiyun 	BUG_ON(len);
4411*4882a593Smuzhiyun 	return elt;
4412*4882a593Smuzhiyun }
4413*4882a593Smuzhiyun 
4414*4882a593Smuzhiyun /**
4415*4882a593Smuzhiyun  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4416*4882a593Smuzhiyun  *	@skb: Socket buffer containing the buffers to be mapped
4417*4882a593Smuzhiyun  *	@sg: The scatter-gather list to map into
4418*4882a593Smuzhiyun  *	@offset: The offset into the buffer's contents to start mapping
4419*4882a593Smuzhiyun  *	@len: Length of buffer space to be mapped
4420*4882a593Smuzhiyun  *
4421*4882a593Smuzhiyun  *	Fill the specified scatter-gather list with mappings/pointers into a
4422*4882a593Smuzhiyun  *	region of the buffer space attached to a socket buffer. Returns either
4423*4882a593Smuzhiyun  *	the number of scatterlist items used, or -EMSGSIZE if the contents
4424*4882a593Smuzhiyun  *	could not fit.
4425*4882a593Smuzhiyun  */
4426*4882a593Smuzhiyun int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4427*4882a593Smuzhiyun {
4428*4882a593Smuzhiyun 	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4429*4882a593Smuzhiyun 
4430*4882a593Smuzhiyun 	if (nsg <= 0)
4431*4882a593Smuzhiyun 		return nsg;
4432*4882a593Smuzhiyun 
4433*4882a593Smuzhiyun 	sg_mark_end(&sg[nsg - 1]);
4434*4882a593Smuzhiyun 
4435*4882a593Smuzhiyun 	return nsg;
4436*4882a593Smuzhiyun }
4437*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_to_sgvec);
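
/* Illustrative sketch (hypothetical helper): mapping a linear-plus-frags
 * skb into a scatterlist, e.g. before a crypto operation. The table size
 * assumes no frag_list; DEMO_MAX_SG is made up.
 */
#define DEMO_MAX_SG (MAX_SKB_FRAGS + 1)

static int demo_map_skb(struct sk_buff *skb, struct scatterlist *sg)
{
	sg_init_table(sg, DEMO_MAX_SG);
	/* Returns the number of entries used (the last one marked as end),
	 * or -EMSGSIZE if the skb would not fit.
	 */
	return skb_to_sgvec(skb, sg, 0, skb->len);
}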
4438*4882a593Smuzhiyun 
4439*4882a593Smuzhiyun /* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4440*4882a593Smuzhiyun  * given sglist without marking the sg entry containing the last skb data as
4441*4882a593Smuzhiyun  * the end. So the caller can manipulate the sg list at will when padding new
4442*4882a593Smuzhiyun  * data after the first call, without calling sg_unmark_end to extend the list.
4443*4882a593Smuzhiyun  *
4444*4882a593Smuzhiyun  * Scenario to use skb_to_sgvec_nomark:
4445*4882a593Smuzhiyun  * 1. sg_init_table
4446*4882a593Smuzhiyun  * 2. skb_to_sgvec_nomark(payload1)
4447*4882a593Smuzhiyun  * 3. skb_to_sgvec_nomark(payload2)
4448*4882a593Smuzhiyun  *
4449*4882a593Smuzhiyun  * This is equivalent to:
4450*4882a593Smuzhiyun  * 1. sg_init_table
4451*4882a593Smuzhiyun  * 2. skb_to_sgvec(payload1)
4452*4882a593Smuzhiyun  * 3. sg_unmark_end
4453*4882a593Smuzhiyun  * 4. skb_to_sgvec(payload2)
4454*4882a593Smuzhiyun  *
4455*4882a593Smuzhiyun  * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4456*4882a593Smuzhiyun  * is preferable.
4457*4882a593Smuzhiyun  */
4458*4882a593Smuzhiyun int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4459*4882a593Smuzhiyun 			int offset, int len)
4460*4882a593Smuzhiyun {
4461*4882a593Smuzhiyun 	return __skb_to_sgvec(skb, sg, offset, len, 0);
4462*4882a593Smuzhiyun }
4463*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
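
/* Illustrative sketch of the scenario above (hypothetical helper): mapping
 * two payload ranges of one skb into a single sg table, terminating it only
 * once at the very end.
 */
static int demo_map_two_ranges(struct sk_buff *skb, struct scatterlist *sg,
			       int nents, int len1, int len2)
{
	int n1, n2;

	sg_init_table(sg, nents);
	n1 = skb_to_sgvec_nomark(skb, sg, 0, len1);
	if (n1 < 0)
		return n1;
	n2 = skb_to_sgvec_nomark(skb, sg + n1, len1, len2);
	if (n2 < 0)
		return n2;
	sg_mark_end(&sg[n1 + n2 - 1]);
	return n1 + n2;
}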
4464*4882a593Smuzhiyun 
4465*4882a593Smuzhiyun 
4466*4882a593Smuzhiyun 
4467*4882a593Smuzhiyun /**
4468*4882a593Smuzhiyun  *	skb_cow_data - Check that a socket buffer's data buffers are writable
4469*4882a593Smuzhiyun  *	@skb: The socket buffer to check.
4470*4882a593Smuzhiyun  *	@tailbits: Amount of trailing space to be added
4471*4882a593Smuzhiyun  *	@trailer: Returned pointer to the skb where the @tailbits space begins
4472*4882a593Smuzhiyun  *
4473*4882a593Smuzhiyun  *	Make sure that the data buffers attached to a socket buffer are
4474*4882a593Smuzhiyun  *	writable. If they are not, private copies are made of the data buffers
4475*4882a593Smuzhiyun  *	and the socket buffer is set to use these instead.
4476*4882a593Smuzhiyun  *
4477*4882a593Smuzhiyun  *	If @tailbits is given, make sure that there is space to write @tailbits
4478*4882a593Smuzhiyun  *	bytes of data beyond current end of socket buffer.  @trailer will be
4479*4882a593Smuzhiyun  *	set to point to the skb in which this space begins.
4480*4882a593Smuzhiyun  *
4481*4882a593Smuzhiyun  *	The number of scatterlist elements required to completely map the
4482*4882a593Smuzhiyun  *	COW'd and extended socket buffer will be returned.
4483*4882a593Smuzhiyun  */
4484*4882a593Smuzhiyun int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4485*4882a593Smuzhiyun {
4486*4882a593Smuzhiyun 	int copyflag;
4487*4882a593Smuzhiyun 	int elt;
4488*4882a593Smuzhiyun 	struct sk_buff *skb1, **skb_p;
4489*4882a593Smuzhiyun 
4490*4882a593Smuzhiyun 	/* If skb is cloned or its head is paged, reallocate
4491*4882a593Smuzhiyun 	 * head pulling out all the pages (pages are considered not writable
4492*4882a593Smuzhiyun 	 * at the moment even if they are anonymous).
4493*4882a593Smuzhiyun 	 */
4494*4882a593Smuzhiyun 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4495*4882a593Smuzhiyun 	    !__pskb_pull_tail(skb, __skb_pagelen(skb)))
4496*4882a593Smuzhiyun 		return -ENOMEM;
4497*4882a593Smuzhiyun 
4498*4882a593Smuzhiyun 	/* Easy case. Most of packets will go this way. */
4499*4882a593Smuzhiyun 	if (!skb_has_frag_list(skb)) {
4500*4882a593Smuzhiyun 		/* A little trouble: not enough space for the trailer.
4501*4882a593Smuzhiyun 		 * This should not happen when the stack is tuned to generate
4502*4882a593Smuzhiyun 		 * good frames. OK, on a miss we reallocate and reserve even more
4503*4882a593Smuzhiyun 		 * space; 128 bytes is fair. */
4504*4882a593Smuzhiyun 
4505*4882a593Smuzhiyun 		if (skb_tailroom(skb) < tailbits &&
4506*4882a593Smuzhiyun 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4507*4882a593Smuzhiyun 			return -ENOMEM;
4508*4882a593Smuzhiyun 
4509*4882a593Smuzhiyun 		/* Voila! */
4510*4882a593Smuzhiyun 		*trailer = skb;
4511*4882a593Smuzhiyun 		return 1;
4512*4882a593Smuzhiyun 	}
4513*4882a593Smuzhiyun 
4514*4882a593Smuzhiyun 	/* Misery. We are in trouble, going to mince fragments... */
4515*4882a593Smuzhiyun 
4516*4882a593Smuzhiyun 	elt = 1;
4517*4882a593Smuzhiyun 	skb_p = &skb_shinfo(skb)->frag_list;
4518*4882a593Smuzhiyun 	copyflag = 0;
4519*4882a593Smuzhiyun 
4520*4882a593Smuzhiyun 	while ((skb1 = *skb_p) != NULL) {
4521*4882a593Smuzhiyun 		int ntail = 0;
4522*4882a593Smuzhiyun 
4523*4882a593Smuzhiyun 		/* The fragment is partially pulled by someone,
4524*4882a593Smuzhiyun 		 * this can happen on input. Copy it and everything
4525*4882a593Smuzhiyun 		 * after it. */
4526*4882a593Smuzhiyun 
4527*4882a593Smuzhiyun 		if (skb_shared(skb1))
4528*4882a593Smuzhiyun 			copyflag = 1;
4529*4882a593Smuzhiyun 
4530*4882a593Smuzhiyun 		/* If the skb is the last, worry about trailer. */
4531*4882a593Smuzhiyun 
4532*4882a593Smuzhiyun 		if (skb1->next == NULL && tailbits) {
4533*4882a593Smuzhiyun 			if (skb_shinfo(skb1)->nr_frags ||
4534*4882a593Smuzhiyun 			    skb_has_frag_list(skb1) ||
4535*4882a593Smuzhiyun 			    skb_tailroom(skb1) < tailbits)
4536*4882a593Smuzhiyun 				ntail = tailbits + 128;
4537*4882a593Smuzhiyun 		}
4538*4882a593Smuzhiyun 
4539*4882a593Smuzhiyun 		if (copyflag ||
4540*4882a593Smuzhiyun 		    skb_cloned(skb1) ||
4541*4882a593Smuzhiyun 		    ntail ||
4542*4882a593Smuzhiyun 		    skb_shinfo(skb1)->nr_frags ||
4543*4882a593Smuzhiyun 		    skb_has_frag_list(skb1)) {
4544*4882a593Smuzhiyun 			struct sk_buff *skb2;
4545*4882a593Smuzhiyun 
4546*4882a593Smuzhiyun 			/* Fuck, we are miserable poor guys... */
4547*4882a593Smuzhiyun 			if (ntail == 0)
4548*4882a593Smuzhiyun 				skb2 = skb_copy(skb1, GFP_ATOMIC);
4549*4882a593Smuzhiyun 			else
4550*4882a593Smuzhiyun 				skb2 = skb_copy_expand(skb1,
4551*4882a593Smuzhiyun 						       skb_headroom(skb1),
4552*4882a593Smuzhiyun 						       ntail,
4553*4882a593Smuzhiyun 						       GFP_ATOMIC);
4554*4882a593Smuzhiyun 			if (unlikely(skb2 == NULL))
4555*4882a593Smuzhiyun 				return -ENOMEM;
4556*4882a593Smuzhiyun 
4557*4882a593Smuzhiyun 			if (skb1->sk)
4558*4882a593Smuzhiyun 				skb_set_owner_w(skb2, skb1->sk);
4559*4882a593Smuzhiyun 
4560*4882a593Smuzhiyun 			/* Looking around. Are we still alive?
4561*4882a593Smuzhiyun 			 * OK, link new skb, drop old one */
4562*4882a593Smuzhiyun 
4563*4882a593Smuzhiyun 			skb2->next = skb1->next;
4564*4882a593Smuzhiyun 			*skb_p = skb2;
4565*4882a593Smuzhiyun 			kfree_skb(skb1);
4566*4882a593Smuzhiyun 			skb1 = skb2;
4567*4882a593Smuzhiyun 		}
4568*4882a593Smuzhiyun 		elt++;
4569*4882a593Smuzhiyun 		*trailer = skb1;
4570*4882a593Smuzhiyun 		skb_p = &skb1->next;
4571*4882a593Smuzhiyun 	}
4572*4882a593Smuzhiyun 
4573*4882a593Smuzhiyun 	return elt;
4574*4882a593Smuzhiyun }
4575*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(skb_cow_data);
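
/* Usage sketch (not part of the original file): how an IPsec ESP-style
 * transform might ask skb_cow_data() for a writable trailer before
 * appending padding. esp_add_trailer_sketch and padlen are hypothetical
 * names; skb_cow_data() and pskb_put() are the real APIs shown.
 */
static inline int esp_add_trailer_sketch(struct sk_buff *skb, int padlen)
{
	struct sk_buff *trailer;
	int nfrags;

	/* On success, nfrags is the number of buffers that would need
	 * scatterlist slots, and *trailer has at least padlen tailroom.
	 */
	nfrags = skb_cow_data(skb, padlen, &trailer);
	if (nfrags < 0)
		return nfrags;

	/* Extend the packet into the now-writable trailer bytes. */
	pskb_put(skb, trailer, padlen);
	return nfrags;
}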

static void sock_rmem_free(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;

	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
}

static void skb_set_err_queue(struct sk_buff *skb)
{
	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
	 * So, it is safe to (mis)use it to mark skbs on the error queue.
	 */
	skb->pkt_type = PACKET_OUTGOING;
	BUILD_BUG_ON(PACKET_OUTGOING == 0);
}

/*
 * Note: We don't mem-charge error packets (no sk_forward_alloc changes)
 */
int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned int)READ_ONCE(sk->sk_rcvbuf))
		return -ENOMEM;

	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rmem_free;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	skb_set_err_queue(skb);

	/* before exiting rcu section, make sure dst is refcounted */
	skb_dst_force(skb);

	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_error_report(sk);
	return 0;
}
EXPORT_SYMBOL(sock_queue_err_skb);

static bool is_icmp_err_skb(const struct sk_buff *skb)
{
	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
}

struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
{
	struct sk_buff_head *q = &sk->sk_error_queue;
	struct sk_buff *skb, *skb_next = NULL;
	bool icmp_next = false;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);
	if (skb && (skb_next = skb_peek(q))) {
		icmp_next = is_icmp_err_skb(skb_next);
		if (icmp_next)
			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	if (is_icmp_err_skb(skb) && !icmp_next)
		sk->sk_err = 0;

	if (skb_next)
		sk->sk_error_report(sk);

	return skb;
}
EXPORT_SYMBOL(sock_dequeue_err_skb);
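
/* Usage sketch (not part of the original file): draining a socket's error
 * queue, as a recvmsg(MSG_ERRQUEUE)-style path would. The per-skb
 * processing step is elided; sock_drain_errq_sketch is a hypothetical name.
 */
static inline void sock_drain_errq_sketch(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = sock_dequeue_err_skb(sk)) != NULL) {
		/* inspect SKB_EXT_ERR(skb)->ee here */
		kfree_skb(skb);
	}
}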

/**
 * skb_clone_sk - create clone of skb, and take reference to socket
 * @skb: the skb to clone
 *
 * This function creates a clone of a buffer that holds a reference on
 * sk_refcnt.  Buffers created via this function are meant to be
 * returned using sock_queue_err_skb, or freed via kfree_skb.
 *
 * When passing buffers allocated with this function to sock_queue_err_skb
 * it is necessary to wrap the call with sock_hold/sock_put in order to
 * prevent the socket from being released prior to being enqueued on
 * the sk_error_queue.
 */
struct sk_buff *skb_clone_sk(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct sk_buff *clone;

	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
		return NULL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone) {
		sock_put(sk);
		return NULL;
	}

	clone->sk = sk;
	clone->destructor = sock_efree;

	return clone;
}
EXPORT_SYMBOL(skb_clone_sk);
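
/* Sketch (not part of the original file) of the sock_hold()/sock_put()
 * pattern the comment above requires when handing a clone to
 * sock_queue_err_skb(); tx_complete_sketch is a hypothetical completion
 * handler.
 */
static inline void tx_complete_sketch(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone_sk(skb);
	struct sock *sk;

	if (!clone)
		return;

	sk = clone->sk;
	sock_hold(sk);	/* keep the socket alive across the enqueue */
	if (sock_queue_err_skb(sk, clone))
		kfree_skb(clone);
	sock_put(sk);
}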

static void __skb_complete_tx_timestamp(struct sk_buff *skb,
					struct sock *sk,
					int tstype,
					bool opt_stats)
{
	struct sock_exterr_skb *serr;
	int err;

	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	serr->ee.ee_info = tstype;
	serr->opt_stats = opt_stats;
	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
		serr->ee.ee_data = skb_shinfo(skb)->tskey;
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			serr->ee.ee_data -= sk->sk_tskey;
	}

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}

static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
{
	bool ret;

	if (likely(READ_ONCE(sysctl_tstamp_allow_data) || tsonly))
		return true;

	read_lock_bh(&sk->sk_callback_lock);
	ret = sk->sk_socket && sk->sk_socket->file &&
	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
	read_unlock_bh(&sk->sk_callback_lock);
	return ret;
}

void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = skb->sk;

	if (!skb_may_tx_timestamp(sk, false))
		goto err;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
		*skb_hwtstamps(skb) = *hwtstamps;
		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
		sock_put(sk);
		return;
	}

err:
	kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype)
{
	struct sk_buff *skb;
	bool tsonly, opt_stats = false;

	if (!sk)
		return;

	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
		return;

	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
	if (!skb_may_tx_timestamp(sk, tsonly))
		return;

	if (tsonly) {
#ifdef CONFIG_INET
		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
		    sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM) {
			skb = tcp_get_timestamping_opt_stats(sk, orig_skb);
			opt_stats = true;
		} else
#endif
			skb = alloc_skb(0, GFP_ATOMIC);
	} else {
		skb = skb_clone(orig_skb, GFP_ATOMIC);
	}
	if (!skb)
		return;

	if (tsonly) {
		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
					     SKBTX_ANY_TSTAMP;
		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
	}

	if (hwtstamps)
		*skb_hwtstamps(skb) = *hwtstamps;
	else
		skb->tstamp = ktime_get_real();

	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
			       SCM_TSTAMP_SND);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
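
/* Sketch (not part of the original file): how a driver's TX-completion
 * path might report a hardware timestamp via skb_tstamp_tx(). hw_ktime
 * stands in for a value read from device registers; drv_tx_tstamp_sketch
 * is a hypothetical name.
 */
static inline void drv_tx_tstamp_sketch(struct sk_buff *skb, ktime_t hw_ktime)
{
	struct skb_shared_hwtstamps hwts = { .hwtstamp = hw_ktime };

	/* Only report if the xmit path marked this skb for timestamping. */
	if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
		skb_tstamp_tx(skb, &hwts);
	dev_kfree_skb_any(skb);
}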

void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	struct sock *sk = skb->sk;
	struct sock_exterr_skb *serr;
	int err = 1;

	skb->wifi_acked_valid = 1;
	skb->wifi_acked = acked;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
		err = sock_queue_err_skb(sk, skb);
		sock_put(sk);
	}
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);

/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
	u32 csum_start = skb_headroom(skb) + (u32)start;

	if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
				     start, off, skb_headroom(skb), skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = csum_start;
	skb->csum_offset = off;
	skb_set_transport_header(skb, start);
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
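
/* Sketch (not part of the original file): accepting checksum metadata from
 * an untrusted producer, in the style of virtio_net_hdr csum_start /
 * csum_offset consumers. untrusted_csum_sketch is a hypothetical name; a
 * false return from skb_partial_csum_set() means the packet must be dropped.
 */
static inline int untrusted_csum_sketch(struct sk_buff *skb,
					u16 csum_start, u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;	/* caller should drop the packet */
	return 0;
}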

static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
			       unsigned int max)
{
	if (skb_headlen(skb) >= len)
		return 0;

	/* If we need to pull up then pull up to the max, so we
	 * won't need to do it again.
	 */
	if (max > skb->len)
		max = skb->len;

	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	if (skb_headlen(skb) < len)
		return -EPROTO;

	return 0;
}

#define MAX_TCP_HDR_LEN (15 * 4)

static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
				      typeof(IPPROTO_IP) proto,
				      unsigned int off)
{
	int err;

	switch (proto) {
	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
					  off + MAX_TCP_HDR_LEN);
		if (!err && !skb_partial_csum_set(skb, off,
						  offsetof(struct tcphdr,
							   check)))
			err = -EPROTO;
		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;

	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
					  off + sizeof(struct udphdr));
		if (!err && !skb_partial_csum_set(skb, off,
						  offsetof(struct udphdr,
							   check)))
			err = -EPROTO;
		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
	}

	return ERR_PTR(-EPROTO);
}

/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */
#define MAX_IP_HDR_LEN 128

static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
{
	unsigned int off;
	bool fragment;
	__sum16 *csum;
	int err;

	fragment = false;

	err = skb_maybe_pull_tail(skb,
				  sizeof(struct iphdr),
				  MAX_IP_HDR_LEN);
	if (err < 0)
		goto out;

	if (ip_is_fragment(ip_hdr(skb)))
		fragment = true;

	off = ip_hdrlen(skb);

	err = -EPROTO;

	if (fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   skb->len - off,
					   ip_hdr(skb)->protocol, 0);
	err = 0;

out:
	return err;
}

/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;
	__sum16 *csum;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, nexthdr, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 skb->len - off, nexthdr, 0);
	err = 0;

out:
	return err;
}

/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
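
/* Sketch (not part of the original file): a paravirtual receive path
 * repairing CHECKSUM_PARTIAL metadata before handing the skb to the
 * stack. rx_csum_setup_sketch is a hypothetical name.
 */
static inline int rx_csum_setup_sketch(struct sk_buff *skb)
{
	int err = skb_checksum_setup(skb, true);

	if (err)
		kfree_skb(skb);	/* unsupported or fragmented protocol */
	return err;
}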

/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;
	unsigned int len = skb_transport_offset(skb) + transport_len;
	int ret;

	if (skb->len < len)
		return NULL;
	else if (skb->len == len)
		return skb;

	skb_chk = skb_clone(skb, GFP_ATOMIC);
	if (!skb_chk)
		return NULL;

	ret = pskb_trim_rcsum(skb_chk, len);
	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}

/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16 (*skb_chkf)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;
	unsigned int offset = skb_transport_offset(skb);
	__sum16 ret;

	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
	if (!skb_chk)
		goto err;

	if (!pskb_may_pull(skb_chk, offset))
		goto err;

	skb_pull_rcsum(skb_chk, offset);
	ret = skb_chkf(skb_chk);
	skb_push_rcsum(skb_chk, offset);

	if (ret)
		goto err;

	return skb_chk;

err:
	if (skb_chk && skb_chk != skb)
		kfree_skb(skb_chk);

	return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);
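
/* Sketch (not part of the original file): validating a message whose
 * checksum covers the transport payload, in the style of the IGMP/MLD
 * callers. simple_csum_sketch folds a plain Internet checksum over the
 * pulled transport data; both function names are hypothetical.
 */
static inline __sum16 simple_csum_sketch(struct sk_buff *skb)
{
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}

static inline struct sk_buff *csum_trim_sketch(struct sk_buff *skb,
					       unsigned int transport_len)
{
	/* NULL means a bad checksum, a too-short skb, or OOM; a non-NULL
	 * result that differs from skb must be freed by the caller.
	 */
	return skb_checksum_trimmed(skb, transport_len, simple_csum_sketch);
}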

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	struct skb_shared_info *to_shinfo, *from_shinfo;
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	to_shinfo = skb_shinfo(to);
	from_shinfo = skb_shinfo(from);
	if (to_shinfo->frag_list || from_shinfo->frag_list)
		return false;
	if (skb_zcopy(to) || skb_zcopy(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, to_shinfo->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (to_shinfo->nr_frags +
		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
	       from_shinfo->frags,
	       from_shinfo->nr_frags * sizeof(skb_frag_t));
	to_shinfo->nr_frags += from_shinfo->nr_frags;

	if (!skb_cloned(from))
		from_shinfo->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < from_shinfo->nr_frags; i++)
		__skb_frag_ref(&from_shinfo->frags[i]);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
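
/* Sketch (not part of the original file): coalescing a freshly received
 * skb into a queue tail, as TCP's receive path does; on success only
 * delta_truesize needs to be charged. rx_coalesce_sketch is a
 * hypothetical name.
 */
static inline bool rx_coalesce_sketch(struct sk_buff *tail,
				      struct sk_buff *skb, int *delta)
{
	bool fragstolen;

	if (!skb_try_coalesce(tail, skb, &fragstolen, delta))
		return false;

	/* skb's data now lives in tail; release just the shell */
	kfree_skb_partial(skb, fragstolen);
	return true;
}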

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean a skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	skb_ext_reset(skb);
	nf_reset_ct(skb);
	nf_reset_trace(skb);

#ifdef CONFIG_NET_SWITCHDEV
	skb->offload_fwd_mark = 0;
	skb->offload_l3_fwd_mark = 0;
#endif

	if (!xnet)
		return;

	ipvs_reset(skb);
	skb->mark = 0;
	skb->tstamp = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
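
/* Sketch (not part of the original file): a veth-style hop that scrubs
 * only when the packet actually crosses network namespaces.
 * xnet_scrub_sketch is a hypothetical name.
 */
static inline void xnet_scrub_sketch(struct sk_buff *skb,
				     struct net *from, struct net *to)
{
	skb_scrub_packet(skb, !net_eq(from, to));
}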

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	} else if (unlikely(skb_is_gso_sctp(skb))) {
		thlen = sizeof(struct sctphdr);
	} else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
		thlen = sizeof(struct udphdr);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}

/**
 * skb_gso_network_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_network_seglen is used to determine the real size of the
 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
 *
 * The MAC/L2 header is not accounted for.
 */
static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) -
			       skb_network_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_mac_seglen is used to determine the real size of the
 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
 * headers (TCP/UDP).
 */
static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
{
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	return hdr_len + skb_gso_transport_seglen(skb);
}

/**
 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
 *
 * There are a couple of instances where we have a GSO skb, and we
 * want to determine what size it would be after it is segmented.
 *
 * We might want to check:
 * -    L3+L4+payload size (e.g. IP forwarding)
 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
 *
 * This is a helper to do that correctly considering GSO_BY_FRAGS.
 *
 * @skb: GSO skb
 *
 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
 *
 * @max_len: The maximum permissible length.
 *
 * Returns true if the segmented length <= max length.
 */
static inline bool skb_gso_size_check(const struct sk_buff *skb,
				      unsigned int seg_len,
				      unsigned int max_len)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	const struct sk_buff *iter;

	if (shinfo->gso_size != GSO_BY_FRAGS)
		return seg_len <= max_len;

	/* Undo this so we can re-use header sizes */
	seg_len -= GSO_BY_FRAGS;

	skb_walk_frags(skb, iter) {
		if (seg_len + skb_headlen(iter) > max_len)
			return false;
	}

	return true;
}

/**
 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
 *
 * @skb: GSO skb
 * @mtu: MTU to validate against
 *
 * skb_gso_validate_network_len validates if a given skb will fit a
 * wanted MTU once split. It considers L3 headers, L4 headers, and the
 * payload.
 */
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
{
	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
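
/* Sketch (not part of the original file): an IP-forwarding style MTU
 * admission test that handles both plain and GSO packets.
 * mtu_check_sketch is a hypothetical name.
 */
static inline bool mtu_check_sketch(const struct sk_buff *skb,
				    unsigned int mtu)
{
	if (!skb_is_gso(skb))
		return skb->len <= mtu;

	return skb_gso_validate_network_len(skb, mtu);
}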

/**
 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
 *
 * @skb: GSO skb
 * @len: length to validate against
 *
 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
 * length once split, including L2, L3 and L4 headers and the payload.
 */
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
{
	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
}
EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	int mac_len, meta_len;
	void *meta;

	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	mac_len = skb->data - skb_mac_header(skb);
	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
			mac_len - VLAN_HLEN - ETH_TLEN);
	}

	meta_len = skb_metadata_len(skb);
	if (meta_len) {
		meta = skb_metadata_end(skb) - meta_len;
		memmove(meta + VLAN_HLEN, meta, meta_len);
	}

	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;
	/* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
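
/* Sketch (not part of the original file): making the IPv4 header writable
 * before mangling it, the typical skb_ensure_writable() call-site shape.
 * ttl_dec_sketch is a hypothetical name.
 */
static inline int ttl_dec_sketch(struct sk_buff *skb)
{
	struct iphdr *iph;
	int err;

	err = skb_ensure_writable(skb, skb_network_offset(skb) + sizeof(*iph));
	if (err)
		return err;

	iph = ip_hdr(skb);
	if (iph->ttl > 1)
		ip_decrease_ttl(iph);	/* also fixes up the header csum */
	return 0;
}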

/* Remove the VLAN header from the packet and update the csum accordingly.
 * Expects a non-skb_vlan_tag_present skb with a VLAN tag payload.
 */
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	int offset = skb->data - skb_mac_header(skb);
	int err;

	if (WARN_ONCE(offset,
		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
		      offset)) {
		return -EINVAL;
	}

	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);

	return err;
}
EXPORT_SYMBOL(__skb_vlan_pop);

/* Pop a vlan tag either from hwaccel or from payload.
 * Expects skb->data at mac header.
 */
int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		__vlan_hwaccel_clear_tag(skb);
	} else {
		if (unlikely(!eth_type_vlan(skb->protocol)))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely(!eth_type_vlan(skb->protocol)))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
 * Expects skb->data at mac header.
 */
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		int offset = skb->data - skb_mac_header(skb);
		int err;

		if (WARN_ONCE(offset,
			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
			      offset)) {
			return -EINVAL;
		}

		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err)
			return err;

		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;

		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
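
/* Sketch (not part of the original file): rewriting the outermost VLAN tag
 * (act_vlan "modify"-style) by popping the current tag and pushing the
 * replacement. vlan_modify_sketch is a hypothetical name; both helpers
 * expect skb->data at the mac header.
 */
static inline int vlan_modify_sketch(struct sk_buff *skb,
				     __be16 vlan_proto, u16 vlan_tci)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;

	return skb_vlan_push(skb, vlan_proto, vlan_tci);
}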

/**
 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
 *
 * @skb: Socket buffer to modify
 *
 * Drop the Ethernet header of @skb.
 *
 * Expects that skb->data points to the mac header and that no VLAN tags are
 * present.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_eth_pop(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
	    skb_network_offset(skb) < ETH_HLEN)
		return -EPROTO;

	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	return 0;
}
EXPORT_SYMBOL(skb_eth_pop);

/**
 * skb_eth_push() - Add a new Ethernet header at the head of a packet
 *
 * @skb: Socket buffer to modify
 * @dst: Destination MAC address of the new header
 * @src: Source MAC address of the new header
 *
 * Prepend @skb with a new Ethernet header.
 *
 * Expects that skb->data points to the mac header, which must be empty.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
		 const unsigned char *src)
{
	struct ethhdr *eth;
	int err;

	if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
		return -EPROTO;

	err = skb_cow_head(skb, sizeof(*eth));
	if (err < 0)
		return err;

	skb_push(skb, sizeof(*eth));
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	eth = eth_hdr(skb);
	ether_addr_copy(eth->h_dest, dst);
	ether_addr_copy(eth->h_source, src);
	eth->h_proto = skb->protocol;

	skb_postpush_rcsum(skb, eth, sizeof(*eth));

	return 0;
}
EXPORT_SYMBOL(skb_eth_push);
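
/* Usage sketch (illustrative addition): stripping and later re-adding the
 * Ethernet header, e.g. around an L3 processing step. example_readdr()
 * and the new_dst/new_src addresses are hypothetical; skb_eth_pop() and
 * skb_eth_push() are the helpers defined above. After a successful pop,
 * skb->data sits at the network header, which is exactly the layout
 * skb_eth_push() expects.
 *
 *	static int example_readdr(struct sk_buff *skb,
 *				  const unsigned char *new_dst,
 *				  const unsigned char *new_src)
 *	{
 *		int err;
 *
 *		err = skb_eth_pop(skb);
 *		if (err)
 *			return err;
 *		return skb_eth_push(skb, new_dst, new_src);
 *	}
 */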

/* Update the ethertype of hdr and the skb csum value if required. */
static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
			     __be16 ethertype)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be16 diff[] = { ~hdr->h_proto, ethertype };

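		/* Ones'-complement trick (cf. RFC 1624): folding in ~old
		 * followed by the new ethertype is equivalent to subtracting
		 * the old value from and adding the new value to the running
		 * sum, so csum_partial() over the { ~old, new } pair patches
		 * skb->csum for the rewrite below.
		 */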
		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	hdr->h_proto = ethertype;
}

/**
 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
 *                   the packet
 *
 * @skb: buffer
 * @mpls_lse: MPLS label stack entry to push
 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
 *            ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
		  int mac_len, bool ethernet)
{
	struct mpls_shim_hdr *lse;
	int err;

	if (unlikely(!eth_p_mpls(mpls_proto)))
		return -EINVAL;

	/* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -EINVAL;

	err = skb_cow_head(skb, MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb_network_offset(skb));
		skb_set_inner_protocol(skb, skb->protocol);
	}

	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);
	skb_reset_mac_len(skb);

	lse = mpls_hdr(skb);
	lse->label_stack_entry = mpls_lse;
	skb_postpush_rcsum(skb, lse, MPLS_HLEN);

	if (ethernet && mac_len >= ETH_HLEN)
		skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
	skb->protocol = mpls_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_push);

/**
 * skb_mpls_pop() - pop the outermost MPLS header
 *
 * @skb: buffer
 * @next_proto: ethertype of header after popped MPLS header
 * @mac_len: length of the MAC header
 * @ethernet: flag to indicate if the packet is ethernet
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
		 bool ethernet)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return 0;

	err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
	memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
		mac_len);

	__skb_pull(skb, MPLS_HLEN);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, mac_len);

	if (ethernet && mac_len >= ETH_HLEN) {
		struct ethhdr *hdr;

		/* use mpls_hdr() to get ethertype to account for VLANs. */
		hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
		skb_mod_eth_type(skb, hdr, next_proto);
	}
	skb->protocol = next_proto;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_pop);
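
/* Usage sketch (illustrative addition): pushing a single MPLS label in
 * front of an IPv4 packet carried in an Ethernet frame, then popping it
 * again. The label value 100 and example_mpls_roundtrip() are
 * hypothetical; the MPLS_LS_* shift macros come from
 * include/uapi/linux/mpls.h.
 *
 *	static int example_mpls_roundtrip(struct sk_buff *skb)
 *	{
 *		__be32 lse = cpu_to_be32((100 << MPLS_LS_LABEL_SHIFT) |
 *					 (1 << MPLS_LS_S_SHIFT) |
 *					 (64 << MPLS_LS_TTL_SHIFT));
 *		int err;
 *
 *		err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
 *				    skb->mac_len, true);
 *		if (err)
 *			return err;
 *		return skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len,
 *				    true);
 *	}
 */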

/**
 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
 *
 * @skb: buffer
 * @mpls_lse: new MPLS label stack entry to update to
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
{
	int err;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
	if (unlikely(err))
		return err;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		__be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };

		skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
	}

	mpls_hdr(skb)->label_stack_entry = mpls_lse;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_mpls_update_lse);

/**
 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
 *
 * @skb: buffer
 *
 * Expects skb->data at mac header.
 *
 * Returns 0 on success, -errno otherwise.
 */
int skb_mpls_dec_ttl(struct sk_buff *skb)
{
	u32 lse;
	u8 ttl;

	if (unlikely(!eth_p_mpls(skb->protocol)))
		return -EINVAL;

	if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
		return -ENOMEM;

	lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
	ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
	if (!--ttl)
		return -EINVAL;

	lse &= ~MPLS_LS_TTL_MASK;
	lse |= ttl << MPLS_LS_TTL_SHIFT;

	return skb_mpls_update_lse(skb, cpu_to_be32(lse));
}
EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
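
/* Background note (illustrative addition): the 32-bit label stack entry
 * manipulated above is laid out as label:20 | TC:3 | S:1 | TTL:8 (see
 * include/uapi/linux/mpls.h), which is why the helpers mask and shift
 * with MPLS_LS_*_MASK / MPLS_LS_*_SHIFT. Reading the TTL by hand, as
 * skb_mpls_dec_ttl() does:
 *
 *	u32 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
 *	u8 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
 */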

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	int i;

	*errcode = -EMSGSIZE;
	/* Note: this test could be relaxed if we succeed in allocating
	 * high-order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_mask);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
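
/* Usage sketch (illustrative addition): allocating an skb with 128 bytes
 * of linear headroom and 8 KB spread over page frags. The surrounding
 * caller is hypothetical; compare sock_alloc_send_pskb(), which uses
 * this helper with PAGE_ALLOC_COSTLY_ORDER in much the same way.
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(128, 8192, PAGE_ALLOC_COSTLY_ORDER,
 *				   &errcode, GFP_KERNEL);
 *	if (!skb)
 *		return errcode;		// -EMSGSIZE or -ENOBUFS
 *	skb_reserve(skb, 128);		// linear part for protocol headers
 */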

/* carve out the first off bytes from skb when off < headlen */
static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
				    const int headlen, gfp_t gfp_mask)
{
	int i;
	int size = skb_end_offset(skb);
	int new_hlen = headlen - off;
	u8 *data;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy real data, and all frags */
	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
	skb->len -= off;

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info,
			frags[skb_shinfo(skb)->nr_frags]));
	if (skb_cloned(skb)) {
		/* drop the old head gracefully */
		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree(data);
			return -ENOMEM;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);
		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);
		skb_release_data(skb);
	} else {
		/* we can reuse the existing refcount - all we did was
		 * relocate values
		 */
		skb_free_head(skb);
	}

	skb->head = data;
	skb->data = data;
	skb->head_frag = 0;
	skb_set_end_offset(skb, size);
	skb_set_tail_pointer(skb, skb_headlen(skb));
	skb_headers_offset_update(skb, 0);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	return 0;
}

static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);

/* carve out the first eat bytes from skb's frag_list. May recurse into
 * pskb_carve()
 */
static int pskb_carve_frag_list(struct sk_buff *skb,
				struct skb_shared_info *shinfo, int eat,
				gfp_t gfp_mask)
{
	struct sk_buff *list = shinfo->frag_list;
	struct sk_buff *clone = NULL;
	struct sk_buff *insp = NULL;

	do {
		if (!list) {
			pr_err("Not enough bytes to eat. Want %d\n", eat);
			return -EFAULT;
		}
		if (list->len <= eat) {
			/* Eaten as whole. */
			eat -= list->len;
			list = list->next;
			insp = list;
		} else {
			/* Eaten partially. */
			if (skb_shared(list)) {
				clone = skb_clone(list, gfp_mask);
				if (!clone)
					return -ENOMEM;
				insp = list->next;
				list = clone;
			} else {
				/* This may be pulled without problems. */
				insp = list;
			}
			if (pskb_carve(list, eat, gfp_mask) < 0) {
				kfree_skb(clone);
				return -ENOMEM;
			}
			break;
		}
	} while (eat);

	/* Free pulled out fragments. */
	while ((list = shinfo->frag_list) != insp) {
		shinfo->frag_list = list->next;
		consume_skb(list);
	}
	/* And insert new clone at head. */
	if (clone) {
		clone->next = list;
		shinfo->frag_list = clone;
	}
	return 0;
}
/* carve off the first off bytes from skb. The split line (off) lies in the
 * non-linear part of skb.
 */
static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
				       int pos, gfp_t gfp_mask)
{
	int i, k = 0;
	int size = skb_end_offset(skb);
	u8 *data;
	const int nfrags = skb_shinfo(skb)->nr_frags;
	struct skb_shared_info *shinfo;

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size +
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		return -ENOMEM;

	size = SKB_WITH_OVERHEAD(ksize(data));

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
	if (skb_orphan_frags(skb, gfp_mask)) {
		kfree(data);
		return -ENOMEM;
	}
	shinfo = (struct skb_shared_info *)(data + size);
	for (i = 0; i < nfrags; i++) {
		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + fsize > off) {
			shinfo->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < off) {
				/* Split frag.
				 * We have two options in this case:
				 * 1. Move the whole frag to the second
				 *    part, if possible (this approach is
				 *    mandatory e.g. for TUX, where
				 *    splitting is expensive).
				 * 2. Split the frag accurately, which is
				 *    what we do here.
				 */
				skb_frag_off_add(&shinfo->frags[0], off - pos);
				skb_frag_size_sub(&shinfo->frags[0], off - pos);
			}
			skb_frag_ref(skb, i);
			k++;
		}
		pos += fsize;
	}
	shinfo->nr_frags = k;
	if (skb_has_frag_list(skb))
		skb_clone_fraglist(skb);

	/* split line is in frag list */
	if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
		/* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
		if (skb_has_frag_list(skb))
			kfree_skb_list(skb_shinfo(skb)->frag_list);
		kfree(data);
		return -ENOMEM;
	}
	skb_release_data(skb);

	skb->head = data;
	skb->head_frag = 0;
	skb->data = data;
	skb_set_end_offset(skb, size);
	skb_reset_tail_pointer(skb);
	skb_headers_offset_update(skb, 0);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	skb->len -= off;
	skb->data_len = skb->len;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;
}

/* remove len bytes from the beginning of the skb */
static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
{
	int headlen = skb_headlen(skb);

	if (len < headlen)
		return pskb_carve_inside_header(skb, len, headlen, gfp);
	else
		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
}

/* Extract to_copy bytes starting at off from skb, and return this in
 * a new skb
 */
struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
			     int to_copy, gfp_t gfp)
{
	struct sk_buff  *clone = skb_clone(skb, gfp);

	if (!clone)
		return NULL;

	if (pskb_carve(clone, off, gfp) < 0 ||
	    pskb_trim(clone, to_copy)) {
		kfree_skb(clone);
		return NULL;
	}
	return clone;
}
EXPORT_SYMBOL(pskb_extract);
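
/* Usage sketch (illustrative addition): pulling bytes [off, off + len)
 * of an existing skb into a fresh skb. Frag payloads are not copied,
 * only reference-counted, which is what makes this cheaper than a full
 * copy. The off/len variables are hypothetical; the RDS receive path is
 * one in-tree user of this helper.
 *
 *	struct sk_buff *part;
 *
 *	part = pskb_extract(skb, off, len, GFP_KERNEL);
 *	if (!part)
 *		return -ENOMEM;
 */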

/**
 * skb_condense - try to get rid of fragments/frag_list if possible
 * @skb: buffer
 *
 * Can be used to save memory before skb is added to a busy queue.
 * If the packet has bytes in frags and enough tail room in skb->head,
 * pull all of them, so that we can free the frags right away and adjust
 * truesize.
 * Notes:
 *	We do not reallocate skb->head thus cannot fail.
 *	Caller must re-evaluate skb->truesize if needed.
 */
void skb_condense(struct sk_buff *skb)
{
	if (skb->data_len) {
		if (skb->data_len > skb->end - skb->tail ||
		    skb_cloned(skb))
			return;

		/* Nice, we can free page frag(s) right now */
		__pskb_pull_tail(skb, skb->data_len);
	}
	/* At this point, skb->truesize might be overestimated,
	 * because skb had a fragment, and fragments do not tell
	 * their truesize.
	 * When we pulled its content into skb->head, the fragment
	 * was freed, but __pskb_pull_tail() could not possibly
	 * adjust skb->truesize, not knowing the frag truesize.
	 */
	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
}
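
/* Usage note (illustrative addition): tcp_add_backlog() is one in-tree
 * caller; it condenses the skb before charging it against the socket's
 * backlog limit, since skb_condense() rewrites skb->truesize and, per
 * the note above, the caller must redo any truesize-based accounting.
 */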

#ifdef CONFIG_SKB_EXTENSIONS
static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
{
	return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
}

/**
 * __skb_ext_alloc - allocate a new skb extensions storage
 *
 * @flags: See kmalloc().
 *
 * Returns the newly allocated pointer. The pointer can later be attached to
 * an skb via __skb_ext_set().
 * Note: caller must handle the skb_ext as opaque data.
 */
struct skb_ext *__skb_ext_alloc(gfp_t flags)
{
	struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);

	if (new) {
		memset(new->offset, 0, sizeof(new->offset));
		refcount_set(&new->refcnt, 1);
	}

	return new;
}

static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
					 unsigned int old_active)
{
	struct skb_ext *new;

	if (refcount_read(&old->refcnt) == 1)
		return old;

	new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
	if (!new)
		return NULL;

	memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
	refcount_set(&new->refcnt, 1);

#ifdef CONFIG_XFRM
	if (old_active & (1 << SKB_EXT_SEC_PATH)) {
		struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
		unsigned int i;

		for (i = 0; i < sp->len; i++)
			xfrm_state_hold(sp->xvec[i]);
	}
#endif
	__skb_ext_put(old);
	return new;
}

/**
 * __skb_ext_set - attach the specified extension storage to this skb
 * @skb: buffer
 * @id: extension id
 * @ext: extension storage previously allocated via __skb_ext_alloc()
 *
 * Existing extensions, if any, are cleared.
 *
 * Returns the pointer to the extension.
 */
void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
		    struct skb_ext *ext)
{
	unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);

	skb_ext_put(skb);
	newlen = newoff + skb_ext_type_len[id];
	ext->chunks = newlen;
	ext->offset[id] = newoff;
	skb->extensions = ext;
	skb->active_extensions = 1 << id;
	return skb_ext_get_ptr(ext, id);
}

/**
 * skb_ext_add - allocate space for given extension, COW if needed
 * @skb: buffer
 * @id: extension to allocate space for
 *
 * Allocates enough space for the given extension.
 * If the extension is already present, a pointer to that extension
 * is returned.
 *
 * If the skb was cloned, COW applies and the returned memory can be
 * modified without changing the extension space of cloned buffers.
 *
 * Returns pointer to the extension or NULL on allocation failure.
 */
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *new, *old = NULL;
	unsigned int newlen, newoff;

	if (skb->active_extensions) {
		old = skb->extensions;

		new = skb_ext_maybe_cow(old, skb->active_extensions);
		if (!new)
			return NULL;

		if (__skb_ext_exist(new, id))
			goto set_active;

		newoff = new->chunks;
	} else {
		newoff = SKB_EXT_CHUNKSIZEOF(*new);

		new = __skb_ext_alloc(GFP_ATOMIC);
		if (!new)
			return NULL;
	}

	newlen = newoff + skb_ext_type_len[id];
	new->chunks = newlen;
	new->offset[id] = newoff;
set_active:
	skb->extensions = new;
	skb->active_extensions |= 1 << id;
	return skb_ext_get_ptr(new, id);
}
EXPORT_SYMBOL(skb_ext_add);
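
/* Usage sketch (illustrative addition): attaching an extension and
 * initializing its payload, here SKB_EXT_SEC_PATH (only available with
 * CONFIG_XFRM). The surrounding function is hypothetical; compare
 * secpath_set(), which wraps essentially this call.
 *
 *	struct sec_path *sp;
 *
 *	sp = skb_ext_add(skb, SKB_EXT_SEC_PATH);
 *	if (!sp)
 *		return -ENOMEM;
 *	sp->len = 0;	// caller initializes the extension payload
 */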

#ifdef CONFIG_XFRM
static void skb_ext_put_sp(struct sec_path *sp)
{
	unsigned int i;

	for (i = 0; i < sp->len; i++)
		xfrm_state_put(sp->xvec[i]);
}
#endif

void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
{
	struct skb_ext *ext = skb->extensions;

	skb->active_extensions &= ~(1 << id);
	if (skb->active_extensions == 0) {
		skb->extensions = NULL;
		__skb_ext_put(ext);
#ifdef CONFIG_XFRM
	} else if (id == SKB_EXT_SEC_PATH &&
		   refcount_read(&ext->refcnt) == 1) {
		struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);

		skb_ext_put_sp(sp);
		sp->len = 0;
#endif
	}
}
EXPORT_SYMBOL(__skb_ext_del);

void __skb_ext_put(struct skb_ext *ext)
{
	/* If this is the last clone, nothing can increment
	 * the refcount after the check passes.  Avoids one atomic op.
	 */
	if (refcount_read(&ext->refcnt) == 1)
		goto free_now;

	if (!refcount_dec_and_test(&ext->refcnt))
		return;
free_now:
#ifdef CONFIG_XFRM
	if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
		skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
#endif

	kmem_cache_free(skbuff_ext_cache, ext);
}
EXPORT_SYMBOL(__skb_ext_put);
#endif /* CONFIG_SKB_EXTENSIONS */