// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * inet fragments management
 *
 * Authors:	Pavel Emelyanov <xemul@openvz.org>
 *		Started as consolidation of ipv4/ip_fragment.c,
 *		ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/rhashtable.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/ipv6.h>

/* Use skb->cb to track consecutive/adjacent fragments coming at
 * the end of the queue. Nodes in the rb-tree queue will
 * contain "runs" of one or more adjacent fragments.
 *
 * Invariants:
 * - next_frag is NULL at the tail of a "run";
 * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
 */
struct ipfrag_skb_cb {
	union {
		struct inet_skb_parm	h4;
		struct inet6_skb_parm	h6;
	};
	struct sk_buff		*next_frag;
	int			frag_run_len;
};

#define FRAG_CB(skb)		((struct ipfrag_skb_cb *)((skb)->cb))

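/* Start a fresh single-skb "run": detach the skb from the rb-tree and
 * account only its own length in frag_run_len.
 */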
static void fragcb_clear(struct sk_buff *skb)
{
	RB_CLEAR_NODE(&skb->rbnode);
	FRAG_CB(skb)->next_frag = NULL;
	FRAG_CB(skb)->frag_run_len = skb->len;
}

/* Append skb to the last "run". */
static void fragrun_append_to_last(struct inet_frag_queue *q,
				   struct sk_buff *skb)
{
	fragcb_clear(skb);

	FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
	FRAG_CB(q->fragments_tail)->next_frag = skb;
	q->fragments_tail = skb;
}

/* Create a new "run" with the skb. */
static void fragrun_create(struct inet_frag_queue *q, struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
	fragcb_clear(skb);

	if (q->last_run_head)
		rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
			     &q->last_run_head->rbnode.rb_right);
	else
		rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
	rb_insert_color(&skb->rbnode, &q->rb_fragments);

	q->fragments_tail = skb;
	q->last_run_head = skb;
}

/* Given the OR of the ECN values of all fragments, apply RFC 3168
 * section 5.3 requirements.
 * Value: 0xff if the frame should be dropped;
 *        0 or INET_ECN_CE, to be ORed into the final iph->tos field.
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};
EXPORT_SYMBOL(ip_frag_ecn_table);

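/* Register a reassembly protocol: create the kmem_cache backing its
 * queue objects and set up the refcount/completion pair that lets
 * inet_frags_fini() wait out any in-flight fqdir destruction.
 */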
int inet_frags_init(struct inet_frags *f)
{
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	refcount_set(&f->refcnt, 1);
	init_completion(&f->completion);
	return 0;
}
EXPORT_SYMBOL(inet_frags_init);

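/* Unregister a reassembly protocol: drop the initial reference from
 * inet_frags_init(), wait until every fqdir destroy work has finished,
 * then release the kmem_cache.
 */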
void inet_frags_fini(struct inet_frags *f)
{
	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	wait_for_completion(&f->completion);

	kmem_cache_destroy(f->frags_cachep);
	f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);

/* called from rhashtable_free_and_destroy() at netns_frags dismantle */
static void inet_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;
	}
	spin_unlock_bh(&fq->lock);

	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}

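/* Deferred fqdir teardown: purge all queues still hashed, wait for
 * pending inet_frag_destroy_rcu() callbacks (they dereference the fqdir),
 * then drop the protocol reference and free the fqdir.
 */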
static void fqdir_work_fn(struct work_struct *work)
{
	struct fqdir *fqdir = container_of(work, struct fqdir, destroy_work);
	struct inet_frags *f = fqdir->f;

	rhashtable_free_and_destroy(&fqdir->rhashtable, inet_frags_free_cb, NULL);

	/* We need to make sure all ongoing call_rcu(..., inet_frag_destroy_rcu)
	 * have completed, since they need to dereference fqdir.
	 * Would it not be nice to have kfree_rcu_barrier() ? :)
	 */
	rcu_barrier();

	if (refcount_dec_and_test(&f->refcnt))
		complete(&f->completion);

	kfree(fqdir);
}

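/* Allocate and initialize the per-netns fqdir for protocol @f, taking a
 * reference on @f that fqdir_work_fn() drops at dismantle time.
 */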
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net)
{
	struct fqdir *fqdir = kzalloc(sizeof(*fqdir), GFP_KERNEL);
	int res;

	if (!fqdir)
		return -ENOMEM;
	fqdir->f = f;
	fqdir->net = net;
	res = rhashtable_init(&fqdir->rhashtable, &fqdir->f->rhash_params);
	if (res < 0) {
		kfree(fqdir);
		return res;
	}
	refcount_inc(&f->refcnt);
	*fqdirp = fqdir;
	return 0;
}
EXPORT_SYMBOL(fqdir_init);

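/* Start fqdir dismantle: the actual hash table purge and rcu_barrier()
 * run from fqdir_work_fn() on a workqueue, so the caller never blocks.
 */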
void fqdir_exit(struct fqdir *fqdir)
{
	INIT_WORK(&fqdir->destroy_work, fqdir_work_fn);
	queue_work(system_wq, &fqdir->destroy_work);
}
EXPORT_SYMBOL(fqdir_exit);

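/* Mark a queue complete and release the references held by its timer and
 * by the hash table (unless the table is already being torn down, in which
 * case INET_FRAG_HASH_DEAD defers that drop to inet_frags_free_cb()).
 * Callers are expected to hold fq->lock.
 */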
void inet_frag_kill(struct inet_frag_queue *fq)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		struct fqdir *fqdir = fq->fqdir;

		fq->flags |= INET_FRAG_COMPLETE;
		rcu_read_lock();
		/* The RCU read lock provides a memory barrier
		 * guaranteeing that if fqdir->dead is false then
		 * the hash table destruction will not start until
		 * after we unlock. Paired with fqdir_pre_exit().
		 */
		if (!READ_ONCE(fqdir->dead)) {
			rhashtable_remove_fast(&fqdir->rhashtable, &fq->node,
					       fqdir->f->rhash_params);
			refcount_dec(&fq->refcnt);
		} else {
			fq->flags |= INET_FRAG_HASH_DEAD;
		}
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(inet_frag_kill);

static void inet_frag_destroy_rcu(struct rcu_head *head)
{
	struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
						 rcu);
	struct inet_frags *f = q->fqdir->f;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);
}

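/* Free every skb in the rb-tree, following each run's next_frag chain,
 * and return the total truesize freed so the caller can fix up the
 * memory accounting.
 */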
unsigned int inet_frag_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		while (skb) {
			struct sk_buff *next = FRAG_CB(skb)->next_frag;

			sum += skb->truesize;
			kfree_skb(skb);
			skb = next;
		}
	}
	return sum;
}
EXPORT_SYMBOL(inet_frag_rbtree_purge);

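/* Final teardown of a completed queue: release all queued fragments,
 * uncharge them (plus the queue object itself, f->qsize) from the memory
 * pool, and free the queue after an RCU grace period.
 */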
void inet_frag_destroy(struct inet_frag_queue *q)
{
	struct fqdir *fqdir;
	unsigned int sum, sum_truesize = 0;
	struct inet_frags *f;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fqdir = q->fqdir;
	f = fqdir->f;
	sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
	sum = sum_truesize + f->qsize;

	call_rcu(&q->rcu, inet_frag_destroy_rcu);

	sub_frag_mem_limit(fqdir, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);

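/* Allocate a queue and run the protocol constructor on it. The refcount
 * starts at 3: one reference for the expiry timer, one for the hash
 * table, and one for the caller.
 */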
static struct inet_frag_queue *inet_frag_alloc(struct fqdir *fqdir,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->fqdir = fqdir;
	f->constructor(q, arg);
	add_frag_mem_limit(fqdir, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 3);

	return q;
}

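/* Allocate a queue, arm its expiry timer and insert it into the hash
 * table. If another CPU raced us and inserted the same key first, the
 * fresh queue is destroyed and the winner is returned through @prev.
 */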
static struct inet_frag_queue *inet_frag_create(struct fqdir *fqdir,
						void *arg,
						struct inet_frag_queue **prev)
{
	struct inet_frags *f = fqdir->f;
	struct inet_frag_queue *q;

	q = inet_frag_alloc(fqdir, f, arg);
	if (!q) {
		*prev = ERR_PTR(-ENOMEM);
		return NULL;
	}
	mod_timer(&q->timer, jiffies + fqdir->timeout);

	*prev = rhashtable_lookup_get_insert_key(&fqdir->rhashtable, &q->key,
						 &q->node, f->rhash_params);
	if (*prev) {
		q->flags |= INET_FRAG_COMPLETE;
		inet_frag_kill(q);
		inet_frag_destroy(q);
		return NULL;
	}
	return q;
}

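/* Find the queue matching @key, creating it if none exists. On success a
 * reference is taken for the caller; returns NULL when the memory limit
 * is exceeded, the fqdir is going away (high_thresh forced to 0), or
 * allocation fails.
 */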
/* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key)
{
	/* This pairs with WRITE_ONCE() in fqdir_pre_exit(). */
	long high_thresh = READ_ONCE(fqdir->high_thresh);
	struct inet_frag_queue *fq = NULL, *prev;

	if (!high_thresh || frag_mem_limit(fqdir) > high_thresh)
		return NULL;

	rcu_read_lock();

	prev = rhashtable_lookup(&fqdir->rhashtable, key, fqdir->f->rhash_params);
	if (!prev)
		fq = inet_frag_create(fqdir, key, &prev);
	if (!IS_ERR_OR_NULL(prev)) {
		fq = prev;
		if (!refcount_inc_not_zero(&fq->refcnt))
			fq = NULL;
	}
	rcu_read_unlock();
	return fq;
}
EXPORT_SYMBOL(inet_frag_find);

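/* Insert @skb, covering bytes [@offset, @end), into the queue. Returns
 * IPFRAG_OK on success, IPFRAG_DUP if the data is already fully covered
 * by a queued run (drop the skb, keep the queue), or IPFRAG_OVERLAP on a
 * partial overlap (the whole queue must be discarded, per RFC 5722).
 */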
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end)
{
	struct sk_buff *last = q->fragments_tail;

	/* RFC5722, Section 4, amended by Errata ID : 3089
	 *                          When reassembling an IPv6 datagram, if
	 *   one or more of its constituent fragments is determined to be an
	 *   overlapping fragment, the entire datagram (and any constituent
	 *   fragments) MUST be silently discarded.
	 *
	 * Duplicates, however, should be ignored (i.e. skb dropped, but the
	 * queue/fragments kept for later reassembly).
	 */
	if (!last)
		fragrun_create(q, skb);  /* First fragment. */
	else if (last->ip_defrag_offset + last->len < end) {
		/* This is the common case: skb goes to the end. */
		/* Detect and discard overlaps. */
		if (offset < last->ip_defrag_offset + last->len)
			return IPFRAG_OVERLAP;
		if (offset == last->ip_defrag_offset + last->len)
			fragrun_append_to_last(q, skb);
		else
			fragrun_create(q, skb);
	} else {
		/* Binary search. Note that skb can become the first fragment,
		 * but not the last (covered above).
		 */
		struct rb_node **rbn, *parent;

		rbn = &q->rb_fragments.rb_node;
		do {
			struct sk_buff *curr;
			int curr_run_end;

			parent = *rbn;
			curr = rb_to_skb(parent);
			curr_run_end = curr->ip_defrag_offset +
					FRAG_CB(curr)->frag_run_len;
			if (end <= curr->ip_defrag_offset)
				rbn = &parent->rb_left;
			else if (offset >= curr_run_end)
				rbn = &parent->rb_right;
			else if (offset >= curr->ip_defrag_offset &&
				 end <= curr_run_end)
				return IPFRAG_DUP;
			else
				return IPFRAG_OVERLAP;
		} while (*rbn);
		/* Here we have parent properly set, and rbn pointing to
		 * one of its NULL left/right children. Insert skb.
		 */
		fragcb_clear(skb);
		rb_link_node(&skb->rbnode, parent, rbn);
		rb_insert_color(&skb->rbnode, &q->rb_fragments);
	}

	skb->ip_defrag_offset = offset;

	return IPFRAG_OK;
}
EXPORT_SYMBOL(inet_frag_queue_insert);

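/* Prepare reassembly once all fragments have arrived: morph @skb into the
 * first fragment of the queue (cloning @skb back into its old slot, found
 * via @parent when it sits inside a run), un-clone the head, and split off
 * any pre-existing frag_list into a separate chunk. Returns the address at
 * which inet_frag_reasm_finish() should link the remaining fragments, or
 * NULL on allocation failure.
 */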
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent)
{
	struct sk_buff *fp, *head = skb_rb_first(&q->rb_fragments);
	struct sk_buff **nextp;
	int delta;

	if (head != skb) {
		fp = skb_clone(skb, GFP_ATOMIC);
		if (!fp)
			return NULL;
		FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
		if (RB_EMPTY_NODE(&skb->rbnode))
			FRAG_CB(parent)->next_frag = fp;
		else
			rb_replace_node(&skb->rbnode, &fp->rbnode,
					&q->rb_fragments);
		if (q->fragments_tail == skb)
			q->fragments_tail = fp;
		skb_morph(skb, head);
		FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
		consume_skb(head);
		head = skb;
	}
	WARN_ON(head->ip_defrag_offset != 0);

	delta = -head->truesize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		return NULL;

	delta += head->truesize;
	if (delta)
		add_frag_mem_limit(q->fqdir, delta);

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			return NULL;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->data_len = head->data_len - plen;
		clone->len = clone->data_len;
		head->truesize += clone->truesize;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(q->fqdir, clone->truesize);
		skb_shinfo(head)->frag_list = clone;
		nextp = &clone->next;
	} else {
		nextp = &skb_shinfo(head)->frag_list;
	}

	return nextp;
}
EXPORT_SYMBOL(inet_frag_reasm_prepare);

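/* Walk the rb-tree in order, chaining every remaining fragment onto the
 * head's frag_list (or coalescing it into @head when @try_coalesce is
 * set), fixing up len, truesize and checksum as we go, and finally
 * uncharge the consumed memory from the pool.
 */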
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce)
{
	struct sk_buff **nextp = (struct sk_buff **)reasm_data;
	struct rb_node *rbn;
	struct sk_buff *fp;
	int sum_truesize;

	skb_push(head, head->data - skb_network_header(head));

	/* Traverse the tree in order, to build frag_list. */
	fp = FRAG_CB(head)->next_frag;
	rbn = rb_next(&head->rbnode);
	rb_erase(&head->rbnode, &q->rb_fragments);

	sum_truesize = head->truesize;
	while (rbn || fp) {
		/* fp points to the next sk_buff in the current run;
		 * rbn points to the next run.
		 */
		/* Go through the current run. */
		while (fp) {
			struct sk_buff *next_frag = FRAG_CB(fp)->next_frag;
			bool stolen;
			int delta;

			sum_truesize += fp->truesize;
			if (head->ip_summed != fp->ip_summed)
				head->ip_summed = CHECKSUM_NONE;
			else if (head->ip_summed == CHECKSUM_COMPLETE)
				head->csum = csum_add(head->csum, fp->csum);

			if (try_coalesce && skb_try_coalesce(head, fp, &stolen,
							     &delta)) {
				kfree_skb_partial(fp, stolen);
			} else {
				fp->prev = NULL;
				memset(&fp->rbnode, 0, sizeof(fp->rbnode));
				fp->sk = NULL;

				head->data_len += fp->len;
				head->len += fp->len;
				head->truesize += fp->truesize;

				*nextp = fp;
				nextp = &fp->next;
			}

			fp = next_frag;
		}
		/* Move to the next run. */
		if (rbn) {
			struct rb_node *rbnext = rb_next(rbn);

			fp = rb_to_skb(rbn);
			rb_erase(rbn, &q->rb_fragments);
			rbn = rbnext;
		}
	}
	sub_frag_mem_limit(q->fqdir, sum_truesize);

	*nextp = NULL;
	skb_mark_not_on_list(head);
	head->prev = NULL;
	head->tstamp = q->stamp;
}
EXPORT_SYMBOL(inet_frag_reasm_finish);

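/* Detach and return the first fragment of the queue, promoting the next
 * skb of its run into the vacated tree slot, and uncharge its truesize.
 * Returns NULL if the queue is empty.
 */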
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q)
{
	struct sk_buff *head, *skb;

	head = skb_rb_first(&q->rb_fragments);
	if (!head)
		return NULL;
	skb = FRAG_CB(head)->next_frag;
	if (skb)
		rb_replace_node(&head->rbnode, &skb->rbnode,
				&q->rb_fragments);
	else
		rb_erase(&head->rbnode, &q->rb_fragments);
	memset(&head->rbnode, 0, sizeof(head->rbnode));
	barrier();

	if (head == q->fragments_tail)
		q->fragments_tail = NULL;

	sub_frag_mem_limit(q->fqdir, head->truesize);

	return head;
}
EXPORT_SYMBOL(inet_frag_pull_head);