1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __NET_FRAG_H__
3*4882a593Smuzhiyun #define __NET_FRAG_H__
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/rhashtable-types.h>
6*4882a593Smuzhiyun #include <linux/completion.h>
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun /* Per netns frag queues directory */
/* Per netns frag queues directory */
struct fqdir {
	/* sysctls */
	long			high_thresh;	/* memory ceiling; set to 0 to refuse new frag queues (see fqdir_pre_exit()) */
	long			low_thresh;	/* reclaim target below high_thresh — presumably; confirm at eviction sites */
	int			timeout;	/* reassembly timeout — units presumably jiffies; confirm at timer arm site */
	int			max_dist;	/* family-specific distance limit — TODO confirm semantics at callers */
	struct inet_frags	*f;		/* per-family callbacks/parameters this directory belongs to */
	struct net		*net;		/* owning network namespace */
	bool			dead;		/* set by fqdir_pre_exit(); read via READ_ONCE() in expiry paths */

	struct rhashtable       rhashtable ____cacheline_aligned_in_smp;	/* frag queues keyed by frag_v4/v6_compare_key */

	/* Keep atomic mem on separate cachelines in structs that include it */
	atomic_long_t		mem ____cacheline_aligned_in_smp;	/* bytes charged via add/sub_frag_mem_limit() */
	struct work_struct	destroy_work;	/* deferred teardown work — presumably queued from fqdir_exit(); confirm */
};
25*4882a593Smuzhiyun
/**
 * fragment queue flags
 *
 * @INET_FRAG_FIRST_IN: first fragment has arrived
 * @INET_FRAG_LAST_IN: final fragment has arrived
 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
 * @INET_FRAG_HASH_DEAD: inet_frag_kill() has not removed fq from rhashtable
 */
enum {
	INET_FRAG_FIRST_IN	= BIT(0),
	INET_FRAG_LAST_IN	= BIT(1),
	INET_FRAG_COMPLETE	= BIT(2),
	INET_FRAG_HASH_DEAD	= BIT(3),
};
40*4882a593Smuzhiyun
/* rhashtable lookup key for IPv4 reassembly queues */
struct frag_v4_compare_key {
	__be32		saddr;		/* source address (network byte order) */
	__be32		daddr;		/* destination address (network byte order) */
	u32		user;		/* defrag context/owner — presumably IP_DEFRAG_* value; confirm at callers */
	u32		vif;		/* virtual interface id — TODO confirm exact meaning at callers */
	__be16		id;		/* IP header identification field (network byte order) */
	u16		protocol;	/* IP protocol number */
};
49*4882a593Smuzhiyun
/* rhashtable lookup key for IPv6 reassembly queues */
struct frag_v6_compare_key {
	struct in6_addr	saddr;		/* source address */
	struct in6_addr	daddr;		/* destination address */
	u32		user;		/* defrag context/owner — presumably IP6_DEFRAG_* value; confirm at callers */
	__be32		id;		/* fragment header identification (network byte order) */
	u32		iif;		/* incoming interface index — TODO confirm at callers */
};
57*4882a593Smuzhiyun
/**
 * struct inet_frag_queue - fragment queue
 *
 * @node: rhash node
 * @key: keys identifying this frag.
 * @timer: queue expiration timer
 * @lock: spinlock protecting this frag
 * @refcnt: reference count of the queue
 * @rb_fragments: received fragments rb-tree root
 * @fragments_tail: received fragments tail
 * @last_run_head: the head of the last "run". see ip_fragment.c
 * @stamp: timestamp of the last received fragment
 * @len: total length of the original datagram
 * @meat: length of received fragments so far
 * @flags: fragment queue flags
 * @max_size: maximum received fragment size
 * @fqdir: pointer to struct fqdir
 * @rcu: rcu head for freeing deferral
 */
struct inet_frag_queue {
	struct rhash_head	node;
	union {
		struct frag_v4_compare_key v4;
		struct frag_v6_compare_key v6;
	} key;
	struct timer_list	timer;
	spinlock_t		lock;
	refcount_t		refcnt;		/* dropped via inet_frag_put(); destroy on last put */
	struct rb_root		rb_fragments;
	struct sk_buff		*fragments_tail;
	struct sk_buff		*last_run_head;
	ktime_t			stamp;
	int			len;
	int			meat;
	__u8			flags;		/* INET_FRAG_* bits */
	u16			max_size;
	struct fqdir		*fqdir;
	struct rcu_head		rcu;
};
97*4882a593Smuzhiyun
/* Per-family reassembly parameters and callbacks, shared by all netns */
struct inet_frags {
	unsigned int		qsize;		/* size of the family-specific queue struct to allocate */

	/* initialize a freshly allocated queue from the lookup key @arg */
	void			(*constructor)(struct inet_frag_queue *q,
					       const void *arg);
	void			(*destructor)(struct inet_frag_queue *);	/* release family-private state */
	void			(*frag_expire)(struct timer_list *t);		/* queue expiration timer handler */
	struct kmem_cache	*frags_cachep;		/* slab cache the queues come from */
	const char		*frags_cache_name;	/* name used for @frags_cachep */
	struct rhashtable_params rhash_params;		/* hashing/compare rules for the key union */
	refcount_t		refcnt;
	struct completion	completion;	/* presumably completes when refcnt reaches zero; confirm in inet_frags_fini() */
};
111*4882a593Smuzhiyun
/* Register/unregister a fragment family described by struct inet_frags. */
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);

/* Allocate and initialize a per-netns fqdir for family @f, stored in *fqdirp. */
int fqdir_init(struct fqdir **fqdirp, struct inet_frags *f, struct net *net);
116*4882a593Smuzhiyun
/* First stage of netns teardown: stop new frag queues from being created
 * and mark the directory dead so expiry paths can bail out early.
 * Full teardown happens later in fqdir_exit().
 */
static inline void fqdir_pre_exit(struct fqdir *fqdir)
{
	/* Prevent creation of new frags.
	 * Pairs with READ_ONCE() in inet_frag_find().
	 */
	WRITE_ONCE(fqdir->high_thresh, 0);

	/* Pairs with READ_ONCE() in inet_frag_kill(), ip_expire()
	 * and ip6frag_expire_frag_queue().
	 */
	WRITE_ONCE(fqdir->dead, true);
}
/* Second stage of netns teardown; call after fqdir_pre_exit(). */
void fqdir_exit(struct fqdir *fqdir);

void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
/* Find (or create) the queue matching @key in @fqdir. */
struct inet_frag_queue *inet_frag_find(struct fqdir *fqdir, void *key);

/* Free all skbs in the queue; return the sum of their truesizes. */
unsigned int inet_frag_rbtree_purge(struct rb_root *root);
137*4882a593Smuzhiyun
inet_frag_put(struct inet_frag_queue * q)138*4882a593Smuzhiyun static inline void inet_frag_put(struct inet_frag_queue *q)
139*4882a593Smuzhiyun {
140*4882a593Smuzhiyun if (refcount_dec_and_test(&q->refcnt))
141*4882a593Smuzhiyun inet_frag_destroy(q);
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun /* Memory Tracking Functions. */
145*4882a593Smuzhiyun
/* Current memory (bytes) charged to this frag directory. */
static inline long frag_mem_limit(const struct fqdir *fqdir)
{
	return atomic_long_read(&fqdir->mem);
}
150*4882a593Smuzhiyun
/* Uncharge @val bytes from the directory's memory accounting. */
static inline void sub_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_sub(val, &fqdir->mem);
}
155*4882a593Smuzhiyun
/* Charge @val bytes to the directory's memory accounting. */
static inline void add_frag_mem_limit(struct fqdir *fqdir, long val)
{
	atomic_long_add(val, &fqdir->mem);
}
160*4882a593Smuzhiyun
/* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
 * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
 */
#define	IPFRAG_ECN_NOT_ECT	0x01 /* one frag had ECN_NOT_ECT */
#define	IPFRAG_ECN_ECT_1	0x02 /* one frag had ECN_ECT_1 */
#define	IPFRAG_ECN_ECT_0	0x04 /* one frag had ECN_ECT_0 */
#define	IPFRAG_ECN_CE		0x08 /* one frag had ECN_CE */

/* indexed by the OR of the IPFRAG_ECN_* bits above (16 combinations) */
extern const u8 ip_frag_ecn_table[16];
171*4882a593Smuzhiyun
/* Return values of inet_frag_queue_insert() */
#define IPFRAG_OK	0	/* fragment accepted into the queue */
#define IPFRAG_DUP	1	/* duplicate of already-received data — presumably dropped by caller; confirm */
#define IPFRAG_OVERLAP	2	/* overlaps existing data — presumably fatal for the queue; confirm at callers */
/* Insert skb covering [offset, end) of the original datagram into @q. */
int inet_frag_queue_insert(struct inet_frag_queue *q, struct sk_buff *skb,
			   int offset, int end);
void *inet_frag_reasm_prepare(struct inet_frag_queue *q, struct sk_buff *skb,
			      struct sk_buff *parent);
void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head,
			    void *reasm_data, bool try_coalesce);
/* Detach and return the first (lowest-offset) skb from @q. */
struct sk_buff *inet_frag_pull_head(struct inet_frag_queue *q);
183*4882a593Smuzhiyun
184*4882a593Smuzhiyun #endif
185