/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <net/ip.h>
#include <net/inetpeer.h>
#include <net/secure_seq.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains long-lived
 *  information about the peer which does not depend on routes.
 *
 *  Nodes are removed only when the reference counter goes to 0.
 *  Once that has happened, a node may be removed after a sufficient amount of
 *  time has passed since its last use.  The least-recently-used entries can
 *  also be removed if the pool is overloaded, i.e. if the total number of
 *  entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an RB tree.
 *  Such an implementation has been chosen not just for fun.  It is a way to
 *  prevent easy and efficient DoS attacks by creating hash collisions.  A huge
 *  number of long-lived nodes in a single hash slot would significantly delay
 *  lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool lock held.
 *  2.  Nodes may disappear from the tree only with the pool lock held
 *      AND the reference count being 0.
 *  3.  The per-base entry counter (base->total) is modified under the
 *      pool lock.
 *  4.  struct inet_peer field modification:
 *	  rb_node: pool lock
 *	  refcnt: atomically against modifications on other CPUs;
 *		  usually under some other lock to prevent the node from
 *		  disappearing
 *	  daddr: unchangeable
 */
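
/*
 * Usage sketch (hypothetical caller, not part of this file): consumers
 * obtain a referenced node with inet_getpeer() and must release it with
 * inet_putpeer().  The inetpeer_set_addr_v4() helper name is assumed to
 * come from net/inetpeer.h.
 */
#if 0
static void example_touch_peer(struct inet_peer_base *base, __be32 ip)
{
	struct inetpeer_addr daddr;
	struct inet_peer *peer;

	inetpeer_set_addr_v4(&daddr, ip);	/* assumed helper */
	peer = inet_getpeer(base, &daddr, 1);	/* create if missing */
	if (peer) {
		/* ... consult or update long-lived per-peer state ... */
		inet_putpeer(peer);		/* drop the reference */
	}
}
#endif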

static struct kmem_cache *peer_cachep __ro_after_init;

void inet_peer_base_init(struct inet_peer_base *bp)
{
	bp->rb_root = RB_ROOT;
	seqlock_init(&bp->lock);
	bp->total = 0;
}
EXPORT_SYMBOL_GPL(inet_peer_base_init);

#define PEER_MAX_GC 32

/* Exported for sysctl_net_ipv4. */
int inet_peer_threshold __read_mostly = 65536 + 128;	/* start to throw entries more
							 * aggressively at this stage */
int inet_peer_minttl __read_mostly = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Called from ip_output.c:ip_init */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768 * 1024) / PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384 * 1024) / PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192 * 1024) / PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
					sizeof(struct inet_peer),
					0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
					NULL);
}
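
/*
 * For illustration, with 4 KB pages the branches above compare totalram
 * against 8192, 4096 and 2048 pages (32 MB, 16 MB and 8 MB of RAM).  The
 * checks are cumulative, so a 16 MB machine takes the first two shifts:
 * inet_peer_threshold becomes (65536 + 128) >> 2 = 16416 entries, and an
 * 8 MB machine takes all four: (65536 + 128) >> 4 = 4104 entries.
 */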

/* Called with rcu_read_lock() or base->lock held */
static struct inet_peer *lookup(const struct inetpeer_addr *daddr,
				struct inet_peer_base *base,
				unsigned int seq,
				struct inet_peer *gc_stack[],
				unsigned int *gc_cnt,
				struct rb_node **parent_p,
				struct rb_node ***pp_p)
{
	struct rb_node **pp, *parent, *next;
	struct inet_peer *p;

	pp = &base->rb_root.rb_node;
	parent = NULL;
	while (1) {
		int cmp;

		next = rcu_dereference_raw(*pp);
		if (!next)
			break;
		parent = next;
		p = rb_entry(parent, struct inet_peer, rb_node);
		cmp = inetpeer_addr_cmp(daddr, &p->daddr);
		if (cmp == 0) {
			if (!refcount_inc_not_zero(&p->refcnt))
				break;
			return p;
		}
		if (gc_stack) {
			if (*gc_cnt < PEER_MAX_GC)
				gc_stack[(*gc_cnt)++] = p;
		} else if (unlikely(read_seqretry(&base->lock, seq))) {
			break;
		}
		if (cmp == -1)
			pp = &next->rb_left;
		else
			pp = &next->rb_right;
	}
	*parent_p = parent;
	*pp_p = pp;
	return NULL;
}
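
/*
 * Descent sketch: inetpeer_addr_cmp() returns exactly -1, 0 or 1, which is
 * why the walk above can test cmp == -1 rather than cmp < 0.  For a
 * hypothetical tree keyed so that 10.0.0.1 < 10.0.0.2 < 10.0.0.3, with
 * 10.0.0.2 at the root:
 *
 *	lookup(10.0.0.1): cmp == -1 at the root -> follow rb_left  -> match
 *	lookup(10.0.0.3): cmp ==  1 at the root -> follow rb_right -> match
 */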

static void inetpeer_free_rcu(struct rcu_head *head)
{
	kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu));
}

/* perform garbage collection on all items stacked during a lookup */
static void inet_peer_gc(struct inet_peer_base *base,
			 struct inet_peer *gc_stack[],
			 unsigned int gc_cnt)
{
	int peer_threshold, peer_maxttl, peer_minttl;
	struct inet_peer *p;
	__u32 delta, ttl;
	int i;

	peer_threshold = READ_ONCE(inet_peer_threshold);
	peer_maxttl = READ_ONCE(inet_peer_maxttl);
	peer_minttl = READ_ONCE(inet_peer_minttl);

	if (base->total >= peer_threshold)
		ttl = 0; /* be aggressive */
	else
		ttl = peer_maxttl - (peer_maxttl - peer_minttl) / HZ *
			base->total / peer_threshold * HZ;
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];

		/* The READ_ONCE() pairs with the WRITE_ONCE()
		 * in inet_putpeer()
		 */
		delta = (__u32)jiffies - READ_ONCE(p->dtime);

		if (delta < ttl || !refcount_dec_if_one(&p->refcnt))
			gc_stack[i] = NULL;
	}
	for (i = 0; i < gc_cnt; i++) {
		p = gc_stack[i];
		if (p) {
			rb_erase(&p->rb_node, &base->rb_root);
			base->total--;
			call_rcu(&p->rcu, inetpeer_free_rcu);
		}
	}
}
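
/*
 * TTL interpolation, worked through: the expression above decays linearly
 * from peer_maxttl on an empty pool towards peer_minttl as base->total
 * approaches peer_threshold.  With the default values (minttl = 120 * HZ,
 * maxttl = 600 * HZ, threshold = 65664) and a half-full pool
 * (base->total == 32832):
 *
 *	ttl = 600*HZ - (600*HZ - 120*HZ) / HZ * 32832 / 65664 * HZ
 *	    = 600*HZ - 480 * 32832 / 65664 * HZ
 *	    = 600*HZ - 240*HZ
 *	    = 360*HZ		(six minutes)
 */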

struct inet_peer *inet_getpeer(struct inet_peer_base *base,
			       const struct inetpeer_addr *daddr,
			       int create)
{
	struct inet_peer *p, *gc_stack[PEER_MAX_GC];
	struct rb_node **pp, *parent;
	unsigned int gc_cnt, seq;
	int invalidated;

	/* Attempt a lockless lookup first.
	 * Because of a concurrent writer, we might not find an existing entry.
	 */
	rcu_read_lock();
	seq = read_seqbegin(&base->lock);
	p = lookup(daddr, base, seq, NULL, &gc_cnt, &parent, &pp);
	invalidated = read_seqretry(&base->lock, seq);
	rcu_read_unlock();

	if (p)
		return p;

	/* If no writer did a change during our lookup, we can return early. */
	if (!create && !invalidated)
		return NULL;

	/* Retry an exact lookup, taking the lock first.
	 * At least, nodes should be hot in our cache.
	 */
	parent = NULL;
	write_seqlock_bh(&base->lock);

	gc_cnt = 0;
	p = lookup(daddr, base, seq, gc_stack, &gc_cnt, &parent, &pp);
	if (!p && create) {
		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
		if (p) {
			p->daddr = *daddr;
			p->dtime = (__u32)jiffies;
			refcount_set(&p->refcnt, 2);
			atomic_set(&p->rid, 0);
			p->metrics[RTAX_LOCK - 1] = INETPEER_METRICS_NEW;
			p->rate_tokens = 0;
			p->n_redirects = 0;
			/* 60*HZ is arbitrary, but chosen high enough that the
			 * first calculation of tokens is at its maximum.
			 */
			p->rate_last = jiffies - 60 * HZ;

			rb_link_node(&p->rb_node, parent, pp);
			rb_insert_color(&p->rb_node, &base->rb_root);
			base->total++;
		}
	}
	if (gc_cnt)
		inet_peer_gc(base, gc_stack, gc_cnt);
	write_sequnlock_bh(&base->lock);

	return p;
}
EXPORT_SYMBOL_GPL(inet_getpeer);
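
/*
 * Caller sketch (hypothetical, not part of this file): with create == 0
 * the function only reports an existing entry, so a miss is cheap; with
 * create == 1 a missing entry is allocated under the write lock.
 */
#if 0
static bool example_peer_exists(struct inet_peer_base *base,
				const struct inetpeer_addr *daddr)
{
	struct inet_peer *peer = inet_getpeer(base, daddr, 0);

	if (!peer)
		return false;
	inet_putpeer(peer);
	return true;
}
#endif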

void inet_putpeer(struct inet_peer *p)
{
	/* The WRITE_ONCE() pairs with itself (we run lockless)
	 * and the READ_ONCE() in inet_peer_gc()
	 */
	WRITE_ONCE(p->dtime, (__u32)jiffies);

	if (refcount_dec_and_test(&p->refcnt))
		call_rcu(&p->rcu, inetpeer_free_rcu);
}
EXPORT_SYMBOL_GPL(inet_putpeer);

/*
 *	Check the transmit rate limitation for a given message.
 *	The rate information is now held in the inet_peer entries.
 *	This function is generic and could also be used for other purposes.
 *	It uses a token bucket filter as suggested by Alexey Kuznetsov.
 *
 *	Note that the same inet_peer fields are modified by functions in
 *	route.c too, but those work for packet destinations while xrlim_allow
 *	works for icmp destinations.  This means the rate limiting information
 *	for one "ip object" is shared - and these ICMPs are twice limited:
 *	by source and by destination.
 *
 *	RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate
 *			  SHOULD allow setting of rate limits
 *
 *	Shared between ICMPv4 and ICMPv6.
 */
#define XRLIM_BURST_FACTOR 6
bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
{
	unsigned long now, token;
	bool rc = false;

	if (!peer)
		return true;

	token = peer->rate_tokens;
	now = jiffies;
	token += now - peer->rate_last;
	peer->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;
	if (token >= timeout) {
		token -= timeout;
		rc = true;
	}
	peer->rate_tokens = token;
	return rc;
}
EXPORT_SYMBOL(inet_peer_xrlim_allow);
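
/*
 * Worked example of the token bucket above, assuming HZ == 1000 and
 * timeout == HZ (roughly one message per second, bursts of up to six):
 *
 *	after 10 s idle: token grows by 10000, clamped to 6 * 1000 = 6000
 *	6 back-to-back calls: each subtracts 1000 and returns true
 *	a 7th call in the same jiffy: token < timeout, so it returns false
 */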

void inetpeer_invalidate_tree(struct inet_peer_base *base)
{
	struct rb_node *p = rb_first(&base->rb_root);

	while (p) {
		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);

		p = rb_next(p);
		rb_erase(&peer->rb_node, &base->rb_root);
		inet_putpeer(peer);
		cond_resched();
	}

	base->total = 0;
}
EXPORT_SYMBOL(inetpeer_invalidate_tree);