/*
 *		INETPEER - A storage for permanent information about peers
 *
 *  This source is covered by the GNU GPL, the same as all kernel sources.
 *
 *  Version:	$Id: inetpeer.c,v 1.1.1.1 2008/10/15 03:27:33 james26_jang Exp $
 *
 *  Authors:	Andrey V. Savochkin <saw@msu.ru>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <net/inetpeer.h>

/*
 *  Theory of operations.
 *  We keep one entry for each peer IP address.  Each node contains
 *  long-living information about the peer which doesn't depend on routes.
 *  At the moment this information consists only of the ID field for the
 *  next outgoing IP packet.  This field is incremented with each packet,
 *  as encoded by the inet_getid() function (include/net/inetpeer.h).
 *  At the time of writing, this code generates unpredictable IP packet
 *  identifiers only for packets subject (actually or potentially) to
 *  fragmentation, i.e. DF packets smaller than the PMTU use a constant
 *  ID and do not go through this code (see ip_select_ident() in
 *  include/net/ip.h).
 *
 *  Route cache entries hold references to our nodes.
 *  New cache entries get references via lookup by destination IP address
 *  in the AVL tree.  The reference is grabbed only when it's needed, i.e.
 *  only when we try to output an IP packet which needs an unpredictable
 *  ID (see __ip_select_ident() in net/ipv4/route.c).
 *  Nodes are removed only when their reference counter goes to 0.
 *  Once that happens, the node may be removed after a sufficient amount
 *  of time has passed since its last use.  The least-recently-used entry
 *  can also be removed if the pool is overloaded, i.e. if the total
 *  number of entries is greater than or equal to the threshold.
 *
 *  The node pool is organised as an AVL tree.
 *  Such an implementation has been chosen not just for fun.  It's a way
 *  to prevent easy and efficient DoS attacks based on hash collisions:
 *  a huge number of long-living nodes in a single hash slot would
 *  significantly delay lookups performed with BHs disabled.
 *
 *  Serialisation issues.
 *  1.  Nodes may appear in the tree only with the pool write lock held.
 *  2.  Nodes may disappear from the tree only with the pool write lock
 *      held AND the reference count being 0.
 *  3.  Nodes appear on and disappear from the unused node list only
 *      under "inet_peer_unused_lock".
 *  4.  The global variable peer_total is modified under the pool lock.
 *  5.  struct inet_peer fields modification:
 *		avl_left, avl_right, avl_parent, avl_height: pool lock
 *		unused_next, unused_prevp: unused node list lock
 *		refcnt: atomically against modifications on other CPUs;
 *		   usually under some other lock to prevent node disappearing
 *		dtime: unused node list lock
 *		v4daddr: unchangeable
 *		ip_id_count: idlock
 */

/* Exported for the inet_getid() inline function.  */
spinlock_t inet_peer_idlock = SPIN_LOCK_UNLOCKED;
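
/* For reference, a minimal sketch of inet_getid(), assuming the 2.4-era
 * definition in include/net/inetpeer.h (check the header for the
 * authoritative version):
 *
 *	static inline __u16 inet_getid(struct inet_peer *p)
 *	{
 *		__u16 id;
 *
 *		spin_lock_bh(&inet_peer_idlock);
 *		id = p->ip_id_count++;
 *		spin_unlock_bh(&inet_peer_idlock);
 *		return id;
 *	}
 */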

static kmem_cache_t *peer_cachep;

#define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = {
	avl_left : &peer_fake_node,
	avl_right : &peer_fake_node,
	avl_height : 0
};
#define peer_avl_empty (&peer_fake_node)
static struct inet_peer *peer_root = peer_avl_empty;
static rwlock_t peer_pool_lock = RW_LOCK_UNLOCKED;
#define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */

static volatile int peer_total;
/* Exported for sysctl_net_ipv4.  */
int inet_peer_threshold = 65536 + 128;	/* start to throw entries more
					 * aggressively at this stage */
int inet_peer_minttl = 120 * HZ;	/* TTL under high load: 120 sec */
int inet_peer_maxttl = 10 * 60 * HZ;	/* usual time to live: 10 min */

/* Exported for the inet_putpeer() inline function.  */
struct inet_peer *inet_peer_unused_head,
		**inet_peer_unused_tailp = &inet_peer_unused_head;
spinlock_t inet_peer_unused_lock = SPIN_LOCK_UNLOCKED;
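
/* Upper bound on the number of stale entries reclaimed per
 * peer_check_expire() run; see the cleanup loop below. */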
#define PEER_MAX_CLEANUP_WORK 30

static void peer_check_expire(unsigned long dummy);
static struct timer_list peer_periodic_timer =
	{ { NULL, NULL }, 0, 0, &peer_check_expire };

/* Exported for sysctl_net_ipv4.  */
int inet_peer_gc_mintime = 10 * HZ,
    inet_peer_gc_maxtime = 120 * HZ;

/* Called from ip_output.c:ip_init  */
void __init inet_initpeers(void)
{
	struct sysinfo si;

	/* Use the straight interface to information about memory. */
	si_meminfo(&si);
	/* The values below were suggested by Alexey Kuznetsov
	 * <kuznet@ms2.inr.ac.ru>.  I don't have any opinion about the values
	 * myself.  --SAW
	 */
	if (si.totalram <= (32768*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */
	if (si.totalram <= (16384*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 1; /* about 512KB */
	if (si.totalram <= (8192*1024)/PAGE_SIZE)
		inet_peer_threshold >>= 2; /* about 128KB */

	peer_cachep = kmem_cache_create("inet_peer_cache",
			sizeof(struct inet_peer),
			0, SLAB_HWCACHE_ALIGN,
			NULL, NULL);

	/* All the timers started at system startup tend to
	 * synchronize.  Perturb this one a bit.
	 */
	peer_periodic_timer.expires = jiffies
		+ net_random() % inet_peer_gc_maxtime
		+ inet_peer_gc_maxtime;
	add_timer(&peer_periodic_timer);
}

/* Called with or without local BH being disabled. */
static void unlink_from_unused(struct inet_peer *p)
{
	spin_lock_bh(&inet_peer_unused_lock);
	if (p->unused_prevp != NULL) {
		/* On unused list. */
		*p->unused_prevp = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark it as removed */
	}
	spin_unlock_bh(&inet_peer_unused_lock);
}

/* Called with local BH disabled and the pool lock held. */
#define lookup(daddr) 						\
({								\
	struct inet_peer *u, **v;				\
	stackptr = stack;					\
	*stackptr++ = &peer_root;				\
	for (u = peer_root; u != peer_avl_empty; ) {		\
		if (daddr == u->v4daddr)			\
			break;					\
		if (daddr < u->v4daddr)				\
			v = &u->avl_left;			\
		else						\
			v = &u->avl_right;			\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})
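
/* Usage note: lookup() expects `stack' and `stackptr' to be in scope
 * (see inet_getpeer() below).  On return, stack[] holds the addresses
 * of the child pointers followed on the way down, ready to be replayed
 * by peer_avl_rebalance(); the result is either the matching node or
 * peer_avl_empty. */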

/* Called with local BH disabled and the pool write lock held. */
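/* Walks to the rightmost node of start's left subtree, i.e. start's
 * in-order predecessor, pushing the path onto the stack for a later
 * rebalance. */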
#define lookup_rightempty(start)				\
({								\
	struct inet_peer *u, **v;				\
	*stackptr++ = &start->avl_left;				\
	v = &start->avl_left;					\
	for (u = *v; u->avl_right != peer_avl_empty; ) {	\
		v = &u->avl_right;				\
		*stackptr++ = v;				\
		u = *v;						\
	}							\
	u;							\
})

/* Called with local BH disabled and the pool write lock held.
 * The variable names reflect the proof of correctness.
 * See mm/map_avl.c for a more detailed description of the ideas.  */
static void peer_avl_rebalance(struct inet_peer **stack[],
		struct inet_peer ***stackend)
{
	struct inet_peer **nodep, *node, *l, *r;
	int lh, rh;

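	/* Walk back up the path recorded by lookup()/lookup_rightempty(),
	 * restoring the AVL invariant (subtree heights differ by at most
	 * one) with single or double rotations and fixing up the cached
	 * heights on the way. */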
	while (stackend > stack) {
		nodep = *--stackend;
		node = *nodep;
		l = node->avl_left;
		r = node->avl_right;
		lh = node_height(l);
		rh = node_height(r);
		if (lh > rh + 1) { /* l: RH+2 */
			struct inet_peer *ll, *lr, *lrl, *lrr;
			int lrh;
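			/* Left-heavy: a single right rotation when ll is at
			 * least as tall as lr; otherwise a double (left-right)
			 * rotation promoting lr. */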
			ll = l->avl_left;
			lr = l->avl_right;
			lrh = node_height(lr);
			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
				node->avl_left = lr;	/* lr: RH or RH+1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = lrh + 1; /* RH+1 or RH+2 */
				l->avl_left = ll;	/* ll: RH+1 */
				l->avl_right = node;	/* node: RH+1 or RH+2 */
				l->avl_height = node->avl_height + 1;
				*nodep = l;
			} else { /* ll: RH, lr: RH+1 */
				lrl = lr->avl_left;	/* lrl: RH or RH-1 */
				lrr = lr->avl_right;	/* lrr: RH or RH-1 */
				node->avl_left = lrr;	/* lrr: RH or RH-1 */
				node->avl_right = r;	/* r: RH */
				node->avl_height = rh + 1; /* node: RH+1 */
				l->avl_left = ll;	/* ll: RH */
				l->avl_right = lrl;	/* lrl: RH or RH-1 */
				l->avl_height = rh + 1;	/* l: RH+1 */
				lr->avl_left = l;	/* l: RH+1 */
				lr->avl_right = node;	/* node: RH+1 */
				lr->avl_height = rh + 2;
				*nodep = lr;
			}
		} else if (rh > lh + 1) { /* r: LH+2 */
			struct inet_peer *rr, *rl, *rlr, *rll;
			int rlh;
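			/* Right-heavy: the mirror image of the case above.
			 * A single left rotation when rr is at least as tall
			 * as rl; otherwise a double (right-left) rotation
			 * promoting rl. */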
			rr = r->avl_right;
			rl = r->avl_left;
			rlh = node_height(rl);
			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
				node->avl_right = rl;	/* rl: LH or LH+1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = rlh + 1; /* LH+1 or LH+2 */
				r->avl_right = rr;	/* rr: LH+1 */
				r->avl_left = node;	/* node: LH+1 or LH+2 */
				r->avl_height = node->avl_height + 1;
				*nodep = r;
			} else { /* rr: LH, rl: LH+1 */
				rlr = rl->avl_right;	/* rlr: LH or LH-1 */
				rll = rl->avl_left;	/* rll: LH or LH-1 */
				node->avl_right = rll;	/* rll: LH or LH-1 */
				node->avl_left = l;	/* l: LH */
				node->avl_height = lh + 1; /* node: LH+1 */
				r->avl_right = rr;	/* rr: LH */
				r->avl_left = rlr;	/* rlr: LH or LH-1 */
				r->avl_height = lh + 1;	/* r: LH+1 */
				rl->avl_right = r;	/* r: LH+1 */
				rl->avl_left = node;	/* node: LH+1 */
				rl->avl_height = lh + 2;
				*nodep = rl;
			}
		} else {
			node->avl_height = (lh > rh ? lh : rh) + 1;
		}
	}
}

/* Called with local BH disabled and the pool write lock held. */
#define link_to_pool(n)						\
do {								\
	n->avl_height = 1;					\
	n->avl_left = peer_avl_empty;				\
	n->avl_right = peer_avl_empty;				\
	**--stackptr = n;					\
	peer_avl_rebalance(stack, stackptr);			\
} while(0)
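
/* Note: link_to_pool() relies on an immediately preceding lookup()
 * that failed, leaving the top of the stack pointing at the empty
 * child slot where the new node belongs; the recorded path is then
 * rebalanced. */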

/* May be called with local BH enabled. */
static void unlink_from_pool(struct inet_peer *p)
{
	int do_free;

	do_free = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check the reference counter.  It was artificially incremented by 1
	 * in cleanup_once() to prevent the node from suddenly disappearing.
	 * If the reference count is still 1 then the node is referenced only
	 * as `p' here and from the pool.  So under the exclusive pool lock
	 * it's safe to remove the node and free it later. */
	if (atomic_read(&p->refcnt) == 1) {
		struct inet_peer **stack[PEER_MAXDEPTH];
		struct inet_peer ***stackptr, ***delp;
		if (lookup(p->v4daddr) != p)
			BUG();
		delp = stackptr - 1; /* *delp[0] == p */
		if (p->avl_left == peer_avl_empty) {
			*delp[0] = p->avl_right;
			--stackptr;
		} else {
			/* look for a node to insert instead of p */
			struct inet_peer *t;
			t = lookup_rightempty(p);
			if (*stackptr[-1] != t)
				BUG();
			**--stackptr = t->avl_left;
			/* t is removed; t->v4daddr > x->v4daddr for any
			 * other x in p's left subtree.
			 * Put t in the old place of p. */
			*delp[0] = t;
			t->avl_left = p->avl_left;
			t->avl_right = p->avl_right;
			t->avl_height = p->avl_height;
			if (delp[1] != &p->avl_left)
				BUG();
			delp[1] = &t->avl_left; /* was &p->avl_left */
		}
		peer_avl_rebalance(stack, stackptr);
		peer_total--;
		do_free = 1;
	}
	write_unlock_bh(&peer_pool_lock);

	if (do_free)
		kmem_cache_free(peer_cachep, p);
	else
		/* The node is in use again.  Decrease the reference counter
		 * back.  The loop "cleanup -> unlink_from_unused
		 *   -> unlink_from_pool -> putpeer -> link_to_unused
		 *   -> cleanup (for the same node)"
		 * doesn't really exist because the entry will have a
		 * recent deletion time and will not be cleaned again soon. */
		inet_putpeer(p);
}

/* May be called with local BH enabled. */
static int cleanup_once(unsigned long ttl)
{
	struct inet_peer *p;

	/* Remove the first entry from the list of unused nodes. */
	spin_lock_bh(&inet_peer_unused_lock);
	p = inet_peer_unused_head;
	if (p != NULL) {
		if (time_after(p->dtime + ttl, jiffies)) {
			/* Do not prune fresh entries. */
			spin_unlock_bh(&inet_peer_unused_lock);
			return -1;
		}
		inet_peer_unused_head = p->unused_next;
		if (p->unused_next != NULL)
			p->unused_next->unused_prevp = p->unused_prevp;
		else
			inet_peer_unused_tailp = p->unused_prevp;
		p->unused_prevp = NULL; /* mark as not on the list */
		/* Grab an extra reference to prevent the node from
		 * disappearing before the unlink_from_pool() call. */
		atomic_inc(&p->refcnt);
	}
	spin_unlock_bh(&inet_peer_unused_lock);

	if (p == NULL)
		/* It means that the total number of USED entries has
		 * grown over inet_peer_threshold.  It shouldn't really
		 * happen because of entry limits in the route cache. */
		return -1;

	unlink_from_pool(p);
	return 0;
}
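
/* Note that a ttl of 0 makes cleanup_once() prune the head of the
 * unused list regardless of its age; inet_getpeer() below uses this
 * when peer_total crosses inet_peer_threshold. */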

/* Called with or without local BH being disabled. */
struct inet_peer *inet_getpeer(__u32 daddr, int create)
{
	struct inet_peer *p, *n;
	struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;

	/* Look up the address quickly. */
	read_lock_bh(&peer_pool_lock);
	p = lookup(daddr);
	if (p != peer_avl_empty)
		atomic_inc(&p->refcnt);
	read_unlock_bh(&peer_pool_lock);

	if (p != peer_avl_empty) {
		/* The existing node has been found. */
		/* Remove the entry from the unused list if it was there. */
		unlink_from_unused(p);
		return p;
	}

	if (!create)
		return NULL;

	/* Allocate the space outside the locked region. */
	n = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
	if (n == NULL)
		return NULL;
	n->v4daddr = daddr;
	atomic_set(&n->refcnt, 1);
	n->ip_id_count = secure_ip_id(daddr);
	n->tcp_ts_stamp = 0;

	write_lock_bh(&peer_pool_lock);
	/* Check if an entry has suddenly appeared. */
	p = lookup(daddr);
	if (p != peer_avl_empty)
		goto out_free;

	/* Link the node. */
	link_to_pool(n);
	n->unused_prevp = NULL; /* not on the list */
	peer_total++;
	write_unlock_bh(&peer_pool_lock);

	if (peer_total >= inet_peer_threshold)
		/* Remove one least-recently-used entry. */
		cleanup_once(0);

	return n;

out_free:
	/* The appropriate node is already in the pool. */
	atomic_inc(&p->refcnt);
	write_unlock_bh(&peer_pool_lock);
	/* Remove the entry from the unused list if it was there. */
	unlink_from_unused(p);
	/* Free the preallocated node. */
	kmem_cache_free(peer_cachep, n);
	return p;
}
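
/* A minimal usage sketch (hypothetical caller; the real user in
 * net/ipv4/route.c caches the reference in the routing entry rather
 * than dropping it immediately):
 *
 *	struct inet_peer *peer = inet_getpeer(daddr, 1);
 *	if (peer != NULL) {
 *		__u16 id = inet_getid(peer);
 *		...
 *		inet_putpeer(peer);
 *	}
 */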

/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
	int i;
	int ttl;

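	/* Interpolate ttl linearly between inet_peer_maxttl (empty pool)
	 * and inet_peer_minttl (pool at the threshold).  Dividing by HZ
	 * before multiplying by peer_total keeps the intermediate product
	 * from overflowing an int. */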
	if (peer_total >= inet_peer_threshold)
		ttl = inet_peer_minttl;
	else
		ttl = inet_peer_maxttl
				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
					peer_total / inet_peer_threshold * HZ;
	for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);

	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
	 * interval depending on the total number of entries (the more
	 * entries, the shorter the interval). */
	peer_periodic_timer.expires = jiffies
		+ inet_peer_gc_maxtime
		- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
			peer_total / inet_peer_threshold * HZ;
	add_timer(&peer_periodic_timer);
}