/*
 * inet fragments management
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 *		Authors:	Pavel Emelyanov <xemul@openvz.org>
 *				Started as consolidation of ipv4/ip_fragment.c,
 *				ipv6/reassembly.c and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/inet_frag.h>

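/*
 * Timer callback: pick a fresh hash secret and move every queue whose
 * bucket changes under the new secret onto its new hash chain, then
 * re-arm the rebuild timer.
 */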
static void inet_frag_secret_rebuild(unsigned long dummy)
{
	struct inet_frags *f = (struct inet_frags *)dummy;
	unsigned long now = jiffies;
	int i;

	write_lock(&f->lock);
	get_random_bytes(&f->rnd, sizeof(u32));
	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_queue *q;
		struct hlist_node *p, *n;

		hlist_for_each_entry_safe(q, p, n, &f->hash[i], list) {
			unsigned int hval = f->hashfn(q);

			if (hval != i) {
				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hlist_add_head(&q->list, &f->hash[hval]);
			}
		}
	}
	write_unlock(&f->lock);

	mod_timer(&f->secret_timer, now + f->secret_interval);
}

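/*
 * Initialise the per-protocol fragment state: empty hash buckets, the
 * hash lock, an initial hash secret, and the periodic secret-rebuild
 * timer.
 */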
void inet_frags_init(struct inet_frags *f)
{
	int i;

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		INIT_HLIST_HEAD(&f->hash[i]);

	rwlock_init(&f->lock);

	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
				   (jiffies ^ (jiffies >> 6)));

	setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
			(unsigned long)f);
	f->secret_timer.expires = jiffies + f->secret_interval;
	add_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_init);

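/*
 * Initialise the per-namespace state: queue count, memory accounting
 * and the LRU list used by the evictor.
 */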
void inet_frags_init_net(struct netns_frags *nf)
{
	nf->nqueues = 0;
	atomic_set(&nf->mem, 0);
	INIT_LIST_HEAD(&nf->lru_list);
}
EXPORT_SYMBOL(inet_frags_init_net);

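/* Stop the secret-rebuild timer when the protocol unregisters. */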
void inet_frags_fini(struct inet_frags *f)
{
	del_timer(&f->secret_timer);
}
EXPORT_SYMBOL(inet_frags_fini);

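/*
 * Tear down a namespace: drop the low threshold to zero so that the
 * evictor, run with BHs disabled, frees every remaining queue.
 */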
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	nf->low_thresh = 0;

	local_bh_disable();
	inet_frag_evictor(nf, f);
	local_bh_enable();
}
EXPORT_SYMBOL(inet_frags_exit_net);

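/*
 * Unlink a queue from its hash chain and from the namespace LRU list,
 * and drop the namespace queue count, all under the hash write lock.
 */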
static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	write_lock(&f->lock);
	hlist_del(&fq->list);
	list_del(&fq->lru_list);
	fq->net->nqueues--;
	write_unlock(&f->lock);
}

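/*
 * Stop a queue's expiry timer and, if it has not been completed yet,
 * unlink it and mark it INET_FRAG_COMPLETE, dropping the references
 * held by the timer and by the hash table.
 */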
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		atomic_dec(&fq->refcnt);

	if (!(fq->last_in & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		atomic_dec(&fq->refcnt);
		fq->last_in |= INET_FRAG_COMPLETE;
	}
}
EXPORT_SYMBOL(inet_frag_kill);

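/*
 * Free one fragment skb, charging its truesize against the namespace
 * memory counter and the optional eviction work budget.
 */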
static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
		struct sk_buff *skb, int *work)
{
	if (work)
		*work -= skb->truesize;

	atomic_sub(skb->truesize, &nf->mem);
	if (f->skb_free)
		f->skb_free(skb);
	kfree_skb(skb);
}

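/*
 * Final teardown of a completed queue: free all queued fragments,
 * release the memory accounted to the queue itself, call the protocol
 * destructor and free the queue structure.
 */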
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
					int *work)
{
	struct sk_buff *fp;
	struct netns_frags *nf;

	WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		frag_kfree_skb(nf, f, fp, work);
		fp = xp;
	}

	if (work)
		*work -= f->qsize;
	atomic_sub(f->qsize, &nf->mem);

	if (f->destructor)
		f->destructor(q);
	kfree(q);
}
EXPORT_SYMBOL(inet_frag_destroy);

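/*
 * Evict queues from the head of the LRU list until namespace memory
 * usage falls below the low threshold; returns the number of queues
 * processed.
 */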
int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f)
{
	struct inet_frag_queue *q;
	int work, evicted = 0;

	work = atomic_read(&nf->mem) - nf->low_thresh;
	while (work > 0) {
		read_lock(&f->lock);
		if (list_empty(&nf->lru_list)) {
			read_unlock(&f->lock);
			break;
		}

		q = list_first_entry(&nf->lru_list,
				struct inet_frag_queue, lru_list);
		atomic_inc(&q->refcnt);
		read_unlock(&f->lock);

		spin_lock(&q->lock);
		if (!(q->last_in & INET_FRAG_COMPLETE))
			inet_frag_kill(q, f);
		spin_unlock(&q->lock);

		if (atomic_dec_and_test(&q->refcnt))
			inet_frag_destroy(q, f, &work);
		evicted++;
	}

	return evicted;
}
EXPORT_SYMBOL(inet_frag_evictor);

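/*
 * Insert a freshly allocated queue into the hash table.  If an equal
 * queue was added by another CPU in the meantime, drop ours and return
 * the existing one instead.
 */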
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
		struct inet_frag_queue *qp_in, struct inet_frags *f,
		void *arg)
{
	struct inet_frag_queue *qp;
#ifdef CONFIG_SMP
	struct hlist_node *n;
#endif
	unsigned int hash;

	write_lock(&f->lock);
	/*
	 * While we were without the lock another CPU could have updated
	 * the rnd seed, so we need to re-calculate the hash chain.
	 * Fortunately, qp_in can be used to get one.
	 */
	hash = f->hashfn(qp_in);
#ifdef CONFIG_SMP
	/* With the SMP race we have to recheck the hash table, because
	 * such an entry could have been created on another CPU while we
	 * promoted the read lock to a write lock.
	 */
	hlist_for_each_entry(qp, n, &f->hash[hash], list) {
		if (qp->net == nf && f->match(qp, arg)) {
			atomic_inc(&qp->refcnt);
			write_unlock(&f->lock);
			qp_in->last_in |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		atomic_inc(&qp->refcnt);

	atomic_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &f->hash[hash]);
	list_add_tail(&qp->lru_list, &nf->lru_list);
	nf->nqueues++;
	write_unlock(&f->lock);
	return qp;
}

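/*
 * Allocate and initialise a new queue: run the protocol constructor,
 * charge its size to the namespace memory counter and set up (but do
 * not start) the expiry timer.
 */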
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = kzalloc(f->qsize, GFP_ATOMIC);
	if (q == NULL)
		return NULL;

	f->constructor(q, arg);
	atomic_add(f->qsize, &nf->mem);
	setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
	spin_lock_init(&q->lock);
	atomic_set(&q->refcnt, 1);
	q->net = nf;

	return q;
}

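/* Allocate a new queue and insert it into the hash table. */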
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
		struct inet_frags *f, void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (q == NULL)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

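/*
 * Look up the queue matching the key in the given hash bucket; called
 * with f->lock read-held and releases it.  If no match is found, a new
 * queue is allocated and interned.
 */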
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
		struct inet_frags *f, void *key, unsigned int hash)
	__releases(&f->lock)
{
	struct inet_frag_queue *q;
	struct hlist_node *n;

	hlist_for_each_entry(q, n, &f->hash[hash], list) {
		if (q->net == nf && f->match(q, key)) {
			atomic_inc(&q->refcnt);
			read_unlock(&f->lock);
			return q;
		}
	}
	read_unlock(&f->lock);

	return inet_frag_create(nf, f, key);
}
EXPORT_SYMBOL(inet_frag_find);