/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>

static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);

void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;
	struct nf_ct_ext *ext = ct->ext;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!__nf_ct_ext_exist(ext, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);

		/* The nf_ct_ext_type might already have been unregistered
		 * here.  In that case its owner is responsible for cleaning
		 * up the private area in all conntracks at unregistration
		 * time, so there is nothing left to do.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);

static void *
nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
{
	unsigned int off, len;
	struct nf_ct_ext_type *t;
	size_t alloc_size;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len;
	alloc_size = t->alloc_size;
	rcu_read_unlock();

	*ext = kzalloc(alloc_size, gfp);
	if (!*ext)
		return NULL;

	(*ext)->offset[id] = off;
	(*ext)->len = len;

	return (void *)(*ext) + off;
}

static void __nf_ct_ext_free_rcu(struct rcu_head *head)
{
	struct nf_ct_ext *ext = container_of(head, struct nf_ct_ext, rcu);
	kfree(ext);
}

void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *old, *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	old = ct->ext;
	if (!old)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	if (__nf_ct_ext_exist(old, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	newoff = ALIGN(old->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	new = __krealloc(old, newlen, gfp);
	if (!new)
		return NULL;

	if (new != old) {
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!__nf_ct_ext_exist(old, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)old + old->offset[i]);
			rcu_read_unlock();
		}
		call_rcu(&old->rcu, __nf_ct_ext_free_rcu);
		ct->ext = new;
	}

	new->offset[id] = newoff;
	new->len = newlen;
	memset((void *)new + newoff, 0, newlen - newoff);
	return (void *)new + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);

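/* Recompute alloc_size for the affected extension types.  alloc_size is
 * what nf_ct_ext_create() hands to kzalloc(): room for the base
 * struct nf_ct_ext, plus the type's own (aligned) private area, plus an
 * aligned slot for every other registered type that has
 * NF_CT_EXT_F_PREALLOC set, so those types can be added later without
 * reallocating the extension block.
 */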
static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* unnecessary to update all types */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that the extended areas in a conntrack for the types
	   whose NF_CT_EXT_F_PREALLOC bit is set are allocated in order */
	for (i = min; i <= max; i++) {
		t1 = nf_ct_ext_types[i];
		if (!t1)
			continue;

		t1->alloc_size = sizeof(struct nf_ct_ext)
				 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
				 + t1->len;
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = nf_ct_ext_types[j];
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
	}
}

/* This MUST be called in process context. */
int nf_ct_extend_register(struct nf_ct_ext_type *type)
{
	int ret = 0;

	mutex_lock(&nf_ct_ext_type_mutex);
	if (nf_ct_ext_types[type->id]) {
		ret = -EBUSY;
		goto out;
	}

	/* Set a provisional alloc_size so that nf_ct_ext_create() can
	   allocate enough area even before update_alloc_size() has run */
	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
			   + type->len;
	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
	update_alloc_size(type);
out:
	mutex_unlock(&nf_ct_ext_type_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_extend_register);

/* This MUST be called in process context. */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);

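/* Usage sketch (not part of the original file): how a hypothetical conntrack
 * extension would hook into this infrastructure.  The names my_ext,
 * MY_EXT_ID, my_ext_destroy and my_extend are illustrative only; a real
 * user picks an id from enum nf_ct_ext_id and normally goes through the
 * helper macros in <net/netfilter/nf_conntrack_extend.h>.
 *
 *	struct my_ext {
 *		u32 packets_seen;
 *	};
 *
 *	static void my_ext_destroy(struct nf_conn *ct)
 *	{
 *		// release anything the private area points to
 *	}
 *
 *	static struct nf_ct_ext_type my_extend __read_mostly = {
 *		.len     = sizeof(struct my_ext),
 *		.align   = __alignof__(struct my_ext),
 *		.destroy = my_ext_destroy,
 *		.id      = MY_EXT_ID,	// a value from enum nf_ct_ext_id
 *	};
 *
 *	// module init/exit:
 *	ret = nf_ct_extend_register(&my_extend);
 *	...
 *	nf_ct_extend_unregister(&my_extend);
 *
 *	// per (unconfirmed) conntrack, e.g. from a helper's hook:
 *	struct my_ext *e = __nf_ct_ext_add(ct, MY_EXT_ID, GFP_ATOMIC);
 *	if (e)
 *		e->packets_seen = 0;
 */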