/*
 * x_tables core - Backend for {ip,ip6,arp}_tables
 *
 * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
 *
 * Based on existing ip_tables code which is
 *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
 *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_arp/arp_tables.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_DESCRIPTION("{ip,ip6,arp,eb}_tables backend module");

#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))

struct compat_delta {
	struct compat_delta *next;
	unsigned int offset;
	int delta;
};

struct xt_af {
	struct mutex mutex;
	struct list_head match;
	struct list_head target;
#ifdef CONFIG_COMPAT
	struct mutex compat_mutex;
	struct compat_delta *compat_offsets;
#endif
};

static struct xt_af *xt;

static const char *const xt_prefix[NFPROTO_NUMPROTO] = {
	[NFPROTO_UNSPEC] = "x",
	[NFPROTO_IPV4]   = "ip",
	[NFPROTO_ARP]    = "arp",
	[NFPROTO_BRIDGE] = "eb",
	[NFPROTO_IPV6]   = "ip6",
};

/* Allow this many total (re)entries. */
static const unsigned int xt_jumpstack_multiplier = 2;

/* Registration hooks for targets. */
int
xt_register_target(struct xt_target *target)
{
	u_int8_t af = target->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;
	list_add(&target->list, &xt[af].target);
	mutex_unlock(&xt[af].mutex);
	return ret;
}
EXPORT_SYMBOL(xt_register_target);

void
xt_unregister_target(struct xt_target *target)
{
	u_int8_t af = target->family;

	mutex_lock(&xt[af].mutex);
	list_del(&target->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_target);

int
xt_register_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_target(&target[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_targets(target, i);
	return err;
}
EXPORT_SYMBOL(xt_register_targets);

void
xt_unregister_targets(struct xt_target *target, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_target(&target[i]);
}
EXPORT_SYMBOL(xt_unregister_targets);

int
xt_register_match(struct xt_match *match)
{
	u_int8_t af = match->family;
	int ret;

	ret = mutex_lock_interruptible(&xt[af].mutex);
	if (ret != 0)
		return ret;

	list_add(&match->list, &xt[af].match);
	mutex_unlock(&xt[af].mutex);

	return ret;
}
EXPORT_SYMBOL(xt_register_match);

void
xt_unregister_match(struct xt_match *match)
{
	u_int8_t af = match->family;

	mutex_lock(&xt[af].mutex);
	list_del(&match->list);
	mutex_unlock(&xt[af].mutex);
}
EXPORT_SYMBOL(xt_unregister_match);

int
xt_register_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;
	int err = 0;

	for (i = 0; i < n; i++) {
		err = xt_register_match(&match[i]);
		if (err)
			goto err;
	}
	return err;

err:
	if (i > 0)
		xt_unregister_matches(match, i);
	return err;
}
EXPORT_SYMBOL(xt_register_matches);

void
xt_unregister_matches(struct xt_match *match, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		xt_unregister_match(&match[i]);
}
EXPORT_SYMBOL(xt_unregister_matches);
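
/*
 * Usage sketch (illustrative only; the "foo" names are hypothetical):
 * an extension module registers an array of targets or matches from its
 * init hook and tears them down again on exit.
 *
 *	static struct xt_target foo_tg_reg[] __read_mostly = {
 *		{
 *			.name       = "FOO",
 *			.revision   = 0,
 *			.family     = NFPROTO_UNSPEC,
 *			.target     = foo_tg,
 *			.targetsize = sizeof(struct xt_foo_tginfo),
 *			.me         = THIS_MODULE,
 *		},
 *	};
 *
 *	static int __init foo_tg_init(void)
 *	{
 *		return xt_register_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 *
 *	static void __exit foo_tg_exit(void)
 *	{
 *		xt_unregister_targets(foo_tg_reg, ARRAY_SIZE(foo_tg_reg));
 *	}
 *
 * Note that xt_register_targets()/xt_register_matches() unwind the
 * already-registered entries themselves on failure, so the caller only
 * has to propagate the error.
 */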


/*
 * These are weird, but module loading must not be done with mutex
 * held (since they will register), and we have to have a single
 * function to use try_then_request_module().
 */

/* Find match, grabs ref.  Returns ERR_PTR() on error. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision)
{
	struct xt_match *m;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision == revision) {
				if (try_module_get(m->me)) {
					mutex_unlock(&xt[af].mutex);
					return m;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_match(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_match);

struct xt_match *
xt_request_find_match(uint8_t nfproto, const char *name, uint8_t revision)
{
	struct xt_match *match;

	match = try_then_request_module(xt_find_match(nfproto, name, revision),
					"%st_%s", xt_prefix[nfproto], name);
	return (match != NULL) ? match : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_match);
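
/*
 * Illustrative caller pattern (hypothetical): table parsers resolve each
 * rule's match by name and revision, letting the "%st_%s" alias above
 * autoload e.g. ipt_conntrack for an NFPROTO_IPV4 "conntrack" match.
 *
 *	struct xt_match *m;
 *
 *	m = xt_request_find_match(NFPROTO_IPV4, "conntrack", 1);
 *	if (IS_ERR(m))
 *		return PTR_ERR(m);	// -ENOENT, -EINTR or -EPROTOTYPE
 *	...
 *	module_put(m->me);		// drop the reference taken above
 *
 * A failed per-family lookup falls back to the NFPROTO_UNSPEC list, so
 * family-independent extensions are found as well.
 */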

/* Find target, grabs ref.  Returns ERR_PTR() on error. */
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *t;
	int err = 0;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision == revision) {
				if (try_module_get(t->me)) {
					mutex_unlock(&xt[af].mutex);
					return t;
				}
			} else
				err = -EPROTOTYPE; /* Found something. */
		}
	}
	mutex_unlock(&xt[af].mutex);

	if (af != NFPROTO_UNSPEC)
		/* Try searching again in the family-independent list */
		return xt_find_target(NFPROTO_UNSPEC, name, revision);

	return ERR_PTR(err);
}
EXPORT_SYMBOL(xt_find_target);

struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision)
{
	struct xt_target *target;

	target = try_then_request_module(xt_find_target(af, name, revision),
					 "%st_%s", xt_prefix[af], name);
	return (target != NULL) ? target : ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(xt_request_find_target);

static int match_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_match *m;
	int have_rev = 0;

	list_for_each_entry(m, &xt[af].match, list) {
		if (strcmp(m->name, name) == 0) {
			if (m->revision > *bestp)
				*bestp = m->revision;
			if (m->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return match_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

static int target_revfn(u8 af, const char *name, u8 revision, int *bestp)
{
	const struct xt_target *t;
	int have_rev = 0;

	list_for_each_entry(t, &xt[af].target, list) {
		if (strcmp(t->name, name) == 0) {
			if (t->revision > *bestp)
				*bestp = t->revision;
			if (t->revision == revision)
				have_rev = 1;
		}
	}

	if (af != NFPROTO_UNSPEC && !have_rev)
		return target_revfn(NFPROTO_UNSPEC, name, revision, bestp);

	return have_rev;
}

/* Returns true or false (if no such extension at all) */
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err)
{
	int have_rev, best = -1;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
		*err = -EINTR;
		return 1;
	}
	if (target == 1)
		have_rev = target_revfn(af, name, revision, &best);
	else
		have_rev = match_revfn(af, name, revision, &best);
	mutex_unlock(&xt[af].mutex);

	/* Nothing at all?  Return 0 to try loading module. */
	if (best == -1) {
		*err = -ENOENT;
		return 0;
	}

	*err = best;
	if (!have_rev)
		*err = -EPROTONOSUPPORT;
	return 1;
}
EXPORT_SYMBOL_GPL(xt_find_revision);
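
/*
 * Worked example of the return convention above (hypothetical state):
 * with only revisions 0 and 1 of an "owner" match loaded, probing
 * revision 2 sets *err to -EPROTONOSUPPORT and returns 1 (the extension
 * exists, that revision does not); probing revision 1 sets *err to the
 * best known revision (1) and returns 1; probing a name that is loaded
 * nowhere sets *err to -ENOENT and returns 0, telling the caller to
 * attempt a module load and probe again.
 */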

static char *textify_hooks(char *buf, size_t size, unsigned int mask)
{
	static const char *const names[] = {
		"PREROUTING", "INPUT", "FORWARD",
		"OUTPUT", "POSTROUTING", "BROUTING",
	};
	unsigned int i;
	char *p = buf;
	bool np = false;
	int res;

	*p = '\0';
	for (i = 0; i < ARRAY_SIZE(names); ++i) {
		if (!(mask & (1 << i)))
			continue;
		res = snprintf(p, size, "%s%s", np ? "/" : "", names[i]);
		if (res > 0) {
			size -= res;
			p += res;
		}
		np = true;
	}

	return buf;
}
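
/*
 * Example: a mask of (1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN),
 * i.e. 0x3, renders as "PREROUTING/INPUT"; bit 5 maps to the bridge-only
 * "BROUTING" hook.
 */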

int xt_check_match(struct xt_mtchk_param *par,
		   unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->match->matchsize) != size &&
	    par->match->matchsize != -1) {
		/*
		 * ebt_among is exempt from centralized matchsize checking
		 * because it uses a dynamic-size data set.
		 */
		pr_err("%s_tables: %s.%u match: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->revision,
		       XT_ALIGN(par->match->matchsize), size);
		return -EINVAL;
	}
	if (par->match->table != NULL &&
	    strcmp(par->match->table, par->table) != 0) {
		pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->table, par->table);
		return -EINVAL;
	}
	if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s match: used from hooks %s, but only "
		       "valid from %s\n",
		       xt_prefix[par->family], par->match->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->match->hooks));
		return -EINVAL;
	}
	if (par->match->proto && (par->match->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s match: only valid for protocol %u\n",
		       xt_prefix[par->family], par->match->name,
		       par->match->proto);
		return -EINVAL;
	}
	if (par->match->checkentry != NULL) {
		ret = par->match->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_match);
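
/*
 * Sketch (hypothetical extension) of the metadata the checks above
 * enforce: a match that declares .table and .hooks is rejected with
 * -EINVAL when a rule uses it in another table or from another hook.
 *
 *	static struct xt_match foo_mt_reg __read_mostly = {
 *		.name       = "foo",
 *		.revision   = 0,
 *		.family     = NFPROTO_IPV4,
 *		.match      = foo_mt,
 *		.checkentry = foo_mt_check,	// may veto a rule itself
 *		.matchsize  = sizeof(struct xt_foo_mtinfo),
 *		.table      = "mangle",		// only valid in *mangle*
 *		.hooks      = (1 << NF_INET_PRE_ROUTING) |
 *		              (1 << NF_INET_LOCAL_OUT),
 *		.proto      = IPPROTO_TCP,	// and only for TCP rules
 *		.me         = THIS_MODULE,
 *	};
 */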

#ifdef CONFIG_COMPAT
int xt_compat_add_offset(u_int8_t af, unsigned int offset, short delta)
{
	struct compat_delta *tmp;

	tmp = kmalloc(sizeof(struct compat_delta), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	tmp->offset = offset;
	tmp->delta = delta;

	if (xt[af].compat_offsets) {
		tmp->next = xt[af].compat_offsets->next;
		xt[af].compat_offsets->next = tmp;
	} else {
		xt[af].compat_offsets = tmp;
		tmp->next = NULL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_add_offset);

void xt_compat_flush_offsets(u_int8_t af)
{
	struct compat_delta *tmp, *next;

	if (xt[af].compat_offsets) {
		for (tmp = xt[af].compat_offsets; tmp; tmp = next) {
			next = tmp->next;
			kfree(tmp);
		}
		xt[af].compat_offsets = NULL;
	}
}
EXPORT_SYMBOL_GPL(xt_compat_flush_offsets);

int xt_compat_calc_jump(u_int8_t af, unsigned int offset)
{
	struct compat_delta *tmp;
	int delta;

	for (tmp = xt[af].compat_offsets, delta = 0; tmp; tmp = tmp->next)
		if (tmp->offset < offset)
			delta += tmp->delta;
	return delta;
}
EXPORT_SYMBOL_GPL(xt_compat_calc_jump);
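
/*
 * Worked example (made-up numbers): if translation recorded deltas of
 * +4 at offset 100 and +8 at offset 200, a compat jump to offset 250
 * must be shifted by xt_compat_calc_jump() = 4 + 8 = 12 bytes, while a
 * jump to offset 150 shifts by only 4, since just the first recorded
 * entry lies below it.
 */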

int xt_compat_match_offset(const struct xt_match *match)
{
	u_int16_t csize = match->compatsize ? : match->matchsize;
	return XT_ALIGN(match->matchsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_match_offset);

int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match *cm = (struct compat_xt_entry_match *)m;
	int pad, off = xt_compat_match_offset(match);
	u_int16_t msize = cm->u.user.match_size;

	m = *dstptr;
	memcpy(m, cm, sizeof(*cm));
	if (match->compat_from_user)
		match->compat_from_user(m->data, cm->data);
	else
		memcpy(m->data, cm->data, msize - sizeof(*cm));
	pad = XT_ALIGN(match->matchsize) - match->matchsize;
	if (pad > 0)
		memset(m->data + match->matchsize, 0, pad);

	msize += off;
	m->u.user.match_size = msize;

	*size += off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_from_user);

int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size)
{
	const struct xt_match *match = m->u.kernel.match;
	struct compat_xt_entry_match __user *cm = *dstptr;
	int off = xt_compat_match_offset(match);
	u_int16_t msize = m->u.user.match_size - off;

	if (copy_to_user(cm, m, sizeof(*cm)) ||
	    put_user(msize, &cm->u.user.match_size) ||
	    copy_to_user(cm->u.user.name, m->u.kernel.match->name,
			 strlen(m->u.kernel.match->name) + 1))
		return -EFAULT;

	if (match->compat_to_user) {
		if (match->compat_to_user((void __user *)cm->data, m->data))
			return -EFAULT;
	} else {
		if (copy_to_user(cm->data, m->data, msize - sizeof(*cm)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += msize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_match_to_user);
#endif /* CONFIG_COMPAT */

int xt_check_target(struct xt_tgchk_param *par,
		    unsigned int size, u_int8_t proto, bool inv_proto)
{
	int ret;

	if (XT_ALIGN(par->target->targetsize) != size) {
		pr_err("%s_tables: %s.%u target: invalid size "
		       "%u (kernel) != (user) %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->revision,
		       XT_ALIGN(par->target->targetsize), size);
		return -EINVAL;
	}
	if (par->target->table != NULL &&
	    strcmp(par->target->table, par->table) != 0) {
		pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->table, par->table);
		return -EINVAL;
	}
	if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
		char used[64], allow[64];

		pr_err("%s_tables: %s target: used from hooks %s, but only "
		       "usable from %s\n",
		       xt_prefix[par->family], par->target->name,
		       textify_hooks(used, sizeof(used), par->hook_mask),
		       textify_hooks(allow, sizeof(allow), par->target->hooks));
		return -EINVAL;
	}
	if (par->target->proto && (par->target->proto != proto || inv_proto)) {
		pr_err("%s_tables: %s target: only valid for protocol %u\n",
		       xt_prefix[par->family], par->target->name,
		       par->target->proto);
		return -EINVAL;
	}
	if (par->target->checkentry != NULL) {
		ret = par->target->checkentry(par);
		if (ret < 0)
			return ret;
		else if (ret > 0)
			/* Flag up potential errors. */
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xt_check_target);

#ifdef CONFIG_COMPAT
int xt_compat_target_offset(const struct xt_target *target)
{
	u_int16_t csize = target->compatsize ? : target->targetsize;
	return XT_ALIGN(target->targetsize) - COMPAT_XT_ALIGN(csize);
}
EXPORT_SYMBOL_GPL(xt_compat_target_offset);

void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target *ct = (struct compat_xt_entry_target *)t;
	int pad, off = xt_compat_target_offset(target);
	u_int16_t tsize = ct->u.user.target_size;

	t = *dstptr;
	memcpy(t, ct, sizeof(*ct));
	if (target->compat_from_user)
		target->compat_from_user(t->data, ct->data);
	else
		memcpy(t->data, ct->data, tsize - sizeof(*ct));
	pad = XT_ALIGN(target->targetsize) - target->targetsize;
	if (pad > 0)
		memset(t->data + target->targetsize, 0, pad);

	tsize += off;
	t->u.user.target_size = tsize;

	*size += off;
	*dstptr += tsize;
}
EXPORT_SYMBOL_GPL(xt_compat_target_from_user);

int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size)
{
	const struct xt_target *target = t->u.kernel.target;
	struct compat_xt_entry_target __user *ct = *dstptr;
	int off = xt_compat_target_offset(target);
	u_int16_t tsize = t->u.user.target_size - off;

	if (copy_to_user(ct, t, sizeof(*ct)) ||
	    put_user(tsize, &ct->u.user.target_size) ||
	    copy_to_user(ct->u.user.name, t->u.kernel.target->name,
			 strlen(t->u.kernel.target->name) + 1))
		return -EFAULT;

	if (target->compat_to_user) {
		if (target->compat_to_user((void __user *)ct->data, t->data))
			return -EFAULT;
	} else {
		if (copy_to_user(ct->data, t->data, tsize - sizeof(*ct)))
			return -EFAULT;
	}

	*size -= off;
	*dstptr += tsize;
	return 0;
}
EXPORT_SYMBOL_GPL(xt_compat_target_to_user);
#endif

struct xt_table_info *xt_alloc_table_info(unsigned int size)
{
	struct xt_table_info *newinfo;
	int cpu;

	/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
	if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > totalram_pages)
		return NULL;

	newinfo = kzalloc(XT_TABLE_INFO_SZ, GFP_KERNEL);
	if (!newinfo)
		return NULL;

	newinfo->size = size;

	for_each_possible_cpu(cpu) {
		if (size <= PAGE_SIZE)
			newinfo->entries[cpu] = kmalloc_node(size,
							GFP_KERNEL,
							cpu_to_node(cpu));
		else
			newinfo->entries[cpu] = vmalloc_node(size,
							cpu_to_node(cpu));

		if (newinfo->entries[cpu] == NULL) {
			xt_free_table_info(newinfo);
			return NULL;
		}
	}

	return newinfo;
}
EXPORT_SYMBOL(xt_alloc_table_info);

void xt_free_table_info(struct xt_table_info *info)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		if (info->size <= PAGE_SIZE)
			kfree(info->entries[cpu]);
		else
			vfree(info->entries[cpu]);
	}

	if (info->jumpstack != NULL) {
		if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
			for_each_possible_cpu(cpu)
				vfree(info->jumpstack[cpu]);
		} else {
			for_each_possible_cpu(cpu)
				kfree(info->jumpstack[cpu]);
		}
	}

	if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
		vfree(info->jumpstack);
	else
		kfree(info->jumpstack);

	free_percpu(info->stackptr);

	kfree(info);
}
EXPORT_SYMBOL(xt_free_table_info);

/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name)
{
	struct xt_table *t;

	if (mutex_lock_interruptible(&xt[af].mutex) != 0)
		return ERR_PTR(-EINTR);

	list_for_each_entry(t, &net->xt.tables[af], list)
		if (strcmp(t->name, name) == 0 && try_module_get(t->me))
			return t;
	mutex_unlock(&xt[af].mutex);
	return NULL;
}
EXPORT_SYMBOL_GPL(xt_find_table_lock);

void xt_table_unlock(struct xt_table *table)
{
	mutex_unlock(&xt[table->af].mutex);
}
EXPORT_SYMBOL_GPL(xt_table_unlock);
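
/*
 * Caller pattern (sketch): a successful xt_find_table_lock() returns
 * with the per-family mutex held and a module reference taken, so the
 * caller must release both when done.
 *
 *	t = xt_find_table_lock(net, NFPROTO_IPV4, "filter");
 *	if (t && !IS_ERR(t)) {
 *		...			// read or replace table contents
 *		module_put(t->me);
 *		xt_table_unlock(t);
 *	}
 */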

#ifdef CONFIG_COMPAT
void xt_compat_lock(u_int8_t af)
{
	mutex_lock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_lock);

void xt_compat_unlock(u_int8_t af)
{
	mutex_unlock(&xt[af].compat_mutex);
}
EXPORT_SYMBOL_GPL(xt_compat_unlock);
#endif

DEFINE_PER_CPU(struct xt_info_lock, xt_info_locks);
EXPORT_PER_CPU_SYMBOL_GPL(xt_info_locks);

static int xt_jumpstack_alloc(struct xt_table_info *i)
{
	unsigned int size;
	int cpu;

	i->stackptr = alloc_percpu(unsigned int);
	if (i->stackptr == NULL)
		return -ENOMEM;

	size = sizeof(void **) * nr_cpu_ids;
	if (size > PAGE_SIZE)
		i->jumpstack = vmalloc(size);
	else
		i->jumpstack = kmalloc(size, GFP_KERNEL);
	if (i->jumpstack == NULL)
		return -ENOMEM;
	memset(i->jumpstack, 0, size);

	i->stacksize *= xt_jumpstack_multiplier;
	size = sizeof(void *) * i->stacksize;
	for_each_possible_cpu(cpu) {
		if (size > PAGE_SIZE)
			i->jumpstack[cpu] = vmalloc_node(size,
				cpu_to_node(cpu));
		else
			i->jumpstack[cpu] = kmalloc_node(size,
				GFP_KERNEL, cpu_to_node(cpu));
		if (i->jumpstack[cpu] == NULL)
			/*
			 * Freeing will be done later on by the callers. The
			 * chain is: xt_replace_table -> __do_replace ->
			 * do_replace -> xt_free_table_info.
			 */
			return -ENOMEM;
	}

	return 0;
}
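
/*
 * Sizing example (illustrative numbers): for a ruleset where the caller
 * computed stacksize = 64, xt_jumpstack_multiplier doubles this to 128
 * slots; on a 64-bit kernel that is 128 * 8 = 1024 bytes per CPU, well
 * below PAGE_SIZE, so the kmalloc_node() path is taken.
 */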

struct xt_table_info *
xt_replace_table(struct xt_table *table,
	      unsigned int num_counters,
	      struct xt_table_info *newinfo,
	      int *error)
{
	struct xt_table_info *private;
	int ret;

	ret = xt_jumpstack_alloc(newinfo);
	if (ret < 0) {
		*error = ret;
		return NULL;
	}

	/* Do the substitution. */
	local_bh_disable();
	private = table->private;

	/* Check inside lock: is the old number correct? */
	if (num_counters != private->number) {
		pr_debug("num_counters != table->private->number (%u/%u)\n",
			 num_counters, private->number);
		local_bh_enable();
		*error = -EAGAIN;
		return NULL;
	}

	table->private = newinfo;
	newinfo->initial_entries = private->initial_entries;

	/*
	 * Even though table entries have now been swapped, other CPU's
	 * may still be using the old entries. This is okay, because
	 * resynchronization happens because of the locking done
	 * during the get_counters() routine.
	 */
	local_bh_enable();

	return private;
}
EXPORT_SYMBOL_GPL(xt_replace_table);
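
/*
 * Caller pattern (sketch; error handling and counter harvesting reduced
 * to pseudocode): the old xt_table_info returned above stays valid until
 * its counters have been read, after which it is freed.
 *
 *	oldinfo = xt_replace_table(table, num_counters, newinfo, &ret);
 *	if (!oldinfo)
 *		goto free_newinfo;	// ret holds -EAGAIN or -ENOMEM
 *	// ... copy counters out of oldinfo, then:
 *	xt_free_table_info(oldinfo);
 */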

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *input_table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo)
{
	int ret;
	struct xt_table_info *private;
	struct xt_table *t, *table;

	/* Don't add one object to multiple lists. */
	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto out;
	}

	ret = mutex_lock_interruptible(&xt[table->af].mutex);
	if (ret != 0)
		goto out_free;

	/* Don't autoload: we'd eat our tail... */
	list_for_each_entry(t, &net->xt.tables[table->af], list) {
		if (strcmp(t->name, table->name) == 0) {
			ret = -EEXIST;
			goto unlock;
		}
	}

	/* Simplifies replace_table code. */
	table->private = bootstrap;

	if (!xt_replace_table(table, 0, newinfo, &ret))
		goto unlock;

	private = table->private;
	pr_debug("table->private->number = %u\n", private->number);

	/* save number of initial entries */
	private->initial_entries = private->number;

	list_add(&table->list, &net->xt.tables[table->af]);
	mutex_unlock(&xt[table->af].mutex);
	return table;

 unlock:
	mutex_unlock(&xt[table->af].mutex);
out_free:
	kfree(table);
out:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(xt_register_table);

void *xt_unregister_table(struct xt_table *table)
{
	struct xt_table_info *private;

	mutex_lock(&xt[table->af].mutex);
	private = table->private;
	list_del(&table->list);
	mutex_unlock(&xt[table->af].mutex);
	kfree(table);

	return private;
}
EXPORT_SYMBOL_GPL(xt_unregister_table);
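
/*
 * Registration sketch (hypothetical table module, per-net init): the
 * caller passes an empty "bootstrap" info so xt_replace_table() has
 * something to swap out, plus the real translated ruleset in newinfo.
 *
 *	struct xt_table_info bootstrap = { 0 };
 *
 *	newinfo = xt_alloc_table_info(repl->size);
 *	// ... translate user entries into newinfo ...
 *	table = xt_register_table(net, &foo_table, &bootstrap, newinfo);
 *	if (IS_ERR(table))
 *		xt_free_table_info(newinfo);	// error paths do not free it
 *
 * On shutdown, xt_unregister_table() hands back the live xt_table_info
 * for the caller to release with xt_free_table_info().
 */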

#ifdef CONFIG_PROC_FS
struct xt_names_priv {
	struct seq_net_private p;
	u_int8_t af;
};
static void *xt_table_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	mutex_lock(&xt[af].mutex);
	return seq_list_start(&net->xt.tables[af], *pos);
}

static void *xt_table_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct xt_names_priv *priv = seq->private;
	struct net *net = seq_file_net(seq);
	u_int8_t af = priv->af;

	return seq_list_next(v, &net->xt.tables[af], pos);
}

static void xt_table_seq_stop(struct seq_file *seq, void *v)
{
	struct xt_names_priv *priv = seq->private;
	u_int8_t af = priv->af;

	mutex_unlock(&xt[af].mutex);
}

static int xt_table_seq_show(struct seq_file *seq, void *v)
{
	struct xt_table *table = list_entry(v, struct xt_table, list);

	if (strlen(table->name))
		return seq_printf(seq, "%s\n", table->name);
	else
		return 0;
}

static const struct seq_operations xt_table_seq_ops = {
	.start	= xt_table_seq_start,
	.next	= xt_table_seq_next,
	.stop	= xt_table_seq_stop,
	.show	= xt_table_seq_show,
};

static int xt_table_open(struct inode *inode, struct file *file)
{
	int ret;
	struct xt_names_priv *priv;

	ret = seq_open_net(inode, file, &xt_table_seq_ops,
			   sizeof(struct xt_names_priv));
	if (!ret) {
		priv = ((struct seq_file *)file->private_data)->private;
		priv->af = (unsigned long)PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations xt_table_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_table_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

/*
 * Traverse state for ip{,6}_{tables,matches} for helping crossing
 * the multi-AF mutexes.
 */
struct nf_mttg_trav {
	struct list_head *head, *curr;
	uint8_t class, nfproto;
};

enum {
	MTTG_TRAV_INIT,
	MTTG_TRAV_NFP_UNSPEC,
	MTTG_TRAV_NFP_SPEC,
	MTTG_TRAV_DONE,
};

static void *xt_mttg_seq_next(struct seq_file *seq, void *v, loff_t *ppos,
    bool is_target)
{
	static const uint8_t next_class[] = {
		[MTTG_TRAV_NFP_UNSPEC] = MTTG_TRAV_NFP_SPEC,
		[MTTG_TRAV_NFP_SPEC]   = MTTG_TRAV_DONE,
	};
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_INIT:
		trav->class = MTTG_TRAV_NFP_UNSPEC;
		mutex_lock(&xt[NFPROTO_UNSPEC].mutex);
		trav->head = trav->curr = is_target ?
			&xt[NFPROTO_UNSPEC].target : &xt[NFPROTO_UNSPEC].match;
		break;
	case MTTG_TRAV_NFP_UNSPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		mutex_lock(&xt[trav->nfproto].mutex);
		trav->head = trav->curr = is_target ?
			&xt[trav->nfproto].target : &xt[trav->nfproto].match;
		trav->class = next_class[trav->class];
		break;
	case MTTG_TRAV_NFP_SPEC:
		trav->curr = trav->curr->next;
		if (trav->curr != trav->head)
			break;
		/* fallthru, _stop will unlock */
	default:
		return NULL;
	}

	if (ppos != NULL)
		++*ppos;
	return trav;
}

static void *xt_mttg_seq_start(struct seq_file *seq, loff_t *pos,
    bool is_target)
{
	struct nf_mttg_trav *trav = seq->private;
	unsigned int j;

	trav->class = MTTG_TRAV_INIT;
	for (j = 0; j < *pos; ++j)
		if (xt_mttg_seq_next(seq, NULL, NULL, is_target) == NULL)
			return NULL;
	return trav;
}

static void xt_mttg_seq_stop(struct seq_file *seq, void *v)
{
	struct nf_mttg_trav *trav = seq->private;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
		mutex_unlock(&xt[NFPROTO_UNSPEC].mutex);
		break;
	case MTTG_TRAV_NFP_SPEC:
		mutex_unlock(&xt[trav->nfproto].mutex);
		break;
	}
}

static void *xt_match_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, false);
}

static void *xt_match_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, false);
}

static int xt_match_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_match *match;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		match = list_entry(trav->curr, struct xt_match, list);
		return (*match->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", match->name);
	}
	return 0;
}

static const struct seq_operations xt_match_seq_ops = {
	.start	= xt_match_seq_start,
	.next	= xt_match_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_match_seq_show,
};

static int xt_match_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_match_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_match_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_match_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

static void *xt_target_seq_start(struct seq_file *seq, loff_t *pos)
{
	return xt_mttg_seq_start(seq, pos, true);
}

static void *xt_target_seq_next(struct seq_file *seq, void *v, loff_t *ppos)
{
	return xt_mttg_seq_next(seq, v, ppos, true);
}

static int xt_target_seq_show(struct seq_file *seq, void *v)
{
	const struct nf_mttg_trav *trav = seq->private;
	const struct xt_target *target;

	switch (trav->class) {
	case MTTG_TRAV_NFP_UNSPEC:
	case MTTG_TRAV_NFP_SPEC:
		if (trav->curr == trav->head)
			return 0;
		target = list_entry(trav->curr, struct xt_target, list);
		return (*target->name == '\0') ? 0 :
		       seq_printf(seq, "%s\n", target->name);
	}
	return 0;
}

static const struct seq_operations xt_target_seq_ops = {
	.start	= xt_target_seq_start,
	.next	= xt_target_seq_next,
	.stop	= xt_mttg_seq_stop,
	.show	= xt_target_seq_show,
};

static int xt_target_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct nf_mttg_trav *trav;
	int ret;

	trav = kmalloc(sizeof(*trav), GFP_KERNEL);
	if (trav == NULL)
		return -ENOMEM;

	ret = seq_open(file, &xt_target_seq_ops);
	if (ret < 0) {
		kfree(trav);
		return ret;
	}

	seq = file->private_data;
	seq->private = trav;
	trav->nfproto = (unsigned long)PDE(inode)->data;
	return 0;
}

static const struct file_operations xt_target_ops = {
	.owner	 = THIS_MODULE,
	.open	 = xt_target_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#define FORMAT_TABLES	"_tables_names"
#define FORMAT_MATCHES	"_tables_matches"
#define FORMAT_TARGETS	"_tables_targets"

#endif /* CONFIG_PROC_FS */

/**
 * xt_hook_link - set up hooks for a new table
 * @table:	table with metadata needed to set up hooks
 * @fn:		Hook function
 *
 * This function will take care of creating and registering the necessary
 * Netfilter hooks for XT tables.
 */
struct nf_hook_ops *xt_hook_link(const struct xt_table *table, nf_hookfn *fn)
{
	unsigned int hook_mask = table->valid_hooks;
	uint8_t i, num_hooks = hweight32(hook_mask);
	uint8_t hooknum;
	struct nf_hook_ops *ops;
	int ret;

	ops = kmalloc(sizeof(*ops) * num_hooks, GFP_KERNEL);
	if (ops == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0, hooknum = 0; i < num_hooks && hook_mask != 0;
	     hook_mask >>= 1, ++hooknum) {
		if (!(hook_mask & 1))
			continue;
		ops[i].hook     = fn;
		ops[i].owner    = table->me;
		ops[i].pf       = table->af;
		ops[i].hooknum  = hooknum;
		ops[i].priority = table->priority;
		++i;
	}

	ret = nf_register_hooks(ops, num_hooks);
	if (ret < 0) {
		kfree(ops);
		return ERR_PTR(ret);
	}

	return ops;
}
EXPORT_SYMBOL_GPL(xt_hook_link);

/**
 * xt_hook_unlink - remove hooks for a table
 * @table:	table whose valid_hooks mask was passed to xt_hook_link
 * @ops:	nf_hook_ops array as returned by xt_hook_link
 */
void xt_hook_unlink(const struct xt_table *table, struct nf_hook_ops *ops)
{
	nf_unregister_hooks(ops, hweight32(table->valid_hooks));
	kfree(ops);
}
EXPORT_SYMBOL_GPL(xt_hook_unlink);
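
/*
 * Linking sketch (hypothetical table module): with valid_hooks =
 * (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD) |
 * (1 << NF_INET_LOCAL_OUT), hweight32() yields three nf_hook_ops, all
 * dispatching to the same hook function at the table's priority.
 *
 *	ops = xt_hook_link(&foo_table, foo_hook_fn);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *	...
 *	xt_hook_unlink(&foo_table, ops);	// module exit
 */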

int xt_proto_init(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];
	struct proc_dir_entry *proc;
#endif

	if (af >= ARRAY_SIZE(xt_prefix))
		return -EINVAL;

#ifdef CONFIG_PROC_FS
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_table_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_match_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_tables;

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc = proc_create_data(buf, 0440, net->proc_net, &xt_target_ops,
				(void *)(unsigned long)af);
	if (!proc)
		goto out_remove_matches;
#endif

	return 0;

#ifdef CONFIG_PROC_FS
out_remove_matches:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);

out_remove_tables:
	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);
out:
	return -1;
#endif
}
EXPORT_SYMBOL_GPL(xt_proto_init);
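
/*
 * Resulting proc entries, e.g. for NFPROTO_IPV4 (prefix "ip"):
 * /proc/net/ip_tables_names, /proc/net/ip_tables_matches and
 * /proc/net/ip_tables_targets, one registered name per line. The table
 * names are per network namespace, while the match and target lists
 * come from the global xt[] registries.
 */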

void xt_proto_fini(struct net *net, u_int8_t af)
{
#ifdef CONFIG_PROC_FS
	char buf[XT_FUNCTION_MAXNAMELEN];

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TABLES, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_TARGETS, sizeof(buf));
	proc_net_remove(net, buf);

	strlcpy(buf, xt_prefix[af], sizeof(buf));
	strlcat(buf, FORMAT_MATCHES, sizeof(buf));
	proc_net_remove(net, buf);
#endif /* CONFIG_PROC_FS */
}
EXPORT_SYMBOL_GPL(xt_proto_fini);

static int __net_init xt_net_init(struct net *net)
{
	int i;

	for (i = 0; i < NFPROTO_NUMPROTO; i++)
		INIT_LIST_HEAD(&net->xt.tables[i]);
	return 0;
}

static struct pernet_operations xt_net_ops = {
	.init = xt_net_init,
};

static int __init xt_init(void)
{
	unsigned int i;
	int rv;

	for_each_possible_cpu(i) {
		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
		spin_lock_init(&lock->lock);
		lock->readers = 0;
	}

	xt = kmalloc(sizeof(struct xt_af) * NFPROTO_NUMPROTO, GFP_KERNEL);
	if (!xt)
		return -ENOMEM;

	for (i = 0; i < NFPROTO_NUMPROTO; i++) {
		mutex_init(&xt[i].mutex);
#ifdef CONFIG_COMPAT
		mutex_init(&xt[i].compat_mutex);
		xt[i].compat_offsets = NULL;
#endif
		INIT_LIST_HEAD(&xt[i].target);
		INIT_LIST_HEAD(&xt[i].match);
	}
	rv = register_pernet_subsys(&xt_net_ops);
	if (rv < 0)
		kfree(xt);
	return rv;
}

static void __exit xt_fini(void)
{
	unregister_pernet_subsys(&xt_net_ops);
	kfree(xt);
}

module_init(xt_init);
module_exit(xt_fini);