/* From asuswrt-rt-n18u-9.0.0.4.380.2695/release/src-rt-6.x.4708/linux/linux-2.6.36/net/netfilter/ */
/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>

MODULE_LICENSE("GPL");

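/*
 * /proc interface: /proc/net/nf_conntrack dumps the connection tracking
 * table and /proc/net/stat/nf_conntrack exports per-CPU statistics.
 */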
#ifdef CONFIG_PROC_FS
int
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
            const struct nf_conntrack_l3proto *l3proto,
            const struct nf_conntrack_l4proto *l4proto)
{
	return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple);
}
EXPORT_SYMBOL_GPL(print_tuple);

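/*
 * seq_file iterator state for walking net->ct.hash under RCU;
 * 'bucket' records which hash chain is currently being traversed.
 */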
struct ct_iter_state {
	struct seq_net_private p;
	unsigned int bucket;
};

static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;
	struct hlist_nulls_node *n;

	for (st->bucket = 0;
	     st->bucket < net->ct.htable_size;
	     st->bucket++) {
		n = rcu_dereference(net->ct.hash[st->bucket].first);
		if (!is_a_nulls(n))
			return n;
	}
	return NULL;
}

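/*
 * Advance the RCU walk.  A nulls marker ends every chain and encodes the
 * bucket number: if it matches st->bucket we genuinely reached the end of
 * that chain and move on to the next bucket; if it does not, the walk
 * wandered onto a different chain (the entry was freed and reused
 * concurrently), so the current bucket is rescanned.
 */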
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
				      struct hlist_nulls_node *head)
{
	struct net *net = seq_file_net(seq);
	struct ct_iter_state *st = seq->private;

	head = rcu_dereference(head->next);
	while (is_a_nulls(head)) {
		if (likely(get_nulls_value(head) == st->bucket)) {
			if (++st->bucket >= net->ct.htable_size)
				return NULL;
		}
		head = rcu_dereference(net->ct.hash[st->bucket].first);
	}
	return head;
}

static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_nulls_node *head = ct_get_first(seq);

	if (head)
		while (pos && (head = ct_get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return ct_get_idx(seq, *pos);
}

static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return ct_get_next(s, v);
}

static void ct_seq_stop(struct seq_file *s, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

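/*
 * One /proc/net/nf_conntrack line is printed per DIR_ORIGINAL entry, e.g.
 * (field values below are illustrative only):
 *
 *   ipv4     2 tcp      6 431999 ESTABLISHED src=10.0.0.2 dst=10.0.0.1
 *       sport=51234 dport=22 src=10.0.0.1 dst=10.0.0.2 sport=22
 *       dport=51234 [ASSURED] mark=0 use=1
 *
 * i.e. l3proto name and number, l4proto name and number, seconds until the
 * timeout expires, the l4proto state, the original tuple (plus optional
 * accounting), [UNREPLIED] if no reply has been seen yet, the reply tuple
 * (plus optional accounting), [ASSURED] if set, then optional
 * mark/secmark/zone and the conntrack reference count.
 */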
/* return 0 on success, 1 in case of error */
static int ct_seq_show(struct seq_file *s, void *v)
{
	struct nf_conntrack_tuple_hash *hash = v;
	struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
	const struct nf_conntrack_l3proto *l3proto;
	const struct nf_conntrack_l4proto *l4proto;
	int ret = 0;

	NF_CT_ASSERT(ct);
	if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
		return 0;

	/* we only want to print DIR_ORIGINAL */
	if (NF_CT_DIRECTION(hash))
		goto release;

	l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
	NF_CT_ASSERT(l3proto);
	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
	NF_CT_ASSERT(l4proto);

	ret = -ENOSPC;
	if (seq_printf(s, "%-8s %u %-8s %u %ld ",
		       l3proto->name, nf_ct_l3num(ct),
		       l4proto->name, nf_ct_protonum(ct),
		       timer_pending(&ct->timeout)
		       ? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
		goto release;

	if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
		goto release;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
			l3proto, l4proto))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
		goto release;

	if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
		if (seq_printf(s, "[UNREPLIED] "))
			goto release;

	if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
			l3proto, l4proto))
		goto release;

	if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
		goto release;

	if (test_bit(IPS_ASSURED_BIT, &ct->status))
		if (seq_printf(s, "[ASSURED] "))
			goto release;

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (seq_printf(s, "mark=%u ", ct->mark))
		goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_SECMARK
	if (seq_printf(s, "secmark=%u ", ct->secmark))
		goto release;
#endif

#ifdef CONFIG_NF_CONNTRACK_ZONES
	if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
		goto release;
#endif

	if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
		goto release;

	ret = 0;
release:
	nf_ct_put(ct);
	return 0;
}

static const struct seq_operations ct_seq_ops = {
	.start = ct_seq_start,
	.next  = ct_seq_next,
	.stop  = ct_seq_stop,
	.show  = ct_seq_show
};

static int ct_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_seq_ops,
			sizeof(struct ct_iter_state));
}

static const struct file_operations ct_file_ops = {
	.owner   = THIS_MODULE,
	.open    = ct_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net,
};

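/*
 * /proc/net/stat/nf_conntrack: one header row (SEQ_START_TOKEN) followed by
 * one row of counters per possible CPU; *pos is kept as the CPU number + 1.
 */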
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}

static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return per_cpu_ptr(net->ct.stat, cpu);
	}

	return NULL;
}

static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = seq_file_net(seq);
	unsigned int nr_conntracks = atomic_read(&net->ct.count);
	const struct ip_conntrack_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries  searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error  expect_new expect_create expect_delete search_restart\n");
		return 0;
	}

	seq_printf(seq, "%08x  %08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x  %08x %08x %08x %08x\n",
		   nr_conntracks,
		   st->searched,
		   st->found,
		   st->new,
		   st->invalid,
		   st->ignore,
		   st->delete,
		   st->delete_list,
		   st->insert,
		   st->insert_failed,
		   st->drop,
		   st->early_drop,
		   st->error,

		   st->expect_new,
		   st->expect_create,
		   st->expect_delete,
		   st->search_restart
		);
	return 0;
}

static const struct seq_operations ct_cpu_seq_ops = {
	.start	= ct_cpu_seq_start,
	.next	= ct_cpu_seq_next,
	.stop	= ct_cpu_seq_stop,
	.show	= ct_cpu_seq_show,
};

static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &ct_cpu_seq_ops,
			    sizeof(struct seq_net_private));
}

static const struct file_operations ct_cpu_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ct_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_net,
};

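/* Create/remove both proc entries for one network namespace. */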
static int nf_conntrack_standalone_init_proc(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
	if (!pde)
		goto out_nf_conntrack;

	pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
			  &ct_cpu_seq_fops);
	if (!pde)
		goto out_stat_nf_conntrack;
	return 0;

out_stat_nf_conntrack:
	proc_net_remove(net, "nf_conntrack");
out_nf_conntrack:
	return -ENOMEM;
}

static void nf_conntrack_standalone_fini_proc(struct net *net)
{
	remove_proc_entry("nf_conntrack", net->proc_net_stat);
	proc_net_remove(net, "nf_conntrack");
}
#else
static int nf_conntrack_standalone_init_proc(struct net *net)
{
	return 0;
}

static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */

/* Sysctl support */

#ifdef CONFIG_SYSCTL
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;

static struct ctl_table_header *nf_ct_netfilter_header;

static ctl_table nf_ct_sysctl_table[] = {
	{
		.procname	= "nf_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_count",
		.data		= &init_net.ct.count,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname       = "nf_conntrack_buckets",
		.data           = &init_net.ct.htable_size,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0444,
		.proc_handler   = proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_checksum",
		.data		= &init_net.ct.sysctl_checksum,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "nf_conntrack_log_invalid",
		.data		= &init_net.ct.sysctl_log_invalid,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &log_invalid_proto_min,
		.extra2		= &log_invalid_proto_max,
	},
	{
		.procname	= "nf_conntrack_expect_max",
		.data		= &nf_ct_expect_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

#define NET_NF_CONNTRACK_MAX 2089

static ctl_table nf_ct_netfilter_table[] = {
	{
		.procname	= "nf_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_path nf_ct_path[] = {
	{ .procname = "net", },
	{ }
};

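/*
 * Per-namespace sysctl registration: the legacy net.nf_conntrack_max entry
 * is registered once, for init_net only; nf_ct_sysctl_table is then
 * duplicated for each namespace and its per-namespace .data pointers
 * (count, buckets, checksum, log_invalid) redirected before the copy is
 * registered under net.netfilter.
 */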
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	struct ctl_table *table;

	if (net_eq(net, &init_net)) {
		nf_ct_netfilter_header =
		       register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
		if (!nf_ct_netfilter_header)
			goto out;
	}

	table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
			GFP_KERNEL);
	if (!table)
		goto out_kmemdup;

	table[1].data = &net->ct.count;
	table[2].data = &net->ct.htable_size;
	table[3].data = &net->ct.sysctl_checksum;
	table[4].data = &net->ct.sysctl_log_invalid;

	net->ct.sysctl_header = register_net_sysctl_table(net,
					nf_net_netfilter_sysctl_path, table);
	if (!net->ct.sysctl_header)
		goto out_unregister_netfilter;

	return 0;

out_unregister_netfilter:
	kfree(table);
out_kmemdup:
	if (net_eq(net, &init_net))
		unregister_sysctl_table(nf_ct_netfilter_header);
out:
	printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
	return -ENOMEM;
}

static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
	struct ctl_table *table;

	if (net_eq(net, &init_net))
		unregister_sysctl_table(nf_ct_netfilter_header);
	table = net->ct.sysctl_header->ctl_table_arg;
	unregister_net_sysctl_table(net->ct.sysctl_header);
	kfree(table);
}
#else
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
	return 0;
}

static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */

static int nf_conntrack_net_init(struct net *net)
{
	int ret;

	ret = nf_conntrack_init(net);
	if (ret < 0)
		goto out_init;
	ret = nf_conntrack_standalone_init_proc(net);
	if (ret < 0)
		goto out_proc;
	net->ct.sysctl_checksum = 1;
	net->ct.sysctl_log_invalid = 0;
	ret = nf_conntrack_standalone_init_sysctl(net);
	if (ret < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
	nf_conntrack_standalone_fini_proc(net);
out_proc:
	nf_conntrack_cleanup(net);
out_init:
	return ret;
}

static void nf_conntrack_net_exit(struct net *net)
{
	nf_conntrack_standalone_fini_sysctl(net);
	nf_conntrack_standalone_fini_proc(net);
	nf_conntrack_cleanup(net);
}

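/*
 * Pernet init/exit: bring up the conntrack core, then the proc entries,
 * then the sysctls; exit tears them down in reverse order.
 */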
static struct pernet_operations nf_conntrack_net_ops = {
	.init = nf_conntrack_net_init,
	.exit = nf_conntrack_net_exit,
};

static int __init nf_conntrack_standalone_init(void)
{
	return register_pernet_subsys(&nf_conntrack_net_ops);
}

static void __exit nf_conntrack_standalone_fini(void)
{
	unregister_pernet_subsys(&nf_conntrack_net_ops);
}

module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);

/* Some modules need us, but don't depend directly on any symbol.
   They should call this. */
void need_conntrack(void)
{
}
EXPORT_SYMBOL_GPL(need_conntrack);