#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol family.  The table is
 * protected by the queue_handler_lock rwlock below.  The handler must
 * provide an outfn() to accept packets for queueing and must reinject every
 * packet it receives, no matter what.
 */
static struct nf_queue_handler *queue_handler[NPROTO];

static DEFINE_RWLOCK(queue_handler_lock);

/* Return -EBUSY if a different handler is already registered, -EEXIST if
 * the same handler is already registered, and 0 on success. */
int nf_register_queue_handler(int pf, struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		queue_handler[pf] = qh;
		ret = 0;
	}
	write_unlock_bh(&queue_handler_lock);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(int pf)
{
	if (pf >= NPROTO)
		return -EINVAL;

	write_lock_bh(&queue_handler_lock);
	queue_handler[pf] = NULL;
	write_unlock_bh(&queue_handler_lock);

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);
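
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * module might use the registration API above.  The handler's outfn() gets
 * the packet plus an nf_info cookie and must eventually hand both back via
 * nf_reinject(); this toy handler simply reinjects at once with NF_ACCEPT.
 * All example_* names are invented for this sketch.
 */
#if 0
static int example_outfn(struct sk_buff *skb, struct nf_info *info,
			 unsigned int queuenum, void *data)
{
	/* A real handler would park the packet (e.g. hand it to userspace)
	 * and reinject it later; reinjecting immediately is also legal. */
	nf_reinject(skb, info, NF_ACCEPT);
	return 0;
}

static struct nf_queue_handler example_handler = {
	.name	= "example",
	.outfn	= example_outfn,
};

static int __init example_init(void)
{
	/* -EBUSY: another handler already owns PF_INET;
	 * -EEXIST: this handler is already registered. */
	return nf_register_queue_handler(PF_INET, &example_handler);
}

static void __exit example_exit(void)
{
	/* Flush any pending packets before unregistering. */
	nf_unregister_queue_handler(PF_INET);
}
#endif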

void nf_unregister_queue_handlers(struct nf_queue_handler *qh)
{
	int pf;

	write_lock_bh(&queue_handler_lock);
	for (pf = 0; pf < NPROTO; pf++) {
		if (queue_handler[pf] == qh)
			queue_handler[pf] = NULL;
	}
	write_unlock_bh(&queue_handler_lock);
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      int pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status;
	struct nf_info *info;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev = NULL;
	struct net_device *physoutdev = NULL;
#endif
	struct nf_afinfo *afinfo;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	read_lock(&queue_handler_lock);
	if (!queue_handler[pf]) {
		read_unlock(&queue_handler_lock);
		kfree_skb(skb);
		return 1;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo) {
		read_unlock(&queue_handler_lock);
		kfree_skb(skb);
		return 1;
	}

	info = kmalloc(sizeof(*info) + afinfo->route_key_size, GFP_ATOMIC);
	if (!info) {
		if (net_ratelimit())
			printk(KERN_ERR "OOM queueing packet %p\n", skb);
		read_unlock(&queue_handler_lock);
		kfree_skb(skb);
		return 1;
	}

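	/* Record the full hook context (hook ops, pf, hook number, devices,
	 * okfn) so that nf_reinject() can resume traversal later. */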
	*info = (struct nf_info) {
		(struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn };

	/* If the hook's owner module is being unloaded, ignore the hook. */
	if (!try_module_get(info->elem->owner)) {
		read_unlock(&queue_handler_lock);
		kfree(info);
		return 0;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev) dev_hold(indev);
	if (outdev) dev_hold(outdev);

#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev) dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev) dev_hold(physoutdev);
	}
#endif
	afinfo->saveroute(skb, info);
	status = queue_handler[pf]->outfn(skb, info, queuenum,
					  queue_handler[pf]->data);

	read_unlock(&queue_handler_lock);

	if (status < 0) {
		/* Queueing failed: undo the refcounts and drop the packet. */
		if (indev) dev_put(indev);
		if (outdev) dev_put(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
		if (physindev) dev_put(physindev);
		if (physoutdev) dev_put(physoutdev);
#endif
		module_put(info->elem->owner);
		kfree(info);
		kfree_skb(skb);

		return 1;
	}

	return 1;
}

int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     int pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case AF_INET:
		skb->protocol = htons(ETH_P_IP);
		break;
	case AF_INET6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

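	/* Queue handlers are not prepared for GSO skbs: segment the packet
	 * first and queue each resulting segment on its own. */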
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (unlikely(IS_ERR(segs)))
		return 1;

	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
				queuenum))
			kfree_skb(segs);
		segs = nskb;
	} while (segs);
	return 1;
}

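/*
 * nf_reinject() is the return path for packets handed out by __nf_queue():
 * it drops the references taken there, re-checks that the queueing hook is
 * still registered, applies the caller's verdict, and then resumes hook
 * traversal (NF_ACCEPT), re-runs the same hook (NF_REPEAT), re-queues
 * (NF_QUEUE) or frees the packet (NF_DROP and anything unrecognised).
 */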
void nf_reinject(struct sk_buff *skb, struct nf_info *info,
		 unsigned int verdict)
{
	struct list_head *elem = &info->elem->list;
	struct list_head *i;
	struct nf_afinfo *afinfo;

	rcu_read_lock();

	/* Release the device references taken in __nf_queue(). */
	if (info->indev) dev_put(info->indev);
	if (info->outdev) dev_put(info->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		if (skb->nf_bridge->physindev)
			dev_put(skb->nf_bridge->physindev);
		if (skb->nf_bridge->physoutdev)
			dev_put(skb->nf_bridge->physoutdev);
	}
#endif

	/* Drop reference to owner of hook which queued us. */
	module_put(info->elem->owner);

	/* Check that the hook which queued this packet still exists. */
	list_for_each_rcu(i, &nf_hooks[info->pf][info->hook]) {
		if (i == elem)
			break;
	}

	if (i == &nf_hooks[info->pf][info->hook]) {
		/* The module which sent it to userspace is gone. */
		NFDEBUG("%s: module disappeared, dropping packet.\n",
			__func__);
		verdict = NF_DROP;
	}

	/* NF_REPEAT: re-run the hook that queued the packet. */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	/* Continue traversal only if userspace said ok. */
	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(info->pf);
		if (!afinfo || afinfo->reroute(&skb, info) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[info->pf][info->hook],
				     &skb, info->hook,
				     info->indev, info->outdev, &elem,
				     info->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		info->okfn(skb);
		/* fall through */
	case NF_STOLEN:
		break;
	case NF_QUEUE:
		if (!__nf_queue(skb, elem, info->pf, info->hook,
				info->indev, info->outdev, info->okfn,
				verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(info);
	return;
}
EXPORT_SYMBOL(nf_reinject);
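
/*
 * Illustrative sketch (not part of the original file): the more typical
 * asynchronous pattern, where outfn() parks the packet and nf_reinject()
 * is called later once a verdict is known (e.g. from userspace).  The
 * example_* names and the single global list are invented for this sketch.
 */
#if 0
struct example_entry {
	struct list_head	list;
	struct sk_buff		*skb;
	struct nf_info		*info;
};

static LIST_HEAD(example_queue);
static DEFINE_SPINLOCK(example_queue_lock);

static int example_defer_outfn(struct sk_buff *skb, struct nf_info *info,
			       unsigned int queuenum, void *data)
{
	struct example_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (!e)
		return -ENOMEM;	/* __nf_queue() drops the packet for us */

	e->skb = skb;
	e->info = info;
	spin_lock_bh(&example_queue_lock);
	list_add_tail(&e->list, &example_queue);
	spin_unlock_bh(&example_queue_lock);
	return 0;
}

/* Called later with the verdict for the oldest parked packet. */
static void example_verdict(unsigned int verdict)
{
	struct example_entry *e = NULL;

	spin_lock_bh(&example_queue_lock);
	if (!list_empty(&example_queue)) {
		e = list_entry(example_queue.next, struct example_entry, list);
		list_del(&e->list);
	}
	spin_unlock_bh(&example_queue_lock);

	if (e) {
		nf_reinject(e->skb, e->info, verdict);
		kfree(e);
	}
}
#endif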

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= NPROTO)
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

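/* One line per protocol family: "<pf> <handler name>" or "<pf> NONE". */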
static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	struct nf_queue_handler *qh;

	read_lock_bh(&queue_handler_lock);
	qh = queue_handler[*pos];
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	read_unlock_bh(&queue_handler_lock);

	return ret;
}

static struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */


int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *pde;

	pde = create_proc_entry("nf_queue", S_IRUGO, proc_net_netfilter);
	if (!pde)
		return -1;
	pde->proc_fops = &nfqueue_file_ops;
#endif
	return 0;
}