#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* Return -EBUSY if somebody else is registered, -EEXIST if the same handler
 * is already registered, and 0 on success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
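
/*
 * Illustrative sketch (not part of this file): a queue handler backend such
 * as nfnetlink_queue registers itself per protocol family roughly like this.
 * The names "example_outfn" and "example_qh" below are hypothetical
 * placeholders, not real kernel symbols.
 *
 *	static int example_outfn(struct nf_queue_entry *entry,
 *				 unsigned int queuenum)
 *	{
 *		// hand the packet to userspace; nf_reinject() is called later
 *		return 0;
 *	}
 *
 *	static const struct nf_queue_handler example_qh = {
 *		.name	= "example",
 *		.outfn	= example_outfn,
 *	};
 *
 *	err = nf_register_queue_handler(NFPROTO_IPV4, &example_qh);
 */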

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] && queue_handler[pf] != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}

	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
	u_int8_t pf;

	mutex_lock(&queue_handler_mutex);
	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
		if (queue_handler[pf] == qh)
			rcu_assign_pointer(queue_handler[pf], NULL);
	}
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
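
/*
 * nf_unregister_queue_handlers() drops every per-protocol registration that
 * still points at @qh; a queue handler module would typically call it on
 * unload.  The synchronize_rcu() above waits for __nf_queue() callers that
 * may still hold an RCU reference to the old pointer before returning.
 */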

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}
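
/*
 * The references dropped above mirror the dev_hold()/try_module_get() calls
 * taken in __nf_queue() below: this runs either from nf_reinject() once the
 * verdict arrives, or from __nf_queue() itself when the handler's outfn()
 * rejects the packet.
 */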

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status = -ENOENT;
	struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh) {
		status = -ESRCH;
		goto err_unlock;
	}

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry) {
		status = -ENOMEM;
		goto err_unlock;
	}

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		status = -ECANCELED;
		goto err_unlock;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	skb_dst_force(skb);
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 0;

err_unlock:
	rcu_read_unlock();
err:
	kfree(entry);
	return status;
}
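
/*
 * __nf_queue() error conventions, as used by nf_queue() and nf_reinject():
 * -ESRCH means no handler is registered for the protocol family, -ECANCELED
 * means the hook's owning module is going away and the hook should be treated
 * as skipped, -ENOMEM and -ENOENT cover allocation failure and a missing
 * afinfo respectively.
 */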

int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;
	int err;
	unsigned int queued;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	segs = skb_gso_segment(skb, 0);
	/* Does not use PTR_ERR to limit the number of error codes that can be
	 * returned by nf_queue.  For instance, callers rely on -ECANCELED to mean
	 * 'ignore this hook'.
	 */
	if (IS_ERR(segs))
		return -EINVAL;

	queued = 0;
	err = 0;
	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (err == 0)
			err = __nf_queue(segs, elem, pf, hook, indev,
					 outdev, okfn, queuenum);
		if (err == 0)
			queued++;
		else
			kfree_skb(segs);
		segs = nskb;
	} while (segs);

	/* also free orig skb if only some segments were queued */
	if (unlikely(err && queued))
		err = 0;
	if (err == 0)
		kfree_skb(skb);
	return err;
}
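
/*
 * For GSO packets the segments are queued individually above; the original
 * aggregate skb is freed once at least one segment has been handed to the
 * queue handler, and an error is reported only when every segment failed.
 */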

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;
	int err;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		err = __nf_queue(skb, elem, entry->pf, entry->hook,
				 entry->indev, entry->outdev, entry->okfn,
				 verdict >> NF_VERDICT_QBITS);
		if (err < 0) {
			if (err == -ECANCELED)
				goto next_hook;
			if (err == -ESRCH &&
			   (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
				goto next_hook;
			kfree_skb(skb);
		}
		break;
	case NF_STOLEN:
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{

}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(queue_handler[*pos]);
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	rcu_read_unlock();

	return ret;
}
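
/*
 * seq_show() above emits one line per protocol family in
 * /proc/net/netfilter/nf_queue.  Example output (handler names depend on what
 * is loaded; "example" is a made-up name):
 *
 *	 0 NONE
 *	 1 NONE
 *	 2 example
 */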

static const struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */


int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}