/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
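/*
 * Rough interaction with userspace, as implemented by the handlers below
 * (names refer to the nfnetlink_queue message and attribute types used in
 * this file):
 *
 *   1. userspace sends NFQNL_MSG_CONFIG with NFQNL_CFG_CMD_PF_BIND and
 *      NFQNL_CFG_CMD_BIND to register the queue handler and create a queue
 *      instance bound to its netlink pid;
 *   2. queued packets are delivered as NFQNL_MSG_PACKET messages built by
 *      nfqnl_build_packet_message();
 *   3. userspace answers with NFQNL_MSG_VERDICT (NFQA_VERDICT_HDR, plus
 *      optional NFQA_PAYLOAD/NFQA_MARK), which reinjects the packet via
 *      nf_reinject().
 */
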
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>

#include <asm/atomic.h>

#ifdef CONFIG_BRIDGE_NETFILTER
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

#define QDEBUG(x, ...)

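/* One queued packet: the skb itself, the nf_info needed for reinjection,
 * and the id under which it was reported to userspace. */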
struct nfqnl_queue_entry {
	struct list_head list;
	struct nf_info *info;
	struct sk_buff *skb;
	unsigned int id;
};

struct nfqnl_instance {
	struct hlist_node hlist;		/* global list of queues */
	atomic_t use;

	int peer_pid;
	unsigned int queue_maxlen;
	unsigned int copy_range;
	unsigned int queue_total;
	unsigned int queue_dropped;
	unsigned int queue_user_dropped;

	atomic_t id_sequence;			/* 'sequence' of pkt ids */

	u_int16_t queue_num;			/* number of this queue */
	u_int8_t copy_mode;

	spinlock_t lock;

	struct list_head queue_list;		/* packets in queue */
};

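/* Match callback used by __find_entry()/find_dequeue_entry() to select
 * queued entries, e.g. by packet id (id_cmp) or interface index (dev_cmp). */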
typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long);

static DEFINE_RWLOCK(instances_lock);

#define INSTANCE_BUCKETS	16
static struct hlist_head instance_table[INSTANCE_BUCKETS];

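/* Instances live in a small hash table keyed by queue number, protected by
 * instances_lock; instance_lookup_get() takes a reference that must be
 * dropped with instance_put(). */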
static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
	return ((queue_num >> 8) | queue_num) % INSTANCE_BUCKETS;
}

static struct nfqnl_instance *
__instance_lookup(u_int16_t queue_num)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct nfqnl_instance *inst;

	head = &instance_table[instance_hashfn(queue_num)];
	hlist_for_each_entry(inst, pos, head, hlist) {
		if (inst->queue_num == queue_num)
			return inst;
	}
	return NULL;
}

static struct nfqnl_instance *
instance_lookup_get(u_int16_t queue_num)
{
	struct nfqnl_instance *inst;

	read_lock_bh(&instances_lock);
	inst = __instance_lookup(queue_num);
	if (inst)
		atomic_inc(&inst->use);
	read_unlock_bh(&instances_lock);

	return inst;
}

static void
instance_put(struct nfqnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use)) {
		QDEBUG("kfree(inst=%p)\n", inst);
		kfree(inst);
	}
}

static struct nfqnl_instance *
instance_create(u_int16_t queue_num, int pid)
{
	struct nfqnl_instance *inst;

	QDEBUG("entering for queue_num=%u, pid=%d\n", queue_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(queue_num)) {
		inst = NULL;
		QDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	inst->queue_num = queue_num;
	inst->peer_pid = pid;
	inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
	inst->copy_range = 0xffff;
	inst->copy_mode = NFQNL_COPY_NONE;
	atomic_set(&inst->id_sequence, 0);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);
	spin_lock_init(&inst->lock);
	INIT_LIST_HEAD(&inst->queue_list);

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(queue_num)]);

	write_unlock_bh(&instances_lock);

	QDEBUG("successfully created new instance\n");

	return inst;

out_free:
	kfree(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}

static void nfqnl_flush(struct nfqnl_instance *queue, int verdict);

static void
_instance_destroy2(struct nfqnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	QDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->queue_num);
	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending skbs from the queue */
	nfqnl_flush(inst, NF_DROP);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}

static inline void
__instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 0);
}

static inline void
instance_destroy(struct nfqnl_instance *inst)
{
	_instance_destroy2(inst, 1);
}


static void
issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
{
	QDEBUG("entering for entry %p, verdict %d\n", entry, verdict);

	/* The TCP input path (and probably other parts of the stack) assumes
	 * it is called from softirq context, whereas issue_verdict() runs in
	 * syscall context.  The TCP input path can deadlock with locks taken
	 * from the timer softirq, for example, so emulate softirq context
	 * with local_bh_disable(). */

	local_bh_disable();
	nf_reinject(entry->skb, entry->info, verdict);
	local_bh_enable();

	kfree(entry);
}

static inline void
__enqueue_entry(struct nfqnl_instance *queue,
		      struct nfqnl_queue_entry *entry)
{
	list_add(&entry->list, &queue->queue_list);
	queue->queue_total++;
}

/*
 * Find and return a queued entry matched by cmpfn, or return the last
 * entry if cmpfn is NULL.
 */
static inline struct nfqnl_queue_entry *
__find_entry(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
		   unsigned long data)
{
	struct list_head *p;

	list_for_each_prev(p, &queue->queue_list) {
		struct nfqnl_queue_entry *entry =
			list_entry(p, struct nfqnl_queue_entry, list);

		if (!cmpfn || cmpfn(entry, data))
			return entry;
	}
	return NULL;
}

static inline void
__dequeue_entry(struct nfqnl_instance *q, struct nfqnl_queue_entry *entry)
{
	list_del(&entry->list);
	q->queue_total--;
}

static inline struct nfqnl_queue_entry *
__find_dequeue_entry(struct nfqnl_instance *queue,
		     nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nfqnl_queue_entry *entry;

	entry = __find_entry(queue, cmpfn, data);
	if (entry == NULL)
		return NULL;

	__dequeue_entry(queue, entry);
	return entry;
}


static inline void
__nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
	struct nfqnl_queue_entry *entry;

	while ((entry = __find_dequeue_entry(queue, NULL, 0)))
		issue_verdict(entry, verdict);
}

static inline int
__nfqnl_set_mode(struct nfqnl_instance *queue,
		 unsigned char mode, unsigned int range)
{
	int status = 0;

	switch (mode) {
	case NFQNL_COPY_NONE:
	case NFQNL_COPY_META:
		queue->copy_mode = mode;
		queue->copy_range = 0;
		break;

	case NFQNL_COPY_PACKET:
		queue->copy_mode = mode;
		/* we're using struct nfattr which has 16bit nfa_len */
		if (range > 0xffff)
			queue->copy_range = 0xffff;
		else
			queue->copy_range = range;
		break;

	default:
		status = -EINVAL;

	}
	return status;
}

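/* Locked wrappers around the __-prefixed helpers above. */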
static struct nfqnl_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue,
			 nfqnl_cmpfn cmpfn, unsigned long data)
{
	struct nfqnl_queue_entry *entry;

	spin_lock_bh(&queue->lock);
	entry = __find_dequeue_entry(queue, cmpfn, data);
	spin_unlock_bh(&queue->lock);

	return entry;
}

static void
nfqnl_flush(struct nfqnl_instance *queue, int verdict)
{
	spin_lock_bh(&queue->lock);
	__nfqnl_flush(queue, verdict);
	spin_unlock_bh(&queue->lock);
}

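/* Build an NFQNL_MSG_PACKET netlink message describing one queued entry,
 * honouring the instance's copy mode and copy range.  Returns a freshly
 * allocated skb, or NULL with *errp set on failure. */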
static struct sk_buff *
nfqnl_build_packet_message(struct nfqnl_instance *queue,
			   struct nfqnl_queue_entry *entry, int *errp)
{
	sk_buff_data_t old_tail;
	size_t size;
	size_t data_len = 0;
	struct sk_buff *skb;
	struct nfqnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nf_info *entinf = entry->info;
	struct sk_buff *entskb = entry->skb;
	struct net_device *indev;
	struct net_device *outdev;
	__be32 tmp_uint;

	QDEBUG("entered\n");

	/* all macros expand to constant values at compile time */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ NFA_SPACE(sizeof(struct nfqnl_msg_packet_hdr))
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#endif
		+ NFA_SPACE(sizeof(u_int32_t))	/* mark */
		+ NFA_SPACE(sizeof(struct nfqnl_msg_packet_hw))
		+ NFA_SPACE(sizeof(struct nfqnl_msg_packet_timestamp));

	outdev = entinf->outdev;

	spin_lock_bh(&queue->lock);

	switch (queue->copy_mode) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		data_len = 0;
		break;

	case NFQNL_COPY_PACKET:
		if ((entskb->ip_summed == CHECKSUM_PARTIAL ||
		     entskb->ip_summed == CHECKSUM_COMPLETE) &&
		    (*errp = skb_checksum_help(entskb))) {
			spin_unlock_bh(&queue->lock);
			return NULL;
		}
		if (queue->copy_range == 0
		    || queue->copy_range > entskb->len)
			data_len = entskb->len;
		else
			data_len = queue->copy_range;

		size += NFA_SPACE(data_len);
		break;

	default:
		*errp = -EINVAL;
		spin_unlock_bh(&queue->lock);
		return NULL;
	}

	spin_unlock_bh(&queue->lock);

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		goto nlmsg_failure;

	old_tail = skb->tail;
	nlh = NLMSG_PUT(skb, 0, 0,
			NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = entinf->pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(queue->queue_num);

	pmsg.packet_id		= htonl(entry->id);
	pmsg.hw_protocol	= entskb->protocol;
	pmsg.hook		= entinf->hook;

	NFA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);

	indev = entinf->indev;
	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entinf->pf == PF_BRIDGE) {
			/* Case 1: indev is physical input device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NFA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physindev) {
				tmp_uint = htonl(entskb->nf_bridge->physindev->ifindex);
				NFA_PUT(skb, NFQA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
#else
		if (entinf->pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
		} else {
			/* Case 2: outdev is bridge group, we need to look for
			 * physical output device (when called from ipv4) */
			NFA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint),
				&tmp_uint);
			if (entskb->nf_bridge
			    && entskb->nf_bridge->physoutdev) {
				tmp_uint = htonl(entskb->nf_bridge->physoutdev->ifindex);
				NFA_PUT(skb, NFQA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (entskb->mark) {
		tmp_uint = htonl(entskb->mark);
		NFA_PUT(skb, NFQA_MARK, sizeof(u_int32_t), &tmp_uint);
	}

	if (indev && entskb->dev
	    && entskb->dev->hard_header_parse) {
		struct nfqnl_msg_packet_hw phw;

		int len = entskb->dev->hard_header_parse(entskb,
							   phw.hw_addr);
		phw.hw_addrlen = htons(len);
		NFA_PUT(skb, NFQA_HWADDR, sizeof(phw), &phw);
	}

	if (entskb->tstamp.tv64) {
		struct nfqnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(entskb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NFA_PUT(skb, NFQA_TIMESTAMP, sizeof(ts), &ts);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		if (skb_tailroom(skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nf_queue: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(skb, NFA_ALIGN(size));
		nfa->nfa_type = NFQA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(entskb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}

	nlh->nlmsg_len = skb->tail - old_tail;
	return skb;

nlmsg_failure:
nfattr_failure:
	if (skb)
		kfree_skb(skb);
	*errp = -EINVAL;
	if (net_ratelimit())
		printk(KERN_ERR "nf_queue: error creating packet message\n");
	return NULL;
}

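/* nf_queue handler callback: called by the netfilter core for every packet
 * that a rule directs to queue 'queuenum'.  Builds the netlink message,
 * unicasts it to the bound peer and keeps the entry on the instance's
 * queue_list until a verdict arrives. */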
static int
nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
		     unsigned int queuenum, void *data)
{
	int status = -EINVAL;
	struct sk_buff *nskb;
	struct nfqnl_instance *queue;
	struct nfqnl_queue_entry *entry;

	QDEBUG("entered\n");

	queue = instance_lookup_get(queuenum);
	if (!queue) {
		QDEBUG("no queue instance matching\n");
		return -EINVAL;
	}

	if (queue->copy_mode == NFQNL_COPY_NONE) {
		QDEBUG("mode COPY_NONE, aborting\n");
		status = -EAGAIN;
		goto err_out_put;
	}

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (entry == NULL) {
		if (net_ratelimit())
			printk(KERN_ERR
				"nf_queue: OOM in nfqnl_enqueue_packet()\n");
		status = -ENOMEM;
		goto err_out_put;
	}

	entry->info = info;
	entry->skb = skb;
	entry->id = atomic_inc_return(&queue->id_sequence);

	nskb = nfqnl_build_packet_message(queue, entry, &status);
	if (nskb == NULL)
		goto err_out_free;

	spin_lock_bh(&queue->lock);

	if (!queue->peer_pid)
		goto err_out_free_nskb;

	if (queue->queue_total >= queue->queue_maxlen) {
		queue->queue_dropped++;
		status = -ENOSPC;
		if (net_ratelimit())
			printk(KERN_WARNING "nf_queue: full at %d entries, "
			       "dropping packet(s). Dropped: %d\n",
			       queue->queue_total, queue->queue_dropped);
		goto err_out_free_nskb;
	}

	/* nfnetlink_unicast will either free the nskb or add it to a socket */
	status = nfnetlink_unicast(nskb, queue->peer_pid, MSG_DONTWAIT);
	if (status < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);

	spin_unlock_bh(&queue->lock);
	instance_put(queue);
	return status;

err_out_free_nskb:
	kfree_skb(nskb);

err_out_unlock:
	spin_unlock_bh(&queue->lock);

err_out_free:
	kfree(entry);
err_out_put:
	instance_put(queue);
	return status;
}

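/* Replace the payload of a queued skb with data supplied by userspace in
 * NFQA_PAYLOAD, trimming or expanding the skb as needed. */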
static int
nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e)
{
	int diff;

	diff = data_len - e->skb->len;
	if (diff < 0) {
		if (pskb_trim(e->skb, data_len))
			return -ENOMEM;
	} else if (diff > 0) {
		if (data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			struct sk_buff *newskb;

			newskb = skb_copy_expand(e->skb,
						 skb_headroom(e->skb),
						 diff,
						 GFP_ATOMIC);
			if (newskb == NULL) {
				printk(KERN_WARNING "nf_queue: OOM "
				      "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			if (e->skb->sk)
				skb_set_owner_w(newskb, e->skb->sk);
			kfree_skb(e->skb);
			e->skb = newskb;
		}
		skb_put(e->skb, diff);
	}
	if (!skb_make_writable(&e->skb, data_len))
		return -ENOMEM;
	skb_copy_to_linear_data(e->skb, data, data_len);
	e->skb->ip_summed = CHECKSUM_NONE;
	return 0;
}

static inline int
id_cmp(struct nfqnl_queue_entry *e, unsigned long id)
{
	return (id == e->id);
}

static int
nfqnl_set_mode(struct nfqnl_instance *queue,
	       unsigned char mode, unsigned int range)
{
	int status;

	spin_lock_bh(&queue->lock);
	status = __nfqnl_set_mode(queue, mode, range);
	spin_unlock_bh(&queue->lock);

	return status;
}

static int
dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex)
{
	struct nf_info *entinf = entry->info;

	if (entinf->indev)
		if (entinf->indev->ifindex == ifindex)
			return 1;
	if (entinf->outdev)
		if (entinf->outdev->ifindex == ifindex)
			return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		if (entry->skb->nf_bridge->physindev &&
		    entry->skb->nf_bridge->physindev->ifindex == ifindex)
			return 1;
		if (entry->skb->nf_bridge->physoutdev &&
		    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
			return 1;
	}
#endif
	return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(int ifindex)
{
	int i;

	QDEBUG("entering for ifindex %u\n", ifindex);

	/* This may look as if we hold the read lock for far too long
	 * (issue_verdict(), nf_reinject(), ...), but we only ever issue
	 * NF_DROP here, which is processed directly in nf_reinject(). */
	read_lock_bh(&instances_lock);

	for (i = 0; i < INSTANCE_BUCKETS; i++) {
		struct hlist_node *tmp;
		struct nfqnl_instance *inst;
		struct hlist_head *head = &instance_table[i];

		hlist_for_each_entry(inst, tmp, head, hlist) {
			struct nfqnl_queue_entry *entry;
			while ((entry = find_dequeue_entry(inst, dev_cmp,
							   ifindex)) != NULL)
				issue_verdict(entry, NF_DROP);
		}
	}

	read_unlock_bh(&instances_lock);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
		    unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	/* Drop any packets associated with the downed device */
	if (event == NETDEV_DOWN)
		nfqnl_dev_drop(dev->ifindex);
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
	.notifier_call	= nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
		   unsigned long event, void *ptr)
{
	struct netlink_notify *n = ptr;

	if (event == NETLINK_URELEASE &&
	    n->protocol == NETLINK_NETFILTER && n->pid) {
		int i;

		/* destroy all instances for this pid */
		write_lock_bh(&instances_lock);
		for (i = 0; i < INSTANCE_BUCKETS; i++) {
			struct hlist_node *tmp, *t2;
			struct nfqnl_instance *inst;
			struct hlist_head *head = &instance_table[i];

			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
				if (n->pid == inst->peer_pid)
					__instance_destroy(inst);
			}
		}
		write_unlock_bh(&instances_lock);
	}
	return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
	.notifier_call	= nfqnl_rcv_nl_event,
};

static const int nfqa_verdict_min[NFQA_MAX] = {
	[NFQA_VERDICT_HDR-1]	= sizeof(struct nfqnl_msg_verdict_hdr),
	[NFQA_MARK-1]		= sizeof(u_int32_t),
	[NFQA_PAYLOAD-1]	= 0,
};

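/* NFQNL_MSG_VERDICT handler: look up the instance and the queued packet by
 * id, optionally apply a new payload (NFQA_PAYLOAD) and mark (NFQA_MARK),
 * then reinject the packet with the requested verdict. */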
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
		   struct nlmsghdr *nlh, struct nfattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);

	struct nfqnl_msg_verdict_hdr *vhdr;
	struct nfqnl_instance *queue;
	unsigned int verdict;
	struct nfqnl_queue_entry *entry;
	int err;

	if (nfattr_bad_size(nfqa, NFQA_MAX, nfqa_verdict_min)) {
		QDEBUG("bad attribute size\n");
		return -EINVAL;
	}

	queue = instance_lookup_get(queue_num);
	if (!queue)
		return -ENODEV;

	if (queue->peer_pid != NETLINK_CB(skb).pid) {
		err = -EPERM;
		goto err_out_put;
	}

	if (!nfqa[NFQA_VERDICT_HDR-1]) {
		err = -EINVAL;
		goto err_out_put;
	}

	vhdr = NFA_DATA(nfqa[NFQA_VERDICT_HDR-1]);
	verdict = ntohl(vhdr->verdict);

	if ((verdict & NF_VERDICT_MASK) > NF_MAX_VERDICT) {
		err = -EINVAL;
		goto err_out_put;
	}

	entry = find_dequeue_entry(queue, id_cmp, ntohl(vhdr->id));
	if (entry == NULL) {
		err = -ENOENT;
		goto err_out_put;
	}

	if (nfqa[NFQA_PAYLOAD-1]) {
		if (nfqnl_mangle(NFA_DATA(nfqa[NFQA_PAYLOAD-1]),
				 NFA_PAYLOAD(nfqa[NFQA_PAYLOAD-1]), entry) < 0)
			verdict = NF_DROP;
	}

	if (nfqa[NFQA_MARK-1])
		entry->skb->mark = ntohl(*(__be32 *)
					 NFA_DATA(nfqa[NFQA_MARK-1]));

	issue_verdict(entry, verdict);
	instance_put(queue);
	return 0;

err_out_put:
	instance_put(queue);
	return err;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nfattr *nfqa[])
{
	return -ENOTSUPP;
}

static const int nfqa_cfg_min[NFQA_CFG_MAX] = {
	[NFQA_CFG_CMD-1]	= sizeof(struct nfqnl_msg_config_cmd),
	[NFQA_CFG_PARAMS-1]	= sizeof(struct nfqnl_msg_config_params),
};

static struct nf_queue_handler nfqh = {
	.name	= "nf_queue",
	.outfn	= &nfqnl_enqueue_packet,
};

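/* NFQNL_MSG_CONFIG handler: create/destroy queue instances, (un)register the
 * per-protocol-family queue handler, and update copy mode, copy range and
 * maximum queue length. */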
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
		  struct nlmsghdr *nlh, struct nfattr *nfqa[])
{
	struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
	u_int16_t queue_num = ntohs(nfmsg->res_id);
	struct nfqnl_instance *queue;
	int ret = 0;

	QDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));

	if (nfattr_bad_size(nfqa, NFQA_CFG_MAX, nfqa_cfg_min)) {
		QDEBUG("bad attribute size\n");
		return -EINVAL;
	}

	queue = instance_lookup_get(queue_num);
	if (nfqa[NFQA_CFG_CMD-1]) {
		struct nfqnl_msg_config_cmd *cmd;
		cmd = NFA_DATA(nfqa[NFQA_CFG_CMD-1]);
		QDEBUG("found CFG_CMD\n");

		switch (cmd->command) {
		case NFQNL_CFG_CMD_BIND:
			if (queue)
				return -EBUSY;

			queue = instance_create(queue_num, NETLINK_CB(skb).pid);
			if (!queue)
				return -EINVAL;
			break;
		case NFQNL_CFG_CMD_UNBIND:
			if (!queue)
				return -ENODEV;

			if (queue->peer_pid != NETLINK_CB(skb).pid) {
				ret = -EPERM;
				goto out_put;
			}

			instance_destroy(queue);
			break;
		case NFQNL_CFG_CMD_PF_BIND:
			QDEBUG("registering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			ret = nf_register_queue_handler(ntohs(cmd->pf), &nfqh);
			break;
		case NFQNL_CFG_CMD_PF_UNBIND:
			QDEBUG("unregistering queue handler for pf=%u\n",
				ntohs(cmd->pf));
			/* This is a bug and a feature.  We can unregister
			 * other handlers(!) */
			ret = nf_unregister_queue_handler(ntohs(cmd->pf));
			break;
		default:
			ret = -EINVAL;
			break;
		}
	} else {
		if (!queue) {
			QDEBUG("no config command, and no instance ENOENT\n");
			ret = -ENOENT;
			goto out_put;
		}

		if (queue->peer_pid != NETLINK_CB(skb).pid) {
			QDEBUG("no config command, and wrong pid\n");
			ret = -EPERM;
			goto out_put;
		}
	}

	if (nfqa[NFQA_CFG_PARAMS-1]) {
		struct nfqnl_msg_config_params *params;

		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
		nfqnl_set_mode(queue, params->copy_mode,
				ntohl(params->copy_range));
	}

	if (nfqa[NFQA_CFG_QUEUE_MAXLEN-1]) {
		__be32 *queue_maxlen;

		/* as with NFQA_CFG_PARAMS, this needs an existing instance */
		if (!queue) {
			ret = -ENOENT;
			goto out_put;
		}
		queue_maxlen = NFA_DATA(nfqa[NFQA_CFG_QUEUE_MAXLEN-1]);
		spin_lock_bh(&queue->lock);
		queue->queue_maxlen = ntohl(*queue_maxlen);
		spin_unlock_bh(&queue->lock);
	}

out_put:
	instance_put(queue);
	return ret;
}

static struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
	[NFQNL_MSG_PACKET]	= { .call = nfqnl_recv_unsupp,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_VERDICT]	= { .call = nfqnl_recv_verdict,
				    .attr_count = NFQA_MAX, },
	[NFQNL_MSG_CONFIG]	= { .call = nfqnl_recv_config,
				    .attr_count = NFQA_CFG_MAX, },
};

static struct nfnetlink_subsystem nfqnl_subsys = {
	.name		= "nf_queue",
	.subsys_id	= NFNL_SUBSYS_QUEUE,
	.cb_count	= NFQNL_MSG_MAX,
	.cb		= nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
	unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	if (!st)
		return NULL;

	for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
		if (!hlist_empty(&instance_table[st->bucket]))
			return instance_table[st->bucket].first;
	}
	return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
	struct iter_state *st = seq->private;

	h = h->next;
	while (!h) {
		if (++st->bucket >= INSTANCE_BUCKETS)
			return NULL;

		h = instance_table[st->bucket].first;
	}
	return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
	struct hlist_node *head;
	head = get_first(seq);

	if (head)
		while (pos && (head = get_next(seq, head)))
			pos--;
	return pos ? NULL : head;
}

static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock_bh(&instances_lock);
	return get_idx(seq, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;
	return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
{
	read_unlock_bh(&instances_lock);
}

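/* One line per instance: queue number, peer pid, queued packets, copy mode,
 * copy range, kernel drops, userspace drops, last packet id, refcount. */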
static int seq_show(struct seq_file *s, void *v)
{
	const struct nfqnl_instance *inst = v;

	return seq_printf(s, "%5d %6d %5d %1d %5d %5d %5d %8d %2d\n",
			  inst->queue_num,
			  inst->peer_pid, inst->queue_total,
			  inst->copy_mode, inst->copy_range,
			  inst->queue_dropped, inst->queue_user_dropped,
			  atomic_read(&inst->id_sequence),
			  atomic_read(&inst->use));
}

static struct seq_operations nfqnl_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	struct iter_state *is;
	int ret;

	is = kzalloc(sizeof(*is), GFP_KERNEL);
	if (!is)
		return -ENOMEM;
	ret = seq_open(file, &nfqnl_seq_ops);
	if (ret < 0)
		goto out_free;
	seq = file->private_data;
	seq->private = is;
	return ret;
out_free:
	kfree(is);
	return ret;
}

static const struct file_operations nfqnl_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqnl_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* PROC_FS */

static int __init nfnetlink_queue_init(void)
{
	int i, status = -ENOMEM;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *proc_nfqueue;
#endif

	for (i = 0; i < INSTANCE_BUCKETS; i++)
		INIT_HLIST_HEAD(&instance_table[i]);

	netlink_register_notifier(&nfqnl_rtnl_notifier);
	status = nfnetlink_subsys_register(&nfqnl_subsys);
	if (status < 0) {
		printk(KERN_ERR "nf_queue: failed to create netlink socket\n");
		goto cleanup_netlink_notifier;
	}

#ifdef CONFIG_PROC_FS
	proc_nfqueue = create_proc_entry("nfnetlink_queue", 0440,
					 proc_net_netfilter);
	if (!proc_nfqueue) {
		status = -ENOMEM;
		goto cleanup_subsys;
	}
	proc_nfqueue->proc_fops = &nfqnl_file_ops;
#endif

	register_netdevice_notifier(&nfqnl_dev_notifier);
	return status;

#ifdef CONFIG_PROC_FS
cleanup_subsys:
	nfnetlink_subsys_unregister(&nfqnl_subsys);
#endif
cleanup_netlink_notifier:
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
	return status;
}

static void __exit nfnetlink_queue_fini(void)
{
	nf_unregister_queue_handlers(&nfqh);
	unregister_netdevice_notifier(&nfqnl_dev_notifier);
#ifdef CONFIG_PROC_FS
	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
#endif
	nfnetlink_subsys_unregister(&nfqnl_subsys);
	netlink_unregister_notifier(&nfqnl_rtnl_notifier);
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);
