/*
 *	xt_hashlimit - Netfilter module to limit the number of packets per time
 *	separately for each hashbucket (sourceip/sourceport/dstip/dstport)
 *
 *	(C) 2003-2004 by Harald Welte <laforge@netfilter.org>
 *	Copyright © CC Computer Consultants GmbH, 2007 - 2008
 *
 * Development of this code was funded by Astaro AG, http://www.astaro.com/
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/in.h>
#include <linux/ip.h>
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
#include <linux/ipv6.h>
#include <net/ipv6.h>
#endif

#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
MODULE_DESCRIPTION("Xtables: per hash-bucket rate-limit match");
MODULE_ALIAS("ipt_hashlimit");
MODULE_ALIAS("ip6t_hashlimit");

struct hashlimit_net {
	struct hlist_head	htables;
	struct proc_dir_entry	*ipt_hashlimit;
	struct proc_dir_entry	*ip6t_hashlimit;
};

static int hashlimit_net_id;
static inline struct hashlimit_net *hashlimit_pernet(struct net *net)
{
	return net_generic(net, hashlimit_net_id);
}

/* need to declare this at the top */
static const struct file_operations dl_file_ops;

/* hash table crap */
struct dsthash_dst {
	union {
		struct {
			__be32 src;
			__be32 dst;
		} ip;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
		struct {
			__be32 src[4];
			__be32 dst[4];
		} ip6;
#endif
	};
	__be16 src_port;
	__be16 dst_port;
};

struct dsthash_ent {
	/* static / read-only parts in the beginning */
	struct hlist_node node;
	struct dsthash_dst dst;

	/* modified structure members in the end */
	spinlock_t lock;
	unsigned long expires;		/* precalculated expiry time */
	struct {
		unsigned long prev;	/* last modification */
		u_int32_t credit;
		u_int32_t credit_cap, cost;
	} rateinfo;
	struct rcu_head rcu;
};

struct xt_hashlimit_htable {
	struct hlist_node node;		/* global list of all htables */
	int use;
	u_int8_t family;
	bool rnd_initialized;

	struct hashlimit_cfg1 cfg;	/* config */

	/* used internally */
	spinlock_t lock;		/* lock for list_head */
	u_int32_t rnd;			/* random seed for hash */
	unsigned int count;		/* number entries in table */
	struct timer_list timer;	/* timer for gc */

	/* seq_file stuff */
	struct proc_dir_entry *pde;
	struct net *net;

	struct hlist_head hash[0];	/* hashtable itself */
};

static DEFINE_MUTEX(hashlimit_mutex);	/* protects htables list */
static struct kmem_cache *hashlimit_cachep __read_mostly;

static inline bool dst_cmp(const struct dsthash_ent *ent,
			   const struct dsthash_dst *b)
{
	return !memcmp(&ent->dst, b, sizeof(ent->dst));
}

static u_int32_t
hash_dst(const struct xt_hashlimit_htable *ht, const struct dsthash_dst *dst)
{
	u_int32_t hash = jhash2((const u32 *)dst,
				sizeof(*dst)/sizeof(u32),
				ht->rnd);
	/*
	 * Instead of returning hash % ht->cfg.size (implying a divide)
	 * we return the high 32 bits of the (hash * ht->cfg.size) that will
	 * give results between [0 and cfg.size-1] and same hash distribution,
	 * but using a multiply, less expensive than a divide
	 */
	return ((u64)hash * ht->cfg.size) >> 32;
}
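
/*
 * Worked example (illustrative, not part of the original source): with
 * cfg.size = 64 and a jhash2() result of 0x80000000, the bucket index is
 * ((u64)0x80000000 * 64) >> 32 = 32, i.e. the 32-bit hash is scaled into
 * [0, size-1] with a multiply and a shift instead of a modulo.
 */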

static struct dsthash_ent *
dsthash_find(const struct xt_hashlimit_htable *ht,
	     const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;
	struct hlist_node *pos;
	u_int32_t hash = hash_dst(ht, dst);

	if (!hlist_empty(&ht->hash[hash])) {
		hlist_for_each_entry_rcu(ent, pos, &ht->hash[hash], node)
			if (dst_cmp(ent, dst)) {
				spin_lock(&ent->lock);
				return ent;
			}
	}
	return NULL;
}

/* allocate dsthash_ent, initialize dst, put in htable and lock it */
static struct dsthash_ent *
dsthash_alloc_init(struct xt_hashlimit_htable *ht,
		   const struct dsthash_dst *dst)
{
	struct dsthash_ent *ent;

	spin_lock(&ht->lock);
	/* initialize hash with random val at the time we allocate
	 * the first hashtable entry */
	if (unlikely(!ht->rnd_initialized)) {
		get_random_bytes(&ht->rnd, sizeof(ht->rnd));
		ht->rnd_initialized = true;
	}

	if (ht->cfg.max && ht->count >= ht->cfg.max) {
		if (net_ratelimit())
			pr_err("max count of %u reached\n", ht->cfg.max);
		ent = NULL;
	} else
		ent = kmem_cache_alloc(hashlimit_cachep, GFP_ATOMIC);
	if (!ent) {
		if (net_ratelimit())
			pr_err("cannot allocate dsthash_ent\n");
	} else {
		memcpy(&ent->dst, dst, sizeof(ent->dst));
		spin_lock_init(&ent->lock);

		spin_lock(&ent->lock);
		hlist_add_head_rcu(&ent->node, &ht->hash[hash_dst(ht, dst)]);
		ht->count++;
	}
	spin_unlock(&ht->lock);
	return ent;
}

static void dsthash_free_rcu(struct rcu_head *head)
{
	struct dsthash_ent *ent = container_of(head, struct dsthash_ent, rcu);

	kmem_cache_free(hashlimit_cachep, ent);
}

static inline void
dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
{
	hlist_del_rcu(&ent->node);
	call_rcu_bh(&ent->rcu, dsthash_free_rcu);
	ht->count--;
}
static void htable_gc(unsigned long htlong);

static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
			 u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;
	unsigned int size;
	unsigned int i;

	if (minfo->cfg.size) {
		size = minfo->cfg.size;
	} else {
		size = (totalram_pages << PAGE_SHIFT) / 16384 /
		       sizeof(struct list_head);
		if (totalram_pages > 1024 * 1024 * 1024 / PAGE_SIZE)
			size = 8192;
		if (size < 16)
			size = 16;
	}
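	/*
	 * Illustrative numbers (not part of the original source), assuming
	 * 4 KiB pages and a 16-byte struct list_head: with 512 MiB of RAM the
	 * default is 512 MiB / 16384 / 16 = 2048 buckets; with more than 1 GiB
	 * of RAM the size is pinned to 8192, and the floor is 16 buckets.
	 */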
	hinfo = vmalloc(sizeof(struct xt_hashlimit_htable) +
	                sizeof(struct list_head) * size);
	if (hinfo == NULL)
		return -ENOMEM;
	minfo->hinfo = hinfo;

	/* copy match config into hashtable config */
	memcpy(&hinfo->cfg, &minfo->cfg, sizeof(hinfo->cfg));
	hinfo->cfg.size = size;
	if (hinfo->cfg.max == 0)
		hinfo->cfg.max = 8 * hinfo->cfg.size;
	else if (hinfo->cfg.max < hinfo->cfg.size)
		hinfo->cfg.max = hinfo->cfg.size;

	for (i = 0; i < hinfo->cfg.size; i++)
		INIT_HLIST_HEAD(&hinfo->hash[i]);

	hinfo->use = 1;
	hinfo->count = 0;
	hinfo->family = family;
	hinfo->rnd_initialized = false;
	spin_lock_init(&hinfo->lock);

	hinfo->pde = proc_create_data(minfo->name, 0,
		(family == NFPROTO_IPV4) ?
		hashlimit_net->ipt_hashlimit : hashlimit_net->ip6t_hashlimit,
		&dl_file_ops, hinfo);
	if (hinfo->pde == NULL) {
		vfree(hinfo);
		return -ENOMEM;
	}
	hinfo->net = net;

	setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
	hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
	add_timer(&hinfo->timer);

	hlist_add_head(&hinfo->node, &hashlimit_net->htables);

	return 0;
}

static bool select_all(const struct xt_hashlimit_htable *ht,
		       const struct dsthash_ent *he)
{
	return 1;
}

static bool select_gc(const struct xt_hashlimit_htable *ht,
		      const struct dsthash_ent *he)
{
	return time_after_eq(jiffies, he->expires);
}

static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
			bool (*select)(const struct xt_hashlimit_htable *ht,
				      const struct dsthash_ent *he))
{
	unsigned int i;

	/* lock hash table and iterate over it */
	spin_lock_bh(&ht->lock);
	for (i = 0; i < ht->cfg.size; i++) {
		struct dsthash_ent *dh;
		struct hlist_node *pos, *n;
		hlist_for_each_entry_safe(dh, pos, n, &ht->hash[i], node) {
			if ((*select)(ht, dh))
				dsthash_free(ht, dh);
		}
	}
	spin_unlock_bh(&ht->lock);
}

/* hash table garbage collector, run by timer */
static void htable_gc(unsigned long htlong)
{
	struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;

	htable_selective_cleanup(ht, select_gc);

	/* re-add the timer accordingly */
	ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
	add_timer(&ht->timer);
}

static void htable_destroy(struct xt_hashlimit_htable *hinfo)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
	struct proc_dir_entry *parent;

	del_timer_sync(&hinfo->timer);

	if (hinfo->family == NFPROTO_IPV4)
		parent = hashlimit_net->ipt_hashlimit;
	else
		parent = hashlimit_net->ip6t_hashlimit;
	remove_proc_entry(hinfo->pde->name, parent);
	htable_selective_cleanup(hinfo, select_all);
	vfree(hinfo);
}

static struct xt_hashlimit_htable *htable_find_get(struct net *net,
						   const char *name,
						   u_int8_t family)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
	struct xt_hashlimit_htable *hinfo;
	struct hlist_node *pos;

	hlist_for_each_entry(hinfo, pos, &hashlimit_net->htables, node) {
		if (!strcmp(name, hinfo->pde->name) &&
		    hinfo->family == family) {
			hinfo->use++;
			return hinfo;
		}
	}
	return NULL;
}

static void htable_put(struct xt_hashlimit_htable *hinfo)
{
	mutex_lock(&hashlimit_mutex);
	if (--hinfo->use == 0) {
		hlist_del(&hinfo->node);
		htable_destroy(hinfo);
	}
	mutex_unlock(&hashlimit_mutex);
}

/* The algorithm used is the Simple Token Bucket Filter (TBF)
 * see net/sched/sch_tbf.c in the linux source tree
 */

/* Rusty: This is my (non-mathematically-inclined) understanding of
   this algorithm.  The `average rate' in jiffies becomes your initial
   amount of credit `credit' and the most credit you can ever have
   `credit_cap'.  The `peak rate' becomes the cost of passing the
   test, `cost'.

   `prev' tracks the last packet hit: you gain one credit per jiffy.
   If you get credit balance more than this, the extra credit is
   discarded.  Every time the match passes, you lose `cost' credits;
   if you don't have that many, the test fails.

   See Alexey's formal explanation in net/sched/sch_tbf.c.

   To get the maximum range, we multiply by this factor (ie. you get N
   credits per jiffy).  We want to allow a rate as low as 1 per day
   (slowest userspace tool allows), which means
   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32 ie.
*/
#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))

/* Repeated shift and or gives us all 1s, final shift and add 1 gives
 * us the power of 2 below the theoretical max, so GCC simply does a
 * shift. */
#define _POW2_BELOW2(x) ((x)|((x)>>1))
#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)

#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
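
/*
 * Worked example (illustrative, not part of the original source), assuming
 * HZ = 100: MAX_CPJ = 0xFFFFFFFF / (100*60*60*24) = 497 and
 * CREDITS_PER_JIFFY = POW2_BELOW32(497) = 256, the largest power of two not
 * above MAX_CPJ.  Crediting 256 per jiffy keeps a full day's worth of credit
 * (256 * 100 * 86400, roughly 2.2e9) below 2^32.
 */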

/* Precision saver. */
static inline u_int32_t
user2credits(u_int32_t user)
{
	/* If multiplying would overflow... */
	if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
		/* Divide first. */
		return (user / XT_HASHLIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;

	return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE;
}
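
/*
 * Illustrative arithmetic (not part of the original source), assuming
 * HZ = 100, CREDITS_PER_JIFFY = 256 and XT_HASHLIMIT_SCALE = 10000 from
 * xt_hashlimit.h: a rate of 5 packets/sec arrives here as avg = 10000/5 =
 * 2000, so cost = user2credits(2000) = (2000 * 100 * 256) / 10000 = 5120,
 * about one fifth of the 100 * 256 = 25600 credits gained per second.
 */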

static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now)
{
	dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY;
	if (dh->rateinfo.credit > dh->rateinfo.credit_cap)
		dh->rateinfo.credit = dh->rateinfo.credit_cap;
	dh->rateinfo.prev = now;
}

static inline __be32 maskl(__be32 a, unsigned int l)
{
	return l ? htonl(ntohl(a) & ~0 << (32 - l)) : 0;
}
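
/*
 * Example (illustrative, not part of the original source): maskl(addr, 24)
 * keeps only the top 24 bits, so with a /24 srcmask 192.168.1.77 hashes as
 * 192.168.1.0; maskl(addr, 0) collapses every address to 0.0.0.0.
 */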

#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
{
	switch (p) {
	case 0 ... 31:
		i[0] = maskl(i[0], p);
		i[1] = i[2] = i[3] = 0;
		break;
	case 32 ... 63:
		i[1] = maskl(i[1], p - 32);
		i[2] = i[3] = 0;
		break;
	case 64 ... 95:
		i[2] = maskl(i[2], p - 64);
		i[3] = 0;
		break;
	case 96 ... 127:
		i[3] = maskl(i[3], p - 96);
		break;
	case 128:
		break;
	}
}
#endif

static int
hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
		   struct dsthash_dst *dst,
		   const struct sk_buff *skb, unsigned int protoff)
{
	__be16 _ports[2], *ports;
	u8 nexthdr;

	memset(dst, 0, sizeof(*dst));

	switch (hinfo->family) {
	case NFPROTO_IPV4:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP)
			dst->ip.dst = maskl(ip_hdr(skb)->daddr,
			              hinfo->cfg.dstmask);
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP)
			dst->ip.src = maskl(ip_hdr(skb)->saddr,
			              hinfo->cfg.srcmask);

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ip_hdr(skb)->protocol;
		break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	case NFPROTO_IPV6:
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
			memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
			       sizeof(dst->ip6.dst));
			hashlimit_ipv6_mask(dst->ip6.dst, hinfo->cfg.dstmask);
		}
		if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SIP) {
			memcpy(&dst->ip6.src, &ipv6_hdr(skb)->saddr,
			       sizeof(dst->ip6.src));
			hashlimit_ipv6_mask(dst->ip6.src, hinfo->cfg.srcmask);
		}

		if (!(hinfo->cfg.mode &
		      (XT_HASHLIMIT_HASH_DPT | XT_HASHLIMIT_HASH_SPT)))
			return 0;
		nexthdr = ipv6_hdr(skb)->nexthdr;
		protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr);
		if ((int)protoff < 0)
			return -1;
		break;
#endif
	default:
		BUG();
		return 0;
	}

	switch (nexthdr) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
		ports = skb_header_pointer(skb, protoff, sizeof(_ports),
					   &_ports);
		break;
	default:
		_ports[0] = _ports[1] = 0;
		ports = _ports;
		break;
	}
	if (!ports)
		return -1;
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_SPT)
		dst->src_port = ports[0];
	if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DPT)
		dst->dst_port = ports[1];
	return 0;
}

static bool
hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	struct xt_hashlimit_htable *hinfo = info->hinfo;
	unsigned long now = jiffies;
	struct dsthash_ent *dh;
	struct dsthash_dst dst;

	if (hashlimit_init_dst(hinfo, &dst, skb, par->thoff) < 0)
		goto hotdrop;

	rcu_read_lock_bh();
	dh = dsthash_find(hinfo, &dst);
	if (dh == NULL) {
		dh = dsthash_alloc_init(hinfo, &dst);
		if (dh == NULL) {
			rcu_read_unlock_bh();
			goto hotdrop;
		}
		dh->expires = jiffies + msecs_to_jiffies(hinfo->cfg.expire);
		dh->rateinfo.prev = jiffies;
		dh->rateinfo.credit = user2credits(hinfo->cfg.avg *
		                      hinfo->cfg.burst);
		dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg *
		                          hinfo->cfg.burst);
		dh->rateinfo.cost = user2credits(hinfo->cfg.avg);
	} else {
		/* update expiration timeout */
		dh->expires = now + msecs_to_jiffies(hinfo->cfg.expire);
		rateinfo_recalc(dh, now);
	}

	if (dh->rateinfo.credit >= dh->rateinfo.cost) {
		/* below the limit */
		dh->rateinfo.credit -= dh->rateinfo.cost;
		spin_unlock(&dh->lock);
		rcu_read_unlock_bh();
		return !(info->cfg.mode & XT_HASHLIMIT_INVERT);
	}

	spin_unlock(&dh->lock);
	rcu_read_unlock_bh();
	/* default match is underlimit - so over the limit, we need to invert */
	return info->cfg.mode & XT_HASHLIMIT_INVERT;

 hotdrop:
	par->hotdrop = true;
	return false;
}
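
/*
 * Userspace usage sketch (illustrative, not part of the original source):
 * the match above is typically exercised by a rule such as
 *
 *   iptables -A INPUT -p tcp --syn --dport 22 \
 *     -m hashlimit --hashlimit-upto 3/min --hashlimit-burst 5 \
 *     --hashlimit-mode srcip --hashlimit-name ssh -j ACCEPT
 *
 * which creates the per-netns hashtable "ssh", keyed on source IP, and lets
 * a packet match while its bucket still holds at least `cost' credits.
 */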

static int hashlimit_mt_check(const struct xt_mtchk_param *par)
{
	struct net *net = par->net;
	struct xt_hashlimit_mtinfo1 *info = par->matchinfo;
	int ret;

	/* Check for overflow. */
	if (info->cfg.burst == 0 ||
	    user2credits(info->cfg.avg * info->cfg.burst) <
	    user2credits(info->cfg.avg)) {
		pr_info("overflow, try lower: %u/%u\n",
			info->cfg.avg, info->cfg.burst);
		return -ERANGE;
	}
	if (info->cfg.gc_interval == 0 || info->cfg.expire == 0)
		return -EINVAL;
	if (info->name[sizeof(info->name)-1] != '\0')
		return -EINVAL;
	if (par->family == NFPROTO_IPV4) {
		if (info->cfg.srcmask > 32 || info->cfg.dstmask > 32)
			return -EINVAL;
	} else {
		if (info->cfg.srcmask > 128 || info->cfg.dstmask > 128)
			return -EINVAL;
	}

	mutex_lock(&hashlimit_mutex);
	info->hinfo = htable_find_get(net, info->name, par->family);
	if (info->hinfo == NULL) {
		ret = htable_create(net, info, par->family);
		if (ret < 0) {
			mutex_unlock(&hashlimit_mutex);
			return ret;
		}
	}
	mutex_unlock(&hashlimit_mutex);
	return 0;
}

static void hashlimit_mt_destroy(const struct xt_mtdtor_param *par)
{
	const struct xt_hashlimit_mtinfo1 *info = par->matchinfo;

	htable_put(info->hinfo);
}

static struct xt_match hashlimit_mt_reg[] __read_mostly = {
	{
		.name           = "hashlimit",
		.revision       = 1,
		.family         = NFPROTO_IPV4,
		.match          = hashlimit_mt,
		.matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry     = hashlimit_mt_check,
		.destroy        = hashlimit_mt_destroy,
		.me             = THIS_MODULE,
	},
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	{
		.name           = "hashlimit",
		.revision       = 1,
		.family         = NFPROTO_IPV6,
		.match          = hashlimit_mt,
		.matchsize      = sizeof(struct xt_hashlimit_mtinfo1),
		.checkentry     = hashlimit_mt_check,
		.destroy        = hashlimit_mt_destroy,
		.me             = THIS_MODULE,
	},
#endif
};

/* PROC stuff */
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
	__acquires(htable->lock)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket;

	spin_lock_bh(&htable->lock);
	if (*pos >= htable->cfg.size)
		return NULL;

	bucket = kmalloc(sizeof(unsigned int), GFP_ATOMIC);
	if (!bucket)
		return ERR_PTR(-ENOMEM);

	*bucket = *pos;
	return bucket;
}

static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;

	*pos = ++(*bucket);
	if (*pos >= htable->cfg.size) {
		kfree(v);
		return NULL;
	}
	return bucket;
}

static void dl_seq_stop(struct seq_file *s, void *v)
	__releases(htable->lock)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;

	if (!IS_ERR(bucket))
		kfree(bucket);
	spin_unlock_bh(&htable->lock);
}

static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
				   struct seq_file *s)
{
	int res;

	spin_lock(&ent->lock);
	/* recalculate to show accurate numbers */
	rateinfo_recalc(ent, jiffies);

	switch (family) {
	case NFPROTO_IPV4:
		res = seq_printf(s, "%ld %pI4:%u->%pI4:%u %u %u %u\n",
				 (long)(ent->expires - jiffies)/HZ,
				 &ent->dst.ip.src,
				 ntohs(ent->dst.src_port),
				 &ent->dst.ip.dst,
				 ntohs(ent->dst.dst_port),
				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
				 ent->rateinfo.cost);
		break;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	case NFPROTO_IPV6:
		res = seq_printf(s, "%ld %pI6:%u->%pI6:%u %u %u %u\n",
				 (long)(ent->expires - jiffies)/HZ,
				 &ent->dst.ip6.src,
				 ntohs(ent->dst.src_port),
				 &ent->dst.ip6.dst,
				 ntohs(ent->dst.dst_port),
				 ent->rateinfo.credit, ent->rateinfo.credit_cap,
				 ent->rateinfo.cost);
		break;
#endif
	default:
		BUG();
		res = 0;
	}
	spin_unlock(&ent->lock);
	return res;
}
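
/*
 * For reference (wording added here; the format follows the seq_printf
 * calls above): each line of /proc/net/ipt_hashlimit/<name> reads
 *
 *   <expire secs> <src>:<sport>-><dst>:<dport> <credit> <credit_cap> <cost>
 *
 * e.g. "58 192.168.1.77:0->0.0.0.0:0 5120 25600 5120" for an entry keyed
 * on source IP only (the numbers here are illustrative).
 */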

static int dl_seq_show(struct seq_file *s, void *v)
{
	struct xt_hashlimit_htable *htable = s->private;
	unsigned int *bucket = (unsigned int *)v;
	struct dsthash_ent *ent;
	struct hlist_node *pos;

	if (!hlist_empty(&htable->hash[*bucket])) {
		hlist_for_each_entry(ent, pos, &htable->hash[*bucket], node)
			if (dl_seq_real_show(ent, htable->family, s))
				return -1;
	}
	return 0;
}

static const struct seq_operations dl_seq_ops = {
	.start = dl_seq_start,
	.next  = dl_seq_next,
	.stop  = dl_seq_stop,
	.show  = dl_seq_show
};

static int dl_proc_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &dl_seq_ops);

	if (!ret) {
		struct seq_file *sf = file->private_data;
		sf->private = PDE(inode)->data;
	}
	return ret;
}

static const struct file_operations dl_file_ops = {
	.owner   = THIS_MODULE,
	.open    = dl_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static int __net_init hashlimit_proc_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	hashlimit_net->ipt_hashlimit = proc_mkdir("ipt_hashlimit", net->proc_net);
	if (!hashlimit_net->ipt_hashlimit)
		return -ENOMEM;
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	hashlimit_net->ip6t_hashlimit = proc_mkdir("ip6t_hashlimit", net->proc_net);
	if (!hashlimit_net->ip6t_hashlimit) {
		proc_net_remove(net, "ipt_hashlimit");
		return -ENOMEM;
	}
#endif
	return 0;
}

static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
	proc_net_remove(net, "ipt_hashlimit");
#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
	proc_net_remove(net, "ip6t_hashlimit");
#endif
}

static int __net_init hashlimit_net_init(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	INIT_HLIST_HEAD(&hashlimit_net->htables);
	return hashlimit_proc_net_init(net);
}

static void __net_exit hashlimit_net_exit(struct net *net)
{
	struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);

	BUG_ON(!hlist_empty(&hashlimit_net->htables));
	hashlimit_proc_net_exit(net);
}

static struct pernet_operations hashlimit_net_ops = {
	.init	= hashlimit_net_init,
	.exit	= hashlimit_net_exit,
	.id	= &hashlimit_net_id,
	.size	= sizeof(struct hashlimit_net),
};

static int __init hashlimit_mt_init(void)
{
	int err;

	err = register_pernet_subsys(&hashlimit_net_ops);
	if (err < 0)
		return err;
	err = xt_register_matches(hashlimit_mt_reg,
	      ARRAY_SIZE(hashlimit_mt_reg));
	if (err < 0)
		goto err1;

	err = -ENOMEM;
	hashlimit_cachep = kmem_cache_create("xt_hashlimit",
					    sizeof(struct dsthash_ent), 0, 0,
					    NULL);
	if (!hashlimit_cachep) {
		pr_warning("unable to create slab cache\n");
		goto err2;
	}
	return 0;

err2:
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
err1:
	unregister_pernet_subsys(&hashlimit_net_ops);
	return err;

}

static void __exit hashlimit_mt_exit(void)
{
	xt_unregister_matches(hashlimit_mt_reg, ARRAY_SIZE(hashlimit_mt_reg));
	unregister_pernet_subsys(&hashlimit_net_ops);

	rcu_barrier_bh();
	kmem_cache_destroy(hashlimit_cachep);
}

module_init(hashlimit_mt_init);
module_exit(hashlimit_mt_exit);