/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed into hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems to represent the best middle ground between
 *	speed and manageability, both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */
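
/*
 * Illustrative usage (editor's sketch, not part of the original source;
 * exact syntax depends on the iproute2 version):
 *
 *	# create a 256-bucket hash table with handle 2:
 *	tc filter add dev eth0 parent 1:0 prio 1 handle 2: protocol ip \
 *		u32 divisor 256
 *	# from the root table, match TCP and continue the lookup in 2:,
 *	# hashing on the low byte of the destination address
 *	tc filter add dev eth0 parent 1:0 prio 1 protocol ip \
 *		u32 ht 800:: match ip protocol 6 0xff \
 *		hashkey mask 0x000000ff at 16 link 2:
 *
 * Each "match" becomes one 32bit value/mask key in a key node's selector
 * (struct tc_u32_sel); "link" and "hashkey" correspond to ht_down and the
 * hash fold performed in u32_classify() below.
 */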

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

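/*
 * Data structures: tc_u_common is shared by all u32 filters attached to one
 * qdisc and anchors the list of hash tables (tc_u_hnode).  Each tc_u_hnode
 * owns 'divisor + 1' buckets of key nodes (tc_u_knode); a key node carries
 * the selector (tc_u32_sel) with its value/mask pairs and may link down to
 * another hash table via ht_down.
 */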
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common
{
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static const struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

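/*
 * Fold a 32bit word into a bucket index: mask the key with the selector's
 * hash mask and shift right by fshift (the position of the mask's lowest
 * set bit, precomputed in u32_change()).  The caller masks the result with
 * the table's divisor.
 */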
static __inline__ unsigned u32_hash_fold(__be32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = ntohl(key & sel->hmask)>>fshift;

	return h;
}

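/*
 * Packet classification walk: start at the root hash table and, for every
 * candidate key node, compare each selector key against the 32bit word at
 * its offset in the packet.  A full match either terminates the lookup
 * (TC_U32_TERMINAL) or pushes the current position on a small stack and
 * descends into the linked hash table, possibly adjusting the base offset
 * (TC_U32_OFFSET/TC_U32_VAROFFSET) and "eating" the header (TC_U32_EAT).
 * The stack depth is limited to TC_U32_MAXDEPTH to break configuration
 * loops.
 */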
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		unsigned int	  off;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	unsigned int off = skb_network_offset(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt +=1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {
			int toff = off + key->off + (off2 & key->offmask);
			__be32 *data, _data;

			if (skb_headroom(skb) + toff > INT_MAX)
				goto out;

			data = skb_header_pointer(skb, toff, 4, &_data);
			if (!data)
				goto out;
			if ((*data ^ key->val) & key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] +=1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit +=1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].off = off;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor) {
			__be32 *data, _data;

			data = skb_header_pointer(skb, off + n->sel.hoff, 4,
						  &_data);
			if (!data)
				goto out;
			sel = ht->divisor & u32_hash_fold(*data, &n->sel,
							  n->fshift);
		}
		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags & TC_U32_VAROFFSET) {
				__be16 *data, _data;

				data = skb_header_pointer(skb,
							  off + n->sel.offoff,
							  2, &_data);
				if (!data)
					goto out;
				off2 += ntohs(n->sel.offmask & *data) >>
					n->sel.offshift;
			}
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			off += off2;
			off2 = 0;
		}

		if (off < skb->len)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		off = stack[sdepth].off;
		goto check_terminal;
	}
out:
	return -1;

deadloop:
	if (net_ratelimit())
		printk(KERN_WARNING "cls_u32: dead loop\n");
	return -1;
}

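/*
 * Handle layout (see the TC_U32_* macros in <linux/pkt_cls.h>): the upper
 * 12 bits select the hash table (htid), the next 8 bits the bucket, and the
 * low 12 bits the key node within the bucket.  The helpers below resolve a
 * handle first to its hash table, then to the key node.
 */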
static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}


static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

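/*
 * Pick an unused hash table id.  The generator cycles through 1..0x7FE and
 * the chosen value is placed in the htid bits of the handle with 0x800
 * or'ed in, so automatically allocated tables never collide with the fixed
 * root id 0x80000000.  Returns 0 if no free id could be found.
 */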
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

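/*
 * A fresh tcf_proto gets its own root hash table (divisor 0, i.e. a single
 * bucket); the per-qdisc tc_u_common is created on first use and is shared
 * by every u32 instance attached to the same qdisc via q->u32_node.
 */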
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	tp_c = tp->q->u32_node;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp->q->u32_node = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

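/*
 * Unlink one key node from its bucket under the tree lock and free it.
 * A key that cannot be found on its parent's chain indicates corrupted
 * bookkeeping, hence the WARN_ON.
 */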
static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	WARN_ON(1);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	WARN_ON(ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	WARN_ON(1);
	return -ENOENT;
}

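/*
 * Tear down one u32 instance: drop the root hash table when its refcount
 * reaches zero, and when the last tcf_proto sharing the tc_u_common goes
 * away, empty and free all remaining hash tables and the common block
 * itself.
 */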
static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = tp->root;

	WARN_ON(root_ht == NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;

		tp->q->u32_node = NULL;

		for (ht = tp_c->hlist; ht; ht = ht->next) {
			ht->refcnt--;
			u32_clear_hnode(tp, ht);
		}

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			WARN_ON(ht->refcnt != 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

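/*
 * Delete either a single key node (handle has a non-zero node id) or a
 * whole user-created hash table.  The root table cannot be removed here,
 * and a table that is still referenced by a link is refused with -EBUSY.
 */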
static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (ht->refcnt == 1) {
		ht->refcnt--;
		u32_destroy_hnode(tp, ht);
	} else {
		return -EBUSY;
	}

	return 0;
}

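/*
 * Choose a node id for a new key within the given bucket: one past the
 * highest id already in use, starting from 0x800 and saturating at 0xFFF.
 */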
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}

static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
	[TCA_U32_CLASSID]	= { .type = NLA_U32 },
	[TCA_U32_HASH]		= { .type = NLA_U32 },
	[TCA_U32_LINK]		= { .type = NLA_U32 },
	[TCA_U32_DIVISOR]	= { .type = NLA_U32 },
	[TCA_U32_SEL]		= { .len = sizeof(struct tc_u32_sel) },
	[TCA_U32_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_U32_MARK]		= { .len = sizeof(struct tc_u32_mark) },
};

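/*
 * Apply the netlink attributes shared by new and existing key nodes:
 * validate and attach extensions (actions/policing), resolve TCA_U32_LINK
 * to a downstream hash table, bind TCA_U32_CLASSID, and record the
 * optional input device match.
 */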
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct nlattr **tb,
			 struct nlattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK]) {
		u32 handle = nla_get_u32(tb[TCA_U32_LINK]);
		struct tc_u_hnode *ht_down = NULL, *ht_old;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_old = n->ht_down;
		n->ht_down = ht_down;
		tcf_tree_unlock(tp);

		if (ht_old)
			ht_old->refcnt--;
	}
	if (tb[TCA_U32_CLASSID]) {
		n->res.classid = nla_get_u32(tb[TCA_U32_CLASSID]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV]) {
		err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

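/*
 * Create or update a filter element.  Three cases are handled in order:
 * an existing key node is only re-parametrised; TCA_U32_DIVISOR creates a
 * new hash table (the requested divisor is sanity-checked); otherwise a
 * new key node is allocated, its selector copied from TCA_U32_SEL, and it
 * is inserted into its bucket sorted by node id.
 */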
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct nlattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_U32_MAX + 1];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_U32_MAX, opt, u32_policy);
	if (err < 0)
		return err;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE]);
	}

	if (tb[TCA_U32_DIVISOR]) {
		unsigned divisor = nla_get_u32(tb[TCA_U32_DIVISOR]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 1;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH]) {
		htid = nla_get_u32(tb[TCA_U32_HASH]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL] == NULL)
		return -EINVAL;

	s = nla_data(tb[TCA_U32_SEL]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	n->fshift = s->hmask ? ffs(ntohl(s->hmask)) - 1 : 0;

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK]) {
		struct tc_u32_mark *mark;

		mark = nla_data(tb[TCA_U32_MARK]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		tcf_tree_lock(tp);
		*ins = n;
		tcf_tree_unlock(tp);

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

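/*
 * Iterate over every hash table of this priority and every key node in it,
 * invoking the walker callback and honouring its skip/count/stop
 * bookkeeping.
 */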
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

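/*
 * Dump one element back to user space: a hash table is reported by its
 * divisor only, a key node by its selector, parent table, classid, link,
 * optional mark and performance counters, plus its extensions.
 */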
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	struct nlattr *nest;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		NLA_PUT_U32(skb, TCA_U32_DIVISOR, divisor);
	} else {
		NLA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			NLA_PUT_U32(skb, TCA_U32_HASH, htid);
		}
		if (n->res.classid)
			NLA_PUT_U32(skb, TCA_U32_CLASSID, n->res.classid);
		if (n->ht_down)
			NLA_PUT_U32(skb, TCA_U32_LINK, n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			NLA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			NLA_PUT_STRING(skb, TCA_U32_INDEV, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		NLA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	nla_nest_end(skb, nest);

	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto nla_put_failure;
	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops __read_mostly = {
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	pr_info("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	pr_info("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	pr_info("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	pr_info("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");