/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, such as cls_rsvp.c.
 *
 *	JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 *	nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

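/* A key node: one u32 filter.  Key nodes hang off the hash buckets of
 * their parent hash table (ht_up) and carry the selector (a set of
 * 32bit key/mask pairs at given offsets) together with the result and
 * extensions applied on a match. */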
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
	struct tcf_exts		exts;
#ifdef CONFIG_NET_CLS_IND
	char                     indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
#ifdef CONFIG_CLS_U32_MARK
	struct tc_u32_mark	mark;
#endif
	struct tc_u32_sel	sel;
};

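/* A hash table node.  ht[] is a variable length array of bucket heads;
 * it is allocated with divisor+1 entries (see u32_change), so divisor
 * is the largest valid bucket index, not the bucket count. */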
struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	u32			prio;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	struct tc_u_knode	*ht[1];
};

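/* State shared by all u32 classifier instances attached to the same
 * qdisc: the list of hash tables and the generator for table ids. */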
struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static struct tcf_ext_map u32_ext_map = {
	.action = TCA_U32_ACT,
	.police = TCA_U32_POLICE
};

static struct tc_u_common *u32_list;

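/* Fold a 32bit key into a bucket index: mask out the interesting bits
 * and shift them down by fshift, the number of trailing zero bits of
 * hmask precomputed in u32_change. */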
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = (key & sel->hmask)>>fshift;

	return h;
}

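/* Walk the hash table hierarchy.  At each key node compare all selector
 * key/mask pairs against the packet; on a full match either terminate
 * with the node's result or descend into the linked table (ht_down),
 * pushing the node so that mismatches deeper down can backtrack.  The
 * stack bounds the descent to TC_U32_MAXDEPTH levels. */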
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb_network_header(skb);
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i, r;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt +=1;
		j = 0;
#endif

#ifdef CONFIG_CLS_U32_MARK
		if ((skb->mark & n->mark.mask) != n->mark.val) {
			n = n->next;
			goto next_knode;
		} else {
			n->mark.success++;
		}
#endif

		for (i = n->sel.nkeys; i>0; i--, key++) {

			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] +=1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, n->indev)) {
					n = n->next;
					goto next_knode;
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit +=1;
#endif
				r = tcf_exts_exec(skb, &n->exts, res);
				if (r < 0) {
					n = n->next;
					goto next_knode;
				}

				return r;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel,n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

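		/* Compute the offset of the next header: the fixed part
		 * (sel.off) plus, for VAROFFSET, a 16bit field read from
		 * the packet, masked and shifted; the sum is rounded up
		 * to a multiple of 4. */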
		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb_tail_pointer(skb))
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}

static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}

static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

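/* A handle encodes 12 bits of hash table id, 8 bits of bucket and
 * 12 bits of node id (see the TC_U32_HTID/TC_U32_HASH/TC_U32_NODE
 * macros in linux/pkt_cls.h).  Auto-generated table ids always have
 * the 0x800 bit set, which keeps them in a range of their own. */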
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

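/* Attach a new u32 instance.  All instances on the same qdisc share one
 * tc_u_common; each instance gets its own root hash table with a single
 * bucket (divisor 0). */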
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kzalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;
	root_ht->prio = tp->prio;

	if (tp_c == NULL) {
		tp_c = kzalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	tcf_unbind_filter(tp, &n->res);
	tcf_exts_destroy(tp, &n->exts);
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht=tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}

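/* Pick a node id for a new key: one more than the largest node id
 * already used in the target bucket, but never below 0x800 and capped
 * at 0xFFF. */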
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}

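/* Apply the attributes common to create and change: an optional link to
 * a lower hash table (TCA_U32_LINK), the classid binding, the optional
 * input device match and the extensions (actions, policing). */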
static int u32_set_parms(struct tcf_proto *tp, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
	int err;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &u32_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			goto errout;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				goto errout;
			ht_down->refcnt++;
		}

		tcf_tree_lock(tp);
		ht_down = xchg(&n->ht_down, ht_down);
		tcf_tree_unlock(tp);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		tcf_bind_filter(tp, &n->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_U32_INDEV-1]) {
		int err = tcf_change_indev(tp, n->indev, tb[TCA_U32_INDEV-1]);
		if (err < 0)
			goto errout;
	}
#endif
	tcf_exts_change(tp, &n->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

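/* Create or update a filter.  Three cases: *arg set means an existing
 * key node is being changed; TCA_U32_DIVISOR creates a new hash table;
 * otherwise a new key node is created and inserted into the table
 * selected by TCA_U32_HASH (or the root table). */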
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_U32_MAX, opt) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kzalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->prio = tp->prio;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	n = kzalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

#ifdef CONFIG_CLS_U32_PERF
	n->pf = kzalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
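	/* Precompute fshift: the number of trailing zero bits in hmask,
	 * used by u32_hash_fold to turn the masked key into an index. */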
{
	u8 i = 0;
	u32 mask = s->hmask;
	if (mask) {
		while (!(mask & 1)) {
			i++;
			mask>>=1;
		}
	}
	n->fshift = i;
}

#ifdef CONFIG_CLS_U32_MARK
	if (tb[TCA_U32_MARK-1]) {
		struct tc_u32_mark *mark;

		if (RTA_PAYLOAD(tb[TCA_U32_MARK-1]) < sizeof(struct tc_u32_mark)) {
#ifdef CONFIG_CLS_U32_PERF
			kfree(n->pf);
#endif
			kfree(n);
			return -EINVAL;
		}
		mark = RTA_DATA(tb[TCA_U32_MARK-1]);
		memcpy(&n->mark, mark, sizeof(struct tc_u32_mark));
		n->mark.success = 0;
	}
#endif

	err = u32_set_parms(tp, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	kfree(n->pf);
#endif
	kfree(n);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (ht->prio != tp->prio)
			continue;
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

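/* Dump one filter: for a hash table node only the divisor, for a key
 * node the selector plus bindings, mark, extensions and counters. */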
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);

#ifdef CONFIG_CLS_U32_MARK
		if (n->mark.val || n->mark.mask)
			RTA_PUT(skb, TCA_U32_MARK, sizeof(n->mark), &n->mark);
#endif

		if (tcf_exts_dump(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev))
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(u64),
			n->pf);
#endif
	}

	rta->rta_len = skb_tail_pointer(skb) - b;
	if (TC_U32_KEY(n->handle))
		if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0)
			goto rtattr_failure;
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops = {
	.next		=	NULL,
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
	printk("    OLD policer on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");