/*	$NetBSD: bpf_filter.c,v 1.72 2023/08/17 15:16:33 christos Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.72 2023/08/17 15:16:33 christos Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#ifdef _KERNEL
#include <sys/module.h>
#endif

#define	__BPF_PRIVATE
#include <net/bpf.h>

#ifdef _KERNEL

bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
	if (nwords > BPF_MAX_MEMWORDS || (preinited >> nwords) != 0) {
		return EINVAL;
	}
	bc->extwords = nwords;
	bc->preinited = preinited;
	return 0;
}

#endif
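
/*
 * Illustrative sketch (not compiled in): how a kernel consumer might
 * build a BPF context with one coprocessor function and two external
 * memory words, pre-initialising word 0.  The names "example_cop",
 * "example_copfuncs" and "example_ctx_create" are hypothetical and do
 * not exist elsewhere.
 */
#if 0
static uint32_t
example_cop(const bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
{
	/* A coprocessor receives the accumulator and returns its new value. */
	return A;
}

static const bpf_copfunc_t example_copfuncs[] = { example_cop };

static bpf_ctx_t *
example_ctx_create(void)
{
	bpf_ctx_t *bc = bpf_create();

	/* Programs may now use BPF_COP/BPF_COPX with index 0. */
	(void)bpf_set_cop(bc, example_copfuncs, __arraycount(example_copfuncs));

	/* Two external memory words; the caller must fill in word 0. */
	(void)bpf_set_extmem(bc, 2, BPF_MEMWORD_INIT(0));
	return bc;
}
#endif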
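/*
 * Packet fields are in network byte order; these helpers decode
 * big-endian 16- and 32-bit values from possibly unaligned pointers.
 */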
#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
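
/*
 * Walk an mbuf chain: on exit, m points at the mbuf holding the byte
 * at the original offset k, k is the remaining offset within that
 * mbuf and len is that mbuf's length.  Returns 0 from the enclosing
 * function if the chain is too short.
 */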
#define MINDEX(len, m, k) 		\
{					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
}

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))

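/*
 * m_xword/m_xhalf/m_xbyte fetch a 32-bit, 16-bit or 8-bit value at
 * offset k of an mbuf chain, handling values that straddle an mbuf
 * boundary byte by byte.  On failure, *err is left non-zero and 0 is
 * returned.
 */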
uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || (len - k) + m0->m_len < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 1;
	MINDEX(len, m, k);
	*err = 0;
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount
 * of packet data present in the buffer (in the kernel, a buflen of 0
 * means that p actually points to an mbuf chain).
 */
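
/*
 * Illustrative sketch (not compiled in): running the interpreter over
 * a packet that is fully present in one contiguous buffer.  A return
 * value of 0 rejects the packet; a non-zero value is the number of
 * bytes to keep.  The function name "example_match" is hypothetical.
 */
#if 0
static int
example_match(const struct bpf_insn *insns, const u_char *pkt, u_int pktlen)
{
	/* Contiguous buffer: buflen == wirelen. */
	return bpf_filter(insns, pkt, pktlen, pktlen) != 0;
}
#endif
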
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};

	return bpf_filter_ext(NULL, pc, &args);
}

u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
__strong_alias(pcap_filter, bpf_filter)
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
#ifndef _KERNEL
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args_store = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = args->pkt;
#endif
	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: it is safe to leave the memory words uninitialised here,
	 * as the validation step ensures that no word is read before it
	 * has been written.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

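		/*
		 * Load X with four times the low nibble of packet byte k;
		 * with k pointing at an IP header this yields the header
		 * length in bytes.
		 */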
		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if 'f' points to a valid filter program.
 * The constraints are that each jump be forward and land on a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet data
 * have to be, and are, also checked at run time), and that the code
 * terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
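
/*
 * Illustrative sketch (not compiled in): the expected kernel flow is
 * to validate a program once, when it is attached, and only then run
 * it with bpf_filter_ext().  With external memory configured, the mem
 * array must instead provide bc->extwords words and the caller must
 * fill in the pre-initialised ones.  All names here are hypothetical.
 */
#if 0
static u_int
example_run_checked(const bpf_ctx_t *bc, const struct bpf_insn *prog,
    int ninsns, const struct mbuf *m, u_int wirelen)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = (const uint8_t *)m,
		.wirelen = wirelen,
		.buflen = 0,	/* 0 tells the interpreter pkt is an mbuf chain */
		.mem = mem,
		.arg = NULL
	};

	if (!bpf_validate_ext(bc, prog, ninsns))
		return 0;	/* reject: invalid program */
	return bpf_filter_ext(bc, prog, &args);
}
#endif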

#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(NULL, f, signed_len);
}

int
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
__strong_alias(pcap_validate_filter, bpf_validate)
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	bpf_memword_init_t *mem, invalid;
	size_t size;
	const size_t extwords = bc ? bc->extwords : 0;
	const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
#else
	const size_t memwords = BPF_MEMWORDS;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (f[len - 1].code != (BPF_RET|BPF_K) &&
	    f[len - 1].code != (BPF_RET|BPF_A)) {
		return 0;
	}

#if defined(KERNEL) || defined(_KERNEL)
	/* Note: only the pre-initialised memory words are valid on startup */
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~preinited;
#endif

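	/*
	 * Track memory-word validity: "invalid" is the set of words not yet
	 * written along the current path, and mem[i] accumulates the invalid
	 * sets that jumps carry into instruction i, so that a load of a word
	 * is rejected unless it has been stored to (or pre-initialised) on
	 * every path that reaches the load.
	 */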
	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * A stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= memwords)
					goto out;
				/* reject a load of a memory word that may be uninitialised */
				if (invalid & BPF_MEMWORD_INIT(p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= memwords)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* a store marks this memory word as valid */
			invalid &= ~BPF_MEMWORD_INIT(p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_XOR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
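			/*
			 * For instance, with from == 10 and an unconditional
			 * offset k of 0xfffffffe, from + k wraps around to 8;
			 * the "from + p->k < from" test below catches this.
			 */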
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
#if defined(KERNEL) || defined(_KERNEL)
				if (bc == NULL || bc->copfuncs == NULL)
					goto out;
				if (BPF_MISCOP(p->code) == BPF_COP &&
				    p->k >= bc->nfuncs) {
					goto out;
				}
				break;
#else
				goto out;
#endif
			default:
				break;
			}
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}

/* Kernel module interface */

#ifdef _KERNEL
MODULE(MODULE_CLASS_MISC, bpf_filter, NULL);

static int
bpf_filter_modcmd(modcmd_t cmd, void *opaque)
{

	switch (cmd) {
	case MODULE_CMD_INIT:
	case MODULE_CMD_FINI:
		return 0;
	default:
		return ENOTTY;
	}
}
#endif
