/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
 *
 * $FreeBSD: head/sys/net/bpf.c 123922 2003-12-28 03:56:00Z sam $
 */
42
43#include "opt_bpf.h"
44#include "opt_mac.h"
45#include "opt_netgraph.h"
46
47#include <sys/types.h>
48#include <sys/param.h>
49#include <sys/systm.h>
50#include <sys/conf.h>
51#include <sys/mac.h>
52#include <sys/malloc.h>
53#include <sys/mbuf.h>
54#include <sys/time.h>
55#include <sys/proc.h>
56#include <sys/signalvar.h>
57#include <sys/filio.h>
58#include <sys/sockio.h>
59#include <sys/ttycom.h>
60#include <sys/filedesc.h>
61
62#include <sys/event.h>
63#include <sys/file.h>
64#include <sys/poll.h>
65#include <sys/proc.h>
66
67#include <sys/socket.h>
68#include <sys/vnode.h>
69
70#include <net/if.h>
71#include <net/bpf.h>
72#include <net/bpfdesc.h>
73
74#include <netinet/in.h>
75#include <netinet/if_ether.h>
76#include <sys/kernel.h>
77#include <sys/sysctl.h>
78
static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = 4096;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
/* Upper bound clamped onto BIOCSBLEN requests. */
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */

/* Forward declarations for the file-local routines defined below. */
static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t));
static void	reset_d(struct bpf_d *);
static int	 bpf_setf(struct bpf_d *, struct bpf_program *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);

/* Character-device entry points (one /dev/bpfN per descriptor). */
static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_maj =	CDEV_MAJOR,
	.d_kqfilter =	bpfkqfilter,
};

/* kqueue EVFILT_READ filter ops; no attach hook is needed. */
static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };
144
145static int
146bpf_movein(uio, linktype, mp, sockp, datlen)
147	struct uio *uio;
148	int linktype, *datlen;
149	struct mbuf **mp;
150	struct sockaddr *sockp;
151{
152	struct mbuf *m;
153	int error;
154	int len;
155	int hlen;
156
157	/*
158	 * Build a sockaddr based on the data link layer type.
159	 * We do this at this level because the ethernet header
160	 * is copied directly into the data field of the sockaddr.
161	 * In the case of SLIP, there is no header and the packet
162	 * is forwarded as is.
163	 * Also, we are careful to leave room at the front of the mbuf
164	 * for the link level header.
165	 */
166	switch (linktype) {
167
168	case DLT_SLIP:
169		sockp->sa_family = AF_INET;
170		hlen = 0;
171		break;
172
173	case DLT_EN10MB:
174		sockp->sa_family = AF_UNSPEC;
175		/* XXX Would MAXLINKHDR be better? */
176		hlen = ETHER_HDR_LEN;
177		break;
178
179	case DLT_FDDI:
180		sockp->sa_family = AF_IMPLINK;
181		hlen = 0;
182		break;
183
184	case DLT_RAW:
185	case DLT_NULL:
186		sockp->sa_family = AF_UNSPEC;
187		hlen = 0;
188		break;
189
190	case DLT_ATM_RFC1483:
191		/*
192		 * en atm driver requires 4-byte atm pseudo header.
193		 * though it isn't standard, vpi:vci needs to be
194		 * specified anyway.
195		 */
196		sockp->sa_family = AF_UNSPEC;
197		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
198		break;
199
200	case DLT_PPP:
201		sockp->sa_family = AF_UNSPEC;
202		hlen = 4;	/* This should match PPP_HDRLEN */
203		break;
204
205	default:
206		return (EIO);
207	}
208
209	len = uio->uio_resid;
210	*datlen = len - hlen;
211	if ((unsigned)len > MCLBYTES)
212		return (EIO);
213
214	if (len > MHLEN) {
215		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
216	} else {
217		MGETHDR(m, M_TRYWAIT, MT_DATA);
218	}
219	if (m == NULL)
220		return (ENOBUFS);
221	m->m_pkthdr.len = m->m_len = len;
222	m->m_pkthdr.rcvif = NULL;
223	*mp = m;
224
225	/*
226	 * Make room for link header.
227	 */
228	if (hlen != 0) {
229		m->m_pkthdr.len -= hlen;
230		m->m_len -= hlen;
231#if BSD >= 199103
232		m->m_data += hlen; /* XXX */
233#else
234		m->m_off += hlen;
235#endif
236		error = uiomove(sockp->sa_data, hlen, uio);
237		if (error)
238			goto bad;
239	}
240	error = uiomove(mtod(m, void *), len - hlen, uio);
241	if (!error)
242		return (0);
243bad:
244	m_freem(m);
245	return (error);
246}
247
/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	/* A non-null *bif_driverp makes the driver hand packets to bpf. */
	*bp->bif_driverp = bp;
	BPFIF_UNLOCK(bp);
}
269
/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_d **p;
	struct bpf_if *bp;

	/* XXX locking */
	/* NOTE(review): bd_bif is cleared before BPFIF_LOCK is taken below;
	 * callers appear to serialize via bpf_mtx — confirm. */
	bp = d->bd_bif;
	d->bd_bif = 0;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
	/* Remove d from the interface's descriptor list. */
	BPFIF_LOCK(bp);
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = 0;
	BPFIF_UNLOCK(bp);
}
318
/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, td)
	dev_t dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	/* Claim the minor before dropping bpf_mtx so a racing open sees
	 * a non-null si_drv1 and fails with EBUSY above. */
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	/* Create the device node on first open if it was cloned. */
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	/* Defaults: patchable buffer size, SIGIO signal, see sent packets. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);

	return (0);
}
363
/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, td)
	dev_t dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;

	/* Cancel any read timeout still pending from poll/kevent. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	/* Release buffers, filter and the descriptor mutex, then free d
	 * and mark the minor available again (see bpfopen). */
	bpf_freed(d);
	dev->si_drv1 = 0;
	free(d, M_BPF);

	return (0);
}
397
398
/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and remains safe inside unbraced if/else bodies; the old
 * bare multi-statement form silently broke in such contexts.
 */
#define ROTATE_BUFFERS(d) do {		\
	(d)->bd_hbuf = (d)->bd_sbuf;	\
	(d)->bd_hlen = (d)->bd_slen;	\
	(d)->bd_sbuf = (d)->bd_fbuf;	\
	(d)->bd_slen = 0;		\
	(d)->bd_fbuf = 0;		\
} while (0)
/*
 *  bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	/* A pending poll/kevent timeout is superseded by this read. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		/* Sleep until catchpacket() wakes us or bd_rtout expires;
		 * bd_mtx is dropped while asleep and reacquired on wakeup. */
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * NOTE(review): uiomove runs with bd_mtx dropped; the hold buffer
	 * is presumably stable because only this reader recycles it — confirm.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	/* Recycle the hold buffer onto the free slot for catchpacket(). */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}
514

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(d)
	struct bpf_d *d;
{
	/* Data is ready, so any pending read timeout is obsolete. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	/* Deliver SIGIO (or the BIOCSRSIG signal) to async readers. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	/* Wake select/poll sleepers and kevent EVFILT_READ listeners. */
	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE(&d->bd_sel.si_note, 0);
}
534
535static void
536bpf_timed_out(arg)
537	void *arg;
538{
539	struct bpf_d *d = (struct bpf_d *)arg;
540
541	BPFD_LOCK(d);
542	if (d->bd_state == BPF_WAITING) {
543		d->bd_state = BPF_TIMED_OUT;
544		if (d->bd_slen != 0)
545			bpf_wakeup(d);
546	}
547	BPFD_UNLOCK(d);
548}
549
550static	int
551bpfwrite(dev, uio, ioflag)
552	dev_t dev;
553	struct uio *uio;
554	int ioflag;
555{
556	struct bpf_d *d = dev->si_drv1;
557	struct ifnet *ifp;
558	struct mbuf *m;
559	int error;
560	static struct sockaddr dst;
561	int datlen;
562
563	if (d->bd_bif == 0)
564		return (ENXIO);
565
566	ifp = d->bd_bif->bif_ifp;
567
568	if (uio->uio_resid == 0)
569		return (0);
570
571	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
572	if (error)
573		return (error);
574
575	if (datlen > ifp->if_mtu)
576		return (EMSGSIZE);
577
578	if (d->bd_hdrcmplt)
579		dst.sa_family = pseudo_AF_HDRCMPLT;
580
581	mtx_lock(&Giant);
582#ifdef MAC
583	mac_create_mbuf_from_bpfdesc(d, m);
584#endif
585	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
586	mtx_unlock(&Giant);
587	/*
588	 * The driver frees the mbuf.
589	 */
590	return (error);
591}
592
593/*
594 * Reset a descriptor by flushing its packet buffer and clearing the
595 * receive and drop counts.
596 */
597static void
598reset_d(d)
599	struct bpf_d *d;
600{
601
602	mtx_assert(&d->bd_mtx, MA_OWNED);
603	if (d->bd_hbuf) {
604		/* Free the hold buffer. */
605		d->bd_fbuf = d->bd_hbuf;
606		d->bd_hbuf = 0;
607	}
608	d->bd_slen = 0;
609	d->bd_hlen = 0;
610	d->bd_rcount = 0;
611	d->bd_dcount = 0;
612}
613
/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len (before an interface is attached).
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGDLTLIST	Get list of supported link layer types.
 *  BIOCSDLT		Set link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCSRSIG		Set signal delivered on packet reception.
 *  BIOCGRSIG		Get that signal.
 */
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, td)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/* Any ioctl cancels a pending read timeout (see bpfpoll). */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (buffers are sized at attach time); the request is
	 * clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and the clamped
	 * value is written back to userland.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert ticks back to a timeval for userland. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
927
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	/* NOTE(review): bd_filter is snapshotted before BPFD_LOCK is taken;
	 * presumably safe because only the owning process issues BIOCSETF —
	 * confirm. */
	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		/* A null program with a nonzero length is malformed. */
		if (fp->bf_len != 0)
			return (EINVAL);
		/* Clear the filter: accept everything from now on. */
		BPFD_LOCK(d);
		d->bd_filter = 0;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	/* Copy the program in and install it only if it validates. */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		d->bd_filter = fcode;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
972
/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
			continue;

		/* NOTE(review): bpf_mtx is dropped here while bp is still
		 * used below; presumably bp cannot disappear while the
		 * ifnet exists — confirm. */
		mtx_unlock(&bpf_mtx);
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		return (0);
	}
	mtx_unlock(&bpf_mtx);
	/* Not found. */
	return (ENXIO);
}
1038
/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(dev, events, td)
	dev_t dev;
	int events;
	struct thread *td;
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	/* NOTE(review): an errno is returned through the revents slot here;
	 * poll(2) consumers see it as a revents bitmask — pre-existing quirk. */
	if (d->bd_bif == NULL)
		return (ENXIO);

	/* bpf descriptors are always writable. */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
1076
/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
/* Internal linkage: declared static as d_kqfilter_t above. */
int
bpfkqfilter(dev, kn)
	dev_t dev;
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	/* Hook the knote to this descriptor's selinfo note list. */
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	BPFD_LOCK(d);
	SLIST_INSERT_HEAD(&d->bd_sel.si_note, kn, kn_selnext);
	BPFD_UNLOCK(d);

	return (0);
}
1099
/*
 * kqueue detach hook: unhook the knote from the descriptor's note list.
 */
static void
filt_bpfdetach(kn)
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	BPFD_LOCK(d);
	SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
	BPFD_UNLOCK(d);
}
1110
/*
 * kqueue event hook: report whether the descriptor has data to read.
 * Sets kn_data to the readable byte count; otherwise arms the read
 * timeout (mirroring bpfpoll) so a sleeping kevent() can time out.
 */
static int
filt_bpfread(kn, hint)
	struct knote *kn;
	long hint;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK(d);
	ready = bpf_ready(d);
	if (ready) {
		/* Report store-buffer bytes plus any held packets. */
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}
	BPFD_UNLOCK(d);

	return (ready);
}
1135
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	struct bpf_d *d;
	u_int slen;

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* slen is the snapshot length the filter accepted (0 = drop). */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0) {
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1166
1167/*
1168 * Copy data from an mbuf chain into a buffer.  This code is derived
1169 * from m_copydata in sys/uipc_mbuf.c.
1170 */
1171static void
1172bpf_mcopy(src_arg, dst_arg, len)
1173	const void *src_arg;
1174	void *dst_arg;
1175	size_t len;
1176{
1177	const struct mbuf *m;
1178	u_int count;
1179	u_char *dst;
1180
1181	m = src_arg;
1182	dst = dst_arg;
1183	while (len > 0) {
1184		if (m == 0)
1185			panic("bpf_mcopy");
1186		count = min(m->m_len, len);
1187		bcopy(mtod(m, void *), dst, count);
1188		m = m->m_next;
1189		dst += count;
1190		len -= count;
1191	}
1192}
1193
/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
	struct bpf_d *d;
	u_int pktlen, slen;

	/* Single-mbuf packets are contiguous: take the fast path. */
	pktlen = m_length(m, NULL);
	if (pktlen == m->m_len) {
		bpf_tap(bp, mtod(m, u_char *), pktlen);
		return;
	}

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/* Skip locally generated packets unless BIOCSSEESENT set. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* buflen 0 tells bpf_filter the "packet" is an mbuf chain. */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1228
/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(bp, data, dlen, m)
	struct bpf_if *bp;
	void *data;
	u_int dlen;
	struct mbuf *m;
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		/* Skip locally generated packets unless BIOCSSEESENT set. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* buflen 0 tells bpf_filter the "packet" is an mbuf chain. */
		slen = bpf_filter(d->bd_filter, (u_char *)&mb, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1272
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	struct bpf_d *d;
	u_char *pkt;
	u_int pktlen, snaplen;
	void (*cpfn)(const void *, void *, size_t);
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1344
1345/*
1346 * Initialize all nonzero fields of a descriptor.
1347 */
1348static int
1349bpf_allocbufs(d)
1350	struct bpf_d *d;
1351{
1352	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1353	if (d->bd_fbuf == 0)
1354		return (ENOBUFS);
1355
1356	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1357	if (d->bd_sbuf == 0) {
1358		free(d->bd_fbuf, M_BPF);
1359		return (ENOBUFS);
1360	}
1361	d->bd_slen = 0;
1362	d->bd_hlen = 0;
1363	return (0);
1364}
1365
1366/*
1367 * Free buffers currently in use by a descriptor.
1368 * Called on close.
1369 */
static void
bpf_freed(d)
	struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_BPF);
		/*
		 * The buffers are allocated together (bpf_allocbufs), so a
		 * missing sbuf implies no other capture buffers exist.
		 */
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_BPF);
	}
	/* Release any installed filter program. */
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}
1390
1391/*
1392 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
1393 * fixed size of the link header (variable length headers not yet supported).
1394 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{

	/* Convenience wrapper: attach using the interface's own if_bpf hook. */
	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}
1403
1404/*
1405 * Attach an interface to bpf.  ifp is a pointer to the structure
1406 * defining the interface to be attached, dlt is the link layer type,
1407 * and hdrlen is the fixed size of the link header (variable length
1408 * headers are not yet supporrted).
1409 */
1410void
1411bpfattach2(ifp, dlt, hdrlen, driverp)
1412	struct ifnet *ifp;
1413	u_int dlt, hdrlen;
1414	struct bpf_if **driverp;
1415{
1416	struct bpf_if *bp;
1417	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
1418	if (bp == 0)
1419		panic("bpfattach");
1420
1421	bp->bif_dlist = 0;
1422	bp->bif_driverp = driverp;
1423	bp->bif_ifp = ifp;
1424	bp->bif_dlt = dlt;
1425	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
1426
1427	mtx_lock(&bpf_mtx);
1428	bp->bif_next = bpf_iflist;
1429	bpf_iflist = bp;
1430	mtx_unlock(&bpf_mtx);
1431
1432	*bp->bif_driverp = 0;
1433
1434	/*
1435	 * Compute the length of the bpf header.  This is not necessarily
1436	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1437	 * that the network layer header begins on a longword boundary (for
1438	 * performance reasons and to alleviate alignment restrictions).
1439	 */
1440	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1441
1442	if (bootverbose)
1443		if_printf(ifp, "bpf attached\n");
1444}
1445
1446/*
1447 * Detach bpf from an interface.  This involves detaching each descriptor
1448 * associated with the interface, and leaving bd_bif NULL.  Notify each
1449 * descriptor as it's detached so that any sleepers wake up and get
1450 * ENXIO.
1451 */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	struct bpf_if	*bp, *bp_prev;
	struct bpf_d	*d;

	/* Locate BPF interface information */
	bp_prev = NULL;

	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)
			break;
		bp_prev = bp;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	/* Unlink bp from the global list before dropping bpf_mtx. */
	if (bp_prev) {
		bp_prev->bif_next = bp->bif_next;
	} else {
		bpf_iflist = bp->bif_next;
	}
	mtx_unlock(&bpf_mtx);

	/*
	 * Detach each remaining descriptor, waking it so that sleeping
	 * readers return (with ENXIO, per the comment above this function).
	 */
	while ((d = bp->bif_dlist) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	/* All descriptors gone; tear down the interface state itself. */
	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}
1493
1494/*
1495 * Get a list of available data link type of the interface.
1496 */
static int
bpf_getdltlist(d, bfl)
	struct bpf_d *d;
	struct bpf_dltlist *bfl;
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		/* Only bpf_ifs on the descriptor's own interface count. */
		if (bp->bif_ifp != ifp)
			continue;
		/* A NULL bfl_list means the caller only wants the count. */
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			/*
			 * NOTE(review): copyout can fault/sleep; performing
			 * it while holding bpf_mtx (a MTX_DEF mutex) looks
			 * unsafe -- confirm whether this needs restructuring
			 * (e.g. snapshot the DLTs, then copy out unlocked).
			 */
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	/* Report how many DLTs exist, regardless of list mode. */
	bfl->bfl_len = n;
	return (error);
}
1527
1528/*
1529 * Set the data link type of a BPF instance.
1530 */
1531static int
1532bpf_setdlt(d, dlt)
1533	struct bpf_d *d;
1534	u_int dlt;
1535{
1536	int error, opromisc;
1537	struct ifnet *ifp;
1538	struct bpf_if *bp;
1539
1540	if (d->bd_bif->bif_dlt == dlt)
1541		return (0);
1542	ifp = d->bd_bif->bif_ifp;
1543	mtx_lock(&bpf_mtx);
1544	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1545		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1546			break;
1547	}
1548	mtx_unlock(&bpf_mtx);
1549	if (bp != NULL) {
1550		BPFD_LOCK(d);
1551		opromisc = d->bd_promisc;
1552		bpf_detachd(d);
1553		bpf_attachd(d, bp);
1554		reset_d(d);
1555		BPFD_UNLOCK(d);
1556		if (opromisc) {
1557			error = ifpromisc(bp->bif_ifp, 1);
1558			if (error)
1559				if_printf(bp->bif_ifp,
1560					"bpf_setdlt: ifpromisc failed (%d)\n",
1561					error);
1562			else
1563				d->bd_promisc = 1;
1564		}
1565	}
1566	return (bp == NULL ? EINVAL : 0);
1567}
1568
1569static void bpf_drvinit(void *unused);
1570
1571static void bpf_clone(void *arg, char *name, int namelen, dev_t *dev);
1572
1573static void
1574bpf_clone(arg, name, namelen, dev)
1575	void *arg;
1576	char *name;
1577	int namelen;
1578	dev_t *dev;
1579{
1580	int u;
1581
1582	if (*dev != NODEV)
1583		return;
1584	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
1585		return;
1586	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
1587	    "bpf%d", u);
1588	(*dev)->si_flags |= SI_CHEAPCLONE;
1589	return;
1590}
1591
static void
bpf_drvinit(unused)
	void *unused;
{

	/* One-time module setup: the global lock and the clone handler. */
	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}

/* Run bpf_drvinit() during the driver phase of system initialization. */
SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)
1602
1603#else /* !DEV_BPF && !NETGRAPH_BPF */
1604/*
1605 * NOP stubs to allow bpf-using drivers to load and function.
1606 *
1607 * A 'better' implementation would allow the core bpf functionality
1608 * to be loaded at runtime.
1609 */
1610
/* NOP stub: BPF is compiled out, so tapped packets are discarded. */
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
}
1618
/* NOP stub: BPF is compiled out, so tapped mbuf chains are discarded. */
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
}
1625
/* NOP stub: BPF is compiled out, so header+chain taps are discarded. */
void
bpf_mtap2(bp, d, l, m)
	struct bpf_if *bp;
	const void *d;
	u_int l;
	struct mbuf *m;
{
}
1634
/* NOP stub: attaching an interface does nothing without BPF. */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}
1641
/* NOP stub: attaching an interface does nothing without BPF. */
void
bpfattach2(ifp, dlt, hdrlen, driverp)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
	struct bpf_if **driverp;
{
}
1649
/* NOP stub: detaching an interface does nothing without BPF. */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
}
1655
/*
 * Stub filter: returning (u_int)-1 requests the maximum snapshot
 * length, i.e. "accept the entire packet" as if no filter were set.
 */
u_int
bpf_filter(pc, p, wirelen, buflen)
	const struct bpf_insn *pc;
	u_char *p;
	u_int wirelen;
	u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}
1665
/* Stub validator: with BPF compiled out, no filter program is valid. */
int
bpf_validate(f, len)
	const struct bpf_insn *f;
	int len;
{
	return 0;		/* false */
}
1673
1674#endif /* !DEV_BPF && !NETGRAPH_BPF */
1675