bpf.c revision 134967
1/*
2 * Copyright (c) 1990, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
35 *
36 * $FreeBSD: head/sys/net/bpf.c 134967 2004-09-09 00:19:27Z rwatson $
37 */
38
39#include "opt_bpf.h"
40#include "opt_mac.h"
41#include "opt_netgraph.h"
42
43#include <sys/types.h>
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/conf.h>
47#include <sys/mac.h>
48#include <sys/malloc.h>
49#include <sys/mbuf.h>
50#include <sys/time.h>
51#include <sys/proc.h>
52#include <sys/signalvar.h>
53#include <sys/filio.h>
54#include <sys/sockio.h>
55#include <sys/ttycom.h>
56#include <sys/filedesc.h>
57
58#include <sys/event.h>
59#include <sys/file.h>
60#include <sys/poll.h>
61#include <sys/proc.h>
62
63#include <sys/socket.h>
64#include <sys/vnode.h>
65
66#include <net/if.h>
67#include <net/bpf.h>
68#include <net/bpfdesc.h>
69
70#include <netinet/in.h>
71#include <netinet/if_ether.h>
72#include <sys/kernel.h>
73#include <sys/sysctl.h>
74
75static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
76
77#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
78
79#define PRINET  26			/* interruptible */
80
81/*
82 * The default read buffer size is patchable.
83 */
84static int bpf_bufsize = 4096;
85SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
86	&bpf_bufsize, 0, "");
87static int bpf_maxbufsize = BPF_MAXBUFSIZE;
88SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
89	&bpf_maxbufsize, 0, "");
90
91/*
92 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
93 */
94static LIST_HEAD(, bpf_if)	bpf_iflist;
95static struct mtx	bpf_mtx;		/* bpf global lock */
96
97static int	bpf_allocbufs(struct bpf_d *);
98static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
99static void	bpf_detachd(struct bpf_d *d);
100static void	bpf_freed(struct bpf_d *);
101static void	bpf_mcopy(const void *, void *, size_t);
102static int	bpf_movein(struct uio *, int,
103		    struct mbuf **, struct sockaddr *, int *);
104static int	bpf_setif(struct bpf_d *, struct ifreq *);
105static void	bpf_timed_out(void *);
106static __inline void
107		bpf_wakeup(struct bpf_d *);
108static void	catchpacket(struct bpf_d *, u_char *, u_int,
109		    u_int, void (*)(const void *, void *, size_t));
110static void	reset_d(struct bpf_d *);
111static int	 bpf_setf(struct bpf_d *, struct bpf_program *);
112static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
113static int	bpf_setdlt(struct bpf_d *, u_int);
114static void	filt_bpfdetach(struct knote *);
115static int	filt_bpfread(struct knote *, long);
116
117static	d_open_t	bpfopen;
118static	d_close_t	bpfclose;
119static	d_read_t	bpfread;
120static	d_write_t	bpfwrite;
121static	d_ioctl_t	bpfioctl;
122static	d_poll_t	bpfpoll;
123static	d_kqfilter_t	bpfkqfilter;
124
125static struct cdevsw bpf_cdevsw = {
126	.d_version =	D_VERSION,
127	.d_flags =	D_NEEDGIANT,
128	.d_open =	bpfopen,
129	.d_close =	bpfclose,
130	.d_read =	bpfread,
131	.d_write =	bpfwrite,
132	.d_ioctl =	bpfioctl,
133	.d_poll =	bpfpoll,
134	.d_name =	"bpf",
135	.d_kqfilter =	bpfkqfilter,
136};
137
138static struct filterops bpfread_filtops =
139	{ 1, NULL, filt_bpfdetach, filt_bpfread };
140
141static int
142bpf_movein(uio, linktype, mp, sockp, datlen)
143	struct uio *uio;
144	int linktype, *datlen;
145	struct mbuf **mp;
146	struct sockaddr *sockp;
147{
148	struct mbuf *m;
149	int error;
150	int len;
151	int hlen;
152
153	/*
154	 * Build a sockaddr based on the data link layer type.
155	 * We do this at this level because the ethernet header
156	 * is copied directly into the data field of the sockaddr.
157	 * In the case of SLIP, there is no header and the packet
158	 * is forwarded as is.
159	 * Also, we are careful to leave room at the front of the mbuf
160	 * for the link level header.
161	 */
162	switch (linktype) {
163
164	case DLT_SLIP:
165		sockp->sa_family = AF_INET;
166		hlen = 0;
167		break;
168
169	case DLT_EN10MB:
170		sockp->sa_family = AF_UNSPEC;
171		/* XXX Would MAXLINKHDR be better? */
172		hlen = ETHER_HDR_LEN;
173		break;
174
175	case DLT_FDDI:
176		sockp->sa_family = AF_IMPLINK;
177		hlen = 0;
178		break;
179
180	case DLT_RAW:
181	case DLT_NULL:
182		sockp->sa_family = AF_UNSPEC;
183		hlen = 0;
184		break;
185
186	case DLT_ATM_RFC1483:
187		/*
188		 * en atm driver requires 4-byte atm pseudo header.
189		 * though it isn't standard, vpi:vci needs to be
190		 * specified anyway.
191		 */
192		sockp->sa_family = AF_UNSPEC;
193		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
194		break;
195
196	case DLT_PPP:
197		sockp->sa_family = AF_UNSPEC;
198		hlen = 4;	/* This should match PPP_HDRLEN */
199		break;
200
201	default:
202		return (EIO);
203	}
204
205	len = uio->uio_resid;
206	*datlen = len - hlen;
207	if ((unsigned)len > MCLBYTES)
208		return (EIO);
209
210	if (len > MHLEN) {
211		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
212	} else {
213		MGETHDR(m, M_TRYWAIT, MT_DATA);
214	}
215	if (m == NULL)
216		return (ENOBUFS);
217	m->m_pkthdr.len = m->m_len = len;
218	m->m_pkthdr.rcvif = NULL;
219	*mp = m;
220
221	/*
222	 * Make room for link header.
223	 */
224	if (hlen != 0) {
225		m->m_pkthdr.len -= hlen;
226		m->m_len -= hlen;
227#if BSD >= 199103
228		m->m_data += hlen; /* XXX */
229#else
230		m->m_off += hlen;
231#endif
232		error = uiomove(sockp->sa_data, hlen, uio);
233		if (error)
234			goto bad;
235	}
236	error = uiomove(mtod(m, void *), len - hlen, uio);
237	if (!error)
238		return (0);
239bad:
240	m_freem(m);
241	return (error);
242}
243
244/*
245 * Attach file to the bpf interface, i.e. make d listen on bp.
246 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	/* Non-NULL cookie is what makes the driver start tapping. */
	*bp->bif_driverp = bp;
	BPFIF_UNLOCK(bp);
}
264
265/*
266 * Detach a file from its interface.
267 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_if *bp;

	/* XXX locking -- bd_bif is cleared before taking BPFIF_LOCK below. */
	bp = d->bd_bif;
	d->bd_bif = NULL;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
	/* Remove d from the interface's descriptor list. */
	BPFIF_LOCK(bp);
	LIST_REMOVE(d, bd_next);
	if (LIST_EMPTY(&bp->bif_dlist))
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	BPFIF_UNLOCK(bp);
}
306
307/*
308 * Open ethernet device.  Returns ENXIO for illegal minor device number,
309 * EBUSY if file is open by another process.
310 */
311/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, td)
	struct cdev *dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	/*
	 * Claim the minor with a sentinel before dropping bpf_mtx so a
	 * concurrent open sees it busy; the real descriptor pointer is
	 * installed below, after the (possibly sleeping) allocation.
	 */
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	/* Give the node a name on first open if it does not have one yet. */
	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	/* Defaults: patchable buffer size, SIGIO signal, see sent packets. */
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, debug_mpsafenet ? CALLOUT_MPSAFE : 0);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx);

	return (0);
}
352
353/*
354 * Close the descriptor by detaching it from its interface,
355 * deallocating its buffers, and marking it free.
356 */
357/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, td)
	struct cdev *dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;

	/* Cancel any pending read timeout and force the idle state. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	/* Drop SIGIO ownership before tearing the descriptor down. */
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	/* Release buffers, filter and the descriptor mutex, then free d. */
	bpf_freed(d);
	dev->si_drv1 = NULL;
	free(d, M_BPF);

	return (0);
}
387
388
389/*
390 * Rotate the packet buffers in descriptor d.  Move the store buffer
391 * into the hold slot, and the free buffer into the store slot.
392 * Zero the length of the new store buffer.
393 */
/*
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and stays safe inside unbraced if/else bodies; the bare
 * multi-statement form silently broke in such contexts.
 */
#define ROTATE_BUFFERS(d) do {						\
	(d)->bd_hbuf = (d)->bd_sbuf;					\
	(d)->bd_hlen = (d)->bd_slen;					\
	(d)->bd_sbuf = (d)->bd_fbuf;					\
	(d)->bd_slen = 0;						\
	(d)->bd_fbuf = NULL;						\
} while (0)
400/*
401 *  bpfread - read next chunk of packets from buffers
402 */
static	int
bpfread(dev, uio, ioflag)
	struct cdev *dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	/* A pending select/kevent timeout is superseded by this read. */
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		/* Sleep until a buffer rotation wakes us or bd_rtout expires. */
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	/* Recycle the hold buffer into the free slot for the tap path. */
	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}
504
505
506/*
507 * If there are processes sleeping on this descriptor, wake them up.
508 */
static __inline void
bpf_wakeup(d)
	struct bpf_d *d;
{
	/* Cancel a pending read timeout; data has arrived. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	/* Wake sleeping readers (bpfread msleeps on d). */
	wakeup(d);
	/* Deliver the async signal if FIOASYNC and an owner are set. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	/* Notify select/poll waiters and kevent listeners. */
	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}
524
/*
 * Callout handler for the read timeout armed by bpfpoll()/filt_bpfread().
 * Marks the descriptor timed out and wakes the reader if the store
 * buffer already holds data.
 */
static void
bpf_timed_out(arg)
	void *arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	/* Only transition if the timeout is still pending (not canceled). */
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}
539
540static	int
541bpfwrite(dev, uio, ioflag)
542	struct cdev *dev;
543	struct uio *uio;
544	int ioflag;
545{
546	struct bpf_d *d = dev->si_drv1;
547	struct ifnet *ifp;
548	struct mbuf *m;
549	int error;
550	struct sockaddr dst;
551	int datlen;
552
553	if (d->bd_bif == NULL)
554		return (ENXIO);
555
556	ifp = d->bd_bif->bif_ifp;
557
558	if (uio->uio_resid == 0)
559		return (0);
560
561	bzero(&dst, sizeof(dst));
562	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
563	if (error)
564		return (error);
565
566	if (datlen > ifp->if_mtu)
567		return (EMSGSIZE);
568
569	if (d->bd_hdrcmplt)
570		dst.sa_family = pseudo_AF_HDRCMPLT;
571
572#ifdef MAC
573	BPFD_LOCK(d);
574	mac_create_mbuf_from_bpfdesc(d, m);
575	BPFD_UNLOCK(d);
576#endif
577	NET_LOCK_GIANT();
578	error = (*ifp->if_output)(ifp, m, &dst, NULL);
579	NET_UNLOCK_GIANT();
580	/*
581	 * The driver frees the mbuf.
582	 */
583	return (error);
584}
585
586/*
587 * Reset a descriptor by flushing its packet buffer and clearing the
588 * receive and drop counts.
589 */
590static void
591reset_d(d)
592	struct bpf_d *d;
593{
594
595	mtx_assert(&d->bd_mtx, MA_OWNED);
596	if (d->bd_hbuf) {
597		/* Free the hold buffer. */
598		d->bd_fbuf = d->bd_hbuf;
599		d->bd_hbuf = NULL;
600	}
601	d->bd_slen = 0;
602	d->bd_hlen = 0;
603	d->bd_rcount = 0;
604	d->bd_dcount = 0;
605}
606
607/*
608 *  FIONREAD		Check for read packet available.
609 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
610 *  BIOCGBLEN		Get buffer len [for read()].
611 *  BIOCSETF		Set ethernet read filter.
612 *  BIOCFLUSH		Flush read packet buffer.
613 *  BIOCPROMISC		Put interface into promiscuous mode.
614 *  BIOCGDLT		Get link layer type.
615 *  BIOCGETIF		Get interface name.
616 *  BIOCSETIF		Set interface.
617 *  BIOCSRTIMEOUT	Set read timeout.
618 *  BIOCGRTIMEOUT	Get read timeout.
619 *  BIOCGSTATS		Get packet stats.
620 *  BIOCIMMEDIATE	Set immediate mode.
621 *  BIOCVERSION		Get filter language version.
622 *  BIOCGHDRCMPLT	Get "header already complete" flag
623 *  BIOCSHDRCMPLT	Set "header already complete" flag
624 *  BIOCGSEESENT	Get "see packets sent" flag
625 *  BIOCSSEESENT	Set "see packets sent" flag
626 */
627/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, td)
	struct cdev *dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/* Any ioctl cancels a pending read timeout and idles the state. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (buffers are allocated at attach time); the request
	 * is clamped to [BPF_MINBUFSIZE, bpf_maxbufsize] and the
	 * clamped value is written back to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/* Convert bd_rtout (ticks) back to a timeval. */
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 * NOTE(review): counters are read without BPFD_LOCK; values may
	 * be slightly stale relative to a concurrent tap -- confirm this
	 * is an accepted race.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}
920
921/*
922 * Set d's packet filter program to fp.  If this file already has a filter,
923 * free it and replace it.  Returns EINVAL for bogus requests.
924 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	/*
	 * NOTE(review): the old filter pointer is sampled before the
	 * descriptor lock is taken below -- confirm callers serialize
	 * BIOCSETF requests (ioctl path runs under Giant here).
	 */
	old = d->bd_filter;
	if (fp->bf_insns == NULL) {
		/* NULL program with a nonzero length is malformed. */
		if (fp->bf_len != 0)
			return (EINVAL);
		/* Clearing the filter: accept everything from now on. */
		BPFD_LOCK(d);
		d->bd_filter = NULL;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	/* Copy the program in and validate it before installing it. */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		d->bd_filter = fcode;
		/* Flush buffered packets captured under the old filter. */
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
965
966/*
967 * Detach a file from its current interface (if attached at all) and attach
968 * to the interface indicated by the name stored in ifr.
969 * Return an errno or 0.
970 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == NULL || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
			continue;

		/*
		 * NOTE(review): bpf_mtx is dropped here while bp/ifp are
		 * still used below -- relies on the interface not being
		 * detached concurrently.
		 */
		mtx_unlock(&bpf_mtx);
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == NULL) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		return (0);
	}
	mtx_unlock(&bpf_mtx);
	/* Not found. */
	return (ENXIO);
}
1031
1032/*
1033 * Support for select() and poll() system calls
1034 *
1035 * Return true iff the specific operation will not block indefinitely.
1036 * Otherwise, return false but make a note that a selwakeup() must be done.
1037 */
static int
bpfpoll(dev, events, td)
	struct cdev *dev;
	int events;
	struct thread *td;
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return (ENXIO);

	/* Writes never block on a bpf descriptor. */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}
1069
1070/*
1071 * Support for kevent() system call.  Register EVFILT_READ filters and
1072 * reject all others.
1073 */
int
bpfkqfilter(dev, kn)
	struct cdev *dev;
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;

	/* Only EVFILT_READ is supported on bpf descriptors. */
	if (kn->kn_filter != EVFILT_READ)
		return (1);

	/* Hook the note to this descriptor's knote list. */
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 0);

	return (0);
}
1090
1091static void
1092filt_bpfdetach(kn)
1093	struct knote *kn;
1094{
1095	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1096
1097	knlist_remove(&d->bd_sel.si_note, kn, 0);
1098}
1099
/*
 * Knote event callback: report whether the descriptor has data ready,
 * and, like bpfpoll(), arm the read timeout when it does not.
 */
static int
filt_bpfread(kn, hint)
	struct knote *kn;
	long hint;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK(d);
	ready = bpf_ready(d);
	if (ready) {
		/* Report how many bytes a read would return. */
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		/* Not ready: start the read timeout, as bpfpoll() does. */
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}
	BPFD_UNLOCK(d);

	return (ready);
}
1124
1125/*
1126 * Incoming linkage from device drivers.  Process the packet pkt, of length
1127 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
1128 * by each process' filter, and if accepted, stashed into the corresponding
1129 * buffer.
1130 */
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	struct bpf_d *d;
	u_int slen;

	/*
	 * Lockless read to avoid cost of locking the interface if there are
	 * no descriptors attached.
	 */
	if (LIST_EMPTY(&bp->bif_dlist))
		return;

	/* Run every listener's filter over the contiguous buffer. */
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/* slen is the snapshot length the filter accepted (0 = drop). */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0) {
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}
1162
1163/*
1164 * Copy data from an mbuf chain into a buffer.  This code is derived
1165 * from m_copydata in sys/uipc_mbuf.c.
1166 */
1167static void
1168bpf_mcopy(src_arg, dst_arg, len)
1169	const void *src_arg;
1170	void *dst_arg;
1171	size_t len;
1172{
1173	const struct mbuf *m;
1174	u_int count;
1175	u_char *dst;
1176
1177	m = src_arg;
1178	dst = dst_arg;
1179	while (len > 0) {
1180		if (m == NULL)
1181			panic("bpf_mcopy");
1182		count = min(m->m_len, len);
1183		bcopy(mtod(m, void *), dst, count);
1184		m = m->m_next;
1185		dst += count;
1186		len -= count;
1187	}
1188}
1189
1190/*
1191 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1192 */
1193void
1194bpf_mtap(bp, m)
1195	struct bpf_if *bp;
1196	struct mbuf *m;
1197{
1198	struct bpf_d *d;
1199	u_int pktlen, slen;
1200
1201	/*
1202	 * Lockless read to avoid cost of locking the interface if there are
1203	 * no descriptors attached.
1204	 */
1205	if (LIST_EMPTY(&bp->bif_dlist))
1206		return;
1207
1208	pktlen = m_length(m, NULL);
1209	if (pktlen == m->m_len) {
1210		bpf_tap(bp, mtod(m, u_char *), pktlen);
1211		return;
1212	}
1213
1214	BPFIF_LOCK(bp);
1215	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1216		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1217			continue;
1218		BPFD_LOCK(d);
1219		++d->bd_rcount;
1220		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1221		if (slen != 0)
1222#ifdef MAC
1223			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1224#endif
1225				catchpacket(d, (u_char *)m, pktlen, slen,
1226				    bpf_mcopy);
1227		BPFD_UNLOCK(d);
1228	}
1229	BPFIF_UNLOCK(bp);
1230}
1231
1232/*
1233 * Incoming linkage from device drivers, when packet is in
1234 * an mbuf chain and to be prepended by a contiguous header.
1235 */
1236void
1237bpf_mtap2(bp, data, dlen, m)
1238	struct bpf_if *bp;
1239	void *data;
1240	u_int dlen;
1241	struct mbuf *m;
1242{
1243	struct mbuf mb;
1244	struct bpf_d *d;
1245	u_int pktlen, slen;
1246
1247	/*
1248	 * Lockless read to avoid cost of locking the interface if there are
1249	 * no descriptors attached.
1250	 */
1251	if (LIST_EMPTY(&bp->bif_dlist))
1252		return;
1253
1254	pktlen = m_length(m, NULL);
1255	/*
1256	 * Craft on-stack mbuf suitable for passing to bpf_filter.
1257	 * Note that we cut corners here; we only setup what's
1258	 * absolutely needed--this mbuf should never go anywhere else.
1259	 */
1260	mb.m_next = m;
1261	mb.m_data = data;
1262	mb.m_len = dlen;
1263	pktlen += dlen;
1264
1265	BPFIF_LOCK(bp);
1266	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1267		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1268			continue;
1269		BPFD_LOCK(d);
1270		++d->bd_rcount;
1271		slen = bpf_filter(d->bd_filter, (u_char *)&mb, pktlen, 0);
1272		if (slen != 0)
1273#ifdef MAC
1274			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1275#endif
1276				catchpacket(d, (u_char *)&mb, pktlen, slen,
1277				    bpf_mcopy);
1278		BPFD_UNLOCK(d);
1279	}
1280	BPFIF_UNLOCK(bp);
1281}
1282
1283/*
1284 * Move the packet data from interface memory (pkt) into the
1285 * store buffer.  "cpfn" is the routine called to do the actual data
1286 * transfer.  bcopy is passed in to copy contiguous chunks, while
1287 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
1288 * pkt is really an mbuf.
1289 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	struct bpf_d *d;
	u_char *pkt;
	u_int pktlen, snaplen;
	void (*cpfn)(const void *, void *, size_t);
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 * cpfn is bcopy for contiguous packets and bpf_mcopy when pkt is
	 * really an mbuf chain.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
1354
1355/*
1356 * Initialize all nonzero fields of a descriptor.
1357 */
1358static int
1359bpf_allocbufs(d)
1360	struct bpf_d *d;
1361{
1362	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1363	if (d->bd_fbuf == NULL)
1364		return (ENOBUFS);
1365
1366	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1367	if (d->bd_sbuf == NULL) {
1368		free(d->bd_fbuf, M_BPF);
1369		return (ENOBUFS);
1370	}
1371	d->bd_slen = 0;
1372	d->bd_hlen = 0;
1373	return (0);
1374}
1375
1376/*
1377 * Free buffers currently in use by a descriptor.
1378 * Called on close.
1379 */
1380static void
1381bpf_freed(d)
1382	struct bpf_d *d;
1383{
1384	/*
1385	 * We don't need to lock out interrupts since this descriptor has
1386	 * been detached from its interface and it yet hasn't been marked
1387	 * free.
1388	 */
1389	if (d->bd_sbuf != NULL) {
1390		free(d->bd_sbuf, M_BPF);
1391		if (d->bd_hbuf != NULL)
1392			free(d->bd_hbuf, M_BPF);
1393		if (d->bd_fbuf != NULL)
1394			free(d->bd_fbuf, M_BPF);
1395	}
1396	if (d->bd_filter)
1397		free((caddr_t)d->bd_filter, M_BPF);
1398	mtx_destroy(&d->bd_mtx);
1399}
1400
1401/*
1402 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
1403 * fixed size of the link header (variable length headers not yet supported).
1404 */
1405void
1406bpfattach(ifp, dlt, hdrlen)
1407	struct ifnet *ifp;
1408	u_int dlt, hdrlen;
1409{
1410
1411	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
1412}
1413
1414/*
1415 * Attach an interface to bpf.  ifp is a pointer to the structure
1416 * defining the interface to be attached, dlt is the link layer type,
1417 * and hdrlen is the fixed size of the link header (variable length
1418 * headers are not yet supporrted).
1419 */
1420void
1421bpfattach2(ifp, dlt, hdrlen, driverp)
1422	struct ifnet *ifp;
1423	u_int dlt, hdrlen;
1424	struct bpf_if **driverp;
1425{
1426	struct bpf_if *bp;
1427	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
1428	if (bp == NULL)
1429		panic("bpfattach");
1430
1431	LIST_INIT(&bp->bif_dlist);
1432	bp->bif_driverp = driverp;
1433	bp->bif_ifp = ifp;
1434	bp->bif_dlt = dlt;
1435	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
1436
1437	mtx_lock(&bpf_mtx);
1438	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
1439	mtx_unlock(&bpf_mtx);
1440
1441	*bp->bif_driverp = NULL;
1442
1443	/*
1444	 * Compute the length of the bpf header.  This is not necessarily
1445	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1446	 * that the network layer header begins on a longword boundary (for
1447	 * performance reasons and to alleviate alignment restrictions).
1448	 */
1449	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1450
1451	if (bootverbose)
1452		if_printf(ifp, "bpf attached\n");
1453}
1454
1455/*
1456 * Detach bpf from an interface.  This involves detaching each descriptor
1457 * associated with the interface, and leaving bd_bif NULL.  Notify each
1458 * descriptor as it's detached so that any sleepers wake up and get
1459 * ENXIO.
1460 */
1461void
1462bpfdetach(ifp)
1463	struct ifnet *ifp;
1464{
1465	struct bpf_if	*bp;
1466	struct bpf_d	*d;
1467
1468	/* Locate BPF interface information */
1469	mtx_lock(&bpf_mtx);
1470	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1471		if (ifp == bp->bif_ifp)
1472			break;
1473	}
1474
1475	/* Interface wasn't attached */
1476	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
1477		mtx_unlock(&bpf_mtx);
1478		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
1479		return;
1480	}
1481
1482	LIST_REMOVE(bp, bif_next);
1483	mtx_unlock(&bpf_mtx);
1484
1485	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
1486		bpf_detachd(d);
1487		BPFD_LOCK(d);
1488		bpf_wakeup(d);
1489		BPFD_UNLOCK(d);
1490	}
1491
1492	mtx_destroy(&bp->bif_mtx);
1493	free(bp, M_BPF);
1494}
1495
1496/*
1497 * Get a list of available data link type of the interface.
1498 */
1499static int
1500bpf_getdltlist(d, bfl)
1501	struct bpf_d *d;
1502	struct bpf_dltlist *bfl;
1503{
1504	int n, error;
1505	struct ifnet *ifp;
1506	struct bpf_if *bp;
1507
1508	ifp = d->bd_bif->bif_ifp;
1509	n = 0;
1510	error = 0;
1511	mtx_lock(&bpf_mtx);
1512	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1513		if (bp->bif_ifp != ifp)
1514			continue;
1515		if (bfl->bfl_list != NULL) {
1516			if (n >= bfl->bfl_len) {
1517				mtx_unlock(&bpf_mtx);
1518				return (ENOMEM);
1519			}
1520			error = copyout(&bp->bif_dlt,
1521			    bfl->bfl_list + n, sizeof(u_int));
1522		}
1523		n++;
1524	}
1525	mtx_unlock(&bpf_mtx);
1526	bfl->bfl_len = n;
1527	return (error);
1528}
1529
1530/*
1531 * Set the data link type of a BPF instance.
1532 */
1533static int
1534bpf_setdlt(d, dlt)
1535	struct bpf_d *d;
1536	u_int dlt;
1537{
1538	int error, opromisc;
1539	struct ifnet *ifp;
1540	struct bpf_if *bp;
1541
1542	if (d->bd_bif->bif_dlt == dlt)
1543		return (0);
1544	ifp = d->bd_bif->bif_ifp;
1545	mtx_lock(&bpf_mtx);
1546	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
1547		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1548			break;
1549	}
1550	mtx_unlock(&bpf_mtx);
1551	if (bp != NULL) {
1552		BPFD_LOCK(d);
1553		opromisc = d->bd_promisc;
1554		bpf_detachd(d);
1555		bpf_attachd(d, bp);
1556		reset_d(d);
1557		BPFD_UNLOCK(d);
1558		if (opromisc) {
1559			error = ifpromisc(bp->bif_ifp, 1);
1560			if (error)
1561				if_printf(bp->bif_ifp,
1562					"bpf_setdlt: ifpromisc failed (%d)\n",
1563					error);
1564			else
1565				d->bd_promisc = 1;
1566		}
1567	}
1568	return (bp == NULL ? EINVAL : 0);
1569}
1570
1571static void bpf_drvinit(void *unused);
1572
1573static void bpf_clone(void *arg, char *name, int namelen, struct cdev **dev);
1574
1575static void
1576bpf_clone(arg, name, namelen, dev)
1577	void *arg;
1578	char *name;
1579	int namelen;
1580	struct cdev **dev;
1581{
1582	int u;
1583
1584	if (*dev != NULL)
1585		return;
1586	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
1587		return;
1588	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
1589	    "bpf%d", u);
1590	(*dev)->si_flags |= SI_CHEAPCLONE;
1591	return;
1592}
1593
/*
 * Driver initialization: set up the global bpf lock and interface
 * list, then register the /dev/bpfN clone handler.
 */
static void
bpf_drvinit(unused)
	void *unused;
{

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}
1603
1604SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL)
1605
1606#else /* !DEV_BPF && !NETGRAPH_BPF */
1607/*
1608 * NOP stubs to allow bpf-using drivers to load and function.
1609 *
1610 * A 'better' implementation would allow the core bpf functionality
1611 * to be loaded at runtime.
1612 */
1613
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	/* Stub: BPF support not compiled in; discard the tap. */
}
1621
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
	/* Stub: BPF support not compiled in; discard the tap. */
}
1628
void
bpf_mtap2(bp, d, l, m)
	struct bpf_if *bp;
	void *d;
	u_int l;
	struct mbuf *m;
{
	/* Stub: BPF support not compiled in; discard the tap. */
}
1637
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	/* Stub: BPF support not compiled in; nothing to attach. */
}
1644
void
bpfattach2(ifp, dlt, hdrlen, driverp)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
	struct bpf_if **driverp;
{
	/* Stub: BPF support not compiled in; nothing to attach. */
}
1652
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	/* Stub: BPF support not compiled in; nothing to detach. */
}
1658
u_int
bpf_filter(pc, p, wirelen, buflen)
	const struct bpf_insn *pc;
	u_char *p;
	u_int wirelen;
	u_int buflen;
{
	/* Stub: accept the whole packet ((u_int)-1 means unbounded). */
	return -1;	/* "no filter" behaviour */
}
1668
int
bpf_validate(f, len)
	const struct bpf_insn *f;
	int len;
{
	/* Stub: reject every filter program. */
	return 0;		/* false */
}
1676
1677#endif /* !DEV_BPF && !NETGRAPH_BPF */
1678