bpf.c revision 244090
1/*-
2 * Copyright (c) 1990, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: head/sys/net/bpf.c 244090 2012-12-10 16:14:44Z ghelmer $");
39
40#include "opt_bpf.h"
41#include "opt_compat.h"
42#include "opt_netgraph.h"
43
44#include <sys/types.h>
45#include <sys/param.h>
46#include <sys/lock.h>
47#include <sys/rwlock.h>
48#include <sys/systm.h>
49#include <sys/conf.h>
50#include <sys/fcntl.h>
51#include <sys/jail.h>
52#include <sys/malloc.h>
53#include <sys/mbuf.h>
54#include <sys/time.h>
55#include <sys/priv.h>
56#include <sys/proc.h>
57#include <sys/signalvar.h>
58#include <sys/filio.h>
59#include <sys/sockio.h>
60#include <sys/ttycom.h>
61#include <sys/uio.h>
62
63#include <sys/event.h>
64#include <sys/file.h>
65#include <sys/poll.h>
66#include <sys/proc.h>
67
68#include <sys/socket.h>
69
70#include <net/if.h>
71#define	BPF_INTERNAL
72#include <net/bpf.h>
73#include <net/bpf_buffer.h>
74#ifdef BPF_JITTER
75#include <net/bpf_jitter.h>
76#endif
77#include <net/bpf_zerocopy.h>
78#include <net/bpfdesc.h>
79#include <net/vnet.h>
80
81#include <netinet/in.h>
82#include <netinet/if_ether.h>
83#include <sys/kernel.h>
84#include <sys/sysctl.h>
85
86#include <net80211/ieee80211_freebsd.h>
87
88#include <security/mac/mac_framework.h>
89
90MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
91
92#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
93
94#define PRINET  26			/* interruptible */
95
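/*
 * SIZEOF_BPF_HDR() yields the size of a BPF header structure up to and
 * including its bh_hdrlen member, deliberately excluding any trailing
 * padding the compiler may append: e.g., for struct bpf_hdr32 below this
 * is 18 bytes rather than sizeof(struct bpf_hdr32).
 */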
96#define	SIZEOF_BPF_HDR(type)	\
97    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
98
99#ifdef COMPAT_FREEBSD32
100#include <sys/mount.h>
101#include <compat/freebsd32/freebsd32.h>
102#define BPF_ALIGNMENT32 sizeof(int32_t)
103#define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
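/*
 * e.g., BPF_WORDALIGN32(SIZEOF_BPF_HDR(struct bpf_hdr32)) rounds 18 up
 * to 20, keeping records in 32-bit streams on 4-byte boundaries.
 */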
104
105#ifndef BURN_BRIDGES
106/*
107 * 32-bit version of structure prepended to each packet.  We use this header
108 * instead of the standard one for 32-bit streams.  We mark a stream as
109 * 32-bit the first time we see a 32-bit compat ioctl request.
110 */
111struct bpf_hdr32 {
112	struct timeval32 bh_tstamp;	/* time stamp */
113	uint32_t	bh_caplen;	/* length of captured portion */
114	uint32_t	bh_datalen;	/* original length of packet */
115	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
116					   plus alignment padding) */
117};
118#endif
119
120struct bpf_program32 {
121	u_int bf_len;
122	uint32_t bf_insns;
123};
124
125struct bpf_dltlist32 {
126	u_int	bfl_len;
127	u_int	bfl_list;
128};
129
130#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
131#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
132#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
133#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
134#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
135#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
136#endif
137
138/*
139 * bpf_iflist is a list of BPF interface structures, each corresponding to a
140 * specific DLT.  The same network interface might have several BPF interface
141 * structures registered by different layers in the stack (e.g., 802.11
142 * frames, Ethernet frames, etc.).
143 */
144static LIST_HEAD(, bpf_if)	bpf_iflist, bpf_freelist;
145static struct mtx	bpf_mtx;		/* bpf global lock */
146static int		bpf_bpfd_cnt;
147
148static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
149static void	bpf_detachd(struct bpf_d *);
150static void	bpf_detachd_locked(struct bpf_d *);
151static void	bpf_freed(struct bpf_d *);
152static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
153		    struct sockaddr *, int *, struct bpf_insn *);
154static int	bpf_setif(struct bpf_d *, struct ifreq *);
155static void	bpf_timed_out(void *);
156static __inline void
157		bpf_wakeup(struct bpf_d *);
158static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
159		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
160		    struct bintime *);
161static void	reset_d(struct bpf_d *);
162static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
163static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
164static int	bpf_setdlt(struct bpf_d *, u_int);
165static void	filt_bpfdetach(struct knote *);
166static int	filt_bpfread(struct knote *, long);
167static void	bpf_drvinit(void *);
168static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
169
170SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
171int bpf_maxinsns = BPF_MAXINSNS;
172SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
173    &bpf_maxinsns, 0, "Maximum bpf program instructions");
174static int bpf_zerocopy_enable = 0;
175SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
176    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
177static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
178    bpf_stats_sysctl, "bpf statistics portal");
179
180static VNET_DEFINE(int, bpf_optimize_writers) = 0;
181#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
182SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
183    CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
184    "Do not send packets until BPF program is set");
185
186static	d_open_t	bpfopen;
187static	d_read_t	bpfread;
188static	d_write_t	bpfwrite;
189static	d_ioctl_t	bpfioctl;
190static	d_poll_t	bpfpoll;
191static	d_kqfilter_t	bpfkqfilter;
192
193static struct cdevsw bpf_cdevsw = {
194	.d_version =	D_VERSION,
195	.d_open =	bpfopen,
196	.d_read =	bpfread,
197	.d_write =	bpfwrite,
198	.d_ioctl =	bpfioctl,
199	.d_poll =	bpfpoll,
200	.d_name =	"bpf",
201	.d_kqfilter =	bpfkqfilter,
202};
203
204static struct filterops bpfread_filtops = {
205	.f_isfd = 1,
206	.f_detach = filt_bpfdetach,
207	.f_event = filt_bpfread,
208};
209
210eventhandler_tag	bpf_ifdetach_cookie = NULL;
211
212/*
213 * LOCKING MODEL USED BY BPF:
214 * Locks:
215 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
216 * some global counters and every bpf_if reference.
217 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
218 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
219 *   used by bpf_mtap code.
220 *
221 * Lock order:
222 *
223 * Global lock, interface lock, descriptor lock
224 *
225 * We have to acquire the interface lock before the descriptor main lock due to
226 * the BPF_MTAP[2] working model. In many places (like bpf_detachd) we start with
227 * the BPF descriptor (and we need to at least rlock it to get a reliable
228 * interface pointer). This gives us a potential LOR. As a result, we use the
229 * global lock to protect against bpf_if changes in every such place.
230 *
231 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
232 * 3) descriptor main wlock.
233 * Reading bd_bif can be protected by any of these locks, typically global lock.
234 *
235 * Changing the read/write BPF filter is protected by the same three locks;
236 * the same applies to reading it.
237 *
238 * Sleeping in global lock is not allowed due to bpfdetach() using it.
239 */
240
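/*
 * Illustrative sketch of the full ordering for a writer that must touch
 * the descriptor, its filters and its interface binding (cf. bpf_setf()
 * and bpf_attachd()):
 *
 *	BPF_LOCK();
 *	BPFIF_WLOCK(bp);
 *	BPFD_LOCK(d);
 *	... change d->bd_bif, filters or buffers ...
 *	BPFD_UNLOCK(d);
 *	BPFIF_WUNLOCK(bp);
 *	BPF_UNLOCK();
 */
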
241/*
242 * Wrapper functions for various buffering methods.  If the set of buffer
243 * modes expands, we will probably want to introduce a switch data structure
244 * similar to protosw, etc.
245 */
246static void
247bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
248    u_int len)
249{
250
251	BPFD_LOCK_ASSERT(d);
252
253	switch (d->bd_bufmode) {
254	case BPF_BUFMODE_BUFFER:
255		return (bpf_buffer_append_bytes(d, buf, offset, src, len));
256
257	case BPF_BUFMODE_ZBUF:
258		d->bd_zcopy++;
259		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
260
261	default:
262		panic("bpf_buf_append_bytes");
263	}
264}
265
266static void
267bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
268    u_int len)
269{
270
271	BPFD_LOCK_ASSERT(d);
272
273	switch (d->bd_bufmode) {
274	case BPF_BUFMODE_BUFFER:
275		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
276
277	case BPF_BUFMODE_ZBUF:
278		d->bd_zcopy++;
279		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
280
281	default:
282		panic("bpf_buf_append_mbuf");
283	}
284}
285
286/*
287 * This function gets called when the free buffer is re-assigned.
288 */
289static void
290bpf_buf_reclaimed(struct bpf_d *d)
291{
292
293	BPFD_LOCK_ASSERT(d);
294
295	switch (d->bd_bufmode) {
296	case BPF_BUFMODE_BUFFER:
297		return;
298
299	case BPF_BUFMODE_ZBUF:
300		bpf_zerocopy_buf_reclaimed(d);
301		return;
302
303	default:
304		panic("bpf_buf_reclaimed");
305	}
306}
307
308/*
309 * If the buffer mechanism has a way to decide that a held buffer can be made
310 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
311 * returned if the buffer can be discarded, (0) is returned if it cannot.
312 */
313static int
314bpf_canfreebuf(struct bpf_d *d)
315{
316
317	BPFD_LOCK_ASSERT(d);
318
319	switch (d->bd_bufmode) {
320	case BPF_BUFMODE_ZBUF:
321		return (bpf_zerocopy_canfreebuf(d));
322	}
323	return (0);
324}
325
326/*
327 * Allow the buffer model to indicate that the current store buffer is
328 * immutable, regardless of the appearance of space.  Return (1) if the
329 * buffer is writable, and (0) if not.
330 */
331static int
332bpf_canwritebuf(struct bpf_d *d)
333{
334	BPFD_LOCK_ASSERT(d);
335
336	switch (d->bd_bufmode) {
337	case BPF_BUFMODE_ZBUF:
338		return (bpf_zerocopy_canwritebuf(d));
339	}
340	return (1);
341}
342
343/*
344 * Notify buffer model that an attempt to write to the store buffer has
345 * resulted in a dropped packet, in which case the buffer may be considered
346 * full.
347 */
348static void
349bpf_buffull(struct bpf_d *d)
350{
351
352	BPFD_LOCK_ASSERT(d);
353
354	switch (d->bd_bufmode) {
355	case BPF_BUFMODE_ZBUF:
356		bpf_zerocopy_buffull(d);
357		break;
358	}
359}
360
361/*
362 * Notify the buffer model that a buffer has moved into the hold position.
363 */
364void
365bpf_bufheld(struct bpf_d *d)
366{
367
368	BPFD_LOCK_ASSERT(d);
369
370	switch (d->bd_bufmode) {
371	case BPF_BUFMODE_ZBUF:
372		bpf_zerocopy_bufheld(d);
373		break;
374	}
375}
376
377static void
378bpf_free(struct bpf_d *d)
379{
380
381	switch (d->bd_bufmode) {
382	case BPF_BUFMODE_BUFFER:
383		return (bpf_buffer_free(d));
384
385	case BPF_BUFMODE_ZBUF:
386		return (bpf_zerocopy_free(d));
387
388	default:
389		panic("bpf_buf_free");
390	}
391}
392
393static int
394bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
395{
396
397	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
398		return (EOPNOTSUPP);
399	return (bpf_buffer_uiomove(d, buf, len, uio));
400}
401
402static int
403bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
404{
405
406	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
407		return (EOPNOTSUPP);
408	return (bpf_buffer_ioctl_sblen(d, i));
409}
410
411static int
412bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
413{
414
415	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
416		return (EOPNOTSUPP);
417	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
418}
419
420static int
421bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
422{
423
424	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
425		return (EOPNOTSUPP);
426	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
427}
428
429static int
430bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
431{
432
433	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
434		return (EOPNOTSUPP);
435	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
436}
437
438/*
439 * General BPF functions.
440 */
441static int
442bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
443    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
444{
445	const struct ieee80211_bpf_params *p;
446	struct ether_header *eh;
447	struct mbuf *m;
448	int error;
449	int len;
450	int hlen;
451	int slen;
452
453	/*
454	 * Build a sockaddr based on the data link layer type.
455	 * We do this at this level because the ethernet header
456	 * is copied directly into the data field of the sockaddr.
457	 * In the case of SLIP, there is no header and the packet
458	 * is forwarded as is.
459	 * Also, we are careful to leave room at the front of the mbuf
460	 * for the link level header.
461	 */
462	switch (linktype) {
463
464	case DLT_SLIP:
465		sockp->sa_family = AF_INET;
466		hlen = 0;
467		break;
468
469	case DLT_EN10MB:
470		sockp->sa_family = AF_UNSPEC;
471		/* XXX Would MAXLINKHDR be better? */
472		hlen = ETHER_HDR_LEN;
473		break;
474
475	case DLT_FDDI:
476		sockp->sa_family = AF_IMPLINK;
477		hlen = 0;
478		break;
479
480	case DLT_RAW:
481		sockp->sa_family = AF_UNSPEC;
482		hlen = 0;
483		break;
484
485	case DLT_NULL:
486		/*
487		 * null interface types require a 4-byte pseudo header which
488		 * corresponds to the address family of the packet.
489		 */
490		sockp->sa_family = AF_UNSPEC;
491		hlen = 4;
492		break;
493
494	case DLT_ATM_RFC1483:
495		/*
496		 * The "en" ATM driver requires a 4-byte ATM pseudo header.
497		 * Though it isn't standard, the VPI:VCI needs to be
498		 * specified anyway.
499		 */
500		sockp->sa_family = AF_UNSPEC;
501		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
502		break;
503
504	case DLT_PPP:
505		sockp->sa_family = AF_UNSPEC;
506		hlen = 4;	/* This should match PPP_HDRLEN */
507		break;
508
509	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
510		sockp->sa_family = AF_IEEE80211;
511		hlen = 0;
512		break;
513
514	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
515		sockp->sa_family = AF_IEEE80211;
516		sockp->sa_len = 12;	/* XXX != 0 */
517		hlen = sizeof(struct ieee80211_bpf_params);
518		break;
519
520	default:
521		return (EIO);
522	}
523
524	len = uio->uio_resid;
525
526	if (len - hlen > ifp->if_mtu)
527		return (EMSGSIZE);
528
529	if ((unsigned)len > MJUM16BYTES)
530		return (EIO);
531
532	if (len <= MHLEN)
533		MGETHDR(m, M_WAITOK, MT_DATA);
534	else if (len <= MCLBYTES)
535		m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
536	else
537		m = m_getjcl(M_WAITOK, MT_DATA, M_PKTHDR,
538#if (MJUMPAGESIZE > MCLBYTES)
539		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
540#endif
541		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
542	m->m_pkthdr.len = m->m_len = len;
543	m->m_pkthdr.rcvif = NULL;
544	*mp = m;
545
546	if (m->m_len < hlen) {
547		error = EPERM;
548		goto bad;
549	}
550
551	error = uiomove(mtod(m, u_char *), len, uio);
552	if (error)
553		goto bad;
554
555	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
556	if (slen == 0) {
557		error = EPERM;
558		goto bad;
559	}
560
561	/* Check for multicast destination */
562	switch (linktype) {
563	case DLT_EN10MB:
564		eh = mtod(m, struct ether_header *);
565		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
566			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
567			    ETHER_ADDR_LEN) == 0)
568				m->m_flags |= M_BCAST;
569			else
570				m->m_flags |= M_MCAST;
571		}
572		break;
573	}
574
575	/*
576	 * Make room for link header, and copy it to sockaddr
577	 */
578	if (hlen != 0) {
579		if (sockp->sa_family == AF_IEEE80211) {
580			/*
581			 * Collect true length from the parameter header
582			 * NB: sockp is known to be zero'd so if we do a
583			 *     short copy unspecified parameters will be
584			 *     zero.
585			 * NB: packet may not be aligned after stripping
586			 *     bpf params
587			 * XXX check ibp_vers
588			 */
589			p = mtod(m, const struct ieee80211_bpf_params *);
590			hlen = p->ibp_len;
591			if (hlen > sizeof(sockp->sa_data)) {
592				error = EINVAL;
593				goto bad;
594			}
595		}
596		bcopy(m->m_data, sockp->sa_data, hlen);
597	}
598	*hdrlen = hlen;
599
600	return (0);
601bad:
602	m_freem(m);
603	return (error);
604}
605
606/*
607 * Attach file to the bpf interface, i.e. make d listen on bp.
608 */
609static void
610bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
611{
612	int op_w;
613
614	BPF_LOCK_ASSERT();
615
616	/*
617	 * Save the sysctl value to protect against it changing
618	 * between reads.
619	 */
620	op_w = V_bpf_optimize_writers;
621
622	if (d->bd_bif != NULL)
623		bpf_detachd_locked(d);
624	/*
625	 * Point d at bp, and add d to the interface's list.
626	 * Since there are many applications that use BPF for
627	 * sending raw packets only (dhcpd and cdpd are good examples),
628	 * we can delay adding d to the list of active listeners until
629	 * some filter is configured.
630	 */
631
632	BPFIF_WLOCK(bp);
633	BPFD_LOCK(d);
634
635	d->bd_bif = bp;
636
637	if (op_w != 0) {
638		/* Add to writers-only list */
639		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
640		/*
641		 * We decrement bd_writer on every filter set operation.
642		 * The first BIOCSETF is done by pcap_open_live() to set up
643		 * the snap length.  After that the application usually sets its own filter.
644		 */
645		d->bd_writer = 2;
646	} else
647		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
648
649	BPFD_UNLOCK(d);
650	BPFIF_WUNLOCK(bp);
651
652	bpf_bpfd_cnt++;
653
654	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
655	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
656
657	if (op_w == 0)
658		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
659}
660
661/*
662 * Add d to the list of active bp filters.
663 * Requires bpf_attachd() to have been called first.
664 */
665static void
666bpf_upgraded(struct bpf_d *d)
667{
668	struct bpf_if *bp;
669
670	BPF_LOCK_ASSERT();
671
672	bp = d->bd_bif;
673
674	/*
675	 * A filter can be set several times without specifying an interface.
676	 * Mark d as a reader and exit.
677	 */
678	if (bp == NULL) {
679		BPFD_LOCK(d);
680		d->bd_writer = 0;
681		BPFD_UNLOCK(d);
682		return;
683	}
684
685	BPFIF_WLOCK(bp);
686	BPFD_LOCK(d);
687
688	/* Remove from writers-only list */
689	LIST_REMOVE(d, bd_next);
690	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
691	/* Mark d as reader */
692	d->bd_writer = 0;
693
694	BPFD_UNLOCK(d);
695	BPFIF_WUNLOCK(bp);
696
697	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
698
699	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
700}
701
702/*
703 * Detach a file from its interface.
704 */
705static void
706bpf_detachd(struct bpf_d *d)
707{
708	BPF_LOCK();
709	bpf_detachd_locked(d);
710	BPF_UNLOCK();
711}
712
713static void
714bpf_detachd_locked(struct bpf_d *d)
715{
716	int error;
717	struct bpf_if *bp;
718	struct ifnet *ifp;
719
720	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
721
722	BPF_LOCK_ASSERT();
723
724	/* Check if descriptor is attached */
725	if ((bp = d->bd_bif) == NULL)
726		return;
727
728	BPFIF_WLOCK(bp);
729	BPFD_LOCK(d);
730
731	/* Save bd_writer value */
732	error = d->bd_writer;
733
734	/*
735	 * Remove d from the interface's descriptor list.
736	 */
737	LIST_REMOVE(d, bd_next);
738
739	ifp = bp->bif_ifp;
740	d->bd_bif = NULL;
741	BPFD_UNLOCK(d);
742	BPFIF_WUNLOCK(bp);
743
744	bpf_bpfd_cnt--;
745
746	/* Call event handler iff d is attached */
747	if (error == 0)
748		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
749
750	/*
751	 * Check if this descriptor had requested promiscuous mode.
752	 * If so, turn it off.
753	 */
754	if (d->bd_promisc) {
755		d->bd_promisc = 0;
756		CURVNET_SET(ifp->if_vnet);
757		error = ifpromisc(ifp, 0);
758		CURVNET_RESTORE();
759		if (error != 0 && error != ENXIO) {
760			/*
761			 * ENXIO can happen if a pccard is unplugged.
762			 * Something is really wrong if we were able to put
763			 * the driver into promiscuous mode, but can't
764			 * take it out.
765			 */
766			if_printf(bp->bif_ifp,
767				"bpf_detach: ifpromisc failed (%d)\n", error);
768		}
769	}
770}
771
772/*
773 * Close the descriptor by detaching it from its interface,
774 * deallocating its buffers, and marking it free.
775 */
776static void
777bpf_dtor(void *data)
778{
779	struct bpf_d *d = data;
780
781	BPFD_LOCK(d);
782	if (d->bd_state == BPF_WAITING)
783		callout_stop(&d->bd_callout);
784	d->bd_state = BPF_IDLE;
785	BPFD_UNLOCK(d);
786	funsetown(&d->bd_sigio);
787	bpf_detachd(d);
788#ifdef MAC
789	mac_bpfdesc_destroy(d);
790#endif /* MAC */
791	seldrain(&d->bd_sel);
792	knlist_destroy(&d->bd_sel.si_note);
793	callout_drain(&d->bd_callout);
794	bpf_freed(d);
795	free(d, M_BPF);
796}
797
798/*
799 * Open ethernet device.  Each open() gets its own descriptor, so the
800 * historical ENXIO/EBUSY errors for busy minor devices no longer apply.
801 */
802/* ARGSUSED */
803static	int
804bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
805{
806	struct bpf_d *d;
807	int error, size;
808
809	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
810	error = devfs_set_cdevpriv(d, bpf_dtor);
811	if (error != 0) {
812		free(d, M_BPF);
813		return (error);
814	}
815
816	/*
817	 * For historical reasons, perform a one-time initialization call to
818	 * the buffer routines, even though we're not yet committed to a
819	 * particular buffer method.
820	 */
821	bpf_buffer_init(d);
822	d->bd_hbuf_in_use = 0;
823	d->bd_bufmode = BPF_BUFMODE_BUFFER;
824	d->bd_sig = SIGIO;
825	d->bd_direction = BPF_D_INOUT;
826	BPF_PID_REFRESH(d, td);
827#ifdef MAC
828	mac_bpfdesc_init(d);
829	mac_bpfdesc_create(td->td_ucred, d);
830#endif
831	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
832	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
833	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
834
835	/* Allocate default buffers */
836	size = d->bd_bufsize;
837	bpf_buffer_ioctl_sblen(d, &size);
838
839	return (0);
840}
841
842/*
843 *  bpfread - read next chunk of packets from buffers
844 */
845static	int
846bpfread(struct cdev *dev, struct uio *uio, int ioflag)
847{
848	struct bpf_d *d;
849	int error;
850	int non_block;
851	int timed_out;
852
853	error = devfs_get_cdevpriv((void **)&d);
854	if (error != 0)
855		return (error);
856
857	/*
858	 * Restrict application to use a buffer the same size as
859	 * the kernel buffers.
860	 */
861	if (uio->uio_resid != d->bd_bufsize)
862		return (EINVAL);
863
864	non_block = ((ioflag & O_NONBLOCK) != 0);
865
866	BPFD_LOCK(d);
867	BPF_PID_REFRESH_CUR(d);
868	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
869		BPFD_UNLOCK(d);
870		return (EOPNOTSUPP);
871	}
872	if (d->bd_state == BPF_WAITING)
873		callout_stop(&d->bd_callout);
874	timed_out = (d->bd_state == BPF_TIMED_OUT);
875	d->bd_state = BPF_IDLE;
876	while (d->bd_hbuf_in_use)
877		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
878		    PRINET|PCATCH, "bd_hbuf", 0);
879	/*
880	 * If the hold buffer is empty, then do a timed sleep, which
881	 * ends when the timeout expires or when enough packets
882	 * have arrived to fill the store buffer.
883	 */
884	while (d->bd_hbuf == NULL) {
885		if (d->bd_slen != 0) {
886			/*
887			 * One or more packets arrived either since the
888			 * previous read or while we were asleep.
889			 */
890			if (d->bd_immediate || non_block || timed_out) {
891				/*
892				 * Rotate the buffers and return what's here
893				 * if we are in immediate mode, non-blocking
894				 * flag is set, or this descriptor timed out.
895				 */
896				ROTATE_BUFFERS(d);
897				break;
898			}
899		}
900
901		/*
902		 * No data is available, check to see if the bpf device
903		 * is still pointed at a real interface.  If not, return
904		 * ENXIO so that the userland process knows to rebind
905		 * it before using it again.
906		 */
907		if (d->bd_bif == NULL) {
908			BPFD_UNLOCK(d);
909			return (ENXIO);
910		}
911
912		if (non_block) {
913			BPFD_UNLOCK(d);
914			return (EWOULDBLOCK);
915		}
916		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
917		     "bpf", d->bd_rtout);
918		if (error == EINTR || error == ERESTART) {
919			BPFD_UNLOCK(d);
920			return (error);
921		}
922		if (error == EWOULDBLOCK) {
923			/*
924			 * On a timeout, return what's in the buffer,
925			 * which may be nothing.  If there is something
926			 * in the store buffer, we can rotate the buffers.
927			 */
928			if (d->bd_hbuf)
929				/*
930				 * We filled up the buffer in between
931				 * getting the timeout and arriving
932				 * here, so we don't need to rotate.
933				 */
934				break;
935
936			if (d->bd_slen == 0) {
937				BPFD_UNLOCK(d);
938				return (0);
939			}
940			ROTATE_BUFFERS(d);
941			break;
942		}
943	}
944	/*
945	 * At this point, we know we have something in the hold slot.
946	 */
947	d->bd_hbuf_in_use = 1;
948	BPFD_UNLOCK(d);
949
950	/*
951	 * Move data from hold buffer into user space.
952	 * We know the entire buffer is transferred since
953	 * we checked above that the read buffer is bpf_bufsize bytes.
954	 *
955	 * We do not have to worry about simultaneous reads because
956	 * we waited for sole access to the hold buffer above.
957	 */
958	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
959
960	BPFD_LOCK(d);
961	KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
962	d->bd_fbuf = d->bd_hbuf;
963	d->bd_hbuf = NULL;
964	d->bd_hlen = 0;
965	bpf_buf_reclaimed(d);
966	d->bd_hbuf_in_use = 0;
967	wakeup(&d->bd_hbuf_in_use);
968	BPFD_UNLOCK(d);
969
970	return (error);
971}
972
973/*
974 * If there are processes sleeping on this descriptor, wake them up.
975 */
976static __inline void
977bpf_wakeup(struct bpf_d *d)
978{
979
980	BPFD_LOCK_ASSERT(d);
981	if (d->bd_state == BPF_WAITING) {
982		callout_stop(&d->bd_callout);
983		d->bd_state = BPF_IDLE;
984	}
985	wakeup(d);
986	if (d->bd_async && d->bd_sig && d->bd_sigio)
987		pgsigio(&d->bd_sigio, d->bd_sig, 0);
988
989	selwakeuppri(&d->bd_sel, PRINET);
990	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
991}
992
993static void
994bpf_timed_out(void *arg)
995{
996	struct bpf_d *d = (struct bpf_d *)arg;
997
998	BPFD_LOCK_ASSERT(d);
999
1000	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
1001		return;
1002	if (d->bd_state == BPF_WAITING) {
1003		d->bd_state = BPF_TIMED_OUT;
1004		if (d->bd_slen != 0)
1005			bpf_wakeup(d);
1006	}
1007}
1008
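/*
 * Return non-zero if a read can be satisfied right away: either a hold
 * buffer is ready (and cannot simply be reclaimed), or immediate mode or
 * an expired read timeout lets us hand over the partly filled store
 * buffer.
 */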
1009static int
1010bpf_ready(struct bpf_d *d)
1011{
1012
1013	BPFD_LOCK_ASSERT(d);
1014
1015	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1016		return (1);
1017	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1018	    d->bd_slen != 0)
1019		return (1);
1020	return (0);
1021}
1022
1023static int
1024bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1025{
1026	struct bpf_d *d;
1027	struct ifnet *ifp;
1028	struct mbuf *m, *mc;
1029	struct sockaddr dst;
1030	int error, hlen;
1031
1032	error = devfs_get_cdevpriv((void **)&d);
1033	if (error != 0)
1034		return (error);
1035
1036	BPF_PID_REFRESH_CUR(d);
1037	d->bd_wcount++;
1038	/* XXX: locking required */
1039	if (d->bd_bif == NULL) {
1040		d->bd_wdcount++;
1041		return (ENXIO);
1042	}
1043
1044	ifp = d->bd_bif->bif_ifp;
1045
1046	if ((ifp->if_flags & IFF_UP) == 0) {
1047		d->bd_wdcount++;
1048		return (ENETDOWN);
1049	}
1050
1051	if (uio->uio_resid == 0) {
1052		d->bd_wdcount++;
1053		return (0);
1054	}
1055
1056	bzero(&dst, sizeof(dst));
1057	m = NULL;
1058	hlen = 0;
1059	/* XXX: bpf_movein() can sleep */
1060	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1061	    &m, &dst, &hlen, d->bd_wfilter);
1062	if (error) {
1063		d->bd_wdcount++;
1064		return (error);
1065	}
1066	d->bd_wfcount++;
1067	if (d->bd_hdrcmplt)
1068		dst.sa_family = pseudo_AF_HDRCMPLT;
1069
1070	if (d->bd_feedback) {
1071		mc = m_dup(m, M_NOWAIT);
1072		if (mc != NULL)
1073			mc->m_pkthdr.rcvif = ifp;
1074		/* Set M_PROMISC for outgoing packets to be discarded. */
1075		if (d->bd_direction == BPF_D_INOUT)
1076			m->m_flags |= M_PROMISC;
1077	} else
1078		mc = NULL;
1079
1080	m->m_pkthdr.len -= hlen;
1081	m->m_len -= hlen;
1082	m->m_data += hlen;	/* XXX */
1083
1084	CURVNET_SET(ifp->if_vnet);
1085#ifdef MAC
1086	BPFD_LOCK(d);
1087	mac_bpfdesc_create_mbuf(d, m);
1088	if (mc != NULL)
1089		mac_bpfdesc_create_mbuf(d, mc);
1090	BPFD_UNLOCK(d);
1091#endif
1092
1093	error = (*ifp->if_output)(ifp, m, &dst, NULL);
1094	if (error)
1095		d->bd_wdcount++;
1096
1097	if (mc != NULL) {
1098		if (error == 0)
1099			(*ifp->if_input)(ifp, mc);
1100		else
1101			m_freem(mc);
1102	}
1103	CURVNET_RESTORE();
1104
1105	return (error);
1106}
1107
1108/*
1109 * Reset a descriptor by flushing its packet buffer and clearing the receive
1110 * and drop counts.  This is doable for kernel-only buffers, but with
1111 * zero-copy buffers, we can't write to (or rotate) buffers that are
1112 * currently owned by userspace.  It would be nice if we could encapsulate
1113 * this logic in the buffer code rather than here.
1114 */
1115static void
1116reset_d(struct bpf_d *d)
1117{
1118
1119	BPFD_LOCK_ASSERT(d);
1120
1121	while (d->bd_hbuf_in_use)
1122		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1123		    "bd_hbuf", 0);
1124	if ((d->bd_hbuf != NULL) &&
1125	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1126		/* Free the hold buffer. */
1127		d->bd_fbuf = d->bd_hbuf;
1128		d->bd_hbuf = NULL;
1129		d->bd_hlen = 0;
1130		bpf_buf_reclaimed(d);
1131	}
1132	if (bpf_canwritebuf(d))
1133		d->bd_slen = 0;
1134	d->bd_rcount = 0;
1135	d->bd_dcount = 0;
1136	d->bd_fcount = 0;
1137	d->bd_wcount = 0;
1138	d->bd_wfcount = 0;
1139	d->bd_wdcount = 0;
1140	d->bd_zcopy = 0;
1141}
1142
1143/*
1144 *  FIONREAD		Check for read packet available.
1145 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
1146 *  BIOCGBLEN		Get buffer len [for read()].
1147 *  BIOCSETF		Set read filter.
1148 *  BIOCSETFNR		Set read filter without resetting descriptor.
1149 *  BIOCSETWF		Set write filter.
1150 *  BIOCFLUSH		Flush read packet buffer.
1151 *  BIOCPROMISC		Put interface into promiscuous mode.
1152 *  BIOCGDLT		Get link layer type.
1153 *  BIOCGETIF		Get interface name.
1154 *  BIOCSETIF		Set interface.
1155 *  BIOCSRTIMEOUT	Set read timeout.
1156 *  BIOCGRTIMEOUT	Get read timeout.
1157 *  BIOCGSTATS		Get packet stats.
1158 *  BIOCIMMEDIATE	Set immediate mode.
1159 *  BIOCVERSION		Get filter language version.
1160 *  BIOCGHDRCMPLT	Get "header already complete" flag
1161 *  BIOCSHDRCMPLT	Set "header already complete" flag
1162 *  BIOCGDIRECTION	Get packet direction flag
1163 *  BIOCSDIRECTION	Set packet direction flag
1164 *  BIOCGTSTAMP		Get time stamp format and resolution.
1165 *  BIOCSTSTAMP		Set time stamp format and resolution.
1166 *  BIOCLOCK		Set "locked" flag
1167 *  BIOCFEEDBACK	Set packet feedback mode.
1168 *  BIOCSETZBUF		Set current zero-copy buffer locations.
1169 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
1170 *  BIOCROTZBUF		Force rotation of zero-copy buffer
1171 *  BIOCSETBUFMODE	Set buffer mode.
1172 *  BIOCGETBUFMODE	Get current buffer mode.
1173 */
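/*
 * Illustrative userland sketch of the read path (not part of this file;
 * error handling omitted, "em0" is just an example interface name):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	u_int blen;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	ioctl(fd, BIOCGBLEN, &blen);
 *	char *buf = malloc(blen);
 *	read(fd, buf, blen);	(must be exactly the kernel buffer size)
 *
 * Each record in buf starts with a struct bpf_hdr; step to the next one
 * with BPF_WORDALIGN(bh_hdrlen + bh_caplen).
 */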
1174/* ARGSUSED */
1175static	int
1176bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1177    struct thread *td)
1178{
1179	struct bpf_d *d;
1180	int error;
1181
1182	error = devfs_get_cdevpriv((void **)&d);
1183	if (error != 0)
1184		return (error);
1185
1186	/*
1187	 * Refresh PID associated with this descriptor.
1188	 */
1189	BPFD_LOCK(d);
1190	BPF_PID_REFRESH(d, td);
1191	if (d->bd_state == BPF_WAITING)
1192		callout_stop(&d->bd_callout);
1193	d->bd_state = BPF_IDLE;
1194	BPFD_UNLOCK(d);
1195
1196	if (d->bd_locked == 1) {
1197		switch (cmd) {
1198		case BIOCGBLEN:
1199		case BIOCFLUSH:
1200		case BIOCGDLT:
1201		case BIOCGDLTLIST:
1202#ifdef COMPAT_FREEBSD32
1203		case BIOCGDLTLIST32:
1204#endif
1205		case BIOCGETIF:
1206		case BIOCGRTIMEOUT:
1207#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1208		case BIOCGRTIMEOUT32:
1209#endif
1210		case BIOCGSTATS:
1211		case BIOCVERSION:
1212		case BIOCGRSIG:
1213		case BIOCGHDRCMPLT:
1214		case BIOCSTSTAMP:
1215		case BIOCFEEDBACK:
1216		case FIONREAD:
1217		case BIOCLOCK:
1218		case BIOCSRTIMEOUT:
1219#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1220		case BIOCSRTIMEOUT32:
1221#endif
1222		case BIOCIMMEDIATE:
1223		case TIOCGPGRP:
1224		case BIOCROTZBUF:
1225			break;
1226		default:
1227			return (EPERM);
1228		}
1229	}
1230#ifdef COMPAT_FREEBSD32
1231	/*
1232	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1233	 * that it will get 32-bit packet headers.
1234	 */
1235	switch (cmd) {
1236	case BIOCSETF32:
1237	case BIOCSETFNR32:
1238	case BIOCSETWF32:
1239	case BIOCGDLTLIST32:
1240	case BIOCGRTIMEOUT32:
1241	case BIOCSRTIMEOUT32:
1242		BPFD_LOCK(d);
1243		d->bd_compat32 = 1;
1244		BPFD_UNLOCK(d);
1245	}
1246#endif
1247
1248	CURVNET_SET(TD_TO_VNET(td));
1249	switch (cmd) {
1250
1251	default:
1252		error = EINVAL;
1253		break;
1254
1255	/*
1256	 * Check for read packet available.
1257	 */
1258	case FIONREAD:
1259		{
1260			int n;
1261
1262			BPFD_LOCK(d);
1263			n = d->bd_slen;
1264			while (d->bd_hbuf_in_use)
1265				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1266				    PRINET, "bd_hbuf", 0);
1267			if (d->bd_hbuf)
1268				n += d->bd_hlen;
1269			BPFD_UNLOCK(d);
1270
1271			*(int *)addr = n;
1272			break;
1273		}
1274
1275	case SIOCGIFADDR:
1276		{
1277			struct ifnet *ifp;
1278
1279			if (d->bd_bif == NULL)
1280				error = EINVAL;
1281			else {
1282				ifp = d->bd_bif->bif_ifp;
1283				error = (*ifp->if_ioctl)(ifp, cmd, addr);
1284			}
1285			break;
1286		}
1287
1288	/*
1289	 * Get buffer len [for read()].
1290	 */
1291	case BIOCGBLEN:
1292		BPFD_LOCK(d);
1293		*(u_int *)addr = d->bd_bufsize;
1294		BPFD_UNLOCK(d);
1295		break;
1296
1297	/*
1298	 * Set buffer length.
1299	 */
1300	case BIOCSBLEN:
1301		error = bpf_ioctl_sblen(d, (u_int *)addr);
1302		break;
1303
1304	/*
1305	 * Set link layer read filter.
1306	 */
1307	case BIOCSETF:
1308	case BIOCSETFNR:
1309	case BIOCSETWF:
1310#ifdef COMPAT_FREEBSD32
1311	case BIOCSETF32:
1312	case BIOCSETFNR32:
1313	case BIOCSETWF32:
1314#endif
1315		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1316		break;
1317
1318	/*
1319	 * Flush read packet buffer.
1320	 */
1321	case BIOCFLUSH:
1322		BPFD_LOCK(d);
1323		reset_d(d);
1324		BPFD_UNLOCK(d);
1325		break;
1326
1327	/*
1328	 * Put interface into promiscuous mode.
1329	 */
1330	case BIOCPROMISC:
1331		if (d->bd_bif == NULL) {
1332			/*
1333			 * No interface attached yet.
1334			 */
1335			error = EINVAL;
1336			break;
1337		}
1338		if (d->bd_promisc == 0) {
1339			error = ifpromisc(d->bd_bif->bif_ifp, 1);
1340			if (error == 0)
1341				d->bd_promisc = 1;
1342		}
1343		break;
1344
1345	/*
1346	 * Get current data link type.
1347	 */
1348	case BIOCGDLT:
1349		BPF_LOCK();
1350		if (d->bd_bif == NULL)
1351			error = EINVAL;
1352		else
1353			*(u_int *)addr = d->bd_bif->bif_dlt;
1354		BPF_UNLOCK();
1355		break;
1356
1357	/*
1358	 * Get a list of supported data link types.
1359	 */
1360#ifdef COMPAT_FREEBSD32
1361	case BIOCGDLTLIST32:
1362		{
1363			struct bpf_dltlist32 *list32;
1364			struct bpf_dltlist dltlist;
1365
1366			list32 = (struct bpf_dltlist32 *)addr;
1367			dltlist.bfl_len = list32->bfl_len;
1368			dltlist.bfl_list = PTRIN(list32->bfl_list);
1369			BPF_LOCK();
1370			if (d->bd_bif == NULL)
1371				error = EINVAL;
1372			else {
1373				error = bpf_getdltlist(d, &dltlist);
1374				if (error == 0)
1375					list32->bfl_len = dltlist.bfl_len;
1376			}
1377			BPF_UNLOCK();
1378			break;
1379		}
1380#endif
1381
1382	case BIOCGDLTLIST:
1383		BPF_LOCK();
1384		if (d->bd_bif == NULL)
1385			error = EINVAL;
1386		else
1387			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1388		BPF_UNLOCK();
1389		break;
1390
1391	/*
1392	 * Set data link type.
1393	 */
1394	case BIOCSDLT:
1395		BPF_LOCK();
1396		if (d->bd_bif == NULL)
1397			error = EINVAL;
1398		else
1399			error = bpf_setdlt(d, *(u_int *)addr);
1400		BPF_UNLOCK();
1401		break;
1402
1403	/*
1404	 * Get interface name.
1405	 */
1406	case BIOCGETIF:
1407		BPF_LOCK();
1408		if (d->bd_bif == NULL)
1409			error = EINVAL;
1410		else {
1411			struct ifnet *const ifp = d->bd_bif->bif_ifp;
1412			struct ifreq *const ifr = (struct ifreq *)addr;
1413
1414			strlcpy(ifr->ifr_name, ifp->if_xname,
1415			    sizeof(ifr->ifr_name));
1416		}
1417		BPF_UNLOCK();
1418		break;
1419
1420	/*
1421	 * Set interface.
1422	 */
1423	case BIOCSETIF:
1424		BPF_LOCK();
1425		error = bpf_setif(d, (struct ifreq *)addr);
1426		BPF_UNLOCK();
1427		break;
1428
1429	/*
1430	 * Set read timeout.
1431	 */
1432	case BIOCSRTIMEOUT:
1433#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1434	case BIOCSRTIMEOUT32:
1435#endif
1436		{
1437			struct timeval *tv = (struct timeval *)addr;
1438#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1439			struct timeval32 *tv32;
1440			struct timeval tv64;
1441
1442			if (cmd == BIOCSRTIMEOUT32) {
1443				tv32 = (struct timeval32 *)addr;
1444				tv = &tv64;
1445				tv->tv_sec = tv32->tv_sec;
1446				tv->tv_usec = tv32->tv_usec;
1447			} else
1448#endif
1449				tv = (struct timeval *)addr;
1450
1451			/*
1452			 * Subtract 1 tick from tvtohz() since this isn't
1453			 * a one-shot timer.
1454			 */
1455			if ((error = itimerfix(tv)) == 0)
1456				d->bd_rtout = tvtohz(tv) - 1;
1457			break;
1458		}
1459
1460	/*
1461	 * Get read timeout.
1462	 */
1463	case BIOCGRTIMEOUT:
1464#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1465	case BIOCGRTIMEOUT32:
1466#endif
1467		{
1468			struct timeval *tv;
1469#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1470			struct timeval32 *tv32;
1471			struct timeval tv64;
1472
1473			if (cmd == BIOCGRTIMEOUT32)
1474				tv = &tv64;
1475			else
1476#endif
1477				tv = (struct timeval *)addr;
1478
1479			tv->tv_sec = d->bd_rtout / hz;
1480			tv->tv_usec = (d->bd_rtout % hz) * tick;
1481#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1482			if (cmd == BIOCGRTIMEOUT32) {
1483				tv32 = (struct timeval32 *)addr;
1484				tv32->tv_sec = tv->tv_sec;
1485				tv32->tv_usec = tv->tv_usec;
1486			}
1487#endif
1488
1489			break;
1490		}
1491
1492	/*
1493	 * Get packet stats.
1494	 */
1495	case BIOCGSTATS:
1496		{
1497			struct bpf_stat *bs = (struct bpf_stat *)addr;
1498
1499			/* XXXCSJP overflow */
1500			bs->bs_recv = d->bd_rcount;
1501			bs->bs_drop = d->bd_dcount;
1502			break;
1503		}
1504
1505	/*
1506	 * Set immediate mode.
1507	 */
1508	case BIOCIMMEDIATE:
1509		BPFD_LOCK(d);
1510		d->bd_immediate = *(u_int *)addr;
1511		BPFD_UNLOCK(d);
1512		break;
1513
1514	case BIOCVERSION:
1515		{
1516			struct bpf_version *bv = (struct bpf_version *)addr;
1517
1518			bv->bv_major = BPF_MAJOR_VERSION;
1519			bv->bv_minor = BPF_MINOR_VERSION;
1520			break;
1521		}
1522
1523	/*
1524	 * Get "header already complete" flag
1525	 */
1526	case BIOCGHDRCMPLT:
1527		BPFD_LOCK(d);
1528		*(u_int *)addr = d->bd_hdrcmplt;
1529		BPFD_UNLOCK(d);
1530		break;
1531
1532	/*
1533	 * Set "header already complete" flag
1534	 */
1535	case BIOCSHDRCMPLT:
1536		BPFD_LOCK(d);
1537		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1538		BPFD_UNLOCK(d);
1539		break;
1540
1541	/*
1542	 * Get packet direction flag
1543	 */
1544	case BIOCGDIRECTION:
1545		BPFD_LOCK(d);
1546		*(u_int *)addr = d->bd_direction;
1547		BPFD_UNLOCK(d);
1548		break;
1549
1550	/*
1551	 * Set packet direction flag
1552	 */
1553	case BIOCSDIRECTION:
1554		{
1555			u_int	direction;
1556
1557			direction = *(u_int *)addr;
1558			switch (direction) {
1559			case BPF_D_IN:
1560			case BPF_D_INOUT:
1561			case BPF_D_OUT:
1562				BPFD_LOCK(d);
1563				d->bd_direction = direction;
1564				BPFD_UNLOCK(d);
1565				break;
1566			default:
1567				error = EINVAL;
1568			}
1569		}
1570		break;
1571
1572	/*
1573	 * Get packet timestamp format and resolution.
1574	 */
1575	case BIOCGTSTAMP:
1576		BPFD_LOCK(d);
1577		*(u_int *)addr = d->bd_tstamp;
1578		BPFD_UNLOCK(d);
1579		break;
1580
1581	/*
1582	 * Set packet timestamp format and resolution.
1583	 */
1584	case BIOCSTSTAMP:
1585		{
1586			u_int	func;
1587
1588			func = *(u_int *)addr;
1589			if (BPF_T_VALID(func))
1590				d->bd_tstamp = func;
1591			else
1592				error = EINVAL;
1593		}
1594		break;
1595
1596	case BIOCFEEDBACK:
1597		BPFD_LOCK(d);
1598		d->bd_feedback = *(u_int *)addr;
1599		BPFD_UNLOCK(d);
1600		break;
1601
1602	case BIOCLOCK:
1603		BPFD_LOCK(d);
1604		d->bd_locked = 1;
1605		BPFD_UNLOCK(d);
1606		break;
1607
1608	case FIONBIO:		/* Non-blocking I/O */
1609		break;
1610
1611	case FIOASYNC:		/* Send signal on receive packets */
1612		BPFD_LOCK(d);
1613		d->bd_async = *(int *)addr;
1614		BPFD_UNLOCK(d);
1615		break;
1616
1617	case FIOSETOWN:
1618		/*
1619		 * XXX: Add some sort of locking here?
1620		 * fsetown() can sleep.
1621		 */
1622		error = fsetown(*(int *)addr, &d->bd_sigio);
1623		break;
1624
1625	case FIOGETOWN:
1626		BPFD_LOCK(d);
1627		*(int *)addr = fgetown(&d->bd_sigio);
1628		BPFD_UNLOCK(d);
1629		break;
1630
1631	/* This is deprecated, FIOSETOWN should be used instead. */
1632	case TIOCSPGRP:
1633		error = fsetown(-(*(int *)addr), &d->bd_sigio);
1634		break;
1635
1636	/* This is deprecated, FIOGETOWN should be used instead. */
1637	case TIOCGPGRP:
1638		*(int *)addr = -fgetown(&d->bd_sigio);
1639		break;
1640
1641	case BIOCSRSIG:		/* Set receive signal */
1642		{
1643			u_int sig;
1644
1645			sig = *(u_int *)addr;
1646
1647			if (sig >= NSIG)
1648				error = EINVAL;
1649			else {
1650				BPFD_LOCK(d);
1651				d->bd_sig = sig;
1652				BPFD_UNLOCK(d);
1653			}
1654			break;
1655		}
1656	case BIOCGRSIG:
1657		BPFD_LOCK(d);
1658		*(u_int *)addr = d->bd_sig;
1659		BPFD_UNLOCK(d);
1660		break;
1661
1662	case BIOCGETBUFMODE:
1663		BPFD_LOCK(d);
1664		*(u_int *)addr = d->bd_bufmode;
1665		BPFD_UNLOCK(d);
1666		break;
1667
1668	case BIOCSETBUFMODE:
1669		/*
1670		 * Allow the buffering mode to be changed as long as we
1671		 * haven't yet committed to a particular mode.  Our
1672		 * definition of commitment, for now, is whether or not a
1673		 * buffer has been allocated or an interface attached, since
1674		 * that's the point where things get tricky.
1675		 */
1676		switch (*(u_int *)addr) {
1677		case BPF_BUFMODE_BUFFER:
1678			break;
1679
1680		case BPF_BUFMODE_ZBUF:
1681			if (bpf_zerocopy_enable)
1682				break;
1683			/* FALLTHROUGH */
1684
1685		default:
1686			CURVNET_RESTORE();
1687			return (EINVAL);
1688		}
1689
1690		BPFD_LOCK(d);
1691		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1692		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
1693			BPFD_UNLOCK(d);
1694			CURVNET_RESTORE();
1695			return (EBUSY);
1696		}
1697		d->bd_bufmode = *(u_int *)addr;
1698		BPFD_UNLOCK(d);
1699		break;
1700
1701	case BIOCGETZMAX:
1702		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1703		break;
1704
1705	case BIOCSETZBUF:
1706		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1707		break;
1708
1709	case BIOCROTZBUF:
1710		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1711		break;
1712	}
1713	CURVNET_RESTORE();
1714	return (error);
1715}
1716
1717/*
1718 * Set d's packet filter program to fp.  If this file already has a filter,
1719 * free it and replace it.  Returns EINVAL for bogus requests.
1720 *
1721 * Note we need global lock here to serialize bpf_setf() and bpf_setif() calls
1722 * since reading d->bd_bif can't be protected by d or interface lock due to
1723 * lock order.
1724 *
1725 * Additionally, we have to acquire the interface write lock because bpf_mtap()
1726 * uses the interface read lock to read all filters.
1727 *
1728 */
1729static int
1730bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1731{
1732#ifdef COMPAT_FREEBSD32
1733	struct bpf_program fp_swab;
1734	struct bpf_program32 *fp32;
1735#endif
1736	struct bpf_insn *fcode, *old;
1737#ifdef BPF_JITTER
1738	bpf_jit_filter *jfunc, *ofunc;
1739#endif
1740	size_t size;
1741	u_int flen;
1742	int need_upgrade;
1743
1744#ifdef COMPAT_FREEBSD32
1745	switch (cmd) {
1746	case BIOCSETF32:
1747	case BIOCSETWF32:
1748	case BIOCSETFNR32:
1749		fp32 = (struct bpf_program32 *)fp;
1750		fp_swab.bf_len = fp32->bf_len;
1751		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1752		fp = &fp_swab;
1753		switch (cmd) {
1754		case BIOCSETF32:
1755			cmd = BIOCSETF;
1756			break;
1757		case BIOCSETWF32:
1758			cmd = BIOCSETWF;
1759			break;
1760		}
1761		break;
1762	}
1763#endif
1764
1765	fcode = NULL;
1766#ifdef BPF_JITTER
1767	jfunc = ofunc = NULL;
1768#endif
1769	need_upgrade = 0;
1770
1771	/*
1772	 * Check the new filter's validity before acquiring any locks.
1773	 * Allocate memory for the new filter, if needed.
1774	 */
1775	flen = fp->bf_len;
1776	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1777		return (EINVAL);
1778	size = flen * sizeof(*fp->bf_insns);
1779	if (size > 0) {
1780		/* We're setting up new filter.  Copy and check actual data. */
1781		fcode = malloc(size, M_BPF, M_WAITOK);
1782		if (copyin(fp->bf_insns, fcode, size) != 0 ||
1783		    !bpf_validate(fcode, flen)) {
1784			free(fcode, M_BPF);
1785			return (EINVAL);
1786		}
1787#ifdef BPF_JITTER
1788		/* Filter is copied inside fcode and is perfectly valid. */
1789		jfunc = bpf_jitter(fcode, flen);
1790#endif
1791	}
1792
1793	BPF_LOCK();
1794
1795	/*
1796	 * Set up new filter.
1797	 * Protect filter change by interface lock.
1798	 * Additionally, we are protected by global lock here.
1799	 */
1800	if (d->bd_bif != NULL)
1801		BPFIF_WLOCK(d->bd_bif);
1802	BPFD_LOCK(d);
1803	if (cmd == BIOCSETWF) {
1804		old = d->bd_wfilter;
1805		d->bd_wfilter = fcode;
1806	} else {
1807		old = d->bd_rfilter;
1808		d->bd_rfilter = fcode;
1809#ifdef BPF_JITTER
1810		ofunc = d->bd_bfilter;
1811		d->bd_bfilter = jfunc;
1812#endif
1813		if (cmd == BIOCSETF)
1814			reset_d(d);
1815
1816		if (fcode != NULL) {
1817			/*
1818			 * Do not require an upgrade on the first BIOCSETF,
1819			 * which pcap_open_live() uses to set the snaplen.
1820			 */
1821			if (d->bd_writer != 0 && --d->bd_writer == 0)
1822				need_upgrade = 1;
1823			CTR4(KTR_NET, "%s: filter function set by pid %d, "
1824			    "bd_writer counter %d, need_upgrade %d",
1825			    __func__, d->bd_pid, d->bd_writer, need_upgrade);
1826		}
1827	}
1828	BPFD_UNLOCK(d);
1829	if (d->bd_bif != NULL)
1830		BPFIF_WUNLOCK(d->bd_bif);
1831	if (old != NULL)
1832		free(old, M_BPF);
1833#ifdef BPF_JITTER
1834	if (ofunc != NULL)
1835		bpf_destroy_jit_filter(ofunc);
1836#endif
1837
1838	/* Move d to active readers list. */
1839	if (need_upgrade)
1840		bpf_upgraded(d);
1841
1842	BPF_UNLOCK();
1843	return (0);
1844}
1845
1846/*
1847 * Detach a file from its current interface (if attached at all) and attach
1848 * to the interface indicated by the name stored in ifr.
1849 * Return an errno or 0.
1850 */
1851static int
1852bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1853{
1854	struct bpf_if *bp;
1855	struct ifnet *theywant;
1856
1857	BPF_LOCK_ASSERT();
1858
1859	theywant = ifunit(ifr->ifr_name);
1860	if (theywant == NULL || theywant->if_bpf == NULL)
1861		return (ENXIO);
1862
1863	bp = theywant->if_bpf;
1864
1865	/* Check if interface is not being detached from BPF */
1866	BPFIF_RLOCK(bp);
1867	if (bp->flags & BPFIF_FLAG_DYING) {
1868		BPFIF_RUNLOCK(bp);
1869		return (ENXIO);
1870	}
1871	BPFIF_RUNLOCK(bp);
1872
1873	/*
1874	 * Behavior here depends on the buffering model.  If we're using
1875	 * kernel memory buffers, then we can allocate them here.  If we're
1876	 * using zero-copy, then the user process must have registered
1877	 * buffers by the time we get here.  If not, return an error.
1878	 */
1879	switch (d->bd_bufmode) {
1880	case BPF_BUFMODE_BUFFER:
1881	case BPF_BUFMODE_ZBUF:
1882		if (d->bd_sbuf == NULL)
1883			return (EINVAL);
1884		break;
1885
1886	default:
1887		panic("bpf_setif: bufmode %d", d->bd_bufmode);
1888	}
1889	if (bp != d->bd_bif)
1890		bpf_attachd(d, bp);
1891	BPFD_LOCK(d);
1892	reset_d(d);
1893	BPFD_UNLOCK(d);
1894	return (0);
1895}
1896
1897/*
1898 * Support for select() and poll() system calls
1899 *
1900 * Return true iff the specific operation will not block indefinitely.
1901 * Otherwise, return false but make a note that a selwakeup() must be done.
1902 */
1903static int
1904bpfpoll(struct cdev *dev, int events, struct thread *td)
1905{
1906	struct bpf_d *d;
1907	int revents;
1908
1909	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1910		return (events &
1911		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1912
1913	/*
1914	 * Refresh PID associated with this descriptor.
1915	 */
1916	revents = events & (POLLOUT | POLLWRNORM);
1917	BPFD_LOCK(d);
1918	BPF_PID_REFRESH(d, td);
1919	if (events & (POLLIN | POLLRDNORM)) {
1920		if (bpf_ready(d))
1921			revents |= events & (POLLIN | POLLRDNORM);
1922		else {
1923			selrecord(td, &d->bd_sel);
1924			/* Start the read timeout if necessary. */
1925			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1926				callout_reset(&d->bd_callout, d->bd_rtout,
1927				    bpf_timed_out, d);
1928				d->bd_state = BPF_WAITING;
1929			}
1930		}
1931	}
1932	BPFD_UNLOCK(d);
1933	return (revents);
1934}
1935
1936/*
1937 * Support for kevent() system call.  Register EVFILT_READ filters and
1938 * reject all others.
1939 */
1940int
1941bpfkqfilter(struct cdev *dev, struct knote *kn)
1942{
1943	struct bpf_d *d;
1944
1945	if (devfs_get_cdevpriv((void **)&d) != 0 ||
1946	    kn->kn_filter != EVFILT_READ)
1947		return (1);
1948
1949	/*
1950	 * Refresh PID associated with this descriptor.
1951	 */
1952	BPFD_LOCK(d);
1953	BPF_PID_REFRESH_CUR(d);
1954	kn->kn_fop = &bpfread_filtops;
1955	kn->kn_hook = d;
1956	knlist_add(&d->bd_sel.si_note, kn, 1);
1957	BPFD_UNLOCK(d);
1958
1959	return (0);
1960}
1961
1962static void
1963filt_bpfdetach(struct knote *kn)
1964{
1965	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1966
1967	knlist_remove(&d->bd_sel.si_note, kn, 0);
1968}
1969
1970static int
1971filt_bpfread(struct knote *kn, long hint)
1972{
1973	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1974	int ready;
1975
1976	BPFD_LOCK_ASSERT(d);
1977	ready = bpf_ready(d);
1978	if (ready) {
1979		kn->kn_data = d->bd_slen;
1980		while (d->bd_hbuf_in_use)
1981			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1982			    PRINET, "bd_hbuf", 0);
1983		if (d->bd_hbuf)
1984			kn->kn_data += d->bd_hlen;
1985	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1986		callout_reset(&d->bd_callout, d->bd_rtout,
1987		    bpf_timed_out, d);
1988		d->bd_state = BPF_WAITING;
1989	}
1990
1991	return (ready);
1992}
1993
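/*
 * Relative quality of the available time stamps, lowest to highest:
 * none, "fast" (cached getbinuptime()), normal (full binuptime()) and
 * external (a stamp the driver already attached to the mbuf).
 */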
1994#define	BPF_TSTAMP_NONE		0
1995#define	BPF_TSTAMP_FAST		1
1996#define	BPF_TSTAMP_NORMAL	2
1997#define	BPF_TSTAMP_EXTERN	3
1998
1999static int
2000bpf_ts_quality(int tstype)
2001{
2002
2003	if (tstype == BPF_T_NONE)
2004		return (BPF_TSTAMP_NONE);
2005	if ((tstype & BPF_T_FAST) != 0)
2006		return (BPF_TSTAMP_FAST);
2007
2008	return (BPF_TSTAMP_NORMAL);
2009}
2010
2011static int
2012bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2013{
2014	struct m_tag *tag;
2015	int quality;
2016
2017	quality = bpf_ts_quality(tstype);
2018	if (quality == BPF_TSTAMP_NONE)
2019		return (quality);
2020
2021	if (m != NULL) {
2022		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2023		if (tag != NULL) {
2024			*bt = *(struct bintime *)(tag + 1);
2025			return (BPF_TSTAMP_EXTERN);
2026		}
2027	}
2028	if (quality == BPF_TSTAMP_NORMAL)
2029		binuptime(bt);
2030	else
2031		getbinuptime(bt);
2032
2033	return (quality);
2034}
2035
2036/*
2037 * Incoming linkage from device drivers.  Process the packet pkt, of length
2038 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
2039 * by each process' filter, and if accepted, stashed into the corresponding
2040 * buffer.
2041 */
2042void
2043bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2044{
2045	struct bintime bt;
2046	struct bpf_d *d;
2047#ifdef BPF_JITTER
2048	bpf_jit_filter *bf;
2049#endif
2050	u_int slen;
2051	int gottime;
2052
2053	gottime = BPF_TSTAMP_NONE;
2054
2055	BPFIF_RLOCK(bp);
2056
2057	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2058		/*
2059		 * We are not using any locks for d here because:
2060		 * 1) any filter change is protected by interface
2061		 * write lock
2062		 * 2) destroying/detaching d is protected by interface
2063		 * write lock, too
2064		 */
2065
2066		/* XXX: Do not protect counter for the sake of performance. */
2067		++d->bd_rcount;
2068		/*
2069		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2070		 * way for the caller to indicate to us whether this packet
2071		 * is inbound or outbound.  In the bpf_mtap() routines, we use
2072		 * the interface pointers on the mbuf to figure it out.
2073		 */
2074#ifdef BPF_JITTER
2075		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2076		if (bf != NULL)
2077			slen = (*(bf->func))(pkt, pktlen, pktlen);
2078		else
2079#endif
2080		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2081		if (slen != 0) {
2082			/*
2083			 * Filter matches.  Acquire the descriptor lock.
2084			 */
2085			BPFD_LOCK(d);
2086
2087			d->bd_fcount++;
2088			if (gottime < bpf_ts_quality(d->bd_tstamp))
2089				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2090#ifdef MAC
2091			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2092#endif
2093				catchpacket(d, pkt, pktlen, slen,
2094				    bpf_append_bytes, &bt);
2095			BPFD_UNLOCK(d);
2096		}
2097	}
2098	BPFIF_RUNLOCK(bp);
2099}
2100
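/*
 * Drop a packet if its direction does not match the descriptor's setting:
 * BPF_D_IN keeps only packets received on the tap interface (rcvif == ifp),
 * BPF_D_OUT keeps only packets that were not (rcvif != ifp).
 */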
2101#define	BPF_CHECK_DIRECTION(d, r, i)				\
2102	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
2103	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
2104
2105/*
2106 * Incoming linkage from device drivers, when the packet is in an mbuf chain.
2107 * Locking model is explained in bpf_tap().
2108 */
2109void
2110bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2111{
2112	struct bintime bt;
2113	struct bpf_d *d;
2114#ifdef BPF_JITTER
2115	bpf_jit_filter *bf;
2116#endif
2117	u_int pktlen, slen;
2118	int gottime;
2119
2120	/* Skip outgoing duplicate packets. */
2121	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2122		m->m_flags &= ~M_PROMISC;
2123		return;
2124	}
2125
2126	pktlen = m_length(m, NULL);
2127	gottime = BPF_TSTAMP_NONE;
2128
2129	BPFIF_RLOCK(bp);
2130
2131	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2132		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2133			continue;
2134		++d->bd_rcount;
2135#ifdef BPF_JITTER
2136		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2137		/* XXX We cannot handle multiple mbufs. */
2138		if (bf != NULL && m->m_next == NULL)
2139			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2140		else
2141#endif
2142		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2143		if (slen != 0) {
2144			BPFD_LOCK(d);
2145
2146			d->bd_fcount++;
2147			if (gottime < bpf_ts_quality(d->bd_tstamp))
2148				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2149#ifdef MAC
2150			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2151#endif
2152				catchpacket(d, (u_char *)m, pktlen, slen,
2153				    bpf_append_mbuf, &bt);
2154			BPFD_UNLOCK(d);
2155		}
2156	}
2157	BPFIF_RUNLOCK(bp);
2158}
2159
2160/*
2161 * Incoming linkage from device drivers, when the packet is in an mbuf
2162 * chain and a contiguous header is to be prepended.
2163 */
2164void
2165bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2166{
2167	struct bintime bt;
2168	struct mbuf mb;
2169	struct bpf_d *d;
2170	u_int pktlen, slen;
2171	int gottime;
2172
2173	/* Skip outgoing duplicate packets. */
2174	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2175		m->m_flags &= ~M_PROMISC;
2176		return;
2177	}
2178
2179	pktlen = m_length(m, NULL);
2180	/*
2181	 * Craft on-stack mbuf suitable for passing to bpf_filter.
2182	 * Note that we cut corners here; we only set up what's
2183	 * absolutely needed--this mbuf should never go anywhere else.
2184	 */
2185	mb.m_next = m;
2186	mb.m_data = data;
2187	mb.m_len = dlen;
2188	pktlen += dlen;
2189
2190	gottime = BPF_TSTAMP_NONE;
2191
2192	BPFIF_RLOCK(bp);
2193
2194	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2195		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2196			continue;
2197		++d->bd_rcount;
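		/* A buflen of 0 tells bpf_filter() the packet is an mbuf chain. */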
2198		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2199		if (slen != 0) {
2200			BPFD_LOCK(d);
2201
2202			d->bd_fcount++;
2203			if (gottime < bpf_ts_quality(d->bd_tstamp))
2204				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2205#ifdef MAC
2206			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2207#endif
2208				catchpacket(d, (u_char *)&mb, pktlen, slen,
2209				    bpf_append_mbuf, &bt);
2210			BPFD_UNLOCK(d);
2211		}
2212	}
2213	BPFIF_RUNLOCK(bp);
2214}
2215
2216#undef	BPF_CHECK_DIRECTION
2217
2218#undef	BPF_TSTAMP_NONE
2219#undef	BPF_TSTAMP_FAST
2220#undef	BPF_TSTAMP_NORMAL
2221#undef	BPF_TSTAMP_EXTERN
2222
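/*
 * Compute the amount of store-buffer space reserved for the per-packet BPF
 * header on this descriptor.  The header layout depends on the timestamp
 * format (legacy bpf_hdr, its 32-bit compat variant, or bpf_xhdr), and the
 * result is padded so that the packet payload following the fixed-size
 * link-layer header lands on a word boundary.
 */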
2223static int
2224bpf_hdrlen(struct bpf_d *d)
2225{
2226	int hdrlen;
2227
2228	hdrlen = d->bd_bif->bif_hdrlen;
2229#ifndef BURN_BRIDGES
2230	if (d->bd_tstamp == BPF_T_NONE ||
2231	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2232#ifdef COMPAT_FREEBSD32
2233		if (d->bd_compat32)
2234			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2235		else
2236#endif
2237			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2238	else
2239#endif
2240		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2241#ifdef COMPAT_FREEBSD32
2242	if (d->bd_compat32)
2243		hdrlen = BPF_WORDALIGN32(hdrlen);
2244	else
2245#endif
2246		hdrlen = BPF_WORDALIGN(hdrlen);
2247
2248	return (hdrlen - d->bd_bif->bif_hdrlen);
2249}
2250
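/*
 * Convert a bintime capture stamp into the bpf_ts representation selected
 * by the descriptor's timestamp mode: microseconds, nanoseconds, or raw
 * bintime.  Unless a monotonic stamp was requested, the boot time is added
 * to convert the uptime-based value into wall-clock time.
 */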
2251static void
2252bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2253{
2254	struct bintime bt2;
2255	struct timeval tsm;
2256	struct timespec tsn;
2257
2258	if ((tstype & BPF_T_MONOTONIC) == 0) {
2259		bt2 = *bt;
2260		bintime_add(&bt2, &boottimebin);
2261		bt = &bt2;
2262	}
2263	switch (BPF_T_FORMAT(tstype)) {
2264	case BPF_T_MICROTIME:
2265		bintime2timeval(bt, &tsm);
2266		ts->bt_sec = tsm.tv_sec;
2267		ts->bt_frac = tsm.tv_usec;
2268		break;
2269	case BPF_T_NANOTIME:
2270		bintime2timespec(bt, &tsn);
2271		ts->bt_sec = tsn.tv_sec;
2272		ts->bt_frac = tsn.tv_nsec;
2273		break;
2274	case BPF_T_BINTIME:
2275		ts->bt_sec = bt->sec;
2276		ts->bt_frac = bt->frac;
2277		break;
2278	}
2279}
2280
2281/*
2282 * Move the packet data from interface memory (pkt) into the
2283 * store buffer.  "cpfn" is the routine called to do the actual data
2284 * transfer.  bpf_append_bytes is passed in to copy contiguous chunks, while
2285 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
2286 * pkt is really an mbuf.
2287 */
2288static void
2289catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2290    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2291    struct bintime *bt)
2292{
2293	struct bpf_xhdr hdr;
2294#ifndef BURN_BRIDGES
2295	struct bpf_hdr hdr_old;
2296#ifdef COMPAT_FREEBSD32
2297	struct bpf_hdr32 hdr32_old;
2298#endif
2299#endif
2300	int caplen, curlen, hdrlen, totlen;
2301	int do_wakeup = 0;
2302	int do_timestamp;
2303	int tstype;
2304
2305	BPFD_LOCK_ASSERT(d);
2306
2307	/*
2308	 * Detect whether user space has released a buffer back to us, and if
2309	 * so, move it from being a hold buffer to a free buffer.  This may
2310	 * not be the best place to do it (for example, we might only want to
2311	 * run this check if we need the space), but for now it's a reliable
2312	 * spot to do it.
2313	 */
2314	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2315		while (d->bd_hbuf_in_use)
2316			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2317			    PRINET, "bd_hbuf", 0);
2318		d->bd_fbuf = d->bd_hbuf;
2319		d->bd_hbuf = NULL;
2320		d->bd_hlen = 0;
2321		bpf_buf_reclaimed(d);
2322	}
2323
2324	/*
2325	 * Figure out how many bytes to move.  If the packet is
2326	 * greater than or equal to the snapshot length, transfer that
2327	 * much.  Otherwise, transfer the whole packet (unless
2328	 * we hit the buffer size limit).
2329	 */
2330	hdrlen = bpf_hdrlen(d);
2331	totlen = hdrlen + min(snaplen, pktlen);
2332	if (totlen > d->bd_bufsize)
2333		totlen = d->bd_bufsize;
2334
2335	/*
2336	 * Round up the end of the previous packet to the next longword.
2337	 *
2338	 * Drop the packet if there's no room and no hope of room.
2339	 * If the packet would overflow the storage buffer or the storage
2340	 * buffer is considered immutable by the buffer model, try to rotate
2341	 * the buffer and wake up pending processes.
2342	 */
2343#ifdef COMPAT_FREEBSD32
2344	if (d->bd_compat32)
2345		curlen = BPF_WORDALIGN32(d->bd_slen);
2346	else
2347#endif
2348		curlen = BPF_WORDALIGN(d->bd_slen);
2349	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2350		if (d->bd_fbuf == NULL) {
2351			/*
2352			 * There's no room in the store buffer, and no
2353			 * prospect of room, so drop the packet.  Notify the
2354			 * buffer model.
2355			 */
2356			bpf_buffull(d);
2357			++d->bd_dcount;
2358			return;
2359		}
2360		while (d->bd_hbuf_in_use)
2361			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
2362			    PRINET, "bd_hbuf", 0);
2363		ROTATE_BUFFERS(d);
2364		do_wakeup = 1;
2365		curlen = 0;
2366	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2367		/*
2368		 * Immediate mode is set, or the read timeout has already
2369		 * expired during a select call.  A packet arrived, so the
2370		 * reader should be woken up.
2371		 */
2372		do_wakeup = 1;
2373	caplen = totlen - hdrlen;
2374	tstype = d->bd_tstamp;
2375	do_timestamp = tstype != BPF_T_NONE;
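	/*
	 * For the default microsecond timestamp format, emit the traditional
	 * fixed-layout bpf_hdr (or its 32-bit compat variant) so existing
	 * consumers keep working; other formats use the extended bpf_xhdr
	 * below.
	 */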
2376#ifndef BURN_BRIDGES
2377	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2378		struct bpf_ts ts;
2379		if (do_timestamp)
2380			bpf_bintime2ts(bt, &ts, tstype);
2381#ifdef COMPAT_FREEBSD32
2382		if (d->bd_compat32) {
2383			bzero(&hdr32_old, sizeof(hdr32_old));
2384			if (do_timestamp) {
2385				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2386				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2387			}
2388			hdr32_old.bh_datalen = pktlen;
2389			hdr32_old.bh_hdrlen = hdrlen;
2390			hdr32_old.bh_caplen = caplen;
2391			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2392			    sizeof(hdr32_old));
2393			goto copy;
2394		}
2395#endif
2396		bzero(&hdr_old, sizeof(hdr_old));
2397		if (do_timestamp) {
2398			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2399			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2400		}
2401		hdr_old.bh_datalen = pktlen;
2402		hdr_old.bh_hdrlen = hdrlen;
2403		hdr_old.bh_caplen = caplen;
2404		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2405		    sizeof(hdr_old));
2406		goto copy;
2407	}
2408#endif
2409
2410	/*
2411	 * Append the bpf header.  Note we append the actual header size, but
2412	 * move forward the length of the header plus padding.
2413	 */
2414	bzero(&hdr, sizeof(hdr));
2415	if (do_timestamp)
2416		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2417	hdr.bh_datalen = pktlen;
2418	hdr.bh_hdrlen = hdrlen;
2419	hdr.bh_caplen = caplen;
2420	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2421
2422	/*
2423	 * Copy the packet data into the store buffer and update its length.
2424	 */
2425#ifndef BURN_BRIDGES
2426copy:
2427#endif
2428	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2429	d->bd_slen = curlen + totlen;
2430
2431	if (do_wakeup)
2432		bpf_wakeup(d);
2433}
2434
2435/*
2436 * Free buffers currently in use by a descriptor.
2437 * Called on close.
2438 */
2439static void
2440bpf_freed(struct bpf_d *d)
2441{
2442
2443	/*
2444	 * We don't need to lock out interrupts since this descriptor has
2445	 * been detached from its interface and it hasn't yet been marked
2446	 * free.
2447	 */
2448	bpf_free(d);
2449	if (d->bd_rfilter != NULL) {
2450		free((caddr_t)d->bd_rfilter, M_BPF);
2451#ifdef BPF_JITTER
2452		if (d->bd_bfilter != NULL)
2453			bpf_destroy_jit_filter(d->bd_bfilter);
2454#endif
2455	}
2456	if (d->bd_wfilter != NULL)
2457		free((caddr_t)d->bd_wfilter, M_BPF);
2458	mtx_destroy(&d->bd_lock);
2459}
2460
2461/*
2462 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
2463 * fixed size of the link header (variable length headers not yet supported).
2464 */
2465void
2466bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2467{
2468
2469	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2470}
2471
2472/*
2473 * Attach an interface to bpf.  ifp is a pointer to the structure
2474 * defining the interface to be attached, dlt is the link layer type,
2475 * and hdrlen is the fixed size of the link header (variable length
2476 * headers are not yet supported).
2477 */
2478void
2479bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2480{
2481	struct bpf_if *bp;
2482
2483	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2484	if (bp == NULL)
2485		panic("bpfattach");
2486
2487	LIST_INIT(&bp->bif_dlist);
2488	LIST_INIT(&bp->bif_wlist);
2489	bp->bif_ifp = ifp;
2490	bp->bif_dlt = dlt;
2491	rw_init(&bp->bif_lock, "bpf interface lock");
2492	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2493	*driverp = bp;
2494
2495	BPF_LOCK();
2496	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2497	BPF_UNLOCK();
2498
2499	bp->bif_hdrlen = hdrlen;
2500
2501	if (bootverbose)
2502		if_printf(ifp, "bpf attached\n");
2503}
2504
2505/*
2506 * Detach bpf from an interface. This involves detaching each descriptor
2507 * associated with the interface. Notify each descriptor as it's detached
2508 * so that any sleepers wake up and get ENXIO.
2509 */
2510void
2511bpfdetach(struct ifnet *ifp)
2512{
2513	struct bpf_if	*bp, *bp_temp;
2514	struct bpf_d	*d;
2515	int ndetached;
2516
2517	ndetached = 0;
2518
2519	BPF_LOCK();
2520	/* Find all bpf_if structs that reference ifp and detach them. */
2521	LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
2522		if (ifp != bp->bif_ifp)
2523			continue;
2524
2525		LIST_REMOVE(bp, bif_next);
2526		/* Add to to-be-freed list */
2527		LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
2528
2529		ndetached++;
2530		/*
2531		 * Delay freeing bp until the interface is detached
2532		 * and all routes through this interface are removed.
2533		 * Mark bp as detached to restrict new consumers.
2534		 */
2535		BPFIF_WLOCK(bp);
2536		bp->flags |= BPFIF_FLAG_DYING;
2537		BPFIF_WUNLOCK(bp);
2538
2539		CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
2540		    __func__, bp->bif_dlt, bp, ifp);
2541
2542		/* Free common descriptors */
2543		while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2544			bpf_detachd_locked(d);
2545			BPFD_LOCK(d);
2546			bpf_wakeup(d);
2547			BPFD_UNLOCK(d);
2548		}
2549
2550		/* Free writer-only descriptors */
2551		while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2552			bpf_detachd_locked(d);
2553			BPFD_LOCK(d);
2554			bpf_wakeup(d);
2555			BPFD_UNLOCK(d);
2556		}
2557	}
2558	BPF_UNLOCK();
2559
2560#ifdef INVARIANTS
2561	if (ndetached == 0)
2562		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2563#endif
2564}
2565
2566/*
2567 * Interface departure handler.
2568 * Note that a departure event does not guarantee the interface is going down.
2569 * Interface renaming is currently done via the departure/arrival event set.
2570 *
2571 * The departure handler is called after all routes pointing to the
2572 * given interface have been removed and the interface is in the down state,
2573 * so no packets can be sent or received. We assume it is now safe
2574 * to free data allocated by BPF.
2575 */
2576static void
2577bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2578{
2579	struct bpf_if *bp, *bp_temp;
2580	int nmatched = 0;
2581
2582	BPF_LOCK();
2583	/*
2584	 * Find matching entries in free list.
2585	 * Nothing should be found if bpfdetach() was not called.
2586	 */
2587	LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
2588		if (ifp != bp->bif_ifp)
2589			continue;
2590
2591		CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2592		    __func__, bp, ifp);
2593
2594		LIST_REMOVE(bp, bif_next);
2595
2596		rw_destroy(&bp->bif_lock);
2597		free(bp, M_BPF);
2598
2599		nmatched++;
2600	}
2601	BPF_UNLOCK();
2602
2603	/*
2604	 * Note that we cannot zero other pointers to
2605 * custom DLTs possibly used by the given interface.
2606	 */
2607	if (nmatched != 0)
2608		ifp->if_bpf = NULL;
2609}
2610
2611/*
2612 * Get a list of the available data link types of the interface.
2613 */
2614static int
2615bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2616{
2617	int n, error;
2618	struct ifnet *ifp;
2619	struct bpf_if *bp;
2620
2621	BPF_LOCK_ASSERT();
2622
2623	ifp = d->bd_bif->bif_ifp;
2624	n = 0;
2625	error = 0;
2626	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2627		if (bp->bif_ifp != ifp)
2628			continue;
2629		if (bfl->bfl_list != NULL) {
2630			if (n >= bfl->bfl_len)
2631				return (ENOMEM);
2632			error = copyout(&bp->bif_dlt,
2633			    bfl->bfl_list + n, sizeof(u_int));
2634		}
2635		n++;
2636	}
2637	bfl->bfl_len = n;
2638	return (error);
2639}
2640
2641/*
2642 * Set the data link type of a BPF instance.
2643 */
2644static int
2645bpf_setdlt(struct bpf_d *d, u_int dlt)
2646{
2647	int error, opromisc;
2648	struct ifnet *ifp;
2649	struct bpf_if *bp;
2650
2651	BPF_LOCK_ASSERT();
2652
2653	if (d->bd_bif->bif_dlt == dlt)
2654		return (0);
2655	ifp = d->bd_bif->bif_ifp;
2656
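	/*
	 * Look for another attachment point on the same interface that
	 * provides the requested DLT.
	 */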
2657	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2658		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2659			break;
2660	}
2661
2662	if (bp != NULL) {
2663		opromisc = d->bd_promisc;
2664		bpf_attachd(d, bp);
2665		BPFD_LOCK(d);
2666		reset_d(d);
2667		BPFD_UNLOCK(d);
2668		if (opromisc) {
2669			error = ifpromisc(bp->bif_ifp, 1);
2670			if (error)
2671				if_printf(bp->bif_ifp,
2672					"bpf_setdlt: ifpromisc failed (%d)\n",
2673					error);
2674			else
2675				d->bd_promisc = 1;
2676		}
2677	}
2678	return (bp == NULL ? EINVAL : 0);
2679}
2680
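/*
 * Driver initialization: set up the global BPF lock and interface lists,
 * create the /dev/bpf device node (with a legacy /dev/bpf0 alias), and
 * register the interface departure event handler.
 */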
2681static void
2682bpf_drvinit(void *unused)
2683{
2684	struct cdev *dev;
2685
2686	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2687	LIST_INIT(&bpf_iflist);
2688	LIST_INIT(&bpf_freelist);
2689
2690	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2691	/* For compatibility */
2692	make_dev_alias(dev, "bpf0");
2693
2694	/* Register interface departure handler */
2695	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2696		    ifnet_departure_event, bpf_ifdetach, NULL,
2697		    EVENTHANDLER_PRI_ANY);
2698}
2699
2700/*
2701 * Zero out the various packet counters associated with all of the bpf
2702 * descriptors.  At some point, we will probably want to get a bit more
2703 * granular and allow the user to specify descriptors to be zeroed.
2704 */
2705static void
2706bpf_zero_counters(void)
2707{
2708	struct bpf_if *bp;
2709	struct bpf_d *bd;
2710
2711	BPF_LOCK();
2712	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2713		BPFIF_RLOCK(bp);
2714		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2715			BPFD_LOCK(bd);
2716			bd->bd_rcount = 0;
2717			bd->bd_dcount = 0;
2718			bd->bd_fcount = 0;
2719			bd->bd_wcount = 0;
2720			bd->bd_wfcount = 0;
2721			bd->bd_zcopy = 0;
2722			BPFD_UNLOCK(bd);
2723		}
2724		BPFIF_RUNLOCK(bp);
2725	}
2726	BPF_UNLOCK();
2727}
2728
2729/*
2730 * Fill in filter statistics.
2731 */
2732static void
2733bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2734{
2735
2736	bzero(d, sizeof(*d));
2737	BPFD_LOCK_ASSERT(bd);
2738	d->bd_structsize = sizeof(*d);
2739	/* XXX: reading should be protected by global lock */
2740	d->bd_immediate = bd->bd_immediate;
2741	d->bd_promisc = bd->bd_promisc;
2742	d->bd_hdrcmplt = bd->bd_hdrcmplt;
2743	d->bd_direction = bd->bd_direction;
2744	d->bd_feedback = bd->bd_feedback;
2745	d->bd_async = bd->bd_async;
2746	d->bd_rcount = bd->bd_rcount;
2747	d->bd_dcount = bd->bd_dcount;
2748	d->bd_fcount = bd->bd_fcount;
2749	d->bd_sig = bd->bd_sig;
2750	d->bd_slen = bd->bd_slen;
2751	d->bd_hlen = bd->bd_hlen;
2752	d->bd_bufsize = bd->bd_bufsize;
2753	d->bd_pid = bd->bd_pid;
2754	strlcpy(d->bd_ifname,
2755	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2756	d->bd_locked = bd->bd_locked;
2757	d->bd_wcount = bd->bd_wcount;
2758	d->bd_wdcount = bd->bd_wdcount;
2759	d->bd_wfcount = bd->bd_wfcount;
2760	d->bd_zcopy = bd->bd_zcopy;
2761	d->bd_bufmode = bd->bd_bufmode;
2762}
2763
2764/*
2765 * Handle `netstat -B' stats request
2766 */
2767static int
2768bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2769{
2770	struct xbpf_d *xbdbuf, *xbd, zerostats;
2771	int index, error;
2772	struct bpf_if *bp;
2773	struct bpf_d *bd;
2774
2775	/*
2776 * XXX This is not technically correct. It is possible for non-
2777 * privileged users to open bpf devices. It would make sense
2778	 * if the users who opened the devices were able to retrieve
2779	 * the statistics for them, too.
2780	 */
2781	error = priv_check(req->td, PRIV_NET_BPF);
2782	if (error)
2783		return (error);
2784	/*
2785	 * Check to see if the user is requesting that the counters be
2786	 * zeroed out.  Explicitly check that the supplied data is zeroed,
2787	 * as we aren't allowing the user to set the counters currently.
2788	 */
2789	if (req->newptr != NULL) {
2790		if (req->newlen != sizeof(zerostats))
2791			return (EINVAL);
2792		bzero(&zerostats, sizeof(zerostats));
2793		xbd = req->newptr;
2794		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
2795			return (EINVAL);
2796		bpf_zero_counters();
2797		return (0);
2798	}
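	/*
	 * A NULL old pointer is a size probe; report the worst-case buffer
	 * size for the current number of descriptors.
	 */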
2799	if (req->oldptr == NULL)
2800		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2801	if (bpf_bpfd_cnt == 0)
2802		return (SYSCTL_OUT(req, 0, 0));
2803	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2804	BPF_LOCK();
2805	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2806		BPF_UNLOCK();
2807		free(xbdbuf, M_BPF);
2808		return (ENOMEM);
2809	}
2810	index = 0;
2811	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2812		BPFIF_RLOCK(bp);
2813		/* Send writer-only descriptors first. */
2814		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2815			xbd = &xbdbuf[index++];
2816			BPFD_LOCK(bd);
2817			bpfstats_fill_xbpf(xbd, bd);
2818			BPFD_UNLOCK(bd);
2819		}
2820		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2821			xbd = &xbdbuf[index++];
2822			BPFD_LOCK(bd);
2823			bpfstats_fill_xbpf(xbd, bd);
2824			BPFD_UNLOCK(bd);
2825		}
2826		BPFIF_RUNLOCK(bp);
2827	}
2828	BPF_UNLOCK();
2829	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2830	free(xbdbuf, M_BPF);
2831	return (error);
2832}
2833
2834SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2835
2836#else /* !DEV_BPF && !NETGRAPH_BPF */
2837/*
2838 * NOP stubs to allow bpf-using drivers to load and function.
2839 *
2840 * A 'better' implementation would allow the core bpf functionality
2841 * to be loaded at runtime.
2842 */
2843static struct bpf_if bp_null;
2844
2845void
2846bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2847{
2848}
2849
2850void
2851bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2852{
2853}
2854
2855void
2856bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2857{
2858}
2859
2860void
2861bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2862{
2863
2864	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2865}
2866
2867void
2868bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2869{
2870
2871	*driverp = &bp_null;
2872}
2873
2874void
2875bpfdetach(struct ifnet *ifp)
2876{
2877}
2878
2879u_int
2880bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2881{
2882	return (-1);	/* "no filter" behaviour */
2883}
2884
2885int
2886bpf_validate(const struct bpf_insn *f, int len)
2887{
2888	return (0);		/* false */
2889}
2890
2891#endif /* !DEV_BPF && !NETGRAPH_BPF */
2892