1/*-
2 * Copyright (c) 1990, 1991, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * This code is derived from the Stanford/CMU enet packet filter,
6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8 * Berkeley Laboratory.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 *    notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 *    notice, this list of conditions and the following disclaimer in the
17 *    documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 *    may be used to endorse or promote products derived from this software
20 *    without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
35 */
36
37#include <sys/cdefs.h>
38__FBSDID("$FreeBSD: stable/10/sys/net/bpf.c 299615 2016-05-13 08:04:57Z ngie $");
39
40#include "opt_bpf.h"
41#include "opt_compat.h"
42#include "opt_netgraph.h"
43
44#include <sys/types.h>
45#include <sys/param.h>
46#include <sys/lock.h>
47#include <sys/rwlock.h>
48#include <sys/systm.h>
49#include <sys/conf.h>
50#include <sys/fcntl.h>
51#include <sys/jail.h>
52#include <sys/malloc.h>
53#include <sys/mbuf.h>
54#include <sys/time.h>
55#include <sys/priv.h>
56#include <sys/proc.h>
57#include <sys/signalvar.h>
58#include <sys/filio.h>
59#include <sys/sockio.h>
60#include <sys/ttycom.h>
61#include <sys/uio.h>
62
63#include <sys/event.h>
64#include <sys/file.h>
65#include <sys/poll.h>
66#include <sys/proc.h>
67
68#include <sys/socket.h>
69
70#include <net/if.h>
71#define	BPF_INTERNAL
72#include <net/bpf.h>
73#include <net/bpf_buffer.h>
74#ifdef BPF_JITTER
75#include <net/bpf_jitter.h>
76#endif
77#include <net/bpf_zerocopy.h>
78#include <net/bpfdesc.h>
79#include <net/vnet.h>
80
81#include <netinet/in.h>
82#include <netinet/if_ether.h>
83#include <sys/kernel.h>
84#include <sys/sysctl.h>
85
86#include <net80211/ieee80211_freebsd.h>
87
88#include <security/mac/mac_framework.h>
89
90MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
91
92#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
93
94#define PRINET  26			/* interruptible */
95
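/*
 * Size of the bpf header prefix up to and including the bh_hdrlen field,
 * i.e., without any trailing padding the full structure might carry.
 */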
96#define	SIZEOF_BPF_HDR(type)	\
97    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))
98
99#ifdef COMPAT_FREEBSD32
100#include <sys/mount.h>
101#include <compat/freebsd32/freebsd32.h>
102#define BPF_ALIGNMENT32 sizeof(int32_t)
103#define BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))
104
105#ifndef BURN_BRIDGES
106/*
107 * 32-bit version of the structure prepended to each packet.  We use this
108 * header instead of the standard one for 32-bit streams.  We mark a stream as
109 * 32-bit the first time we see a 32-bit compat ioctl request.
110 */
111struct bpf_hdr32 {
112	struct timeval32 bh_tstamp;	/* time stamp */
113	uint32_t	bh_caplen;	/* length of captured portion */
114	uint32_t	bh_datalen;	/* original length of packet */
115	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
116					   plus alignment padding) */
117};
118#endif
119
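/*
 * 32-bit layouts of the filter and DLT-list ioctl arguments; user pointers
 * are carried as 32-bit integers and converted with PTRIN()/casts below.
 */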
120struct bpf_program32 {
121	u_int bf_len;
122	uint32_t bf_insns;
123};
124
125struct bpf_dltlist32 {
126	u_int	bfl_len;
127	u_int	bfl_list;
128};
129
130#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
131#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
132#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
133#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
134#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
135#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
136#endif
137
138/*
139 * bpf_iflist is a list of BPF interface structures, each corresponding to a
140 * specific DLT.  The same network interface might have several BPF interface
141 * structures registered by different layers in the stack (e.g., 802.11
142 * frames, Ethernet frames, etc.).
143 */
144static LIST_HEAD(, bpf_if)	bpf_iflist, bpf_freelist;
145static struct mtx	bpf_mtx;		/* bpf global lock */
146static int		bpf_bpfd_cnt;
147
148static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
149static void	bpf_detachd(struct bpf_d *);
150static void	bpf_detachd_locked(struct bpf_d *);
151static void	bpf_freed(struct bpf_d *);
152static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
153		    struct sockaddr *, int *, struct bpf_insn *);
154static int	bpf_setif(struct bpf_d *, struct ifreq *);
155static void	bpf_timed_out(void *);
156static __inline void
157		bpf_wakeup(struct bpf_d *);
158static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
159		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
160		    struct bintime *);
161static void	reset_d(struct bpf_d *);
162static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
163static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
164static int	bpf_setdlt(struct bpf_d *, u_int);
165static void	filt_bpfdetach(struct knote *);
166static int	filt_bpfread(struct knote *, long);
167static void	bpf_drvinit(void *);
168static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);
169
170SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
171int bpf_maxinsns = BPF_MAXINSNS;
172SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
173    &bpf_maxinsns, 0, "Maximum bpf program instructions");
174static int bpf_zerocopy_enable = 0;
175SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
176    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
177static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
178    bpf_stats_sysctl, "bpf statistics portal");
179
180static VNET_DEFINE(int, bpf_optimize_writers) = 0;
181#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
182SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
183    CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
184    "Do not send packets until BPF program is set");
185
186static	d_open_t	bpfopen;
187static	d_read_t	bpfread;
188static	d_write_t	bpfwrite;
189static	d_ioctl_t	bpfioctl;
190static	d_poll_t	bpfpoll;
191static	d_kqfilter_t	bpfkqfilter;
192
193static struct cdevsw bpf_cdevsw = {
194	.d_version =	D_VERSION,
195	.d_open =	bpfopen,
196	.d_read =	bpfread,
197	.d_write =	bpfwrite,
198	.d_ioctl =	bpfioctl,
199	.d_poll =	bpfpoll,
200	.d_name =	"bpf",
201	.d_kqfilter =	bpfkqfilter,
202};
203
204static struct filterops bpfread_filtops = {
205	.f_isfd = 1,
206	.f_detach = filt_bpfdetach,
207	.f_event = filt_bpfread,
208};
209
210eventhandler_tag	bpf_ifdetach_cookie = NULL;
211
212/*
213 * LOCKING MODEL USED BY BPF:
214 * Locks:
215 * 1) global lock (BPF_LOCK). Mutex, used to protect interface addition/removal,
216 * some global counters and every bpf_if reference.
217 * 2) Interface lock. Rwlock, used to protect list of BPF descriptors and their filters.
218 * 3) Descriptor lock. Mutex, used to protect BPF buffers and various structure fields
219 *   used by bpf_mtap code.
220 *
221 * Lock order:
222 *
223 * Global lock, interface lock, descriptor lock
224 *
225 * We have to acquire the interface lock before the descriptor lock because of
226 * the BPF_MTAP[2] locking model. In many places (like bpf_detachd) we start from
227 * the BPF descriptor (and need to at least rlock it to get a reliable interface
228 * pointer). This creates a potential lock-order reversal (LOR), so we use the
229 * global lock to protect against bpf_if changes in every such place.
230 *
231 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
232 * 3) descriptor main wlock.
233 * Reading bd_bif can be protected by any of these locks, typically global lock.
234 *
235 * Changing the read/write BPF filters is protected by the same three locks;
236 * the same applies to reading them.
237 *
238 * Sleeping while holding the global lock is not allowed, since bpfdetach() uses it.
239 */
240
241/*
242 * Wrapper functions for various buffering methods.  If the set of buffer
243 * modes expands, we will probably want to introduce a switch data structure
244 * similar to protosw, etc.
245 */
246static void
247bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
248    u_int len)
249{
250
251	BPFD_LOCK_ASSERT(d);
252
253	switch (d->bd_bufmode) {
254	case BPF_BUFMODE_BUFFER:
255		return (bpf_buffer_append_bytes(d, buf, offset, src, len));
256
257	case BPF_BUFMODE_ZBUF:
258		d->bd_zcopy++;
259		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
260
261	default:
262		panic("bpf_buf_append_bytes");
263	}
264}
265
266static void
267bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
268    u_int len)
269{
270
271	BPFD_LOCK_ASSERT(d);
272
273	switch (d->bd_bufmode) {
274	case BPF_BUFMODE_BUFFER:
275		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
276
277	case BPF_BUFMODE_ZBUF:
278		d->bd_zcopy++;
279		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
280
281	default:
282		panic("bpf_buf_append_mbuf");
283	}
284}
285
286/*
287 * This function gets called when the free buffer is re-assigned.
288 */
289static void
290bpf_buf_reclaimed(struct bpf_d *d)
291{
292
293	BPFD_LOCK_ASSERT(d);
294
295	switch (d->bd_bufmode) {
296	case BPF_BUFMODE_BUFFER:
297		return;
298
299	case BPF_BUFMODE_ZBUF:
300		bpf_zerocopy_buf_reclaimed(d);
301		return;
302
303	default:
304		panic("bpf_buf_reclaimed");
305	}
306}
307
308/*
309 * If the buffer mechanism has a way to decide that a held buffer can be made
310 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
311 * returned if the buffer can be discarded, (0) is returned if it cannot.
312 */
313static int
314bpf_canfreebuf(struct bpf_d *d)
315{
316
317	BPFD_LOCK_ASSERT(d);
318
319	switch (d->bd_bufmode) {
320	case BPF_BUFMODE_ZBUF:
321		return (bpf_zerocopy_canfreebuf(d));
322	}
323	return (0);
324}
325
326/*
327 * Allow the buffer model to indicate that the current store buffer is
328 * immutable, regardless of the appearance of space.  Return (1) if the
329 * buffer is writable, and (0) if not.
330 */
331static int
332bpf_canwritebuf(struct bpf_d *d)
333{
334	BPFD_LOCK_ASSERT(d);
335
336	switch (d->bd_bufmode) {
337	case BPF_BUFMODE_ZBUF:
338		return (bpf_zerocopy_canwritebuf(d));
339	}
340	return (1);
341}
342
343/*
344 * Notify buffer model that an attempt to write to the store buffer has
345 * resulted in a dropped packet, in which case the buffer may be considered
346 * full.
347 */
348static void
349bpf_buffull(struct bpf_d *d)
350{
351
352	BPFD_LOCK_ASSERT(d);
353
354	switch (d->bd_bufmode) {
355	case BPF_BUFMODE_ZBUF:
356		bpf_zerocopy_buffull(d);
357		break;
358	}
359}
360
361/*
362 * Notify the buffer model that a buffer has moved into the hold position.
363 */
364void
365bpf_bufheld(struct bpf_d *d)
366{
367
368	BPFD_LOCK_ASSERT(d);
369
370	switch (d->bd_bufmode) {
371	case BPF_BUFMODE_ZBUF:
372		bpf_zerocopy_bufheld(d);
373		break;
374	}
375}
376
377static void
378bpf_free(struct bpf_d *d)
379{
380
381	switch (d->bd_bufmode) {
382	case BPF_BUFMODE_BUFFER:
383		return (bpf_buffer_free(d));
384
385	case BPF_BUFMODE_ZBUF:
386		return (bpf_zerocopy_free(d));
387
388	default:
389		panic("bpf_buf_free");
390	}
391}
392
393static int
394bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
395{
396
397	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
398		return (EOPNOTSUPP);
399	return (bpf_buffer_uiomove(d, buf, len, uio));
400}
401
402static int
403bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
404{
405
406	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
407		return (EOPNOTSUPP);
408	return (bpf_buffer_ioctl_sblen(d, i));
409}
410
411static int
412bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
413{
414
415	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
416		return (EOPNOTSUPP);
417	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
418}
419
420static int
421bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
422{
423
424	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
425		return (EOPNOTSUPP);
426	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
427}
428
429static int
430bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
431{
432
433	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
434		return (EOPNOTSUPP);
435	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
436}
437
438/*
439 * General BPF functions.
440 */
441static int
442bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
443    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
444{
445	const struct ieee80211_bpf_params *p;
446	struct ether_header *eh;
447	struct mbuf *m;
448	int error;
449	int len;
450	int hlen;
451	int slen;
452
453	/*
454	 * Build a sockaddr based on the data link layer type.
455	 * We do this at this level because the ethernet header
456	 * is copied directly into the data field of the sockaddr.
457	 * In the case of SLIP, there is no header and the packet
458	 * is forwarded as is.
459	 * Also, we are careful to leave room at the front of the mbuf
460	 * for the link level header.
461	 */
462	switch (linktype) {
463
464	case DLT_SLIP:
465		sockp->sa_family = AF_INET;
466		hlen = 0;
467		break;
468
469	case DLT_EN10MB:
470		sockp->sa_family = AF_UNSPEC;
471		/* XXX Would MAXLINKHDR be better? */
472		hlen = ETHER_HDR_LEN;
473		break;
474
475	case DLT_FDDI:
476		sockp->sa_family = AF_IMPLINK;
477		hlen = 0;
478		break;
479
480	case DLT_RAW:
481		sockp->sa_family = AF_UNSPEC;
482		hlen = 0;
483		break;
484
485	case DLT_NULL:
486		/*
487		 * null interface types require a 4-byte pseudo-header which
488		 * corresponds to the address family of the packet.
489		 */
490		sockp->sa_family = AF_UNSPEC;
491		hlen = 4;
492		break;
493
494	case DLT_ATM_RFC1483:
495		/*
496		 * The "en" ATM driver requires a 4-byte ATM pseudo-header.
497		 * Though it isn't standard, the VPI:VCI needs to be
498		 * specified anyway.
499		 */
500		sockp->sa_family = AF_UNSPEC;
501		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
502		break;
503
504	case DLT_PPP:
505		sockp->sa_family = AF_UNSPEC;
506		hlen = 4;	/* This should match PPP_HDRLEN */
507		break;
508
509	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
510		sockp->sa_family = AF_IEEE80211;
511		hlen = 0;
512		break;
513
514	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
515		sockp->sa_family = AF_IEEE80211;
516		sockp->sa_len = 12;	/* XXX != 0 */
517		hlen = sizeof(struct ieee80211_bpf_params);
518		break;
519
520	default:
521		return (EIO);
522	}
523
524	len = uio->uio_resid;
525	if (len < hlen || len - hlen > ifp->if_mtu)
526		return (EMSGSIZE);
527
528	m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
529	if (m == NULL)
530		return (EIO);
531	m->m_pkthdr.len = m->m_len = len;
532	*mp = m;
533
534	error = uiomove(mtod(m, u_char *), len, uio);
535	if (error)
536		goto bad;
537
538	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
539	if (slen == 0) {
540		error = EPERM;
541		goto bad;
542	}
543
544	/* Check for multicast destination */
545	switch (linktype) {
546	case DLT_EN10MB:
547		eh = mtod(m, struct ether_header *);
548		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
549			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
550			    ETHER_ADDR_LEN) == 0)
551				m->m_flags |= M_BCAST;
552			else
553				m->m_flags |= M_MCAST;
554		}
555		break;
556	}
557
558	/*
559	 * Make room for link header, and copy it to sockaddr
560	 */
561	if (hlen != 0) {
562		if (sockp->sa_family == AF_IEEE80211) {
563			/*
564			 * Collect true length from the parameter header
565			 * NB: sockp is known to be zero'd so if we do a
566			 *     short copy unspecified parameters will be
567			 *     zero.
568			 * NB: packet may not be aligned after stripping
569			 *     bpf params
570			 * XXX check ibp_vers
571			 */
572			p = mtod(m, const struct ieee80211_bpf_params *);
573			hlen = p->ibp_len;
574			if (hlen > sizeof(sockp->sa_data)) {
575				error = EINVAL;
576				goto bad;
577			}
578		}
579		bcopy(m->m_data, sockp->sa_data, hlen);
580	}
581	*hdrlen = hlen;
582
583	return (0);
584bad:
585	m_freem(m);
586	return (error);
587}
588
589/*
590 * Attach file to the bpf interface, i.e. make d listen on bp.
591 */
592static void
593bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
594{
595	int op_w;
596
597	BPF_LOCK_ASSERT();
598
599	/*
600	 * Save the sysctl value to protect against it changing
601	 * between reads.
602	 */
603	op_w = V_bpf_optimize_writers || d->bd_writer;
604
605	if (d->bd_bif != NULL)
606		bpf_detachd_locked(d);
607	/*
608	 * Point d at bp, and add d to the interface's list.
609	 * Since there are many applications using BPF for
610	 * sending raw packets only (dhcpd and cdpd are good examples),
611	 * we can delay adding d to the list of active listeners until
612	 * some filter is configured.
613	 */
614
615	BPFIF_WLOCK(bp);
616	BPFD_LOCK(d);
617
618	d->bd_bif = bp;
619
620	if (op_w != 0) {
621		/* Add to writers-only list */
622		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
623		/*
624		 * We decrement bd_writer on every filter set operation.
625		 * First BIOCSETF is done by pcap_open_live() to set up
626	 * the snap length. After that the application usually sets its own filter.
627		 */
628		d->bd_writer = 2;
629	} else
630		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
631
632	BPFD_UNLOCK(d);
633	BPFIF_WUNLOCK(bp);
634
635	bpf_bpfd_cnt++;
636
637	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
638	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");
639
640	if (op_w == 0)
641		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
642}
643
644/*
645 * Add d to the list of active bp filters.
646 * Requires bpf_attachd() to have been called first.
647 */
648static void
649bpf_upgraded(struct bpf_d *d)
650{
651	struct bpf_if *bp;
652
653	BPF_LOCK_ASSERT();
654
655	bp = d->bd_bif;
656
657	/*
658	 * Filter can be set several times without specifying interface.
659	 * Mark d as reader and exit.
660	 */
661	if (bp == NULL) {
662		BPFD_LOCK(d);
663		d->bd_writer = 0;
664		BPFD_UNLOCK(d);
665		return;
666	}
667
668	BPFIF_WLOCK(bp);
669	BPFD_LOCK(d);
670
671	/* Remove from writers-only list */
672	LIST_REMOVE(d, bd_next);
673	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
674	/* Mark d as reader */
675	d->bd_writer = 0;
676
677	BPFD_UNLOCK(d);
678	BPFIF_WUNLOCK(bp);
679
680	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);
681
682	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
683}
684
685/*
686 * Detach a file from its interface.
687 */
688static void
689bpf_detachd(struct bpf_d *d)
690{
691	BPF_LOCK();
692	bpf_detachd_locked(d);
693	BPF_UNLOCK();
694}
695
696static void
697bpf_detachd_locked(struct bpf_d *d)
698{
699	int error;
700	struct bpf_if *bp;
701	struct ifnet *ifp;
702
703	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);
704
705	BPF_LOCK_ASSERT();
706
707	/* Check if descriptor is attached */
708	if ((bp = d->bd_bif) == NULL)
709		return;
710
711	BPFIF_WLOCK(bp);
712	BPFD_LOCK(d);
713
714	/* Save bd_writer value */
715	error = d->bd_writer;
716
717	/*
718	 * Remove d from the interface's descriptor list.
719	 */
720	LIST_REMOVE(d, bd_next);
721
722	ifp = bp->bif_ifp;
723	d->bd_bif = NULL;
724	BPFD_UNLOCK(d);
725	BPFIF_WUNLOCK(bp);
726
727	bpf_bpfd_cnt--;
728
729	/* Call event handler iff d is attached */
730	if (error == 0)
731		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);
732
733	/*
734	 * Check if this descriptor had requested promiscuous mode.
735	 * If so, turn it off.
736	 */
737	if (d->bd_promisc) {
738		d->bd_promisc = 0;
739		CURVNET_SET(ifp->if_vnet);
740		error = ifpromisc(ifp, 0);
741		CURVNET_RESTORE();
742		if (error != 0 && error != ENXIO) {
743			/*
744			 * ENXIO can happen if a pccard is unplugged.
745			 * Something is really wrong if we were able to put
746			 * the driver into promiscuous mode, but can't
747			 * take it out.
748			 */
749			if_printf(bp->bif_ifp,
750				"bpf_detach: ifpromisc failed (%d)\n", error);
751		}
752	}
753}
754
755/*
756 * Close the descriptor by detaching it from its interface,
757 * deallocating its buffers, and marking it free.
758 */
759static void
760bpf_dtor(void *data)
761{
762	struct bpf_d *d = data;
763
764	BPFD_LOCK(d);
765	if (d->bd_state == BPF_WAITING)
766		callout_stop(&d->bd_callout);
767	d->bd_state = BPF_IDLE;
768	BPFD_UNLOCK(d);
769	funsetown(&d->bd_sigio);
770	bpf_detachd(d);
771#ifdef MAC
772	mac_bpfdesc_destroy(d);
773#endif /* MAC */
774	seldrain(&d->bd_sel);
775	knlist_destroy(&d->bd_sel.si_note);
776	callout_drain(&d->bd_callout);
777	bpf_freed(d);
778	free(d, M_BPF);
779}
780
781/*
782 * Open the bpf device.  A new descriptor is allocated for each open; the
783 * historical ENXIO/EBUSY errors for in-use minor devices no longer apply.
784 */
785/* ARGSUSED */
786static	int
787bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
788{
789	struct bpf_d *d;
790	int error;
791
792	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
793	error = devfs_set_cdevpriv(d, bpf_dtor);
794	if (error != 0) {
795		free(d, M_BPF);
796		return (error);
797	}
798
799	/*
800	 * For historical reasons, perform a one-time initialization call to
801	 * the buffer routines, even though we're not yet committed to a
802	 * particular buffer method.
803	 */
804	bpf_buffer_init(d);
805	if ((flags & FREAD) == 0)
806		d->bd_writer = 2;
807	d->bd_hbuf_in_use = 0;
808	d->bd_bufmode = BPF_BUFMODE_BUFFER;
809	d->bd_sig = SIGIO;
810	d->bd_direction = BPF_D_INOUT;
811	BPF_PID_REFRESH(d, td);
812#ifdef MAC
813	mac_bpfdesc_init(d);
814	mac_bpfdesc_create(td->td_ucred, d);
815#endif
816	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
817	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
818	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);
819
820	return (0);
821}
822
823/*
824 *  bpfread - read next chunk of packets from buffers
825 */
826static	int
827bpfread(struct cdev *dev, struct uio *uio, int ioflag)
828{
829	struct bpf_d *d;
830	int error;
831	int non_block;
832	int timed_out;
833
834	error = devfs_get_cdevpriv((void **)&d);
835	if (error != 0)
836		return (error);
837
838	/*
839	 * Restrict the application to use a buffer the same size as
840	 * the kernel buffers.
841	 */
842	if (uio->uio_resid != d->bd_bufsize)
843		return (EINVAL);
844
845	non_block = ((ioflag & O_NONBLOCK) != 0);
846
847	BPFD_LOCK(d);
848	BPF_PID_REFRESH_CUR(d);
849	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
850		BPFD_UNLOCK(d);
851		return (EOPNOTSUPP);
852	}
853	if (d->bd_state == BPF_WAITING)
854		callout_stop(&d->bd_callout);
855	timed_out = (d->bd_state == BPF_TIMED_OUT);
856	d->bd_state = BPF_IDLE;
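	/*
	 * Wait for any read that is still copying the hold buffer out
	 * to user space to release it.
	 */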
857	while (d->bd_hbuf_in_use) {
858		error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
859		    PRINET|PCATCH, "bd_hbuf", 0);
860		if (error != 0) {
861			BPFD_UNLOCK(d);
862			return (error);
863		}
864	}
865	/*
866	 * If the hold buffer is empty, then do a timed sleep, which
867	 * ends when the timeout expires or when enough packets
868	 * have arrived to fill the store buffer.
869	 */
870	while (d->bd_hbuf == NULL) {
871		if (d->bd_slen != 0) {
872			/*
873			 * One or more packets either arrived since the
874			 * previous read or arrived while we were asleep.
875			 */
876			if (d->bd_immediate || non_block || timed_out) {
877				/*
878				 * Rotate the buffers and return what's here
879				 * if we are in immediate mode, non-blocking
880				 * flag is set, or this descriptor timed out.
881				 */
882				ROTATE_BUFFERS(d);
883				break;
884			}
885		}
886
887		/*
888		 * No data is available, check to see if the bpf device
889		 * is still pointed at a real interface.  If not, return
890		 * ENXIO so that the userland process knows to rebind
891		 * it before using it again.
892		 */
893		if (d->bd_bif == NULL) {
894			BPFD_UNLOCK(d);
895			return (ENXIO);
896		}
897
898		if (non_block) {
899			BPFD_UNLOCK(d);
900			return (EWOULDBLOCK);
901		}
902		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
903		     "bpf", d->bd_rtout);
904		if (error == EINTR || error == ERESTART) {
905			BPFD_UNLOCK(d);
906			return (error);
907		}
908		if (error == EWOULDBLOCK) {
909			/*
910			 * On a timeout, return what's in the buffer,
911			 * which may be nothing.  If there is something
912			 * in the store buffer, we can rotate the buffers.
913			 */
914			if (d->bd_hbuf)
915				/*
916				 * We filled up the buffer in between
917				 * getting the timeout and arriving
918				 * here, so we don't need to rotate.
919				 */
920				break;
921
922			if (d->bd_slen == 0) {
923				BPFD_UNLOCK(d);
924				return (0);
925			}
926			ROTATE_BUFFERS(d);
927			break;
928		}
929	}
930	/*
931	 * At this point, we know we have something in the hold slot.
932	 */
933	d->bd_hbuf_in_use = 1;
934	BPFD_UNLOCK(d);
935
936	/*
937	 * Move data from hold buffer into user space.
938	 * We know the entire buffer is transferred since
939	 * we checked above that the read buffer is bd_bufsize bytes.
940	 *
941	 * We do not have to worry about simultaneous reads because
942	 * we waited for sole access to the hold buffer above.
943	 */
944	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);
945
946	BPFD_LOCK(d);
947	KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf"));
948	d->bd_fbuf = d->bd_hbuf;
949	d->bd_hbuf = NULL;
950	d->bd_hlen = 0;
951	bpf_buf_reclaimed(d);
952	d->bd_hbuf_in_use = 0;
953	wakeup(&d->bd_hbuf_in_use);
954	BPFD_UNLOCK(d);
955
956	return (error);
957}
958
959/*
960 * If there are processes sleeping on this descriptor, wake them up.
961 */
962static __inline void
963bpf_wakeup(struct bpf_d *d)
964{
965
966	BPFD_LOCK_ASSERT(d);
967	if (d->bd_state == BPF_WAITING) {
968		callout_stop(&d->bd_callout);
969		d->bd_state = BPF_IDLE;
970	}
971	wakeup(d);
972	if (d->bd_async && d->bd_sig && d->bd_sigio)
973		pgsigio(&d->bd_sigio, d->bd_sig, 0);
974
975	selwakeuppri(&d->bd_sel, PRINET);
976	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
977}
978
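/*
 * Callout handler for read timeouts; runs with the descriptor lock held,
 * as the callout was initialized with callout_init_mtx() in bpfopen().
 */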
979static void
980bpf_timed_out(void *arg)
981{
982	struct bpf_d *d = (struct bpf_d *)arg;
983
984	BPFD_LOCK_ASSERT(d);
985
986	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
987		return;
988	if (d->bd_state == BPF_WAITING) {
989		d->bd_state = BPF_TIMED_OUT;
990		if (d->bd_slen != 0)
991			bpf_wakeup(d);
992	}
993}
994
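/*
 * Check whether a read would return data: either the hold buffer has data
 * and cannot simply be reclaimed, or we are in immediate/timed-out mode
 * and the store buffer is non-empty.
 */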
995static int
996bpf_ready(struct bpf_d *d)
997{
998
999	BPFD_LOCK_ASSERT(d);
1000
1001	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
1002		return (1);
1003	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1004	    d->bd_slen != 0)
1005		return (1);
1006	return (0);
1007}
1008
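/*
 * bpfwrite - inject a packet on the attached interface.  The data must
 * pass the write filter (if any) and fit within the interface MTU; any
 * link-level header is peeled off into a sockaddr by bpf_movein().
 */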
1009static int
1010bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
1011{
1012	struct bpf_d *d;
1013	struct ifnet *ifp;
1014	struct mbuf *m, *mc;
1015	struct sockaddr dst;
1016	int error, hlen;
1017
1018	error = devfs_get_cdevpriv((void **)&d);
1019	if (error != 0)
1020		return (error);
1021
1022	BPF_PID_REFRESH_CUR(d);
1023	d->bd_wcount++;
1024	/* XXX: locking required */
1025	if (d->bd_bif == NULL) {
1026		d->bd_wdcount++;
1027		return (ENXIO);
1028	}
1029
1030	ifp = d->bd_bif->bif_ifp;
1031
1032	if ((ifp->if_flags & IFF_UP) == 0) {
1033		d->bd_wdcount++;
1034		return (ENETDOWN);
1035	}
1036
1037	if (uio->uio_resid == 0) {
1038		d->bd_wdcount++;
1039		return (0);
1040	}
1041
1042	bzero(&dst, sizeof(dst));
1043	m = NULL;
1044	hlen = 0;
1045	/* XXX: bpf_movein() can sleep */
1046	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
1047	    &m, &dst, &hlen, d->bd_wfilter);
1048	if (error) {
1049		d->bd_wdcount++;
1050		return (error);
1051	}
1052	d->bd_wfcount++;
1053	if (d->bd_hdrcmplt)
1054		dst.sa_family = pseudo_AF_HDRCMPLT;
1055
1056	if (d->bd_feedback) {
1057		mc = m_dup(m, M_NOWAIT);
1058		if (mc != NULL)
1059			mc->m_pkthdr.rcvif = ifp;
1060		/* Set M_PROMISC for outgoing packets to be discarded. */
1061		if (d->bd_direction == BPF_D_INOUT)
1062			m->m_flags |= M_PROMISC;
1063	} else
1064		mc = NULL;
1065
1066	m->m_pkthdr.len -= hlen;
1067	m->m_len -= hlen;
1068	m->m_data += hlen;	/* XXX */
1069
1070	CURVNET_SET(ifp->if_vnet);
1071#ifdef MAC
1072	BPFD_LOCK(d);
1073	mac_bpfdesc_create_mbuf(d, m);
1074	if (mc != NULL)
1075		mac_bpfdesc_create_mbuf(d, mc);
1076	BPFD_UNLOCK(d);
1077#endif
1078
1079	error = (*ifp->if_output)(ifp, m, &dst, NULL);
1080	if (error)
1081		d->bd_wdcount++;
1082
1083	if (mc != NULL) {
1084		if (error == 0)
1085			(*ifp->if_input)(ifp, mc);
1086		else
1087			m_freem(mc);
1088	}
1089	CURVNET_RESTORE();
1090
1091	return (error);
1092}
1093
1094/*
1095 * Reset a descriptor by flushing its packet buffer and clearing the receive
1096 * and drop counts.  This is doable for kernel-only buffers, but with
1097 * zero-copy buffers, we can't write to (or rotate) buffers that are
1098 * currently owned by userspace.  It would be nice if we could encapsulate
1099 * this logic in the buffer code rather than here.
1100 */
1101static void
1102reset_d(struct bpf_d *d)
1103{
1104
1105	BPFD_LOCK_ASSERT(d);
1106
1107	while (d->bd_hbuf_in_use)
1108		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
1109		    "bd_hbuf", 0);
1110	if ((d->bd_hbuf != NULL) &&
1111	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
1112		/* Free the hold buffer. */
1113		d->bd_fbuf = d->bd_hbuf;
1114		d->bd_hbuf = NULL;
1115		d->bd_hlen = 0;
1116		bpf_buf_reclaimed(d);
1117	}
1118	if (bpf_canwritebuf(d))
1119		d->bd_slen = 0;
1120	d->bd_rcount = 0;
1121	d->bd_dcount = 0;
1122	d->bd_fcount = 0;
1123	d->bd_wcount = 0;
1124	d->bd_wfcount = 0;
1125	d->bd_wdcount = 0;
1126	d->bd_zcopy = 0;
1127}
1128
1129/*
1130 *  FIONREAD		Check for read packet available.
1131 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
1132 *  BIOCGBLEN		Get buffer len [for read()].
1133 *  BIOCSETF		Set read filter.
1134 *  BIOCSETFNR		Set read filter without resetting descriptor.
1135 *  BIOCSETWF		Set write filter.
1136 *  BIOCFLUSH		Flush read packet buffer.
1137 *  BIOCPROMISC		Put interface into promiscuous mode.
1138 *  BIOCGDLT		Get link layer type.
1139 *  BIOCGETIF		Get interface name.
1140 *  BIOCSETIF		Set interface.
1141 *  BIOCSRTIMEOUT	Set read timeout.
1142 *  BIOCGRTIMEOUT	Get read timeout.
1143 *  BIOCGSTATS		Get packet stats.
1144 *  BIOCIMMEDIATE	Set immediate mode.
1145 *  BIOCVERSION		Get filter language version.
1146 *  BIOCGHDRCMPLT	Get "header already complete" flag.
1147 *  BIOCSHDRCMPLT	Set "header already complete" flag.
1148 *  BIOCGDIRECTION	Get packet direction flag.
1149 *  BIOCSDIRECTION	Set packet direction flag.
1150 *  BIOCGTSTAMP		Get time stamp format and resolution.
1151 *  BIOCSTSTAMP		Set time stamp format and resolution.
1152 *  BIOCLOCK		Set "locked" flag.
1153 *  BIOCFEEDBACK	Set packet feedback mode.
1154 *  BIOCSETZBUF		Set current zero-copy buffer locations.
1155 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
1156 *  BIOCROTZBUF	Force rotation of zero-copy buffer.
1157 *  BIOCSETBUFMODE	Set buffer mode.
1158 *  BIOCGETBUFMODE	Get current buffer mode.
1159 */
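/*
 * Typical userland usage (a minimal sketch; "em0" is just an example):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	u_int buflen;
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	-- attach to an interface
 *	ioctl(fd, BIOCGBLEN, &buflen);	-- read() must use exactly this size
 *	read(fd, buf, buflen);		-- returns bpf_hdr-framed packets
 */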
1160/* ARGSUSED */
1161static	int
1162bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
1163    struct thread *td)
1164{
1165	struct bpf_d *d;
1166	int error;
1167
1168	error = devfs_get_cdevpriv((void **)&d);
1169	if (error != 0)
1170		return (error);
1171
1172	/*
1173	 * Refresh PID associated with this descriptor.
1174	 */
1175	BPFD_LOCK(d);
1176	BPF_PID_REFRESH(d, td);
1177	if (d->bd_state == BPF_WAITING)
1178		callout_stop(&d->bd_callout);
1179	d->bd_state = BPF_IDLE;
1180	BPFD_UNLOCK(d);
1181
1182	if (d->bd_locked == 1) {
1183		switch (cmd) {
1184		case BIOCGBLEN:
1185		case BIOCFLUSH:
1186		case BIOCGDLT:
1187		case BIOCGDLTLIST:
1188#ifdef COMPAT_FREEBSD32
1189		case BIOCGDLTLIST32:
1190#endif
1191		case BIOCGETIF:
1192		case BIOCGRTIMEOUT:
1193#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1194		case BIOCGRTIMEOUT32:
1195#endif
1196		case BIOCGSTATS:
1197		case BIOCVERSION:
1198		case BIOCGRSIG:
1199		case BIOCGHDRCMPLT:
1200		case BIOCSTSTAMP:
1201		case BIOCFEEDBACK:
1202		case FIONREAD:
1203		case BIOCLOCK:
1204		case BIOCSRTIMEOUT:
1205#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1206		case BIOCSRTIMEOUT32:
1207#endif
1208		case BIOCIMMEDIATE:
1209		case TIOCGPGRP:
1210		case BIOCROTZBUF:
1211			break;
1212		default:
1213			return (EPERM);
1214		}
1215	}
1216#ifdef COMPAT_FREEBSD32
1217	/*
1218	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
1219	 * that it will get 32-bit packet headers.
1220	 */
1221	switch (cmd) {
1222	case BIOCSETF32:
1223	case BIOCSETFNR32:
1224	case BIOCSETWF32:
1225	case BIOCGDLTLIST32:
1226	case BIOCGRTIMEOUT32:
1227	case BIOCSRTIMEOUT32:
1228		BPFD_LOCK(d);
1229		d->bd_compat32 = 1;
1230		BPFD_UNLOCK(d);
1231	}
1232#endif
1233
1234	CURVNET_SET(TD_TO_VNET(td));
1235	switch (cmd) {
1236
1237	default:
1238		error = EINVAL;
1239		break;
1240
1241	/*
1242	 * Check for read packet available.
1243	 */
1244	case FIONREAD:
1245		{
1246			int n;
1247
1248			BPFD_LOCK(d);
1249			n = d->bd_slen;
1250			while (d->bd_hbuf_in_use)
1251				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
1252				    PRINET, "bd_hbuf", 0);
1253			if (d->bd_hbuf)
1254				n += d->bd_hlen;
1255			BPFD_UNLOCK(d);
1256
1257			*(int *)addr = n;
1258			break;
1259		}
1260
1261	case SIOCGIFADDR:
1262		{
1263			struct ifnet *ifp;
1264
1265			if (d->bd_bif == NULL)
1266				error = EINVAL;
1267			else {
1268				ifp = d->bd_bif->bif_ifp;
1269				error = (*ifp->if_ioctl)(ifp, cmd, addr);
1270			}
1271			break;
1272		}
1273
1274	/*
1275	 * Get buffer len [for read()].
1276	 */
1277	case BIOCGBLEN:
1278		BPFD_LOCK(d);
1279		*(u_int *)addr = d->bd_bufsize;
1280		BPFD_UNLOCK(d);
1281		break;
1282
1283	/*
1284	 * Set buffer length.
1285	 */
1286	case BIOCSBLEN:
1287		error = bpf_ioctl_sblen(d, (u_int *)addr);
1288		break;
1289
1290	/*
1291	 * Set link layer read filter.
1292	 */
1293	case BIOCSETF:
1294	case BIOCSETFNR:
1295	case BIOCSETWF:
1296#ifdef COMPAT_FREEBSD32
1297	case BIOCSETF32:
1298	case BIOCSETFNR32:
1299	case BIOCSETWF32:
1300#endif
1301		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
1302		break;
1303
1304	/*
1305	 * Flush read packet buffer.
1306	 */
1307	case BIOCFLUSH:
1308		BPFD_LOCK(d);
1309		reset_d(d);
1310		BPFD_UNLOCK(d);
1311		break;
1312
1313	/*
1314	 * Put interface into promiscuous mode.
1315	 */
1316	case BIOCPROMISC:
1317		if (d->bd_bif == NULL) {
1318			/*
1319			 * No interface attached yet.
1320			 */
1321			error = EINVAL;
1322			break;
1323		}
1324		if (d->bd_promisc == 0) {
1325			error = ifpromisc(d->bd_bif->bif_ifp, 1);
1326			if (error == 0)
1327				d->bd_promisc = 1;
1328		}
1329		break;
1330
1331	/*
1332	 * Get current data link type.
1333	 */
1334	case BIOCGDLT:
1335		BPF_LOCK();
1336		if (d->bd_bif == NULL)
1337			error = EINVAL;
1338		else
1339			*(u_int *)addr = d->bd_bif->bif_dlt;
1340		BPF_UNLOCK();
1341		break;
1342
1343	/*
1344	 * Get a list of supported data link types.
1345	 */
1346#ifdef COMPAT_FREEBSD32
1347	case BIOCGDLTLIST32:
1348		{
1349			struct bpf_dltlist32 *list32;
1350			struct bpf_dltlist dltlist;
1351
1352			list32 = (struct bpf_dltlist32 *)addr;
1353			dltlist.bfl_len = list32->bfl_len;
1354			dltlist.bfl_list = PTRIN(list32->bfl_list);
1355			BPF_LOCK();
1356			if (d->bd_bif == NULL)
1357				error = EINVAL;
1358			else {
1359				error = bpf_getdltlist(d, &dltlist);
1360				if (error == 0)
1361					list32->bfl_len = dltlist.bfl_len;
1362			}
1363			BPF_UNLOCK();
1364			break;
1365		}
1366#endif
1367
1368	case BIOCGDLTLIST:
1369		BPF_LOCK();
1370		if (d->bd_bif == NULL)
1371			error = EINVAL;
1372		else
1373			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
1374		BPF_UNLOCK();
1375		break;
1376
1377	/*
1378	 * Set data link type.
1379	 */
1380	case BIOCSDLT:
1381		BPF_LOCK();
1382		if (d->bd_bif == NULL)
1383			error = EINVAL;
1384		else
1385			error = bpf_setdlt(d, *(u_int *)addr);
1386		BPF_UNLOCK();
1387		break;
1388
1389	/*
1390	 * Get interface name.
1391	 */
1392	case BIOCGETIF:
1393		BPF_LOCK();
1394		if (d->bd_bif == NULL)
1395			error = EINVAL;
1396		else {
1397			struct ifnet *const ifp = d->bd_bif->bif_ifp;
1398			struct ifreq *const ifr = (struct ifreq *)addr;
1399
1400			strlcpy(ifr->ifr_name, ifp->if_xname,
1401			    sizeof(ifr->ifr_name));
1402		}
1403		BPF_UNLOCK();
1404		break;
1405
1406	/*
1407	 * Set interface.
1408	 */
1409	case BIOCSETIF:
1410		{
1411			int alloc_buf, size;
1412
1413			/*
1414			 * Behavior here depends on the buffering model.  If
1415			 * we're using kernel memory buffers, then we can
1416			 * allocate them here.  If we're using zero-copy,
1417			 * then the user process must have registered buffers
1418			 * by the time we get here.
1419			 */
1420			alloc_buf = 0;
1421			BPFD_LOCK(d);
1422			if (d->bd_bufmode == BPF_BUFMODE_BUFFER &&
1423			    d->bd_sbuf == NULL)
1424				alloc_buf = 1;
1425			BPFD_UNLOCK(d);
1426			if (alloc_buf) {
1427				size = d->bd_bufsize;
1428				error = bpf_buffer_ioctl_sblen(d, &size);
1429				if (error != 0)
1430					break;
1431			}
1432			BPF_LOCK();
1433			error = bpf_setif(d, (struct ifreq *)addr);
1434			BPF_UNLOCK();
1435			break;
1436		}
1437
1438	/*
1439	 * Set read timeout.
1440	 */
1441	case BIOCSRTIMEOUT:
1442#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1443	case BIOCSRTIMEOUT32:
1444#endif
1445		{
1446			struct timeval *tv = (struct timeval *)addr;
1447#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1448			struct timeval32 *tv32;
1449			struct timeval tv64;
1450
1451			if (cmd == BIOCSRTIMEOUT32) {
1452				tv32 = (struct timeval32 *)addr;
1453				tv = &tv64;
1454				tv->tv_sec = tv32->tv_sec;
1455				tv->tv_usec = tv32->tv_usec;
1456			} else
1457#endif
1458				tv = (struct timeval *)addr;
1459
1460			/*
1461			 * Subtract 1 tick from tvtohz() since this isn't
1462			 * a one-shot timer.
1463			 */
1464			if ((error = itimerfix(tv)) == 0)
1465				d->bd_rtout = tvtohz(tv) - 1;
1466			break;
1467		}
1468
1469	/*
1470	 * Get read timeout.
1471	 */
1472	case BIOCGRTIMEOUT:
1473#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1474	case BIOCGRTIMEOUT32:
1475#endif
1476		{
1477			struct timeval *tv;
1478#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1479			struct timeval32 *tv32;
1480			struct timeval tv64;
1481
1482			if (cmd == BIOCGRTIMEOUT32)
1483				tv = &tv64;
1484			else
1485#endif
1486				tv = (struct timeval *)addr;
1487
1488			tv->tv_sec = d->bd_rtout / hz;
1489			tv->tv_usec = (d->bd_rtout % hz) * tick;
1490#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
1491			if (cmd == BIOCGRTIMEOUT32) {
1492				tv32 = (struct timeval32 *)addr;
1493				tv32->tv_sec = tv->tv_sec;
1494				tv32->tv_usec = tv->tv_usec;
1495			}
1496#endif
1497
1498			break;
1499		}
1500
1501	/*
1502	 * Get packet stats.
1503	 */
1504	case BIOCGSTATS:
1505		{
1506			struct bpf_stat *bs = (struct bpf_stat *)addr;
1507
1508			/* XXXCSJP overflow */
1509			bs->bs_recv = d->bd_rcount;
1510			bs->bs_drop = d->bd_dcount;
1511			break;
1512		}
1513
1514	/*
1515	 * Set immediate mode.
1516	 */
1517	case BIOCIMMEDIATE:
1518		BPFD_LOCK(d);
1519		d->bd_immediate = *(u_int *)addr;
1520		BPFD_UNLOCK(d);
1521		break;
1522
1523	case BIOCVERSION:
1524		{
1525			struct bpf_version *bv = (struct bpf_version *)addr;
1526
1527			bv->bv_major = BPF_MAJOR_VERSION;
1528			bv->bv_minor = BPF_MINOR_VERSION;
1529			break;
1530		}
1531
1532	/*
1533	 * Get "header already complete" flag
1534	 */
1535	case BIOCGHDRCMPLT:
1536		BPFD_LOCK(d);
1537		*(u_int *)addr = d->bd_hdrcmplt;
1538		BPFD_UNLOCK(d);
1539		break;
1540
1541	/*
1542	 * Set "header already complete" flag
1543	 */
1544	case BIOCSHDRCMPLT:
1545		BPFD_LOCK(d);
1546		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1547		BPFD_UNLOCK(d);
1548		break;
1549
1550	/*
1551	 * Get packet direction flag
1552	 */
1553	case BIOCGDIRECTION:
1554		BPFD_LOCK(d);
1555		*(u_int *)addr = d->bd_direction;
1556		BPFD_UNLOCK(d);
1557		break;
1558
1559	/*
1560	 * Set packet direction flag
1561	 */
1562	case BIOCSDIRECTION:
1563		{
1564			u_int	direction;
1565
1566			direction = *(u_int *)addr;
1567			switch (direction) {
1568			case BPF_D_IN:
1569			case BPF_D_INOUT:
1570			case BPF_D_OUT:
1571				BPFD_LOCK(d);
1572				d->bd_direction = direction;
1573				BPFD_UNLOCK(d);
1574				break;
1575			default:
1576				error = EINVAL;
1577			}
1578		}
1579		break;
1580
1581	/*
1582	 * Get packet timestamp format and resolution.
1583	 */
1584	case BIOCGTSTAMP:
1585		BPFD_LOCK(d);
1586		*(u_int *)addr = d->bd_tstamp;
1587		BPFD_UNLOCK(d);
1588		break;
1589
1590	/*
1591	 * Set packet timestamp format and resolution.
1592	 */
1593	case BIOCSTSTAMP:
1594		{
1595			u_int	func;
1596
1597			func = *(u_int *)addr;
1598			if (BPF_T_VALID(func))
1599				d->bd_tstamp = func;
1600			else
1601				error = EINVAL;
1602		}
1603		break;
1604
1605	case BIOCFEEDBACK:
1606		BPFD_LOCK(d);
1607		d->bd_feedback = *(u_int *)addr;
1608		BPFD_UNLOCK(d);
1609		break;
1610
1611	case BIOCLOCK:
1612		BPFD_LOCK(d);
1613		d->bd_locked = 1;
1614		BPFD_UNLOCK(d);
1615		break;
1616
1617	case FIONBIO:		/* Non-blocking I/O */
1618		break;
1619
1620	case FIOASYNC:		/* Send signal on receive packets */
1621		BPFD_LOCK(d);
1622		d->bd_async = *(int *)addr;
1623		BPFD_UNLOCK(d);
1624		break;
1625
1626	case FIOSETOWN:
1627		/*
1628		 * XXX: Add some sort of locking here?
1629		 * fsetown() can sleep.
1630		 */
1631		error = fsetown(*(int *)addr, &d->bd_sigio);
1632		break;
1633
1634	case FIOGETOWN:
1635		BPFD_LOCK(d);
1636		*(int *)addr = fgetown(&d->bd_sigio);
1637		BPFD_UNLOCK(d);
1638		break;
1639
1640	/* This is deprecated, FIOSETOWN should be used instead. */
1641	case TIOCSPGRP:
1642		error = fsetown(-(*(int *)addr), &d->bd_sigio);
1643		break;
1644
1645	/* This is deprecated, FIOGETOWN should be used instead. */
1646	case TIOCGPGRP:
1647		*(int *)addr = -fgetown(&d->bd_sigio);
1648		break;
1649
1650	case BIOCSRSIG:		/* Set receive signal */
1651		{
1652			u_int sig;
1653
1654			sig = *(u_int *)addr;
1655
1656			if (sig >= NSIG)
1657				error = EINVAL;
1658			else {
1659				BPFD_LOCK(d);
1660				d->bd_sig = sig;
1661				BPFD_UNLOCK(d);
1662			}
1663			break;
1664		}
1665	case BIOCGRSIG:
1666		BPFD_LOCK(d);
1667		*(u_int *)addr = d->bd_sig;
1668		BPFD_UNLOCK(d);
1669		break;
1670
1671	case BIOCGETBUFMODE:
1672		BPFD_LOCK(d);
1673		*(u_int *)addr = d->bd_bufmode;
1674		BPFD_UNLOCK(d);
1675		break;
1676
1677	case BIOCSETBUFMODE:
1678		/*
1679		 * Allow the buffering mode to be changed as long as we
1680		 * haven't yet committed to a particular mode.  Our
1681		 * definition of commitment, for now, is whether or not a
1682		 * buffer has been allocated or an interface attached, since
1683		 * that's the point where things get tricky.
1684		 */
1685		switch (*(u_int *)addr) {
1686		case BPF_BUFMODE_BUFFER:
1687			break;
1688
1689		case BPF_BUFMODE_ZBUF:
1690			if (bpf_zerocopy_enable)
1691				break;
1692			/* FALLSTHROUGH */
1693			/* FALLTHROUGH */
1694		default:
1695			CURVNET_RESTORE();
1696			return (EINVAL);
1697		}
1698
1699		BPFD_LOCK(d);
1700		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
1701		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
1702			BPFD_UNLOCK(d);
1703			CURVNET_RESTORE();
1704			return (EBUSY);
1705		}
1706		d->bd_bufmode = *(u_int *)addr;
1707		BPFD_UNLOCK(d);
1708		break;
1709
1710	case BIOCGETZMAX:
1711		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
1712		break;
1713
1714	case BIOCSETZBUF:
1715		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
1716		break;
1717
1718	case BIOCROTZBUF:
1719		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
1720		break;
1721	}
1722	CURVNET_RESTORE();
1723	return (error);
1724}
1725
1726/*
1727 * Set d's packet filter program to fp.  If this file already has a filter,
1728 * free it and replace it.  Returns EINVAL for bogus requests.
1729 *
1730 * Note we need the global lock here to serialize bpf_setf() and bpf_setif()
1731 * calls, since reading d->bd_bif can't be protected by the descriptor or
1732 * interface lock due to lock order.
1733 *
1734 * Additionally, we have to acquire the interface write lock because bpf_mtap()
1735 * uses the interface read lock to read all filters.
1736 */
1738static int
1739bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
1740{
1741#ifdef COMPAT_FREEBSD32
1742	struct bpf_program fp_swab;
1743	struct bpf_program32 *fp32;
1744#endif
1745	struct bpf_insn *fcode, *old;
1746#ifdef BPF_JITTER
1747	bpf_jit_filter *jfunc, *ofunc;
1748#endif
1749	size_t size;
1750	u_int flen;
1751	int need_upgrade;
1752
1753#ifdef COMPAT_FREEBSD32
1754	switch (cmd) {
1755	case BIOCSETF32:
1756	case BIOCSETWF32:
1757	case BIOCSETFNR32:
1758		fp32 = (struct bpf_program32 *)fp;
1759		fp_swab.bf_len = fp32->bf_len;
1760		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
1761		fp = &fp_swab;
1762		switch (cmd) {
1763		case BIOCSETF32:
1764			cmd = BIOCSETF;
1765			break;
1766		case BIOCSETWF32:
1767			cmd = BIOCSETWF;
1768			break;
1769		}
1770		break;
1771	}
1772#endif
1773
1774	fcode = NULL;
1775#ifdef BPF_JITTER
1776	jfunc = ofunc = NULL;
1777#endif
1778	need_upgrade = 0;
1779
1780	/*
1781	 * Check the new filter's validity before acquiring any locks.
1782	 * Allocate memory for the new filter, if needed.
1783	 */
1784	flen = fp->bf_len;
1785	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
1786		return (EINVAL);
1787	size = flen * sizeof(*fp->bf_insns);
1788	if (size > 0) {
1789		/* We're setting up a new filter.  Copy and check the actual data. */
1790		fcode = malloc(size, M_BPF, M_WAITOK);
1791		if (copyin(fp->bf_insns, fcode, size) != 0 ||
1792		    !bpf_validate(fcode, flen)) {
1793			free(fcode, M_BPF);
1794			return (EINVAL);
1795		}
1796#ifdef BPF_JITTER
1797		/* Filter is copied inside fcode and is perfectly valid. */
1798		jfunc = bpf_jitter(fcode, flen);
1799#endif
1800	}
1801
1802	BPF_LOCK();
1803
1804	/*
1805	 * Set up new filter.
1806	 * Protect filter change by interface lock.
1807	 * Additionally, we are protected by global lock here.
1808	 */
1809	if (d->bd_bif != NULL)
1810		BPFIF_WLOCK(d->bd_bif);
1811	BPFD_LOCK(d);
1812	if (cmd == BIOCSETWF) {
1813		old = d->bd_wfilter;
1814		d->bd_wfilter = fcode;
1815	} else {
1816		old = d->bd_rfilter;
1817		d->bd_rfilter = fcode;
1818#ifdef BPF_JITTER
1819		ofunc = d->bd_bfilter;
1820		d->bd_bfilter = jfunc;
1821#endif
1822		if (cmd == BIOCSETF)
1823			reset_d(d);
1824
1825		if (fcode != NULL) {
1826			/*
1827			 * Do not require an upgrade on the first BIOCSETF
1828			 * (used by pcap_open_live() to set the snap length).
1829			 */
1830			if (d->bd_writer != 0 && --d->bd_writer == 0)
1831				need_upgrade = 1;
1832			CTR4(KTR_NET, "%s: filter function set by pid %d, "
1833			    "bd_writer counter %d, need_upgrade %d",
1834			    __func__, d->bd_pid, d->bd_writer, need_upgrade);
1835		}
1836	}
1837	BPFD_UNLOCK(d);
1838	if (d->bd_bif != NULL)
1839		BPFIF_WUNLOCK(d->bd_bif);
1840	if (old != NULL)
1841		free(old, M_BPF);
1842#ifdef BPF_JITTER
1843	if (ofunc != NULL)
1844		bpf_destroy_jit_filter(ofunc);
1845#endif
1846
1847	/* Move d to active readers list. */
1848	if (need_upgrade)
1849		bpf_upgraded(d);
1850
1851	BPF_UNLOCK();
1852	return (0);
1853}
1854
1855/*
1856 * Detach a file from its current interface (if attached at all) and attach
1857 * to the interface indicated by the name stored in ifr.
1858 * Return an errno or 0.
1859 */
1860static int
1861bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1862{
1863	struct bpf_if *bp;
1864	struct ifnet *theywant;
1865
1866	BPF_LOCK_ASSERT();
1867
1868	theywant = ifunit(ifr->ifr_name);
1869	if (theywant == NULL || theywant->if_bpf == NULL)
1870		return (ENXIO);
1871
1872	bp = theywant->if_bpf;
1873
1874	/* Check if interface is not being detached from BPF */
1875	BPFIF_RLOCK(bp);
1876	if (bp->flags & BPFIF_FLAG_DYING) {
1877		BPFIF_RUNLOCK(bp);
1878		return (ENXIO);
1879	}
1880	BPFIF_RUNLOCK(bp);
1881
1882	/*
1883	 * At this point, we expect the buffer is already allocated.  If not,
1884	 * return an error.
1885	 */
1886	switch (d->bd_bufmode) {
1887	case BPF_BUFMODE_BUFFER:
1888	case BPF_BUFMODE_ZBUF:
1889		if (d->bd_sbuf == NULL)
1890			return (EINVAL);
1891		break;
1892
1893	default:
1894		panic("bpf_setif: bufmode %d", d->bd_bufmode);
1895	}
1896	if (bp != d->bd_bif)
1897		bpf_attachd(d, bp);
1898	BPFD_LOCK(d);
1899	reset_d(d);
1900	BPFD_UNLOCK(d);
1901	return (0);
1902}
1903
1904/*
1905 * Support for select() and poll() system calls
1906 *
1907 * Return true iff the specific operation will not block indefinitely.
1908 * Otherwise, return false but make a note that a selwakeup() must be done.
1909 */
1910static int
1911bpfpoll(struct cdev *dev, int events, struct thread *td)
1912{
1913	struct bpf_d *d;
1914	int revents;
1915
1916	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
1917		return (events &
1918		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));
1919
1920	/*
1921	 * Refresh PID associated with this descriptor.
1922	 */
1923	revents = events & (POLLOUT | POLLWRNORM);
1924	BPFD_LOCK(d);
1925	BPF_PID_REFRESH(d, td);
1926	if (events & (POLLIN | POLLRDNORM)) {
1927		if (bpf_ready(d))
1928			revents |= events & (POLLIN | POLLRDNORM);
1929		else {
1930			selrecord(td, &d->bd_sel);
1931			/* Start the read timeout if necessary. */
1932			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1933				callout_reset(&d->bd_callout, d->bd_rtout,
1934				    bpf_timed_out, d);
1935				d->bd_state = BPF_WAITING;
1936			}
1937		}
1938	}
1939	BPFD_UNLOCK(d);
1940	return (revents);
1941}
1942
1943/*
1944 * Support for kevent() system call.  Register EVFILT_READ filters and
1945 * reject all others.
1946 */
1947int
1948bpfkqfilter(struct cdev *dev, struct knote *kn)
1949{
1950	struct bpf_d *d;
1951
1952	if (devfs_get_cdevpriv((void **)&d) != 0 ||
1953	    kn->kn_filter != EVFILT_READ)
1954		return (1);
1955
1956	/*
1957	 * Refresh PID associated with this descriptor.
1958	 */
1959	BPFD_LOCK(d);
1960	BPF_PID_REFRESH_CUR(d);
1961	kn->kn_fop = &bpfread_filtops;
1962	kn->kn_hook = d;
1963	knlist_add(&d->bd_sel.si_note, kn, 1);
1964	BPFD_UNLOCK(d);
1965
1966	return (0);
1967}
1968
1969static void
1970filt_bpfdetach(struct knote *kn)
1971{
1972	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1973
1974	knlist_remove(&d->bd_sel.si_note, kn, 0);
1975}
1976
1977static int
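/*
 * kqueue(2) EVFILT_READ event: report how much data a read would return
 * and, as in bpfpoll(), arm the read timeout while the descriptor is idle.
 */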
1978filt_bpfread(struct knote *kn, long hint)
1979{
1980	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1981	int ready;
1982
1983	BPFD_LOCK_ASSERT(d);
1984	ready = bpf_ready(d);
1985	if (ready) {
1986		kn->kn_data = d->bd_slen;
1987		/*
1988		 * Ignore the hold buffer if it is being copied to user space.
1989		 */
1990		if (!d->bd_hbuf_in_use && d->bd_hbuf)
1991			kn->kn_data += d->bd_hlen;
1992	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1993		callout_reset(&d->bd_callout, d->bd_rtout,
1994		    bpf_timed_out, d);
1995		d->bd_state = BPF_WAITING;
1996	}
1997
1998	return (ready);
1999}
2000
2001#define	BPF_TSTAMP_NONE		0
2002#define	BPF_TSTAMP_FAST		1
2003#define	BPF_TSTAMP_NORMAL	2
2004#define	BPF_TSTAMP_EXTERN	3
2005
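/*
 * Rank the relative cost/precision of the time stamp format a descriptor
 * requested, so the tap routines can avoid taking a fresh time stamp when
 * one of equal or better quality has already been captured.
 */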
2006static int
2007bpf_ts_quality(int tstype)
2008{
2009
2010	if (tstype == BPF_T_NONE)
2011		return (BPF_TSTAMP_NONE);
2012	if ((tstype & BPF_T_FAST) != 0)
2013		return (BPF_TSTAMP_FAST);
2014
2015	return (BPF_TSTAMP_NORMAL);
2016}
2017
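/*
 * Fetch a time stamp of the requested quality, preferring one already
 * recorded on the mbuf by the driver (MTAG_BPF/MTAG_BPF_TIMESTAMP tag)
 * over reading the clock here.
 */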
2018static int
2019bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
2020{
2021	struct m_tag *tag;
2022	int quality;
2023
2024	quality = bpf_ts_quality(tstype);
2025	if (quality == BPF_TSTAMP_NONE)
2026		return (quality);
2027
2028	if (m != NULL) {
2029		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
2030		if (tag != NULL) {
2031			*bt = *(struct bintime *)(tag + 1);
2032			return (BPF_TSTAMP_EXTERN);
2033		}
2034	}
2035	if (quality == BPF_TSTAMP_NORMAL)
2036		binuptime(bt);
2037	else
2038		getbinuptime(bt);
2039
2040	return (quality);
2041}
2042
2043/*
2044 * Incoming linkage from device drivers.  Process the packet pkt, of length
2045 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
2046 * by each process' filter, and if accepted, stashed into the corresponding
2047 * buffer.
2048 */
2049void
2050bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2051{
2052	struct bintime bt;
2053	struct bpf_d *d;
2054#ifdef BPF_JITTER
2055	bpf_jit_filter *bf;
2056#endif
2057	u_int slen;
2058	int gottime;
2059
2060	gottime = BPF_TSTAMP_NONE;
2061
2062	BPFIF_RLOCK(bp);
2063
2064	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2065		/*
2066		 * We are not using any locks for d here because:
2067		 * 1) any filter change is protected by interface
2068		 * write lock
2069		 * 2) destroying/detaching d is protected by interface
2070		 * write lock, too
2071		 */
2072
2073		/* XXX: Do not protect counter for the sake of performance. */
2074		++d->bd_rcount;
2075		/*
2076		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
2077		 * way for the caller to indicate to us whether this packet
2078		 * is inbound or outbound.  In the bpf_mtap() routines, we use
2079		 * the interface pointers on the mbuf to figure it out.
2080		 */
2081#ifdef BPF_JITTER
2082		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2083		if (bf != NULL)
2084			slen = (*(bf->func))(pkt, pktlen, pktlen);
2085		else
2086#endif
2087		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
2088		if (slen != 0) {
2089			/*
2090			 * Filter matches.  Acquire the descriptor lock.
2091			 */
2092			BPFD_LOCK(d);
2093
2094			d->bd_fcount++;
2095			if (gottime < bpf_ts_quality(d->bd_tstamp))
2096				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
2097#ifdef MAC
2098			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2099#endif
2100				catchpacket(d, pkt, pktlen, slen,
2101				    bpf_append_bytes, &bt);
2102			BPFD_UNLOCK(d);
2103		}
2104	}
2105	BPFIF_RUNLOCK(bp);
2106}
2107
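/*
 * True when a packet's direction does not match the descriptor's
 * BIOCSDIRECTION setting; (r) is the mbuf's receive interface and (i) the
 * tap interface, so (r) == (i) identifies an inbound packet.
 */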
2108#define	BPF_CHECK_DIRECTION(d, r, i)				\
2109	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
2110	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))
2111
2112/*
2113 * Incoming linkage from device drivers, when packet is in an mbuf chain.
2114 * Locking model is explained in bpf_tap().
2115 */
2116void
2117bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2118{
2119	struct bintime bt;
2120	struct bpf_d *d;
2121#ifdef BPF_JITTER
2122	bpf_jit_filter *bf;
2123#endif
2124	u_int pktlen, slen;
2125	int gottime;
2126
2127	/* Skip outgoing duplicate packets. */
2128	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2129		m->m_flags &= ~M_PROMISC;
2130		return;
2131	}
2132
2133	pktlen = m_length(m, NULL);
2134	gottime = BPF_TSTAMP_NONE;
2135
2136	BPFIF_RLOCK(bp);
2137
2138	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2139		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2140			continue;
2141		++d->bd_rcount;
2142#ifdef BPF_JITTER
2143		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
2144		/* XXX We cannot handle multiple mbufs. */
2145		if (bf != NULL && m->m_next == NULL)
2146			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
2147		else
2148#endif
2149		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
2150		if (slen != 0) {
2151			BPFD_LOCK(d);
2152
2153			d->bd_fcount++;
2154			if (gottime < bpf_ts_quality(d->bd_tstamp))
2155				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2156#ifdef MAC
2157			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2158#endif
2159				catchpacket(d, (u_char *)m, pktlen, slen,
2160				    bpf_append_mbuf, &bt);
2161			BPFD_UNLOCK(d);
2162		}
2163	}
2164	BPFIF_RUNLOCK(bp);
2165}
2166
2167/*
2168 * Incoming linkage from device drivers, when packet is in
2169 * an mbuf chain and to be prepended by a contiguous header.
2170 */
2171void
2172bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
2173{
2174	struct bintime bt;
2175	struct mbuf mb;
2176	struct bpf_d *d;
2177	u_int pktlen, slen;
2178	int gottime;
2179
2180	/* Skip outgoing duplicate packets. */
2181	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
2182		m->m_flags &= ~M_PROMISC;
2183		return;
2184	}
2185
2186	pktlen = m_length(m, NULL);
2187	/*
2188	 * Craft an on-stack mbuf suitable for passing to bpf_filter.
2189	 * Note that we cut corners here; we only set up what's
2190	 * absolutely needed--this mbuf should never go anywhere else.
2191	 */
2192	mb.m_next = m;
2193	mb.m_data = data;
2194	mb.m_len = dlen;
2195	pktlen += dlen;
2196
2197	gottime = BPF_TSTAMP_NONE;
2198
2199	BPFIF_RLOCK(bp);
2200
2201	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
2202		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
2203			continue;
2204		++d->bd_rcount;
2205		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
2206		if (slen != 0) {
2207			BPFD_LOCK(d);
2208
2209			d->bd_fcount++;
2210			if (gottime < bpf_ts_quality(d->bd_tstamp))
2211				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
2212#ifdef MAC
2213			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
2214#endif
2215				catchpacket(d, (u_char *)&mb, pktlen, slen,
2216				    bpf_append_mbuf, &bt);
2217			BPFD_UNLOCK(d);
2218		}
2219	}
2220	BPFIF_RUNLOCK(bp);
2221}
2222
2223#undef	BPF_CHECK_DIRECTION
2224
2225#undef	BPF_TSTAMP_NONE
2226#undef	BPF_TSTAMP_FAST
2227#undef	BPF_TSTAMP_NORMAL
2228#undef	BPF_TSTAMP_EXTERN
2229
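/*
 * Compute the space consumed in front of each captured packet by the
 * bpf header plus alignment padding: word-align the link header length
 * plus the header size, then subtract the link header length again.
 * A rough worked example (sizes assumed for illustration, ILP32 with
 * the legacy microtime header): for Ethernet, bif_hdrlen is 14 and
 * SIZEOF_BPF_HDR(struct bpf_hdr) is 18, so the result would be
 * BPF_WORDALIGN(14 + 18) - 14 = 32 - 14 = 18 bytes of header space.
 */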
2230static int
2231bpf_hdrlen(struct bpf_d *d)
2232{
2233	int hdrlen;
2234
2235	hdrlen = d->bd_bif->bif_hdrlen;
2236#ifndef BURN_BRIDGES
2237	if (d->bd_tstamp == BPF_T_NONE ||
2238	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
2239#ifdef COMPAT_FREEBSD32
2240		if (d->bd_compat32)
2241			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
2242		else
2243#endif
2244			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
2245	else
2246#endif
2247		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
2248#ifdef COMPAT_FREEBSD32
2249	if (d->bd_compat32)
2250		hdrlen = BPF_WORDALIGN32(hdrlen);
2251	else
2252#endif
2253		hdrlen = BPF_WORDALIGN(hdrlen);
2254
2255	return (hdrlen - d->bd_bif->bif_hdrlen);
2256}
2257
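/*
 * Convert a bintime to the on-the-wire timestamp format selected by
 * tstype: microseconds, nanoseconds, or a raw bintime.  Unless the
 * caller asked for monotonic time, the boot time is added first so
 * the result is wall-clock time.
 */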
2258static void
2259bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
2260{
2261	struct bintime bt2;
2262	struct timeval tsm;
2263	struct timespec tsn;
2264
2265	if ((tstype & BPF_T_MONOTONIC) == 0) {
2266		bt2 = *bt;
2267		bintime_add(&bt2, &boottimebin);
2268		bt = &bt2;
2269	}
2270	switch (BPF_T_FORMAT(tstype)) {
2271	case BPF_T_MICROTIME:
2272		bintime2timeval(bt, &tsm);
2273		ts->bt_sec = tsm.tv_sec;
2274		ts->bt_frac = tsm.tv_usec;
2275		break;
2276	case BPF_T_NANOTIME:
2277		bintime2timespec(bt, &tsn);
2278		ts->bt_sec = tsn.tv_sec;
2279		ts->bt_frac = tsn.tv_nsec;
2280		break;
2281	case BPF_T_BINTIME:
2282		ts->bt_sec = bt->sec;
2283		ts->bt_frac = bt->frac;
2284		break;
2285	}
2286}
2287
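/*
 * For orientation, a sketch of the classic bpf buffering scheme used
 * below: each descriptor cycles up to three buffers.  Packets are
 * appended to the store buffer (bd_sbuf); when it fills, it is rotated
 * into the hold buffer (bd_hbuf) for read(2) to drain, and the free
 * buffer (bd_fbuf), if any, becomes the new store buffer.  When no
 * free buffer is available, the packet is dropped and bd_dcount is
 * bumped.
 */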
2288/*
2289 * Move the packet data from interface memory (pkt) into the
2290 * store buffer.  "cpfn" is the routine called to do the actual data
2291 * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
2292 * while bpf_append_mbuf is passed in to copy mbuf chains.  In the
2293 * latter case, pkt is really an mbuf.
2294 */
2295static void
2296catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
2297    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
2298    struct bintime *bt)
2299{
2300	struct bpf_xhdr hdr;
2301#ifndef BURN_BRIDGES
2302	struct bpf_hdr hdr_old;
2303#ifdef COMPAT_FREEBSD32
2304	struct bpf_hdr32 hdr32_old;
2305#endif
2306#endif
2307	int caplen, curlen, hdrlen, totlen;
2308	int do_wakeup = 0;
2309	int do_timestamp;
2310	int tstype;
2311
2312	BPFD_LOCK_ASSERT(d);
2313
2314	/*
2315	 * Detect whether user space has released a buffer back to us, and if
2316	 * so, move it from being a hold buffer to a free buffer.  This may
2317	 * not be the best place for the check (for example, we might only
2318	 * want to run it when we actually need the space), but for now it
2319	 * is a reliable spot.
2320	 */
2321	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
2322		d->bd_fbuf = d->bd_hbuf;
2323		d->bd_hbuf = NULL;
2324		d->bd_hlen = 0;
2325		bpf_buf_reclaimed(d);
2326	}
2327
2328	/*
2329	 * Figure out how many bytes to move.  If the packet is
2330	 * greater than or equal to the snapshot length, transfer that
2331	 * much.  Otherwise, transfer the whole packet (unless
2332	 * we hit the buffer size limit).
2333	 */
2334	hdrlen = bpf_hdrlen(d);
2335	totlen = hdrlen + min(snaplen, pktlen);
2336	if (totlen > d->bd_bufsize)
2337		totlen = d->bd_bufsize;
2338
2339	/*
2340	 * Round up the end of the previous packet to the next longword.
2341	 *
2342	 * Drop the packet if there's no room and no hope of room.
2343	 * If the packet would overflow the storage buffer or the storage
2344	 * buffer is considered immutable by the buffer model, try to rotate
2345	 * the buffer and wake up pending processes.
2346	 */
2347#ifdef COMPAT_FREEBSD32
2348	if (d->bd_compat32)
2349		curlen = BPF_WORDALIGN32(d->bd_slen);
2350	else
2351#endif
2352		curlen = BPF_WORDALIGN(d->bd_slen);
2353	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
2354		if (d->bd_fbuf == NULL) {
2355			/*
2356			 * There's no room in the store buffer, and no
2357			 * prospect of room, so drop the packet.  Notify the
2358			 * buffer model.
2359			 */
2360			bpf_buffull(d);
2361			++d->bd_dcount;
2362			return;
2363		}
2364		KASSERT(!d->bd_hbuf_in_use, ("hold buffer is in use"));
2365		ROTATE_BUFFERS(d);
2366		do_wakeup = 1;
2367		curlen = 0;
2368	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
2369		/*
2370		 * Immediate mode is set, or the read timeout has already
2371		 * expired during a select call.  A packet arrived, so the
2372		 * reader should be woken up.
2373		 */
2374		do_wakeup = 1;
2375	caplen = totlen - hdrlen;
2376	tstype = d->bd_tstamp;
2377	do_timestamp = tstype != BPF_T_NONE;
2378#ifndef BURN_BRIDGES
2379	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
2380		struct bpf_ts ts;
2381		if (do_timestamp)
2382			bpf_bintime2ts(bt, &ts, tstype);
2383#ifdef COMPAT_FREEBSD32
2384		if (d->bd_compat32) {
2385			bzero(&hdr32_old, sizeof(hdr32_old));
2386			if (do_timestamp) {
2387				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
2388				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
2389			}
2390			hdr32_old.bh_datalen = pktlen;
2391			hdr32_old.bh_hdrlen = hdrlen;
2392			hdr32_old.bh_caplen = caplen;
2393			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
2394			    sizeof(hdr32_old));
2395			goto copy;
2396		}
2397#endif
2398		bzero(&hdr_old, sizeof(hdr_old));
2399		if (do_timestamp) {
2400			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
2401			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
2402		}
2403		hdr_old.bh_datalen = pktlen;
2404		hdr_old.bh_hdrlen = hdrlen;
2405		hdr_old.bh_caplen = caplen;
2406		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
2407		    sizeof(hdr_old));
2408		goto copy;
2409	}
2410#endif
2411
2412	/*
2413	 * Append the bpf header.  Note that we append the actual header
2414	 * size, but move forward by the length of the header plus padding.
2415	 */
2416	bzero(&hdr, sizeof(hdr));
2417	if (do_timestamp)
2418		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
2419	hdr.bh_datalen = pktlen;
2420	hdr.bh_hdrlen = hdrlen;
2421	hdr.bh_caplen = caplen;
2422	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));
2423
2424	/*
2425	 * Copy the packet data into the store buffer and update its length.
2426	 */
2427#ifndef BURN_BRIDGES
2428copy:
2429#endif
2430	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
2431	d->bd_slen = curlen + totlen;
2432
2433	if (do_wakeup)
2434		bpf_wakeup(d);
2435}
2436
2437/*
2438 * Free buffers currently in use by a descriptor.
2439 * Called on close.
2440 */
2441static void
2442bpf_freed(struct bpf_d *d)
2443{
2444
2445	/*
2446	 * We don't need to lock out interrupts since this descriptor has
2447	 * been detached from its interface and it hasn't yet been marked
2448	 * free.
2449	 */
2450	bpf_free(d);
2451	if (d->bd_rfilter != NULL) {
2452		free((caddr_t)d->bd_rfilter, M_BPF);
2453#ifdef BPF_JITTER
2454		if (d->bd_bfilter != NULL)
2455			bpf_destroy_jit_filter(d->bd_bfilter);
2456#endif
2457	}
2458	if (d->bd_wfilter != NULL)
2459		free((caddr_t)d->bd_wfilter, M_BPF);
2460	mtx_destroy(&d->bd_lock);
2461}
2462
2463/*
2464 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
2465 * fixed size of the link header (variable length headers not yet supported).
2466 */
2467void
2468bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2469{
2470
2471	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2472}
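/*
 * As an illustration (a hypothetical driver, not code from this file):
 * an Ethernet driver would typically attach from its interface setup
 * path with something like
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *
 * and call bpfdetach(ifp) when the interface is torn down.
 */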
2473
2474/*
2475 * Attach an interface to bpf.  ifp is a pointer to the structure
2476 * defining the interface to be attached, dlt is the link layer type,
2477 * and hdrlen is the fixed size of the link header (variable length
2478 * headers are not yet supported).
2479 */
2480void
2481bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2482{
2483	struct bpf_if *bp;
2484
2485	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
2486	if (bp == NULL)
2487		panic("bpfattach");
2488
2489	LIST_INIT(&bp->bif_dlist);
2490	LIST_INIT(&bp->bif_wlist);
2491	bp->bif_ifp = ifp;
2492	bp->bif_dlt = dlt;
2493	rw_init(&bp->bif_lock, "bpf interface lock");
2494	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
2495	*driverp = bp;
2496
2497	BPF_LOCK();
2498	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
2499	BPF_UNLOCK();
2500
2501	bp->bif_hdrlen = hdrlen;
2502
2503	if (bootverbose && IS_DEFAULT_VNET(curvnet))
2504		if_printf(ifp, "bpf attached\n");
2505}
2506
2507/*
2508 * Detach bpf from an interface. This involves detaching each descriptor
2509 * associated with the interface. Notify each descriptor as it's detached
2510 * so that any sleepers wake up and get ENXIO.
2511 */
2512void
2513bpfdetach(struct ifnet *ifp)
2514{
2515	struct bpf_if	*bp, *bp_temp;
2516	struct bpf_d	*d;
2517	int ndetached;
2518
2519	ndetached = 0;
2520
2521	BPF_LOCK();
2522	/* Find all bpf_if structs that reference ifp and detach them. */
2523	LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
2524		if (ifp != bp->bif_ifp)
2525			continue;
2526
2527		LIST_REMOVE(bp, bif_next);
2528		/* Add to to-be-freed list */
2529		LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);
2530
2531		ndetached++;
2532		/*
2533		 * Delay freeing bp until the interface is detached
2534		 * and all routes through this interface are removed.
2535		 * Mark bp as detached to turn away new consumers.
2536		 */
2537		BPFIF_WLOCK(bp);
2538		bp->flags |= BPFIF_FLAG_DYING;
2539		BPFIF_WUNLOCK(bp);
2540
2541		CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
2542		    __func__, bp->bif_dlt, bp, ifp);
2543
2544		/* Free common descriptors */
2545		while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
2546			bpf_detachd_locked(d);
2547			BPFD_LOCK(d);
2548			bpf_wakeup(d);
2549			BPFD_UNLOCK(d);
2550		}
2551
2552		/* Free writer-only descriptors */
2553		while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
2554			bpf_detachd_locked(d);
2555			BPFD_LOCK(d);
2556			bpf_wakeup(d);
2557			BPFD_UNLOCK(d);
2558		}
2559	}
2560	BPF_UNLOCK();
2561
2562#ifdef INVARIANTS
2563	if (ndetached == 0)
2564		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
2565#endif
2566}
2567
2568/*
2569 * Interface departure handler.
2570 * Note that a departure event does not guarantee the interface is going
2571 * down; renaming is currently done via a departure/arrival event pair.
2572 *
2573 * The departure handler is called after all routes pointing to the
2574 * given interface have been removed and the interface is down, so no
2575 * packets can be sent or received. We assume it is now safe
2576 * to free data allocated by BPF.
2577 */
2578static void
2579bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
2580{
2581	struct bpf_if *bp, *bp_temp;
2582	int nmatched = 0;
2583
2584	BPF_LOCK();
2585	/*
2586	 * Find matching entries in free list.
2587	 * Nothing should be found if bpfdetach() was not called.
2588	 */
2589	LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
2590		if (ifp != bp->bif_ifp)
2591			continue;
2592
2593		CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
2594		    __func__, bp, ifp);
2595
2596		LIST_REMOVE(bp, bif_next);
2597
2598		rw_destroy(&bp->bif_lock);
2599		free(bp, M_BPF);
2600
2601		nmatched++;
2602	}
2603	BPF_UNLOCK();
2604
2605	/*
2606	 * Note that we cannot zero other pointers to custom
2607	 * DLTs that may be in use by the given interface.
2608	 */
2609	if (nmatched != 0)
2610		ifp->if_bpf = NULL;
2611}
2612
2613/*
2614 * Get a list of the available data link types for the interface.
2615 */
2616static int
2617bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
2618{
2619	struct ifnet *ifp;
2620	struct bpf_if *bp;
2621	u_int *lst;
2622	int error, n, n1;
2623
2624	BPF_LOCK_ASSERT();
2625
2626	ifp = d->bd_bif->bif_ifp;
2627again:
2628	n1 = 0;
2629	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2630		if (bp->bif_ifp == ifp)
2631			n1++;
2632	}
2633	if (bfl->bfl_list == NULL) {
2634		bfl->bfl_len = n1;
2635		return (0);
2636	}
2637	if (n1 > bfl->bfl_len)
2638		return (ENOMEM);
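	/*
	 * Drop the global lock across the M_WAITOK allocation, since
	 * malloc() may sleep.  The interface list can change while the
	 * lock is dropped, so the loop below re-checks the count and
	 * restarts at "again" if more DLTs appeared in the meantime.
	 */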
2639	BPF_UNLOCK();
2640	lst = malloc(n1 * sizeof(u_int), M_TEMP, M_WAITOK);
2641	n = 0;
2642	BPF_LOCK();
2643	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2644		if (bp->bif_ifp != ifp)
2645			continue;
2646		if (n >= n1) {
2647			free(lst, M_TEMP);
2648			goto again;
2649		}
2650		lst[n] = bp->bif_dlt;
2651		n++;
2652	}
2653	BPF_UNLOCK();
2654	error = copyout(lst, bfl->bfl_list, sizeof(u_int) * n);
2655	free(lst, M_TEMP);
2656	BPF_LOCK();
2657	bfl->bfl_len = n;
2658	return (error);
2659}
2660
2661/*
2662 * Set the data link type of a BPF instance.
2663 */
2664static int
2665bpf_setdlt(struct bpf_d *d, u_int dlt)
2666{
2667	int error, opromisc;
2668	struct ifnet *ifp;
2669	struct bpf_if *bp;
2670
2671	BPF_LOCK_ASSERT();
2672
2673	if (d->bd_bif->bif_dlt == dlt)
2674		return (0);
2675	ifp = d->bd_bif->bif_ifp;
2676
2677	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2678		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
2679			break;
2680	}
2681
2682	if (bp != NULL) {
2683		opromisc = d->bd_promisc;
2684		bpf_attachd(d, bp);
2685		BPFD_LOCK(d);
2686		reset_d(d);
2687		BPFD_UNLOCK(d);
2688		if (opromisc) {
2689			error = ifpromisc(bp->bif_ifp, 1);
2690			if (error)
2691				if_printf(bp->bif_ifp,
2692					"bpf_setdlt: ifpromisc failed (%d)\n",
2693					error);
2694			else
2695				d->bd_promisc = 1;
2696		}
2697	}
2698	return (bp == NULL ? EINVAL : 0);
2699}
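/*
 * From userland the DLT is changed with the BIOCSDLT ioctl; a minimal
 * sketch (hypothetical snippet, error handling omitted):
 *
 *	u_int dlt = DLT_EN10MB;
 *
 *	ioctl(fd, BIOCSDLT, &dlt);
 *
 * BIOCGDLTLIST can be queried first to learn which DLTs are available.
 */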
2700
2701static void
2702bpf_drvinit(void *unused)
2703{
2704	struct cdev *dev;
2705
2706	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
2707	LIST_INIT(&bpf_iflist);
2708	LIST_INIT(&bpf_freelist);
2709
2710	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
2711	/* For compatibility */
2712	make_dev_alias(dev, "bpf0");
2713
2714	/* Register interface departure handler */
2715	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
2716		    ifnet_departure_event, bpf_ifdetach, NULL,
2717		    EVENTHANDLER_PRI_ANY);
2718}
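/*
 * A minimal sketch of consumer usage (hypothetical snippet; "em0" is
 * just an example name): open the device created above and bind it to
 * an interface before reading packets.
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *
 * The "bpf0" alias exists only for older consumers that expect
 * numbered device nodes.
 */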
2719
2720/*
2721 * Zero out the various packet counters associated with all of the bpf
2722 * descriptors.  At some point, we will probably want to get a bit more
2723 * granular and allow the user to specify descriptors to be zeroed.
2724 */
2725static void
2726bpf_zero_counters(void)
2727{
2728	struct bpf_if *bp;
2729	struct bpf_d *bd;
2730
2731	BPF_LOCK();
2732	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2733		BPFIF_RLOCK(bp);
2734		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2735			BPFD_LOCK(bd);
2736			bd->bd_rcount = 0;
2737			bd->bd_dcount = 0;
2738			bd->bd_fcount = 0;
2739			bd->bd_wcount = 0;
2740			bd->bd_wfcount = 0;
2741			bd->bd_zcopy = 0;
2742			BPFD_UNLOCK(bd);
2743		}
2744		BPFIF_RUNLOCK(bp);
2745	}
2746	BPF_UNLOCK();
2747}
2748
2749/*
2750 * Fill in filter statistics.
2751 */
2752static void
2753bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
2754{
2755
2756	bzero(d, sizeof(*d));
2757	BPFD_LOCK_ASSERT(bd);
2758	d->bd_structsize = sizeof(*d);
2759	/* XXX: reading should be protected by global lock */
2760	d->bd_immediate = bd->bd_immediate;
2761	d->bd_promisc = bd->bd_promisc;
2762	d->bd_hdrcmplt = bd->bd_hdrcmplt;
2763	d->bd_direction = bd->bd_direction;
2764	d->bd_feedback = bd->bd_feedback;
2765	d->bd_async = bd->bd_async;
2766	d->bd_rcount = bd->bd_rcount;
2767	d->bd_dcount = bd->bd_dcount;
2768	d->bd_fcount = bd->bd_fcount;
2769	d->bd_sig = bd->bd_sig;
2770	d->bd_slen = bd->bd_slen;
2771	d->bd_hlen = bd->bd_hlen;
2772	d->bd_bufsize = bd->bd_bufsize;
2773	d->bd_pid = bd->bd_pid;
2774	strlcpy(d->bd_ifname,
2775	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
2776	d->bd_locked = bd->bd_locked;
2777	d->bd_wcount = bd->bd_wcount;
2778	d->bd_wdcount = bd->bd_wdcount;
2779	d->bd_wfcount = bd->bd_wfcount;
2780	d->bd_zcopy = bd->bd_zcopy;
2781	d->bd_bufmode = bd->bd_bufmode;
2782}
2783
2784/*
2785 * Handle `netstat -B' stats request
2786 */
2787static int
2788bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
2789{
2790	static const struct xbpf_d zerostats;
2791	struct xbpf_d *xbdbuf, *xbd, tempstats;
2792	int index, error;
2793	struct bpf_if *bp;
2794	struct bpf_d *bd;
2795
2796	/*
2797	 * XXX This is not technically correct. It is possible for
2798	 * unprivileged users to open bpf devices. It would make sense
2799	 * if the users who opened the devices were able to retrieve
2800	 * the statistics for them, too.
2801	 */
2802	error = priv_check(req->td, PRIV_NET_BPF);
2803	if (error)
2804		return (error);
2805	/*
2806	 * Check to see if the user is requesting that the counters be
2807	 * zeroed out.  Explicitly check that the supplied data is zeroed,
2808	 * as we aren't allowing the user to set the counters currently.
2809	 */
2810	if (req->newptr != NULL) {
2811		if (req->newlen != sizeof(tempstats))
2812			return (EINVAL);
2813		memset(&tempstats, 0, sizeof(tempstats));
2814		error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
2815		if (error)
2816			return (error);
2817		if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
2818			return (EINVAL);
2819		bpf_zero_counters();
2820		return (0);
2821	}
2822	if (req->oldptr == NULL)
2823		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
2824	if (bpf_bpfd_cnt == 0)
2825		return (SYSCTL_OUT(req, 0, 0));
2826	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
2827	BPF_LOCK();
2828	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
2829		BPF_UNLOCK();
2830		free(xbdbuf, M_BPF);
2831		return (ENOMEM);
2832	}
2833	index = 0;
2834	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
2835		BPFIF_RLOCK(bp);
2836		/* Send writers-only first */
2837		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
2838			xbd = &xbdbuf[index++];
2839			BPFD_LOCK(bd);
2840			bpfstats_fill_xbpf(xbd, bd);
2841			BPFD_UNLOCK(bd);
2842		}
2843		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
2844			xbd = &xbdbuf[index++];
2845			BPFD_LOCK(bd);
2846			bpfstats_fill_xbpf(xbd, bd);
2847			BPFD_UNLOCK(bd);
2848		}
2849		BPFIF_RUNLOCK(bp);
2850	}
2851	BPF_UNLOCK();
2852	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
2853	free(xbdbuf, M_BPF);
2854	return (error);
2855}
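/*
 * The handler above is assumed to be registered as the net.bpf.stats
 * sysctl.  Reading it returns an array of struct xbpf_d, one per
 * descriptor; writing an all-zero struct xbpf_d zeroes every counter,
 * e.g. (hypothetical snippet):
 *
 *	struct xbpf_d zero;
 *
 *	memset(&zero, 0, sizeof(zero));
 *	sysctlbyname("net.bpf.stats", NULL, NULL, &zero, sizeof(zero));
 */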
2856
2857SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);
2858
2859#else /* !DEV_BPF && !NETGRAPH_BPF */
2860/*
2861 * NOP stubs to allow bpf-using drivers to load and function.
2862 *
2863 * A 'better' implementation would allow the core bpf functionality
2864 * to be loaded at runtime.
2865 */
2866static struct bpf_if bp_null;
2867
2868void
2869bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
2870{
2871}
2872
2873void
2874bpf_mtap(struct bpf_if *bp, struct mbuf *m)
2875{
2876}
2877
2878void
2879bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
2880{
2881}
2882
2883void
2884bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
2885{
2886
2887	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
2888}
2889
2890void
2891bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
2892{
2893
2894	*driverp = &bp_null;
2895}
2896
2897void
2898bpfdetach(struct ifnet *ifp)
2899{
2900}
2901
2902u_int
2903bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
2904{
2905	return (-1);	/* "no filter" behaviour */
2906}
2907
2908int
2909bpf_validate(const struct bpf_insn *f, int len)
2910{
2911	return (0);		/* false */
2912}
2913
2914#endif /* !DEV_BPF && !NETGRAPH_BPF */
2915