/* bpf.c — FreeBSD revision 189286 */
1163953Srrs/*- 2169382Srrs * Copyright (c) 1990, 1991, 1993 3163953Srrs * The Regents of the University of California. All rights reserved. 4163953Srrs * 5163953Srrs * This code is derived from the Stanford/CMU enet packet filter, 6163953Srrs * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7163953Srrs * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8163953Srrs * Berkeley Laboratory. 9163953Srrs * 10163953Srrs * Redistribution and use in source and binary forms, with or without 11163953Srrs * modification, are permitted provided that the following conditions 12163953Srrs * are met: 13163953Srrs * 1. Redistributions of source code must retain the above copyright 14163953Srrs * notice, this list of conditions and the following disclaimer. 15163953Srrs * 2. Redistributions in binary form must reproduce the above copyright 16163953Srrs * notice, this list of conditions and the following disclaimer in the 17163953Srrs * documentation and/or other materials provided with the distribution. 18163953Srrs * 4. Neither the name of the University nor the names of its contributors 19163953Srrs * may be used to endorse or promote products derived from this software 20163953Srrs * without specific prior written permission. 21163953Srrs * 22163953Srrs * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23163953Srrs * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24163953Srrs * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25163953Srrs * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26163953Srrs * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27163953Srrs * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28163953Srrs * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29163953Srrs * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30163953Srrs * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31163953Srrs * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32163953Srrs * SUCH DAMAGE. 33163953Srrs * 34163953Srrs * @(#)bpf.c 8.4 (Berkeley) 1/9/95 35163953Srrs */ 36163953Srrs 37163953Srrs#include <sys/cdefs.h> 38167598Srrs__FBSDID("$FreeBSD: head/sys/net/bpf.c 189286 2009-03-02 19:42:01Z csjp $"); 39163953Srrs 40163953Srrs#include "opt_bpf.h" 41163953Srrs#include "opt_mac.h" 42163953Srrs#include "opt_netgraph.h" 43163953Srrs 44163953Srrs#include <sys/types.h> 45163953Srrs#include <sys/param.h> 46163953Srrs#include <sys/systm.h> 47170091Srrs#include <sys/conf.h> 48172091Srrs#include <sys/fcntl.h> 49163953Srrs#include <sys/malloc.h> 50163953Srrs#include <sys/mbuf.h> 51163953Srrs#include <sys/time.h> 52163953Srrs#include <sys/priv.h> 53163953Srrs#include <sys/proc.h> 54163953Srrs#include <sys/signalvar.h> 55163953Srrs#include <sys/filio.h> 56163953Srrs#include <sys/sockio.h> 57165220Srrs#include <sys/ttycom.h> 58165220Srrs#include <sys/uio.h> 59165220Srrs#include <sys/vimage.h> 60165220Srrs 61165220Srrs#include <sys/event.h> 62163953Srrs#include <sys/file.h> 63163953Srrs#include <sys/poll.h> 64165220Srrs#include <sys/proc.h> 65163953Srrs 66163953Srrs#include <sys/socket.h> 67163953Srrs 68165220Srrs#include <net/if.h> 69165220Srrs#include <net/bpf.h> 70165220Srrs#include <net/bpf_buffer.h> 71165220Srrs#ifdef BPF_JITTER 72165220Srrs#include <net/bpf_jitter.h> 73165220Srrs#endif 74163953Srrs#include <net/bpf_zerocopy.h> 75163953Srrs#include 
<net/bpfdesc.h> 76163953Srrs 77163953Srrs#include <netinet/in.h> 78163953Srrs#include <netinet/if_ether.h> 79163953Srrs#include <sys/kernel.h> 80163953Srrs#include <sys/sysctl.h> 81163953Srrs 82170181Srrs#include <net80211/ieee80211_freebsd.h> 83163953Srrs 84163953Srrs#include <security/mac/mac_framework.h> 85163953Srrs 86163953SrrsMALLOC_DEFINE(M_BPF, "BPF", "BPF data"); 87163953Srrs 88169420Srrs#if defined(DEV_BPF) || defined(NETGRAPH_BPF) 89169420Srrs 90163953Srrs#define PRINET 26 /* interruptible */ 91163953Srrs 92163953Srrs/* 93163953Srrs * bpf_iflist is a list of BPF interface structures, each corresponding to a 94169420Srrs * specific DLT. The same network interface might have several BPF interface 95169420Srrs * structures registered by different layers in the stack (i.e., 802.11 96169420Srrs * frames, ethernet frames, etc). 97163953Srrs */ 98163953Srrsstatic LIST_HEAD(, bpf_if) bpf_iflist; 99163953Srrsstatic struct mtx bpf_mtx; /* bpf global lock */ 100163953Srrsstatic int bpf_bpfd_cnt; 101169352Srrs 102170181Srrsstatic void bpf_attachd(struct bpf_d *, struct bpf_if *); 103168299Srrsstatic void bpf_detachd(struct bpf_d *); 104168299Srrsstatic void bpf_freed(struct bpf_d *); 105163953Srrsstatic int bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **, 106163953Srrs struct sockaddr *, int *, struct bpf_insn *); 107163953Srrsstatic int bpf_setif(struct bpf_d *, struct ifreq *); 108163953Srrsstatic void bpf_timed_out(void *); 109163953Srrsstatic __inline void 110169352Srrs bpf_wakeup(struct bpf_d *); 111170181Srrsstatic void catchpacket(struct bpf_d *, u_char *, u_int, u_int, 112168299Srrs void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int), 113168299Srrs struct timeval *); 114163953Srrsstatic void reset_d(struct bpf_d *); 115163953Srrsstatic int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd); 116163953Srrsstatic int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); 117163953Srrsstatic int bpf_setdlt(struct bpf_d *, u_int); 
118163953Srrsstatic void filt_bpfdetach(struct knote *); 119163953Srrsstatic int filt_bpfread(struct knote *, long); 120169352Srrsstatic void bpf_drvinit(void *); 121170181Srrsstatic int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS); 122168299Srrs 123168299SrrsSYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl"); 124163953Srrsint bpf_maxinsns = BPF_MAXINSNS; 125163953SrrsSYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW, 126163953Srrs &bpf_maxinsns, 0, "Maximum bpf program instructions"); 127163953Srrsstatic int bpf_zerocopy_enable = 1; 128163953SrrsSYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW, 129169352Srrs &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions"); 130170181SrrsSYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW, 131171440Srrs bpf_stats_sysctl, "bpf statistics portal"); 132171440Srrs 133163953Srrsstatic d_open_t bpfopen; 134163953Srrsstatic d_read_t bpfread; 135163953Srrsstatic d_write_t bpfwrite; 136163953Srrsstatic d_ioctl_t bpfioctl; 137163953Srrsstatic d_poll_t bpfpoll; 138169352Srrsstatic d_kqfilter_t bpfkqfilter; 139170181Srrs 140168299Srrsstatic struct cdevsw bpf_cdevsw = { 141168299Srrs .d_version = D_VERSION, 142163953Srrs .d_open = bpfopen, 143163953Srrs .d_read = bpfread, 144163953Srrs .d_write = bpfwrite, 145163953Srrs .d_ioctl = bpfioctl, 146163953Srrs .d_poll = bpfpoll, 147169352Srrs .d_name = "bpf", 148170181Srrs .d_kqfilter = bpfkqfilter, 149168299Srrs}; 150168299Srrs 151163953Srrsstatic struct filterops bpfread_filtops = 152163953Srrs { 1, NULL, filt_bpfdetach, filt_bpfread }; 153163953Srrs 154163953Srrs/* 155163953Srrs * Wrapper functions for various buffering methods. If the set of buffer 156163953Srrs * modes expands, we will probably want to introduce a switch data structure 157170181Srrs * similar to protosw, et. 
158168299Srrs */ 159168299Srrsstatic void 160163953Srrsbpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src, 161163953Srrs u_int len) 162163953Srrs{ 163169420Srrs 164170181Srrs BPFD_LOCK_ASSERT(d); 165163953Srrs 166163953Srrs switch (d->bd_bufmode) { 167163953Srrs case BPF_BUFMODE_BUFFER: 168163953Srrs return (bpf_buffer_append_bytes(d, buf, offset, src, len)); 169163953Srrs 170171158Srrs case BPF_BUFMODE_ZBUF: 171171158Srrs d->bd_zcopy++; 172171158Srrs return (bpf_zerocopy_append_bytes(d, buf, offset, src, len)); 173171158Srrs 174171158Srrs default: 175171158Srrs panic("bpf_buf_append_bytes"); 176171158Srrs } 177171158Srrs} 178171158Srrs 179171158Srrsstatic void 180171158Srrsbpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src, 181171158Srrs u_int len) 182171158Srrs{ 183171158Srrs 184171158Srrs BPFD_LOCK_ASSERT(d); 185171158Srrs 186171158Srrs switch (d->bd_bufmode) { 187171158Srrs case BPF_BUFMODE_BUFFER: 188171158Srrs return (bpf_buffer_append_mbuf(d, buf, offset, src, len)); 189171158Srrs 190171531Srrs case BPF_BUFMODE_ZBUF: 191171158Srrs d->bd_zcopy++; 192171158Srrs return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len)); 193171158Srrs 194171158Srrs default: 195171158Srrs panic("bpf_buf_append_mbuf"); 196171158Srrs } 197171158Srrs} 198171158Srrs 199171158Srrs/* 200171158Srrs * This function gets called when the free buffer is re-assigned. 
201171158Srrs */ 202171158Srrsstatic void 203171158Srrsbpf_buf_reclaimed(struct bpf_d *d) 204171158Srrs{ 205171158Srrs 206171158Srrs BPFD_LOCK_ASSERT(d); 207171158Srrs 208171158Srrs switch (d->bd_bufmode) { 209171158Srrs case BPF_BUFMODE_BUFFER: 210171158Srrs return; 211171158Srrs 212171158Srrs case BPF_BUFMODE_ZBUF: 213171158Srrs bpf_zerocopy_buf_reclaimed(d); 214171158Srrs return; 215171158Srrs 216171158Srrs default: 217171158Srrs panic("bpf_buf_reclaimed"); 218171158Srrs } 219171158Srrs} 220171158Srrs 221171158Srrs/* 222171158Srrs * If the buffer mechanism has a way to decide that a held buffer can be made 223171158Srrs * free, then it is exposed via the bpf_canfreebuf() interface. (1) is 224171158Srrs * returned if the buffer can be discarded, (0) is returned if it cannot. 225171158Srrs */ 226171158Srrsstatic int 227171158Srrsbpf_canfreebuf(struct bpf_d *d) 228171158Srrs{ 229171158Srrs 230171158Srrs BPFD_LOCK_ASSERT(d); 231163953Srrs 232163953Srrs switch (d->bd_bufmode) { 233163953Srrs case BPF_BUFMODE_ZBUF: 234163953Srrs return (bpf_zerocopy_canfreebuf(d)); 235163953Srrs } 236163953Srrs return (0); 237163953Srrs} 238163953Srrs 239163953Srrs/* 240163953Srrs * Allow the buffer model to indicate that the current store buffer is 241163953Srrs * immutable, regardless of the appearance of space. Return (1) if the 242163953Srrs * buffer is writable, and (0) if not. 243163953Srrs */ 244163953Srrsstatic int 245163953Srrsbpf_canwritebuf(struct bpf_d *d) 246163953Srrs{ 247163953Srrs 248163953Srrs BPFD_LOCK_ASSERT(d); 249163953Srrs 250170744Srrs switch (d->bd_bufmode) { 251170744Srrs case BPF_BUFMODE_ZBUF: 252170744Srrs return (bpf_zerocopy_canwritebuf(d)); 253163953Srrs } 254163953Srrs return (1); 255164181Srrs} 256163953Srrs 257163953Srrs/* 258163953Srrs * Notify buffer model that an attempt to write to the store buffer has 259163953Srrs * resulted in a dropped packet, in which case the buffer may be considered 260163953Srrs * full. 
261163953Srrs */ 262163953Srrsstatic void 263163953Srrsbpf_buffull(struct bpf_d *d) 264163953Srrs{ 265163953Srrs 266163953Srrs BPFD_LOCK_ASSERT(d); 267163953Srrs 268163953Srrs switch (d->bd_bufmode) { 269163953Srrs case BPF_BUFMODE_ZBUF: 270163953Srrs bpf_zerocopy_buffull(d); 271163953Srrs break; 272163953Srrs } 273163953Srrs} 274163953Srrs 275172090Srrs/* 276163953Srrs * Notify the buffer model that a buffer has moved into the hold position. 277163953Srrs */ 278163953Srrsvoid 279163953Srrsbpf_bufheld(struct bpf_d *d) 280163953Srrs{ 281163953Srrs 282163953Srrs BPFD_LOCK_ASSERT(d); 283169420Srrs 284163979Sru switch (d->bd_bufmode) { 285163953Srrs case BPF_BUFMODE_ZBUF: 286163953Srrs bpf_zerocopy_bufheld(d); 287169655Srrs break; 288163953Srrs } 289163953Srrs} 290163953Srrs 291163953Srrsstatic void 292163953Srrsbpf_free(struct bpf_d *d) 293163953Srrs{ 294163953Srrs 295164181Srrs switch (d->bd_bufmode) { 296163953Srrs case BPF_BUFMODE_BUFFER: 297163953Srrs return (bpf_buffer_free(d)); 298163953Srrs 299170744Srrs case BPF_BUFMODE_ZBUF: 300170744Srrs return (bpf_zerocopy_free(d)); 301170744Srrs 302163953Srrs default: 303163953Srrs panic("bpf_buf_free"); 304163953Srrs } 305163953Srrs} 306163953Srrs 307163953Srrsstatic int 308163953Srrsbpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio) 309163953Srrs{ 310170091Srrs 311163953Srrs if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 312163953Srrs return (EOPNOTSUPP); 313164181Srrs return (bpf_buffer_uiomove(d, buf, len, uio)); 314164181Srrs} 315164181Srrs 316164181Srrsstatic int 317164181Srrsbpf_ioctl_sblen(struct bpf_d *d, u_int *i) 318164181Srrs{ 319164181Srrs 320171158Srrs if (d->bd_bufmode != BPF_BUFMODE_BUFFER) 321164181Srrs return (EOPNOTSUPP); 322164181Srrs return (bpf_buffer_ioctl_sblen(d, i)); 323164181Srrs} 324164181Srrs 325164181Srrsstatic int 326164181Srrsbpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i) 327170091Srrs{ 328163953Srrs 329164181Srrs if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 
330164181Srrs return (EOPNOTSUPP); 331164181Srrs return (bpf_zerocopy_ioctl_getzmax(td, d, i)); 332164181Srrs} 333163953Srrs 334170091Srrsstatic int 335163953Srrsbpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 336163953Srrs{ 337169420Srrs 338163953Srrs if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 339163953Srrs return (EOPNOTSUPP); 340163953Srrs return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz)); 341163953Srrs} 342163953Srrs 343163953Srrsstatic int 344163953Srrsbpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz) 345163953Srrs{ 346163953Srrs 347163953Srrs if (d->bd_bufmode != BPF_BUFMODE_ZBUF) 348163953Srrs return (EOPNOTSUPP); 349168943Srrs return (bpf_zerocopy_ioctl_setzbuf(td, d, bz)); 350163953Srrs} 351163953Srrs 352163953Srrs/* 353163953Srrs * General BPF functions. 354163953Srrs */ 355163953Srrsstatic int 356163953Srrsbpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp, 357163953Srrs struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter) 358163953Srrs{ 359163953Srrs const struct ieee80211_bpf_params *p; 360169655Srrs struct ether_header *eh; 361163953Srrs struct mbuf *m; 362163953Srrs int error; 363163953Srrs int len; 364163953Srrs int hlen; 365163953Srrs int slen; 366163953Srrs 367163953Srrs /* 368163953Srrs * Build a sockaddr based on the data link layer type. 369163953Srrs * We do this at this level because the ethernet header 370170181Srrs * is copied directly into the data field of the sockaddr. 371163953Srrs * In the case of SLIP, there is no header and the packet 372163953Srrs * is forwarded as is. 373163953Srrs * Also, we are careful to leave room at the front of the mbuf 374163953Srrs * for the link level header. 
375163953Srrs */ 376163953Srrs switch (linktype) { 377163953Srrs 378163953Srrs case DLT_SLIP: 379163953Srrs sockp->sa_family = AF_INET; 380163953Srrs hlen = 0; 381163953Srrs break; 382163953Srrs 383163953Srrs case DLT_EN10MB: 384163953Srrs sockp->sa_family = AF_UNSPEC; 385163953Srrs /* XXX Would MAXLINKHDR be better? */ 386172090Srrs hlen = ETHER_HDR_LEN; 387170056Srrs break; 388163953Srrs 389163953Srrs case DLT_FDDI: 390163953Srrs sockp->sa_family = AF_IMPLINK; 391163953Srrs hlen = 0; 392163953Srrs break; 393163953Srrs 394163953Srrs case DLT_RAW: 395163953Srrs sockp->sa_family = AF_UNSPEC; 396163953Srrs hlen = 0; 397163953Srrs break; 398163953Srrs 399163953Srrs case DLT_NULL: 400163953Srrs /* 401163953Srrs * null interface types require a 4 byte pseudo header which 402169420Srrs * corresponds to the address family of the packet. 403169420Srrs */ 404169420Srrs sockp->sa_family = AF_UNSPEC; 405163953Srrs hlen = 4; 406170181Srrs break; 407168299Srrs 408163953Srrs case DLT_ATM_RFC1483: 409163953Srrs /* 410171477Srrs * en atm driver requires 4-byte atm pseudo header. 411171477Srrs * though it isn't standard, vpi:vci needs to be 412171477Srrs * specified anyway. 
413171477Srrs */ 414171477Srrs sockp->sa_family = AF_UNSPEC; 415171477Srrs hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */ 416171477Srrs break; 417171477Srrs 418171477Srrs case DLT_PPP: 419171477Srrs sockp->sa_family = AF_UNSPEC; 420171477Srrs hlen = 4; /* This should match PPP_HDRLEN */ 421163953Srrs break; 422163953Srrs 423163953Srrs case DLT_IEEE802_11: /* IEEE 802.11 wireless */ 424163953Srrs sockp->sa_family = AF_IEEE80211; 425163953Srrs hlen = 0; 426163953Srrs break; 427163953Srrs 428163953Srrs case DLT_IEEE802_11_RADIO: /* IEEE 802.11 wireless w/ phy params */ 429171943Srrs sockp->sa_family = AF_IEEE80211; 430171943Srrs sockp->sa_len = 12; /* XXX != 0 */ 431171943Srrs hlen = sizeof(struct ieee80211_bpf_params); 432171943Srrs break; 433171943Srrs 434171943Srrs default: 435171943Srrs return (EIO); 436163953Srrs } 437163953Srrs 438163953Srrs len = uio->uio_resid; 439163953Srrs 440163953Srrs if (len - hlen > ifp->if_mtu) 441163953Srrs return (EMSGSIZE); 442163953Srrs 443163953Srrs if ((unsigned)len > MJUM16BYTES) 444163953Srrs return (EIO); 445163953Srrs 446165220Srrs if (len <= MHLEN) 447163953Srrs MGETHDR(m, M_WAIT, MT_DATA); 448165220Srrs else if (len <= MCLBYTES) 449171477Srrs m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR); 450165220Srrs else 451163953Srrs m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR, 452163953Srrs#if (MJUMPAGESIZE > MCLBYTES) 453163953Srrs len <= MJUMPAGESIZE ? MJUMPAGESIZE : 454163953Srrs#endif 455163953Srrs (len <= MJUM9BYTES ? 
MJUM9BYTES : MJUM16BYTES)); 456163953Srrs m->m_pkthdr.len = m->m_len = len; 457163953Srrs m->m_pkthdr.rcvif = NULL; 458163953Srrs *mp = m; 459163953Srrs 460163953Srrs if (m->m_len < hlen) { 461163953Srrs error = EPERM; 462163953Srrs goto bad; 463163953Srrs } 464163953Srrs 465163953Srrs error = uiomove(mtod(m, u_char *), len, uio); 466163953Srrs if (error) 467163953Srrs goto bad; 468163953Srrs 469165647Srrs slen = bpf_filter(wfilter, mtod(m, u_char *), len, len); 470163953Srrs if (slen == 0) { 471163953Srrs error = EPERM; 472163953Srrs goto bad; 473163953Srrs } 474163953Srrs 475163953Srrs /* Check for multicast destination */ 476163953Srrs switch (linktype) { 477163953Srrs case DLT_EN10MB: 478163953Srrs eh = mtod(m, struct ether_header *); 479163953Srrs if (ETHER_IS_MULTICAST(eh->ether_dhost)) { 480163953Srrs if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost, 481170181Srrs ETHER_ADDR_LEN) == 0) 482168299Srrs m->m_flags |= M_BCAST; 483163953Srrs else 484163953Srrs m->m_flags |= M_MCAST; 485163953Srrs } 486163953Srrs break; 487163953Srrs } 488163953Srrs 489163953Srrs /* 490163953Srrs * Make room for link header, and copy it to sockaddr 491163953Srrs */ 492163953Srrs if (hlen != 0) { 493163953Srrs if (sockp->sa_family == AF_IEEE80211) { 494163953Srrs /* 495163953Srrs * Collect true length from the parameter header 496163953Srrs * NB: sockp is known to be zero'd so if we do a 497163953Srrs * short copy unspecified parameters will be 498163953Srrs * zero. 
499163953Srrs * NB: packet may not be aligned after stripping 500163953Srrs * bpf params 501163953Srrs * XXX check ibp_vers 502163953Srrs */ 503163953Srrs p = mtod(m, const struct ieee80211_bpf_params *); 504163953Srrs hlen = p->ibp_len; 505163953Srrs if (hlen > sizeof(sockp->sa_data)) { 506163953Srrs error = EINVAL; 507163953Srrs goto bad; 508163953Srrs } 509163953Srrs } 510163953Srrs bcopy(m->m_data, sockp->sa_data, hlen); 511163953Srrs } 512163953Srrs *hdrlen = hlen; 513163953Srrs 514163953Srrs return (0); 515163953Srrsbad: 516163953Srrs m_freem(m); 517163953Srrs return (error); 518163953Srrs} 519163953Srrs 520163953Srrs/* 521163953Srrs * Attach file to the bpf interface, i.e. make d listen on bp. 522163953Srrs */ 523163953Srrsstatic void 524163953Srrsbpf_attachd(struct bpf_d *d, struct bpf_if *bp) 525163953Srrs{ 526169420Srrs /* 527163953Srrs * Point d at bp, and add d to the interface's list of listeners. 528163953Srrs * Finally, point the driver's bpf cookie at the interface so 529163953Srrs * it will divert packets to bpf. 530163953Srrs */ 531163953Srrs BPFIF_LOCK(bp); 532163953Srrs d->bd_bif = bp; 533163953Srrs LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next); 534163953Srrs 535163953Srrs bpf_bpfd_cnt++; 536163953Srrs BPFIF_UNLOCK(bp); 537167598Srrs} 538167598Srrs 539167598Srrs/* 540167598Srrs * Detach a file from its interface. 541167598Srrs */ 542167598Srrsstatic void 543167598Srrsbpf_detachd(struct bpf_d *d) 544167598Srrs{ 545167598Srrs int error; 546167598Srrs struct bpf_if *bp; 547167598Srrs struct ifnet *ifp; 548167598Srrs 549167598Srrs bp = d->bd_bif; 550167598Srrs BPFIF_LOCK(bp); 551167598Srrs BPFD_LOCK(d); 552172091Srrs ifp = d->bd_bif->bif_ifp; 553172091Srrs 554172091Srrs /* 555172091Srrs * Remove d from the interface's descriptor list. 
556172091Srrs */ 557172091Srrs LIST_REMOVE(d, bd_next); 558172091Srrs 559172091Srrs bpf_bpfd_cnt--; 560172091Srrs d->bd_bif = NULL; 561172091Srrs BPFD_UNLOCK(d); 562172091Srrs BPFIF_UNLOCK(bp); 563172091Srrs 564172091Srrs /* 565172091Srrs * Check if this descriptor had requested promiscuous mode. 566172091Srrs * If so, turn it off. 567172091Srrs */ 568172091Srrs if (d->bd_promisc) { 569172091Srrs d->bd_promisc = 0; 570172091Srrs CURVNET_SET(ifp->if_vnet); 571172091Srrs error = ifpromisc(ifp, 0); 572172091Srrs CURVNET_RESTORE(); 573172091Srrs if (error != 0 && error != ENXIO) { 574167598Srrs /* 575163953Srrs * ENXIO can happen if a pccard is unplugged 576172090Srrs * Something is really wrong if we were able to put 577163953Srrs * the driver into promiscuous mode, but can't 578163953Srrs * take it out. 579163953Srrs */ 580163953Srrs if_printf(bp->bif_ifp, 581163953Srrs "bpf_detach: ifpromisc failed (%d)\n", error); 582163953Srrs } 583163953Srrs } 584163953Srrs} 585163953Srrs 586172090Srrs/* 587163953Srrs * Close the descriptor by detaching it from its interface, 588163953Srrs * deallocating its buffers, and marking it free. 589169420Srrs */ 590163953Srrsstatic void 591163953Srrsbpf_dtor(void *data) 592171440Srrs{ 593171440Srrs struct bpf_d *d = data; 594171440Srrs 595171440Srrs BPFD_LOCK(d); 596171440Srrs if (d->bd_state == BPF_WAITING) 597171440Srrs callout_stop(&d->bd_callout); 598171440Srrs d->bd_state = BPF_IDLE; 599171477Srrs BPFD_UNLOCK(d); 600171440Srrs funsetown(&d->bd_sigio); 601171440Srrs mtx_lock(&bpf_mtx); 602171440Srrs if (d->bd_bif) 603171440Srrs bpf_detachd(d); 604171440Srrs mtx_unlock(&bpf_mtx); 605171440Srrs selwakeuppri(&d->bd_sel, PRINET); 606171440Srrs#ifdef MAC 607171440Srrs mac_bpfdesc_destroy(d); 608171440Srrs#endif /* MAC */ 609171440Srrs knlist_destroy(&d->bd_sel.si_note); 610171440Srrs bpf_freed(d); 611163953Srrs free(d, M_BPF); 612171477Srrs} 613163953Srrs 614163953Srrs/* 615163953Srrs * Open ethernet device. 
Returns ENXIO for illegal minor device number, 616163953Srrs * EBUSY if file is open by another process. 617163953Srrs */ 618163953Srrs/* ARGSUSED */ 619172090Srrsstatic int 620172090Srrsbpfopen(struct cdev *dev, int flags, int fmt, struct thread *td) 621172090Srrs{ 622172090Srrs struct bpf_d *d; 623172090Srrs int error; 624169420Srrs 625163953Srrs d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO); 626163953Srrs error = devfs_set_cdevpriv(d, bpf_dtor); 627163953Srrs if (error != 0) { 628163953Srrs free(d, M_BPF); 629171440Srrs return (error); 630163953Srrs } 631172090Srrs 632163953Srrs /* 633163953Srrs * For historical reasons, perform a one-time initialization call to 634163953Srrs * the buffer routines, even though we're not yet committed to a 635163953Srrs * particular buffer method. 636163953Srrs */ 637163953Srrs bpf_buffer_init(d); 638168859Srrs d->bd_bufmode = BPF_BUFMODE_BUFFER; 639168859Srrs d->bd_sig = SIGIO; 640168859Srrs d->bd_direction = BPF_D_INOUT; 641172090Srrs d->bd_pid = td->td_proc->p_pid; 642172090Srrs#ifdef MAC 643172090Srrs mac_bpfdesc_init(d); 644172090Srrs mac_bpfdesc_create(td->td_ucred, d); 645172090Srrs#endif 646172090Srrs mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF); 647172090Srrs callout_init(&d->bd_callout, CALLOUT_MPSAFE); 648172090Srrs knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL); 649171990Srrs 650171943Srrs return (0); 651171943Srrs} 652172090Srrs 653172090Srrs/* 654172090Srrs * bpfread - read next chunk of packets from buffers 655169420Srrs */ 656163953Srrsstatic int 657163953Srrsbpfread(struct cdev *dev, struct uio *uio, int ioflag) 658163953Srrs{ 659163953Srrs struct bpf_d *d; 660163953Srrs int timed_out; 661163953Srrs int error; 662163953Srrs 663163953Srrs error = devfs_get_cdevpriv((void **)&d); 664163953Srrs if (error != 0) 665172090Srrs return (error); 666172090Srrs 667172090Srrs /* 668172090Srrs * Restrict application to use a buffer the same size as 669172090Srrs * as kernel buffers. 
670169420Srrs */ 671169420Srrs if (uio->uio_resid != d->bd_bufsize) 672163953Srrs return (EINVAL); 673163953Srrs 674165220Srrs BPFD_LOCK(d); 675165220Srrs d->bd_pid = curthread->td_proc->p_pid; 676165220Srrs if (d->bd_bufmode != BPF_BUFMODE_BUFFER) { 677163953Srrs BPFD_UNLOCK(d); 678163953Srrs return (EOPNOTSUPP); 679163953Srrs } 680163953Srrs if (d->bd_state == BPF_WAITING) 681163953Srrs callout_stop(&d->bd_callout); 682163953Srrs timed_out = (d->bd_state == BPF_TIMED_OUT); 683163953Srrs d->bd_state = BPF_IDLE; 684163953Srrs /* 685165220Srrs * If the hold buffer is empty, then do a timed sleep, which 686163953Srrs * ends when the timeout expires or when enough packets 687163953Srrs * have arrived to fill the store buffer. 688163953Srrs */ 689163953Srrs while (d->bd_hbuf == NULL) { 690165220Srrs if ((d->bd_immediate || timed_out) && d->bd_slen != 0) { 691165220Srrs /* 692165220Srrs * A packet(s) either arrived since the previous 693163953Srrs * read or arrived while we were asleep. 694172090Srrs * Rotate the buffers and return what's here. 695172090Srrs */ 696172090Srrs ROTATE_BUFFERS(d); 697172090Srrs break; 698172090Srrs } 699172090Srrs 700172090Srrs /* 701172090Srrs * No data is available, check to see if the bpf device 702172090Srrs * is still pointed at a real interface. If not, return 703172090Srrs * ENXIO so that the userland process knows to rebind 704172090Srrs * it before using it again. 
705172090Srrs */ 706172090Srrs if (d->bd_bif == NULL) { 707163996Srrs BPFD_UNLOCK(d); 708172090Srrs return (ENXIO); 709172090Srrs } 710172090Srrs 711163953Srrs if (ioflag & O_NONBLOCK) { 712163953Srrs BPFD_UNLOCK(d); 713163953Srrs return (EWOULDBLOCK); 714163953Srrs } 715170056Srrs error = msleep(d, &d->bd_mtx, PRINET|PCATCH, 716163953Srrs "bpf", d->bd_rtout); 717171943Srrs if (error == EINTR || error == ERESTART) { 718163953Srrs BPFD_UNLOCK(d); 719163953Srrs return (error); 720163953Srrs } 721163953Srrs if (error == EWOULDBLOCK) { 722172090Srrs /* 723163953Srrs * On a timeout, return what's in the buffer, 724163953Srrs * which may be nothing. If there is something 725169378Srrs * in the store buffer, we can rotate the buffers. 726163953Srrs */ 727163953Srrs if (d->bd_hbuf) 728163953Srrs /* 729163953Srrs * We filled up the buffer in between 730163953Srrs * getting the timeout and arriving 731163953Srrs * here, so we don't need to rotate. 732163953Srrs */ 733171440Srrs break; 734163953Srrs 735171158Srrs if (d->bd_slen == 0) { 736171158Srrs BPFD_UNLOCK(d); 737163953Srrs return (0); 738163953Srrs } 739163953Srrs ROTATE_BUFFERS(d); 740163953Srrs break; 741163953Srrs } 742163953Srrs } 743163953Srrs /* 744163953Srrs * At this point, we know we have something in the hold slot. 745163953Srrs */ 746163953Srrs BPFD_UNLOCK(d); 747163953Srrs 748166675Srrs /* 749166675Srrs * Move data from hold buffer into user space. 750163953Srrs * We know the entire buffer is transferred since 751163953Srrs * we checked above that the read buffer is bpf_bufsize bytes. 752171943Srrs * 753163953Srrs * XXXRW: More synchronization needed here: what if a second thread 754171990Srrs * issues a read on the same fd at the same time? Don't want this 755171990Srrs * getting invalidated. 
756163953Srrs */ 757163953Srrs error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio); 758163953Srrs 759163953Srrs BPFD_LOCK(d); 760163953Srrs d->bd_fbuf = d->bd_hbuf; 761163953Srrs d->bd_hbuf = NULL; 762163953Srrs d->bd_hlen = 0; 763163953Srrs bpf_buf_reclaimed(d); 764163953Srrs BPFD_UNLOCK(d); 765163953Srrs 766163953Srrs return (error); 767163953Srrs} 768172090Srrs 769172090Srrs/* 770172090Srrs * If there are processes sleeping on this descriptor, wake them up. 771172090Srrs */ 772172090Srrsstatic __inline void 773169420Srrsbpf_wakeup(struct bpf_d *d) 774169420Srrs{ 775163953Srrs 776163953Srrs BPFD_LOCK_ASSERT(d); 777163953Srrs if (d->bd_state == BPF_WAITING) { 778163953Srrs callout_stop(&d->bd_callout); 779163953Srrs d->bd_state = BPF_IDLE; 780163953Srrs } 781163953Srrs wakeup(d); 782163953Srrs if (d->bd_async && d->bd_sig && d->bd_sigio) 783163953Srrs pgsigio(&d->bd_sigio, d->bd_sig, 0); 784163953Srrs 785163953Srrs selwakeuppri(&d->bd_sel, PRINET); 786165220Srrs KNOTE_LOCKED(&d->bd_sel.si_note, 0); 787163953Srrs} 788163953Srrs 789163953Srrsstatic void 790163953Srrsbpf_timed_out(void *arg) 791165220Srrs{ 792165220Srrs struct bpf_d *d = (struct bpf_d *)arg; 793165220Srrs 794163953Srrs BPFD_LOCK(d); 795172090Srrs if (d->bd_state == BPF_WAITING) { 796172090Srrs d->bd_state = BPF_TIMED_OUT; 797172090Srrs if (d->bd_slen != 0) 798172090Srrs bpf_wakeup(d); 799172090Srrs } 800172090Srrs BPFD_UNLOCK(d); 801172090Srrs} 802172090Srrs 803172090Srrsstatic int 804172090Srrsbpf_ready(struct bpf_d *d) 805172090Srrs{ 806172090Srrs 807163996Srrs BPFD_LOCK_ASSERT(d); 808172090Srrs 809172090Srrs if (!bpf_canfreebuf(d) && d->bd_hlen != 0) 810172090Srrs return (1); 811163953Srrs if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && 812163953Srrs d->bd_slen != 0) 813163953Srrs return (1); 814163953Srrs return (0); 815163953Srrs} 816172090Srrs 817163953Srrsstatic int 818163953Srrsbpfwrite(struct cdev *dev, struct uio *uio, int ioflag) 819171440Srrs{ 820163953Srrs struct bpf_d *d; 
821163953Srrs struct ifnet *ifp; 822163953Srrs struct mbuf *m, *mc; 823163953Srrs struct sockaddr dst; 824172090Srrs int error, hlen; 825163953Srrs 826163953Srrs error = devfs_get_cdevpriv((void **)&d); 827163953Srrs if (error != 0) 828163953Srrs return (error); 829163953Srrs 830163953Srrs d->bd_pid = curthread->td_proc->p_pid; 831163953Srrs d->bd_wcount++; 832163953Srrs if (d->bd_bif == NULL) { 833172090Srrs d->bd_wdcount++; 834172090Srrs return (ENXIO); 835172090Srrs } 836172090Srrs 837172090Srrs ifp = d->bd_bif->bif_ifp; 838172090Srrs 839172090Srrs if ((ifp->if_flags & IFF_UP) == 0) { 840171943Srrs d->bd_wdcount++; 841171440Srrs return (ENETDOWN); 842172090Srrs } 843172090Srrs 844172090Srrs if (uio->uio_resid == 0) { 845163953Srrs d->bd_wdcount++; 846163953Srrs return (0); 847163953Srrs } 848163953Srrs 849163953Srrs bzero(&dst, sizeof(dst)); 850163953Srrs m = NULL; 851163953Srrs hlen = 0; 852163953Srrs error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp, 853163953Srrs &m, &dst, &hlen, d->bd_wfilter); 854163953Srrs if (error) { 855163953Srrs d->bd_wdcount++; 856163953Srrs return (error); 857163953Srrs } 858163953Srrs d->bd_wfcount++; 859163953Srrs if (d->bd_hdrcmplt) 860163953Srrs dst.sa_family = pseudo_AF_HDRCMPLT; 861163953Srrs 862163953Srrs if (d->bd_feedback) { 863163953Srrs mc = m_dup(m, M_DONTWAIT); 864163953Srrs if (mc != NULL) 865163953Srrs mc->m_pkthdr.rcvif = ifp; 866163953Srrs /* Set M_PROMISC for outgoing packets to be discarded. 
*/ 867163953Srrs if (d->bd_direction == BPF_D_INOUT) 868169420Srrs m->m_flags |= M_PROMISC; 869169420Srrs } else 870169420Srrs mc = NULL; 871163953Srrs 872163953Srrs m->m_pkthdr.len -= hlen; 873163953Srrs m->m_len -= hlen; 874163953Srrs m->m_data += hlen; /* XXX */ 875163953Srrs 876163953Srrs#ifdef MAC 877163953Srrs BPFD_LOCK(d); 878163953Srrs CURVNET_SET(ifp->if_vnet); 879163953Srrs mac_bpfdesc_create_mbuf(d, m); 880163953Srrs CURVNET_RESTORE(); 881163953Srrs if (mc != NULL) 882163953Srrs mac_bpfdesc_create_mbuf(d, mc); 883163953Srrs BPFD_UNLOCK(d); 884163953Srrs#endif 885163953Srrs 886163953Srrs error = (*ifp->if_output)(ifp, m, &dst, NULL); 887163953Srrs if (error) 888163953Srrs d->bd_wdcount++; 889163953Srrs 890163953Srrs if (mc != NULL) { 891163953Srrs if (error == 0) 892163953Srrs (*ifp->if_input)(ifp, mc); 893163953Srrs else 894163953Srrs m_freem(mc); 895163953Srrs } 896163953Srrs 897163953Srrs return (error); 898163953Srrs} 899163953Srrs 900163953Srrs/* 901163953Srrs * Reset a descriptor by flushing its packet buffer and clearing the 902163953Srrs * receive and drop counts. 903163953Srrs */ 904163953Srrsstatic void 905163953Srrsreset_d(struct bpf_d *d) 906169420Srrs{ 907169420Srrs 908169420Srrs mtx_assert(&d->bd_mtx, MA_OWNED); 909163953Srrs if (d->bd_hbuf) { 910163953Srrs /* Free the hold buffer. */ 911163953Srrs d->bd_fbuf = d->bd_hbuf; 912169420Srrs d->bd_hbuf = NULL; 913169420Srrs bpf_buf_reclaimed(d); 914169420Srrs } 915163953Srrs d->bd_slen = 0; 916163953Srrs d->bd_hlen = 0; 917163953Srrs d->bd_rcount = 0; 918163953Srrs d->bd_dcount = 0; 919163953Srrs d->bd_fcount = 0; 920163953Srrs d->bd_wcount = 0; 921163953Srrs d->bd_wfcount = 0; 922163953Srrs d->bd_wdcount = 0; 923163953Srrs d->bd_zcopy = 0; 924163953Srrs} 925163953Srrs 926163953Srrs/* 927163953Srrs * FIONREAD Check for read packet available. 928163953Srrs * SIOCGIFADDR Get interface address - convenient hook to driver. 929163953Srrs * BIOCGBLEN Get buffer len [for read()]. 
930172090Srrs * BIOCSETF Set read filter. 931172090Srrs * BIOCSETFNR Set read filter without resetting descriptor. 932172090Srrs * BIOCSETWF Set write filter. 933172090Srrs * BIOCFLUSH Flush read packet buffer. 934172090Srrs * BIOCPROMISC Put interface into promiscuous mode. 935163953Srrs * BIOCGDLT Get link layer type. 936163953Srrs * BIOCGETIF Get interface name. 937163953Srrs * BIOCSETIF Set interface. 938163953Srrs * BIOCSRTIMEOUT Set read timeout. 939163953Srrs * BIOCGRTIMEOUT Get read timeout. 940163953Srrs * BIOCGSTATS Get packet stats. 941163953Srrs * BIOCIMMEDIATE Set immediate mode. 942163953Srrs * BIOCVERSION Get filter language version. 943163953Srrs * BIOCGHDRCMPLT Get "header already complete" flag 944163953Srrs * BIOCSHDRCMPLT Set "header already complete" flag 945163953Srrs * BIOCGDIRECTION Get packet direction flag 946169420Srrs * BIOCSDIRECTION Set packet direction flag 947169420Srrs * BIOCLOCK Set "locked" flag 948163953Srrs * BIOCFEEDBACK Set packet feedback mode. 949163953Srrs * BIOCSETZBUF Set current zero-copy buffer locations. 950163953Srrs * BIOCGETZMAX Get maximum zero-copy buffer size. 951163953Srrs * BIOCROTZBUF Force rotation of zero-copy buffer 952163953Srrs * BIOCSETBUFMODE Set buffer mode. 953163953Srrs * BIOCGETBUFMODE Get current buffer mode. 954163953Srrs */ 955169420Srrs/* ARGSUSED */ 956169420Srrsstatic int 957163953Srrsbpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, 958163953Srrs struct thread *td) 959163953Srrs{ 960163953Srrs struct bpf_d *d; 961163953Srrs int error; 962163953Srrs 963163953Srrs error = devfs_get_cdevpriv((void **)&d); 964163953Srrs if (error != 0) 965163953Srrs return (error); 966163953Srrs 967163953Srrs /* 968163953Srrs * Refresh PID associated with this descriptor. 
969163953Srrs */ 970163953Srrs BPFD_LOCK(d); 971163953Srrs d->bd_pid = td->td_proc->p_pid; 972172090Srrs if (d->bd_state == BPF_WAITING) 973163953Srrs callout_stop(&d->bd_callout); 974172090Srrs d->bd_state = BPF_IDLE; 975172090Srrs BPFD_UNLOCK(d); 976172090Srrs 977172090Srrs if (d->bd_locked == 1) { 978172090Srrs switch (cmd) { 979172090Srrs case BIOCGBLEN: 980172090Srrs case BIOCFLUSH: 981172090Srrs case BIOCGDLT: 982171943Srrs case BIOCGDLTLIST: 983171943Srrs case BIOCGETIF: 984172090Srrs case BIOCGRTIMEOUT: 985172090Srrs case BIOCGSTATS: 986172090Srrs case BIOCVERSION: 987163953Srrs case BIOCGRSIG: 988163953Srrs case BIOCGHDRCMPLT: 989163953Srrs case BIOCFEEDBACK: 990163953Srrs case FIONREAD: 991163953Srrs case BIOCLOCK: 992163953Srrs case BIOCSRTIMEOUT: 993163953Srrs case BIOCIMMEDIATE: 994172090Srrs case TIOCGPGRP: 995163953Srrs case BIOCROTZBUF: 996163953Srrs break; 997163953Srrs default: 998163953Srrs return (EPERM); 999163953Srrs } 1000163953Srrs } 1001163953Srrs CURVNET_SET(TD_TO_VNET(td)); 1002163953Srrs switch (cmd) { 1003163953Srrs 1004163953Srrs default: 1005163953Srrs error = EINVAL; 1006163953Srrs break; 1007163953Srrs 1008163953Srrs /* 1009163953Srrs * Check for read packet available. 
1010163953Srrs */ 1011163953Srrs case FIONREAD: 1012163953Srrs { 1013163953Srrs int n; 1014163953Srrs 1015163953Srrs BPFD_LOCK(d); 1016163953Srrs n = d->bd_slen; 1017163953Srrs if (d->bd_hbuf) 1018163953Srrs n += d->bd_hlen; 1019163953Srrs BPFD_UNLOCK(d); 1020163953Srrs 1021163953Srrs *(int *)addr = n; 1022163953Srrs break; 1023163953Srrs } 1024163953Srrs 1025163953Srrs case SIOCGIFADDR: 1026163953Srrs { 1027163953Srrs struct ifnet *ifp; 1028163953Srrs 1029166086Srrs if (d->bd_bif == NULL) 1030163953Srrs error = EINVAL; 1031169420Srrs else { 1032169420Srrs ifp = d->bd_bif->bif_ifp; 1033163953Srrs error = (*ifp->if_ioctl)(ifp, cmd, addr); 1034163953Srrs } 1035163953Srrs break; 1036163953Srrs } 1037163953Srrs 1038166086Srrs /* 1039163953Srrs * Get buffer len [for read()]. 1040163953Srrs */ 1041163953Srrs case BIOCGBLEN: 1042163953Srrs *(u_int *)addr = d->bd_bufsize; 1043163953Srrs break; 1044169420Srrs 1045169420Srrs /* 1046163953Srrs * Set buffer length. 1047163953Srrs */ 1048163953Srrs case BIOCSBLEN: 1049163953Srrs error = bpf_ioctl_sblen(d, (u_int *)addr); 1050163953Srrs break; 1051163953Srrs 1052163953Srrs /* 1053163953Srrs * Set link layer read filter. 1054163953Srrs */ 1055163953Srrs case BIOCSETF: 1056169352Srrs case BIOCSETFNR: 1057169352Srrs case BIOCSETWF: 1058170181Srrs error = bpf_setf(d, (struct bpf_program *)addr, cmd); 1059163953Srrs break; 1060163953Srrs 1061163953Srrs /* 1062163953Srrs * Flush read packet buffer. 1063169420Srrs */ 1064169420Srrs case BIOCFLUSH: 1065169420Srrs BPFD_LOCK(d); 1066163953Srrs reset_d(d); 1067169420Srrs BPFD_UNLOCK(d); 1068169420Srrs break; 1069163953Srrs 1070163953Srrs /* 1071163953Srrs * Put interface into promiscuous mode. 1072163953Srrs */ 1073163953Srrs case BIOCPROMISC: 1074163953Srrs if (d->bd_bif == NULL) { 1075170181Srrs /* 1076168299Srrs * No interface attached yet. 
1077163953Srrs */ 1078163953Srrs error = EINVAL; 1079163953Srrs break; 1080163953Srrs } 1081163953Srrs if (d->bd_promisc == 0) { 1082163953Srrs error = ifpromisc(d->bd_bif->bif_ifp, 1); 1083163953Srrs if (error == 0) 1084163953Srrs d->bd_promisc = 1; 1085170181Srrs } 1086168299Srrs break; 1087163953Srrs 1088163953Srrs /* 1089163953Srrs * Get current data link type. 1090163953Srrs */ 1091163953Srrs case BIOCGDLT: 1092163953Srrs if (d->bd_bif == NULL) 1093170181Srrs error = EINVAL; 1094168299Srrs else 1095163953Srrs *(u_int *)addr = d->bd_bif->bif_dlt; 1096163953Srrs break; 1097163953Srrs 1098163953Srrs /* 1099163953Srrs * Get a list of supported data link types. 1100163953Srrs */ 1101170181Srrs case BIOCGDLTLIST: 1102168299Srrs if (d->bd_bif == NULL) 1103163953Srrs error = EINVAL; 1104163953Srrs else 1105163953Srrs error = bpf_getdltlist(d, (struct bpf_dltlist *)addr); 1106163953Srrs break; 1107163953Srrs 1108163953Srrs /* 1109170181Srrs * Set data link type. 1110168299Srrs */ 1111163953Srrs case BIOCSDLT: 1112163953Srrs if (d->bd_bif == NULL) 1113163953Srrs error = EINVAL; 1114171943Srrs else 1115163953Srrs error = bpf_setdlt(d, *(u_int *)addr); 1116163953Srrs break; 1117163953Srrs 1118163953Srrs /* 1119163953Srrs * Get interface name. 1120163953Srrs */ 1121163953Srrs case BIOCGETIF: 1122163953Srrs if (d->bd_bif == NULL) 1123163953Srrs error = EINVAL; 1124163953Srrs else { 1125163953Srrs struct ifnet *const ifp = d->bd_bif->bif_ifp; 1126163953Srrs struct ifreq *const ifr = (struct ifreq *)addr; 1127163953Srrs 1128163953Srrs strlcpy(ifr->ifr_name, ifp->if_xname, 1129172090Srrs sizeof(ifr->ifr_name)); 1130163953Srrs } 1131169352Srrs break; 1132170181Srrs 1133163953Srrs /* 1134163953Srrs * Set interface. 1135163953Srrs */ 1136163953Srrs case BIOCSETIF: 1137169420Srrs error = bpf_setif(d, (struct ifreq *)addr); 1138171943Srrs break; 1139163953Srrs 1140163953Srrs /* 1141171943Srrs * Set read timeout. 
1142171943Srrs */ 1143171943Srrs case BIOCSRTIMEOUT: 1144171943Srrs { 1145171943Srrs struct timeval *tv = (struct timeval *)addr; 1146171943Srrs 1147171943Srrs /* 1148163953Srrs * Subtract 1 tick from tvtohz() since this isn't 1149169378Srrs * a one-shot timer. 1150163953Srrs */ 1151163953Srrs if ((error = itimerfix(tv)) == 0) 1152165220Srrs d->bd_rtout = tvtohz(tv) - 1; 1153163953Srrs break; 1154163953Srrs } 1155163953Srrs 1156163953Srrs /* 1157163953Srrs * Get read timeout. 1158163953Srrs */ 1159163953Srrs case BIOCGRTIMEOUT: 1160163953Srrs { 1161163953Srrs struct timeval *tv = (struct timeval *)addr; 1162163953Srrs 1163163953Srrs tv->tv_sec = d->bd_rtout / hz; 1164163953Srrs tv->tv_usec = (d->bd_rtout % hz) * tick; 1165163953Srrs break; 1166163953Srrs } 1167163953Srrs 1168163953Srrs /* 1169163953Srrs * Get packet stats. 1170163953Srrs */ 1171163953Srrs case BIOCGSTATS: 1172163953Srrs { 1173163953Srrs struct bpf_stat *bs = (struct bpf_stat *)addr; 1174163953Srrs 1175163953Srrs /* XXXCSJP overflow */ 1176163953Srrs bs->bs_recv = d->bd_rcount; 1177169420Srrs bs->bs_drop = d->bd_dcount; 1178163953Srrs break; 1179163953Srrs } 1180163953Srrs 1181163953Srrs /* 1182163953Srrs * Set immediate mode. 
1183163953Srrs */ 1184163953Srrs case BIOCIMMEDIATE: 1185163953Srrs d->bd_immediate = *(u_int *)addr; 1186163953Srrs break; 1187163953Srrs 1188163953Srrs case BIOCVERSION: 1189163953Srrs { 1190163953Srrs struct bpf_version *bv = (struct bpf_version *)addr; 1191163953Srrs 1192169352Srrs bv->bv_major = BPF_MAJOR_VERSION; 1193170181Srrs bv->bv_minor = BPF_MINOR_VERSION; 1194163953Srrs break; 1195163953Srrs } 1196163953Srrs 1197163953Srrs /* 1198163953Srrs * Get "header already complete" flag 1199165220Srrs */ 1200163953Srrs case BIOCGHDRCMPLT: 1201164205Srrs *(u_int *)addr = d->bd_hdrcmplt; 1202170140Srrs break; 1203163953Srrs 1204163953Srrs /* 1205163953Srrs * Set "header already complete" flag 1206166675Srrs */ 1207166023Srrs case BIOCSHDRCMPLT: 1208166023Srrs d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0; 1209166023Srrs break; 1210166023Srrs 1211166023Srrs /* 1212166023Srrs * Get packet direction flag 1213163953Srrs */ 1214163953Srrs case BIOCGDIRECTION: 1215163953Srrs *(u_int *)addr = d->bd_direction; 1216163953Srrs break; 1217163953Srrs 1218163953Srrs /* 1219163953Srrs * Set packet direction flag 1220165647Srrs */ 1221163953Srrs case BIOCSDIRECTION: 1222163953Srrs { 1223163953Srrs u_int direction; 1224163953Srrs 1225163953Srrs direction = *(u_int *)addr; 1226165647Srrs switch (direction) { 1227165647Srrs case BPF_D_IN: 1228165647Srrs case BPF_D_INOUT: 1229163953Srrs case BPF_D_OUT: 1230165647Srrs d->bd_direction = direction; 1231163953Srrs break; 1232163953Srrs default: 1233163953Srrs error = EINVAL; 1234169352Srrs } 1235170181Srrs } 1236166023Srrs break; 1237166023Srrs 1238163953Srrs case BIOCFEEDBACK: 1239163953Srrs d->bd_feedback = *(u_int *)addr; 1240163953Srrs break; 1241163953Srrs 1242163953Srrs case BIOCLOCK: 1243163953Srrs d->bd_locked = 1; 1244163953Srrs break; 1245163953Srrs 1246163953Srrs case FIONBIO: /* Non-blocking I/O */ 1247163953Srrs break; 1248163953Srrs 1249163953Srrs case FIOASYNC: /* Send signal on receive packets */ 1250163953Srrs d->bd_async = 
*(int *)addr; 1251163953Srrs break; 1252163953Srrs 1253163953Srrs case FIOSETOWN: 1254163953Srrs error = fsetown(*(int *)addr, &d->bd_sigio); 1255163953Srrs break; 1256163953Srrs 1257163953Srrs case FIOGETOWN: 1258163953Srrs *(int *)addr = fgetown(&d->bd_sigio); 1259163953Srrs break; 1260163953Srrs 1261163953Srrs /* This is deprecated, FIOSETOWN should be used instead. */ 1262163953Srrs case TIOCSPGRP: 1263163953Srrs error = fsetown(-(*(int *)addr), &d->bd_sigio); 1264163953Srrs break; 1265163953Srrs 1266163953Srrs /* This is deprecated, FIOGETOWN should be used instead. */ 1267163953Srrs case TIOCGPGRP: 1268163953Srrs *(int *)addr = -fgetown(&d->bd_sigio); 1269163953Srrs break; 1270163953Srrs 1271163953Srrs case BIOCSRSIG: /* Set receive signal */ 1272163953Srrs { 1273163953Srrs u_int sig; 1274163953Srrs 1275163953Srrs sig = *(u_int *)addr; 1276163953Srrs 1277163953Srrs if (sig >= NSIG) 1278163953Srrs error = EINVAL; 1279163953Srrs else 1280166023Srrs d->bd_sig = sig; 1281172091Srrs break; 1282172091Srrs } 1283172091Srrs case BIOCGRSIG: 1284172091Srrs *(u_int *)addr = d->bd_sig; 1285172091Srrs break; 1286172091Srrs 1287172091Srrs case BIOCGETBUFMODE: 1288172091Srrs *(u_int *)addr = d->bd_bufmode; 1289172091Srrs break; 1290172091Srrs 1291172091Srrs case BIOCSETBUFMODE: 1292172091Srrs /* 1293172091Srrs * Allow the buffering mode to be changed as long as we 1294172091Srrs * haven't yet committed to a particular mode. Our 1295172091Srrs * definition of commitment, for now, is whether or not a 1296172091Srrs * buffer has been allocated or an interface attached, since 1297172091Srrs * that's the point where things get tricky. 
1298172091Srrs */ 1299166023Srrs switch (*(u_int *)addr) { 1300163953Srrs case BPF_BUFMODE_BUFFER: 1301163953Srrs break; 1302163953Srrs 1303166023Srrs case BPF_BUFMODE_ZBUF: 1304163953Srrs if (bpf_zerocopy_enable) 1305169352Srrs break; 1306166023Srrs /* FALLSTHROUGH */ 1307166023Srrs 1308166023Srrs default: 1309163953Srrs return (EINVAL); 1310163953Srrs } 1311163953Srrs 1312163953Srrs BPFD_LOCK(d); 1313166023Srrs if (d->bd_sbuf != NULL || d->bd_hbuf != NULL || 1314166023Srrs d->bd_fbuf != NULL || d->bd_bif != NULL) { 1315163953Srrs BPFD_UNLOCK(d); 1316163953Srrs return (EBUSY); 1317163953Srrs } 1318163953Srrs d->bd_bufmode = *(u_int *)addr; 1319171440Srrs BPFD_UNLOCK(d); 1320171440Srrs break; 1321163953Srrs 1322166675Srrs case BIOCGETZMAX: 1323166675Srrs return (bpf_ioctl_getzmax(td, d, (size_t *)addr)); 1324166675Srrs 1325166675Srrs case BIOCSETZBUF: 1326171943Srrs return (bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr)); 1327171943Srrs 1328163953Srrs case BIOCROTZBUF: 1329163953Srrs return (bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr)); 1330163953Srrs } 1331163953Srrs CURVNET_RESTORE(); 1332166675Srrs return (error); 1333165220Srrs} 1334163953Srrs 1335163953Srrs/* 1336163953Srrs * Set d's packet filter program to fp. If this file already has a filter, 1337163953Srrs * free it and replace it. Returns EINVAL for bogus requests. 
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif

	/*
	 * Select which filter slot (read or write) we are replacing and
	 * remember the old program so it can be freed after the swap.
	 */
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	/* A NULL program with zero length clears the filter. */
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
			/* BIOCSETFNR skips the reset, by design. */
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		/* Free the old program outside the descriptor lock. */
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	/* Copy the program in and validate it before installing it. */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 *
	 * XXXRW: There are locking issues here with multi-threaded use: what
	 * if two threads try to set the interface at once?
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	/* No descriptor or no interface: report everything ready. */
	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

/* Knote detach: unlink the note from the descriptor's selinfo list. */
static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

/*
 * Knote event filter: non-zero when captured data is ready for read().
 * Arms the read timeout when idle and nothing is ready yet.
 */
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	/* Timestamp lazily: taken once, only if some filter matches. */
	gottime = 0;
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there is no
		 * way for the caller to indicate to us whether this packet
		 * is inbound or outbound.  In the bpf_mtap() routines, we use
		 * the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL)
			slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * True when descriptor d's direction setting rejects a packet whose
 * receive interface is r, relative to the tapped interface i (r == i
 * means inbound, r != i means outbound).
 */
#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		/* XXX We cannot handle multiple mbufs. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL &&
		    m->m_next == NULL)
			slen = (*(d->bd_bfilter->func))(mtod(m, u_char *),
			    pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct timeval *tv)
{
	struct bpf_hdr hdr;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;
	int do_wakeup = 0;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wakeup pending processes.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	hdr.bh_tstamp = *tv;
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = totlen - hdrlen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, hdr.bh_caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
1867163953Srrs */ 1868170140Srrsvoid 1869172090Srrsbpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1870172090Srrs{ 1871172090Srrs struct bpf_if *bp; 1872172090Srrs 1873172090Srrs bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO); 1874171943Srrs if (bp == NULL) 1875172090Srrs panic("bpfattach"); 1876172090Srrs 1877172090Srrs LIST_INIT(&bp->bif_dlist); 1878172090Srrs bp->bif_ifp = ifp; 1879163953Srrs bp->bif_dlt = dlt; 1880163953Srrs mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF); 1881163953Srrs KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized")); 1882163953Srrs *driverp = bp; 1883163953Srrs 1884163953Srrs mtx_lock(&bpf_mtx); 1885170140Srrs LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next); 1886172090Srrs mtx_unlock(&bpf_mtx); 1887172090Srrs 1888172090Srrs /* 1889172090Srrs * Compute the length of the bpf header. This is not necessarily 1890172090Srrs * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1891171943Srrs * that the network layer header begins on a longword boundary (for 1892172090Srrs * performance reasons and to alleviate alignment restrictions). 1893172090Srrs */ 1894172090Srrs bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1895172090Srrs 1896163953Srrs if (bootverbose) 1897163953Srrs if_printf(ifp, "bpf attached\n"); 1898163953Srrs} 1899163953Srrs 1900163953Srrs/* 1901163953Srrs * Detach bpf from an interface. This involves detaching each descriptor 1902163953Srrs * associated with the interface, and leaving bd_bif NULL. Notify each 1903163953Srrs * descriptor as it's detached so that any sleepers wake up and get 1904163953Srrs * ENXIO. 
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;

	/* Locate BPF interface information */
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (ifp == bp->bif_ifp)
			break;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	LIST_REMOVE(bp, bif_next);
	mtx_unlock(&bpf_mtx);

	/*
	 * NOTE(review): this loop assumes bpf_detachd() unlinks d from
	 * bif_dlist (otherwise it would not terminate) — verify against
	 * bpf_detachd()'s definition earlier in this file.
	 */
	while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get a list of available data link type of the interface.
 *
 * Walks bpf_iflist counting every bpf_if attached to this descriptor's
 * interface; if the user supplied a buffer (bfl_list != NULL), each DLT
 * value is copied out as well.  On return bfl_len holds the count.
 * Returns ENOMEM if the user buffer is too small, otherwise the last
 * copyout() error (0 on success).
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			/*
			 * NOTE(review): copyout() is performed while holding
			 * bpf_mtx; copyout can fault on the user address —
			 * confirm sleeping/faulting with this mutex held is
			 * acceptable here.
			 */
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	/* No work if the descriptor is already on the requested DLT. */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	/*
	 * NOTE(review): bp is dereferenced below after bpf_mtx is dropped;
	 * this relies on the bpf_if not being detached concurrently —
	 * confirm against bpfdetach()'s locking.
	 */
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		/* Re-home the descriptor, preserving promiscuous state. */
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

/*
 * Driver initialization: set up the global lock and interface list, and
 * create /dev/bpf (with a /dev/bpf0 alias for compatibility).
 */
static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");

}

/*
 * Snapshot descriptor bd into the exported xbpf_d form d for the stats
 * sysctl.  Caller must hold bd's descriptor lock (asserted).
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

/*
 * Sysctl handler exporting per-descriptor statistics for every open BPF
 * descriptor on the system.  Privileged (PRIV_NET_BPF) only.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct. It is possible for non
	 * privileged users to open bpf devices. It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/* Size probe: report the space needed without copying anything. */
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	/*
	 * NOTE(review): req->oldlen is a user-controlled allocation size,
	 * and bpf_bpfd_cnt may change between the size probe and this call;
	 * the re-check under bpf_mtx below handles shrinkage of the buffer,
	 * but confirm an upper bound on oldlen is enforced by the sysctl
	 * framework.
	 */
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	/* Point the driver at the shared do-nothing bpf_if. */
	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return -1;	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return 0;		/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */