/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/net/bpf.c 235745 2012-05-21 22:17:29Z melifaro $");

#include "opt_bpf.h"
#include "opt_compat.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#include <net/if.h>
#define	BPF_INTERNAL
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define	PRINET	26			/* interruptible */

#define	SIZEOF_BPF_HDR(type)	\
    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define	BPF_ALIGNMENT32 sizeof(int32_t)
#define	BPF_WORDALIGN32(x) (((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))

#ifndef BURN_BRIDGES
/*
 * 32-bit version of structure prepended to each packet.  We use this header
 * instead of the standard one for 32-bit streams.  We mark a stream as
 * 32-bit the first time we see a 32-bit compat ioctl request.
 */
struct bpf_hdr32 {
	struct timeval32 bh_tstamp;	/* time stamp */
	uint32_t	bh_caplen;	/* length of captured portion */
	uint32_t	bh_datalen;	/* original length of packet */
	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
#endif

struct bpf_program32 {
	u_int bf_len;
	uint32_t bf_insns;
};

struct bpf_dltlist32 {
	u_int	bfl_len;
	u_int	bfl_list;
};

#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
#endif

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_detachd_locked(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct bintime *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

static VNET_DEFINE(int, bpf_optimize_writers) = 0;
#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
    CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
    "Do not send packets until BPF program is set");

static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};

eventhandler_tag	bpf_ifdetach_cookie = NULL;

/*
 * LOCKING MODEL USED BY BPF:
 * Locks:
 * 1) global lock (BPF_LOCK).  Mutex, used to protect interface
 *    addition/removal, some global counters and every bpf_if reference.
 * 2) Interface lock.  Rwlock, used to protect the list of BPF descriptors
 *    and their filters.
 * 3) Descriptor lock.  Mutex, used to protect BPF buffers and various
 *    structure fields used by the bpf_mtap code.
 *
 * Lock order:
 *
 * Global lock, interface lock, descriptor lock.
 *
 * We have to acquire the interface lock before the descriptor main lock
 * due to the way BPF_MTAP[2] works.  In many places (like bpf_detachd) we
 * start with a BPF descriptor (and we need to at least rlock it to get a
 * reliable interface pointer).  This gives us a potential LOR.  As a
 * result, we use the global lock to protect against bpf_if changes in
 * every such place.
 *
 * Changing d->bd_bif is protected by 1) the global lock, 2) the interface
 * lock and 3) the descriptor main wlock.
 * Reading bd_bif can be protected by any of these locks, typically the
 * global lock.
 *
 * Changing the read/write BPF filter is protected by the same three locks;
 * the same applies to reading.
 *
 * Sleeping while holding the global lock is not allowed, since bpfdetach()
 * uses it.
 */
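
/*
 * Illustrative sketch (not from the original file): the acquisition order
 * mandated above, as it appears when rebinding a descriptor to an
 * interface; compare bpf_attachd() below.
 */
#if 0
	BPF_LOCK();		/* 1) global lock */
	BPFIF_WLOCK(bp);	/* 2) interface lock */
	BPFD_LOCK(d);		/* 3) descriptor lock */
	d->bd_bif = bp;		/* changing bd_bif needs all three */
	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);
	BPF_UNLOCK();
#endif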

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, etc.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}

/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{
	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}

static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}

/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4-byte pseudo header
		 * which corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * The en(4) ATM driver requires a 4-byte ATM pseudo header.
		 * Though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	if (len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MJUM16BYTES)
		return (EIO);

	if (len <= MHLEN)
		MGETHDR(m, M_WAIT, MT_DATA);
	else if (len <= MCLBYTES)
		m = m_getcl(M_WAIT, MT_DATA, M_PKTHDR);
	else
		m = m_getjcl(M_WAIT, MT_DATA, M_PKTHDR,
#if (MJUMPAGESIZE > MCLBYTES)
		    len <= MJUMPAGESIZE ? MJUMPAGESIZE :
#endif
		    (len <= MJUM9BYTES ? MJUM9BYTES : MJUM16BYTES));
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int op_w;

	BPF_LOCK_ASSERT();

	/*
	 * Save sysctl value to protect from sysctl change
	 * between reads
	 */
	op_w = V_bpf_optimize_writers;

	if (d->bd_bif != NULL)
		bpf_detachd_locked(d);
	/*
	 * Point d at bp, and add d to the interface's list.
	 * Since there are many applications using BPF for
	 * sending raw packets only (dhcpd, cdpd are good examples)
	 * we can delay adding d to the list of active listeners until
	 * some filter is configured.
	 */

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	d->bd_bif = bp;

	if (op_w != 0) {
		/* Add to writers-only list */
		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
		/*
		 * We decrement bd_writer on every filter set operation.
		 * First BIOCSETF is done by pcap_open_live() to set up
		 * snap length.  After that, the application usually sets
		 * its own filter.
		 */
		d->bd_writer = 2;
	} else
		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt++;

	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");

	if (op_w == 0)
		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}
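
/*
 * Illustrative note (assumption, not part of the original file): with
 * net.bpf.optimize_writers=1, a send-only consumer such as dhcpd typically
 * passes through these states:
 *
 *	bpf_attachd()               -> bd_writer = 2, d on bif_wlist
 *	BIOCSETF #1 (pcap snaplen)  -> bd_writer = 1
 *	BIOCSETF #2 (real filter)   -> bd_writer = 0, bpf_upgraded() moves
 *				       d to bif_dlist
 *
 * A descriptor that never sets a filter stays on the writers-only list
 * and never receives packets.
 */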

/*
 * Add d to the list of active bp filters.
 * Requires bpf_attachd() to be called first.
 */
static void
bpf_upgraded(struct bpf_d *d)
{
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	bp = d->bd_bif;

	/*
	 * Filter can be set several times without specifying interface.
	 * Mark d as reader and exit.
	 */
	if (bp == NULL) {
		BPFD_LOCK(d);
		d->bd_writer = 0;
		BPFD_UNLOCK(d);
		return;
	}

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Remove from writers-only list */
	LIST_REMOVE(d, bd_next);
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	/* Mark d as reader */
	d->bd_writer = 0;

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	BPF_LOCK();
	bpf_detachd_locked(d);
	BPF_UNLOCK();
}

static void
bpf_detachd_locked(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);

	BPF_LOCK_ASSERT();

	/* Check if descriptor is attached */
	if ((bp = d->bd_bif) == NULL)
		return;

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Save bd_writer value */
	error = d->bd_writer;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	ifp = bp->bif_ifp;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt--;

	/* Call event handler iff d is attached */
	if (error == 0)
		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	bpf_detachd(d);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	seldrain(&d->bd_sel);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	BPF_PID_REFRESH(d, td);
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);

	return (0);
}

/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * XXXRW: More synchronization needed here: what if a second thread
	 * issues a read on the same fd at the same time?  Don't want this
	 * getting invalidated.
	 */
	error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	bpf_buf_reclaimed(d);
	BPFD_UNLOCK(d);

	return (error);
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK_ASSERT(d);

	if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout))
		return;
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
}

static int
bpf_ready(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if (!bpf_canfreebuf(d) && d->bd_hlen != 0)
		return (1);
	if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
	    d->bd_slen != 0)
		return (1);
	return (0);
}

static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m, *mc;
	struct sockaddr dst;
	int error, hlen;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	BPF_PID_REFRESH_CUR(d);
	d->bd_wcount++;
	/* XXX: locking required */
	if (d->bd_bif == NULL) {
		d->bd_wdcount++;
		return (ENXIO);
	}

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0) {
		d->bd_wdcount++;
		return (ENETDOWN);
	}

	if (uio->uio_resid == 0) {
		d->bd_wdcount++;
		return (0);
	}

	bzero(&dst, sizeof(dst));
	m = NULL;
	hlen = 0;
	/* XXX: bpf_movein() can sleep */
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp,
	    &m, &dst, &hlen, d->bd_wfilter);
	if (error) {
		d->bd_wdcount++;
		return (error);
	}
	d->bd_wfcount++;
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	if (d->bd_feedback) {
		mc = m_dup(m, M_DONTWAIT);
		if (mc != NULL)
			mc->m_pkthdr.rcvif = ifp;
		/* Set M_PROMISC for outgoing packets to be discarded. */
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	BPFD_LOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_UNLOCK(d);
#endif

	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wfcount = 0;
	d->bd_wdcount = 0;
	d->bd_zcopy = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set read filter.
 *  BIOCSETFNR		Set read filter without resetting descriptor.
 *  BIOCSETWF		Set write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGDIRECTION	Get packet direction flag
 *  BIOCSDIRECTION	Set packet direction flag
 *  BIOCGTSTAMP		Get time stamp format and resolution.
 *  BIOCSTSTAMP		Set time stamp format and resolution.
 *  BIOCLOCK		Set "locked" flag
 *  BIOCFEEDBACK	Set packet feedback mode.
 *  BIOCSETZBUF		Set current zero-copy buffer locations.
 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
 *  BIOCROTZBUF		Force rotation of zero-copy buffer
 *  BIOCSETBUFMODE	Set buffer mode.
 *  BIOCGETBUFMODE	Get current buffer mode.
 */
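
/*
 * Example (illustrative sketch, not part of the original file): a minimal
 * userland consumer of the ioctls listed above.  Error handling is omitted
 * and the interface name "em0" is an assumption.
 */
#if 0
#include <sys/types.h>
#include <sys/ioctl.h>

#include <net/if.h>
#include <net/bpf.h>

#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static int
bpf_open_example(void)
{
	struct ifreq ifr;
	u_int blen, imm = 1;
	char *buf;
	int fd;

	fd = open("/dev/bpf", O_RDWR);		/* cloning bpf device */
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	(void)ioctl(fd, BIOCSETIF, &ifr);	/* bind to an interface */
	(void)ioctl(fd, BIOCIMMEDIATE, &imm);	/* deliver packets as they arrive */
	(void)ioctl(fd, BIOCGBLEN, &blen);	/* reads must match the buffer size */
	buf = malloc(blen);
	(void)read(fd, buf, blen);	/* one buffer of bpf headers + packets */
	free(buf);
	return (fd);
}
#endif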
/* ARGSUSED */
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH(d, td);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
#ifdef COMPAT_FREEBSD32
		case BIOCGDLTLIST32:
#endif
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
		case BIOCGRTIMEOUT32:
#endif
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCSTSTAMP:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
		case BIOCSRTIMEOUT32:
#endif
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
#ifdef COMPAT_FREEBSD32
	/*
	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
	 * that it will get 32-bit packet headers.
	 */
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
	case BIOCGDLTLIST32:
	case BIOCGRTIMEOUT32:
	case BIOCSRTIMEOUT32:
		d->bd_compat32 = 1;
	}
#endif

	CURVNET_SET(TD_TO_VNET(td));
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet	*ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
#ifdef COMPAT_FREEBSD32
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
#endif
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
#ifdef COMPAT_FREEBSD32
	case BIOCGDLTLIST32:
		{
			struct bpf_dltlist32 *list32;
			struct bpf_dltlist dltlist;

			list32 = (struct bpf_dltlist32 *)addr;
			dltlist.bfl_len = list32->bfl_len;
			dltlist.bfl_list = PTRIN(list32->bfl_list);
			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				error = bpf_getdltlist(d, &dltlist);
				if (error == 0)
					list32->bfl_len = dltlist.bfl_len;
			}
			break;
		}
#endif

	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		BPF_LOCK();
		error = bpf_setif(d, (struct ifreq *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
	case BIOCSRTIMEOUT32:
#endif
		{
			struct timeval *tv = (struct timeval *)addr;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCSRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv = &tv64;
				tv->tv_sec = tv32->tv_sec;
				tv->tv_usec = tv32->tv_usec;
			} else
#endif
				tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
	case BIOCGRTIMEOUT32:
#endif
		{
			struct timeval *tv;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCGRTIMEOUT32)
				tv = &tv64;
			else
#endif
				tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			if (cmd == BIOCGRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv32->tv_sec = tv->tv_sec;
				tv32->tv_usec = tv->tv_usec;
			}
#endif

			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		*(u_int *)addr = d->bd_direction;
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				d->bd_direction = direction;
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	/*
	 * Get packet timestamp format and resolution.
	 */
	case BIOCGTSTAMP:
		*(u_int *)addr = d->bd_tstamp;
		break;

	/*
	 * Set packet timestamp format and resolution.
	 */
	case BIOCSTSTAMP:
		{
			u_int	func;

			func = *(u_int *)addr;
			if (BPF_T_VALID(func))
				d->bd_tstamp = func;
			else
				error = EINVAL;
		}
		break;

	case BIOCFEEDBACK:
		d->bd_feedback = *(u_int *)addr;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int	sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;

	case BIOCGETBUFMODE:
		*(u_int *)addr = d->bd_bufmode;
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLTHROUGH */

		default:
			CURVNET_RESTORE();
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			CURVNET_RESTORE();
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
		break;

	case BIOCSETZBUF:
		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
		break;

	case BIOCROTZBUF:
		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
		break;
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif
	int need_upgrade;
#ifdef COMPAT_FREEBSD32
	struct bpf_program32 *fp32;
	struct bpf_program fp_swab;

	if (cmd == BIOCSETWF32 || cmd == BIOCSETF32 || cmd == BIOCSETFNR32) {
		fp32 = (struct bpf_program32 *)fp;
		fp_swab.bf_len = fp32->bf_len;
		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
		fp = &fp_swab;
		if (cmd == BIOCSETWF32)
			cmd = BIOCSETWF;
	}
#endif
	/*
	 * Check the validity of the new filter before acquiring any locks.
	 * Allocate memory for the new filter, if needed.
	 */
	flen = fp->bf_len;
	if ((flen > bpf_maxinsns) || ((fp->bf_insns == NULL) && (flen != 0)))
		return (EINVAL);

	need_upgrade = 0;
	size = flen * sizeof(*fp->bf_insns);
	if (size > 0)
		fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	else
		fcode = NULL; /* Make compiler happy */

	BPF_LOCK();

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		/*
		 * Protect filter removal by interface lock.
		 * Additionally, we are protected by global lock here.
		 */
		if (d->bd_bif != NULL)
			BPFIF_WLOCK(d->bd_bif);
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
			if (cmd == BIOCSETF)
				reset_d(d);
		}
		BPFD_UNLOCK(d);
		if (d->bd_bif != NULL)
			BPFIF_WUNLOCK(d->bd_bif);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		BPF_UNLOCK();
		return (0);
	}

	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		/*
		 * Protect filter change by interface lock.
		 * Additionally, we are protected by global lock here.
		 */
		if (d->bd_bif != NULL)
			BPFIF_WLOCK(d->bd_bif);
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
			if (cmd == BIOCSETF)
				reset_d(d);

			/*
			 * Do not require upgrade by first BIOCSETF
			 * (used to set snaplen) by pcap_open_live().
			 */
			if ((d->bd_writer != 0) && (--d->bd_writer == 0))
				need_upgrade = 1;
			CTR4(KTR_NET, "%s: filter function set by pid %d, "
			    "bd_writer counter %d, need_upgrade %d",
			    __func__, d->bd_pid, d->bd_writer, need_upgrade);
		}
		BPFD_UNLOCK(d);
		if (d->bd_bif != NULL)
			BPFIF_WUNLOCK(d->bd_bif);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		/* Move d to active readers list. */
		if (need_upgrade != 0)
			bpf_upgraded(d);

		BPF_UNLOCK();
		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	BPF_UNLOCK();
	return (EINVAL);
}
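
/*
 * Example (illustrative sketch, not part of the original file): installing
 * a classic BPF program from userland via BIOCSETF.  This one-instruction
 * filter accepts every packet and snaps it to 96 bytes; "fd" is a bpf
 * descriptor opened as in the earlier sketch.
 */
#if 0
#include <sys/ioctl.h>
#include <net/bpf.h>

static int
bpf_set_filter_example(int fd)
{
	/* BPF_RET with a literal operand: accept up to 96 bytes. */
	struct bpf_insn insns[] = {
		BPF_STMT(BPF_RET | BPF_K, 96),
	};
	struct bpf_program prog = {
		.bf_len = sizeof(insns) / sizeof(insns[0]),
		.bf_insns = insns,
	};

	return (ioctl(fd, BIOCSETF, &prog));
}
#endif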

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	BPF_LOCK_ASSERT();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/* Check if interface is not being detached from BPF */
	BPFIF_RLOCK(bp);
	if (bp->flags & BPFIF_FLAG_DYING) {
		BPFIF_RUNLOCK(bp);
		return (ENXIO);
	}
	BPFIF_RUNLOCK(bp);

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		if (d->bd_sbuf == NULL)
			bpf_buffer_alloc(d);
		KASSERT(d->bd_sbuf != NULL, ("bpf_setif: bd_sbuf NULL"));
		break;

	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif)
		bpf_attachd(d, bp);
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	BPF_PID_REFRESH(d, td);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

#define	BPF_TSTAMP_NONE		0
#define	BPF_TSTAMP_FAST		1
#define	BPF_TSTAMP_NORMAL	2
#define	BPF_TSTAMP_EXTERN	3

static int
bpf_ts_quality(int tstype)
{

	if (tstype == BPF_T_NONE)
		return (BPF_TSTAMP_NONE);
	if ((tstype & BPF_T_FAST) != 0)
		return (BPF_TSTAMP_FAST);

	return (BPF_TSTAMP_NORMAL);
}

static int
bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
{
	struct m_tag *tag;
	int quality;

	quality = bpf_ts_quality(tstype);
	if (quality == BPF_TSTAMP_NONE)
		return (quality);

	if (m != NULL) {
		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
		if (tag != NULL) {
			*bt = *(struct bintime *)(tag + 1);
			return (BPF_TSTAMP_EXTERN);
		}
	}
	if (quality == BPF_TSTAMP_NORMAL)
		binuptime(bt);
	else
		getbinuptime(bt);

	return (quality);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int slen;
	int gottime;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/*
		 * We are not using any locks for d here because:
		 * 1) any filter change is protected by the interface
		 *    write lock
		 * 2) destroying/detaching d is protected by the interface
		 *    write lock, too
		 */

		/* XXX: Do not protect counter for the sake of performance. */
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound.
		 * In the bpf_mtap() routines, we use the interface pointers
		 * on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/*
			 * Filter matches.  Let's acquire the write lock.
			 */
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 * Locking model is explained in bpf_tap().
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct bintime bt;
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

#undef	BPF_TSTAMP_NONE
#undef	BPF_TSTAMP_FAST
#undef	BPF_TSTAMP_NORMAL
#undef	BPF_TSTAMP_EXTERN

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen;

	hdrlen = d->bd_bif->bif_hdrlen;
#ifndef BURN_BRIDGES
	if (d->bd_tstamp == BPF_T_NONE ||
	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32)
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
		else
#endif
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
	else
#endif
		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		hdrlen = BPF_WORDALIGN32(hdrlen);
	else
#endif
		hdrlen = BPF_WORDALIGN(hdrlen);

	return (hdrlen - d->bd_bif->bif_hdrlen);
}

static void
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
{
	struct bintime bt2;
	struct timeval tsm;
	struct timespec tsn;

	if ((tstype & BPF_T_MONOTONIC) == 0) {
		bt2 = *bt;
		bintime_add(&bt2, &boottimebin);
		bt = &bt2;
	}
	switch (BPF_T_FORMAT(tstype)) {
	case BPF_T_MICROTIME:
		bintime2timeval(bt, &tsm);
		ts->bt_sec = tsm.tv_sec;
		ts->bt_frac = tsm.tv_usec;
		break;
	case BPF_T_NANOTIME:
		bintime2timespec(bt, &tsn);
		ts->bt_sec = tsn.tv_sec;
		ts->bt_frac = tsn.tv_nsec;
		break;
	case BPF_T_BINTIME:
		ts->bt_sec = bt->sec;
		ts->bt_frac = bt->frac;
		break;
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
 * while bpf_append_mbuf is passed in to copy mbuf chains.  In the latter
 * case, pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct bintime *bt)
{
	struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
	struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
	struct bpf_hdr32 hdr32_old;
#endif
#endif
	int caplen, curlen, hdrlen, totlen;
	int do_wakeup = 0;
	int do_timestamp;
	int tstype;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
 * while bpf_append_mbuf is passed in to copy mbuf chains.  In the
 * latter case, pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct bintime *bt)
{
	struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
	struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
	struct bpf_hdr32 hdr32_old;
#endif
#endif
	int caplen, curlen, hdrlen, totlen;
	int do_wakeup = 0;
	int do_timestamp;
	int tstype;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	hdrlen = bpf_hdrlen(d);
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wake up pending processes.
	 */
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;
	caplen = totlen - hdrlen;
	tstype = d->bd_tstamp;
	do_timestamp = tstype != BPF_T_NONE;
#ifndef BURN_BRIDGES
	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
		struct bpf_ts ts;
		if (do_timestamp)
			bpf_bintime2ts(bt, &ts, tstype);
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32) {
			bzero(&hdr32_old, sizeof(hdr32_old));
			if (do_timestamp) {
				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
			}
			hdr32_old.bh_datalen = pktlen;
			hdr32_old.bh_hdrlen = hdrlen;
			hdr32_old.bh_caplen = caplen;
			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
			    sizeof(hdr32_old));
			goto copy;
		}
#endif
		bzero(&hdr_old, sizeof(hdr_old));
		if (do_timestamp) {
			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
		}
		hdr_old.bh_datalen = pktlen;
		hdr_old.bh_hdrlen = hdrlen;
		hdr_old.bh_caplen = caplen;
		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
		    sizeof(hdr_old));
		goto copy;
	}
#endif

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	if (do_timestamp)
		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = caplen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
#ifndef BURN_BRIDGES
copy:
#endif
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}
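/*
 * Example (illustrative sketch, not part of bpf.c): how a userland
 * reader walks the buffer that catchpacket() lays out.  Each record
 * is a header whose bh_hdrlen already includes the alignment padding
 * computed by bpf_hdrlen(), followed by bh_caplen bytes of data, and
 * successive records start on BPF_WORDALIGN boundaries--matching the
 * "curlen" rounding above.  Assumes the default struct bpf_hdr
 * layout (the BPF_T_MICROTIME compatibility path); process_packet()
 * is a hypothetical consumer and "fd" an already-configured
 * descriptor.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <net/bpf.h>
#include <unistd.h>

extern void process_packet(const u_char *, u_int);	/* hypothetical */

static void
drain_bpf(int fd, u_char *buf, size_t bufsize)
{
	struct bpf_hdr *bh;
	ssize_t n;
	u_char *p;

	n = read(fd, buf, bufsize);	/* returns one whole store buffer */
	for (p = buf; n > 0 && p < buf + n;
	    p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen)) {
		bh = (struct bpf_hdr *)p;
		/* Packet data starts bh_hdrlen bytes into the record. */
		process_packet(p + bh->bh_hdrlen, bh->bh_caplen);
	}
}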
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter != NULL) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		if (d->bd_bfilter != NULL)
			bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter != NULL)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_lock);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	LIST_INIT(&bp->bif_wlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	rw_init(&bp->bif_lock, "bpf interface lock");
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	BPF_LOCK();
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	BPF_UNLOCK();

	bp->bif_hdrlen = hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}
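/*
 * Example (illustrative fragment, not part of bpf.c): an Ethernet
 * driver registering with BPF near the end of its attach routine.
 * This is what ether_ifattach() does on the driver's behalf: the
 * fixed 14-byte DLT_EN10MB link header gives bpf_hdrlen() the
 * constant it needs.  A matching bpfdetach(ifp) is expected when
 * the interface goes away.
 */
	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);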
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface.  Notify each descriptor as it's detached
 * so that any sleepers wake up and get ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp;
	struct bpf_d *d;
#ifdef INVARIANTS
	int ndetached;

	ndetached = 0;
#endif

	BPF_LOCK();
	/* Find all bpf_if structs that reference ifp and detach them. */
	do {
		LIST_FOREACH(bp, &bpf_iflist, bif_next) {
			if (ifp == bp->bif_ifp)
				break;
		}
		if (bp != NULL)
			LIST_REMOVE(bp, bif_next);

		if (bp != NULL) {
#ifdef INVARIANTS
			ndetached++;
#endif
			while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
				bpf_detachd_locked(d);
				BPFD_LOCK(d);
				bpf_wakeup(d);
				BPFD_UNLOCK(d);
			}
			/* Free writer-only descriptors. */
			while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
				bpf_detachd_locked(d);
				BPFD_LOCK(d);
				bpf_wakeup(d);
				BPFD_UNLOCK(d);
			}

			/*
			 * Delay freeing bp till interface is detached
			 * and all routes through this interface are removed.
			 * Mark bp as detached to restrict new consumers.
			 */
			BPFIF_WLOCK(bp);
			bp->flags |= BPFIF_FLAG_DYING;
			BPFIF_WUNLOCK(bp);
		}
	} while (bp != NULL);
	BPF_UNLOCK();

#ifdef INVARIANTS
	if (ndetached == 0)
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
#endif
}

/*
 * Interface departure handler.
 */
static void
bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct bpf_if *bp;

	if ((bp = ifp->if_bpf) == NULL)
		return;

	CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
	    __func__, bp, ifp);

	ifp->if_bpf = NULL;
	rw_destroy(&bp->bif_lock);
	free(bp, M_BPF);
}

/*
 * Get a list of available data link types of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	BPF_LOCK();
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				BPF_UNLOCK();
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	BPF_UNLOCK();
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;

	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}

	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");

	/* Register interface departure handler. */
	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, bpf_ifdetach, NULL,
	    EVENTHANDLER_PRI_ANY);
}
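/*
 * Example (illustrative sketch, not part of bpf.c): the userland side
 * of bpf_getdltlist()/bpf_setdlt(), as libpcap drives it.  A first
 * BIOCGDLTLIST with bfl_list == NULL only sizes the list (per the
 * NULL check in bpf_getdltlist() above), a second call fills it, and
 * BIOCSDLT switches the descriptor to another DLT on the same
 * interface.  "fd" is an open bpf descriptor already bound to an
 * interface with BIOCSETIF; error handling is minimal.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <net/bpf.h>
#include <stdlib.h>

static int
pick_dlt(int fd, u_int wanted)
{
	struct bpf_dltlist dl;
	u_int i;
	int found;

	dl.bfl_list = NULL;			/* first pass: sizing only */
	if (ioctl(fd, BIOCGDLTLIST, &dl) < 0)
		return (-1);
	dl.bfl_list = calloc(dl.bfl_len, sizeof(u_int));
	if (dl.bfl_list == NULL)
		return (-1);
	if (ioctl(fd, BIOCGDLTLIST, &dl) < 0) {
		free(dl.bfl_list);
		return (-1);
	}
	found = 0;
	for (i = 0; i < dl.bfl_len; i++)
		if (dl.bfl_list[i] == wanted)
			found = 1;
	free(dl.bfl_list);
	if (!found)
		return (-1);
	return (ioctl(fd, BIOCSDLT, &wanted));
}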
/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors.  At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	BPF_LOCK();
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			BPFD_LOCK(bd);
			bd->bd_rcount = 0;
			bd->bd_dcount = 0;
			bd->bd_fcount = 0;
			bd->bd_wcount = 0;
			bd->bd_wfcount = 0;
			bd->bd_zcopy = 0;
			BPFD_UNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
}

/*
 * Fill filter statistics.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	/* XXX: reading should be protected by global lock */
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}
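/*
 * Example (illustrative sketch, not part of bpf.c): reading the
 * statistics exported above, the way `netstat -B' does.  The sysctl
 * name is assumed to be "net.bpf.stats" (registered elsewhere in
 * this file); each element is one struct xbpf_d, and bd_structsize
 * lets consumers detect layout changes.  Error handling is minimal.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct xbpf_d *xbd;
	size_t len, i;

	if (sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0) != 0)
		return (1);
	xbd = malloc(len);
	if (xbd == NULL || sysctlbyname("net.bpf.stats", xbd, &len,
	    NULL, 0) != 0)
		return (1);
	for (i = 0; i < len / sizeof(*xbd); i++)
		printf("%s: pid %d recv %ju drop %ju\n",
		    xbd[i].bd_ifname, (int)xbd[i].bd_pid,
		    (uintmax_t)xbd[i].bd_rcount, (uintmax_t)xbd[i].bd_dcount);
	free(xbd);
	return (0);
}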
/*
 * Handle `netstat -B' stats request.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd, zerostats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * non-privileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out.  Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
	 */
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(zerostats))
			return (EINVAL);
		bzero(&zerostats, sizeof(zerostats));
		xbd = req->newptr;
		if (bcmp(xbd, &zerostats, sizeof(*xbd)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	BPF_LOCK();
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		BPF_UNLOCK();
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		/* Send writers-only first. */
		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{

	return (-1);	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{

	return (0);	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */
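/*
 * Example (illustrative sketch, not part of bpf.c): the canonical
 * userland open sequence for the real (non-stub) device: open
 * /dev/bpf (the per-open cloning device created in bpf_drvinit()),
 * bind to an interface with BIOCSETIF, install a filter with
 * BIOCSETF, then read().  The one-instruction program below accepts
 * every packet; "em0" is an example NIC name and error handling is
 * elided for brevity.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/bpf.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	struct ifreq ifr;
	struct bpf_insn accept_all[] = {
		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),	/* snap whole packet */
	};
	struct bpf_program prog = { 1, accept_all };
	u_int one = 1;
	int fd;

	fd = open("/dev/bpf", O_RDWR);
	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
	ioctl(fd, BIOCSETIF, &ifr);
	ioctl(fd, BIOCIMMEDIATE, &one);	/* wake readers per packet */
	ioctl(fd, BIOCSETF, &prog);
	/*
	 * ... read(2) loop as in the earlier buffer-walking sketch;
	 * size the buffer with BIOCGBLEN ...
	 */
	close(fd);
	return (0);
}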