/*-
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/net/bpf.c 282427 2015-05-04 19:33:51Z mav $");

#include "opt_bpf.h"
#include "opt_compat.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/jail.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/uio.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>

#include <net/if.h>
#define	BPF_INTERNAL
#include <net/bpf.h>
#include <net/bpf_buffer.h>
#ifdef BPF_JITTER
#include <net/bpf_jitter.h>
#endif
#include <net/bpf_zerocopy.h>
#include <net/bpfdesc.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

#include <net80211/ieee80211_freebsd.h>

#include <security/mac/mac_framework.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define	PRINET	26			/* interruptible */

#define	SIZEOF_BPF_HDR(type)	\
    (offsetof(type, bh_hdrlen) + sizeof(((type *)0)->bh_hdrlen))

#ifdef COMPAT_FREEBSD32
#include <sys/mount.h>
#include <compat/freebsd32/freebsd32.h>
#define	BPF_ALIGNMENT32	sizeof(int32_t)
#define	BPF_WORDALIGN32(x)	(((x)+(BPF_ALIGNMENT32-1))&~(BPF_ALIGNMENT32-1))

#ifndef BURN_BRIDGES
/*
 * 32-bit version of structure prepended to each packet.  We use this header
 * instead of the standard one for 32-bit streams.  We mark a stream as
 * 32-bit the first time we see a 32-bit compat ioctl request.
 */
struct bpf_hdr32 {
	struct timeval32 bh_tstamp;	/* time stamp */
	uint32_t	bh_caplen;	/* length of captured portion */
	uint32_t	bh_datalen;	/* original length of packet */
	uint16_t	bh_hdrlen;	/* length of bpf header (this struct
					   plus alignment padding) */
};
#endif

struct bpf_program32 {
	u_int bf_len;
	uint32_t bf_insns;
};

struct bpf_dltlist32 {
	u_int	bfl_len;
	u_int	bfl_list;
};

#define	BIOCSETF32	_IOW('B', 103, struct bpf_program32)
#define	BIOCSRTIMEOUT32	_IOW('B', 109, struct timeval32)
#define	BIOCGRTIMEOUT32	_IOR('B', 110, struct timeval32)
#define	BIOCGDLTLIST32	_IOWR('B', 121, struct bpf_dltlist32)
#define	BIOCSETWF32	_IOW('B', 123, struct bpf_program32)
#define	BIOCSETFNR32	_IOW('B', 130, struct bpf_program32)
#endif

/*
 * bpf_iflist is a list of BPF interface structures, each corresponding to a
 * specific DLT.  The same network interface might have several BPF interface
 * structures registered by different layers in the stack (i.e., 802.11
 * frames, ethernet frames, etc).
 */
static LIST_HEAD(, bpf_if)	bpf_iflist, bpf_freelist;
static struct mtx	bpf_mtx;		/* bpf global lock */
static int		bpf_bpfd_cnt;

static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
static void	bpf_detachd(struct bpf_d *);
static void	bpf_detachd_locked(struct bpf_d *);
static void	bpf_freed(struct bpf_d *);
static int	bpf_movein(struct uio *, int, struct ifnet *, struct mbuf **,
		    struct sockaddr *, int *, struct bpf_insn *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
		    void (*)(struct bpf_d *, caddr_t, u_int, void *, u_int),
		    struct bintime *);
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);
static void	bpf_drvinit(void *);
static int	bpf_stats_sysctl(SYSCTL_HANDLER_ARGS);

SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
    &bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

static VNET_DEFINE(int, bpf_optimize_writers) = 0;
#define	V_bpf_optimize_writers VNET(bpf_optimize_writers)
SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
    CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
    "Do not send packets until BPF program is set");

static	d_open_t	bpfopen;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	bpfopen,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
};

eventhandler_tag	bpf_ifdetach_cookie = NULL;

/*
 * LOCKING MODEL USED BY BPF:
 * Locks:
 * 1) global lock (BPF_LOCK).  Mutex, used to protect interface
 *    addition/removal, some global counters and every bpf_if reference.
 * 2) Interface lock.  Rwlock, used to protect the list of BPF descriptors
 *    and their filters.
 * 3) Descriptor lock.  Mutex, used to protect BPF buffers and various
 *    structure fields used by bpf_mtap code.
 *
 * Lock order:
 *
 * Global lock, interface lock, descriptor lock
 *
 * We have to acquire the interface lock before the descriptor main lock
 * because of the BPF_MTAP[2] working model.  In many places (like
 * bpf_detachd) we start with a BPF descriptor (and we need to at least
 * rlock it to get a reliable interface pointer).  This gives us a potential
 * LOR.  As a result, we use the global lock to protect against bpf_if
 * changes in every such place.
 *
 * Changing d->bd_bif is protected by 1) global lock, 2) interface lock and
 * 3) descriptor main wlock.
 * Reading bd_bif can be protected by any of these locks, typically global lock.
 *
 * Changing the read/write BPF filter is protected by the same three locks,
 * and the same applies for reading.
 *
 * Sleeping in global lock is not allowed due to bpfdetach() using it.
 */

/*
 * Wrapper functions for various buffering methods.  If the set of buffer
 * modes expands, we will probably want to introduce a switch data structure
 * similar to protosw, etc.
 */
static void
bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_bytes(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_bytes");
	}
}

static void
bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
    u_int len)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_append_mbuf(d, buf, offset, src, len));

	case BPF_BUFMODE_ZBUF:
		d->bd_zcopy++;
		return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));

	default:
		panic("bpf_buf_append_mbuf");
	}
}

/*
 * This function gets called when the free buffer is re-assigned.
 */
static void
bpf_buf_reclaimed(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return;

	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buf_reclaimed(d);
		return;

	default:
		panic("bpf_buf_reclaimed");
	}
}

/*
 * If the buffer mechanism has a way to decide that a held buffer can be made
 * free, then it is exposed via the bpf_canfreebuf() interface.  (1) is
 * returned if the buffer can be discarded, (0) is returned if it cannot.
 */
static int
bpf_canfreebuf(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canfreebuf(d));
	}
	return (0);
}

/*
 * Allow the buffer model to indicate that the current store buffer is
 * immutable, regardless of the appearance of space.  Return (1) if the
 * buffer is writable, and (0) if not.
 */
static int
bpf_canwritebuf(struct bpf_d *d)
{
	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_canwritebuf(d));
	}
	return (1);
}

/*
 * Notify buffer model that an attempt to write to the store buffer has
 * resulted in a dropped packet, in which case the buffer may be considered
 * full.
 */
static void
bpf_buffull(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_buffull(d);
		break;
	}
}

/*
 * Notify the buffer model that a buffer has moved into the hold position.
 */
void
bpf_bufheld(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_ZBUF:
		bpf_zerocopy_bufheld(d);
		break;
	}
}

static void
bpf_free(struct bpf_d *d)
{

	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
		return (bpf_buffer_free(d));

	case BPF_BUFMODE_ZBUF:
		return (bpf_zerocopy_free(d));

	default:
		panic("bpf_buf_free");
	}
}

static int
bpf_uiomove(struct bpf_d *d, caddr_t buf, u_int len, struct uio *uio)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_uiomove(d, buf, len, uio));
}

static int
bpf_ioctl_sblen(struct bpf_d *d, u_int *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_BUFFER)
		return (EOPNOTSUPP);
	return (bpf_buffer_ioctl_sblen(d, i));
}

static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_getzmax(td, d, i));
}

static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
}

static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{

	if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
		return (EOPNOTSUPP);
	return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
}

/*
 * General BPF functions.
 */
static int
bpf_movein(struct uio *uio, int linktype, struct ifnet *ifp, struct mbuf **mp,
    struct sockaddr *sockp, int *hdrlen, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct ether_header *eh;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	if (len < hlen || len - hlen > ifp->if_mtu)
		return (EMSGSIZE);

	m = m_get2(len, M_WAITOK, MT_DATA, M_PKTHDR);
	if (m == NULL)
		return (EIO);
	m->m_pkthdr.len = m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB:
		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (bcmp(ifp->if_broadcastaddr, eh->ether_dhost,
			    ETHER_ADDR_LEN) == 0)
				m->m_flags |= M_BCAST;
			else
				m->m_flags |= M_MCAST;
		}
		break;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
	}
	*hdrlen = hlen;

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int op_w;

	BPF_LOCK_ASSERT();

	/*
	 * Save sysctl value to protect from sysctl change
	 * between reads
	 */
	op_w = V_bpf_optimize_writers || d->bd_writer;

	if (d->bd_bif != NULL)
		bpf_detachd_locked(d);
	/*
	 * Point d at bp, and add d to the interface's list.
	 * Since there are many applications using BPF for
	 * sending raw packets only (dhcpd, cdpd are good examples)
	 * we can delay adding d to the list of active listeners until
	 * some filter is configured.
	 */

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	d->bd_bif = bp;

	if (op_w != 0) {
		/* Add to writers-only list */
		LIST_INSERT_HEAD(&bp->bif_wlist, d, bd_next);
		/*
		 * We decrement bd_writer on every filter set operation.
		 * The first BIOCSETF is done by pcap_open_live() to set up
		 * the snap length.  After that the application usually sets
		 * its own filter.
		 */
		d->bd_writer = 2;
	} else
		LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt++;

	CTR3(KTR_NET, "%s: bpf_attach called by pid %d, adding to %s list",
	    __func__, d->bd_pid, d->bd_writer ? "writer" : "active");

	if (op_w == 0)
		EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Add d to the list of active bp filters.
 * Requires bpf_attachd() to be called first.
 */
static void
bpf_upgraded(struct bpf_d *d)
{
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	bp = d->bd_bif;

	/*
	 * Filter can be set several times without specifying interface.
	 * Mark d as reader and exit.
	 */
	if (bp == NULL) {
		BPFD_LOCK(d);
		d->bd_writer = 0;
		BPFD_UNLOCK(d);
		return;
	}

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Remove from writers-only list */
	LIST_REMOVE(d, bd_next);
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
	/* Mark d as reader */
	d->bd_writer = 0;

	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	CTR2(KTR_NET, "%s: upgrade required by pid %d", __func__, d->bd_pid);

	EVENTHANDLER_INVOKE(bpf_track, bp->bif_ifp, bp->bif_dlt, 1);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	BPF_LOCK();
	bpf_detachd_locked(d);
	BPF_UNLOCK();
}

static void
bpf_detachd_locked(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	CTR2(KTR_NET, "%s: detach required by pid %d", __func__, d->bd_pid);

	BPF_LOCK_ASSERT();

	/* Check if descriptor is attached */
	if ((bp = d->bd_bif) == NULL)
		return;

	BPFIF_WLOCK(bp);
	BPFD_LOCK(d);

	/* Save bd_writer value */
	error = d->bd_writer;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	ifp = bp->bif_ifp;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_WUNLOCK(bp);

	bpf_bpfd_cnt--;

	/* Call event handler iff d is attached */
	if (error == 0)
		EVENTHANDLER_INVOKE(bpf_track, ifp, bp->bif_dlt, 0);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		CURVNET_SET(ifp->if_vnet);
		error = ifpromisc(ifp, 0);
		CURVNET_RESTORE();
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
static void
bpf_dtor(void *data)
{
	struct bpf_d *d = data;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	bpf_detachd(d);
#ifdef MAC
	mac_bpfdesc_destroy(d);
#endif /* MAC */
	seldrain(&d->bd_sel);
	knlist_destroy(&d->bd_sel.si_note);
	callout_drain(&d->bd_callout);
	bpf_freed(d);
	free(d, M_BPF);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;
	int error, size;

	d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	error = devfs_set_cdevpriv(d, bpf_dtor);
	if (error != 0) {
		free(d, M_BPF);
		return (error);
	}

	/*
	 * For historical reasons, perform a one-time initialization call to
	 * the buffer routines, even though we're not yet committed to a
	 * particular buffer method.
	 */
	bpf_buffer_init(d);
	if ((flags & FREAD) == 0)
		d->bd_writer = 2;
	d->bd_hbuf_in_use = 0;
	d->bd_bufmode = BPF_BUFMODE_BUFFER;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	BPF_PID_REFRESH(d, td);
#ifdef MAC
	mac_bpfdesc_init(d);
	mac_bpfdesc_create(td->td_ucred, d);
#endif
	mtx_init(&d->bd_lock, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init_mtx(&d->bd_callout, &d->bd_lock, 0);
	knlist_init_mtx(&d->bd_sel.si_note, &d->bd_lock);

	/* Allocate default buffers */
	size = d->bd_bufsize;
	bpf_buffer_ioctl_sblen(d, &size);

	return (0);
}

/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	int error;
	int non_block;
	int timed_out;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	non_block = ((ioflag & O_NONBLOCK) != 0);

	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	if (d->bd_bufmode != BPF_BUFMODE_BUFFER) {
		BPFD_UNLOCK(d);
		return (EOPNOTSUPP);
	}
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	while (d->bd_hbuf_in_use) {
		error = mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
		    PRINET|PCATCH, "bd_hbuf", 0);
		if (error != 0) {
			BPFD_UNLOCK(d);
			return (error);
		}
	}
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 */
			if (d->bd_immediate || non_block || timed_out) {
				/*
				 * Rotate the buffers and return what's here
				 * if we are in immediate mode, non-blocking
				 * flag is set, or this descriptor timed out.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (non_block) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_lock, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
917 */ 918 if (d->bd_hbuf) 919 /* 920 * We filled up the buffer in between 921 * getting the timeout and arriving 922 * here, so we don't need to rotate. 923 */ 924 break; 925 926 if (d->bd_slen == 0) { 927 BPFD_UNLOCK(d); 928 return (0); 929 } 930 ROTATE_BUFFERS(d); 931 break; 932 } 933 } 934 /* 935 * At this point, we know we have something in the hold slot. 936 */ 937 d->bd_hbuf_in_use = 1; 938 BPFD_UNLOCK(d); 939 940 /* 941 * Move data from hold buffer into user space. 942 * We know the entire buffer is transferred since 943 * we checked above that the read buffer is bpf_bufsize bytes. 944 * 945 * We do not have to worry about simultaneous reads because 946 * we waited for sole access to the hold buffer above. 947 */ 948 error = bpf_uiomove(d, d->bd_hbuf, d->bd_hlen, uio); 949 950 BPFD_LOCK(d); 951 KASSERT(d->bd_hbuf != NULL, ("bpfread: lost bd_hbuf")); 952 d->bd_fbuf = d->bd_hbuf; 953 d->bd_hbuf = NULL; 954 d->bd_hlen = 0; 955 bpf_buf_reclaimed(d); 956 d->bd_hbuf_in_use = 0; 957 wakeup(&d->bd_hbuf_in_use); 958 BPFD_UNLOCK(d); 959 960 return (error); 961} 962 963/* 964 * If there are processes sleeping on this descriptor, wake them up. 965 */ 966static __inline void 967bpf_wakeup(struct bpf_d *d) 968{ 969 970 BPFD_LOCK_ASSERT(d); 971 if (d->bd_state == BPF_WAITING) { 972 callout_stop(&d->bd_callout); 973 d->bd_state = BPF_IDLE; 974 } 975 wakeup(d); 976 if (d->bd_async && d->bd_sig && d->bd_sigio) 977 pgsigio(&d->bd_sigio, d->bd_sig, 0); 978 979 selwakeuppri(&d->bd_sel, PRINET); 980 KNOTE_LOCKED(&d->bd_sel.si_note, 0); 981} 982 983static void 984bpf_timed_out(void *arg) 985{ 986 struct bpf_d *d = (struct bpf_d *)arg; 987 988 BPFD_LOCK_ASSERT(d); 989 990 if (callout_pending(&d->bd_callout) || !callout_active(&d->bd_callout)) 991 return; 992 if (d->bd_state == BPF_WAITING) { 993 d->bd_state = BPF_TIMED_OUT; 994 if (d->bd_slen != 0) 995 bpf_wakeup(d); 996 } 997} 998 999static int 1000bpf_ready(struct bpf_d *d) 1001{ 1002 1003 BPFD_LOCK_ASSERT(d); 1004 1005 if (!bpf_canfreebuf(d) && d->bd_hlen != 0) 1006 return (1); 1007 if ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) && 1008 d->bd_slen != 0) 1009 return (1); 1010 return (0); 1011} 1012 1013static int 1014bpfwrite(struct cdev *dev, struct uio *uio, int ioflag) 1015{ 1016 struct bpf_d *d; 1017 struct ifnet *ifp; 1018 struct mbuf *m, *mc; 1019 struct sockaddr dst; 1020 int error, hlen; 1021 1022 error = devfs_get_cdevpriv((void **)&d); 1023 if (error != 0) 1024 return (error); 1025 1026 BPF_PID_REFRESH_CUR(d); 1027 d->bd_wcount++; 1028 /* XXX: locking required */ 1029 if (d->bd_bif == NULL) { 1030 d->bd_wdcount++; 1031 return (ENXIO); 1032 } 1033 1034 ifp = d->bd_bif->bif_ifp; 1035 1036 if ((ifp->if_flags & IFF_UP) == 0) { 1037 d->bd_wdcount++; 1038 return (ENETDOWN); 1039 } 1040 1041 if (uio->uio_resid == 0) { 1042 d->bd_wdcount++; 1043 return (0); 1044 } 1045 1046 bzero(&dst, sizeof(dst)); 1047 m = NULL; 1048 hlen = 0; 1049 /* XXX: bpf_movein() can sleep */ 1050 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp, 1051 &m, &dst, &hlen, d->bd_wfilter); 1052 if (error) { 1053 d->bd_wdcount++; 1054 return (error); 1055 } 1056 d->bd_wfcount++; 1057 if (d->bd_hdrcmplt) 1058 dst.sa_family = pseudo_AF_HDRCMPLT; 1059 1060 if (d->bd_feedback) { 1061 mc = m_dup(m, M_NOWAIT); 1062 if (mc != NULL) 1063 mc->m_pkthdr.rcvif = ifp; 1064 /* Set M_PROMISC for outgoing packets to be discarded. 
		if (d->bd_direction == BPF_D_INOUT)
			m->m_flags |= M_PROMISC;
	} else
		mc = NULL;

	m->m_pkthdr.len -= hlen;
	m->m_len -= hlen;
	m->m_data += hlen;	/* XXX */

	CURVNET_SET(ifp->if_vnet);
#ifdef MAC
	BPFD_LOCK(d);
	mac_bpfdesc_create_mbuf(d, m);
	if (mc != NULL)
		mac_bpfdesc_create_mbuf(d, mc);
	BPFD_UNLOCK(d);
#endif

	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	if (error)
		d->bd_wdcount++;

	if (mc != NULL) {
		if (error == 0)
			(*ifp->if_input)(ifp, mc);
		else
			m_freem(mc);
	}
	CURVNET_RESTORE();

	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the receive
 * and drop counts.  This is doable for kernel-only buffers, but with
 * zero-copy buffers, we can't write to (or rotate) buffers that are
 * currently owned by userspace.  It would be nice if we could encapsulate
 * this logic in the buffer code rather than here.
 */
static void
reset_d(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);

	while (d->bd_hbuf_in_use)
		mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock, PRINET,
		    "bd_hbuf", 0);
	if ((d->bd_hbuf != NULL) &&
	    (d->bd_bufmode != BPF_BUFMODE_ZBUF || bpf_canfreebuf(d))) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}
	if (bpf_canwritebuf(d))
		d->bd_slen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wfcount = 0;
	d->bd_wdcount = 0;
	d->bd_zcopy = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set read filter.
 *  BIOCSETFNR		Set read filter without resetting descriptor.
 *  BIOCSETWF		Set write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGDIRECTION	Get packet direction flag
 *  BIOCSDIRECTION	Set packet direction flag
 *  BIOCGTSTAMP		Get time stamp format and resolution.
 *  BIOCSTSTAMP		Set time stamp format and resolution.
 *  BIOCLOCK		Set "locked" flag
 *  BIOCFEEDBACK	Set packet feedback mode.
 *  BIOCSETZBUF		Set current zero-copy buffer locations.
 *  BIOCGETZMAX		Get maximum zero-copy buffer size.
 *  BIOCROTZBUF		Force rotation of zero-copy buffer
 *  BIOCSETBUFMODE	Set buffer mode.
 *  BIOCGETBUFMODE	Get current buffer mode.
 */
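/*
 * Illustrative user-space sketch (an assumption for exposition, not part of
 * this file; interface name "em0" and all error handling are placeholders).
 * A typical consumer opens a bpf device, binds it to an interface, sizes its
 * read buffer to match the kernel buffer, and walks the packets returned by
 * read() using the bpf header framing:
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr = { 0 };
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);   (read size must equal blen)
 *	for (char *p = buf; p < buf + n; ) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		... bh->bh_caplen bytes of packet at p + bh->bh_hdrlen ...
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 *
 * This is why bpfread() below insists that uio_resid equal bd_bufsize, and
 * why BIOCSBLEN must be issued before BIOCSETIF.
 */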
/* ARGSUSED */
static	int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d;
	int error;

	error = devfs_get_cdevpriv((void **)&d);
	if (error != 0)
		return (error);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH(d, td);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
#ifdef COMPAT_FREEBSD32
		case BIOCGDLTLIST32:
#endif
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
		case BIOCGRTIMEOUT32:
#endif
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case BIOCSTSTAMP:
		case BIOCFEEDBACK:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
		case BIOCSRTIMEOUT32:
#endif
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCROTZBUF:
			break;
		default:
			return (EPERM);
		}
	}
#ifdef COMPAT_FREEBSD32
	/*
	 * If we see a 32-bit compat ioctl, mark the stream as 32-bit so
	 * that it will get 32-bit packet headers.
	 */
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
	case BIOCGDLTLIST32:
	case BIOCGRTIMEOUT32:
	case BIOCSRTIMEOUT32:
		BPFD_LOCK(d);
		d->bd_compat32 = 1;
		BPFD_UNLOCK(d);
	}
#endif

	CURVNET_SET(TD_TO_VNET(td));
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			while (d->bd_hbuf_in_use)
				mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
				    PRINET, "bd_hbuf", 0);
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_bufsize;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		error = bpf_ioctl_sblen(d, (u_int *)addr);
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETFNR:
	case BIOCSETWF:
#ifdef COMPAT_FREEBSD32
	case BIOCSETF32:
	case BIOCSETFNR32:
	case BIOCSETWF32:
#endif
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		BPF_UNLOCK();
		break;

	/*
	 * Get a list of supported data link types.
	 */
#ifdef COMPAT_FREEBSD32
	case BIOCGDLTLIST32:
		{
			struct bpf_dltlist32 *list32;
			struct bpf_dltlist dltlist;

			list32 = (struct bpf_dltlist32 *)addr;
			dltlist.bfl_len = list32->bfl_len;
			dltlist.bfl_list = PTRIN(list32->bfl_list);
			BPF_LOCK();
			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				error = bpf_getdltlist(d, &dltlist);
				if (error == 0)
					list32->bfl_len = dltlist.bfl_len;
			}
			BPF_UNLOCK();
			break;
		}
#endif

	case BIOCGDLTLIST:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		BPF_LOCK();
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		BPF_UNLOCK();
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		BPF_LOCK();
		error = bpf_setif(d, (struct ifreq *)addr);
		BPF_UNLOCK();
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
	case BIOCSRTIMEOUT32:
#endif
		{
			struct timeval *tv = (struct timeval *)addr;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCSRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv = &tv64;
				tv->tv_sec = tv32->tv_sec;
				tv->tv_usec = tv32->tv_usec;
			} else
#endif
				tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
	case BIOCGRTIMEOUT32:
#endif
		{
			struct timeval *tv;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			struct timeval32 *tv32;
			struct timeval tv64;

			if (cmd == BIOCGRTIMEOUT32)
				tv = &tv64;
			else
#endif
				tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
#if defined(COMPAT_FREEBSD32) && !defined(__mips__)
			if (cmd == BIOCGRTIMEOUT32) {
				tv32 = (struct timeval32 *)addr;
				tv32->tv_sec = tv->tv_sec;
				tv32->tv_usec = tv->tv_usec;
			}
#endif

			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			/* XXXCSJP overflow */
			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		BPFD_LOCK(d);
		d->bd_immediate = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_hdrcmplt;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		BPFD_LOCK(d);
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Get packet direction flag
	 */
	case BIOCGDIRECTION:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_direction;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set packet direction flag
	 */
	case BIOCSDIRECTION:
		{
			u_int	direction;

			direction = *(u_int *)addr;
			switch (direction) {
			case BPF_D_IN:
			case BPF_D_INOUT:
			case BPF_D_OUT:
				BPFD_LOCK(d);
				d->bd_direction = direction;
				BPFD_UNLOCK(d);
				break;
			default:
				error = EINVAL;
			}
		}
		break;

	/*
	 * Get packet timestamp format and resolution.
	 */
	case BIOCGTSTAMP:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_tstamp;
		BPFD_UNLOCK(d);
		break;

	/*
	 * Set packet timestamp format and resolution.
	 */
	case BIOCSTSTAMP:
		{
			u_int	func;

			func = *(u_int *)addr;
			if (BPF_T_VALID(func))
				d->bd_tstamp = func;
			else
				error = EINVAL;
		}
		break;

	case BIOCFEEDBACK:
		BPFD_LOCK(d);
		d->bd_feedback = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCLOCK:
		BPFD_LOCK(d);
		d->bd_locked = 1;
		BPFD_UNLOCK(d);
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		BPFD_LOCK(d);
		d->bd_async = *(int *)addr;
		BPFD_UNLOCK(d);
		break;

	case FIOSETOWN:
		/*
		 * XXX: Add some sort of locking here?
		 * fsetown() can sleep.
		 */
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		BPFD_LOCK(d);
		*(int *)addr = fgetown(&d->bd_sigio);
		BPFD_UNLOCK(d);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else {
				BPFD_LOCK(d);
				d->bd_sig = sig;
				BPFD_UNLOCK(d);
			}
			break;
		}
	case BIOCGRSIG:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_sig;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETBUFMODE:
		BPFD_LOCK(d);
		*(u_int *)addr = d->bd_bufmode;
		BPFD_UNLOCK(d);
		break;

	case BIOCSETBUFMODE:
		/*
		 * Allow the buffering mode to be changed as long as we
		 * haven't yet committed to a particular mode.  Our
		 * definition of commitment, for now, is whether or not a
		 * buffer has been allocated or an interface attached, since
		 * that's the point where things get tricky.
		 */
		switch (*(u_int *)addr) {
		case BPF_BUFMODE_BUFFER:
			break;

		case BPF_BUFMODE_ZBUF:
			if (bpf_zerocopy_enable)
				break;
			/* FALLSTHROUGH */

		default:
			CURVNET_RESTORE();
			return (EINVAL);
		}

		BPFD_LOCK(d);
		if (d->bd_sbuf != NULL || d->bd_hbuf != NULL ||
		    d->bd_fbuf != NULL || d->bd_bif != NULL) {
			BPFD_UNLOCK(d);
			CURVNET_RESTORE();
			return (EBUSY);
		}
		d->bd_bufmode = *(u_int *)addr;
		BPFD_UNLOCK(d);
		break;

	case BIOCGETZMAX:
		error = bpf_ioctl_getzmax(td, d, (size_t *)addr);
		break;

	case BIOCSETZBUF:
		error = bpf_ioctl_setzbuf(td, d, (struct bpf_zbuf *)addr);
		break;

	case BIOCROTZBUF:
		error = bpf_ioctl_rotzbuf(td, d, (struct bpf_zbuf *)addr);
		break;
	}
	CURVNET_RESTORE();
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 *
 * Note we need the global lock here to serialize bpf_setf() and bpf_setif()
 * calls, since reading d->bd_bif can't be protected by the descriptor or
 * interface lock due to lock order.
 *
 * Additionally, we have to acquire the interface write lock because
 * bpf_mtap() uses the interface read lock to read all filters.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
#ifdef COMPAT_FREEBSD32
	struct bpf_program fp_swab;
	struct bpf_program32 *fp32;
#endif
	struct bpf_insn *fcode, *old;
#ifdef BPF_JITTER
	bpf_jit_filter *jfunc, *ofunc;
#endif
	size_t size;
	u_int flen;
	int need_upgrade;

#ifdef COMPAT_FREEBSD32
	switch (cmd) {
	case BIOCSETF32:
	case BIOCSETWF32:
	case BIOCSETFNR32:
		fp32 = (struct bpf_program32 *)fp;
		fp_swab.bf_len = fp32->bf_len;
		fp_swab.bf_insns = (struct bpf_insn *)(uintptr_t)fp32->bf_insns;
		fp = &fp_swab;
		switch (cmd) {
		case BIOCSETF32:
			cmd = BIOCSETF;
			break;
		case BIOCSETWF32:
			cmd = BIOCSETWF;
			break;
		}
		break;
	}
#endif

	fcode = NULL;
#ifdef BPF_JITTER
	jfunc = ofunc = NULL;
#endif
	need_upgrade = 0;

	/*
	 * Check the validity of the new filter before acquiring any locks.
	 * Allocate memory for the new filter, if needed.
	 */
	flen = fp->bf_len;
	if (flen > bpf_maxinsns || (fp->bf_insns == NULL && flen != 0))
		return (EINVAL);
	size = flen * sizeof(*fp->bf_insns);
	if (size > 0) {
		/* We're setting up a new filter.  Copy and check actual data. */
		fcode = malloc(size, M_BPF, M_WAITOK);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    !bpf_validate(fcode, flen)) {
			free(fcode, M_BPF);
			return (EINVAL);
		}
#ifdef BPF_JITTER
		/* Filter is copied inside fcode and is perfectly valid. */
		jfunc = bpf_jitter(fcode, flen);
#endif
	}

	BPF_LOCK();

	/*
	 * Set up new filter.
	 * Protect filter change by interface lock.
	 * Additionally, we are protected by global lock here.
	 */
	if (d->bd_bif != NULL)
		BPFIF_WLOCK(d->bd_bif);
	BPFD_LOCK(d);
	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		d->bd_wfilter = fcode;
	} else {
		old = d->bd_rfilter;
		d->bd_rfilter = fcode;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
		d->bd_bfilter = jfunc;
#endif
		if (cmd == BIOCSETF)
			reset_d(d);

		if (fcode != NULL) {
			/*
			 * Do not require upgrade by first BIOCSETF
			 * (used to set snaplen) by pcap_open_live().
			 */
			if (d->bd_writer != 0 && --d->bd_writer == 0)
				need_upgrade = 1;
			CTR4(KTR_NET, "%s: filter function set by pid %d, "
			    "bd_writer counter %d, need_upgrade %d",
			    __func__, d->bd_pid, d->bd_writer, need_upgrade);
		}
	}
	BPFD_UNLOCK(d);
	if (d->bd_bif != NULL)
		BPFIF_WUNLOCK(d->bd_bif);
	if (old != NULL)
		free(old, M_BPF);
#ifdef BPF_JITTER
	if (ofunc != NULL)
		bpf_destroy_jit_filter(ofunc);
#endif

	/* Move d to active readers list. */
	if (need_upgrade)
		bpf_upgraded(d);

	BPF_UNLOCK();
	return (0);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	BPF_LOCK_ASSERT();

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;

	/* Check if interface is not being detached from BPF */
	BPFIF_RLOCK(bp);
	if (bp->flags & BPFIF_FLAG_DYING) {
		BPFIF_RUNLOCK(bp);
		return (ENXIO);
	}
	BPFIF_RUNLOCK(bp);

	/*
	 * Behavior here depends on the buffering model.  If we're using
	 * kernel memory buffers, then we can allocate them here.  If we're
	 * using zero-copy, then the user process must have registered
	 * buffers by the time we get here.  If not, return an error.
	 */
	switch (d->bd_bufmode) {
	case BPF_BUFMODE_BUFFER:
	case BPF_BUFMODE_ZBUF:
		if (d->bd_sbuf == NULL)
			return (EINVAL);
		break;

	default:
		panic("bpf_setif: bufmode %d", d->bd_bufmode);
	}
	if (bp != d->bd_bif)
		bpf_attachd(d, bp);
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
		return (events &
		    (POLLHUP|POLLIN|POLLRDNORM|POLLOUT|POLLWRNORM));

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	BPF_PID_REFRESH(d, td);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d;

	if (devfs_get_cdevpriv((void **)&d) != 0 ||
	    kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	BPF_PID_REFRESH_CUR(d);
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		while (d->bd_hbuf_in_use)
			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
			    PRINET, "bd_hbuf", 0);
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	} else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

#define	BPF_TSTAMP_NONE		0
#define	BPF_TSTAMP_FAST		1
#define	BPF_TSTAMP_NORMAL	2
#define	BPF_TSTAMP_EXTERN	3

static int
bpf_ts_quality(int tstype)
{

	if (tstype == BPF_T_NONE)
		return (BPF_TSTAMP_NONE);
	if ((tstype & BPF_T_FAST) != 0)
		return (BPF_TSTAMP_FAST);

	return (BPF_TSTAMP_NORMAL);
}

static int
bpf_gettime(struct bintime *bt, int tstype, struct mbuf *m)
{
	struct m_tag *tag;
	int quality;

	quality = bpf_ts_quality(tstype);
	if (quality == BPF_TSTAMP_NONE)
		return (quality);

	if (m != NULL) {
		tag = m_tag_locate(m, MTAG_BPF, MTAG_BPF_TIMESTAMP, NULL);
		if (tag != NULL) {
			*bt = *(struct bintime *)(tag + 1);
			return (BPF_TSTAMP_EXTERN);
		}
	}
	if (quality == BPF_TSTAMP_NORMAL)
		binuptime(bt);
	else
		getbinuptime(bt);

	return (quality);
}
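/*
 * Illustrative note (a sketch of typical driver usage, not code from this
 * file): drivers normally do not call bpf_tap()/bpf_mtap() directly but go
 * through the BPF_TAP()/BPF_MTAP() macros from <net/bpf.h>, which skip the
 * call entirely when no listeners are attached, e.g.:
 *
 *	BPF_MTAP(ifp, m);
 *
 * so the per-packet cost with no descriptors attached is a single check of
 * bpf_peers_present().
 */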
/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int slen;
	int gottime;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/*
		 * We are not using any locks for d here because:
		 * 1) any filter change is protected by interface
		 * write lock
		 * 2) destroying/detaching d is protected by interface
		 * write lock, too
		 */

		/* XXX: Do not protect counter for the sake of performance. */
		++d->bd_rcount;
		/*
		 * NB: We don't call BPF_CHECK_DIRECTION() here since there
		 * is no way for the caller to indicate to us whether this
		 * packet is inbound or outbound.  In the bpf_mtap() routines,
		 * we use the interface pointers on the mbuf to figure it out.
		 */
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		if (bf != NULL)
			slen = (*(bf->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			/*
			 * Filter matches.  Acquire the descriptor lock.
			 */
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, NULL);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen,
				    bpf_append_bytes, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#define	BPF_CHECK_DIRECTION(d, r, i)				\
	    (((d)->bd_direction == BPF_D_IN && (r) != (i)) ||	\
	    ((d)->bd_direction == BPF_D_OUT && (r) == (i)))

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 * Locking model is explained in bpf_tap().
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bintime bt;
	struct bpf_d *d;
#ifdef BPF_JITTER
	bpf_jit_filter *bf;
#endif
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
#ifdef BPF_JITTER
		bf = bpf_jitter_enable != 0 ? d->bd_bfilter : NULL;
		/* XXX We cannot handle multiple mbufs. */
		if (bf != NULL && m->m_next == NULL)
			slen = (*(bf->func))(mtod(m, u_char *), pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct bintime bt;
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;

	/* Skip outgoing duplicate packets. */
	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
		m->m_flags &= ~M_PROMISC;
		return;
	}

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	gottime = BPF_TSTAMP_NONE;

	BPFIF_RLOCK(bp);

	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (BPF_CHECK_DIRECTION(d, m->m_pkthdr.rcvif, bp->bif_ifp))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			BPFD_LOCK(d);

			d->bd_fcount++;
			if (gottime < bpf_ts_quality(d->bd_tstamp))
				gottime = bpf_gettime(&bt, d->bd_tstamp, m);
#ifdef MAC
			if (mac_bpfdesc_check_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_append_mbuf, &bt);
			BPFD_UNLOCK(d);
		}
	}
	BPFIF_RUNLOCK(bp);
}

#undef	BPF_CHECK_DIRECTION

#undef	BPF_TSTAMP_NONE
#undef	BPF_TSTAMP_FAST
#undef	BPF_TSTAMP_NORMAL
#undef	BPF_TSTAMP_EXTERN

static int
bpf_hdrlen(struct bpf_d *d)
{
	int hdrlen;

	hdrlen = d->bd_bif->bif_hdrlen;
#ifndef BURN_BRIDGES
	if (d->bd_tstamp == BPF_T_NONE ||
	    BPF_T_FORMAT(d->bd_tstamp) == BPF_T_MICROTIME)
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32)
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr32);
		else
#endif
			hdrlen += SIZEOF_BPF_HDR(struct bpf_hdr);
	else
#endif
		hdrlen += SIZEOF_BPF_HDR(struct bpf_xhdr);
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		hdrlen = BPF_WORDALIGN32(hdrlen);
	else
#endif
		hdrlen = BPF_WORDALIGN(hdrlen);

	return (hdrlen - d->bd_bif->bif_hdrlen);
}

static void
bpf_bintime2ts(struct bintime *bt, struct bpf_ts *ts, int tstype)
{
	struct bintime bt2;
	struct timeval tsm;
	struct timespec tsn;

	if ((tstype & BPF_T_MONOTONIC) == 0) {
		bt2 = *bt;
		bintime_add(&bt2, &boottimebin);
		bt = &bt2;
	}
	switch (BPF_T_FORMAT(tstype)) {
	case BPF_T_MICROTIME:
		bintime2timeval(bt, &tsm);
		ts->bt_sec = tsm.tv_sec;
		ts->bt_frac = tsm.tv_usec;
		break;
	case BPF_T_NANOTIME:
		bintime2timespec(bt, &tsn);
		ts->bt_sec = tsn.tv_sec;
		ts->bt_frac = tsn.tv_nsec;
		break;
	case BPF_T_BINTIME:
		ts->bt_sec = bt->sec;
		ts->bt_frac = bt->frac;
		break;
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_append_mbuf is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct bintime *bt)
{
	struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
	struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
	struct bpf_hdr32 hdr32_old;
#endif
#endif
	int caplen, curlen, hdrlen, totlen;
	int do_wakeup = 0;
	int do_timestamp;
	int tstype;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bpf_append_bytes is passed in to copy contiguous chunks,
 * while bpf_append_mbuf is passed in to copy mbuf chains.  In the
 * latter case, pkt is really an mbuf.
 */
static void
catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
    void (*cpfn)(struct bpf_d *, caddr_t, u_int, void *, u_int),
    struct bintime *bt)
{
	struct bpf_xhdr hdr;
#ifndef BURN_BRIDGES
	struct bpf_hdr hdr_old;
#ifdef COMPAT_FREEBSD32
	struct bpf_hdr32 hdr32_old;
#endif
#endif
	int caplen, curlen, hdrlen, totlen;
	int do_wakeup = 0;
	int do_timestamp;
	int tstype;

	BPFD_LOCK_ASSERT(d);

	/*
	 * Detect whether user space has released a buffer back to us, and if
	 * so, move it from being a hold buffer to a free buffer.  This may
	 * not be the best place to do it (for example, we might only want to
	 * run this check if we need the space), but for now it's a reliable
	 * spot to do it.
	 */
	if (d->bd_fbuf == NULL && bpf_canfreebuf(d)) {
		while (d->bd_hbuf_in_use)
			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
			    PRINET, "bd_hbuf", 0);
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
		d->bd_hlen = 0;
		bpf_buf_reclaimed(d);
	}

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	hdrlen = bpf_hdrlen(d);
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 *
	 * Drop the packet if there's no room and no hope of room.
	 * If the packet would overflow the storage buffer or the storage
	 * buffer is considered immutable by the buffer model, try to rotate
	 * the buffer and wake up pending processes.
	 */
#ifdef COMPAT_FREEBSD32
	if (d->bd_compat32)
		curlen = BPF_WORDALIGN32(d->bd_slen);
	else
#endif
		curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize || !bpf_canwritebuf(d)) {
		if (d->bd_fbuf == NULL) {
			/*
			 * There's no room in the store buffer, and no
			 * prospect of room, so drop the packet.  Notify the
			 * buffer model.
			 */
			bpf_buffull(d);
			++d->bd_dcount;
			return;
		}
		while (d->bd_hbuf_in_use)
			mtx_sleep(&d->bd_hbuf_in_use, &d->bd_lock,
			    PRINET, "bd_hbuf", 0);
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has already
		 * expired during a select call.  A packet arrived, so the
		 * reader should be woken up.
		 */
		do_wakeup = 1;
	caplen = totlen - hdrlen;
	tstype = d->bd_tstamp;
	do_timestamp = tstype != BPF_T_NONE;
#ifndef BURN_BRIDGES
	if (tstype == BPF_T_NONE || BPF_T_FORMAT(tstype) == BPF_T_MICROTIME) {
		struct bpf_ts ts;
		if (do_timestamp)
			bpf_bintime2ts(bt, &ts, tstype);
#ifdef COMPAT_FREEBSD32
		if (d->bd_compat32) {
			bzero(&hdr32_old, sizeof(hdr32_old));
			if (do_timestamp) {
				hdr32_old.bh_tstamp.tv_sec = ts.bt_sec;
				hdr32_old.bh_tstamp.tv_usec = ts.bt_frac;
			}
			hdr32_old.bh_datalen = pktlen;
			hdr32_old.bh_hdrlen = hdrlen;
			hdr32_old.bh_caplen = caplen;
			bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr32_old,
			    sizeof(hdr32_old));
			goto copy;
		}
#endif
		bzero(&hdr_old, sizeof(hdr_old));
		if (do_timestamp) {
			hdr_old.bh_tstamp.tv_sec = ts.bt_sec;
			hdr_old.bh_tstamp.tv_usec = ts.bt_frac;
		}
		hdr_old.bh_datalen = pktlen;
		hdr_old.bh_hdrlen = hdrlen;
		hdr_old.bh_caplen = caplen;
		bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr_old,
		    sizeof(hdr_old));
		goto copy;
	}
#endif

	/*
	 * Append the bpf header.  Note we append the actual header size, but
	 * move forward the length of the header plus padding.
	 */
	bzero(&hdr, sizeof(hdr));
	if (do_timestamp)
		bpf_bintime2ts(bt, &hdr.bh_tstamp, tstype);
	hdr.bh_datalen = pktlen;
	hdr.bh_hdrlen = hdrlen;
	hdr.bh_caplen = caplen;
	bpf_append_bytes(d, d->bd_sbuf, curlen, &hdr, sizeof(hdr));

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
#ifndef BURN_BRIDGES
copy:
#endif
	(*cpfn)(d, d->bd_sbuf, curlen + hdrlen, pkt, caplen);
	d->bd_slen = curlen + totlen;

	if (do_wakeup)
		bpf_wakeup(d);
}

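/*
 * The net effect of catchpacket() is that the store buffer holds a
 * sequence of records, each a header immediately followed by up to
 * bh_caplen bytes of packet data, padded so the next record starts on
 * an aligned boundary.  A reader therefore walks one read(2)'s worth
 * of data roughly as follows (illustrative sketch; "buf" and "buflen"
 * are the buffer and byte count returned by a single read, and
 * handle_packet() stands in for the consumer's own code):
 *
 *	u_char *p = buf;
 *
 *	while (p < buf + buflen) {
 *		struct bpf_xhdr *h = (struct bpf_xhdr *)p;
 *
 *		handle_packet(p + h->bh_hdrlen, h->bh_caplen);
 *		p += BPF_WORDALIGN(h->bh_hdrlen + h->bh_caplen);
 *	}
 *
 * This is the walk documented in bpf(4); descriptors left in the
 * default timestamp mode receive struct bpf_hdr records instead, but
 * the loop is otherwise the same.
 */
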
/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{

	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	bpf_free(d);
	if (d->bd_rfilter != NULL) {
		free((caddr_t)d->bd_rfilter, M_BPF);
#ifdef BPF_JITTER
		if (d->bd_bfilter != NULL)
			bpf_destroy_jit_filter(d->bd_bfilter);
#endif
	}
	if (d->bd_wfilter != NULL)
		free((caddr_t)d->bd_wfilter, M_BPF);
	mtx_destroy(&d->bd_lock);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{
	struct bpf_if *bp;

	bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == NULL)
		panic("bpfattach");

	LIST_INIT(&bp->bif_dlist);
	LIST_INIT(&bp->bif_wlist);
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	rw_init(&bp->bif_lock, "bpf interface lock");
	KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized"));
	*driverp = bp;

	BPF_LOCK();
	LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next);
	BPF_UNLOCK();

	bp->bif_hdrlen = hdrlen;

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

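/*
 * Drivers call bpfattach() once per link type when an interface is
 * created and bpfdetach() when it goes away; for Ethernet devices
 * ether_ifattach()/ether_ifdetach() do this on the driver's behalf.
 * Spelled out, the attach side amounts to (illustrative sketch):
 *
 *	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
 *
 * where DLT_EN10MB is the link type later reported through BIOCGDLT
 * and ETHER_HDR_LEN becomes bif_hdrlen above.
 */
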
/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface.  Notify each descriptor as it's detached
 * so that any sleepers wake up and get ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_temp;
	struct bpf_d *d;
	int ndetached;

	ndetached = 0;

	BPF_LOCK();
	/* Find all bpf_if structs which reference ifp and detach them. */
	LIST_FOREACH_SAFE(bp, &bpf_iflist, bif_next, bp_temp) {
		if (ifp != bp->bif_ifp)
			continue;

		LIST_REMOVE(bp, bif_next);
		/* Add to the to-be-freed list. */
		LIST_INSERT_HEAD(&bpf_freelist, bp, bif_next);

		ndetached++;
		/*
		 * Delay freeing bp until the interface is detached
		 * and all routes through this interface are removed.
		 * Mark bp as detached to restrict new consumers.
		 */
		BPFIF_WLOCK(bp);
		bp->bif_flags |= BPFIF_FLAG_DYING;
		BPFIF_WUNLOCK(bp);

		CTR4(KTR_NET, "%s: scheduling free for encap %d (%p) for if %p",
		    __func__, bp->bif_dlt, bp, ifp);

		/* Free common descriptors. */
		while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) {
			bpf_detachd_locked(d);
			BPFD_LOCK(d);
			bpf_wakeup(d);
			BPFD_UNLOCK(d);
		}

		/* Free writer-only descriptors. */
		while ((d = LIST_FIRST(&bp->bif_wlist)) != NULL) {
			bpf_detachd_locked(d);
			BPFD_LOCK(d);
			bpf_wakeup(d);
			BPFD_UNLOCK(d);
		}
	}
	BPF_UNLOCK();

#ifdef INVARIANTS
	if (ndetached == 0)
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
#endif
}

/*
 * Interface departure handler.
 * Note that a departure event does not guarantee that the interface is
 * going down; interface renaming is currently done via a departure/arrival
 * event pair.
 *
 * The departure handler is called after all routes pointing to the given
 * interface have been removed and the interface is in the down state, so
 * no packets can be sent or received.  We assume it is now safe to free
 * data allocated by BPF.
 */
static void
bpf_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_temp;
	int nmatched = 0;

	BPF_LOCK();
	/*
	 * Find matching entries in the free list.
	 * Nothing should be found if bpfdetach() was not called.
	 */
	LIST_FOREACH_SAFE(bp, &bpf_freelist, bif_next, bp_temp) {
		if (ifp != bp->bif_ifp)
			continue;

		CTR3(KTR_NET, "%s: freeing BPF instance %p for interface %p",
		    __func__, bp, ifp);

		LIST_REMOVE(bp, bif_next);

		rw_destroy(&bp->bif_lock);
		free(bp, M_BPF);

		nmatched++;
	}
	BPF_UNLOCK();

	/*
	 * Note that we cannot zero other pointers to
	 * custom DLTs possibly used by the given interface.
	 */
	if (nmatched != 0)
		ifp->if_bpf = NULL;
}

/*
 * Get a list of the available data link types of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len)
				return (ENOMEM);
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	BPF_LOCK_ASSERT();

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;

	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}

	if (bp != NULL) {
		opromisc = d->bd_promisc;
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
				    "bpf_setdlt: ifpromisc failed (%d)\n",
				    error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

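/*
 * bpf_getdltlist() and bpf_setdlt() back the BIOCGDLTLIST and BIOCSDLT
 * ioctls.  Because the count is returned in bfl_len when bfl_list is
 * NULL, consumers typically make two passes -- one to size the array
 * and one to fill it -- before selecting a link type.  An illustrative
 * userland sketch (error handling trimmed; "fd" is an open bpf
 * descriptor already bound to an interface):
 *
 *	struct bpf_dltlist bfl;
 *	u_int dlt;
 *
 *	memset(&bfl, 0, sizeof(bfl));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// learn the count
 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// fetch the DLTs
 *	dlt = bfl.bfl_list[0];
 *	ioctl(fd, BIOCSDLT, &dlt);		// switch to one of them
 */
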
static void
bpf_drvinit(void *unused)
{
	struct cdev *dev;

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);
	LIST_INIT(&bpf_freelist);

	dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
	/* For compatibility */
	make_dev_alias(dev, "bpf0");

	/* Register interface departure handler */
	bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
	    ifnet_departure_event, bpf_ifdetach, NULL,
	    EVENTHANDLER_PRI_ANY);
}

/*
 * Zero out the various packet counters associated with all of the bpf
 * descriptors.  At some point, we will probably want to get a bit more
 * granular and allow the user to specify descriptors to be zeroed.
 */
static void
bpf_zero_counters(void)
{
	struct bpf_if *bp;
	struct bpf_d *bd;

	BPF_LOCK();
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			BPFD_LOCK(bd);
			bd->bd_rcount = 0;
			bd->bd_dcount = 0;
			bd->bd_fcount = 0;
			bd->bd_wcount = 0;
			bd->bd_wfcount = 0;
			bd->bd_zcopy = 0;
			BPFD_UNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
}

/*
 * Fill filter statistics
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_structsize = sizeof(*d);
	/* XXX: reading should be protected by global lock */
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_direction = bd->bd_direction;
	d->bd_feedback = bd->bd_feedback;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_wfcount = bd->bd_wfcount;
	d->bd_zcopy = bd->bd_zcopy;
	d->bd_bufmode = bd->bd_bufmode;
}

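/*
 * The xbpf_d snapshot filled above is what the sysctl handler below
 * exports, one element per descriptor; it is the data `netstat -B'
 * renders as per-listener Recv/Drop/Match counters.  An illustrative
 * consumer-side sketch, assuming the handler is registered elsewhere
 * in this file under the usual net.bpf.stats name:
 *
 *	struct xbpf_d *xbd;
 *	size_t len;
 *	u_int i;
 *
 *	sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0);
 *	xbd = malloc(len);
 *	sysctlbyname("net.bpf.stats", xbd, &len, NULL, 0);
 *	for (i = 0; i < len / sizeof(*xbd); i++)
 *		printf("%s: recv %ju drop %ju\n", xbd[i].bd_ifname,
 *		    (uintmax_t)xbd[i].bd_rcount, (uintmax_t)xbd[i].bd_dcount);
 */
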
/*
 * Handle `netstat -B' stats request
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	static const struct xbpf_d zerostats;
	struct xbpf_d *xbdbuf, *xbd, tempstats;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct.  It is possible for
	 * unprivileged users to open bpf devices.  It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = priv_check(req->td, PRIV_NET_BPF);
	if (error)
		return (error);
	/*
	 * Check to see if the user is requesting that the counters be
	 * zeroed out.  Explicitly check that the supplied data is zeroed,
	 * as we aren't allowing the user to set the counters currently.
	 */
	if (req->newptr != NULL) {
		if (req->newlen != sizeof(tempstats))
			return (EINVAL);
		memset(&tempstats, 0, sizeof(tempstats));
		error = SYSCTL_IN(req, &tempstats, sizeof(tempstats));
		if (error)
			return (error);
		if (bcmp(&tempstats, &zerostats, sizeof(tempstats)) != 0)
			return (EINVAL);
		bpf_zero_counters();
		return (0);
	}
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	BPF_LOCK();
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		BPF_UNLOCK();
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_RLOCK(bp);
		/* Send writer-only descriptors first. */
		LIST_FOREACH(bd, &bp->bif_wlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_RUNLOCK(bp);
	}
	BPF_UNLOCK();
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL);

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */
static struct bpf_if bp_null;

void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
}

void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
}

void
bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m)
{
}

void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

void
bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
{

	*driverp = &bp_null;
}

void
bpfdetach(struct ifnet *ifp)
{
}

u_int
bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
{
	return (-1);	/* "no filter" behaviour */
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return (0);	/* false */
}

#endif /* !DEV_BPF && !NETGRAPH_BPF */
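
/*
 * End to end, the capture path in this file is reached from userland
 * through the character device created in bpf_drvinit().  A minimal
 * illustrative sketch of a blocking reader (error handling and filter
 * programming omitted; "em0" is only a placeholder interface name --
 * see bpf(4) for the complete interface):
 *
 *	struct ifreq ifr;
 *	u_int bufsize;
 *	char *buf;
 *	int fd;
 *
 *	fd = open("/dev/bpf", O_RDONLY);
 *	memset(&ifr, 0, sizeof(ifr));
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		// bind to an interface
 *	ioctl(fd, BIOCGBLEN, &bufsize);		// size of one read buffer
 *	buf = malloc(bufsize);
 *	for (;;) {
 *		ssize_t n = read(fd, buf, bufsize);
 *
 *		// walk the n bytes of records as sketched after
 *		// catchpacket() above
 *	}
 */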