bpf.c revision 163606
1/*- 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 
33 * 34 * @(#)bpf.c 8.4 (Berkeley) 1/9/95 35 * 36 * $FreeBSD: head/sys/net/bpf.c 163606 2006-10-22 11:52:19Z rwatson $ 37 */ 38 39#include "opt_bpf.h" 40#include "opt_mac.h" 41#include "opt_netgraph.h" 42 43#include <sys/types.h> 44#include <sys/param.h> 45#include <sys/systm.h> 46#include <sys/conf.h> 47#include <sys/fcntl.h> 48#include <sys/malloc.h> 49#include <sys/mbuf.h> 50#include <sys/time.h> 51#include <sys/proc.h> 52#include <sys/signalvar.h> 53#include <sys/filio.h> 54#include <sys/sockio.h> 55#include <sys/ttycom.h> 56#include <sys/uio.h> 57 58#include <sys/event.h> 59#include <sys/file.h> 60#include <sys/poll.h> 61#include <sys/proc.h> 62 63#include <sys/socket.h> 64 65#include <net/if.h> 66#include <net/bpf.h> 67#ifdef BPF_JITTER 68#include <net/bpf_jitter.h> 69#endif 70#include <net/bpfdesc.h> 71 72#include <netinet/in.h> 73#include <netinet/if_ether.h> 74#include <sys/kernel.h> 75#include <sys/sysctl.h> 76 77#include <net80211/ieee80211_freebsd.h> 78 79#include <security/mac/mac_framework.h> 80 81static MALLOC_DEFINE(M_BPF, "BPF", "BPF data"); 82 83#if defined(DEV_BPF) || defined(NETGRAPH_BPF) 84 85#define PRINET 26 /* interruptible */ 86 87/* 88 * bpf_iflist is a list of BPF interface structures, each corresponding to a 89 * specific DLT. The same network interface might have several BPF interface 90 * structures registered by different layers in the stack (i.e., 802.11 91 * frames, ethernet frames, etc). 
92 */ 93static LIST_HEAD(, bpf_if) bpf_iflist; 94static struct mtx bpf_mtx; /* bpf global lock */ 95static int bpf_bpfd_cnt; 96 97static void bpf_allocbufs(struct bpf_d *); 98static void bpf_attachd(struct bpf_d *, struct bpf_if *); 99static void bpf_detachd(struct bpf_d *); 100static void bpf_freed(struct bpf_d *); 101static void bpf_mcopy(const void *, void *, size_t); 102static int bpf_movein(struct uio *, int, int, 103 struct mbuf **, struct sockaddr *, struct bpf_insn *); 104static int bpf_setif(struct bpf_d *, struct ifreq *); 105static void bpf_timed_out(void *); 106static __inline void 107 bpf_wakeup(struct bpf_d *); 108static void catchpacket(struct bpf_d *, u_char *, u_int, 109 u_int, void (*)(const void *, void *, size_t), 110 struct timeval *); 111static void reset_d(struct bpf_d *); 112static int bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd); 113static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *); 114static int bpf_setdlt(struct bpf_d *, u_int); 115static void filt_bpfdetach(struct knote *); 116static int filt_bpfread(struct knote *, long); 117static void bpf_drvinit(void *); 118static void bpf_clone(void *, struct ucred *, char *, int, struct cdev **); 119static int bpf_stats_sysctl(SYSCTL_HANDLER_ARGS); 120 121/* 122 * The default read buffer size is patchable. 
 */
SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
static int bpf_bufsize = 4096;		/* default per-descriptor buffer size */
SYSCTL_INT(_net_bpf, OID_AUTO, bufsize, CTLFLAG_RW,
    &bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;	/* upper bound for BIOCSBLEN */
SYSCTL_INT(_net_bpf, OID_AUTO, maxbufsize, CTLFLAG_RW,
    &bpf_maxbufsize, 0, "");
static int bpf_maxinsns = BPF_MAXINSNS;		/* filter program length cap */
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
    &bpf_maxinsns, 0, "Maximum bpf program instructions");
SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_RW,
    bpf_stats_sysctl, "bpf statistics portal");

/* Character-device entry points, wired into bpf_cdevsw below. */
static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

/* kqueue EVFILT_READ filter ops; registered in bpfkqfilter(). */
static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };

/*
 * Build a single mbuf packet from the user data in 'uio' for a write on a
 * descriptor bound to link type 'linktype'.  A sockaddr describing the
 * link-level destination is constructed in *sockp: its sa_family is chosen
 * from the DLT, and for link types with a header ('hlen' != 0) the header
 * bytes are stripped from the front of the mbuf and copied into
 * sockp->sa_data so the interface output routine can prepend its own.
 *
 * On success, *mp holds the mbuf and ownership passes to the caller.
 * On error the mbuf (if allocated) is freed here and an errno is returned:
 *   EIO       - unsupported DLT, or packet larger than MCLBYTES
 *   EMSGSIZE  - payload exceeds the interface MTU
 *   ENOBUFS   - mbuf allocation failed
 *   EPERM     - packet shorter than the link header, or rejected by the
 *               descriptor's write filter 'wfilter' (slen == 0 below)
 */
static int
bpf_movein(struct uio *uio, int linktype, int mtu, struct mbuf **mp,
    struct sockaddr *sockp, struct bpf_insn *wfilter)
{
	const struct ieee80211_bpf_params *p;
	struct mbuf *m;
	int error;
	int len;
	int hlen;
	int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_NULL:
		/*
		 * null interface types require a 4 byte pseudo header which
		 * corresponds to the address family of the packet.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_IEEE802_11:		/* IEEE 802.11 wireless */
		sockp->sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:	/* IEEE 802.11 wireless w/ phy params */
		sockp->sa_family = AF_IEEE80211;
		sockp->sa_len = 12;	/* XXX != 0 */
		hlen = sizeof(struct ieee80211_bpf_params);
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;

	/*
	 * NOTE(review): 'len' includes the link header, so the effective
	 * payload bound is mtu + hlen.  A len < hlen write is caught by the
	 * m_len < hlen check below (EPERM), not here.
	 */
	if (len - hlen > mtu)
		return (EMSGSIZE);

	if ((unsigned)len > MCLBYTES)
		return (EIO);

	/* Use a cluster for packets too big for a plain header mbuf. */
	if (len > MHLEN) {
		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
	} else {
		MGETHDR(m, M_TRYWAIT, MT_DATA);
	}
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}

	error = uiomove(mtod(m, u_char *), len, uio);
	if (error)
		goto bad;

	/* Run the descriptor's write filter; a zero return rejects the write. */
	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
	if (slen == 0) {
		error = EPERM;
		goto bad;
	}

	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (sockp->sa_family == AF_IEEE80211) {
			/*
			 * Collect true length from the parameter header
			 * NB: sockp is known to be zero'd so if we do a
			 *     short copy unspecified parameters will be
			 *     zero.
			 * NB: packet may not be aligned after stripping
			 *     bpf params
			 * XXX check ibp_vers
			 */
			p = mtod(m, const struct ieee80211_bpf_params *);
			hlen = p->ibp_len;
			if (hlen > sizeof(sockp->sa_data)) {
				error = EINVAL;
				goto bad;
			}
		}
		bcopy(m->m_data, sockp->sa_data, hlen);
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen;	/* XXX */
#else
		m->m_off += hlen;
#endif
	}

	return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	LIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);

	bpf_bpfd_cnt++;
	BPFIF_UNLOCK(bp);
}

/*
 * Detach a file from its interface.
 *
 * Lock order here is BPFIF then BPFD; both are dropped before the
 * (potentially sleeping) ifpromisc() call below.
 */
static void
bpf_detachd(struct bpf_d *d)
{
	int error;
	struct bpf_if *bp;
	struct ifnet *ifp;

	bp = d->bd_bif;
	BPFIF_LOCK(bp);
	BPFD_LOCK(d);
	ifp = d->bd_bif->bif_ifp;

	/*
	 * Remove d from the interface's descriptor list.
	 */
	LIST_REMOVE(d, bd_next);

	bpf_bpfd_cnt--;
	d->bd_bif = NULL;
	BPFD_UNLOCK(d);
	BPFIF_UNLOCK(bp);

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 *
	 * NOTE(review): bd_promisc is read and cleared after the BPFD lock
	 * has been dropped — presumably serialized by the callers (e.g.
	 * bpfclose holds bpf_mtx); confirm before relying on it.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
			    "bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d != NULL) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	/*
	 * Reserve the minor with an all-ones placeholder so a concurrent
	 * open sees it busy while we allocate outside bpf_mtx.
	 */
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;	/* tunable default; see BIOCSBLEN */
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;		/* see outbound packets by default */
	d->bd_pid = td->td_proc->p_pid;
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, NET_CALLOUT_MPSAFE);
	knlist_init(&d->bd_sel.si_note, &d->bd_mtx, NULL, NULL, NULL);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(struct cdev *dev, int flags, int fmt, struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;

	/* Cancel any pending read-timeout callout before tearing down. */
	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
	selwakeuppri(&d->bd_sel, PRINET);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	knlist_destroy(&d->bd_sel.si_note);
	bpf_freed(d);
	dev->si_drv1 = NULL;
	free(d, M_BPF);

	return (0);
}


/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * NB: multi-statement macro, deliberately not wrapped in do { } while (0);
 * all callers in this file invoke it as a stand-alone statement with the
 * descriptor lock held and a free buffer available (bd_fbuf != NULL).
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & O_NONBLOCK) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		/* Sleep interruptibly; bd_rtout == 0 means wait forever. */
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		    "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 *
	 * NOTE(review): uiomove() runs unlocked; the hold buffer is only
	 * ever released by a reader, and reads are serialized per
	 * descriptor — presumably safe, confirm against catchpacket().
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	/* Recycle the hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(struct bpf_d *d)
{

	BPFD_LOCK_ASSERT(d);
	/* A wakeup cancels any pending read-timeout state. */
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	/* Deliver SIGIO (or the signal chosen via BIOCSRSIG) if requested. */
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE_LOCKED(&d->bd_sel.si_note, 0);
}

/*
 * Callout handler for the read timeout armed by bpfpoll()/filt_bpfread().
 * Marks the descriptor timed out and wakes readers if the store buffer
 * has data.
 */
static void
bpf_timed_out(void *arg)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}

/*
 * Write entry point: inject a packet on the bound interface.  The user
 * data is converted to an mbuf (and link-level sockaddr) by bpf_movein()
 * and handed to the interface output routine, which frees the mbuf.
 */
static int
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;

	/* NOTE(review): bd_bif is read without the descriptor lock here. */
	if (d->bd_bif == NULL)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if ((ifp->if_flags & IFF_UP) == 0)
		return (ENETDOWN);

	if (uio->uio_resid == 0)
		return (0);

	/* bpf_movein() relies on dst being zeroed (see AF_IEEE80211 path). */
	bzero(&dst, sizeof(dst));
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu,
	    &m, &dst, d->bd_wfilter);
	if (error)
		return (error);

	/* Caller supplied the full link header; tell the driver not to. */
	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

#ifdef MAC
	BPFD_LOCK(d);
	mac_create_mbuf_from_bpfdesc(d, m);
	BPFD_UNLOCK(d);
#endif
	NET_LOCK_GIANT();
	error = (*ifp->if_output)(ifp, m, &dst, NULL);
	NET_UNLOCK_GIANT();
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Caller must hold the descriptor lock
 * (asserted below).
 */
static void
reset_d(struct bpf_d *d)
{

	mtx_assert(&d->bd_mtx, MA_OWNED);
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  FIONBIO		Set/clear non-blocking I/O (no-op; handled by upper layer).
 *  FIOASYNC		Send signal on receive packets.
 *  FIOSETOWN/FIOGETOWN	Set/get SIGIO recipient.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSBLEN		Set buffer len (before interface attach only).
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCSETWF		Set ethernet write filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGDLTLIST	Get list of available link layer types.
 *  BIOCSDLT		Set link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 *  BIOCSRSIG/BIOCGRSIG	Set/get receive signal number.
 *  BIOCLOCK		Set "locked" flag
 */
/* ARGSUSED */
static int
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	/*
	 * Once BIOCLOCK has been issued, only the commands whitelisted
	 * below (read-only queries plus a few safe setters) are allowed.
	 */
	if (d->bd_locked == 1) {
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
			break;
		default:
			return (EPERM);
		}
	}
	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == NULL)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only permitted before an interface is
	 * attached; the requested size is clamped to
	 * [BPF_MINBUFSIZE, bpf_maxbufsize] and written back to the caller.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, cmd);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCLOCK:
		d->bd_locked = 1;
		break;
	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 * cmd selects the write filter (BIOCSETWF) or the read filter (BIOCSETF);
 * a NULL instruction list with zero length clears the filter.
 */
static int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int wfilter, flen, size;
#ifdef BPF_JITTER
	bpf_jit_filter *ofunc;
#endif

	if (cmd == BIOCSETWF) {
		old = d->bd_wfilter;
		wfilter = 1;
#ifdef BPF_JITTER
		ofunc = NULL;
#endif
	} else {
		wfilter = 0;
		old = d->bd_rfilter;
#ifdef BPF_JITTER
		ofunc = d->bd_bfilter;
#endif
	}
	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		/* Clear the selected filter and flush buffered packets. */
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = NULL;
		else {
			d->bd_rfilter = NULL;
#ifdef BPF_JITTER
			d->bd_bfilter = NULL;
#endif
		}
		reset_d(d);
		BPFD_UNLOCK(d);
		/* Old filter storage is released after dropping the lock. */
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif
		return (0);
	}
	flen = fp->bf_len;
	if (flen > bpf_maxinsns)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	/* Copy the program in and verify it before installing it. */
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		if (wfilter)
			d->bd_wfilter = fcode;
		else {
			d->bd_rfilter = fcode;
#ifdef BPF_JITTER
			d->bd_bfilter = bpf_jitter(fcode, flen);
#endif
		}
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != NULL)
			free((caddr_t)old, M_BPF);
#ifdef BPF_JITTER
		if (ofunc != NULL)
			bpf_destroy_jit_filter(ofunc);
#endif

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == NULL || theywant->if_bpf == NULL)
		return (ENXIO);

	bp = theywant->if_bpf;
	/*
	 * Allocate the packet buffers if we need to.
	 * If we're already attached to requested interface,
	 * just flush the buffer.
	 */
	if (d->bd_sbuf == NULL)
		bpf_allocbufs(d);
	if (bp != d->bd_bif) {
		if (d->bd_bif)
			/*
			 * Detach if attached to something else.
			 */
			bpf_detachd(d);

		bpf_attachd(d, bp);
	}
	BPFD_LOCK(d);
	reset_d(d);
	BPFD_UNLOCK(d);
	return (0);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(struct cdev *dev, int events, struct thread *td)
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return (ENXIO);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	revents = events & (POLLOUT | POLLWRNORM);	/* writes never block here */
	BPFD_LOCK(d);
	d->bd_pid = td->td_proc->p_pid;
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(struct cdev *dev, struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	/*
	 * Refresh PID associated with this descriptor.
	 */
	BPFD_LOCK(d);
	d->bd_pid = curthread->td_proc->p_pid;
	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	knlist_add(&d->bd_sel.si_note, kn, 1);
	BPFD_UNLOCK(d);

	return (0);
}

/* Knote detach callback: drop the note from the descriptor's list. */
static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	knlist_remove(&d->bd_sel.si_note, kn, 0);
}

/*
 * EVFILT_READ event predicate: report readable bytes, or arm the read
 * timeout when nothing is pending.  Runs with the descriptor lock held
 * (knlist lock is bd_mtx; asserted below).
 */
static int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK_ASSERT(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}

	return (ready);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
{
	struct bpf_d *d;
	u_int slen;
	int gottime;
	struct timeval tv;

	/* Timestamp lazily: taken once, only if some filter matches. */
	gottime = 0;
	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL)
			slen = (*(d->bd_bfilter->func))(pkt, pktlen, pktlen);
		else
#endif
		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		/* Running out of chain before len is exhausted is a caller bug. */
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(struct bpf_if *bp, struct mbuf *m)
{
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	gottime = 0;

	pktlen = m_length(m, NULL);

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		/* Skip locally-generated packets unless BIOCSSEESENT is set. */
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
#ifdef BPF_JITTER
		/* XXX We cannot handle multiple mbufs. */
		if (bpf_jitter_enable != 0 && d->bd_bfilter != NULL &&
		    m->m_next == NULL)
			slen = (*(d->bd_bfilter->func))(mtod(m, u_char *),
			    pktlen, pktlen);
		else
#endif
		/* buflen 0 flags the "buffer" as an mbuf chain to bpf_filter. */
		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;
	int gottime;
	struct timeval tv;

	gottime = 0;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only setup what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	LIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_rfilter, (u_char *)&mb, pktlen, 0);
		if (slen != 0) {
			d->bd_fcount++;
			if (!gottime) {
				microtime(&tv);
				gottime = 1;
			}
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy, &tv);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.
"cpfn" is the routine called to do the actual data 1385 * transfer. bcopy is passed in to copy contiguous chunks, while 1386 * bpf_mcopy is passed in to copy mbuf chains. In the latter case, 1387 * pkt is really an mbuf. 1388 */ 1389static void 1390catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen, 1391 void (*cpfn)(const void *, void *, size_t), struct timeval *tv) 1392{ 1393 struct bpf_hdr *hp; 1394 int totlen, curlen; 1395 int hdrlen = d->bd_bif->bif_hdrlen; 1396 int do_wakeup = 0; 1397 1398 BPFD_LOCK_ASSERT(d); 1399 /* 1400 * Figure out how many bytes to move. If the packet is 1401 * greater or equal to the snapshot length, transfer that 1402 * much. Otherwise, transfer the whole packet (unless 1403 * we hit the buffer size limit). 1404 */ 1405 totlen = hdrlen + min(snaplen, pktlen); 1406 if (totlen > d->bd_bufsize) 1407 totlen = d->bd_bufsize; 1408 1409 /* 1410 * Round up the end of the previous packet to the next longword. 1411 */ 1412 curlen = BPF_WORDALIGN(d->bd_slen); 1413 if (curlen + totlen > d->bd_bufsize) { 1414 /* 1415 * This packet will overflow the storage buffer. 1416 * Rotate the buffers if we can, then wakeup any 1417 * pending reads. 1418 */ 1419 if (d->bd_fbuf == NULL) { 1420 /* 1421 * We haven't completed the previous read yet, 1422 * so drop the packet. 1423 */ 1424 ++d->bd_dcount; 1425 return; 1426 } 1427 ROTATE_BUFFERS(d); 1428 do_wakeup = 1; 1429 curlen = 0; 1430 } 1431 else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) 1432 /* 1433 * Immediate mode is set, or the read timeout has 1434 * already expired during a select call. A packet 1435 * arrived, so the reader should be woken up. 1436 */ 1437 do_wakeup = 1; 1438 1439 /* 1440 * Append the bpf header. 1441 */ 1442 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); 1443 hp->bh_tstamp = *tv; 1444 hp->bh_datalen = pktlen; 1445 hp->bh_hdrlen = hdrlen; 1446 /* 1447 * Copy the packet data into the store buffer and update its length. 
1448 */ 1449 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); 1450 d->bd_slen = curlen + totlen; 1451 1452 if (do_wakeup) 1453 bpf_wakeup(d); 1454} 1455 1456/* 1457 * Initialize all nonzero fields of a descriptor. 1458 */ 1459static void 1460bpf_allocbufs(struct bpf_d *d) 1461{ 1462 1463 KASSERT(d->bd_fbuf == NULL, ("bpf_allocbufs: bd_fbuf != NULL")); 1464 KASSERT(d->bd_sbuf == NULL, ("bpf_allocbufs: bd_sbuf != NULL")); 1465 KASSERT(d->bd_hbuf == NULL, ("bpf_allocbufs: bd_hbuf != NULL")); 1466 1467 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK); 1468 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK); 1469 d->bd_slen = 0; 1470 d->bd_hlen = 0; 1471} 1472 1473/* 1474 * Free buffers currently in use by a descriptor. 1475 * Called on close. 1476 */ 1477static void 1478bpf_freed(struct bpf_d *d) 1479{ 1480 /* 1481 * We don't need to lock out interrupts since this descriptor has 1482 * been detached from its interface and it yet hasn't been marked 1483 * free. 1484 */ 1485 if (d->bd_sbuf != NULL) { 1486 free(d->bd_sbuf, M_BPF); 1487 if (d->bd_hbuf != NULL) 1488 free(d->bd_hbuf, M_BPF); 1489 if (d->bd_fbuf != NULL) 1490 free(d->bd_fbuf, M_BPF); 1491 } 1492 if (d->bd_rfilter) { 1493 free((caddr_t)d->bd_rfilter, M_BPF); 1494#ifdef BPF_JITTER 1495 bpf_destroy_jit_filter(d->bd_bfilter); 1496#endif 1497 } 1498 if (d->bd_wfilter) 1499 free((caddr_t)d->bd_wfilter, M_BPF); 1500 mtx_destroy(&d->bd_mtx); 1501} 1502 1503/* 1504 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the 1505 * fixed size of the link header (variable length headers not yet supported). 1506 */ 1507void 1508bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1509{ 1510 1511 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); 1512} 1513 1514/* 1515 * Attach an interface to bpf. 
ifp is a pointer to the structure 1516 * defining the interface to be attached, dlt is the link layer type, 1517 * and hdrlen is the fixed size of the link header (variable length 1518 * headers are not yet supporrted). 1519 */ 1520void 1521bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1522{ 1523 struct bpf_if *bp; 1524 1525 bp = malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO); 1526 if (bp == NULL) 1527 panic("bpfattach"); 1528 1529 LIST_INIT(&bp->bif_dlist); 1530 bp->bif_ifp = ifp; 1531 bp->bif_dlt = dlt; 1532 mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF); 1533 KASSERT(*driverp == NULL, ("bpfattach2: driverp already initialized")); 1534 *driverp = bp; 1535 1536 mtx_lock(&bpf_mtx); 1537 LIST_INSERT_HEAD(&bpf_iflist, bp, bif_next); 1538 mtx_unlock(&bpf_mtx); 1539 1540 /* 1541 * Compute the length of the bpf header. This is not necessarily 1542 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1543 * that the network layer header begins on a longword boundary (for 1544 * performance reasons and to alleviate alignment restrictions). 1545 */ 1546 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1547 1548 if (bootverbose) 1549 if_printf(ifp, "bpf attached\n"); 1550} 1551 1552/* 1553 * Detach bpf from an interface. This involves detaching each descriptor 1554 * associated with the interface, and leaving bd_bif NULL. Notify each 1555 * descriptor as it's detached so that any sleepers wake up and get 1556 * ENXIO. 
1557 */ 1558void 1559bpfdetach(struct ifnet *ifp) 1560{ 1561 struct bpf_if *bp; 1562 struct bpf_d *d; 1563 1564 /* Locate BPF interface information */ 1565 mtx_lock(&bpf_mtx); 1566 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 1567 if (ifp == bp->bif_ifp) 1568 break; 1569 } 1570 1571 /* Interface wasn't attached */ 1572 if ((bp == NULL) || (bp->bif_ifp == NULL)) { 1573 mtx_unlock(&bpf_mtx); 1574 printf("bpfdetach: %s was not attached\n", ifp->if_xname); 1575 return; 1576 } 1577 1578 LIST_REMOVE(bp, bif_next); 1579 mtx_unlock(&bpf_mtx); 1580 1581 while ((d = LIST_FIRST(&bp->bif_dlist)) != NULL) { 1582 bpf_detachd(d); 1583 BPFD_LOCK(d); 1584 bpf_wakeup(d); 1585 BPFD_UNLOCK(d); 1586 } 1587 1588 mtx_destroy(&bp->bif_mtx); 1589 free(bp, M_BPF); 1590} 1591 1592/* 1593 * Get a list of available data link type of the interface. 1594 */ 1595static int 1596bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl) 1597{ 1598 int n, error; 1599 struct ifnet *ifp; 1600 struct bpf_if *bp; 1601 1602 ifp = d->bd_bif->bif_ifp; 1603 n = 0; 1604 error = 0; 1605 mtx_lock(&bpf_mtx); 1606 LIST_FOREACH(bp, &bpf_iflist, bif_next) { 1607 if (bp->bif_ifp != ifp) 1608 continue; 1609 if (bfl->bfl_list != NULL) { 1610 if (n >= bfl->bfl_len) { 1611 mtx_unlock(&bpf_mtx); 1612 return (ENOMEM); 1613 } 1614 error = copyout(&bp->bif_dlt, 1615 bfl->bfl_list + n, sizeof(u_int)); 1616 } 1617 n++; 1618 } 1619 mtx_unlock(&bpf_mtx); 1620 bfl->bfl_len = n; 1621 return (error); 1622} 1623 1624/* 1625 * Set the data link type of a BPF instance. 
 */
static int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	/* Nothing to do if the descriptor already uses this DLT. */
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	/* Find a BPF interface on the same ifnet with the requested DLT. */
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		/* Remember promiscuity so it can be restored after reattach. */
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

/*
 * dev_clone event handler: create /dev/bpfN on demand when a unit is
 * first looked up.
 */
static void
bpf_clone(void *arg, struct ucred *cred, char *name, int namelen,
    struct cdev **dev)
{
	int u;

	/* Another handler already produced a device. */
	if (*dev != NULL)
		return;
	/* Only names of the form "bpf<unit>" are handled here. */
	if (dev_stdclone(name, NULL, "bpf", &u) != 1)
		return;
	*dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
	    "bpf%d", u);
	dev_ref(*dev);
	(*dev)->si_flags |= SI_CHEAPCLONE;
	return;
}

/*
 * Driver initialization: set up the global lock, the interface list and
 * the device-clone event handler.
 */
static void
bpf_drvinit(void *unused)
{

	mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
	LIST_INIT(&bpf_iflist);
	EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
}

/*
 * Export a descriptor's statistics in the externally visible xbpf_d
 * format; used by the bpf statistics sysctl below.
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{

	bzero(d, sizeof(*d));
	BPFD_LOCK_ASSERT(bd);
	d->bd_immediate = bd->bd_immediate;
	d->bd_promisc = bd->bd_promisc;
	d->bd_hdrcmplt = bd->bd_hdrcmplt;
	d->bd_seesent = bd->bd_seesent;
	d->bd_async = bd->bd_async;
	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_sig = bd->bd_sig;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize =
	    bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	strlcpy(d->bd_ifname,
	    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	d->bd_locked = bd->bd_locked;
}

/*
 * Sysctl handler returning a snapshot of the statistics of every open
 * BPF descriptor.
 */
static int
bpf_stats_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct xbpf_d *xbdbuf, *xbd;
	int index, error;
	struct bpf_if *bp;
	struct bpf_d *bd;

	/*
	 * XXX This is not technically correct. It is possible for non
	 * privileged users to open bpf devices. It would make sense
	 * if the users who opened the devices were able to retrieve
	 * the statistics for them, too.
	 */
	error = suser(req->td);
	if (error)
		return (error);
	/* Size probe: report the space needed without copying anything. */
	if (req->oldptr == NULL)
		return (SYSCTL_OUT(req, 0, bpf_bpfd_cnt * sizeof(*xbd)));
	if (bpf_bpfd_cnt == 0)
		return (SYSCTL_OUT(req, 0, 0));
	xbdbuf = malloc(req->oldlen, M_BPF, M_WAITOK);
	mtx_lock(&bpf_mtx);
	/* Re-check under the lock; the descriptor count may have grown. */
	if (req->oldlen < (bpf_bpfd_cnt * sizeof(*xbd))) {
		mtx_unlock(&bpf_mtx);
		free(xbdbuf, M_BPF);
		return (ENOMEM);
	}
	index = 0;
	LIST_FOREACH(bp, &bpf_iflist, bif_next) {
		BPFIF_LOCK(bp);
		LIST_FOREACH(bd, &bp->bif_dlist, bd_next) {
			xbd = &xbdbuf[index++];
			BPFD_LOCK(bd);
			bpfstats_fill_xbpf(xbd, bd);
			BPFD_UNLOCK(bd);
		}
		BPFIF_UNLOCK(bp);
	}
	mtx_unlock(&bpf_mtx);
	error = SYSCTL_OUT(req, xbdbuf, index * sizeof(*xbd));
	free(xbdbuf, M_BPF);
	return (error);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE,bpf_drvinit,NULL)

#else /* !DEV_BPF && !NETGRAPH_BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
1766 */ 1767static struct bpf_if bp_null; 1768 1769void 1770bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen) 1771{ 1772} 1773 1774void 1775bpf_mtap(struct bpf_if *bp, struct mbuf *m) 1776{ 1777} 1778 1779void 1780bpf_mtap2(struct bpf_if *bp, void *d, u_int l, struct mbuf *m) 1781{ 1782} 1783 1784void 1785bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen) 1786{ 1787 1788 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf); 1789} 1790 1791void 1792bpfattach2(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp) 1793{ 1794 1795 *driverp = &bp_null; 1796} 1797 1798void 1799bpfdetach(struct ifnet *ifp) 1800{ 1801} 1802 1803u_int 1804bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen) 1805{ 1806 return -1; /* "no filter" behaviour */ 1807} 1808 1809int 1810bpf_validate(const struct bpf_insn *f, int len) 1811{ 1812 return 0; /* false */ 1813} 1814 1815#endif /* !DEV_BPF && !NETGRAPH_BPF */ 1816