bpf.c revision 37939
1/* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94 39 * 40 * $Id: bpf.c,v 1.39 1998/06/07 17:12:01 dfr Exp $ 41 */ 42 43#include "bpfilter.h" 44 45#if NBPFILTER > 0 46 47#ifndef __GNUC__ 48#define inline 49#else 50#define inline __inline 51#endif 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <sys/conf.h> 56#include <sys/malloc.h> 57#include <sys/mbuf.h> 58#include <sys/time.h> 59#include <sys/proc.h> 60#include <sys/signalvar.h> 61#include <sys/filio.h> 62#include <sys/sockio.h> 63#include <sys/ttycom.h> 64 65#if defined(sparc) && BSD < 199103 66#include <sys/stream.h> 67#endif 68#include <sys/poll.h> 69 70#include <sys/socket.h> 71#include <sys/vnode.h> 72 73#include <net/if.h> 74#include <net/bpf.h> 75#include <net/bpfdesc.h> 76 77#include <netinet/in.h> 78#include <netinet/if_ether.h> 79#include <sys/kernel.h> 80#include <sys/sysctl.h> 81 82#include "opt_devfs.h" 83 84#ifdef DEVFS 85#include <sys/devfsext.h> 86#endif /*DEVFS*/ 87 88 89/* 90 * Older BSDs don't have kernel malloc. 91 */ 92#if BSD < 199103 93extern bcopy(); 94static caddr_t bpf_alloc(); 95#include <net/bpf_compat.h> 96#define BPF_BUFSIZE (MCLBYTES-8) 97#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) 98#else 99#define BPF_BUFSIZE 4096 100#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) 101#endif 102 103#define PRINET 26 /* interruptible */ 104 105/* 106 * The default read buffer size is patchable. 
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
        &bpf_bufsize, 0, "");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if *bpf_iflist;
static struct bpf_d bpf_dtab[NBPFILTER];
static int bpf_dtab_init;

static int bpf_allocbufs __P((struct bpf_d *));
static void bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void bpf_detachd __P((struct bpf_d *d));
static void bpf_freed __P((struct bpf_d *));
static void bpf_ifname __P((struct ifnet *, struct ifreq *));
static void bpf_mcopy __P((const void *, void *, u_int));
static int bpf_movein __P((struct uio *, int,
                struct mbuf **, struct sockaddr *, int *));
static int bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
        bpf_wakeup __P((struct bpf_d *));
static void catchpacket __P((struct bpf_d *, u_char *, u_int,
                u_int, void (*)(const void *, void *, u_int)));
static void reset_d __P((struct bpf_d *));
static int bpf_setf __P((struct bpf_d *, struct bpf_program *));

static d_open_t bpfopen;
static d_close_t bpfclose;
static d_read_t bpfread;
static d_write_t bpfwrite;
static d_ioctl_t bpfioctl;
static d_poll_t bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw =
        { bpfopen,      bpfclose,       bpfread,        bpfwrite,       /*23*/
          bpfioctl,     nostop,         nullreset,      nodevtotty,/* bpf */
          bpfpoll,      nommap,         NULL,   "bpf",  NULL,   -1 };


/*
 * Copy a packet from user space (uio) into a newly allocated mbuf and
 * construct a sockaddr appropriate for the given link type.  On success,
 * *mp holds the mbuf (ownership passes to the caller) and *datlen is the
 * payload length, i.e. the total length minus the link-level header,
 * which is copied into sockp->sa_data instead of the mbuf.
 */
static int
bpf_movein(uio, linktype, mp, sockp, datlen)
        register struct uio *uio;
        int linktype, *datlen;
        register struct mbuf **mp;
        register struct sockaddr *sockp;
{
        struct mbuf *m;
        int error;
        int len;
        int hlen;

        /*
         * Build a sockaddr based on the data link layer type.
         * We do this at this level because the ethernet header
         * is copied directly into the data field of the sockaddr.
         * In the case of SLIP, there is no header and the packet
         * is forwarded as is.
         * Also, we are careful to leave room at the front of the mbuf
         * for the link level header.
         */
        switch (linktype) {

        case DLT_SLIP:
                sockp->sa_family = AF_INET;
                hlen = 0;
                break;

        case DLT_EN10MB:
                sockp->sa_family = AF_UNSPEC;
                /* XXX Would MAXLINKHDR be better? */
                hlen = sizeof(struct ether_header);
                break;

        case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
                sockp->sa_family = AF_IMPLINK;
                hlen = 0;
#else
                sockp->sa_family = AF_UNSPEC;
                /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
                hlen = 24;
#endif
                break;

        case DLT_NULL:
                sockp->sa_family = AF_UNSPEC;
                hlen = 0;
                break;

#ifdef __FreeBSD__
        case DLT_ATM_RFC1483:
                /*
                 * en atm driver requires 4-byte atm pseudo header.
                 * though it isn't standard, vpi:vci needs to be
                 * specified anyway.
                 */
                sockp->sa_family = AF_UNSPEC;
                hlen = 12;      /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
                break;
#endif

        default:
                return (EIO);
        }

        len = uio->uio_resid;
        *datlen = len - hlen;
        if ((unsigned)len > MCLBYTES)
                return (EIO);

        MGETHDR(m, M_WAIT, MT_DATA);
        if (m == 0)
                return (ENOBUFS);
        /* Packets larger than a plain mbuf need an external cluster. */
        if (len > MHLEN) {
#if BSD >= 199103
                MCLGET(m, M_WAIT);
                if ((m->m_flags & M_EXT) == 0) {
#else
                MCLGET(m);
                if (m->m_len != MCLBYTES) {
#endif
                        error = ENOBUFS;
                        goto bad;
                }
        }
        m->m_pkthdr.len = m->m_len = len;
        m->m_pkthdr.rcvif = NULL;
        *mp = m;
        /*
         * Make room for link header.
         */
        if (hlen != 0) {
                m->m_pkthdr.len -= hlen;
                m->m_len -= hlen;
#if BSD >= 199103
                m->m_data += hlen; /* XXX */
#else
                m->m_off += hlen;
#endif
                /* The link-level header goes into the sockaddr, not the mbuf. */
                error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
                if (error)
                        goto bad;
        }
        error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
        if (!error)
                return (0);
 bad:
        m_freem(m);
        return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
        struct bpf_d *d;
        struct bpf_if *bp;
{
        /*
         * Point d at bp, and add d to the interface's list of listeners.
         * Finally, point the driver's bpf cookie at the interface so
         * it will divert packets to bpf.
         */
        d->bd_bif = bp;
        d->bd_next = bp->bif_dlist;
        bp->bif_dlist = d;

        bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
        struct bpf_d *d;
{
        struct bpf_d **p;
        struct bpf_if *bp;

        bp = d->bd_bif;
        /*
         * Check if this descriptor had requested promiscuous mode.
         * If so, turn it off.
         */
        if (d->bd_promisc) {
                d->bd_promisc = 0;
                if (ifpromisc(bp->bif_ifp, 0))
                        /*
                         * Something is really wrong if we were able to put
                         * the driver into promiscuous mode, but can't
                         * take it out.
                         */
                        panic("bpf: ifpromisc failed");
        }
        /* Remove d from the interface's descriptor list. */
        p = &bp->bif_dlist;
        while (*p != d) {
                p = &(*p)->bd_next;
                if (*p == 0)
                        panic("bpf_detachd: descriptor not in list");
        }
        *p = (*p)->bd_next;
        if (bp->bif_dlist == 0)
                /*
                 * Let the driver know that there are no more listeners.
                 */
                d->bd_bif->bif_ifp->if_bpf = 0;
        d->bd_bif = 0;
}


/*
 * Mark a descriptor free by making it point to itself.
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static int
bpfopen(dev, flags, fmt, p)
        dev_t dev;
        int flags;
        int fmt;
        struct proc *p;
{
        register struct bpf_d *d;

        if (minor(dev) >= NBPFILTER)
                return (ENXIO);
        /*
         * Each minor can be opened by only one process.  If the requested
         * minor is in use, return EBUSY.
         */
        d = &bpf_dtab[minor(dev)];
        if (!D_ISFREE(d))
                return (EBUSY);

        /* Mark "free" and do most initialization. */
        /* (bzero clears bd_next, which is exactly D_MARKUSED.) */
        bzero((char *)d, sizeof(*d));
        d->bd_bufsize = bpf_bufsize;
        d->bd_sig = SIGIO;

        return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static int
bpfclose(dev, flags, fmt, p)
        dev_t dev;
        int flags;
        int fmt;
        struct proc *p;
{
        register struct bpf_d *d = &bpf_dtab[minor(dev)];
        register int s;

        s = splimp();
        if (d->bd_bif)
                bpf_detachd(d);
        splx(s);
        /* bpf_freed() releases the buffers and does D_MARKFREE. */
        bpf_freed(d);

        return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
        caddr_t arg;
{
        struct bpf_d *d = (struct bpf_d *)arg;
        d->bd_timedout = 1;
        wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep() with sleep()/timeout() for pre-4.3-Reno systems:
 * EWOULDBLOCK on timer expiry, EINTR on signal, 0 on normal wakeup.
 */
int
bpf_sleep(d)
        register struct bpf_d *d;
{
        register int rto = d->bd_rtout;
        register int st;

        if (rto != 0) {
                d->bd_timedout = 0;
                timeout(bpf_timeout, (caddr_t)d, rto);
        }
        st = sleep((caddr_t)d, PRINET|PCATCH);
        if (rto != 0) {
                if (d->bd_timedout == 0)
                        untimeout(bpf_timeout, (caddr_t)d);
                else if (st == 0)
                        return EWOULDBLOCK;
        }
        return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * NOTE(review): multi-statement macro, not wrapped in do { } while (0);
 * must only be used where a brace-enclosed block surrounds it, as all
 * current call sites do.
 */
#define ROTATE_BUFFERS(d) \
        (d)->bd_hbuf = (d)->bd_sbuf; \
        (d)->bd_hlen = (d)->bd_slen; \
        (d)->bd_sbuf = (d)->bd_fbuf; \
        (d)->bd_slen = 0; \
        (d)->bd_fbuf = 0;
/*
 * bpfread - read next chunk of packets from buffers
 */
static int
bpfread(dev, uio, ioflag)
        dev_t dev;
        register struct uio *uio;
        int ioflag;
{
        register struct bpf_d *d = &bpf_dtab[minor(dev)];
        int error;
        int s;

        /*
         * Restrict application to use a buffer the same size as
         * as kernel buffers.
         */
        if (uio->uio_resid != d->bd_bufsize)
                return (EINVAL);

        s = splimp();
        /*
         * If the hold buffer is empty, then do a timed sleep, which
         * ends when the timeout expires or when enough packets
         * have arrived to fill the store buffer.
         */
        while (d->bd_hbuf == 0) {
                if (d->bd_immediate && d->bd_slen != 0) {
                        /*
                         * A packet(s) either arrived since the previous
                         * read or arrived while we were asleep.
                         * Rotate the buffers and return what's here.
                         */
                        ROTATE_BUFFERS(d);
                        break;
                }
                if (ioflag & IO_NDELAY)
                        error = EWOULDBLOCK;
                else
                        error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
                                          d->bd_rtout);
                if (error == EINTR || error == ERESTART) {
                        splx(s);
                        return (error);
                }
                if (error == EWOULDBLOCK) {
                        /*
                         * On a timeout, return what's in the buffer,
                         * which may be nothing.  If there is something
                         * in the store buffer, we can rotate the buffers.
                         */
                        if (d->bd_hbuf)
                                /*
                                 * We filled up the buffer in between
                                 * getting the timeout and arriving
                                 * here, so we don't need to rotate.
                                 */
                                break;

                        if (d->bd_slen == 0) {
                                splx(s);
                                return (0);
                        }
                        ROTATE_BUFFERS(d);
                        break;
                }
        }
        /*
         * At this point, we know we have something in the hold slot.
         */
        splx(s);

        /*
         * Move data from hold buffer into user space.
         * We know the entire buffer is transferred since
         * we checked above that the read buffer is bpf_bufsize bytes.
         */
        error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

        /* Recycle the hold buffer as the new free buffer. */
        s = splimp();
        d->bd_fbuf = d->bd_hbuf;
        d->bd_hbuf = 0;
        d->bd_hlen = 0;
        splx(s);

        return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
531 */ 532static inline void 533bpf_wakeup(d) 534 register struct bpf_d *d; 535{ 536 struct proc *p; 537 538 wakeup((caddr_t)d); 539 if (d->bd_async && d->bd_sig) 540 if (d->bd_pgid > 0) 541 gsignal (d->bd_pgid, d->bd_sig); 542 else if (p = pfind (-d->bd_pgid)) 543 psignal (p, d->bd_sig); 544 545#if BSD >= 199103 546 selwakeup(&d->bd_sel); 547 /* XXX */ 548 d->bd_sel.si_pid = 0; 549#else 550 if (d->bd_selproc) { 551 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 552 d->bd_selcoll = 0; 553 d->bd_selproc = 0; 554 } 555#endif 556} 557 558static int 559bpfwrite(dev, uio, ioflag) 560 dev_t dev; 561 struct uio *uio; 562 int ioflag; 563{ 564 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 565 struct ifnet *ifp; 566 struct mbuf *m; 567 int error, s; 568 static struct sockaddr dst; 569 int datlen; 570 571 if (d->bd_bif == 0) 572 return (ENXIO); 573 574 ifp = d->bd_bif->bif_ifp; 575 576 if (uio->uio_resid == 0) 577 return (0); 578 579 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); 580 if (error) 581 return (error); 582 583 if (datlen > ifp->if_mtu) 584 return (EMSGSIZE); 585 586 s = splnet(); 587#if BSD >= 199103 588 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); 589#else 590 error = (*ifp->if_output)(ifp, m, &dst); 591#endif 592 splx(s); 593 /* 594 * The driver frees the mbuf. 595 */ 596 return (error); 597} 598 599/* 600 * Reset a descriptor by flushing its packet buffer and clearing the 601 * receive and drop counts. Should be called at splimp. 602 */ 603static void 604reset_d(d) 605 struct bpf_d *d; 606{ 607 if (d->bd_hbuf) { 608 /* Free the hold buffer. */ 609 d->bd_fbuf = d->bd_hbuf; 610 d->bd_hbuf = 0; 611 } 612 d->bd_slen = 0; 613 d->bd_hlen = 0; 614 d->bd_rcount = 0; 615 d->bd_dcount = 0; 616} 617 618/* 619 * FIONREAD Check for read packet available. 620 * SIOCGIFADDR Get interface address - convenient hook to driver. 621 * BIOCGBLEN Get buffer len [for read()]. 622 * BIOCSETF Set ethernet read filter. 
 * BIOCFLUSH            Flush read packet buffer.
 * BIOCPROMISC          Put interface into promiscuous mode.
 * BIOCGDLT             Get link layer type.
 * BIOCGETIF            Get interface name.
 * BIOCSETIF            Set interface.
 * BIOCSRTIMEOUT        Set read timeout.
 * BIOCGRTIMEOUT        Get read timeout.
 * BIOCGSTATS           Get packet stats.
 * BIOCIMMEDIATE        Set immediate mode.
 * BIOCVERSION          Get filter language version.
 */
/* ARGSUSED */
static int
bpfioctl(dev, cmd, addr, flags, p)
        dev_t dev;
        u_long cmd;
        caddr_t addr;
        int flags;
        struct proc *p;
{
        register struct bpf_d *d = &bpf_dtab[minor(dev)];
        int s, error = 0;

        switch (cmd) {

        default:
                error = EINVAL;
                break;

        /*
         * Check for read packet available.
         */
        case FIONREAD:
                {
                        int n;

                        s = splimp();
                        n = d->bd_slen;
                        /* Count the hold buffer too, if one is pending. */
                        if (d->bd_hbuf)
                                n += d->bd_hlen;
                        splx(s);

                        *(int *)addr = n;
                        break;
                }

        case SIOCGIFADDR:
                {
                        struct ifnet *ifp;

                        if (d->bd_bif == 0)
                                error = EINVAL;
                        else {
                                /* Pass the request straight to the driver. */
                                ifp = d->bd_bif->bif_ifp;
                                error = (*ifp->if_ioctl)(ifp, cmd, addr);
                        }
                        break;
                }

        /*
         * Get buffer len [for read()].
         */
        case BIOCGBLEN:
                *(u_int *)addr = d->bd_bufsize;
                break;

        /*
         * Set buffer length.  Only allowed before an interface is
         * attached (the buffers are allocated at BIOCSETIF time);
         * out-of-range sizes are clamped and reported back to the caller.
         */
        case BIOCSBLEN:
#if BSD < 199103
                error = EINVAL;
#else
                if (d->bd_bif != 0)
                        error = EINVAL;
                else {
                        register u_int size = *(u_int *)addr;

                        if (size > BPF_MAXBUFSIZE)
                                *(u_int *)addr = size = BPF_MAXBUFSIZE;
                        else if (size < BPF_MINBUFSIZE)
                                *(u_int *)addr = size = BPF_MINBUFSIZE;
                        d->bd_bufsize = size;
                }
#endif
                break;

        /*
         * Set link layer read filter.
         */
        case BIOCSETF:
                error = bpf_setf(d, (struct bpf_program *)addr);
                break;

        /*
         * Flush read packet buffer.
         */
        case BIOCFLUSH:
                s = splimp();
                reset_d(d);
                splx(s);
                break;

        /*
         * Put interface into promiscuous mode.
         */
        case BIOCPROMISC:
                if (d->bd_bif == 0) {
                        /*
                         * No interface attached yet.
                         */
                        error = EINVAL;
                        break;
                }
                s = splimp();
                if (d->bd_promisc == 0) {
                        error = ifpromisc(d->bd_bif->bif_ifp, 1);
                        if (error == 0)
                                d->bd_promisc = 1;
                }
                splx(s);
                break;

        /*
         * Get device parameters.
         */
        case BIOCGDLT:
                if (d->bd_bif == 0)
                        error = EINVAL;
                else
                        *(u_int *)addr = d->bd_bif->bif_dlt;
                break;

        /*
         * Get interface name.
         */
        case BIOCGETIF:
                if (d->bd_bif == 0)
                        error = EINVAL;
                else
                        bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
                break;

        /*
         * Set interface.
         */
        case BIOCSETIF:
                error = bpf_setif(d, (struct ifreq *)addr);
                break;

        /*
         * Set read timeout.
         */
        case BIOCSRTIMEOUT:
                {
                        struct timeval *tv = (struct timeval *)addr;
                        u_long msec;

                        /* Compute number of milliseconds. */
                        msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
                        /* Scale milliseconds to ticks.  Assume hard
                           clock has millisecond or greater resolution
                           (i.e. tick >= 1000).  For 10ms hardclock,
                           tick/1000 = 10, so rtout<-msec/10. */
                        d->bd_rtout = msec / (tick / 1000);
                        break;
                }

        /*
         * Get read timeout.
         */
        case BIOCGRTIMEOUT:
                {
                        struct timeval *tv = (struct timeval *)addr;
                        u_long msec = d->bd_rtout;

                        /* Inverse of the BIOCSRTIMEOUT scaling above. */
                        msec *= tick / 1000;
                        tv->tv_sec = msec / 1000;
                        tv->tv_usec = msec % 1000;
                        break;
                }

        /*
         * Get packet stats.
         */
        case BIOCGSTATS:
                {
                        struct bpf_stat *bs = (struct bpf_stat *)addr;

                        bs->bs_recv = d->bd_rcount;
                        bs->bs_drop = d->bd_dcount;
                        break;
                }

        /*
         * Set immediate mode.
         */
        case BIOCIMMEDIATE:
                d->bd_immediate = *(u_int *)addr;
                break;

        case BIOCVERSION:
                {
                        struct bpf_version *bv = (struct bpf_version *)addr;

                        bv->bv_major = BPF_MAJOR_VERSION;
                        bv->bv_minor = BPF_MINOR_VERSION;
                        break;
                }

        case FIONBIO:           /* Non-blocking I/O */
                break;

        case FIOASYNC:          /* Send signal on receive packets */
                d->bd_async = *(int *)addr;
                break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!  Therefore
   there is code in ioctl and fcntl to negate the arg before calling here. */

        case TIOCSPGRP:         /* Process or group to send signals to */
                d->bd_pgid = *(int *)addr;
                break;

        case TIOCGPGRP:
                *(int *)addr = d->bd_pgid;
                break;

        case BIOCSRSIG:         /* Set receive signal */
                {
                        u_int sig;

                        sig = *(u_int *)addr;

                        if (sig >= NSIG)
                                error = EINVAL;
                        else
                                d->bd_sig = sig;
                        break;
                }
        case BIOCGRSIG:
                *(u_int *)addr = d->bd_sig;
                break;
        }
        return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
        struct bpf_d *d;
        struct bpf_program *fp;
{
        struct bpf_insn *fcode, *old;
        u_int flen, size;
        int s;

        old = d->bd_filter;
        /* A null instruction pointer with zero length clears the filter. */
        if (fp->bf_insns == 0) {
                if (fp->bf_len != 0)
                        return (EINVAL);
                s = splimp();
                d->bd_filter = 0;
                reset_d(d);
                splx(s);
                if (old != 0)
                        free((caddr_t)old, M_DEVBUF);
                return (0);
        }
        flen = fp->bf_len;
        if (flen > BPF_MAXINSNS)
                return (EINVAL);

        /*
         * Copy the program in from user space and install it only if
         * bpf_validate() accepts it; the old filter is freed after the
         * swap so the descriptor always has a consistent filter.
         */
        size = flen * sizeof(*fp->bf_insns);
        fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
        if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
            bpf_validate(fcode, (int)flen)) {
                s = splimp();
                d->bd_filter = fcode;
                reset_d(d);
                splx(s);
                if (old != 0)
                        free((caddr_t)old, M_DEVBUF);

                return (0);
        }
        free((caddr_t)fcode, M_DEVBUF);
        return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
        struct bpf_d *d;
        struct ifreq *ifr;
{
        struct bpf_if *bp;
        int s, error;
        struct ifnet *theywant;

        theywant = ifunit(ifr->ifr_name);
        if (theywant == 0)
                return ENXIO;

        /*
         * Look through attached interfaces for the named one.
         */
        for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
                struct ifnet *ifp = bp->bif_ifp;

                if (ifp == 0 || ifp != theywant)
                        continue;
                /*
                 * We found the requested interface.
                 * If it's not up, return an error.
                 * Allocate the packet buffers if we need to.
                 * If we're already attached to requested interface,
                 * just flush the buffer.
                 */
                if ((ifp->if_flags & IFF_UP) == 0)
                        return (ENETDOWN);

                if (d->bd_sbuf == 0) {
                        error = bpf_allocbufs(d);
                        if (error != 0)
                                return (error);
                }
                s = splimp();
                if (bp != d->bd_bif) {
                        if (d->bd_bif)
                                /*
                                 * Detach if attached to something else.
                                 */
                                bpf_detachd(d);

                        bpf_attachd(d, bp);
                }
                reset_d(d);
                splx(s);
                return (0);
        }
        /* Not found. */
        return (ENXIO);
}

/*
 * Convert an interface name plus unit number of an ifp to a single
 * name which is returned in the ifr.
 */
static void
bpf_ifname(ifp, ifr)
        struct ifnet *ifp;
        struct ifreq *ifr;
{
        char *s = ifp->if_name;
        char *d = ifr->ifr_name;

        /* Copy the name, then append the unit digit over the NUL. */
        while (*d++ = *s++)
                continue;
        d--; /* back to the null */
        /* XXX Assume that unit number is less than 10. */
        *d++ = ifp->if_unit + '0';
        *d = '\0';
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
        register dev_t dev;
        int events;
        struct proc *p;
{
        register struct bpf_d *d;
        register int s;
        int revents = 0;

        /*
         * An imitation of the FIONREAD ioctl code.
         */
        d = &bpf_dtab[minor(dev)];

        s = splimp();
        if (events & (POLLIN | POLLRDNORM))
                /* Readable if the hold buffer, or (in immediate mode)
                   the store buffer, has data. */
                if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
                        revents |= events & (POLLIN | POLLRDNORM);
                else
                        selrecord(p, &d->bd_sel);

        splx(s);
        return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
        struct ifnet *ifp;
        register u_char *pkt;
        register u_int pktlen;
{
        struct bpf_if *bp;
        register struct bpf_d *d;
        register u_int slen;
        /*
         * Note that the ipl does not have to be raised at this point.
         * The only problem that could arise here is that if two different
         * interfaces shared any data.  This is not the case.
         */
        bp = ifp->if_bpf;
        for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
                ++d->bd_rcount;
                slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
                if (slen != 0)
                        catchpacket(d, pkt, pktlen, slen, bcopy);
        }
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
        const void *src_arg;
        void *dst_arg;
        register u_int len;
{
        register const struct mbuf *m;
        register u_int count;
        u_char *dst;

        m = src_arg;
        dst = dst_arg;
        while (len > 0) {
                if (m == 0)
                        panic("bpf_mcopy");
                count = min(m->m_len, len);
                bcopy(mtod(m, void *), dst, count);
                m = m->m_next;
                dst += count;
                len -= count;
        }
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
        struct ifnet *ifp;
        struct mbuf *m;
{
        struct bpf_if *bp = ifp->if_bpf;
        struct bpf_d *d;
        u_int pktlen, slen;
        struct mbuf *m0;

        pktlen = 0;
        for (m0 = m; m0 != 0; m0 = m0->m_next)
                pktlen += m0->m_len;

        for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
                ++d->bd_rcount;
                /* buflen 0 presumably tells bpf_filter the packet is an
                   mbuf chain rather than a flat buffer -- confirm against
                   bpf_filter(). */
                slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
                if (slen != 0)
                        catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
        }
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 * (A stale comment here once claimed a return value of 1 on buffer-full;
 * the function is void -- wakeups are done via bpf_wakeup() instead.)
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
        register struct bpf_d *d;
        register u_char *pkt;
        register u_int pktlen, snaplen;
        register void (*cpfn) __P((const void *, void *, u_int));
{
        register struct bpf_hdr *hp;
        register int totlen, curlen;
        register int hdrlen = d->bd_bif->bif_hdrlen;
        /*
         * Figure out how many bytes to move.  If the packet is
         * greater or equal to the snapshot length, transfer that
         * much.  Otherwise, transfer the whole packet (unless
         * we hit the buffer size limit).
         */
        totlen = hdrlen + min(snaplen, pktlen);
        if (totlen > d->bd_bufsize)
                totlen = d->bd_bufsize;

        /*
         * Round up the end of the previous packet to the next longword.
         */
        curlen = BPF_WORDALIGN(d->bd_slen);
        if (curlen + totlen > d->bd_bufsize) {
                /*
                 * This packet will overflow the storage buffer.
                 * Rotate the buffers if we can, then wakeup any
                 * pending reads.
                 */
                if (d->bd_fbuf == 0) {
                        /*
                         * We haven't completed the previous read yet,
                         * so drop the packet.
                         */
                        ++d->bd_dcount;
                        return;
                }
                ROTATE_BUFFERS(d);
                bpf_wakeup(d);
                curlen = 0;
        }
        else if (d->bd_immediate)
                /*
                 * Immediate mode is set.  A packet arrived so any
                 * reads should be woken up.
                 */
                bpf_wakeup(d);

        /*
         * Append the bpf header.
         */
        hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
        microtime(&hp->bh_tstamp);
#elif defined(sun)
        uniqtime(&hp->bh_tstamp);
#else
        hp->bh_tstamp = time;
#endif
        hp->bh_datalen = pktlen;
        hp->bh_hdrlen = hdrlen;
        /*
         * Copy the packet data into the store buffer and update its length.
         */
        (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
        d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
        register struct bpf_d *d;
{
        d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
        if (d->bd_fbuf == 0)
                return (ENOBUFS);

        d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
        if (d->bd_sbuf == 0) {
                /* Undo the first allocation so failure leaves no state. */
                free(d->bd_fbuf, M_DEVBUF);
                return (ENOBUFS);
        }
        d->bd_slen = 0;
        d->bd_hlen = 0;
        return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
        register struct bpf_d *d;
{
        /*
         * We don't need to lock out interrupts since this descriptor has
         * been detached from its interface and it yet hasn't been marked
         * free.
         */
        if (d->bd_sbuf != 0) {
                free(d->bd_sbuf, M_DEVBUF);
                if (d->bd_hbuf != 0)
                        free(d->bd_hbuf, M_DEVBUF);
                if (d->bd_fbuf != 0)
                        free(d->bd_fbuf, M_DEVBUF);
        }
        if (d->bd_filter)
                free((caddr_t)d->bd_filter, M_DEVBUF);

        D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  driverp is a pointer to a (struct bpf_if *)
 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
        struct ifnet *ifp;
        u_int dlt, hdrlen;
{
        struct bpf_if *bp;
        int i;

        bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
        if (bp == 0)
                panic("bpfattach");

        bp->bif_dlist = 0;
        bp->bif_ifp = ifp;
        bp->bif_dlt = dlt;

        bp->bif_next = bpf_iflist;
        bpf_iflist = bp;

        /* No listeners yet, so don't divert packets to bpf. */
        bp->bif_ifp->if_bpf = 0;

        /*
         * Compute the length of the bpf header.  This is not necessarily
         * equal to SIZEOF_BPF_HDR because we want to insert spacing such
         * that the network layer header begins on a longword boundary (for
         * performance reasons and to alleviate alignment restrictions).
         */
        bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

        /*
         * Mark all the descriptors free if this hasn't been done.
         */
        if (!bpf_dtab_init) {
                for (i = 0; i < NBPFILTER; ++i)
                        D_MARKFREE(&bpf_dtab[i]);
                bpf_dtab_init = 1;
        }

        if (bootverbose)
                printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

#ifdef DEVFS
static void *bpf_devfs_token[NBPFILTER];
#endif

static int bpf_devsw_installed;

static void bpf_drvinit __P((void *unused));

/*
 * SYSINIT hook: register the bpf character device (and its devfs
 * entries when DEVFS is configured).  Idempotent via
 * bpf_devsw_installed.
 */
static void
bpf_drvinit(unused)
        void *unused;
{
        dev_t dev;
#ifdef DEVFS
        int i;
#endif

        if( ! bpf_devsw_installed ) {
                dev = makedev(CDEV_MAJOR, 0);
                cdevsw_add(&dev,&bpf_cdevsw, NULL);
                bpf_devsw_installed = 1;
#ifdef DEVFS

                for ( i = 0 ; i < NBPFILTER ; i++ ) {
                        bpf_devfs_token[i] =
                                devfs_add_devswf(&bpf_cdevsw, i, DV_CHR, 0, 0,
                                                 0600, "bpf%d", i);
                }
#endif
        }
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)

#endif