bpf.c revision 12521
1/* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94 39 * 40 * $Id: bpf.c,v 1.15 1995/11/29 10:48:44 julian Exp $ 41 */ 42 43#include "bpfilter.h" 44 45#if NBPFILTER > 0 46 47#ifndef __GNUC__ 48#define inline 49#else 50#define inline __inline 51#endif 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <machine/cpu.h> /* for bootverbose */ 56#include <sys/mbuf.h> 57#include <sys/buf.h> 58#include <sys/time.h> 59#include <sys/proc.h> 60#include <sys/user.h> 61#include <sys/ioctl.h> 62 63#include <sys/file.h> 64#if defined(sparc) && BSD < 199103 65#include <sys/stream.h> 66#endif 67#include <sys/uio.h> 68 69#include <sys/socket.h> 70#include <sys/socketvar.h> 71#include <sys/protosw.h> 72#include <net/if.h> 73 74#include <net/bpf.h> 75#include <net/bpfdesc.h> 76 77#include <sys/errno.h> 78 79#include <netinet/in.h> 80#include <netinet/if_ether.h> 81#include <sys/kernel.h> 82 83#ifdef JREMOD 84#include <sys/conf.h> 85#ifdef DEVFS 86#include <sys/devfsext.h> 87#endif /*DEVFS*/ 88#define CDEV_MAJOR 23 89#endif /*JREMOD*/ 90 91/* 92 * Older BSDs don't have kernel malloc. 
93 */ 94#if BSD < 199103 95extern bcopy(); 96static caddr_t bpf_alloc(); 97#include <net/bpf_compat.h> 98#define BPF_BUFSIZE (MCLBYTES-8) 99#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio) 100#else 101#define BPF_BUFSIZE 4096 102#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio) 103#endif 104 105#define PRINET 26 /* interruptible */ 106 107/* 108 * The default read buffer size is patchable. 109 */ 110int bpf_bufsize = BPF_BUFSIZE; 111 112/* 113 * bpf_iflist is the list of interfaces; each corresponds to an ifnet 114 * bpf_dtab holds the descriptors, indexed by minor device # 115 */ 116struct bpf_if *bpf_iflist; 117struct bpf_d bpf_dtab[NBPFILTER]; 118 119#if BSD >= 199207 120/* 121 * bpfilterattach() is called at boot time in new systems. We do 122 * nothing here since old systems will not call this. 123 */ 124/* ARGSUSED */ 125void 126bpfilterattach(n) 127 int n; 128{ 129} 130#endif 131 132static int bpf_allocbufs __P((struct bpf_d *)); 133static void bpf_freed __P((struct bpf_d *)); 134static void bpf_ifname __P((struct ifnet *, struct ifreq *)); 135static void bpf_mcopy __P((const void *, void *, u_int)); 136static int bpf_movein __P((struct uio *, int, 137 struct mbuf **, struct sockaddr *, int *)); 138static int bpf_setif __P((struct bpf_d *, struct ifreq *)); 139static inline void 140 bpf_wakeup __P((struct bpf_d *)); 141static void catchpacket __P((struct bpf_d *, u_char *, u_int, 142 u_int, void (*)(const void *, void *, u_int))); 143static void reset_d __P((struct bpf_d *)); 144 145static int 146bpf_movein(uio, linktype, mp, sockp, datlen) 147 register struct uio *uio; 148 int linktype, *datlen; 149 register struct mbuf **mp; 150 register struct sockaddr *sockp; 151{ 152 struct mbuf *m; 153 int error; 154 int len; 155 int hlen; 156 157 /* 158 * Build a sockaddr based on the data link layer type. 159 * We do this at this level because the ethernet header 160 * is copied directly into the data field of the sockaddr. 
161 * In the case of SLIP, there is no header and the packet 162 * is forwarded as is. 163 * Also, we are careful to leave room at the front of the mbuf 164 * for the link level header. 165 */ 166 switch (linktype) { 167 168 case DLT_SLIP: 169 sockp->sa_family = AF_INET; 170 hlen = 0; 171 break; 172 173 case DLT_EN10MB: 174 sockp->sa_family = AF_UNSPEC; 175 /* XXX Would MAXLINKHDR be better? */ 176 hlen = sizeof(struct ether_header); 177 break; 178 179 case DLT_FDDI: 180#if defined(__FreeBSD__) || defined(__bsdi__) 181 sockp->sa_family = AF_IMPLINK; 182 hlen = 0; 183#else 184 sockp->sa_family = AF_UNSPEC; 185 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ 186 hlen = 24; 187#endif 188 break; 189 190 case DLT_NULL: 191 sockp->sa_family = AF_UNSPEC; 192 hlen = 0; 193 break; 194 195 default: 196 return (EIO); 197 } 198 199 len = uio->uio_resid; 200 *datlen = len - hlen; 201 if ((unsigned)len > MCLBYTES) 202 return (EIO); 203 204 MGETHDR(m, M_WAIT, MT_DATA); 205 if (m == 0) 206 return (ENOBUFS); 207 if (len > MHLEN) { 208#if BSD >= 199103 209 MCLGET(m, M_WAIT); 210 if ((m->m_flags & M_EXT) == 0) { 211#else 212 MCLGET(m); 213 if (m->m_len != MCLBYTES) { 214#endif 215 error = ENOBUFS; 216 goto bad; 217 } 218 } 219 m->m_pkthdr.len = m->m_len = len; 220 m->m_pkthdr.rcvif = NULL; 221 *mp = m; 222 /* 223 * Make room for link header. 224 */ 225 if (hlen != 0) { 226 m->m_len -= hlen; 227#if BSD >= 199103 228 m->m_data += hlen; /* XXX */ 229#else 230 m->m_off += hlen; 231#endif 232 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); 233 if (error) 234 goto bad; 235 } 236 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); 237 if (!error) 238 return (0); 239 bad: 240 m_freem(m); 241 return (error); 242} 243 244/* 245 * Attach file to the bpf interface, i.e. make d listen on bp. 246 * Must be called at splimp. 
247 */ 248static void 249bpf_attachd(d, bp) 250 struct bpf_d *d; 251 struct bpf_if *bp; 252{ 253 /* 254 * Point d at bp, and add d to the interface's list of listeners. 255 * Finally, point the driver's bpf cookie at the interface so 256 * it will divert packets to bpf. 257 */ 258 d->bd_bif = bp; 259 d->bd_next = bp->bif_dlist; 260 bp->bif_dlist = d; 261 262 *bp->bif_driverp = bp; 263} 264 265/* 266 * Detach a file from its interface. 267 */ 268static void 269bpf_detachd(d) 270 struct bpf_d *d; 271{ 272 struct bpf_d **p; 273 struct bpf_if *bp; 274 275 bp = d->bd_bif; 276 /* 277 * Check if this descriptor had requested promiscuous mode. 278 * If so, turn it off. 279 */ 280 if (d->bd_promisc) { 281 d->bd_promisc = 0; 282 if (ifpromisc(bp->bif_ifp, 0)) 283 /* 284 * Something is really wrong if we were able to put 285 * the driver into promiscuous mode, but can't 286 * take it out. 287 */ 288 panic("bpf: ifpromisc failed"); 289 } 290 /* Remove d from the interface's descriptor list. */ 291 p = &bp->bif_dlist; 292 while (*p != d) { 293 p = &(*p)->bd_next; 294 if (*p == 0) 295 panic("bpf_detachd: descriptor not in list"); 296 } 297 *p = (*p)->bd_next; 298 if (bp->bif_dlist == 0) 299 /* 300 * Let the driver know that there are no more listeners. 301 */ 302 *d->bd_bif->bif_driverp = 0; 303 d->bd_bif = 0; 304} 305 306 307/* 308 * Mark a descriptor free by making it point to itself. 309 * This is probably cheaper than marking with a constant since 310 * the address should be in a register anyway. 311 */ 312#define D_ISFREE(d) ((d) == (d)->bd_next) 313#define D_MARKFREE(d) ((d)->bd_next = (d)) 314#define D_MARKUSED(d) ((d)->bd_next = 0) 315 316/* 317 * Open ethernet device. Returns ENXIO for illegal minor device number, 318 * EBUSY if file is open by another process. 
319 */ 320/* ARGSUSED */ 321int 322bpfopen(dev, flags, fmt, p) 323 dev_t dev; 324 int flags; 325 int fmt; 326 struct proc *p; 327{ 328 register struct bpf_d *d; 329 330 if (minor(dev) >= NBPFILTER) 331 return (ENXIO); 332 /* 333 * Each minor can be opened by only one process. If the requested 334 * minor is in use, return EBUSY. 335 */ 336 d = &bpf_dtab[minor(dev)]; 337 if (!D_ISFREE(d)) 338 return (EBUSY); 339 340 /* Mark "free" and do most initialization. */ 341 bzero((char *)d, sizeof(*d)); 342 d->bd_bufsize = bpf_bufsize; 343 d->bd_sig = SIGIO; 344 345 return (0); 346} 347 348/* 349 * Close the descriptor by detaching it from its interface, 350 * deallocating its buffers, and marking it free. 351 */ 352/* ARGSUSED */ 353int 354bpfclose(dev, flags, fmt, p) 355 dev_t dev; 356 int flags; 357 int fmt; 358 struct proc *p; 359{ 360 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 361 register int s; 362 363 s = splimp(); 364 if (d->bd_bif) 365 bpf_detachd(d); 366 splx(s); 367 bpf_freed(d); 368 369 return (0); 370} 371 372/* 373 * Support for SunOS, which does not have tsleep. 374 */ 375#if BSD < 199103 376static 377bpf_timeout(arg) 378 caddr_t arg; 379{ 380 struct bpf_d *d = (struct bpf_d *)arg; 381 d->bd_timedout = 1; 382 wakeup(arg); 383} 384 385#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) 386 387int 388bpf_sleep(d) 389 register struct bpf_d *d; 390{ 391 register int rto = d->bd_rtout; 392 register int st; 393 394 if (rto != 0) { 395 d->bd_timedout = 0; 396 timeout(bpf_timeout, (caddr_t)d, rto); 397 } 398 st = sleep((caddr_t)d, PRINET|PCATCH); 399 if (rto != 0) { 400 if (d->bd_timedout == 0) 401 untimeout(bpf_timeout, (caddr_t)d); 402 else if (st == 0) 403 return EWOULDBLOCK; 404 } 405 return (st != 0) ? EINTR : 0; 406} 407#else 408#define BPF_SLEEP tsleep 409#endif 410 411/* 412 * Rotate the packet buffers in descriptor d. Move the store buffer 413 * into the hold slot, and the free buffer into the store slot. 
414 * Zero the length of the new store buffer. 415 */ 416#define ROTATE_BUFFERS(d) \ 417 (d)->bd_hbuf = (d)->bd_sbuf; \ 418 (d)->bd_hlen = (d)->bd_slen; \ 419 (d)->bd_sbuf = (d)->bd_fbuf; \ 420 (d)->bd_slen = 0; \ 421 (d)->bd_fbuf = 0; 422/* 423 * bpfread - read next chunk of packets from buffers 424 */ 425int 426bpfread(dev, uio, ioflag) 427 dev_t dev; 428 register struct uio *uio; 429 int ioflag; 430{ 431 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 432 int error; 433 int s; 434 435 /* 436 * Restrict application to use a buffer the same size as 437 * as kernel buffers. 438 */ 439 if (uio->uio_resid != d->bd_bufsize) 440 return (EINVAL); 441 442 s = splimp(); 443 /* 444 * If the hold buffer is empty, then do a timed sleep, which 445 * ends when the timeout expires or when enough packets 446 * have arrived to fill the store buffer. 447 */ 448 while (d->bd_hbuf == 0) { 449 if (d->bd_immediate && d->bd_slen != 0) { 450 /* 451 * A packet(s) either arrived since the previous 452 * read or arrived while we were asleep. 453 * Rotate the buffers and return what's here. 454 */ 455 ROTATE_BUFFERS(d); 456 break; 457 } 458 if (d->bd_rtout != -1) 459 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", 460 d->bd_rtout); 461 else 462 error = EWOULDBLOCK; /* User requested non-blocking I/O */ 463 if (error == EINTR || error == ERESTART) { 464 splx(s); 465 return (error); 466 } 467 if (error == EWOULDBLOCK) { 468 /* 469 * On a timeout, return what's in the buffer, 470 * which may be nothing. If there is something 471 * in the store buffer, we can rotate the buffers. 472 */ 473 if (d->bd_hbuf) 474 /* 475 * We filled up the buffer in between 476 * getting the timeout and arriving 477 * here, so we don't need to rotate. 478 */ 479 break; 480 481 if (d->bd_slen == 0) { 482 splx(s); 483 return (0); 484 } 485 ROTATE_BUFFERS(d); 486 break; 487 } 488 } 489 /* 490 * At this point, we know we have something in the hold slot. 
491 */ 492 splx(s); 493 494 /* 495 * Move data from hold buffer into user space. 496 * We know the entire buffer is transferred since 497 * we checked above that the read buffer is bpf_bufsize bytes. 498 */ 499 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); 500 501 s = splimp(); 502 d->bd_fbuf = d->bd_hbuf; 503 d->bd_hbuf = 0; 504 d->bd_hlen = 0; 505 splx(s); 506 507 return (error); 508} 509 510 511/* 512 * If there are processes sleeping on this descriptor, wake them up. 513 */ 514static inline void 515bpf_wakeup(d) 516 register struct bpf_d *d; 517{ 518 struct proc *p; 519 520 wakeup((caddr_t)d); 521 if (d->bd_async && d->bd_sig) 522 if (d->bd_pgid > 0) 523 gsignal (d->bd_pgid, d->bd_sig); 524 else if (p = pfind (-d->bd_pgid)) 525 psignal (p, d->bd_sig); 526 527#if BSD >= 199103 528 selwakeup(&d->bd_sel); 529 /* XXX */ 530 d->bd_sel.si_pid = 0; 531#else 532 if (d->bd_selproc) { 533 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 534 d->bd_selcoll = 0; 535 d->bd_selproc = 0; 536 } 537#endif 538} 539 540int 541bpfwrite(dev, uio, ioflag) 542 dev_t dev; 543 struct uio *uio; 544 int ioflag; 545{ 546 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 547 struct ifnet *ifp; 548 struct mbuf *m; 549 int error, s; 550 static struct sockaddr dst; 551 int datlen; 552 553 if (d->bd_bif == 0) 554 return (ENXIO); 555 556 ifp = d->bd_bif->bif_ifp; 557 558 if (uio->uio_resid == 0) 559 return (0); 560 561 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); 562 if (error) 563 return (error); 564 565 if (datlen > ifp->if_mtu) 566 return (EMSGSIZE); 567 568 s = splnet(); 569#if BSD >= 199103 570 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); 571#else 572 error = (*ifp->if_output)(ifp, m, &dst); 573#endif 574 splx(s); 575 /* 576 * The driver frees the mbuf. 577 */ 578 return (error); 579} 580 581/* 582 * Reset a descriptor by flushing its packet buffer and clearing the 583 * receive and drop counts. Should be called at splimp. 
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 * Device ioctl entry point.  Returns 0 or an errno.
 *
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			/* Raise ipl: bd_slen/bd_hlen change at interrupt time. */
			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				/* Pass the request straight to the attached driver. */
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached, since the buffers are allocated at attach time.
	 * The requested size is clamped into [BPF_MINBUFSIZE,
	 * BPF_MAXBUFSIZE] and the clamped value written back to the
	 * caller.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10. */
			/* NOTE(review): if tick < 1000 (hz > 1000) the
			   divisor below is zero -- confirm the assumption
			   above holds on all supported configurations. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			/* Inverse of the BIOCSRTIMEOUT conversion. */
			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}


	case FIONBIO:		/* Non-blocking I/O */
		/* bd_rtout == -1 is the "don't sleep" sentinel in bpfread(). */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!  Therefore
   there is code in ioctl and fcntl to negate the arg before calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
863 */ 864int 865bpf_setf(d, fp) 866 struct bpf_d *d; 867 struct bpf_program *fp; 868{ 869 struct bpf_insn *fcode, *old; 870 u_int flen, size; 871 int s; 872 873 old = d->bd_filter; 874 if (fp->bf_insns == 0) { 875 if (fp->bf_len != 0) 876 return (EINVAL); 877 s = splimp(); 878 d->bd_filter = 0; 879 reset_d(d); 880 splx(s); 881 if (old != 0) 882 free((caddr_t)old, M_DEVBUF); 883 return (0); 884 } 885 flen = fp->bf_len; 886 if (flen > BPF_MAXINSNS) 887 return (EINVAL); 888 889 size = flen * sizeof(*fp->bf_insns); 890 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK); 891 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && 892 bpf_validate(fcode, (int)flen)) { 893 s = splimp(); 894 d->bd_filter = fcode; 895 reset_d(d); 896 splx(s); 897 if (old != 0) 898 free((caddr_t)old, M_DEVBUF); 899 900 return (0); 901 } 902 free((caddr_t)fcode, M_DEVBUF); 903 return (EINVAL); 904} 905 906/* 907 * Detach a file from its current interface (if attached at all) and attach 908 * to the interface indicated by the name stored in ifr. 909 * Return an errno or 0. 910 */ 911static int 912bpf_setif(d, ifr) 913 struct bpf_d *d; 914 struct ifreq *ifr; 915{ 916 struct bpf_if *bp; 917 char *cp; 918 int unit, s, error; 919 920 /* 921 * Separate string into name part and unit number. Put a null 922 * byte at the end of the name part, and compute the number. 923 * If the a unit number is unspecified, the default is 0, 924 * as initialized above. XXX This should be common code. 925 */ 926 unit = 0; 927 cp = ifr->ifr_name; 928 cp[sizeof(ifr->ifr_name) - 1] = '\0'; 929 while (*cp++) { 930 if (*cp >= '0' && *cp <= '9') { 931 unit = *cp - '0'; 932 *cp++ = '\0'; 933 while (*cp) 934 unit = 10 * unit + *cp++ - '0'; 935 break; 936 } 937 } 938 /* 939 * Look through attached interfaces for the named one. 
940 */ 941 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { 942 struct ifnet *ifp = bp->bif_ifp; 943 944 if (ifp == 0 || unit != ifp->if_unit 945 || strcmp(ifp->if_name, ifr->ifr_name) != 0) 946 continue; 947 /* 948 * We found the requested interface. 949 * If it's not up, return an error. 950 * Allocate the packet buffers if we need to. 951 * If we're already attached to requested interface, 952 * just flush the buffer. 953 */ 954 if ((ifp->if_flags & IFF_UP) == 0) 955 return (ENETDOWN); 956 957 if (d->bd_sbuf == 0) { 958 error = bpf_allocbufs(d); 959 if (error != 0) 960 return (error); 961 } 962 s = splimp(); 963 if (bp != d->bd_bif) { 964 if (d->bd_bif) 965 /* 966 * Detach if attached to something else. 967 */ 968 bpf_detachd(d); 969 970 bpf_attachd(d, bp); 971 } 972 reset_d(d); 973 splx(s); 974 return (0); 975 } 976 /* Not found. */ 977 return (ENXIO); 978} 979 980/* 981 * Convert an interface name plus unit number of an ifp to a single 982 * name which is returned in the ifr. 983 */ 984static void 985bpf_ifname(ifp, ifr) 986 struct ifnet *ifp; 987 struct ifreq *ifr; 988{ 989 char *s = ifp->if_name; 990 char *d = ifr->ifr_name; 991 992 while (*d++ = *s++) 993 continue; 994 /* XXX Assume that unit number is less than 10. */ 995 *d++ = ifp->if_unit + '0'; 996 *d = '\0'; 997} 998 999/* 1000 * The new select interface passes down the proc pointer; the old select 1001 * stubs had to grab it out of the user struct. This glue allows either case. 1002 */ 1003#if BSD >= 199103 1004#define bpf_select bpfselect 1005#else 1006int 1007bpfselect(dev, rw) 1008 register dev_t dev; 1009 int rw; 1010{ 1011 return (bpf_select(dev, rw, u.u_procp)); 1012} 1013#endif 1014 1015/* 1016 * Support for select() system call 1017 * 1018 * Return true iff the specific operation will not block indefinitely. 1019 * Otherwise, return false but make a note that a selwakeup() must be done. 
 */
int
bpf_select(dev, rw, p)
	register dev_t dev;
	int rw;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;

	/* Only reads can become ready; writes never block here. */
	if (rw != FREAD)
		return (0);
	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = &bpf_dtab[minor(dev)];

	s = splimp();
	if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) {
		/*
		 * There is data waiting.
		 */
		splx(s);
		return (1);
	}
#if BSD >= 199103
	selrecord(p, &d->bd_sel);
#else
	/*
	 * No data ready.  If there's already a select() waiting on this
	 * minor device then this is a collision.  This shouldn't happen
	 * because minors really should not be shared, but if a process
	 * forks while one of these is open, it is possible that both
	 * processes could select on the same descriptor.
	 */
	if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait)
		d->bd_selcoll = 1;
	else
		d->bd_selproc = p;
#endif
	splx(s);
	return (0);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(arg, pkt, pktlen)
	caddr_t arg;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here is that if two different
	 * interfaces shared any data.  This is not the case.
	 */
	bp = (struct bpf_if *)arg;
	/* Run the packet past every listener attached to this interface. */
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/* slen is the snapshot length the filter asks to keep. */
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register u_int len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	/* Walk the chain, copying each mbuf's data in turn. */
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		(void)memcpy((caddr_t)dst, mtod(m, caddr_t), count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(arg, m)
	caddr_t arg;
	struct mbuf *m;
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	/* Total packet length is the sum over the whole chain. */
	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		/*
		 * Buffer length 0 tells bpf_filter the "packet" pointer
		 * is really an mbuf chain; catchpacket gets bpf_mcopy
		 * for the same reason.
		 */
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Return 1 if it's time to wakeup a listener (buffer full),
 * otherwise 0.  "copy" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)(const void *, void *, u_int);
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.  The timestamp source depends on what
	 * the platform provides.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	/* bh_datalen is the length on the wire, bh_caplen what we kept. */
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
1224 */ 1225static int 1226bpf_allocbufs(d) 1227 register struct bpf_d *d; 1228{ 1229 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1230 if (d->bd_fbuf == 0) 1231 return (ENOBUFS); 1232 1233 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1234 if (d->bd_sbuf == 0) { 1235 free(d->bd_fbuf, M_DEVBUF); 1236 return (ENOBUFS); 1237 } 1238 d->bd_slen = 0; 1239 d->bd_hlen = 0; 1240 return (0); 1241} 1242 1243/* 1244 * Free buffers currently in use by a descriptor. 1245 * Called on close. 1246 */ 1247static void 1248bpf_freed(d) 1249 register struct bpf_d *d; 1250{ 1251 /* 1252 * We don't need to lock out interrupts since this descriptor has 1253 * been detached from its interface and it yet hasn't been marked 1254 * free. 1255 */ 1256 if (d->bd_sbuf != 0) { 1257 free(d->bd_sbuf, M_DEVBUF); 1258 if (d->bd_hbuf != 0) 1259 free(d->bd_hbuf, M_DEVBUF); 1260 if (d->bd_fbuf != 0) 1261 free(d->bd_fbuf, M_DEVBUF); 1262 } 1263 if (d->bd_filter) 1264 free((caddr_t)d->bd_filter, M_DEVBUF); 1265 1266 D_MARKFREE(d); 1267} 1268 1269/* 1270 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) 1271 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed 1272 * size of the link header (variable length headers not yet supported). 1273 */ 1274void 1275bpfattach(driverp, ifp, dlt, hdrlen) 1276 caddr_t *driverp; 1277 struct ifnet *ifp; 1278 u_int dlt, hdrlen; 1279{ 1280 struct bpf_if *bp; 1281 int i; 1282#if BSD < 199103 1283 static struct bpf_if bpf_ifs[NBPFILTER]; 1284 static int bpfifno; 1285 1286 bp = (bpfifno < NBPFILTER) ? 
&bpf_ifs[bpfifno++] : 0; 1287#else 1288 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); 1289#endif 1290 if (bp == 0) 1291 panic("bpfattach"); 1292 1293 bp->bif_dlist = 0; 1294 bp->bif_driverp = (struct bpf_if **)driverp; 1295 bp->bif_ifp = ifp; 1296 bp->bif_dlt = dlt; 1297 1298 bp->bif_next = bpf_iflist; 1299 bpf_iflist = bp; 1300 1301 *bp->bif_driverp = 0; 1302 1303 /* 1304 * Compute the length of the bpf header. This is not necessarily 1305 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1306 * that the network layer header begins on a longword boundary (for 1307 * performance reasons and to alleviate alignment restrictions). 1308 */ 1309 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1310 1311 /* 1312 * Mark all the descriptors free if this hasn't been done. 1313 */ 1314 if (!D_ISFREE(&bpf_dtab[0])) 1315 for (i = 0; i < NBPFILTER; ++i) 1316 D_MARKFREE(&bpf_dtab[i]); 1317 1318 if (bootverbose) 1319 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); 1320} 1321 1322 1323#ifdef JREMOD 1324struct cdevsw bpf_cdevsw = 1325 { bpfopen, bpfclose, bpfread, bpfwrite, /*23*/ 1326 bpfioctl, nostop, nullreset, nodevtotty,/* bpf */ 1327 bpfselect, nommap, NULL }; 1328 1329static bpf_devsw_installed = 0; 1330 1331static void bpf_drvinit(void *unused) 1332{ 1333 dev_t dev; 1334 1335 if( ! bpf_devsw_installed ) { 1336 dev = makedev(CDEV_MAJOR,0); 1337 cdevsw_add(&dev,&bpf_cdevsw,NULL); 1338 bpf_devsw_installed = 1; 1339#ifdef DEVFS 1340 { 1341 int x; 1342/* default for a simple device with no probe routine (usually delete this) */ 1343 x=devfs_add_devsw( 1344/* path name devsw minor type uid gid perm*/ 1345 "/", "bpf", major(dev), 0, DV_CHR, 0, 0, 0600); 1346 } 1347#endif 1348 } 1349} 1350 1351SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) 1352 1353#endif /* JREMOD */ 1354 1355#endif 1356