/* bpf.c revision 12427 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $Id: bpf.c,v 1.13 1995/09/22 17:57:45 wollman Exp $
 */

#include "bpfilter.h"

#if NBPFILTER > 0

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/cpu.h> /* for bootverbose */
#include <sys/mbuf.h>
#include <sys/buf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/ioctl.h>

#include <sys/file.h>
#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/uio.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/protosw.h>
#include <net/if.h>

#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <sys/errno.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
101 */ 102int bpf_bufsize = BPF_BUFSIZE; 103 104/* 105 * bpf_iflist is the list of interfaces; each corresponds to an ifnet 106 * bpf_dtab holds the descriptors, indexed by minor device # 107 */ 108struct bpf_if *bpf_iflist; 109struct bpf_d bpf_dtab[NBPFILTER]; 110 111#if BSD >= 199207 112/* 113 * bpfilterattach() is called at boot time in new systems. We do 114 * nothing here since old systems will not call this. 115 */ 116/* ARGSUSED */ 117void 118bpfilterattach(n) 119 int n; 120{ 121} 122#endif 123 124static int bpf_allocbufs __P((struct bpf_d *)); 125static void bpf_freed __P((struct bpf_d *)); 126static void bpf_ifname __P((struct ifnet *, struct ifreq *)); 127static void bpf_mcopy __P((const void *, void *, u_int)); 128static int bpf_movein __P((struct uio *, int, 129 struct mbuf **, struct sockaddr *, int *)); 130static int bpf_setif __P((struct bpf_d *, struct ifreq *)); 131static inline void 132 bpf_wakeup __P((struct bpf_d *)); 133static void catchpacket __P((struct bpf_d *, u_char *, u_int, 134 u_int, void (*)(const void *, void *, u_int))); 135static void reset_d __P((struct bpf_d *)); 136 137static int 138bpf_movein(uio, linktype, mp, sockp, datlen) 139 register struct uio *uio; 140 int linktype, *datlen; 141 register struct mbuf **mp; 142 register struct sockaddr *sockp; 143{ 144 struct mbuf *m; 145 int error; 146 int len; 147 int hlen; 148 149 /* 150 * Build a sockaddr based on the data link layer type. 151 * We do this at this level because the ethernet header 152 * is copied directly into the data field of the sockaddr. 153 * In the case of SLIP, there is no header and the packet 154 * is forwarded as is. 155 * Also, we are careful to leave room at the front of the mbuf 156 * for the link level header. 157 */ 158 switch (linktype) { 159 160 case DLT_SLIP: 161 sockp->sa_family = AF_INET; 162 hlen = 0; 163 break; 164 165 case DLT_EN10MB: 166 sockp->sa_family = AF_UNSPEC; 167 /* XXX Would MAXLINKHDR be better? 
*/ 168 hlen = sizeof(struct ether_header); 169 break; 170 171 case DLT_FDDI: 172#if defined(__FreeBSD__) || defined(__bsdi__) 173 sockp->sa_family = AF_IMPLINK; 174 hlen = 0; 175#else 176 sockp->sa_family = AF_UNSPEC; 177 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ 178 hlen = 24; 179#endif 180 break; 181 182 case DLT_NULL: 183 sockp->sa_family = AF_UNSPEC; 184 hlen = 0; 185 break; 186 187 default: 188 return (EIO); 189 } 190 191 len = uio->uio_resid; 192 *datlen = len - hlen; 193 if ((unsigned)len > MCLBYTES) 194 return (EIO); 195 196 MGETHDR(m, M_WAIT, MT_DATA); 197 if (m == 0) 198 return (ENOBUFS); 199 if (len > MHLEN) { 200#if BSD >= 199103 201 MCLGET(m, M_WAIT); 202 if ((m->m_flags & M_EXT) == 0) { 203#else 204 MCLGET(m); 205 if (m->m_len != MCLBYTES) { 206#endif 207 error = ENOBUFS; 208 goto bad; 209 } 210 } 211 m->m_pkthdr.len = m->m_len = len; 212 m->m_pkthdr.rcvif = NULL; 213 *mp = m; 214 /* 215 * Make room for link header. 216 */ 217 if (hlen != 0) { 218 m->m_len -= hlen; 219#if BSD >= 199103 220 m->m_data += hlen; /* XXX */ 221#else 222 m->m_off += hlen; 223#endif 224 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); 225 if (error) 226 goto bad; 227 } 228 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); 229 if (!error) 230 return (0); 231 bad: 232 m_freem(m); 233 return (error); 234} 235 236/* 237 * Attach file to the bpf interface, i.e. make d listen on bp. 238 * Must be called at splimp. 239 */ 240static void 241bpf_attachd(d, bp) 242 struct bpf_d *d; 243 struct bpf_if *bp; 244{ 245 /* 246 * Point d at bp, and add d to the interface's list of listeners. 247 * Finally, point the driver's bpf cookie at the interface so 248 * it will divert packets to bpf. 249 */ 250 d->bd_bif = bp; 251 d->bd_next = bp->bif_dlist; 252 bp->bif_dlist = d; 253 254 *bp->bif_driverp = bp; 255} 256 257/* 258 * Detach a file from its interface. 
259 */ 260static void 261bpf_detachd(d) 262 struct bpf_d *d; 263{ 264 struct bpf_d **p; 265 struct bpf_if *bp; 266 267 bp = d->bd_bif; 268 /* 269 * Check if this descriptor had requested promiscuous mode. 270 * If so, turn it off. 271 */ 272 if (d->bd_promisc) { 273 d->bd_promisc = 0; 274 if (ifpromisc(bp->bif_ifp, 0)) 275 /* 276 * Something is really wrong if we were able to put 277 * the driver into promiscuous mode, but can't 278 * take it out. 279 */ 280 panic("bpf: ifpromisc failed"); 281 } 282 /* Remove d from the interface's descriptor list. */ 283 p = &bp->bif_dlist; 284 while (*p != d) { 285 p = &(*p)->bd_next; 286 if (*p == 0) 287 panic("bpf_detachd: descriptor not in list"); 288 } 289 *p = (*p)->bd_next; 290 if (bp->bif_dlist == 0) 291 /* 292 * Let the driver know that there are no more listeners. 293 */ 294 *d->bd_bif->bif_driverp = 0; 295 d->bd_bif = 0; 296} 297 298 299/* 300 * Mark a descriptor free by making it point to itself. 301 * This is probably cheaper than marking with a constant since 302 * the address should be in a register anyway. 303 */ 304#define D_ISFREE(d) ((d) == (d)->bd_next) 305#define D_MARKFREE(d) ((d)->bd_next = (d)) 306#define D_MARKUSED(d) ((d)->bd_next = 0) 307 308/* 309 * Open ethernet device. Returns ENXIO for illegal minor device number, 310 * EBUSY if file is open by another process. 311 */ 312/* ARGSUSED */ 313int 314bpfopen(dev, flags, fmt, p) 315 dev_t dev; 316 int flags; 317 int fmt; 318 struct proc *p; 319{ 320 register struct bpf_d *d; 321 322 if (minor(dev) >= NBPFILTER) 323 return (ENXIO); 324 /* 325 * Each minor can be opened by only one process. If the requested 326 * minor is in use, return EBUSY. 327 */ 328 d = &bpf_dtab[minor(dev)]; 329 if (!D_ISFREE(d)) 330 return (EBUSY); 331 332 /* Mark "free" and do most initialization. 
*/ 333 bzero((char *)d, sizeof(*d)); 334 d->bd_bufsize = bpf_bufsize; 335 d->bd_sig = SIGIO; 336 337 return (0); 338} 339 340/* 341 * Close the descriptor by detaching it from its interface, 342 * deallocating its buffers, and marking it free. 343 */ 344/* ARGSUSED */ 345int 346bpfclose(dev, flags, fmt, p) 347 dev_t dev; 348 int flags; 349 int fmt; 350 struct proc *p; 351{ 352 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 353 register int s; 354 355 s = splimp(); 356 if (d->bd_bif) 357 bpf_detachd(d); 358 splx(s); 359 bpf_freed(d); 360 361 return (0); 362} 363 364/* 365 * Support for SunOS, which does not have tsleep. 366 */ 367#if BSD < 199103 368static 369bpf_timeout(arg) 370 caddr_t arg; 371{ 372 struct bpf_d *d = (struct bpf_d *)arg; 373 d->bd_timedout = 1; 374 wakeup(arg); 375} 376 377#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan) 378 379int 380bpf_sleep(d) 381 register struct bpf_d *d; 382{ 383 register int rto = d->bd_rtout; 384 register int st; 385 386 if (rto != 0) { 387 d->bd_timedout = 0; 388 timeout(bpf_timeout, (caddr_t)d, rto); 389 } 390 st = sleep((caddr_t)d, PRINET|PCATCH); 391 if (rto != 0) { 392 if (d->bd_timedout == 0) 393 untimeout(bpf_timeout, (caddr_t)d); 394 else if (st == 0) 395 return EWOULDBLOCK; 396 } 397 return (st != 0) ? EINTR : 0; 398} 399#else 400#define BPF_SLEEP tsleep 401#endif 402 403/* 404 * Rotate the packet buffers in descriptor d. Move the store buffer 405 * into the hold slot, and the free buffer into the store slot. 406 * Zero the length of the new store buffer. 
407 */ 408#define ROTATE_BUFFERS(d) \ 409 (d)->bd_hbuf = (d)->bd_sbuf; \ 410 (d)->bd_hlen = (d)->bd_slen; \ 411 (d)->bd_sbuf = (d)->bd_fbuf; \ 412 (d)->bd_slen = 0; \ 413 (d)->bd_fbuf = 0; 414/* 415 * bpfread - read next chunk of packets from buffers 416 */ 417int 418bpfread(dev, uio, ioflag) 419 dev_t dev; 420 register struct uio *uio; 421 int ioflag; 422{ 423 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 424 int error; 425 int s; 426 427 /* 428 * Restrict application to use a buffer the same size as 429 * as kernel buffers. 430 */ 431 if (uio->uio_resid != d->bd_bufsize) 432 return (EINVAL); 433 434 s = splimp(); 435 /* 436 * If the hold buffer is empty, then do a timed sleep, which 437 * ends when the timeout expires or when enough packets 438 * have arrived to fill the store buffer. 439 */ 440 while (d->bd_hbuf == 0) { 441 if (d->bd_immediate && d->bd_slen != 0) { 442 /* 443 * A packet(s) either arrived since the previous 444 * read or arrived while we were asleep. 445 * Rotate the buffers and return what's here. 446 */ 447 ROTATE_BUFFERS(d); 448 break; 449 } 450 if (d->bd_rtout != -1) 451 error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf", 452 d->bd_rtout); 453 else 454 error = EWOULDBLOCK; /* User requested non-blocking I/O */ 455 if (error == EINTR || error == ERESTART) { 456 splx(s); 457 return (error); 458 } 459 if (error == EWOULDBLOCK) { 460 /* 461 * On a timeout, return what's in the buffer, 462 * which may be nothing. If there is something 463 * in the store buffer, we can rotate the buffers. 464 */ 465 if (d->bd_hbuf) 466 /* 467 * We filled up the buffer in between 468 * getting the timeout and arriving 469 * here, so we don't need to rotate. 470 */ 471 break; 472 473 if (d->bd_slen == 0) { 474 splx(s); 475 return (0); 476 } 477 ROTATE_BUFFERS(d); 478 break; 479 } 480 } 481 /* 482 * At this point, we know we have something in the hold slot. 483 */ 484 splx(s); 485 486 /* 487 * Move data from hold buffer into user space. 
488 * We know the entire buffer is transferred since 489 * we checked above that the read buffer is bpf_bufsize bytes. 490 */ 491 error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio); 492 493 s = splimp(); 494 d->bd_fbuf = d->bd_hbuf; 495 d->bd_hbuf = 0; 496 d->bd_hlen = 0; 497 splx(s); 498 499 return (error); 500} 501 502 503/* 504 * If there are processes sleeping on this descriptor, wake them up. 505 */ 506static inline void 507bpf_wakeup(d) 508 register struct bpf_d *d; 509{ 510 struct proc *p; 511 512 wakeup((caddr_t)d); 513 if (d->bd_async && d->bd_sig) 514 if (d->bd_pgid > 0) 515 gsignal (d->bd_pgid, d->bd_sig); 516 else if (p = pfind (-d->bd_pgid)) 517 psignal (p, d->bd_sig); 518 519#if BSD >= 199103 520 selwakeup(&d->bd_sel); 521 /* XXX */ 522 d->bd_sel.si_pid = 0; 523#else 524 if (d->bd_selproc) { 525 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 526 d->bd_selcoll = 0; 527 d->bd_selproc = 0; 528 } 529#endif 530} 531 532int 533bpfwrite(dev, uio, ioflag) 534 dev_t dev; 535 struct uio *uio; 536 int ioflag; 537{ 538 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 539 struct ifnet *ifp; 540 struct mbuf *m; 541 int error, s; 542 static struct sockaddr dst; 543 int datlen; 544 545 if (d->bd_bif == 0) 546 return (ENXIO); 547 548 ifp = d->bd_bif->bif_ifp; 549 550 if (uio->uio_resid == 0) 551 return (0); 552 553 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); 554 if (error) 555 return (error); 556 557 if (datlen > ifp->if_mtu) 558 return (EMSGSIZE); 559 560 s = splnet(); 561#if BSD >= 199103 562 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); 563#else 564 error = (*ifp->if_output)(ifp, m, &dst); 565#endif 566 splx(s); 567 /* 568 * The driver frees the mbuf. 569 */ 570 return (error); 571} 572 573/* 574 * Reset a descriptor by flushing its packet buffer and clearing the 575 * receive and drop counts. Should be called at splimp. 
576 */ 577static void 578reset_d(d) 579 struct bpf_d *d; 580{ 581 if (d->bd_hbuf) { 582 /* Free the hold buffer. */ 583 d->bd_fbuf = d->bd_hbuf; 584 d->bd_hbuf = 0; 585 } 586 d->bd_slen = 0; 587 d->bd_hlen = 0; 588 d->bd_rcount = 0; 589 d->bd_dcount = 0; 590} 591 592/* 593 * FIONREAD Check for read packet available. 594 * SIOCGIFADDR Get interface address - convenient hook to driver. 595 * BIOCGBLEN Get buffer len [for read()]. 596 * BIOCSETF Set ethernet read filter. 597 * BIOCFLUSH Flush read packet buffer. 598 * BIOCPROMISC Put interface into promiscuous mode. 599 * BIOCGDLT Get link layer type. 600 * BIOCGETIF Get interface name. 601 * BIOCSETIF Set interface. 602 * BIOCSRTIMEOUT Set read timeout. 603 * BIOCGRTIMEOUT Get read timeout. 604 * BIOCGSTATS Get packet stats. 605 * BIOCIMMEDIATE Set immediate mode. 606 * BIOCVERSION Get filter language version. 607 */ 608/* ARGSUSED */ 609int 610bpfioctl(dev, cmd, addr, flags, p) 611 dev_t dev; 612 int cmd; 613 caddr_t addr; 614 int flags; 615 struct proc *p; 616{ 617 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 618 int s, error = 0; 619 620 switch (cmd) { 621 622 default: 623 error = EINVAL; 624 break; 625 626 /* 627 * Check for read packet available. 628 */ 629 case FIONREAD: 630 { 631 int n; 632 633 s = splimp(); 634 n = d->bd_slen; 635 if (d->bd_hbuf) 636 n += d->bd_hlen; 637 splx(s); 638 639 *(int *)addr = n; 640 break; 641 } 642 643 case SIOCGIFADDR: 644 { 645 struct ifnet *ifp; 646 647 if (d->bd_bif == 0) 648 error = EINVAL; 649 else { 650 ifp = d->bd_bif->bif_ifp; 651 error = (*ifp->if_ioctl)(ifp, cmd, addr); 652 } 653 break; 654 } 655 656 /* 657 * Get buffer len [for read()]. 658 */ 659 case BIOCGBLEN: 660 *(u_int *)addr = d->bd_bufsize; 661 break; 662 663 /* 664 * Set buffer length. 
665 */ 666 case BIOCSBLEN: 667#if BSD < 199103 668 error = EINVAL; 669#else 670 if (d->bd_bif != 0) 671 error = EINVAL; 672 else { 673 register u_int size = *(u_int *)addr; 674 675 if (size > BPF_MAXBUFSIZE) 676 *(u_int *)addr = size = BPF_MAXBUFSIZE; 677 else if (size < BPF_MINBUFSIZE) 678 *(u_int *)addr = size = BPF_MINBUFSIZE; 679 d->bd_bufsize = size; 680 } 681#endif 682 break; 683 684 /* 685 * Set link layer read filter. 686 */ 687 case BIOCSETF: 688 error = bpf_setf(d, (struct bpf_program *)addr); 689 break; 690 691 /* 692 * Flush read packet buffer. 693 */ 694 case BIOCFLUSH: 695 s = splimp(); 696 reset_d(d); 697 splx(s); 698 break; 699 700 /* 701 * Put interface into promiscuous mode. 702 */ 703 case BIOCPROMISC: 704 if (d->bd_bif == 0) { 705 /* 706 * No interface attached yet. 707 */ 708 error = EINVAL; 709 break; 710 } 711 s = splimp(); 712 if (d->bd_promisc == 0) { 713 error = ifpromisc(d->bd_bif->bif_ifp, 1); 714 if (error == 0) 715 d->bd_promisc = 1; 716 } 717 splx(s); 718 break; 719 720 /* 721 * Get device parameters. 722 */ 723 case BIOCGDLT: 724 if (d->bd_bif == 0) 725 error = EINVAL; 726 else 727 *(u_int *)addr = d->bd_bif->bif_dlt; 728 break; 729 730 /* 731 * Set interface name. 732 */ 733 case BIOCGETIF: 734 if (d->bd_bif == 0) 735 error = EINVAL; 736 else 737 bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr); 738 break; 739 740 /* 741 * Set interface. 742 */ 743 case BIOCSETIF: 744 error = bpf_setif(d, (struct ifreq *)addr); 745 break; 746 747 /* 748 * Set read timeout. 749 */ 750 case BIOCSRTIMEOUT: 751 { 752 struct timeval *tv = (struct timeval *)addr; 753 u_long msec; 754 755 /* Compute number of milliseconds. */ 756 msec = tv->tv_sec * 1000 + tv->tv_usec / 1000; 757 /* Scale milliseconds to ticks. Assume hard 758 clock has millisecond or greater resolution 759 (i.e. tick >= 1000). For 10ms hardclock, 760 tick/1000 = 10, so rtout<-msec/10. */ 761 d->bd_rtout = msec / (tick / 1000); 762 break; 763 } 764 765 /* 766 * Get read timeout. 
767 */ 768 case BIOCGRTIMEOUT: 769 { 770 struct timeval *tv = (struct timeval *)addr; 771 u_long msec = d->bd_rtout; 772 773 msec *= tick / 1000; 774 tv->tv_sec = msec / 1000; 775 tv->tv_usec = msec % 1000; 776 break; 777 } 778 779 /* 780 * Get packet stats. 781 */ 782 case BIOCGSTATS: 783 { 784 struct bpf_stat *bs = (struct bpf_stat *)addr; 785 786 bs->bs_recv = d->bd_rcount; 787 bs->bs_drop = d->bd_dcount; 788 break; 789 } 790 791 /* 792 * Set immediate mode. 793 */ 794 case BIOCIMMEDIATE: 795 d->bd_immediate = *(u_int *)addr; 796 break; 797 798 case BIOCVERSION: 799 { 800 struct bpf_version *bv = (struct bpf_version *)addr; 801 802 bv->bv_major = BPF_MAJOR_VERSION; 803 bv->bv_minor = BPF_MINOR_VERSION; 804 break; 805 } 806 807 808 case FIONBIO: /* Non-blocking I/O */ 809 if (*(int *)addr) 810 d->bd_rtout = -1; 811 else 812 d->bd_rtout = 0; 813 break; 814 815 case FIOASYNC: /* Send signal on receive packets */ 816 d->bd_async = *(int *)addr; 817 break; 818 819/* N.B. ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the 820 equivalent of a TIOCSPGRP and hence end up here. *However* TIOCSPGRP's arg 821 is a process group if it's positive and a process id if it's negative. This 822 is exactly the opposite of what the other two functions want! Therefore 823 there is code in ioctl and fcntl to negate the arg before calling here. */ 824 825 case TIOCSPGRP: /* Process or group to send signals to */ 826 d->bd_pgid = *(int *)addr; 827 break; 828 829 case TIOCGPGRP: 830 *(int *)addr = d->bd_pgid; 831 break; 832 833 case BIOCSRSIG: /* Set receive signal */ 834 { 835 u_int sig; 836 837 sig = *(u_int *)addr; 838 839 if (sig >= NSIG) 840 error = EINVAL; 841 else 842 d->bd_sig = sig; 843 break; 844 } 845 case BIOCGRSIG: 846 *(u_int *)addr = d->bd_sig; 847 break; 848 } 849 return (error); 850} 851 852/* 853 * Set d's packet filter program to fp. If this file already has a filter, 854 * free it and replace it. Returns EINVAL for bogus requests. 
855 */ 856int 857bpf_setf(d, fp) 858 struct bpf_d *d; 859 struct bpf_program *fp; 860{ 861 struct bpf_insn *fcode, *old; 862 u_int flen, size; 863 int s; 864 865 old = d->bd_filter; 866 if (fp->bf_insns == 0) { 867 if (fp->bf_len != 0) 868 return (EINVAL); 869 s = splimp(); 870 d->bd_filter = 0; 871 reset_d(d); 872 splx(s); 873 if (old != 0) 874 free((caddr_t)old, M_DEVBUF); 875 return (0); 876 } 877 flen = fp->bf_len; 878 if (flen > BPF_MAXINSNS) 879 return (EINVAL); 880 881 size = flen * sizeof(*fp->bf_insns); 882 fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK); 883 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 && 884 bpf_validate(fcode, (int)flen)) { 885 s = splimp(); 886 d->bd_filter = fcode; 887 reset_d(d); 888 splx(s); 889 if (old != 0) 890 free((caddr_t)old, M_DEVBUF); 891 892 return (0); 893 } 894 free((caddr_t)fcode, M_DEVBUF); 895 return (EINVAL); 896} 897 898/* 899 * Detach a file from its current interface (if attached at all) and attach 900 * to the interface indicated by the name stored in ifr. 901 * Return an errno or 0. 902 */ 903static int 904bpf_setif(d, ifr) 905 struct bpf_d *d; 906 struct ifreq *ifr; 907{ 908 struct bpf_if *bp; 909 char *cp; 910 int unit, s, error; 911 912 /* 913 * Separate string into name part and unit number. Put a null 914 * byte at the end of the name part, and compute the number. 915 * If the a unit number is unspecified, the default is 0, 916 * as initialized above. XXX This should be common code. 917 */ 918 unit = 0; 919 cp = ifr->ifr_name; 920 cp[sizeof(ifr->ifr_name) - 1] = '\0'; 921 while (*cp++) { 922 if (*cp >= '0' && *cp <= '9') { 923 unit = *cp - '0'; 924 *cp++ = '\0'; 925 while (*cp) 926 unit = 10 * unit + *cp++ - '0'; 927 break; 928 } 929 } 930 /* 931 * Look through attached interfaces for the named one. 
932 */ 933 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) { 934 struct ifnet *ifp = bp->bif_ifp; 935 936 if (ifp == 0 || unit != ifp->if_unit 937 || strcmp(ifp->if_name, ifr->ifr_name) != 0) 938 continue; 939 /* 940 * We found the requested interface. 941 * If it's not up, return an error. 942 * Allocate the packet buffers if we need to. 943 * If we're already attached to requested interface, 944 * just flush the buffer. 945 */ 946 if ((ifp->if_flags & IFF_UP) == 0) 947 return (ENETDOWN); 948 949 if (d->bd_sbuf == 0) { 950 error = bpf_allocbufs(d); 951 if (error != 0) 952 return (error); 953 } 954 s = splimp(); 955 if (bp != d->bd_bif) { 956 if (d->bd_bif) 957 /* 958 * Detach if attached to something else. 959 */ 960 bpf_detachd(d); 961 962 bpf_attachd(d, bp); 963 } 964 reset_d(d); 965 splx(s); 966 return (0); 967 } 968 /* Not found. */ 969 return (ENXIO); 970} 971 972/* 973 * Convert an interface name plus unit number of an ifp to a single 974 * name which is returned in the ifr. 975 */ 976static void 977bpf_ifname(ifp, ifr) 978 struct ifnet *ifp; 979 struct ifreq *ifr; 980{ 981 char *s = ifp->if_name; 982 char *d = ifr->ifr_name; 983 984 while (*d++ = *s++) 985 continue; 986 /* XXX Assume that unit number is less than 10. */ 987 *d++ = ifp->if_unit + '0'; 988 *d = '\0'; 989} 990 991/* 992 * The new select interface passes down the proc pointer; the old select 993 * stubs had to grab it out of the user struct. This glue allows either case. 994 */ 995#if BSD >= 199103 996#define bpf_select bpfselect 997#else 998int 999bpfselect(dev, rw) 1000 register dev_t dev; 1001 int rw; 1002{ 1003 return (bpf_select(dev, rw, u.u_procp)); 1004} 1005#endif 1006 1007/* 1008 * Support for select() system call 1009 * 1010 * Return true iff the specific operation will not block indefinitely. 1011 * Otherwise, return false but make a note that a selwakeup() must be done. 
1012 */ 1013int 1014bpf_select(dev, rw, p) 1015 register dev_t dev; 1016 int rw; 1017 struct proc *p; 1018{ 1019 register struct bpf_d *d; 1020 register int s; 1021 1022 if (rw != FREAD) 1023 return (0); 1024 /* 1025 * An imitation of the FIONREAD ioctl code. 1026 */ 1027 d = &bpf_dtab[minor(dev)]; 1028 1029 s = splimp(); 1030 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) { 1031 /* 1032 * There is data waiting. 1033 */ 1034 splx(s); 1035 return (1); 1036 } 1037#if BSD >= 199103 1038 selrecord(p, &d->bd_sel); 1039#else 1040 /* 1041 * No data ready. If there's already a select() waiting on this 1042 * minor device then this is a collision. This shouldn't happen 1043 * because minors really should not be shared, but if a process 1044 * forks while one of these is open, it is possible that both 1045 * processes could select on the same descriptor. 1046 */ 1047 if (d->bd_selproc && d->bd_selproc->p_wchan == (caddr_t)&selwait) 1048 d->bd_selcoll = 1; 1049 else 1050 d->bd_selproc = p; 1051#endif 1052 splx(s); 1053 return (0); 1054} 1055 1056/* 1057 * Incoming linkage from device drivers. Process the packet pkt, of length 1058 * pktlen, which is stored in a contiguous buffer. The packet is parsed 1059 * by each process' filter, and if accepted, stashed into the corresponding 1060 * buffer. 1061 */ 1062void 1063bpf_tap(arg, pkt, pktlen) 1064 caddr_t arg; 1065 register u_char *pkt; 1066 register u_int pktlen; 1067{ 1068 struct bpf_if *bp; 1069 register struct bpf_d *d; 1070 register u_int slen; 1071 /* 1072 * Note that the ipl does not have to be raised at this point. 1073 * The only problem that could arise here is that if two different 1074 * interfaces shared any data. This is not the case. 
1075 */ 1076 bp = (struct bpf_if *)arg; 1077 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1078 ++d->bd_rcount; 1079 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); 1080 if (slen != 0) 1081 catchpacket(d, pkt, pktlen, slen, bcopy); 1082 } 1083} 1084 1085/* 1086 * Copy data from an mbuf chain into a buffer. This code is derived 1087 * from m_copydata in sys/uipc_mbuf.c. 1088 */ 1089static void 1090bpf_mcopy(src_arg, dst_arg, len) 1091 const void *src_arg; 1092 void *dst_arg; 1093 register u_int len; 1094{ 1095 register const struct mbuf *m; 1096 register u_int count; 1097 u_char *dst; 1098 1099 m = src_arg; 1100 dst = dst_arg; 1101 while (len > 0) { 1102 if (m == 0) 1103 panic("bpf_mcopy"); 1104 count = min(m->m_len, len); 1105 (void)memcpy((caddr_t)dst, mtod(m, caddr_t), count); 1106 m = m->m_next; 1107 dst += count; 1108 len -= count; 1109 } 1110} 1111 1112/* 1113 * Incoming linkage from device drivers, when packet is in an mbuf chain. 1114 */ 1115void 1116bpf_mtap(arg, m) 1117 caddr_t arg; 1118 struct mbuf *m; 1119{ 1120 struct bpf_if *bp = (struct bpf_if *)arg; 1121 struct bpf_d *d; 1122 u_int pktlen, slen; 1123 struct mbuf *m0; 1124 1125 pktlen = 0; 1126 for (m0 = m; m0 != 0; m0 = m0->m_next) 1127 pktlen += m0->m_len; 1128 1129 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1130 ++d->bd_rcount; 1131 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); 1132 if (slen != 0) 1133 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); 1134 } 1135} 1136 1137/* 1138 * Move the packet data from interface memory (pkt) into the 1139 * store buffer. Return 1 if it's time to wakeup a listener (buffer full), 1140 * otherwise 0. "copy" is the routine called to do the actual data 1141 * transfer. bcopy is passed in to copy contiguous chunks, while 1142 * bpf_mcopy is passed in to copy mbuf chains. In the latter case, 1143 * pkt is really an mbuf. 
1144 */ 1145static void 1146catchpacket(d, pkt, pktlen, snaplen, cpfn) 1147 register struct bpf_d *d; 1148 register u_char *pkt; 1149 register u_int pktlen, snaplen; 1150 register void (*cpfn)(const void *, void *, u_int); 1151{ 1152 register struct bpf_hdr *hp; 1153 register int totlen, curlen; 1154 register int hdrlen = d->bd_bif->bif_hdrlen; 1155 /* 1156 * Figure out how many bytes to move. If the packet is 1157 * greater or equal to the snapshot length, transfer that 1158 * much. Otherwise, transfer the whole packet (unless 1159 * we hit the buffer size limit). 1160 */ 1161 totlen = hdrlen + min(snaplen, pktlen); 1162 if (totlen > d->bd_bufsize) 1163 totlen = d->bd_bufsize; 1164 1165 /* 1166 * Round up the end of the previous packet to the next longword. 1167 */ 1168 curlen = BPF_WORDALIGN(d->bd_slen); 1169 if (curlen + totlen > d->bd_bufsize) { 1170 /* 1171 * This packet will overflow the storage buffer. 1172 * Rotate the buffers if we can, then wakeup any 1173 * pending reads. 1174 */ 1175 if (d->bd_fbuf == 0) { 1176 /* 1177 * We haven't completed the previous read yet, 1178 * so drop the packet. 1179 */ 1180 ++d->bd_dcount; 1181 return; 1182 } 1183 ROTATE_BUFFERS(d); 1184 bpf_wakeup(d); 1185 curlen = 0; 1186 } 1187 else if (d->bd_immediate) 1188 /* 1189 * Immediate mode is set. A packet arrived so any 1190 * reads should be woken up. 1191 */ 1192 bpf_wakeup(d); 1193 1194 /* 1195 * Append the bpf header. 1196 */ 1197 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen); 1198#if BSD >= 199103 1199 microtime(&hp->bh_tstamp); 1200#elif defined(sun) 1201 uniqtime(&hp->bh_tstamp); 1202#else 1203 hp->bh_tstamp = time; 1204#endif 1205 hp->bh_datalen = pktlen; 1206 hp->bh_hdrlen = hdrlen; 1207 /* 1208 * Copy the packet data into the store buffer and update its length. 1209 */ 1210 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen)); 1211 d->bd_slen = curlen + totlen; 1212} 1213 1214/* 1215 * Initialize all nonzero fields of a descriptor. 
1216 */ 1217static int 1218bpf_allocbufs(d) 1219 register struct bpf_d *d; 1220{ 1221 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1222 if (d->bd_fbuf == 0) 1223 return (ENOBUFS); 1224 1225 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK); 1226 if (d->bd_sbuf == 0) { 1227 free(d->bd_fbuf, M_DEVBUF); 1228 return (ENOBUFS); 1229 } 1230 d->bd_slen = 0; 1231 d->bd_hlen = 0; 1232 return (0); 1233} 1234 1235/* 1236 * Free buffers currently in use by a descriptor. 1237 * Called on close. 1238 */ 1239static void 1240bpf_freed(d) 1241 register struct bpf_d *d; 1242{ 1243 /* 1244 * We don't need to lock out interrupts since this descriptor has 1245 * been detached from its interface and it yet hasn't been marked 1246 * free. 1247 */ 1248 if (d->bd_sbuf != 0) { 1249 free(d->bd_sbuf, M_DEVBUF); 1250 if (d->bd_hbuf != 0) 1251 free(d->bd_hbuf, M_DEVBUF); 1252 if (d->bd_fbuf != 0) 1253 free(d->bd_fbuf, M_DEVBUF); 1254 } 1255 if (d->bd_filter) 1256 free((caddr_t)d->bd_filter, M_DEVBUF); 1257 1258 D_MARKFREE(d); 1259} 1260 1261/* 1262 * Attach an interface to bpf. driverp is a pointer to a (struct bpf_if *) 1263 * in the driver's softc; dlt is the link layer type; hdrlen is the fixed 1264 * size of the link header (variable length headers not yet supported). 1265 */ 1266void 1267bpfattach(driverp, ifp, dlt, hdrlen) 1268 caddr_t *driverp; 1269 struct ifnet *ifp; 1270 u_int dlt, hdrlen; 1271{ 1272 struct bpf_if *bp; 1273 int i; 1274#if BSD < 199103 1275 static struct bpf_if bpf_ifs[NBPFILTER]; 1276 static int bpfifno; 1277 1278 bp = (bpfifno < NBPFILTER) ? 
&bpf_ifs[bpfifno++] : 0; 1279#else 1280 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); 1281#endif 1282 if (bp == 0) 1283 panic("bpfattach"); 1284 1285 bp->bif_dlist = 0; 1286 bp->bif_driverp = (struct bpf_if **)driverp; 1287 bp->bif_ifp = ifp; 1288 bp->bif_dlt = dlt; 1289 1290 bp->bif_next = bpf_iflist; 1291 bpf_iflist = bp; 1292 1293 *bp->bif_driverp = 0; 1294 1295 /* 1296 * Compute the length of the bpf header. This is not necessarily 1297 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1298 * that the network layer header begins on a longword boundary (for 1299 * performance reasons and to alleviate alignment restrictions). 1300 */ 1301 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1302 1303 /* 1304 * Mark all the descriptors free if this hasn't been done. 1305 */ 1306 if (!D_ISFREE(&bpf_dtab[0])) 1307 for (i = 0; i < NBPFILTER; ++i) 1308 D_MARKFREE(&bpf_dtab[i]); 1309 1310 if (bootverbose) 1311 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); 1312} 1313#endif 1314