bpf.c revision 29364
1/* 2 * Copyright (c) 1990, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * This code is derived from the Stanford/CMU enet packet filter, 6 * (net/enet.c) distributed as part of 4.3BSD, and code contributed 7 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence 8 * Berkeley Laboratory. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * @(#)bpf.c 8.2 (Berkeley) 3/28/94 39 * 40 * $Id: bpf.c,v 1.32 1997/09/02 01:18:28 bde Exp $ 41 */ 42 43#include "bpfilter.h" 44 45#if NBPFILTER > 0 46 47#ifndef __GNUC__ 48#define inline 49#else 50#define inline __inline 51#endif 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <sys/conf.h> 56#include <sys/malloc.h> 57#include <sys/mbuf.h> 58#include <sys/buf.h> 59#include <sys/time.h> 60#include <sys/proc.h> 61#include <sys/signalvar.h> 62#include <sys/filio.h> 63#include <sys/sockio.h> 64#include <sys/ttycom.h> 65 66#include <sys/fcntl.h> 67#if defined(sparc) && BSD < 199103 68#include <sys/stream.h> 69#endif 70#include <sys/uio.h> 71#include <sys/poll.h> 72 73#include <sys/socket.h> 74#include <sys/socketvar.h> 75#include <sys/protosw.h> 76#include <net/if.h> 77 78#include <net/bpf.h> 79#include <net/bpfdesc.h> 80 81#include <sys/errno.h> 82 83#include <netinet/in.h> 84#include <netinet/if_ether.h> 85#include <sys/kernel.h> 86#include <sys/sysctl.h> 87#include <sys/conf.h> 88#ifdef DEVFS 89#include <sys/devfsext.h> 90#endif /*DEVFS*/ 91 92 93/* 94 * Older BSDs don't have kernel malloc. 
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_dtab holds the descriptors, indexed by minor device #
 */
static struct bpf_if	*bpf_iflist;
static struct bpf_d	bpf_dtab[NBPFILTER];

/* Forward declarations for the static helpers defined below. */
static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_ifname __P((struct ifnet *, struct ifreq *));
static void	bpf_mcopy __P((const void *, void *, u_int));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, u_int)));
static void	reset_d __P((struct bpf_d *));
static int	bpf_setf __P((struct bpf_d *, struct bpf_program *));

/* Character-device entry points (hooked into bpf_cdevsw below). */
static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw =
	{ bpfopen,	bpfclose,	bpfread,	bpfwrite,	/*23*/
	  bpfioctl,	nostop,		nullreset,	nodevtotty,/* bpf */
	  bpfpoll,	nommap,		NULL,	"bpf",	NULL,	-1 };


static
int 154bpf_movein(uio, linktype, mp, sockp, datlen) 155 register struct uio *uio; 156 int linktype, *datlen; 157 register struct mbuf **mp; 158 register struct sockaddr *sockp; 159{ 160 struct mbuf *m; 161 int error; 162 int len; 163 int hlen; 164 165 /* 166 * Build a sockaddr based on the data link layer type. 167 * We do this at this level because the ethernet header 168 * is copied directly into the data field of the sockaddr. 169 * In the case of SLIP, there is no header and the packet 170 * is forwarded as is. 171 * Also, we are careful to leave room at the front of the mbuf 172 * for the link level header. 173 */ 174 switch (linktype) { 175 176 case DLT_SLIP: 177 sockp->sa_family = AF_INET; 178 hlen = 0; 179 break; 180 181 case DLT_EN10MB: 182 sockp->sa_family = AF_UNSPEC; 183 /* XXX Would MAXLINKHDR be better? */ 184 hlen = sizeof(struct ether_header); 185 break; 186 187 case DLT_FDDI: 188#if defined(__FreeBSD__) || defined(__bsdi__) 189 sockp->sa_family = AF_IMPLINK; 190 hlen = 0; 191#else 192 sockp->sa_family = AF_UNSPEC; 193 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */ 194 hlen = 24; 195#endif 196 break; 197 198 case DLT_NULL: 199 sockp->sa_family = AF_UNSPEC; 200 hlen = 0; 201 break; 202 203 default: 204 return (EIO); 205 } 206 207 len = uio->uio_resid; 208 *datlen = len - hlen; 209 if ((unsigned)len > MCLBYTES) 210 return (EIO); 211 212 MGETHDR(m, M_WAIT, MT_DATA); 213 if (m == 0) 214 return (ENOBUFS); 215 if (len > MHLEN) { 216#if BSD >= 199103 217 MCLGET(m, M_WAIT); 218 if ((m->m_flags & M_EXT) == 0) { 219#else 220 MCLGET(m); 221 if (m->m_len != MCLBYTES) { 222#endif 223 error = ENOBUFS; 224 goto bad; 225 } 226 } 227 m->m_pkthdr.len = m->m_len = len; 228 m->m_pkthdr.rcvif = NULL; 229 *mp = m; 230 /* 231 * Make room for link header. 
232 */ 233 if (hlen != 0) { 234 m->m_pkthdr.len -= hlen; 235 m->m_len -= hlen; 236#if BSD >= 199103 237 m->m_data += hlen; /* XXX */ 238#else 239 m->m_off += hlen; 240#endif 241 error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio); 242 if (error) 243 goto bad; 244 } 245 error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio); 246 if (!error) 247 return (0); 248 bad: 249 m_freem(m); 250 return (error); 251} 252 253/* 254 * Attach file to the bpf interface, i.e. make d listen on bp. 255 * Must be called at splimp. 256 */ 257static void 258bpf_attachd(d, bp) 259 struct bpf_d *d; 260 struct bpf_if *bp; 261{ 262 /* 263 * Point d at bp, and add d to the interface's list of listeners. 264 * Finally, point the driver's bpf cookie at the interface so 265 * it will divert packets to bpf. 266 */ 267 d->bd_bif = bp; 268 d->bd_next = bp->bif_dlist; 269 bp->bif_dlist = d; 270 271 bp->bif_ifp->if_bpf = bp; 272} 273 274/* 275 * Detach a file from its interface. 276 */ 277static void 278bpf_detachd(d) 279 struct bpf_d *d; 280{ 281 struct bpf_d **p; 282 struct bpf_if *bp; 283 284 bp = d->bd_bif; 285 /* 286 * Check if this descriptor had requested promiscuous mode. 287 * If so, turn it off. 288 */ 289 if (d->bd_promisc) { 290 d->bd_promisc = 0; 291 if (ifpromisc(bp->bif_ifp, 0)) 292 /* 293 * Something is really wrong if we were able to put 294 * the driver into promiscuous mode, but can't 295 * take it out. 296 */ 297 panic("bpf: ifpromisc failed"); 298 } 299 /* Remove d from the interface's descriptor list. */ 300 p = &bp->bif_dlist; 301 while (*p != d) { 302 p = &(*p)->bd_next; 303 if (*p == 0) 304 panic("bpf_detachd: descriptor not in list"); 305 } 306 *p = (*p)->bd_next; 307 if (bp->bif_dlist == 0) 308 /* 309 * Let the driver know that there are no more listeners. 310 */ 311 d->bd_bif->bif_ifp->if_bpf = 0; 312 d->bd_bif = 0; 313} 314 315 316/* 317 * Mark a descriptor free by making it point to itself. 
 * This is probably cheaper than marking with a constant since
 * the address should be in a register anyway.
 */
#define D_ISFREE(d) ((d) == (d)->bd_next)
#define D_MARKFREE(d) ((d)->bd_next = (d))
#define D_MARKUSED(d) ((d)->bd_next = 0)

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (minor(dev) >= NBPFILTER)
		return (ENXIO);
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	d = &bpf_dtab[minor(dev)];
	if (!D_ISFREE(d))
		return (EBUSY);

	/*
	 * Mark the descriptor in use (the bzero leaves bd_next == 0,
	 * which is D_MARKUSED) and do most initialization.
	 */
	bzero((char *)d, sizeof(*d));
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	register int s;

	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	/* bpf_freed() releases the buffers and runs D_MARKFREE(). */
	bpf_freed(d);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

/*
 * Emulate tsleep() with sleep()/timeout(): returns 0 on wakeup,
 * EWOULDBLOCK on read-timeout expiry, EINTR on signal.
 */
int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * NOTE(review): multi-statement macro, not wrapped in do { } while (0);
 * only safe when expanded as a full statement (as all current uses are).
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 *  bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * as kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 * bd_rtout == -1 means non-blocking (set via FIONBIO);
	 * bd_rtout == 0 means sleep until a packet arrives.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * A packet(s) either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_rtout != -1)
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		else
			error = EWOULDBLOCK; /* User requested non-blocking I/O */
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	/* Recycle the hold buffer as the new free buffer. */
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
522 */ 523static inline void 524bpf_wakeup(d) 525 register struct bpf_d *d; 526{ 527 struct proc *p; 528 529 wakeup((caddr_t)d); 530 if (d->bd_async && d->bd_sig) 531 if (d->bd_pgid > 0) 532 gsignal (d->bd_pgid, d->bd_sig); 533 else if (p = pfind (-d->bd_pgid)) 534 psignal (p, d->bd_sig); 535 536#if BSD >= 199103 537 selwakeup(&d->bd_sel); 538 /* XXX */ 539 d->bd_sel.si_pid = 0; 540#else 541 if (d->bd_selproc) { 542 selwakeup(d->bd_selproc, (int)d->bd_selcoll); 543 d->bd_selcoll = 0; 544 d->bd_selproc = 0; 545 } 546#endif 547} 548 549static int 550bpfwrite(dev, uio, ioflag) 551 dev_t dev; 552 struct uio *uio; 553 int ioflag; 554{ 555 register struct bpf_d *d = &bpf_dtab[minor(dev)]; 556 struct ifnet *ifp; 557 struct mbuf *m; 558 int error, s; 559 static struct sockaddr dst; 560 int datlen; 561 562 if (d->bd_bif == 0) 563 return (ENXIO); 564 565 ifp = d->bd_bif->bif_ifp; 566 567 if (uio->uio_resid == 0) 568 return (0); 569 570 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen); 571 if (error) 572 return (error); 573 574 if (datlen > ifp->if_mtu) 575 return (EMSGSIZE); 576 577 s = splnet(); 578#if BSD >= 199103 579 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0); 580#else 581 error = (*ifp->if_output)(ifp, m, &dst); 582#endif 583 splx(s); 584 /* 585 * The driver frees the mbuf. 586 */ 587 return (error); 588} 589 590/* 591 * Reset a descriptor by flushing its packet buffer and clearing the 592 * receive and drop counts. Should be called at splimp. 593 */ 594static void 595reset_d(d) 596 struct bpf_d *d; 597{ 598 if (d->bd_hbuf) { 599 /* Free the hold buffer. */ 600 d->bd_fbuf = d->bd_hbuf; 601 d->bd_hbuf = 0; 602 } 603 d->bd_slen = 0; 604 d->bd_hlen = 0; 605 d->bd_rcount = 0; 606 d->bd_dcount = 0; 607} 608 609/* 610 * FIONREAD Check for read packet available. 611 * SIOCGIFADDR Get interface address - convenient hook to driver. 612 * BIOCGBLEN Get buffer len [for read()]. 613 * BIOCSETF Set ethernet read filter. 
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 */
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	int cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = &bpf_dtab[minor(dev)];
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.  Only allowed before an interface is
	 * attached (the buffers are allocated in bpf_setif()).
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			/* Clamp to the legal range; report the clamped value back. */
			if (size > BPF_MAXBUFSIZE)
				*(u_int *)addr = size = BPF_MAXBUFSIZE;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif->bif_ifp, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec;

			/* Compute number of milliseconds. */
			msec = tv->tv_sec * 1000 + tv->tv_usec / 1000;
			/* Scale milliseconds to ticks.  Assume hard
			   clock has millisecond or greater resolution
			   (i.e. tick >= 1000).  For 10ms hardclock,
			   tick/1000 = 10, so rtout<-msec/10.
			   NOTE(review): if tick < 1000 (HZ > 1000),
			   tick/1000 is 0 and this divides by zero --
			   confirm HZ <= 1000 on all configurations. */
			d->bd_rtout = msec / (tick / 1000);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long msec = d->bd_rtout;

			msec *= tick / 1000;
			tv->tv_sec = msec / 1000;
			tv->tv_usec = msec % 1000;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}


	case FIONBIO:		/* Non-blocking I/O */
		if (*(int *)addr)
			d->bd_rtout = -1;
		else
			d->bd_rtout = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

/* N.B.  ioctl (FIOSETOWN) and fcntl (F_SETOWN) both end up doing the
   equivalent of a TIOCSPGRP and hence end up here.  *However* TIOCSPGRP's arg
   is a process group if it's positive and a process id if it's negative.  This
   is exactly the opposite of what the other two functions want!  Therefore
   there is code in ioctl and fcntl to negate the arg before calling here. */

	case TIOCSPGRP:		/* Process or group to send signals to */
		d->bd_pgid = *(int *)addr;
		break;

	case TIOCGPGRP:
		*(int *)addr = d->bd_pgid;
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	/* A null insns pointer with zero length removes the filter. */
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	/*
	 * Copy the program in and install it only after bpf_validate()
	 * accepts it; the swap happens at splimp so the tap never sees
	 * a half-installed filter.
	 */
	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_DEVBUF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_DEVBUF);

		return (0);
	}
	free((caddr_t)fcode, M_DEVBUF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
947 */ 948 if ((ifp->if_flags & IFF_UP) == 0) 949 return (ENETDOWN); 950 951 if (d->bd_sbuf == 0) { 952 error = bpf_allocbufs(d); 953 if (error != 0) 954 return (error); 955 } 956 s = splimp(); 957 if (bp != d->bd_bif) { 958 if (d->bd_bif) 959 /* 960 * Detach if attached to something else. 961 */ 962 bpf_detachd(d); 963 964 bpf_attachd(d, bp); 965 } 966 reset_d(d); 967 splx(s); 968 return (0); 969 } 970 /* Not found. */ 971 return (ENXIO); 972} 973 974/* 975 * Convert an interface name plus unit number of an ifp to a single 976 * name which is returned in the ifr. 977 */ 978static void 979bpf_ifname(ifp, ifr) 980 struct ifnet *ifp; 981 struct ifreq *ifr; 982{ 983 char *s = ifp->if_name; 984 char *d = ifr->ifr_name; 985 986 while (*d++ = *s++) 987 continue; 988 d--; /* back to the null */ 989 /* XXX Assume that unit number is less than 10. */ 990 *d++ = ifp->if_unit + '0'; 991 *d = '\0'; 992} 993 994/* 995 * Support for select() and poll() system calls 996 * 997 * Return true iff the specific operation will not block indefinitely. 998 * Otherwise, return false but make a note that a selwakeup() must be done. 999 */ 1000int 1001bpfpoll(dev, events, p) 1002 register dev_t dev; 1003 int events; 1004 struct proc *p; 1005{ 1006 register struct bpf_d *d; 1007 register int s; 1008 int revents = 0; 1009 1010 /* 1011 * An imitation of the FIONREAD ioctl code. 1012 */ 1013 d = &bpf_dtab[minor(dev)]; 1014 1015 s = splimp(); 1016 if (events & (POLLIN | POLLRDNORM)) 1017 if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0)) 1018 revents |= events & (POLLIN | POLLRDNORM); 1019 else 1020 selrecord(p, &d->bd_sel); 1021 1022 splx(s); 1023 return (revents); 1024} 1025 1026/* 1027 * Incoming linkage from device drivers. Process the packet pkt, of length 1028 * pktlen, which is stored in a contiguous buffer. The packet is parsed 1029 * by each process' filter, and if accepted, stashed into the corresponding 1030 * buffer. 
1031 */ 1032void 1033bpf_tap(ifp, pkt, pktlen) 1034 struct ifnet *ifp; 1035 register u_char *pkt; 1036 register u_int pktlen; 1037{ 1038 struct bpf_if *bp; 1039 register struct bpf_d *d; 1040 register u_int slen; 1041 /* 1042 * Note that the ipl does not have to be raised at this point. 1043 * The only problem that could arise here is that if two different 1044 * interfaces shared any data. This is not the case. 1045 */ 1046 bp = ifp->if_bpf; 1047 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1048 ++d->bd_rcount; 1049 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen); 1050 if (slen != 0) 1051 catchpacket(d, pkt, pktlen, slen, bcopy); 1052 } 1053} 1054 1055/* 1056 * Copy data from an mbuf chain into a buffer. This code is derived 1057 * from m_copydata in sys/uipc_mbuf.c. 1058 */ 1059static void 1060bpf_mcopy(src_arg, dst_arg, len) 1061 const void *src_arg; 1062 void *dst_arg; 1063 register u_int len; 1064{ 1065 register const struct mbuf *m; 1066 register u_int count; 1067 u_char *dst; 1068 1069 m = src_arg; 1070 dst = dst_arg; 1071 while (len > 0) { 1072 if (m == 0) 1073 panic("bpf_mcopy"); 1074 count = min(m->m_len, len); 1075 bcopy(mtod(m, void *), dst, count); 1076 m = m->m_next; 1077 dst += count; 1078 len -= count; 1079 } 1080} 1081 1082/* 1083 * Incoming linkage from device drivers, when packet is in an mbuf chain. 1084 */ 1085void 1086bpf_mtap(ifp, m) 1087 struct ifnet *ifp; 1088 struct mbuf *m; 1089{ 1090 struct bpf_if *bp = ifp->if_bpf; 1091 struct bpf_d *d; 1092 u_int pktlen, slen; 1093 struct mbuf *m0; 1094 1095 pktlen = 0; 1096 for (m0 = m; m0 != 0; m0 = m0->m_next) 1097 pktlen += m0->m_len; 1098 1099 for (d = bp->bif_dlist; d != 0; d = d->bd_next) { 1100 ++d->bd_rcount; 1101 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0); 1102 if (slen != 0) 1103 catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy); 1104 } 1105} 1106 1107/* 1108 * Move the packet data from interface memory (pkt) into the 1109 * store buffer. 
 * The function itself wakes any pending reader (via bpf_wakeup) when
 * the buffers are rotated or immediate mode is set; it returns nothing.
 * "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn)(const void *, void *, u_int);
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 * Allocates the free and store buffers; the hold buffer comes into
 * existence only via ROTATE_BUFFERS.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_DEVBUF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it yet hasn't been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_DEVBUF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_DEVBUF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_DEVBUF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_DEVBUF);

	D_MARKFREE(d);
}

/*
 * Attach an interface to bpf.  ifp is the driver's ifnet (the stale
 * reference to "driverp" predates this calling convention);
 * dlt is the link layer type; hdrlen is the fixed
 * size of the link header (variable length headers not yet supported).
1235 */ 1236void 1237bpfattach(ifp, dlt, hdrlen) 1238 struct ifnet *ifp; 1239 u_int dlt, hdrlen; 1240{ 1241 struct bpf_if *bp; 1242 int i; 1243 bp = (struct bpf_if *)malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT); 1244 if (bp == 0) 1245 panic("bpfattach"); 1246 1247 bp->bif_dlist = 0; 1248 bp->bif_ifp = ifp; 1249 bp->bif_dlt = dlt; 1250 1251 bp->bif_next = bpf_iflist; 1252 bpf_iflist = bp; 1253 1254 bp->bif_ifp->if_bpf = 0; 1255 1256 /* 1257 * Compute the length of the bpf header. This is not necessarily 1258 * equal to SIZEOF_BPF_HDR because we want to insert spacing such 1259 * that the network layer header begins on a longword boundary (for 1260 * performance reasons and to alleviate alignment restrictions). 1261 */ 1262 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen; 1263 1264 /* 1265 * Mark all the descriptors free if this hasn't been done. 1266 */ 1267 if (!D_ISFREE(&bpf_dtab[0])) 1268 for (i = 0; i < NBPFILTER; ++i) 1269 D_MARKFREE(&bpf_dtab[i]); 1270 1271 if (bootverbose) 1272 printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit); 1273} 1274 1275#ifdef DEVFS 1276static void *bpf_devfs_token[NBPFILTER]; 1277#endif 1278 1279static bpf_devsw_installed = 0; 1280 1281static void bpf_drvinit(void *unused) 1282{ 1283 dev_t dev; 1284#ifdef DEVFS 1285 int i; 1286#endif 1287 1288 if( ! bpf_devsw_installed ) { 1289 dev = makedev(CDEV_MAJOR, 0); 1290 cdevsw_add(&dev,&bpf_cdevsw, NULL); 1291 bpf_devsw_installed = 1; 1292#ifdef DEVFS 1293 1294 for ( i = 0 ; i < NBPFILTER ; i++ ) { 1295 bpf_devfs_token[i] = 1296 devfs_add_devswf(&bpf_cdevsw, i, DV_CHR, 0, 0, 1297 0600, "bpf%d", i); 1298 } 1299#endif 1300 } 1301} 1302 1303SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL) 1304 1305#endif 1306