/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
 *
 * $FreeBSD: head/sys/net/bpf.c 130640 2004-06-17 17:16:53Z phk $
 */

#include "opt_bpf.h"
#include "opt_mac.h"
#include "opt_netgraph.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/mac.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#include <sys/event.h>
#include <sys/file.h>
#include <sys/poll.h>
#include <sys/proc.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if defined(DEV_BPF) || defined(NETGRAPH_BPF)

#define PRINET	26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = 4096;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

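/*
 * Usage note (a sketch, not part of the original file): both knobs live
 * under the debug sysctl tree, e.g. "sysctl debug.bpf_bufsize=32768".
 * A descriptor picks up bpf_bufsize when it is opened; BIOCSBLEN may
 * later grow its buffer, clamped to debug.bpf_maxbufsize.
 */
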
/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;
static struct mtx	bpf_mtx;		/* bpf global lock */

static int	bpf_allocbufs(struct bpf_d *);
static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static void	bpf_detachd(struct bpf_d *d);
static void	bpf_freed(struct bpf_d *);
static void	bpf_mcopy(const void *, void *, size_t);
static int	bpf_movein(struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *);
static int	bpf_setif(struct bpf_d *, struct ifreq *);
static void	bpf_timed_out(void *);
static __inline void
		bpf_wakeup(struct bpf_d *);
static void	catchpacket(struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t));
static void	reset_d(struct bpf_d *);
static int	bpf_setf(struct bpf_d *, struct bpf_program *);
static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
static int	bpf_setdlt(struct bpf_d *, u_int);
static void	filt_bpfdetach(struct knote *);
static int	filt_bpfread(struct knote *, long);

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;
static	d_kqfilter_t	bpfkqfilter;

static struct cdevsw bpf_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_NEEDGIANT,
	.d_open =	bpfopen,
	.d_close =	bpfclose,
	.d_read =	bpfread,
	.d_write =	bpfwrite,
	.d_ioctl =	bpfioctl,
	.d_poll =	bpfpoll,
	.d_name =	"bpf",
	.d_kqfilter =	bpfkqfilter,
};

static struct filterops bpfread_filtops =
	{ 1, NULL, filt_bpfdetach, filt_bpfread };

static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	struct uio *uio;
	int linktype, *datlen;
	struct mbuf **mp;
	struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_FDDI:
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_ATM_RFC1483:
		/*
		 * The en(4) ATM driver requires a 4-byte ATM pseudo header.
		 * Though it isn't standard, the vpi:vci needs to be
		 * specified anyway.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

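	/*
	 * Worked example (a sketch): an 80-byte DLT_EN10MB write arrives
	 * with hlen = ETHER_HDR_LEN (14), so *datlen becomes 66; the
	 * Ethernet header is copied into sockp->sa_data below and the
	 * mbuf carries the remaining 66 payload bytes.
	 */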
	if (len > MHLEN) {
		m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
	} else {
		MGETHDR(m, M_TRYWAIT, MT_DATA);
	}
	if (m == NULL)
		return (ENOBUFS);
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;

	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = uiomove(sockp->sa_data, hlen, uio);
		if (error)
			goto bad;
	}
	error = uiomove(mtod(m, void *), len - hlen, uio);
	if (!error)
		return (0);
bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	BPFIF_LOCK(bp);
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	*bp->bif_driverp = bp;
	BPFIF_UNLOCK(bp);
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_d **p;
	struct bpf_if *bp;

	/* XXX locking */
	bp = d->bd_bif;
	d->bd_bif = 0;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged.
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			if_printf(bp->bif_ifp,
				"bpf_detach: ifpromisc failed (%d)\n", error);
		}
	}
	/* Remove d from the interface's descriptor list. */
	BPFIF_LOCK(bp);
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = 0;
	BPFIF_UNLOCK(bp);
}

/*
 * Open ethernet device.  Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, td)
	struct cdev *dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d;

	mtx_lock(&bpf_mtx);
	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d) {
		mtx_unlock(&bpf_mtx);
		return (EBUSY);
	}
	dev->si_drv1 = (struct bpf_d *)~0;	/* mark device in use */
	mtx_unlock(&bpf_mtx);

	if ((dev->si_flags & SI_NAMED) == 0)
		make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
		    "bpf%d", dev2unit(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;
#ifdef MAC
	mac_init_bpfdesc(d);
	mac_create_bpfdesc(td->td_ucred, d);
#endif
	mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
	callout_init(&d->bd_callout, CALLOUT_MPSAFE);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, td)
	struct cdev *dev;
	int flags;
	int fmt;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);
	funsetown(&d->bd_sigio);
	mtx_lock(&bpf_mtx);
	if (d->bd_bif)
		bpf_detachd(d);
	mtx_unlock(&bpf_mtx);
#ifdef MAC
	mac_destroy_bpfdesc(d);
#endif /* MAC */
	bpf_freed(d);
	dev->si_drv1 = 0;
	free(d, M_BPF);

	return (0);
}


/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
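/*
 * Note (a sketch of the invariant, not new behavior): callers hold the
 * descriptor lock and must ensure bd_fbuf is valid before rotating;
 * bpfread() only rotates while the hold slot is empty, and catchpacket()
 * checks bd_fbuf explicitly, dropping the packet when no free buffer
 * is available.
 */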
/*
 * bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	struct cdev *dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	int timed_out;
	int error;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
			/*
			 * One or more packets either arrived since the
			 * previous read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			BPFD_UNLOCK(d);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY) {
			BPFD_UNLOCK(d);
			return (EWOULDBLOCK);
		}
		error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
		     "bpf", d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			BPFD_UNLOCK(d);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				BPFD_UNLOCK(d);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	BPFD_UNLOCK(d);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is d->bd_bufsize bytes.
	 */
	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);

	BPFD_LOCK(d);
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	BPFD_UNLOCK(d);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static __inline void
bpf_wakeup(d)
	struct bpf_d *d;
{
	if (d->bd_state == BPF_WAITING) {
		callout_stop(&d->bd_callout);
		d->bd_state = BPF_IDLE;
	}
	wakeup(d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeuppri(&d->bd_sel, PRINET);
	KNOTE(&d->bd_sel.si_note, 0);
}

static void
bpf_timed_out(arg)
	void *arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING) {
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0)
			bpf_wakeup(d);
	}
	BPFD_UNLOCK(d);
}

static	int
bpfwrite(dev, uio, ioflag)
	struct cdev *dev;
	struct uio *uio;
	int ioflag;
{
	struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	bzero(&dst, sizeof(dst));
	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu)
		return (EMSGSIZE);

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

#ifdef MAC
	BPFD_LOCK(d);
	mac_create_mbuf_from_bpfdesc(d, m);
	BPFD_UNLOCK(d);
#endif
	NET_LOCK_GIANT();
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
	NET_UNLOCK_GIANT();
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
static void
reset_d(d)
	struct bpf_d *d;
{

	mtx_assert(&d->bd_mtx, MA_OWNED);
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag
 *  BIOCSHDRCMPLT	Set "header already complete" flag
 *  BIOCGSEESENT	Get "see packets sent" flag
 *  BIOCSSEESENT	Set "see packets sent" flag
 */
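/*
 * Example (userland sketch, not part of this file; "em0" is a
 * hypothetical interface name): the canonical capture setup binds a
 * descriptor to an interface and reads with a buffer of exactly
 * BIOCGBLEN bytes, as bpfread() above requires:
 *
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);
 *	u_int one = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &one);
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);
 *
 * Each read returns zero or more BPF_WORDALIGN()ed bpf_hdr records.
 */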
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, td)
	struct cdev *dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct thread *td;
{
	struct bpf_d *d = dev->si_drv1;
	int error = 0;

	BPFD_LOCK(d);
	if (d->bd_state == BPF_WAITING)
		callout_stop(&d->bd_callout);
	d->bd_state = BPF_IDLE;
	BPFD_UNLOCK(d);

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			BPFD_LOCK(d);
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			BPFD_UNLOCK(d);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			mtx_lock(&Giant);
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			mtx_unlock(&Giant);
			if (error == 0)
				d->bd_promisc = 1;
		}
		break;

	/*
	 * Get current data link type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get a list of supported data link types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Set data link type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			error = bpf_setdlt(d, *(u_int *)addr);
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			strlcpy(ifr->ifr_name, ifp->if_xname,
			    sizeof(ifr->ifr_name));
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}
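	/*
	 * Worked example (assuming hz = 100): a 250 ms timeout converts
	 * to 26 ticks via tvtohz() and is stored as 25; BIOCGRTIMEOUT
	 * below then reports it back as 250000 microseconds.
	 */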

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(&d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(&d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

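/*
 * Example (userland sketch): the simplest valid program for BIOCSETF
 * is a single accept-all return, where the BPF_K operand is the
 * snapshot length (-1 captures whole packets; 0 would reject all):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 */
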
/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		BPFD_LOCK(d);
		d->bd_filter = 0;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		BPFD_LOCK(d);
		d->bd_filter = fcode;
		reset_d(d);
		BPFD_UNLOCK(d);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/* skip additional entry */
		if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
			continue;

		mtx_unlock(&bpf_mtx);
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		BPFD_LOCK(d);
		reset_d(d);
		BPFD_UNLOCK(d);
		return (0);
	}
	mtx_unlock(&bpf_mtx);
	/* Not found. */
	return (ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
static int
bpfpoll(dev, events, td)
	struct cdev *dev;
	int events;
	struct thread *td;
{
	struct bpf_d *d;
	int revents;

	d = dev->si_drv1;
	if (d->bd_bif == NULL)
		return (ENXIO);

	revents = events & (POLLOUT | POLLWRNORM);
	BPFD_LOCK(d);
	if (events & (POLLIN | POLLRDNORM)) {
		if (bpf_ready(d))
			revents |= events & (POLLIN | POLLRDNORM);
		else {
			selrecord(td, &d->bd_sel);
			/* Start the read timeout if necessary. */
			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
				callout_reset(&d->bd_callout, d->bd_rtout,
				    bpf_timed_out, d);
				d->bd_state = BPF_WAITING;
			}
		}
	}
	BPFD_UNLOCK(d);
	return (revents);
}

/*
 * Support for kevent() system call.  Register EVFILT_READ filters and
 * reject all others.
 */
int
bpfkqfilter(dev, kn)
	struct cdev *dev;
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)dev->si_drv1;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &bpfread_filtops;
	kn->kn_hook = d;
	BPFD_LOCK(d);
	SLIST_INSERT_HEAD(&d->bd_sel.si_note, kn, kn_selnext);
	BPFD_UNLOCK(d);

	return (0);
}
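
/*
 * Example (userland sketch): a bpf descriptor registered with kqueue
 * fires when buffered bytes are ready; kn_data in filt_bpfread() below
 * carries the byte count:
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */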

static void
filt_bpfdetach(kn)
	struct knote *kn;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;

	BPFD_LOCK(d);
	SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
	BPFD_UNLOCK(d);
}

static int
filt_bpfread(kn, hint)
	struct knote *kn;
	long hint;
{
	struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
	int ready;

	BPFD_LOCK(d);
	ready = bpf_ready(d);
	if (ready) {
		kn->kn_data = d->bd_slen;
		if (d->bd_hbuf)
			kn->kn_data += d->bd_hlen;
	}
	else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		callout_reset(&d->bd_callout, d->bd_rtout,
		    bpf_timed_out, d);
		d->bd_state = BPF_WAITING;
	}
	BPFD_UNLOCK(d);

	return (ready);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(bp, pkt, pktlen)
	struct bpf_if *bp;
	u_char *pkt;
	u_int pktlen;
{
	struct bpf_d *d;
	u_int slen;

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0) {
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, pkt, pktlen, slen, bcopy);
		}
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	size_t len;
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(bp, m)
	struct bpf_if *bp;
	struct mbuf *m;
{
	struct bpf_d *d;
	u_int pktlen, slen;

	pktlen = m_length(m, NULL);
	if (pktlen == m->m_len) {
		bpf_tap(bp, mtod(m, u_char *), pktlen);
		return;
	}

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)m, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Incoming linkage from device drivers, when packet is in
 * an mbuf chain and to be prepended by a contiguous header.
 */
void
bpf_mtap2(bp, data, dlen, m)
	struct bpf_if *bp;
	void *data;
	u_int dlen;
	struct mbuf *m;
{
	struct mbuf mb;
	struct bpf_d *d;
	u_int pktlen, slen;

	pktlen = m_length(m, NULL);
	/*
	 * Craft on-stack mbuf suitable for passing to bpf_filter.
	 * Note that we cut corners here; we only set up what's
	 * absolutely needed--this mbuf should never go anywhere else.
	 */
	mb.m_next = m;
	mb.m_data = data;
	mb.m_len = dlen;
	pktlen += dlen;

	BPFIF_LOCK(bp);
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		BPFD_LOCK(d);
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)&mb, pktlen, 0);
		if (slen != 0)
#ifdef MAC
			if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
#endif
				catchpacket(d, (u_char *)&mb, pktlen, slen,
				    bpf_mcopy);
		BPFD_UNLOCK(d);
	}
	BPFIF_UNLOCK(bp);
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  "cpfn" is the routine called to do the actual data
 * transfer.  bcopy is passed in to copy contiguous chunks, while
 * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
 * pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	struct bpf_d *d;
	u_char *pkt;
	u_int pktlen, snaplen;
	void (*cpfn)(const void *, void *, size_t);
{
	struct bpf_hdr *hp;
	int totlen, curlen;
	int hdrlen = d->bd_bif->bif_hdrlen;

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call.  A packet
		 * arrived, so the reader should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	microtime(&hp->bh_tstamp);
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_BPF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
	mtx_destroy(&d->bd_mtx);
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{

	bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached, dlt is the link layer type,
 * and hdrlen is the fixed size of the link header (variable length
 * headers are not yet supported).
 */
void
bpfattach2(ifp, dlt, hdrlen, driverp)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
	struct bpf_if **driverp;
{
	struct bpf_if *bp;
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_driverp = driverp;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;
	mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);

	mtx_lock(&bpf_mtx);
	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;
	mtx_unlock(&bpf_mtx);

	*bp->bif_driverp = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
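	/*
	 * Worked example (assuming 4-byte alignment and an 18-byte
	 * bpf_hdr): for DLT_EN10MB, hdrlen is 14, BPF_WORDALIGN(14 + 18)
	 * is 32, so bif_hdrlen becomes 18 and the network layer header
	 * lands on a longword boundary at offset 32.
	 */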

	if (bootverbose)
		if_printf(ifp, "bpf attached\n");
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	struct bpf_if	*bp, *bp_prev;
	struct bpf_d	*d;

	/* Locate BPF interface information */
	bp_prev = NULL;

	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)
			break;
		bp_prev = bp;
	}

	/* Interface wasn't attached */
	if ((bp == NULL) || (bp->bif_ifp == NULL)) {
		mtx_unlock(&bpf_mtx);
		printf("bpfdetach: %s was not attached\n", ifp->if_xname);
		return;
	}

	if (bp_prev) {
		bp_prev->bif_next = bp->bif_next;
	} else {
		bpf_iflist = bp->bif_next;
	}
	mtx_unlock(&bpf_mtx);

	while ((d = bp->bif_dlist) != NULL) {
		bpf_detachd(d);
		BPFD_LOCK(d);
		bpf_wakeup(d);
		BPFD_UNLOCK(d);
	}

	mtx_destroy(&bp->bif_mtx);
	free(bp, M_BPF);
}

/*
 * Get a list of the available data link types of the interface.
 */
static int
bpf_getdltlist(d, bfl)
	struct bpf_d *d;
	struct bpf_dltlist *bfl;
{
	int n, error;
	struct ifnet *ifp;
	struct bpf_if *bp;

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;
	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len) {
				mtx_unlock(&bpf_mtx);
				return (ENOMEM);
			}
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
		}
		n++;
	}
	mtx_unlock(&bpf_mtx);
	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(d, dlt)
	struct bpf_d *d;
	u_int dlt;
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	ifp = d->bd_bif->bif_ifp;
	mtx_lock(&bpf_mtx);
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
			break;
	}
	mtx_unlock(&bpf_mtx);
	if (bp != NULL) {
		BPFD_LOCK(d);
		opromisc = d->bd_promisc;
		bpf_detachd(d);
		bpf_attachd(d, bp);
		reset_d(d);
		BPFD_UNLOCK(d);
		if (opromisc) {
			error = ifpromisc(bp->bif_ifp, 1);
			if (error)
				if_printf(bp->bif_ifp,
					"bpf_setdlt: ifpromisc failed (%d)\n",
					error);
			else
				d->bd_promisc = 1;
		}
	}
	return (bp == NULL ? EINVAL : 0);
}

static void	bpf_drvinit(void *unused);

static void	bpf_clone(void *arg, char *name, int namelen, struct cdev **dev);

static void
bpf_clone(arg, name, namelen, dev)
	void *arg;
	char *name;
	int namelen;
	struct cdev **dev;
{
	int u;

37 */
38
39#include "opt_bpf.h"
40#include "opt_mac.h"
41#include "opt_netgraph.h"
42
43#include <sys/types.h>
44#include <sys/param.h>
45#include <sys/systm.h>
46#include <sys/conf.h>
47#include <sys/mac.h>
48#include <sys/malloc.h>
49#include <sys/mbuf.h>
50#include <sys/time.h>
51#include <sys/proc.h>
52#include <sys/signalvar.h>
53#include <sys/filio.h>
54#include <sys/sockio.h>
55#include <sys/ttycom.h>
56#include <sys/filedesc.h>
57
58#include <sys/event.h>
59#include <sys/file.h>
60#include <sys/poll.h>
61#include <sys/proc.h>
62
63#include <sys/socket.h>
64#include <sys/vnode.h>
65
66#include <net/if.h>
67#include <net/bpf.h>
68#include <net/bpfdesc.h>
69
70#include <netinet/in.h>
71#include <netinet/if_ether.h>
72#include <sys/kernel.h>
73#include <sys/sysctl.h>
74
75static MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
76
77#if defined(DEV_BPF) || defined(NETGRAPH_BPF)
78
79#define PRINET 26 /* interruptible */
80
81/*
82 * The default read buffer size is patchable.
83 */
84static int bpf_bufsize = 4096;
85SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
86 &bpf_bufsize, 0, "");
87static int bpf_maxbufsize = BPF_MAXBUFSIZE;
88SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
89 &bpf_maxbufsize, 0, "");
90
91/*
92 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
93 */
94static struct bpf_if *bpf_iflist;
95static struct mtx bpf_mtx; /* bpf global lock */
96
97static int bpf_allocbufs(struct bpf_d *);
98static void bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
99static void bpf_detachd(struct bpf_d *d);
100static void bpf_freed(struct bpf_d *);
101static void bpf_mcopy(const void *, void *, size_t);
102static int bpf_movein(struct uio *, int,
103 struct mbuf **, struct sockaddr *, int *);
104static int bpf_setif(struct bpf_d *, struct ifreq *);
105static void bpf_timed_out(void *);
106static __inline void
107 bpf_wakeup(struct bpf_d *);
108static void catchpacket(struct bpf_d *, u_char *, u_int,
109 u_int, void (*)(const void *, void *, size_t));
110static void reset_d(struct bpf_d *);
111static int bpf_setf(struct bpf_d *, struct bpf_program *);
112static int bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
113static int bpf_setdlt(struct bpf_d *, u_int);
114static void filt_bpfdetach(struct knote *);
115static int filt_bpfread(struct knote *, long);
116
117static d_open_t bpfopen;
118static d_close_t bpfclose;
119static d_read_t bpfread;
120static d_write_t bpfwrite;
121static d_ioctl_t bpfioctl;
122static d_poll_t bpfpoll;
123static d_kqfilter_t bpfkqfilter;
124
125static struct cdevsw bpf_cdevsw = {
126 .d_version = D_VERSION,
127 .d_flags = D_NEEDGIANT,
128 .d_open = bpfopen,
129 .d_close = bpfclose,
130 .d_read = bpfread,
131 .d_write = bpfwrite,
132 .d_ioctl = bpfioctl,
133 .d_poll = bpfpoll,
134 .d_name = "bpf",
135 .d_kqfilter = bpfkqfilter,
136};
137
138static struct filterops bpfread_filtops =
139 { 1, NULL, filt_bpfdetach, filt_bpfread };
140
141static int
142bpf_movein(uio, linktype, mp, sockp, datlen)
143 struct uio *uio;
144 int linktype, *datlen;
145 struct mbuf **mp;
146 struct sockaddr *sockp;
147{
148 struct mbuf *m;
149 int error;
150 int len;
151 int hlen;
152
153 /*
154 * Build a sockaddr based on the data link layer type.
155 * We do this at this level because the ethernet header
156 * is copied directly into the data field of the sockaddr.
157 * In the case of SLIP, there is no header and the packet
158 * is forwarded as is.
159 * Also, we are careful to leave room at the front of the mbuf
160 * for the link level header.
161 */
162 switch (linktype) {
163
164 case DLT_SLIP:
165 sockp->sa_family = AF_INET;
166 hlen = 0;
167 break;
168
169 case DLT_EN10MB:
170 sockp->sa_family = AF_UNSPEC;
171 /* XXX Would MAXLINKHDR be better? */
172 hlen = ETHER_HDR_LEN;
173 break;
174
175 case DLT_FDDI:
176 sockp->sa_family = AF_IMPLINK;
177 hlen = 0;
178 break;
179
180 case DLT_RAW:
181 case DLT_NULL:
182 sockp->sa_family = AF_UNSPEC;
183 hlen = 0;
184 break;
185
186 case DLT_ATM_RFC1483:
187 /*
188 * en atm driver requires 4-byte atm pseudo header.
189 * though it isn't standard, vpi:vci needs to be
190 * specified anyway.
191 */
192 sockp->sa_family = AF_UNSPEC;
193 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
194 break;
195
196 case DLT_PPP:
197 sockp->sa_family = AF_UNSPEC;
198 hlen = 4; /* This should match PPP_HDRLEN */
199 break;
200
201 default:
202 return (EIO);
203 }
204
205 len = uio->uio_resid;
206 *datlen = len - hlen;
207 if ((unsigned)len > MCLBYTES)
208 return (EIO);
209
210 if (len > MHLEN) {
211 m = m_getcl(M_TRYWAIT, MT_DATA, M_PKTHDR);
212 } else {
213 MGETHDR(m, M_TRYWAIT, MT_DATA);
214 }
215 if (m == NULL)
216 return (ENOBUFS);
217 m->m_pkthdr.len = m->m_len = len;
218 m->m_pkthdr.rcvif = NULL;
219 *mp = m;
220
221 /*
222 * Make room for link header.
223 */
224 if (hlen != 0) {
225 m->m_pkthdr.len -= hlen;
226 m->m_len -= hlen;
227#if BSD >= 199103
228 m->m_data += hlen; /* XXX */
229#else
230 m->m_off += hlen;
231#endif
232 error = uiomove(sockp->sa_data, hlen, uio);
233 if (error)
234 goto bad;
235 }
236 error = uiomove(mtod(m, void *), len - hlen, uio);
237 if (!error)
238 return (0);
239bad:
240 m_freem(m);
241 return (error);
242}
243
244/*
245 * Attach file to the bpf interface, i.e. make d listen on bp.
246 */
247static void
248bpf_attachd(d, bp)
249 struct bpf_d *d;
250 struct bpf_if *bp;
251{
252 /*
253 * Point d at bp, and add d to the interface's list of listeners.
254 * Finally, point the driver's bpf cookie at the interface so
255 * it will divert packets to bpf.
256 */
257 BPFIF_LOCK(bp);
258 d->bd_bif = bp;
259 d->bd_next = bp->bif_dlist;
260 bp->bif_dlist = d;
261
262 *bp->bif_driverp = bp;
263 BPFIF_UNLOCK(bp);
264}
265
266/*
267 * Detach a file from its interface.
268 */
269static void
270bpf_detachd(d)
271 struct bpf_d *d;
272{
273 int error;
274 struct bpf_d **p;
275 struct bpf_if *bp;
276
277 /* XXX locking */
278 bp = d->bd_bif;
279 d->bd_bif = 0;
280 /*
281 * Check if this descriptor had requested promiscuous mode.
282 * If so, turn it off.
283 */
284 if (d->bd_promisc) {
285 d->bd_promisc = 0;
286 error = ifpromisc(bp->bif_ifp, 0);
287 if (error != 0 && error != ENXIO) {
288 /*
289 * ENXIO can happen if a pccard is unplugged
290 * Something is really wrong if we were able to put
291 * the driver into promiscuous mode, but can't
292 * take it out.
293 */
294 if_printf(bp->bif_ifp,
295 "bpf_detach: ifpromisc failed (%d)\n", error);
296 }
297 }
298 /* Remove d from the interface's descriptor list. */
299 BPFIF_LOCK(bp);
300 p = &bp->bif_dlist;
301 while (*p != d) {
302 p = &(*p)->bd_next;
303 if (*p == 0)
304 panic("bpf_detachd: descriptor not in list");
305 }
306 *p = (*p)->bd_next;
307 if (bp->bif_dlist == 0)
308 /*
309 * Let the driver know that there are no more listeners.
310 */
311 *bp->bif_driverp = 0;
312 BPFIF_UNLOCK(bp);
313}
314
315/*
316 * Open ethernet device. Returns ENXIO for illegal minor device number,
317 * EBUSY if file is open by another process.
318 */
319/* ARGSUSED */
320static int
321bpfopen(dev, flags, fmt, td)
322 struct cdev *dev;
323 int flags;
324 int fmt;
325 struct thread *td;
326{
327 struct bpf_d *d;
328
329 mtx_lock(&bpf_mtx);
330 d = dev->si_drv1;
331 /*
332 * Each minor can be opened by only one process. If the requested
333 * minor is in use, return EBUSY.
334 */
335 if (d) {
336 mtx_unlock(&bpf_mtx);
337 return (EBUSY);
338 }
339 dev->si_drv1 = (struct bpf_d *)~0; /* mark device in use */
340 mtx_unlock(&bpf_mtx);
341
342 if ((dev->si_flags & SI_NAMED) == 0)
343 make_dev(&bpf_cdevsw, minor(dev), UID_ROOT, GID_WHEEL, 0600,
344 "bpf%d", dev2unit(dev));
345 MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
346 dev->si_drv1 = d;
347 d->bd_bufsize = bpf_bufsize;
348 d->bd_sig = SIGIO;
349 d->bd_seesent = 1;
350#ifdef MAC
351 mac_init_bpfdesc(d);
352 mac_create_bpfdesc(td->td_ucred, d);
353#endif
354 mtx_init(&d->bd_mtx, devtoname(dev), "bpf cdev lock", MTX_DEF);
355 callout_init(&d->bd_callout, CALLOUT_MPSAFE);
356
357 return (0);
358}
359
360/*
361 * Close the descriptor by detaching it from its interface,
362 * deallocating its buffers, and marking it free.
363 */
364/* ARGSUSED */
365static int
366bpfclose(dev, flags, fmt, td)
367 struct cdev *dev;
368 int flags;
369 int fmt;
370 struct thread *td;
371{
372 struct bpf_d *d = dev->si_drv1;
373
374 BPFD_LOCK(d);
375 if (d->bd_state == BPF_WAITING)
376 callout_stop(&d->bd_callout);
377 d->bd_state = BPF_IDLE;
378 BPFD_UNLOCK(d);
379 funsetown(&d->bd_sigio);
380 mtx_lock(&bpf_mtx);
381 if (d->bd_bif)
382 bpf_detachd(d);
383 mtx_unlock(&bpf_mtx);
384#ifdef MAC
385 mac_destroy_bpfdesc(d);
386#endif /* MAC */
387 bpf_freed(d);
388 dev->si_drv1 = 0;
389 free(d, M_BPF);
390
391 return (0);
392}
393
394
395/*
396 * Rotate the packet buffers in descriptor d. Move the store buffer
397 * into the hold slot, and the free buffer into the store slot.
398 * Zero the length of the new store buffer.
399 */
400#define ROTATE_BUFFERS(d) \
401 (d)->bd_hbuf = (d)->bd_sbuf; \
402 (d)->bd_hlen = (d)->bd_slen; \
403 (d)->bd_sbuf = (d)->bd_fbuf; \
404 (d)->bd_slen = 0; \
405 (d)->bd_fbuf = 0;
406/*
407 * bpfread - read next chunk of packets from buffers
408 */
409static int
410bpfread(dev, uio, ioflag)
411 struct cdev *dev;
412 struct uio *uio;
413 int ioflag;
414{
415 struct bpf_d *d = dev->si_drv1;
416 int timed_out;
417 int error;
418
419 /*
420 * Restrict application to use a buffer the same size as
421 * as kernel buffers.
422 */
423 if (uio->uio_resid != d->bd_bufsize)
424 return (EINVAL);
425
426 BPFD_LOCK(d);
427 if (d->bd_state == BPF_WAITING)
428 callout_stop(&d->bd_callout);
429 timed_out = (d->bd_state == BPF_TIMED_OUT);
430 d->bd_state = BPF_IDLE;
431 /*
432 * If the hold buffer is empty, then do a timed sleep, which
433 * ends when the timeout expires or when enough packets
434 * have arrived to fill the store buffer.
435 */
436 while (d->bd_hbuf == 0) {
437 if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
438 /*
439 * A packet(s) either arrived since the previous
440 * read or arrived while we were asleep.
441 * Rotate the buffers and return what's here.
442 */
443 ROTATE_BUFFERS(d);
444 break;
445 }
446
447 /*
448 * No data is available, check to see if the bpf device
449 * is still pointed at a real interface. If not, return
450 * ENXIO so that the userland process knows to rebind
451 * it before using it again.
452 */
453 if (d->bd_bif == NULL) {
454 BPFD_UNLOCK(d);
455 return (ENXIO);
456 }
457
458 if (ioflag & IO_NDELAY) {
459 BPFD_UNLOCK(d);
460 return (EWOULDBLOCK);
461 }
462 error = msleep(d, &d->bd_mtx, PRINET|PCATCH,
463 "bpf", d->bd_rtout);
464 if (error == EINTR || error == ERESTART) {
465 BPFD_UNLOCK(d);
466 return (error);
467 }
468 if (error == EWOULDBLOCK) {
469 /*
470 * On a timeout, return what's in the buffer,
471 * which may be nothing. If there is something
472 * in the store buffer, we can rotate the buffers.
473 */
474 if (d->bd_hbuf)
475 /*
476 * We filled up the buffer in between
477 * getting the timeout and arriving
478 * here, so we don't need to rotate.
479 */
480 break;
481
482 if (d->bd_slen == 0) {
483 BPFD_UNLOCK(d);
484 return (0);
485 }
486 ROTATE_BUFFERS(d);
487 break;
488 }
489 }
490 /*
491 * At this point, we know we have something in the hold slot.
492 */
493 BPFD_UNLOCK(d);
494
495 /*
496 * Move data from hold buffer into user space.
497 * We know the entire buffer is transferred since
498 * we checked above that the read buffer is bpf_bufsize bytes.
499 */
500 error = uiomove(d->bd_hbuf, d->bd_hlen, uio);
501
502 BPFD_LOCK(d);
503 d->bd_fbuf = d->bd_hbuf;
504 d->bd_hbuf = 0;
505 d->bd_hlen = 0;
506 BPFD_UNLOCK(d);
507
508 return (error);
509}
510
511
512/*
513 * If there are processes sleeping on this descriptor, wake them up.
514 */
515static __inline void
516bpf_wakeup(d)
517 struct bpf_d *d;
518{
519 if (d->bd_state == BPF_WAITING) {
520 callout_stop(&d->bd_callout);
521 d->bd_state = BPF_IDLE;
522 }
523 wakeup(d);
524 if (d->bd_async && d->bd_sig && d->bd_sigio)
525 pgsigio(&d->bd_sigio, d->bd_sig, 0);
526
527 selwakeuppri(&d->bd_sel, PRINET);
528 KNOTE(&d->bd_sel.si_note, 0);
529}
530
531static void
532bpf_timed_out(arg)
533 void *arg;
534{
535 struct bpf_d *d = (struct bpf_d *)arg;
536
537 BPFD_LOCK(d);
538 if (d->bd_state == BPF_WAITING) {
539 d->bd_state = BPF_TIMED_OUT;
540 if (d->bd_slen != 0)
541 bpf_wakeup(d);
542 }
543 BPFD_UNLOCK(d);
544}
545
546static int
547bpfwrite(dev, uio, ioflag)
548 struct cdev *dev;
549 struct uio *uio;
550 int ioflag;
551{
552 struct bpf_d *d = dev->si_drv1;
553 struct ifnet *ifp;
554 struct mbuf *m;
555 int error;
556 struct sockaddr dst;
557 int datlen;
558
559 if (d->bd_bif == 0)
560 return (ENXIO);
561
562 ifp = d->bd_bif->bif_ifp;
563
564 if (uio->uio_resid == 0)
565 return (0);
566
567 bzero(&dst, sizeof(dst));
568 error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
569 if (error)
570 return (error);
571
572 if (datlen > ifp->if_mtu)
573 return (EMSGSIZE);
574
575 if (d->bd_hdrcmplt)
576 dst.sa_family = pseudo_AF_HDRCMPLT;
577
578#ifdef MAC
579 BPFD_LOCK(d);
580 mac_create_mbuf_from_bpfdesc(d, m);
581 BPFD_UNLOCK(d);
582#endif
583 NET_LOCK_GIANT();
584 error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
585 NET_UNLOCK_GIANT();
586 /*
587 * The driver frees the mbuf.
588 */
589 return (error);
590}
591
592/*
593 * Reset a descriptor by flushing its packet buffer and clearing the
594 * receive and drop counts.
595 */
596static void
597reset_d(d)
598 struct bpf_d *d;
599{
600
601 mtx_assert(&d->bd_mtx, MA_OWNED);
602 if (d->bd_hbuf) {
603 /* Free the hold buffer. */
604 d->bd_fbuf = d->bd_hbuf;
605 d->bd_hbuf = 0;
606 }
607 d->bd_slen = 0;
608 d->bd_hlen = 0;
609 d->bd_rcount = 0;
610 d->bd_dcount = 0;
611}
612
613/*
614 * FIONREAD Check for read packet available.
615 * SIOCGIFADDR Get interface address - convenient hook to driver.
616 * BIOCGBLEN Get buffer len [for read()].
617 * BIOCSETF Set ethernet read filter.
618 * BIOCFLUSH Flush read packet buffer.
619 * BIOCPROMISC Put interface into promiscuous mode.
620 * BIOCGDLT Get link layer type.
621 * BIOCGETIF Get interface name.
622 * BIOCSETIF Set interface.
623 * BIOCSRTIMEOUT Set read timeout.
624 * BIOCGRTIMEOUT Get read timeout.
625 * BIOCGSTATS Get packet stats.
626 * BIOCIMMEDIATE Set immediate mode.
627 * BIOCVERSION Get filter language version.
628 * BIOCGHDRCMPLT Get "header already complete" flag
629 * BIOCSHDRCMPLT Set "header already complete" flag
630 * BIOCGSEESENT Get "see packets sent" flag
631 * BIOCSSEESENT Set "see packets sent" flag
632 */
633/* ARGSUSED */
634static int
635bpfioctl(dev, cmd, addr, flags, td)
636 struct cdev *dev;
637 u_long cmd;
638 caddr_t addr;
639 int flags;
640 struct thread *td;
641{
642 struct bpf_d *d = dev->si_drv1;
643 int error = 0;
644
645 BPFD_LOCK(d);
646 if (d->bd_state == BPF_WAITING)
647 callout_stop(&d->bd_callout);
648 d->bd_state = BPF_IDLE;
649 BPFD_UNLOCK(d);
650
651 switch (cmd) {
652
653 default:
654 error = EINVAL;
655 break;
656
657 /*
658 * Check for read packet available.
659 */
660 case FIONREAD:
661 {
662 int n;
663
664 BPFD_LOCK(d);
665 n = d->bd_slen;
666 if (d->bd_hbuf)
667 n += d->bd_hlen;
668 BPFD_UNLOCK(d);
669
670 *(int *)addr = n;
671 break;
672 }
673
674 case SIOCGIFADDR:
675 {
676 struct ifnet *ifp;
677
678 if (d->bd_bif == 0)
679 error = EINVAL;
680 else {
681 ifp = d->bd_bif->bif_ifp;
682 error = (*ifp->if_ioctl)(ifp, cmd, addr);
683 }
684 break;
685 }
686
687 /*
688 * Get buffer len [for read()].
689 */
690 case BIOCGBLEN:
691 *(u_int *)addr = d->bd_bufsize;
692 break;
693
694 /*
695 * Set buffer length.
696 */
697 case BIOCSBLEN:
698 if (d->bd_bif != 0)
699 error = EINVAL;
700 else {
701 u_int size = *(u_int *)addr;
702
703 if (size > bpf_maxbufsize)
704 *(u_int *)addr = size = bpf_maxbufsize;
705 else if (size < BPF_MINBUFSIZE)
706 *(u_int *)addr = size = BPF_MINBUFSIZE;
707 d->bd_bufsize = size;
708 }
709 break;
710
711 /*
712 * Set link layer read filter.
713 */
714 case BIOCSETF:
715 error = bpf_setf(d, (struct bpf_program *)addr);
716 break;
717
718 /*
719 * Flush read packet buffer.
720 */
721 case BIOCFLUSH:
722 BPFD_LOCK(d);
723 reset_d(d);
724 BPFD_UNLOCK(d);
725 break;
726
727 /*
728 * Put interface into promiscuous mode.
729 */
730 case BIOCPROMISC:
731 if (d->bd_bif == 0) {
732 /*
733 * No interface attached yet.
734 */
735 error = EINVAL;
736 break;
737 }
738 if (d->bd_promisc == 0) {
739 mtx_lock(&Giant);
740 error = ifpromisc(d->bd_bif->bif_ifp, 1);
741 mtx_unlock(&Giant);
742 if (error == 0)
743 d->bd_promisc = 1;
744 }
745 break;
746
747 /*
748 * Get current data link type.
749 */
750 case BIOCGDLT:
751 if (d->bd_bif == 0)
752 error = EINVAL;
753 else
754 *(u_int *)addr = d->bd_bif->bif_dlt;
755 break;
756
757 /*
758 * Get a list of supported data link types.
759 */
760 case BIOCGDLTLIST:
761 if (d->bd_bif == 0)
762 error = EINVAL;
763 else
764 error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
765 break;
766
767 /*
768 * Set data link type.
769 */
770 case BIOCSDLT:
771 if (d->bd_bif == 0)
772 error = EINVAL;
773 else
774 error = bpf_setdlt(d, *(u_int *)addr);
775 break;
776
777 /*
778 * Get interface name.
779 */
780 case BIOCGETIF:
781 if (d->bd_bif == 0)
782 error = EINVAL;
783 else {
784 struct ifnet *const ifp = d->bd_bif->bif_ifp;
785 struct ifreq *const ifr = (struct ifreq *)addr;
786
787 strlcpy(ifr->ifr_name, ifp->if_xname,
788 sizeof(ifr->ifr_name));
789 }
790 break;
791
792 /*
793 * Set interface.
794 */
795 case BIOCSETIF:
796 error = bpf_setif(d, (struct ifreq *)addr);
797 break;
798
799 /*
800 * Set read timeout.
801 */
802 case BIOCSRTIMEOUT:
803 {
804 struct timeval *tv = (struct timeval *)addr;
805
806		/*
807		 * Subtract 1 tick from tvtohz(): its one-tick round-up
808		 * is meant for one-shot timers, not a recurring timeout.
809		 */
810 if ((error = itimerfix(tv)) == 0)
811 d->bd_rtout = tvtohz(tv) - 1;
812 break;
813 }
814
815 /*
816 * Get read timeout.
817 */
818 case BIOCGRTIMEOUT:
819 {
820 struct timeval *tv = (struct timeval *)addr;
821
822 tv->tv_sec = d->bd_rtout / hz;
823 tv->tv_usec = (d->bd_rtout % hz) * tick;
824 break;
825 }
826
827 /*
828 * Get packet stats.
829 */
830 case BIOCGSTATS:
831 {
832 struct bpf_stat *bs = (struct bpf_stat *)addr;
833
834 bs->bs_recv = d->bd_rcount;
835 bs->bs_drop = d->bd_dcount;
836 break;
837 }
838
839 /*
840 * Set immediate mode.
841 */
842 case BIOCIMMEDIATE:
843 d->bd_immediate = *(u_int *)addr;
844 break;
845
846 case BIOCVERSION:
847 {
848 struct bpf_version *bv = (struct bpf_version *)addr;
849
850 bv->bv_major = BPF_MAJOR_VERSION;
851 bv->bv_minor = BPF_MINOR_VERSION;
852 break;
853 }
854
855 /*
856	 * Get "header already complete" flag.
857 */
858 case BIOCGHDRCMPLT:
859 *(u_int *)addr = d->bd_hdrcmplt;
860 break;
861
862 /*
863	 * Set "header already complete" flag.
864 */
865 case BIOCSHDRCMPLT:
866 d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
867 break;
868
869 /*
870	 * Get "see sent packets" flag.
871 */
872 case BIOCGSEESENT:
873 *(u_int *)addr = d->bd_seesent;
874 break;
875
876 /*
877	 * Set "see sent packets" flag.
878 */
879 case BIOCSSEESENT:
880 d->bd_seesent = *(u_int *)addr;
881 break;
882
883 case FIONBIO: /* Non-blocking I/O */
884 break;
885
886 case FIOASYNC: /* Send signal on receive packets */
887 d->bd_async = *(int *)addr;
888 break;
889
890 case FIOSETOWN:
891 error = fsetown(*(int *)addr, &d->bd_sigio);
892 break;
893
894 case FIOGETOWN:
895 *(int *)addr = fgetown(&d->bd_sigio);
896 break;
897
898	/* This is deprecated; FIOSETOWN should be used instead. */
899 case TIOCSPGRP:
900 error = fsetown(-(*(int *)addr), &d->bd_sigio);
901 break;
902
903	/* This is deprecated; FIOGETOWN should be used instead. */
904 case TIOCGPGRP:
905 *(int *)addr = -fgetown(&d->bd_sigio);
906 break;
907
908 case BIOCSRSIG: /* Set receive signal */
909 {
910 u_int sig;
911
912 sig = *(u_int *)addr;
913
914 if (sig >= NSIG)
915 error = EINVAL;
916 else
917 d->bd_sig = sig;
918 break;
919 }
920 case BIOCGRSIG:
921 *(u_int *)addr = d->bd_sig;
922 break;
923 }
924 return (error);
925}
926
927/*
928 * Set d's packet filter program to fp. If this file already has a filter,
929 * free it and replace it. Returns EINVAL for bogus requests.
930 */
931static int
932bpf_setf(d, fp)
933 struct bpf_d *d;
934 struct bpf_program *fp;
935{
936 struct bpf_insn *fcode, *old;
937 u_int flen, size;
938
939 old = d->bd_filter;
940 if (fp->bf_insns == 0) {
941 if (fp->bf_len != 0)
942 return (EINVAL);
943 BPFD_LOCK(d);
944 d->bd_filter = 0;
945 reset_d(d);
946 BPFD_UNLOCK(d);
947 if (old != 0)
948 free((caddr_t)old, M_BPF);
949 return (0);
950 }
951 flen = fp->bf_len;
952 if (flen > BPF_MAXINSNS)
953 return (EINVAL);
954
955 size = flen * sizeof(*fp->bf_insns);
956 fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
957 if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
958 bpf_validate(fcode, (int)flen)) {
959 BPFD_LOCK(d);
960 d->bd_filter = fcode;
961 reset_d(d);
962 BPFD_UNLOCK(d);
963 if (old != 0)
964 free((caddr_t)old, M_BPF);
965
966 return (0);
967 }
968 free((caddr_t)fcode, M_BPF);
969 return (EINVAL);
970}
971
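/*
 * For reference, the smallest filter a userland program might install
 * through BIOCSETF (a sketch; "fd" is assumed to be an open bpf
 * device). A return of (u_int)-1 captures the whole packet; 0 would
 * reject it:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *
 *	ioctl(fd, BIOCSETF, &prog);
 */
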
972/*
973 * Detach a file from its current interface (if attached at all) and attach
974 * to the interface indicated by the name stored in ifr.
975 * Return an errno or 0.
976 */
977static int
978bpf_setif(d, ifr)
979 struct bpf_d *d;
980 struct ifreq *ifr;
981{
982 struct bpf_if *bp;
983 int error;
984 struct ifnet *theywant;
985
986 theywant = ifunit(ifr->ifr_name);
987 if (theywant == 0)
988 return ENXIO;
989
990 /*
991 * Look through attached interfaces for the named one.
992 */
993 mtx_lock(&bpf_mtx);
994 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
995 struct ifnet *ifp = bp->bif_ifp;
996
997 if (ifp == 0 || ifp != theywant)
998 continue;
999 /* skip additional entry */
1000 if (bp->bif_driverp != (struct bpf_if **)&ifp->if_bpf)
1001 continue;
1002
1003 mtx_unlock(&bpf_mtx);
1004 /*
1005 * We found the requested interface.
1006 * If it's not up, return an error.
1007 * Allocate the packet buffers if we need to.
1008 * If we're already attached to requested interface,
1009 * just flush the buffer.
1010 */
1011 if ((ifp->if_flags & IFF_UP) == 0)
1012 return (ENETDOWN);
1013
1014 if (d->bd_sbuf == 0) {
1015 error = bpf_allocbufs(d);
1016 if (error != 0)
1017 return (error);
1018 }
1019 if (bp != d->bd_bif) {
1020 if (d->bd_bif)
1021 /*
1022 * Detach if attached to something else.
1023 */
1024 bpf_detachd(d);
1025
1026 bpf_attachd(d, bp);
1027 }
1028 BPFD_LOCK(d);
1029 reset_d(d);
1030 BPFD_UNLOCK(d);
1031 return (0);
1032 }
1033 mtx_unlock(&bpf_mtx);
1034 /* Not found. */
1035 return (ENXIO);
1036}
1037
1038/*
1039 * Support for select() and poll() system calls
1040 *
1041 * Return true iff the specific operation will not block indefinitely.
1042 * Otherwise, return false but make a note that a selwakeup() must be done.
1043 */
1044static int
1045bpfpoll(dev, events, td)
1046 struct cdev *dev;
1047 int events;
1048 struct thread *td;
1049{
1050 struct bpf_d *d;
1051 int revents;
1052
1053 d = dev->si_drv1;
1054 if (d->bd_bif == NULL)
1055 return (ENXIO);
1056
1057 revents = events & (POLLOUT | POLLWRNORM);
1058 BPFD_LOCK(d);
1059 if (events & (POLLIN | POLLRDNORM)) {
1060 if (bpf_ready(d))
1061 revents |= events & (POLLIN | POLLRDNORM);
1062 else {
1063 selrecord(td, &d->bd_sel);
1064 /* Start the read timeout if necessary. */
1065 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1066 callout_reset(&d->bd_callout, d->bd_rtout,
1067 bpf_timed_out, d);
1068 d->bd_state = BPF_WAITING;
1069 }
1070 }
1071 }
1072 BPFD_UNLOCK(d);
1073 return (revents);
1074}
1075
1076/*
1077 * Support for kevent() system call. Register EVFILT_READ filters and
1078 * reject all others.
1079 */
1080int
1081bpfkqfilter(dev, kn)
1082 struct cdev *dev;
1083 struct knote *kn;
1084{
1085 struct bpf_d *d = (struct bpf_d *)dev->si_drv1;
1086
1087 if (kn->kn_filter != EVFILT_READ)
1088 return (1);
1089
1090 kn->kn_fop = &bpfread_filtops;
1091 kn->kn_hook = d;
1092 BPFD_LOCK(d);
1093 SLIST_INSERT_HEAD(&d->bd_sel.si_note, kn, kn_selnext);
1094 BPFD_UNLOCK(d);
1095
1096 return (0);
1097}
1098
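/*
 * Illustrative userland registration (a sketch; "kq" from kqueue()
 * and an open bpf descriptor "fd" are assumptions):
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *
 * filt_bpfread() below then reports the bytes ready for reading in
 * kn_data.
 */
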
1099static void
1100filt_bpfdetach(kn)
1101 struct knote *kn;
1102{
1103 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1104
1105 BPFD_LOCK(d);
1106 SLIST_REMOVE(&d->bd_sel.si_note, kn, knote, kn_selnext);
1107 BPFD_UNLOCK(d);
1108}
1109
1110static int
1111filt_bpfread(kn, hint)
1112 struct knote *kn;
1113 long hint;
1114{
1115 struct bpf_d *d = (struct bpf_d *)kn->kn_hook;
1116 int ready;
1117
1118 BPFD_LOCK(d);
1119 ready = bpf_ready(d);
1120 if (ready) {
1121 kn->kn_data = d->bd_slen;
1122 if (d->bd_hbuf)
1123 kn->kn_data += d->bd_hlen;
1124 }
1125 else if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1126 callout_reset(&d->bd_callout, d->bd_rtout,
1127 bpf_timed_out, d);
1128 d->bd_state = BPF_WAITING;
1129 }
1130 BPFD_UNLOCK(d);
1131
1132 return (ready);
1133}
1134
1135/*
1136 * Incoming linkage from device drivers. Process the packet pkt, of length
1137 * pktlen, which is stored in a contiguous buffer. The packet is parsed
1138 * by each process' filter, and if accepted, stashed into the corresponding
1139 * buffer.
1140 */
1141void
1142bpf_tap(bp, pkt, pktlen)
1143 struct bpf_if *bp;
1144 u_char *pkt;
1145 u_int pktlen;
1146{
1147 struct bpf_d *d;
1148 u_int slen;
1149
1150 BPFIF_LOCK(bp);
1151 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1152 BPFD_LOCK(d);
1153 ++d->bd_rcount;
1154 slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
1155 if (slen != 0) {
1156#ifdef MAC
1157 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1158#endif
1159 catchpacket(d, pkt, pktlen, slen, bcopy);
1160 }
1161 BPFD_UNLOCK(d);
1162 }
1163 BPFIF_UNLOCK(bp);
1164}
1165
1166/*
1167 * Copy data from an mbuf chain into a buffer. This code is derived
1168 * from m_copydata in sys/uipc_mbuf.c.
1169 */
1170static void
1171bpf_mcopy(src_arg, dst_arg, len)
1172 const void *src_arg;
1173 void *dst_arg;
1174 size_t len;
1175{
1176 const struct mbuf *m;
1177 u_int count;
1178 u_char *dst;
1179
1180 m = src_arg;
1181 dst = dst_arg;
1182 while (len > 0) {
1183 if (m == 0)
1184 panic("bpf_mcopy");
1185 count = min(m->m_len, len);
1186 bcopy(mtod(m, void *), dst, count);
1187 m = m->m_next;
1188 dst += count;
1189 len -= count;
1190 }
1191}
1192
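/*
 * For context: a driver's receive path typically hands each mbuf to
 * bpf with something like (illustrative; "ifp" and "m" are the
 * driver's own variables):
 *
 *	if (ifp->if_bpf != NULL)
 *		bpf_mtap(ifp->if_bpf, m);
 *
 * which is essentially what the BPF_MTAP() macro in <net/bpf.h>
 * expands to.
 */
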
1193/*
1194 * Incoming linkage from device drivers, when packet is in an mbuf chain.
1195 */
1196void
1197bpf_mtap(bp, m)
1198 struct bpf_if *bp;
1199 struct mbuf *m;
1200{
1201 struct bpf_d *d;
1202 u_int pktlen, slen;
1203
1204 pktlen = m_length(m, NULL);
1205 if (pktlen == m->m_len) {
1206 bpf_tap(bp, mtod(m, u_char *), pktlen);
1207 return;
1208 }
1209
1210 BPFIF_LOCK(bp);
1211 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1212 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1213 continue;
1214 BPFD_LOCK(d);
1215 ++d->bd_rcount;
1216 slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
1217 if (slen != 0)
1218#ifdef MAC
1219 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1220#endif
1221 catchpacket(d, (u_char *)m, pktlen, slen,
1222 bpf_mcopy);
1223 BPFD_UNLOCK(d);
1224 }
1225 BPFIF_UNLOCK(bp);
1226}
1227
1228/*
1229 * Incoming linkage from device drivers, when packet is in
1230 * an mbuf chain and to be prepended by a contiguous header.
1231 */
1232void
1233bpf_mtap2(bp, data, dlen, m)
1234 struct bpf_if *bp;
1235 void *data;
1236 u_int dlen;
1237 struct mbuf *m;
1238{
1239 struct mbuf mb;
1240 struct bpf_d *d;
1241 u_int pktlen, slen;
1242
1243 pktlen = m_length(m, NULL);
1244 /*
1245 * Craft on-stack mbuf suitable for passing to bpf_filter.
1246	 * Note that we cut corners here; we only set up what's
1247 * absolutely needed--this mbuf should never go anywhere else.
1248 */
1249 mb.m_next = m;
1250 mb.m_data = data;
1251 mb.m_len = dlen;
1252 pktlen += dlen;
1253
1254 BPFIF_LOCK(bp);
1255 for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
1256 if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1257 continue;
1258 BPFD_LOCK(d);
1259 ++d->bd_rcount;
1260 slen = bpf_filter(d->bd_filter, (u_char *)&mb, pktlen, 0);
1261 if (slen != 0)
1262#ifdef MAC
1263 if (mac_check_bpfdesc_receive(d, bp->bif_ifp) == 0)
1264#endif
1265 catchpacket(d, (u_char *)&mb, pktlen, slen,
1266 bpf_mcopy);
1267 BPFD_UNLOCK(d);
1268 }
1269 BPFIF_UNLOCK(bp);
1270}
1271
1272/*
1273 * Move the packet data from interface memory (pkt) into the
1274 * store buffer. "cpfn" is the routine called to do the actual data
1275 * transfer. bcopy is passed in to copy contiguous chunks, while
1276 * bpf_mcopy is passed in to copy mbuf chains. In the latter case,
1277 * pkt is really an mbuf.
1278 */
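/*
 * For orientation: each descriptor cycles packets through three
 * buffers. The store buffer (bd_sbuf) fills with captured packets,
 * the hold buffer (bd_hbuf) is drained by read(), and the free buffer
 * (bd_fbuf) stands by. ROTATE_BUFFERS() promotes store to hold and
 * free to store, so a NULL bd_fbuf below means the reader has not yet
 * consumed the previous hold buffer.
 */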
1279static void
1280catchpacket(d, pkt, pktlen, snaplen, cpfn)
1281 struct bpf_d *d;
1282 u_char *pkt;
1283 u_int pktlen, snaplen;
1284 void (*cpfn)(const void *, void *, size_t);
1285{
1286 struct bpf_hdr *hp;
1287 int totlen, curlen;
1288 int hdrlen = d->bd_bif->bif_hdrlen;
1289
1290 /*
1291 * Figure out how many bytes to move. If the packet is
1292 * greater or equal to the snapshot length, transfer that
1293 * much. Otherwise, transfer the whole packet (unless
1294 * we hit the buffer size limit).
1295 */
1296 totlen = hdrlen + min(snaplen, pktlen);
1297 if (totlen > d->bd_bufsize)
1298 totlen = d->bd_bufsize;
1299
1300 /*
1301 * Round up the end of the previous packet to the next longword.
1302 */
1303 curlen = BPF_WORDALIGN(d->bd_slen);
1304 if (curlen + totlen > d->bd_bufsize) {
1305 /*
1306 * This packet will overflow the storage buffer.
1307 * Rotate the buffers if we can, then wakeup any
1308 * pending reads.
1309 */
1310 if (d->bd_fbuf == 0) {
1311 /*
1312 * We haven't completed the previous read yet,
1313 * so drop the packet.
1314 */
1315 ++d->bd_dcount;
1316 return;
1317 }
1318 ROTATE_BUFFERS(d);
1319 bpf_wakeup(d);
1320 curlen = 0;
1321 }
1322 else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT)
1323 /*
1324 * Immediate mode is set, or the read timeout has
1325 * already expired during a select call. A packet
1326 * arrived, so the reader should be woken up.
1327 */
1328 bpf_wakeup(d);
1329
1330 /*
1331 * Append the bpf header.
1332 */
1333 hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1334 microtime(&hp->bh_tstamp);
1335 hp->bh_datalen = pktlen;
1336 hp->bh_hdrlen = hdrlen;
1337 /*
1338 * Copy the packet data into the store buffer and update its length.
1339 */
1340 (*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1341 d->bd_slen = curlen + totlen;
1342}
1343
1344/*
1345 * Allocate the free and store buffers for a descriptor.
1346 */
1347static int
1348bpf_allocbufs(d)
1349 struct bpf_d *d;
1350{
1351 d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1352 if (d->bd_fbuf == 0)
1353 return (ENOBUFS);
1354
1355 d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
1356 if (d->bd_sbuf == 0) {
1357 free(d->bd_fbuf, M_BPF);
1358 return (ENOBUFS);
1359 }
1360 d->bd_slen = 0;
1361 d->bd_hlen = 0;
1362 return (0);
1363}
1364
1365/*
1366 * Free buffers currently in use by a descriptor.
1367 * Called on close.
1368 */
1369static void
1370bpf_freed(d)
1371 struct bpf_d *d;
1372{
1373 /*
1374 * We don't need to lock out interrupts since this descriptor has
1375 * been detached from its interface and has not yet been marked
1376 * free.
1377 */
1378 if (d->bd_sbuf != 0) {
1379 free(d->bd_sbuf, M_BPF);
1380 if (d->bd_hbuf != 0)
1381 free(d->bd_hbuf, M_BPF);
1382 if (d->bd_fbuf != 0)
1383 free(d->bd_fbuf, M_BPF);
1384 }
1385 if (d->bd_filter)
1386 free((caddr_t)d->bd_filter, M_BPF);
1387 mtx_destroy(&d->bd_mtx);
1388}
1389
1390/*
1391 * Attach an interface to bpf. dlt is the link layer type; hdrlen is the
1392 * fixed size of the link header (variable length headers not yet supported).
1393 */
1394void
1395bpfattach(ifp, dlt, hdrlen)
1396 struct ifnet *ifp;
1397 u_int dlt, hdrlen;
1398{
1399
1400 bpfattach2(ifp, dlt, hdrlen, &ifp->if_bpf);
1401}
1402
1403/*
1404 * Attach an interface to bpf. ifp is a pointer to the structure
1405 * defining the interface to be attached, dlt is the link layer type,
1406 * and hdrlen is the fixed size of the link header (variable length
1407 * headers are not yet supported).
1408 */
1409void
1410bpfattach2(ifp, dlt, hdrlen, driverp)
1411 struct ifnet *ifp;
1412 u_int dlt, hdrlen;
1413 struct bpf_if **driverp;
1414{
1415 struct bpf_if *bp;
1416 bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_NOWAIT | M_ZERO);
1417 if (bp == 0)
1418 panic("bpfattach");
1419
1420 bp->bif_dlist = 0;
1421 bp->bif_driverp = driverp;
1422 bp->bif_ifp = ifp;
1423 bp->bif_dlt = dlt;
1424 mtx_init(&bp->bif_mtx, "bpf interface lock", NULL, MTX_DEF);
1425
1426 mtx_lock(&bpf_mtx);
1427 bp->bif_next = bpf_iflist;
1428 bpf_iflist = bp;
1429 mtx_unlock(&bpf_mtx);
1430
1431 *bp->bif_driverp = 0;
1432
1433 /*
1434 * Compute the length of the bpf header. This is not necessarily
1435 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1436 * that the network layer header begins on a longword boundary (for
1437 * performance reasons and to alleviate alignment restrictions).
1438 */
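	/*
	 * Worked example (assuming an ILP32 machine, where
	 * SIZEOF_BPF_HDR is 18): with a 14-byte Ethernet header,
	 * BPF_WORDALIGN(14 + 18) - 14 = 32 - 14 = 18, so the captured
	 * data begins 32 bytes into each record, longword aligned.
	 */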
1439 bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1440
1441 if (bootverbose)
1442 if_printf(ifp, "bpf attached\n");
1443}
1444
1445/*
1446 * Detach bpf from an interface. This involves detaching each descriptor
1447 * associated with the interface, and leaving bd_bif NULL. Notify each
1448 * descriptor as it's detached so that any sleepers wake up and get
1449 * ENXIO.
1450 */
1451void
1452bpfdetach(ifp)
1453 struct ifnet *ifp;
1454{
1455 struct bpf_if *bp, *bp_prev;
1456 struct bpf_d *d;
1457
1458 /* Locate BPF interface information */
1459 bp_prev = NULL;
1460
1461 mtx_lock(&bpf_mtx);
1462 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1463 if (ifp == bp->bif_ifp)
1464 break;
1465 bp_prev = bp;
1466 }
1467
1468 /* Interface wasn't attached */
1469 if ((bp == NULL) || (bp->bif_ifp == NULL)) {
1470 mtx_unlock(&bpf_mtx);
1471 printf("bpfdetach: %s was not attached\n", ifp->if_xname);
1472 return;
1473 }
1474
1475 if (bp_prev) {
1476 bp_prev->bif_next = bp->bif_next;
1477 } else {
1478 bpf_iflist = bp->bif_next;
1479 }
1480 mtx_unlock(&bpf_mtx);
1481
1482 while ((d = bp->bif_dlist) != NULL) {
1483 bpf_detachd(d);
1484 BPFD_LOCK(d);
1485 bpf_wakeup(d);
1486 BPFD_UNLOCK(d);
1487 }
1488
1489 mtx_destroy(&bp->bif_mtx);
1490 free(bp, M_BPF);
1491}
1492
1493/*
1494 * Get a list of the available data link types for the interface.
1495 */
1496static int
1497bpf_getdltlist(d, bfl)
1498 struct bpf_d *d;
1499 struct bpf_dltlist *bfl;
1500{
1501 int n, error;
1502 struct ifnet *ifp;
1503 struct bpf_if *bp;
1504
1505 ifp = d->bd_bif->bif_ifp;
1506 n = 0;
1507 error = 0;
1508 mtx_lock(&bpf_mtx);
1509 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1510 if (bp->bif_ifp != ifp)
1511 continue;
1512 if (bfl->bfl_list != NULL) {
1513 if (n >= bfl->bfl_len) {
1514 mtx_unlock(&bpf_mtx);
1515 return (ENOMEM);
1516 }
1517 error = copyout(&bp->bif_dlt,
1518 bfl->bfl_list + n, sizeof(u_int));
1519 }
1520 n++;
1521 }
1522 mtx_unlock(&bpf_mtx);
1523 bfl->bfl_len = n;
1524 return (error);
1525}
1526
1527/*
1528 * Set the data link type of a BPF instance.
1529 */
1530static int
1531bpf_setdlt(d, dlt)
1532 struct bpf_d *d;
1533 u_int dlt;
1534{
1535 int error, opromisc;
1536 struct ifnet *ifp;
1537 struct bpf_if *bp;
1538
1539 if (d->bd_bif->bif_dlt == dlt)
1540 return (0);
1541 ifp = d->bd_bif->bif_ifp;
1542 mtx_lock(&bpf_mtx);
1543 for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1544 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1545 break;
1546 }
1547 mtx_unlock(&bpf_mtx);
1548 if (bp != NULL) {
1549 BPFD_LOCK(d);
1550 opromisc = d->bd_promisc;
1551 bpf_detachd(d);
1552 bpf_attachd(d, bp);
1553 reset_d(d);
1554 BPFD_UNLOCK(d);
1555 if (opromisc) {
1556 error = ifpromisc(bp->bif_ifp, 1);
1557 if (error)
1558 if_printf(bp->bif_ifp,
1559 "bpf_setdlt: ifpromisc failed (%d)\n",
1560 error);
1561 else
1562 d->bd_promisc = 1;
1563 }
1564 }
1565 return (bp == NULL ? EINVAL : 0);
1566}
1567
1568static void bpf_drvinit(void *unused);
1569
1570static void bpf_clone(void *arg, char *name, int namelen, struct cdev **dev);
1571
1572static void
1573bpf_clone(arg, name, namelen, dev)
1574 void *arg;
1575 char *name;
1576 int namelen;
1577 struct cdev **dev;
1578{
1579 int u;
1580
1581 if (*dev != NULL)
1582 return;
1583 if (dev_stdclone(name, NULL, "bpf", &u) != 1)
1584 return;
1585 *dev = make_dev(&bpf_cdevsw, unit2minor(u), UID_ROOT, GID_WHEEL, 0600,
1586 "bpf%d", u);
1587 (*dev)->si_flags |= SI_CHEAPCLONE;
1588 return;
1589}
1590
1591static void
1592bpf_drvinit(unused)
1593 void *unused;
1594{
1595
1596 mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
1597 EVENTHANDLER_REGISTER(dev_clone, bpf_clone, 0, 1000);
1598}
1599
1600SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE, bpf_drvinit, NULL)
1601
1602#else /* !DEV_BPF && !NETGRAPH_BPF */
1603/*
1604 * NOP stubs to allow bpf-using drivers to load and function.
1605 *
1606 * A 'better' implementation would allow the core bpf functionality
1607 * to be loaded at runtime.
1608 */
1609
1610void
1611bpf_tap(bp, pkt, pktlen)
1612 struct bpf_if *bp;
1613 u_char *pkt;
1614 u_int pktlen;
1615{
1616}
1617
1618void
1619bpf_mtap(bp, m)
1620 struct bpf_if *bp;
1621 struct mbuf *m;
1622{
1623}
1624
1625void
1626bpf_mtap2(bp, d, l, m)
1627 struct bpf_if *bp;
1628 void *d;
1629 u_int l;
1630 struct mbuf *m;
1631{
1632}
1633
1634void
1635bpfattach(ifp, dlt, hdrlen)
1636 struct ifnet *ifp;
1637 u_int dlt, hdrlen;
1638{
1639}
1640
1641void
1642bpfattach2(ifp, dlt, hdrlen, driverp)
1643 struct ifnet *ifp;
1644 u_int dlt, hdrlen;
1645 struct bpf_if **driverp;
1646{
1647}
1648
1649void
1650bpfdetach(ifp)
1651 struct ifnet *ifp;
1652{
1653}
1654
1655u_int
1656bpf_filter(pc, p, wirelen, buflen)
1657 const struct bpf_insn *pc;
1658 u_char *p;
1659 u_int wirelen;
1660 u_int buflen;
1661{
1662 return -1; /* "no filter" behaviour */
1663}
1664
1665int
1666bpf_validate(f, len)
1667 const struct bpf_insn *f;
1668 int len;
1669{
1670 return 0; /* false */
1671}
1672
1673#endif /* !DEV_BPF && !NETGRAPH_BPF */