bpf.c revision 61153
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: head/sys/net/bpf.c 61153 2000-06-01 21:57:13Z phk $
 */

#include "bpf.h"

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>

#if defined(sparc) && BSD < 199103
#include <sys/stream.h>
#endif
#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>

MALLOC_DEFINE(M_BPF, "BPF", "BPF data");

#if NBPF > 0

/*
 * Older BSDs don't have kernel malloc.
 */
#if BSD < 199103
extern bcopy();
static caddr_t bpf_alloc();
#include <net/bpf_compat.h>
#define BPF_BUFSIZE (MCLBYTES-8)
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, code, uio)
#else
#define BPF_BUFSIZE 4096
#define UIOMOVE(cp, len, code, uio) uiomove(cp, len, uio)
#endif

#define PRINET  26			/* interruptible */

/*
 * The default read buffer size is patchable.
 */
static int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
	&bpf_bufsize, 0, "");
static int bpf_maxbufsize = BPF_MAXBUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
	&bpf_maxbufsize, 0, "");

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 */
static struct bpf_if	*bpf_iflist;

static int	bpf_allocbufs __P((struct bpf_d *));
static void	bpf_attachd __P((struct bpf_d *d, struct bpf_if *bp));
static void	bpf_detachd __P((struct bpf_d *d));
static void	bpf_freed __P((struct bpf_d *));
static void	bpf_mcopy __P((const void *, void *, size_t));
static int	bpf_movein __P((struct uio *, int,
		    struct mbuf **, struct sockaddr *, int *));
static int	bpf_setif __P((struct bpf_d *, struct ifreq *));
static inline void
		bpf_wakeup __P((struct bpf_d *));
static void	catchpacket __P((struct bpf_d *, u_char *, u_int,
		    u_int, void (*)(const void *, void *, size_t)));
static void	reset_d __P((struct bpf_d *));
static int	 bpf_setf __P((struct bpf_d *, struct bpf_program *));

static	d_open_t	bpfopen;
static	d_close_t	bpfclose;
static	d_read_t	bpfread;
static	d_write_t	bpfwrite;
static	d_ioctl_t	bpfioctl;
static	d_poll_t	bpfpoll;

#define CDEV_MAJOR 23
static struct cdevsw bpf_cdevsw = {
	/* open */	bpfopen,
	/* close */	bpfclose,
	/* read */	bpfread,
	/* write */	bpfwrite,
	/* ioctl */	bpfioctl,
	/* poll */	bpfpoll,
	/* mmap */	nommap,
	/* strategy */	nostrategy,
	/* name */	"bpf",
	/* maj */	CDEV_MAJOR,
	/* dump */	nodump,
	/* psize */	nopsize,
	/* flags */	0,
	/* bmaj */	-1
};


static int
bpf_movein(uio, linktype, mp, sockp, datlen)
	register struct uio *uio;
	int linktype, *datlen;
	register struct mbuf **mp;
	register struct sockaddr *sockp;
{
	struct mbuf *m;
	int error;
	int len;
	int hlen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sockp->sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sockp->sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * The en ATM driver requires a 4-byte ATM pseudo header.
		 * Though it isn't standard, the VPI:VCI still needs to
		 * be specified.
		 */
		sockp->sa_family = AF_UNSPEC;
		hlen = 12; 	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	default:
		return (EIO);
	}

	len = uio->uio_resid;
	*datlen = len - hlen;
	if ((unsigned)len > MCLBYTES)
		return (EIO);

	MGETHDR(m, M_WAIT, MT_DATA);
	if (m == 0)
		return (ENOBUFS);
	if (len > MHLEN) {
#if BSD >= 199103
		MCLGET(m, M_WAIT);
		if ((m->m_flags & M_EXT) == 0) {
#else
		MCLGET(m);
		if (m->m_len != MCLBYTES) {
#endif
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_pkthdr.len = m->m_len = len;
	m->m_pkthdr.rcvif = NULL;
	*mp = m;
	/*
	 * Make room for link header.
	 */
	if (hlen != 0) {
		m->m_pkthdr.len -= hlen;
		m->m_len -= hlen;
#if BSD >= 199103
		m->m_data += hlen; /* XXX */
#else
		m->m_off += hlen;
#endif
		error = UIOMOVE((caddr_t)sockp->sa_data, hlen, UIO_WRITE, uio);
		if (error)
			goto bad;
	}
	error = UIOMOVE(mtod(m, caddr_t), len - hlen, UIO_WRITE, uio);
	if (!error)
		return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 * Must be called at splimp.
 */
static void
bpf_attachd(d, bp)
	struct bpf_d *d;
	struct bpf_if *bp;
{
	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;

	bp->bif_ifp->if_bpf = bp;
}

/*
 * Detach a file from its interface.
 */
static void
bpf_detachd(d)
	struct bpf_d *d;
{
	int error;
	struct bpf_d **p;
	struct bpf_if *bp;

	bp = d->bd_bif;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		d->bd_promisc = 0;
		error = ifpromisc(bp->bif_ifp, 0);
		if (error != 0 && error != ENXIO) {
			/*
			 * ENXIO can happen if a pccard is unplugged
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			printf("%s%d: ifpromisc failed %d\n",
			    bp->bif_ifp->if_name, bp->bif_ifp->if_unit, error);
		}
	}
	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0)
			panic("bpf_detachd: descriptor not in list");
	}
	*p = (*p)->bd_next;
	if (bp->bif_dlist == 0)
		/*
		 * Let the driver know that there are no more listeners.
		 */
		d->bd_bif->bif_ifp->if_bpf = 0;
	d->bd_bif = 0;
}

/*
 * Open the bpf device.  Returns ENXIO for an illegal minor device number,
 * EBUSY if the file is already open by another process.
 */
/* ARGSUSED */
static	int
bpfopen(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d;

	if (p->p_prison)
		return (EPERM);

	d = dev->si_drv1;
	/*
	 * Each minor can be opened by only one process.  If the requested
	 * minor is in use, return EBUSY.
	 */
	if (d)
		return (EBUSY);
	make_dev(&bpf_cdevsw, minor(dev), 0, 0, 0600, "bpf%d", lminor(dev));
	MALLOC(d, struct bpf_d *, sizeof(*d), M_BPF, M_WAITOK);
	bzero(d, sizeof(*d));
	dev->si_drv1 = d;
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_seesent = 1;

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
static	int
bpfclose(dev, flags, fmt, p)
	dev_t dev;
	int flags;
	int fmt;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	register int s;

	funsetown(d->bd_sigio);
	s = splimp();
	if (d->bd_bif)
		bpf_detachd(d);
	splx(s);
	bpf_freed(d);
	dev->si_drv1 = 0;
	FREE(d, M_BPF);

	return (0);
}

/*
 * Support for SunOS, which does not have tsleep.
 */
#if BSD < 199103
static
bpf_timeout(arg)
	caddr_t arg;
{
	struct bpf_d *d = (struct bpf_d *)arg;
	d->bd_timedout = 1;
	wakeup(arg);
}

#define BPF_SLEEP(chan, pri, s, t) bpf_sleep((struct bpf_d *)chan)

int
bpf_sleep(d)
	register struct bpf_d *d;
{
	register int rto = d->bd_rtout;
	register int st;

	if (rto != 0) {
		d->bd_timedout = 0;
		timeout(bpf_timeout, (caddr_t)d, rto);
	}
	st = sleep((caddr_t)d, PRINET|PCATCH);
	if (rto != 0) {
		if (d->bd_timedout == 0)
			untimeout(bpf_timeout, (caddr_t)d);
		else if (st == 0)
			return EWOULDBLOCK;
	}
	return (st != 0) ? EINTR : 0;
}
#else
#define BPF_SLEEP tsleep
#endif

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = 0;
/*
 *  bpfread - read next chunk of packets from buffers
 */
static	int
bpfread(dev, uio, ioflag)
	dev_t dev;
	register struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	int error;
	int s;

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize)
		return (EINVAL);

	s = splimp();
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived either since the
			 * previous read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available; check whether the bpf device
		 * is still pointed at a real interface.  If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			splx(s);
			return (ENXIO);
		}

		if (ioflag & IO_NDELAY)
			error = EWOULDBLOCK;
		else
			error = BPF_SLEEP((caddr_t)d, PRINET|PCATCH, "bpf",
					  d->bd_rtout);
		if (error == EINTR || error == ERESTART) {
			splx(s);
			return (error);
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				splx(s);
				return (0);
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	splx(s);

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = UIOMOVE(d->bd_hbuf, d->bd_hlen, UIO_READ, uio);

	s = splimp();
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = 0;
	d->bd_hlen = 0;
	splx(s);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static inline void
bpf_wakeup(d)
	register struct bpf_d *d;
{
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio)
		pgsigio(d->bd_sigio, d->bd_sig, 0);

#if BSD >= 199103
	selwakeup(&d->bd_sel);
	/* XXX */
	d->bd_sel.si_pid = 0;
#else
	if (d->bd_selproc) {
		selwakeup(d->bd_selproc, (int)d->bd_selcoll);
		d->bd_selcoll = 0;
		d->bd_selproc = 0;
	}
#endif
}

static	int
bpfwrite(dev, uio, ioflag)
	dev_t dev;
	struct uio *uio;
	int ioflag;
{
	register struct bpf_d *d = dev->si_drv1;
	struct ifnet *ifp;
	struct mbuf *m;
	int error, s;
	static struct sockaddr dst;
	int datlen;

	if (d->bd_bif == 0)
		return (ENXIO);

	ifp = d->bd_bif->bif_ifp;

	if (uio->uio_resid == 0)
		return (0);

	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, &m, &dst, &datlen);
	if (error)
		return (error);

	if (datlen > ifp->if_mtu) {
		/* Don't leak the mbuf chain allocated by bpf_movein(). */
		m_freem(m);
		return (EMSGSIZE);
	}

	if (d->bd_hdrcmplt)
		dst.sa_family = pseudo_AF_HDRCMPLT;

	s = splnet();
#if BSD >= 199103
	error = (*ifp->if_output)(ifp, m, &dst, (struct rtentry *)0);
#else
	error = (*ifp->if_output)(ifp, m, &dst);
#endif
	splx(s);
	/*
	 * The driver frees the mbuf.
	 */
	return (error);
}
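
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * from userland, write(2) on a bpf descriptor bound to a DLT_EN10MB
 * interface hands bpf_movein() a complete frame; the 14-byte Ethernet
 * header is peeled off into the sockaddr and the remainder becomes the
 * mbuf payload.  Roughly, assuming a descriptor already bound with
 * BIOCSETIF:
 *
 *	u_char frame[64];
 *	// ... fill in dst/src MAC, ethertype, payload ...
 *	if (write(fd, frame, sizeof(frame)) < 0)
 *		warn("bpf write");
 *
 * With BIOCSHDRCMPLT set, the link-level source address supplied in the
 * frame is used as-is instead of being filled in by the driver.
 */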

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.  Should be called at splimp.
 */
static void
reset_d(d)
	struct bpf_d *d;
{
	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = 0;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  SIOCGIFADDR		Get interface address - convenient hook to driver.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set link layer read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLT		Get link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 *  BIOCGSEESENT	Get "see packets sent" flag.
 *  BIOCSSEESENT	Set "see packets sent" flag.
 */
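
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * typical userland setup before calling read(2) on a bpf descriptor.
 * The device path and interface name below are only examples.
 *
 *	struct ifreq ifr;
 *	u_int bufsize, one = 1;
 *	int fd = open("/dev/bpf0", O_RDONLY);
 *
 *	strncpy(ifr.ifr_name, "ed0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		// bind to an interface
 *	ioctl(fd, BIOCGBLEN, &bufsize);		// size read() must use
 *	ioctl(fd, BIOCIMMEDIATE, &one);		// wake readers per packet
 *
 * bpfread() above insists that read(2) be issued with a buffer of exactly
 * bufsize bytes; anything else returns EINVAL.
 */
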
/* ARGSUSED */
static	int
bpfioctl(dev, cmd, addr, flags, p)
	dev_t dev;
	u_long cmd;
	caddr_t addr;
	int flags;
	struct proc *p;
{
	register struct bpf_d *d = dev->si_drv1;
	int s, error = 0;

	switch (cmd) {

	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			s = splimp();
			n = d->bd_slen;
			if (d->bd_hbuf)
				n += d->bd_hlen;
			splx(s);

			*(int *)addr = n;
			break;
		}

	case SIOCGIFADDR:
		{
			struct ifnet *ifp;

			if (d->bd_bif == 0)
				error = EINVAL;
			else {
				ifp = d->bd_bif->bif_ifp;
				error = (*ifp->if_ioctl)(ifp, cmd, addr);
			}
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
#if BSD < 199103
		error = EINVAL;
#else
		if (d->bd_bif != 0)
			error = EINVAL;
		else {
			register u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			d->bd_bufsize = size;
		}
#endif
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		s = splimp();
		reset_d(d);
		splx(s);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		s = splimp();
		if (d->bd_promisc == 0) {
			error = ifpromisc(d->bd_bif->bif_ifp, 1);
			if (error == 0)
				d->bd_promisc = 1;
		}
		splx(s);
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:
		if (d->bd_bif == 0)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == 0)
			error = EINVAL;
		else {
			struct ifnet *const ifp = d->bd_bif->bif_ifp;
			struct ifreq *const ifr = (struct ifreq *)addr;

			snprintf(ifr->ifr_name, sizeof(ifr->ifr_name),
			    "%s%d", ifp->if_name, ifp->if_unit);
		}
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			/*
			 * Subtract 1 tick from tvtohz() since this isn't
			 * a one-shot timer.
			 */
			if ((error = itimerfix(tv)) == 0)
				d->bd_rtout = tvtohz(tv) - 1;
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT:
		*(u_int *)addr = d->bd_seesent;
		break;

	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT:
		d->bd_seesent = *(u_int *)addr;
		break;

	case FIONBIO:		/* Non-blocking I/O */
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:
		error = fsetown(*(int *)addr, &d->bd_sigio);
		break;

	case FIOGETOWN:
		*(int *)addr = fgetown(d->bd_sigio);
		break;

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		error = fsetown(-(*(int *)addr), &d->bd_sigio);
		break;

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)addr = -fgetown(d->bd_sigio);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
		 	u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}
	return (error);
}

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
static int
bpf_setf(d, fp)
	struct bpf_d *d;
	struct bpf_program *fp;
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;
	int s;

	old = d->bd_filter;
	if (fp->bf_insns == 0) {
		if (fp->bf_len != 0)
			return (EINVAL);
		s = splimp();
		d->bd_filter = 0;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);
		return (0);
	}
	flen = fp->bf_len;
	if (flen > BPF_MAXINSNS)
		return (EINVAL);

	size = flen * sizeof(*fp->bf_insns);
	fcode = (struct bpf_insn *)malloc(size, M_BPF, M_WAITOK);
	if (copyin((caddr_t)fp->bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		s = splimp();
		d->bd_filter = fcode;
		reset_d(d);
		splx(s);
		if (old != 0)
			free((caddr_t)old, M_BPF);

		return (0);
	}
	free((caddr_t)fcode, M_BPF);
	return (EINVAL);
}
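
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * a minimal filter a userland program might install through BIOCSETF.
 * This one accepts every packet in full; real filters are usually
 * generated by libpcap.
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET+BPF_K, (u_int)-1),	// accept whole packet
 *	};
 *	struct bpf_program prog = { 1, insns };
 *
 *	if (ioctl(fd, BIOCSETF, &prog) < 0)
 *		warn("BIOCSETF");
 *
 * bpf_setf() copies the instructions in with copyin(), runs bpf_validate()
 * over them, and only then swaps the new program in for the old one.
 */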

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
static int
bpf_setif(d, ifr)
	struct bpf_d *d;
	struct ifreq *ifr;
{
	struct bpf_if *bp;
	int s, error;
	struct ifnet *theywant;

	theywant = ifunit(ifr->ifr_name);
	if (theywant == 0)
		return ENXIO;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant)
			continue;
		/*
		 * We found the requested interface.
		 * If it's not up, return an error.
		 * Allocate the packet buffers if we need to.
		 * If we're already attached to requested interface,
		 * just flush the buffer.
		 */
		if ((ifp->if_flags & IFF_UP) == 0)
			return (ENETDOWN);

		if (d->bd_sbuf == 0) {
			error = bpf_allocbufs(d);
			if (error != 0)
				return (error);
		}
		s = splimp();
		if (bp != d->bd_bif) {
			if (d->bd_bif)
				/*
				 * Detach if attached to something else.
				 */
				bpf_detachd(d);

			bpf_attachd(d, bp);
		}
		reset_d(d);
		splx(s);
		return (0);
	}
	/* Not found. */
	return (ENXIO);
}

/*
 * Support for select() and poll() system calls
 *
 * Return true iff the specified operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfpoll(dev, events, p)
	register dev_t dev;
	int events;
	struct proc *p;
{
	register struct bpf_d *d;
	register int s;
	int revents = 0;

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = dev->si_drv1;

	if (d->bd_bif == NULL)
		return (ENXIO);

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);
	}
	splx(s);
	return (revents);
}

/*
 * Incoming linkage from device drivers.  Process the packet pkt, of length
 * pktlen, which is stored in a contiguous buffer.  The packet is parsed
 * by each process' filter, and if accepted, stashed into the corresponding
 * buffer.
 */
void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
	struct bpf_if *bp;
	register struct bpf_d *d;
	register u_int slen;
	/*
	 * Note that the ipl does not have to be raised at this point.
	 * The only problem that could arise here would be if two different
	 * interfaces shared any data, which is not the case.
	 */
	bp = ifp->if_bpf;
	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, pkt, pktlen, pktlen);
		if (slen != 0)
			catchpacket(d, pkt, pktlen, slen, bcopy);
	}
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
static void
bpf_mcopy(src_arg, dst_arg, len)
	const void *src_arg;
	void *dst_arg;
	register size_t len;
{
	register const struct mbuf *m;
	register u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == 0)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, void *), dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

/*
 * Incoming linkage from device drivers, when packet is in an mbuf chain.
 */
void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
	struct bpf_if *bp = ifp->if_bpf;
	struct bpf_d *d;
	u_int pktlen, slen;
	struct mbuf *m0;

	pktlen = 0;
	for (m0 = m; m0 != 0; m0 = m0->m_next)
		pktlen += m0->m_len;

	for (d = bp->bif_dlist; d != 0; d = d->bd_next) {
		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
			continue;
		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)m, pktlen, 0);
		if (slen != 0)
			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy);
	}
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up any sleeping reads when the buffers are
 * rotated or when immediate mode is set.  "cpfn" is the routine
 * called to do the actual data transfer.  bcopy is passed in to
 * copy contiguous chunks, while bpf_mcopy is passed in to copy
 * mbuf chains.  In the latter case, pkt is really an mbuf.
 */
static void
catchpacket(d, pkt, pktlen, snaplen, cpfn)
	register struct bpf_d *d;
	register u_char *pkt;
	register u_int pktlen, snaplen;
	register void (*cpfn) __P((const void *, void *, size_t));
{
	register struct bpf_hdr *hp;
	register int totlen, curlen;
	register int hdrlen = d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater than or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == 0) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		bpf_wakeup(d);
		curlen = 0;
	}
	else if (d->bd_immediate)
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		bpf_wakeup(d);

	/*
	 * Append the bpf header.
	 */
	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
#if BSD >= 199103
	microtime(&hp->bh_tstamp);
#elif defined(sun)
	uniqtime(&hp->bh_tstamp);
#else
	hp->bh_tstamp = time;
#endif
	hp->bh_datalen = pktlen;
	hp->bh_hdrlen = hdrlen;
	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
	d->bd_slen = curlen + totlen;
}
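
/*
 * Illustrative sketch (editor's note, not part of the original source):
 * the layout catchpacket() produces is a sequence of records, each a
 * struct bpf_hdr followed by the captured bytes, padded out to a
 * longword boundary.  A reader typically walks one read(2)'s worth of
 * data like this:
 *
 *	u_char *p = buf;
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		u_char *pktdata = p + bh->bh_hdrlen;
 *		// bh->bh_caplen bytes captured of a bh->bh_datalen byte packet
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */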

/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(d)
	register struct bpf_d *d;
{
	d->bd_fbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_fbuf == 0)
		return (ENOBUFS);

	d->bd_sbuf = (caddr_t)malloc(d->bd_bufsize, M_BPF, M_WAITOK);
	if (d->bd_sbuf == 0) {
		free(d->bd_fbuf, M_BPF);
		return (ENOBUFS);
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	return (0);
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(d)
	register struct bpf_d *d;
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_sbuf != 0) {
		free(d->bd_sbuf, M_BPF);
		if (d->bd_hbuf != 0)
			free(d->bd_hbuf, M_BPF);
		if (d->bd_fbuf != 0)
			free(d->bd_fbuf, M_BPF);
	}
	if (d->bd_filter)
		free((caddr_t)d->bd_filter, M_BPF);
}

/*
 * Attach an interface to bpf.  ifp is a pointer to the structure
 * defining the interface to be attached; dlt is the link layer type;
 * hdrlen is the fixed size of the link header (variable length headers
 * not yet supported).
 */
void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
	struct bpf_if *bp;
	bp = (struct bpf_if *)malloc(sizeof(*bp), M_BPF, M_DONTWAIT);
	if (bp == 0)
		panic("bpfattach");

	bp->bif_dlist = 0;
	bp->bif_ifp = ifp;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	bp->bif_ifp->if_bpf = 0;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
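	/*
	 * Illustrative worked example (editor's note, not part of the
	 * original source), assuming SIZEOF_BPF_HDR is 18: for Ethernet,
	 * hdrlen is 14, so BPF_WORDALIGN(14 + 18) = 32 and bif_hdrlen
	 * becomes 32 - 14 = 18.  Each record then carries 18 bytes of
	 * bpf header before the link header, and 18 + 14 = 32 leaves the
	 * network layer header on a longword boundary.
	 */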
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	if (bootverbose)
		printf("bpf: %s%d attached\n", ifp->if_name, ifp->if_unit);
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(ifp)
	struct ifnet *ifp;
{
	struct bpf_if	*bp, *bp_prev;
	struct bpf_d	*d;
	int	s;

	s = splimp();

	/* Locate BPF interface information */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (ifp == bp->bif_ifp)
			break;
		bp_prev = bp;
	}

	/* Interface wasn't attached */
	if (bp == NULL) {
		splx(s);
		printf("bpfdetach: %s%d was not attached\n", ifp->if_name,
		    ifp->if_unit);
		return;
	}

	while ((d = bp->bif_dlist) != NULL) {
		bpf_detachd(d);
		bpf_wakeup(d);
	}

	if (bp_prev) {
		bp_prev->bif_next = bp->bif_next;
	} else {
		bpf_iflist = bp->bif_next;
	}

	free(bp, M_BPF);

	splx(s);
}

static void bpf_drvinit __P((void *unused));

static void
bpf_drvinit(unused)
	void *unused;
{

	cdevsw_add(&bpf_cdevsw);
}

SYSINIT(bpfdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR,bpf_drvinit,NULL)

#else /* !BPF */
/*
 * NOP stubs to allow bpf-using drivers to load and function.
 *
 * A 'better' implementation would allow the core bpf functionality
 * to be loaded at runtime.
 */

void
bpf_tap(ifp, pkt, pktlen)
	struct ifnet *ifp;
	register u_char *pkt;
	register u_int pktlen;
{
}

void
bpf_mtap(ifp, m)
	struct ifnet *ifp;
	struct mbuf *m;
{
}

void
bpfattach(ifp, dlt, hdrlen)
	struct ifnet *ifp;
	u_int dlt, hdrlen;
{
}

void
bpfdetach(ifp)
	struct ifnet *ifp;
{
}

u_int
bpf_filter(pc, p, wirelen, buflen)
	register const struct bpf_insn *pc;
	register u_char *p;
	u_int wirelen;
	register u_int buflen;
{
	return -1;	/* "no filter" behaviour */
}

#endif /* !BPF */
