/*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed, however I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on read/write/poll mode of
 * operation though.
 *
 * $FreeBSD: head/sys/net/if_tun.c 127580 2004-03-29 18:42:51Z rwatson $
 */

#include "opt_atalk.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipx.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/random.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#ifdef INET
#include <netinet/in.h>
#endif
#include <net/bpf.h>
#include <net/if_tun.h>

#include <sys/queue.h>

struct tun_softc {
	TAILQ_ENTRY(tun_softc)	tun_list;
	dev_t			tun_dev;
	u_short	tun_flags;		/* misc flags */
#define	TUN_OPEN	0x0001
#define	TUN_INITED	0x0002
#define	TUN_RCOLL	0x0004
#define	TUN_IASET	0x0008
#define	TUN_DSTADDR	0x0010
#define	TUN_LMODE	0x0020
#define	TUN_RWAIT	0x0040
#define	TUN_ASYNC	0x0080
#define	TUN_IFHEAD	0x0100

#define TUN_READY       (TUN_OPEN | TUN_INITED)

	/*
	 * XXXRW: tun_pid is used to exclusively lock /dev/tun.  Is this
	 * actually needed?  Can we just return EBUSY if already open?
	 * Problem is that this involved inherent races when a tun device
	 * is handed off from one process to another, as opposed to just
	 * being slightly stale informationally.
	 */
	pid_t	tun_pid;		/* owning pid */
	struct	ifnet tun_if;		/* the interface */
	struct  sigio *tun_sigio;	/* information for async I/O */
	struct	selinfo	tun_rsel;	/* read select */
};

#define TUNDEBUG	if (tundebug) if_printf
#define	TUNNAME		"tun"

/*
 * All mutable global variables in if_tun are locked using tunmtx, with
 * the exception of tundebug, which is used unlocked, and tunclones,
 * which is static after setup.
 */
static struct mtx tunmtx;
static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
static int tundebug = 0;
static struct clonedevs *tunclones;
static TAILQ_HEAD(,tun_softc)	tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, "");

static void	tunclone(void *arg, char *name, int namelen, dev_t *dev);
static void	tuncreate(dev_t dev);
static int	tunifioctl(struct ifnet *, u_long, caddr_t);
static int	tuninit(struct ifnet *);
static int	tunmodevent(module_t, int, void *);
static int	tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *rt);
static void	tunstart(struct ifnet *);

static d_open_t		tunopen;
static d_close_t	tunclose;
static d_read_t		tunread;
static d_write_t	tunwrite;
static d_ioctl_t	tunioctl;
static d_poll_t		tunpoll;

static struct cdevsw tun_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT,
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
	.d_name =	TUNNAME,
};

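/*
 * devfs clone handler.  When a process opens /dev/tun or /dev/tunN, the
 * dev_clone event fires and this routine either finds the existing dev_t
 * for that unit or creates a fresh /dev/tunN node (picking the lowest
 * free unit when the bare name "tun" was opened).
 */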
static void
tunclone(void *arg, char *name, int namelen, dev_t *dev)
{
	int u, i;

	if (*dev != NODEV)
		return;

	if (strcmp(name, TUNNAME) == 0) {
		u = -1;
	} else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
		return;	/* Don't recognise the name */
	if (u != -1 && u > IF_MAXUNIT)
		return;	/* Unit number too high */

	/* find any existing device, or allocate new unit number */
	i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
	if (i) {
		/* No preexisting dev_t, create one */
		*dev = make_dev(&tun_cdevsw, unit2minor(u),
		    UID_UUCP, GID_DIALER, 0600, "tun%d", u);
		if (*dev != NULL)
			(*dev)->si_flags |= SI_CHEAPCLONE;
	}
}

static void
tun_destroy(struct tun_softc *tp)
{
	dev_t dev;

	KASSERT((tp->tun_flags & TUN_OPEN) == 0,
	    ("tununits is out of sync - unit %d", tp->tun_if.if_dunit));

	dev = tp->tun_dev;
	bpfdetach(&tp->tun_if);
	if_detach(&tp->tun_if);
	destroy_dev(dev);
	free(tp, M_TUN);
}

static int
tunmodevent(module_t mod, int type, void *data)
{
	static eventhandler_tag tag;
	struct tun_softc *tp;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
		clone_setup(&tunclones);
		tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
		if (tag == NULL)
			return (ENOMEM);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(dev_clone, tag);

		mtx_lock(&tunmtx);
		while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
			TAILQ_REMOVE(&tunhead, tp, tun_list);
			mtx_unlock(&tunmtx);
			tun_destroy(tp);
			mtx_lock(&tunmtx);
		}
		mtx_unlock(&tunmtx);
		clone_cleanup(&tunclones);
		mtx_destroy(&tunmtx);
		break;
	}
	return 0;
}

static moduledata_t tun_mod = {
	"if_tun",
	tunmodevent,
	0
};

DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);

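/*
 * if_start handler.  The tun interface never transmits on its own; a
 * queued packet just needs to be noticed by userland, so wake any reader
 * sleeping in tunread(), deliver SIGIO if async I/O was requested, and
 * notify select/poll waiters.
 */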
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;

	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup(tp);
	}
	if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio)
		pgsigio(&tp->tun_sigio, SIGIO, 0);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
}

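/*
 * Allocate and initialize the softc for a unit: link it onto the global
 * list, set up the ifnet (point-to-point, PPP type, TUNMTU) and attach it
 * to the stack and to bpf.
 */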
static void
tuncreate(dev_t dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

	dev->si_flags &= ~SI_CHEAPCLONE;

	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	sc->tun_flags = TUN_INITED;
	sc->tun_dev = dev;
	mtx_lock(&tunmtx);
	TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
	mtx_unlock(&tunmtx);

	ifp = &sc->tun_if;
	if_initname(ifp, TUNNAME, dev2unit(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifp->if_snd.ifq_maxlen = ifqmaxlen;
	ifp->if_softc = sc;
	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}

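/*
 * The softc is created lazily here on first open.  Only one process may
 * hold the device open at a time: the opener's pid is recorded and later
 * opens by other processes fail with EBUSY (see the XXXRW note above).
 */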
static int
tunopen(dev_t dev, int flag, int mode, struct thread *td)
{
	struct ifnet	*ifp;
	struct tun_softc *tp;

	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}

	if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid)
		return (EBUSY);
	tp->tun_pid = td->td_proc->p_pid;

	tp->tun_flags |= TUN_OPEN;
	ifp = &tp->tun_if;
	TUNDEBUG(ifp, "open\n");

	return (0);
}

/*
 * tunclose - close the device - mark i/f down & delete
 * routing info
 */
static	int
tunclose(dev_t dev, int foo, int bar, struct thread *td)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/*
	 * junk all pending output
	 */
	IF_DRAIN(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		s = splimp();
		if_down(ifp);
		splx(s);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		struct ifaddr *ifa;

		s = splimp();
		/* find internet addresses and delete routes */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
			if (ifa->ifa_addr->sa_family == AF_INET)
				rtinit(ifa, (int)RTM_DELETE,
				    tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
		ifp->if_flags &= ~IFF_RUNNING;
		splx(s);
	}

	funsetown(&tp->tun_sigio);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	TUNDEBUG (ifp, "closed\n");
	return (0);
}

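/*
 * Mark the interface up and running and scan its address list, noting in
 * the softc flags whether an IPv4 address (TUN_IASET) and a destination
 * address (TUN_DSTADDR) have been configured.
 */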
static int
tuninit(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct ifaddr *ifa;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa;
	     ifa = TAILQ_NEXT(ifa, ifa_link)) {
		if (ifa->ifa_addr == NULL)
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
			    struct sockaddr_in *si;

			    si = (struct sockaddr_in *)ifa->ifa_addr;
			    if (si->sin_addr.s_addr)
				    tp->tun_flags |= TUN_IASET;

			    si = (struct sockaddr_in *)ifa->ifa_dstaddr;
			    if (si && si->sin_addr.s_addr)
				    tp->tun_flags |= TUN_DSTADDR;
			}
#endif
		}
	}
	return (error);
}

/*
 * Process an ioctl request on the network interface (SIOCSIFADDR,
 * SIOCSIFMTU and friends).  ioctls on the character device itself are
 * handled by tunioctl() below.
 */
static int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int		error = 0, s;

	s = splimp();
	switch(cmd) {
	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		if (tp->tun_pid)
			sprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}

/*
 * tunoutput - queue packets from higher level ready to put out.
 */
static int
tunoutput(
	struct ifnet *ifp,
	struct mbuf *m0,
	struct sockaddr *dst,
	struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
#ifdef MAC
	int error;
#endif

	TUNDEBUG (ifp, "tunoutput\n");

#ifdef MAC
	error = mac_check_ifnet_transmit(ifp, m0);
	if (error) {
		m_freem(m0);
		return (error);
	}
#endif

	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		m_freem (m0);
		return (EHOSTDOWN);
	}

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		m_freem (m0);
		return (EHOSTDOWN);
	}

	/*
	 * BPF writes need to be handled specially: the packet arrives with
	 * an AF_UNSPEC pseudo-sockaddr and carries the real address family
	 * in its first word, which we recover and strip off here.
	 */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		uint32_t af = dst->sa_family;
		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (tp->tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, M_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			ifp->if_iqdrops++;
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (tp->tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, M_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			ifp->if_iqdrops++;
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return (EAFNOSUPPORT);
		}
	}

	if (! IF_HANDOFF(&ifp->if_snd, m0, ifp)) {
		ifp->if_collisions++;
		return (ENOBUFS);
	}
	ifp->if_opackets++;
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
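/*
 * Beyond the generic fcntl-style requests, the interesting ioctls here
 * select how packets are framed on the character device: TUNSLMODE
 * prefixes each packet read with the destination sockaddr, TUNSIFHEAD
 * prefixes reads and writes with a 4-byte address family, and the two
 * modes are mutually exclusive.
 */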
static	int
tunioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	int		s;
	int		error;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (cmd) {
	case TUNSIFINFO:
		tunp = (struct tuninfo *)data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		if (tp->tun_if.if_mtu != tunp->mtu
		&& (error = suser(td)) != 0)
			return (error);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;
	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;
	case TUNSLMODE:
		if (*(int *)data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		break;
	case TUNSIFHEAD:
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		break;
	case TUNGIFHEAD:
		*(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return(EBUSY);

		switch (*(int *)data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			return(EINVAL);
		}
		break;
	case TUNSIFPID:
		tp->tun_pid = curthread->td_proc->p_pid;
		break;
	case FIONBIO:
		break;
	case FIOASYNC:
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		break;
	case FIONREAD:
		s = splimp();
		if (tp->tun_if.if_snd.ifq_head) {
			struct mbuf *mb = tp->tun_if.if_snd.ifq_head;
			for( *(int *)data = 0; mb != 0; mb = mb->m_next)
				*(int *)data += mb->m_len;
		} else
			*(int *)data = 0;
		splx(s);
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
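/*
 * Note that each read returns at most one queued packet; if the caller's
 * buffer is smaller than the packet, the remainder is dropped rather than
 * saved for the next read.
 */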
static	int
tunread(dev_t dev, struct uio *uio, int flag)
{
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*m;
	int		error=0, len, s;

	TUNDEBUG (ifp, "read\n");
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags);
		return (EHOSTDOWN);
	}

	tp->tun_flags &= ~TUN_RWAIT;

	s = splimp();
	do {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			tp->tun_flags |= TUN_RWAIT;
			if((error = tsleep(tp, PCATCH | (PZERO + 1),
					"tunread", 0)) != 0) {
				splx(s);
				return (error);
			}
		}
	} while (m == NULL);
	splx(s);

	while (m && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m->m_len);
		if (len != 0)
			error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	if (m) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m);
	}
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
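/*
 * Each write(2) must contain exactly one complete packet.  With
 * TUN_IFHEAD set the packet is expected to start with a 4-byte address
 * family in network byte order; otherwise it is assumed to be IPv4.
 */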
static	int
tunwrite(dev_t dev, struct uio *uio, int flag)
{
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*top, **mp, *m;
	int		error=0, tlen, mlen;
	uint32_t	family;
	int		isr;

	TUNDEBUG(ifp, "tunwrite\n");

	if ((ifp->if_flags & IFF_UP) != IFF_UP)
		/* ignore silently */
		return (0);

	if (uio->uio_resid == 0)
		return (0);

	if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
		TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
		return (EIO);
	}
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;

	top = 0;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET (m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem (top);
		ifp->if_ierrors++;
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;
#ifdef MAC
	mac_create_mbuf_from_ifnet(ifp, top);
#endif

	if (tp->tun_flags & TUN_IFHEAD) {
		if (top->m_len < sizeof(family) &&
		    (top = m_pullup(top, sizeof(family))) == NULL)
			return (ENOBUFS);
		family = ntohl(*mtod(top, u_int32_t *));
		m_adj(top, sizeof(family));
	} else
		family = AF_INET;

	BPF_MTAP2(ifp, &family, sizeof(family), top);

	switch (family) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		isr = NETISR_IPV6;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		isr = NETISR_ATALK2;
		break;
#endif
	default:
		m_freem(top);
		return (EAFNOSUPPORT);
	}
	/* First chunk of an mbuf contains good junk */
	if (harvest.point_to_point)
		random_harvest(m, 16, 3, 0, RANDOM_NET);
	ifp->if_ibytes += top->m_pkthdr.len;
	ifp->if_ipackets++;
	netisr_dispatch(isr, top);
	return (0);
}

/*
 * tunpoll - the poll interface, this is only useful on reads
 * really. The write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it.
 */
static	int
tunpoll(dev_t dev, int events, struct thread *td)
{
	int		s;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	int		revents = 0;

	s = splimp();
	TUNDEBUG(ifp, "tunpoll\n");

	if (events & (POLLIN | POLLRDNORM)) {
		if (ifp->if_snd.ifq_len > 0) {
			TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG(ifp, "tunpoll waiting\n");
			selrecord(td, &tp->tun_rsel);
		}
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	return (revents);
}