/*	$NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $	*/

/*
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 *
 * This source may be freely distributed; however, I would be interested
 * in any changes that are made.
 *
 * This driver takes packets off the IP i/f and hands them up to a
 * user process to have its wicked way with. This driver has its
 * roots in a similar driver written by Phil Cockcroft (formerly) at
 * UCL. This driver is based much more on a read/write/poll mode of
 * operation, though.
 *
 * $FreeBSD: head/sys/net/if_tun.c 132199 2004-07-15 08:26:07Z phk $
 */
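
/*
 * Illustrative userland usage (a sketch only, not part of the driver):
 * a process opens a tun device, optionally enables the address-family
 * header, and then moves whole packets with read(2)/write(2).
 *
 *	int fd = open("/dev/tun", O_RDWR);	// clones the next free unit
 *	int one = 1;
 *	ioctl(fd, TUNSIFHEAD, &one);	// prepend 4-byte AF to each packet
 *	char buf[TUNMRU];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one packet per read
 *	write(fd, buf, n);			// one packet per write
 */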

#include "opt_atalk.h"
#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ipx.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/mac.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/poll.h>
#include <sys/signalvar.h>
#include <sys/filedesc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/random.h>

#include <net/if.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/route.h>
#ifdef INET
#include <netinet/in.h>
#endif
#include <net/bpf.h>
#include <net/if_tun.h>

#include <sys/queue.h>

/*
 * tun_list is protected by the global tunmtx.  Other mutable fields are
 * protected by tun->tun_mtx, or by their owning subsystem.  tun_dev is
 * static for the duration of a tunnel interface.
 */
struct tun_softc {
	TAILQ_ENTRY(tun_softc)	tun_list;
	struct cdev *tun_dev;
	u_short	tun_flags;		/* misc flags */
#define	TUN_OPEN	0x0001		/* device is open */
#define	TUN_INITED	0x0002		/* softc/ifnet initialized */
#define	TUN_RCOLL	0x0004		/* read select collision */
#define	TUN_IASET	0x0008		/* local address set */
#define	TUN_DSTADDR	0x0010		/* destination address set */
#define	TUN_LMODE	0x0020		/* prepend dst sockaddr to packets */
#define	TUN_RWAIT	0x0040		/* reader sleeping on if_snd */
#define	TUN_ASYNC	0x0080		/* deliver SIGIO on input */
#define	TUN_IFHEAD	0x0100		/* prepend address family to packets */

#define	TUN_READY	(TUN_OPEN | TUN_INITED)

	/*
	 * XXXRW: tun_pid is used to exclusively lock /dev/tun.  Is this
	 * actually needed?  Can we just return EBUSY if already open?
	 * The problem is that this involves inherent races when a tun
	 * device is handed off from one process to another, as opposed
	 * to merely being slightly stale information.
	 */
	pid_t	tun_pid;		/* owning pid */
	struct	ifnet tun_if;		/* the interface */
	struct	sigio *tun_sigio;	/* information for async I/O */
	struct	selinfo	tun_rsel;	/* read select */
	struct	mtx tun_mtx;		/* protect mutable softc fields */
};

#define TUNDEBUG	if (tundebug) if_printf
#define	TUNNAME		"tun"

/*
 * All mutable global variables in if_tun are locked using tunmtx, with
 * the exception of tundebug, which is used unlocked, and tunclones,
 * which is static after setup.
 */
static struct mtx tunmtx;
static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface");
static int tundebug = 0;
static struct clonedevs *tunclones;
static TAILQ_HEAD(,tun_softc)	tunhead = TAILQ_HEAD_INITIALIZER(tunhead);
SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0,
    "Enable debug output from the tun driver");

static void	tunclone(void *arg, char *name, int namelen, struct cdev **dev);
static void	tuncreate(struct cdev *dev);
static int	tunifioctl(struct ifnet *, u_long, caddr_t);
static int	tuninit(struct ifnet *);
static int	tunmodevent(module_t, int, void *);
static int	tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *,
		    struct rtentry *rt);
static void	tunstart(struct ifnet *);

static d_open_t		tunopen;
static d_close_t	tunclose;
static d_read_t		tunread;
static d_write_t	tunwrite;
static d_ioctl_t	tunioctl;
static d_poll_t		tunpoll;

static struct cdevsw tun_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT,
	.d_open =	tunopen,
	.d_close =	tunclose,
	.d_read =	tunread,
	.d_write =	tunwrite,
	.d_ioctl =	tunioctl,
	.d_poll =	tunpoll,
	.d_name =	TUNNAME,
};

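/*
 * dev_clone event handler: create tun device nodes on demand.  Opening
 * "/dev/tun" selects the lowest free unit; "/dev/tunN" selects unit N.
 * New nodes are marked SI_CHEAPCLONE until a softc is attached on
 * first open.
 */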
static void
tunclone(void *arg, char *name, int namelen, struct cdev **dev)
{
	int u, i;

	if (*dev != NULL)
		return;

	if (strcmp(name, TUNNAME) == 0) {
		u = -1;
	} else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1)
		return;	/* Don't recognise the name */
	if (u != -1 && u > IF_MAXUNIT)
		return;	/* Unit number too high */

	/* find any existing device, or allocate a new unit number */
	i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0);
	if (i) {
		/* No preexisting struct cdev *, create one */
		*dev = make_dev(&tun_cdevsw, unit2minor(u),
		    UID_UUCP, GID_DIALER, 0600, "tun%d", u);
		if (*dev != NULL)
			(*dev)->si_flags |= SI_CHEAPCLONE;
	}
}

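/*
 * Tear down a single tunnel: detach bpf and the interface, then destroy
 * the device node, the softc mutex, and the softc itself.  The caller
 * has already removed the softc from tunhead.
 */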
static void
tun_destroy(struct tun_softc *tp)
{
	struct cdev *dev;

	/* Unlocked read. */
	KASSERT((tp->tun_flags & TUN_OPEN) == 0,
	    ("tununits is out of sync - unit %d", tp->tun_if.if_dunit));

	dev = tp->tun_dev;
	bpfdetach(&tp->tun_if);
	if_detach(&tp->tun_if);
	destroy_dev(dev);
	mtx_destroy(&tp->tun_mtx);
	free(tp, M_TUN);
}

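/*
 * Module event handler.  On load, set up the global mutex, the clone
 * list, and the dev_clone hook; on unload, destroy any remaining
 * tunnels and release the global state.
 */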
static int
tunmodevent(module_t mod, int type, void *data)
{
	static eventhandler_tag tag;
	struct tun_softc *tp;

	switch (type) {
	case MOD_LOAD:
		mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF);
		clone_setup(&tunclones);
		tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000);
		if (tag == NULL)
			return (ENOMEM);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(dev_clone, tag);

		mtx_lock(&tunmtx);
		while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
			TAILQ_REMOVE(&tunhead, tp, tun_list);
			mtx_unlock(&tunmtx);
			tun_destroy(tp);
			mtx_lock(&tunmtx);
		}
		mtx_unlock(&tunmtx);
		clone_cleanup(&tunclones);
		mtx_destroy(&tunmtx);
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t tun_mod = {
	"if_tun",
	tunmodevent,
	0
};

DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);

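/*
 * if_start handler.  Queued packets are consumed by read(2) rather than
 * transmitted, so all we do here is wake a sleeping reader, post SIGIO
 * if async I/O was requested, and notify any selecting threads.
 */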
static void
tunstart(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct mbuf *m;

	if (ALTQ_IS_ENABLED(&ifp->if_snd)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m == NULL) {
			IFQ_UNLOCK(&ifp->if_snd);
			return;
		}
		IFQ_UNLOCK(&ifp->if_snd);
	}

	mtx_lock(&tp->tun_mtx);
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup(tp);
	}
	if ((tp->tun_flags & TUN_ASYNC) && tp->tun_sigio) {
		mtx_unlock(&tp->tun_mtx);
		pgsigio(&tp->tun_sigio, SIGIO, 0);
	} else
		mtx_unlock(&tp->tun_mtx);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
}

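/*
 * Allocate and initialize the softc for one unit, attach its network
 * interface and bpf, and hang the softc off the device node.  Called
 * from tunopen() on the first open of each cloned device.
 */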
static void
tuncreate(struct cdev *dev)
{
	struct tun_softc *sc;
	struct ifnet *ifp;

	dev->si_flags &= ~SI_CHEAPCLONE;

	MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO);
	mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF);
	sc->tun_flags = TUN_INITED;
	sc->tun_dev = dev;
	mtx_lock(&tunmtx);
	TAILQ_INSERT_TAIL(&tunhead, sc, tun_list);
	mtx_unlock(&tunmtx);

	ifp = &sc->tun_if;
	if_initname(ifp, TUNNAME, dev2unit(dev));
	ifp->if_mtu = TUNMTU;
	ifp->if_ioctl = tunifioctl;
	ifp->if_output = tunoutput;
	ifp->if_start = tunstart;
	ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST;
	ifp->if_type = IFT_PPP;
	ifp->if_softc = sc;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_snd.ifq_drv_maxlen = 0;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	bpfattach(ifp, DLT_NULL, sizeof(u_int));
	dev->si_drv1 = sc;
}

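/*
 * cdevsw open routine.  Instantiate the interface if this is the first
 * open of the unit, then claim the device for the opening process via
 * tun_pid; a different process attempting to open it gets EBUSY.
 */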
static int
tunopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct ifnet	*ifp;
	struct tun_softc *tp;

	/*
	 * XXXRW: Non-atomic test and set of dev->si_drv1 requires
	 * synchronization.
	 */
	tp = dev->si_drv1;
	if (!tp) {
		tuncreate(dev);
		tp = dev->si_drv1;
	}

	/*
	 * XXXRW: This use of tun_pid is subject to error due to the
	 * fact that a reference to the tunnel can live beyond the
	 * death of the process that created it.  Can we replace this
	 * with a simple busy flag?
	 */
	mtx_lock(&tp->tun_mtx);
	if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) {
		mtx_unlock(&tp->tun_mtx);
		return (EBUSY);
	}
	tp->tun_pid = td->td_proc->p_pid;

	tp->tun_flags |= TUN_OPEN;
	mtx_unlock(&tp->tun_mtx);
	ifp = &tp->tun_if;
	TUNDEBUG(ifp, "open\n");

	return (0);
}

/*
 * tunclose - close the device; mark the interface down and delete
 * routing info.
 */
static	int
tunclose(struct cdev *dev, int foo, int bar, struct thread *td)
{
	struct tun_softc *tp;
	struct ifnet *ifp;
	int s;

	tp = dev->si_drv1;
	ifp = &tp->tun_if;

	mtx_lock(&tp->tun_mtx);
	tp->tun_flags &= ~TUN_OPEN;
	tp->tun_pid = 0;

	/*
	 * junk all pending output
	 */
	s = splimp();
	IFQ_PURGE(&ifp->if_snd);
	splx(s);
	mtx_unlock(&tp->tun_mtx);

	if (ifp->if_flags & IFF_UP) {
		s = splimp();
		if_down(ifp);
		splx(s);
	}

	if (ifp->if_flags & IFF_RUNNING) {
		struct ifaddr *ifa;

		s = splimp();
		/* find internet addresses and delete routes */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
			if (ifa->ifa_addr->sa_family == AF_INET)
				/* Unlocked read. */
				rtinit(ifa, (int)RTM_DELETE,
				    tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0);
		ifp->if_flags &= ~IFF_RUNNING;
		splx(s);
	}

	funsetown(&tp->tun_sigio);
	selwakeuppri(&tp->tun_rsel, PZERO + 1);
	TUNDEBUG(ifp, "closed\n");
	return (0);
}

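/*
 * Mark the interface up and running, and record in tun_flags whether
 * local and destination IPv4 addresses have been configured.
 */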
static int
tuninit(struct ifnet *ifp)
{
	struct tun_softc *tp = ifp->if_softc;
	struct ifaddr *ifa;
	int error = 0;

	TUNDEBUG(ifp, "tuninit\n");

	ifp->if_flags |= IFF_UP | IFF_RUNNING;
	getmicrotime(&ifp->if_lastchange);

	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
		if (ifa->ifa_addr == NULL)
			error = EFAULT;
			/* XXX: Should maybe return straight off? */
		else {
#ifdef INET
			if (ifa->ifa_addr->sa_family == AF_INET) {
				struct sockaddr_in *si;

				si = (struct sockaddr_in *)ifa->ifa_addr;
				mtx_lock(&tp->tun_mtx);
				if (si->sin_addr.s_addr)
					tp->tun_flags |= TUN_IASET;

				si = (struct sockaddr_in *)ifa->ifa_dstaddr;
				if (si && si->sin_addr.s_addr)
					tp->tun_flags |= TUN_DSTADDR;
				mtx_unlock(&tp->tun_mtx);
			}
#endif
		}
	}
	return (error);
}

/*
 * Process an ioctl request.
 */
static int
tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct tun_softc *tp = ifp->if_softc;
	struct ifstat *ifs;
	int		error = 0, s;

	s = splimp();
	switch (cmd) {
	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		mtx_lock(&tp->tun_mtx);
		if (tp->tun_pid)
			sprintf(ifs->ascii + strlen(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tun_pid);
		mtx_unlock(&tp->tun_mtx);
		break;
	case SIOCSIFADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "address set, error=%d\n", error);
		break;
	case SIOCSIFDSTADDR:
		error = tuninit(ifp);
		TUNDEBUG(ifp, "destination address set, error=%d\n", error);
		break;
	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		TUNDEBUG(ifp, "mtu set\n");
		break;
	case SIOCSIFFLAGS:
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;
	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}

/*
 * tunoutput - queue packets from a higher level, ready to be put out.
 */
static int
tunoutput(struct ifnet *ifp, struct mbuf *m0, struct sockaddr *dst,
    struct rtentry *rt)
{
	struct tun_softc *tp = ifp->if_softc;
	u_short cached_tun_flags;
	int error;

	TUNDEBUG(ifp, "tunoutput\n");

#ifdef MAC
	error = mac_check_ifnet_transmit(ifp, m0);
	if (error) {
		m_freem(m0);
		return (error);
	}
#endif

	/* Could be unlocked read? */
	mtx_lock(&tp->tun_mtx);
	cached_tun_flags = tp->tun_flags;
	mtx_unlock(&tp->tun_mtx);
	if ((cached_tun_flags & TUN_READY) != TUN_READY) {
		TUNDEBUG(ifp, "not ready 0%o\n", cached_tun_flags);
		m_freem(m0);
		return (EHOSTDOWN);
	}

	if ((ifp->if_flags & IFF_UP) != IFF_UP) {
		m_freem(m0);
		return (EHOSTDOWN);
	}

	/* BPF write needs to be handled specially */
	if (dst->sa_family == AF_UNSPEC) {
		dst->sa_family = *(mtod(m0, int *));
		m0->m_len -= sizeof(int);
		m0->m_pkthdr.len -= sizeof(int);
		m0->m_data += sizeof(int);
	}

	if (ifp->if_bpf) {
		uint32_t af = dst->sa_family;
		bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0);
	}

	/* prepend sockaddr? this may abort if the mbuf allocation fails */
	if (cached_tun_flags & TUN_LMODE) {
		/* allocate space for sockaddr */
		M_PREPEND(m0, dst->sa_len, M_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			ifp->if_iqdrops++;
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else {
			bcopy(dst, m0->m_data, dst->sa_len);
		}
	}

	if (cached_tun_flags & TUN_IFHEAD) {
		/* Prepend the address family */
		M_PREPEND(m0, 4, M_DONTWAIT);

		/* if allocation failed drop packet */
		if (m0 == NULL) {
			ifp->if_iqdrops++;
			ifp->if_oerrors++;
			return (ENOBUFS);
		} else
			*(u_int32_t *)m0->m_data = htonl(dst->sa_family);
	} else {
#ifdef INET
		if (dst->sa_family != AF_INET)
#endif
		{
			m_freem(m0);
			return (EAFNOSUPPORT);
		}
	}

	IFQ_HANDOFF(ifp, m0, error);
	if (error) {
		ifp->if_collisions++;
		return (ENOBUFS);
	}
	ifp->if_opackets++;
	return (0);
}

/*
 * the cdevsw interface is now pretty minimal.
 */
static	int
tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	int		s;
	int		error;
	struct tun_softc *tp = dev->si_drv1;
	struct tuninfo *tunp;

	switch (cmd) {
	case TUNSIFINFO:
		tunp = (struct tuninfo *)data;
		if (tunp->mtu < IF_MINMTU)
			return (EINVAL);
		if (tp->tun_if.if_mtu != tunp->mtu &&
		    (error = suser(td)) != 0)
			return (error);
		tp->tun_if.if_mtu = tunp->mtu;
		tp->tun_if.if_type = tunp->type;
		tp->tun_if.if_baudrate = tunp->baudrate;
		break;
	case TUNGIFINFO:
		tunp = (struct tuninfo *)data;
		tunp->mtu = tp->tun_if.if_mtu;
		tunp->type = tp->tun_if.if_type;
		tunp->baudrate = tp->tun_if.if_baudrate;
		break;
	case TUNSDEBUG:
		tundebug = *(int *)data;
		break;
	case TUNGDEBUG:
		*(int *)data = tundebug;
		break;
	case TUNSLMODE:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data) {
			tp->tun_flags |= TUN_LMODE;
			tp->tun_flags &= ~TUN_IFHEAD;
		} else
			tp->tun_flags &= ~TUN_LMODE;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNSIFHEAD:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data) {
			tp->tun_flags |= TUN_IFHEAD;
			tp->tun_flags &= ~TUN_LMODE;
		} else
			tp->tun_flags &= ~TUN_IFHEAD;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNGIFHEAD:
		/* Could be unlocked read? */
		mtx_lock(&tp->tun_mtx);
		*(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 1 : 0;
		mtx_unlock(&tp->tun_mtx);
		break;
	case TUNSIFMODE:
		/* deny this if UP */
		if (tp->tun_if.if_flags & IFF_UP)
			return (EBUSY);

		switch (*(int *)data & ~IFF_MULTICAST) {
		case IFF_POINTOPOINT:
		case IFF_BROADCAST:
			tp->tun_if.if_flags &=
			    ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST);
			tp->tun_if.if_flags |= *(int *)data;
			break;
		default:
			return (EINVAL);
		}
		break;
	case TUNSIFPID:
		mtx_lock(&tp->tun_mtx);
		tp->tun_pid = curthread->td_proc->p_pid;
		mtx_unlock(&tp->tun_mtx);
		break;
	case FIONBIO:
		break;
	case FIOASYNC:
		mtx_lock(&tp->tun_mtx);
		if (*(int *)data)
			tp->tun_flags |= TUN_ASYNC;
		else
			tp->tun_flags &= ~TUN_ASYNC;
		mtx_unlock(&tp->tun_mtx);
		break;
	case FIONREAD:
		s = splimp();
		if (!IFQ_IS_EMPTY(&tp->tun_if.if_snd)) {
			struct mbuf *mb;

			IFQ_LOCK(&tp->tun_if.if_snd);
			IFQ_POLL_NOLOCK(&tp->tun_if.if_snd, mb);
			for (*(int *)data = 0; mb != NULL; mb = mb->m_next)
				*(int *)data += mb->m_len;
			IFQ_UNLOCK(&tp->tun_if.if_snd);
		} else
			*(int *)data = 0;
		splx(s);
		break;
	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tun_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tun_sigio);
		return (0);

	/* This is deprecated, FIOSETOWN should be used instead. */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tun_sigio));

	/* This is deprecated, FIOGETOWN should be used instead. */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tun_sigio);
		return (0);

	default:
		return (ENOTTY);
	}
	return (0);
}

/*
 * The cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read.
 */
static	int
tunread(struct cdev *dev, struct uio *uio, int flag)
{
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*m;
	int		error = 0, len, s;

	TUNDEBUG(ifp, "read\n");
	mtx_lock(&tp->tun_mtx);
	if ((tp->tun_flags & TUN_READY) != TUN_READY) {
		mtx_unlock(&tp->tun_mtx);
		TUNDEBUG(ifp, "not ready 0%o\n", tp->tun_flags);
		return (EHOSTDOWN);
	}

	tp->tun_flags &= ~TUN_RWAIT;
	mtx_unlock(&tp->tun_mtx);

	s = splimp();
	do {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			mtx_lock(&tp->tun_mtx);
			tp->tun_flags |= TUN_RWAIT;
			mtx_unlock(&tp->tun_mtx);
			if ((error = tsleep(tp, PCATCH | (PZERO + 1),
			    "tunread", 0)) != 0) {
				splx(s);
				return (error);
			}
		}
	} while (m == NULL);
	splx(s);

	while (m && uio->uio_resid > 0 && error == 0) {
		len = min(uio->uio_resid, m->m_len);
		if (len != 0)
			error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	if (m) {
		TUNDEBUG(ifp, "Dropping mbuf\n");
		m_freem(m);
	}
	return (error);
}

/*
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
static	int
tunwrite(struct cdev *dev, struct uio *uio, int flag)
{
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	struct mbuf	*top, **mp, *m;
	int		error = 0, tlen, mlen;
	uint32_t	family;
	int		isr;

	TUNDEBUG(ifp, "tunwrite\n");

	if ((ifp->if_flags & IFF_UP) != IFF_UP)
		/* ignore silently */
		return (0);

	if (uio->uio_resid == 0)
		return (0);

	if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) {
		TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid);
		return (EIO);
	}
	tlen = uio->uio_resid;

	/* get a header mbuf */
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);
	mlen = MHLEN;

	top = NULL;
	mp = &top;
	while (error == 0 && uio->uio_resid > 0) {
		m->m_len = min(mlen, uio->uio_resid);
		error = uiomove(mtod(m, void *), m->m_len, uio);
		*mp = m;
		mp = &m->m_next;
		if (uio->uio_resid > 0) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				error = ENOBUFS;
				break;
			}
			mlen = MLEN;
		}
	}
	if (error) {
		if (top)
			m_freem(top);
		ifp->if_ierrors++;
		return (error);
	}

	top->m_pkthdr.len = tlen;
	top->m_pkthdr.rcvif = ifp;
#ifdef MAC
	mac_create_mbuf_from_ifnet(ifp, top);
#endif

	/* Could be unlocked read? */
	mtx_lock(&tp->tun_mtx);
	if (tp->tun_flags & TUN_IFHEAD) {
		mtx_unlock(&tp->tun_mtx);
		if (top->m_len < sizeof(family) &&
		    (top = m_pullup(top, sizeof(family))) == NULL)
			return (ENOBUFS);
		family = ntohl(*mtod(top, u_int32_t *));
		m_adj(top, sizeof(family));
	} else {
		mtx_unlock(&tp->tun_mtx);
		family = AF_INET;
	}

	BPF_MTAP2(ifp, &family, sizeof(family), top);

	switch (family) {
#ifdef INET
	case AF_INET:
		isr = NETISR_IP;
		break;
#endif
#ifdef INET6
	case AF_INET6:
		isr = NETISR_IPV6;
		break;
#endif
#ifdef IPX
	case AF_IPX:
		isr = NETISR_IPX;
		break;
#endif
#ifdef NETATALK
	case AF_APPLETALK:
		isr = NETISR_ATALK2;
		break;
#endif
	default:
		/* free the whole chain, not just the tail mbuf */
		m_freem(top);
		return (EAFNOSUPPORT);
	}
	/* First chunk of an mbuf contains good junk */
	if (harvest.point_to_point)
		random_harvest(m, 16, 3, 0, RANDOM_NET);
	ifp->if_ibytes += top->m_pkthdr.len;
	ifp->if_ipackets++;
	netisr_dispatch(isr, top);
	return (0);
}

/*
 * tunpoll - the poll interface; this is really only useful for reads.
 * The write detector always returns true: a write never blocks, it
 * either accepts the packet or drops it.
 */
static	int
tunpoll(struct cdev *dev, int events, struct thread *td)
{
	int		s;
	struct tun_softc *tp = dev->si_drv1;
	struct ifnet	*ifp = &tp->tun_if;
	int		revents = 0;
	struct mbuf	*m;

	s = splimp();
	TUNDEBUG(ifp, "tunpoll\n");

	if (events & (POLLIN | POLLRDNORM)) {
		IFQ_LOCK(&ifp->if_snd);
		IFQ_POLL_NOLOCK(&ifp->if_snd, m);
		if (m != NULL) {
			TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len);
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			TUNDEBUG(ifp, "tunpoll waiting\n");
			selrecord(td, &tp->tun_rsel);
		}
		IFQ_UNLOCK(&ifp->if_snd);
	}
	if (events & (POLLOUT | POLLWRNORM))
		revents |= events & (POLLOUT | POLLWRNORM);

	splx(s);
	return (revents);
}