/*-
 * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * BASED ON:
 * -------------------------------------------------------------------------
 *
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 */

/*
 * $FreeBSD: head/sys/net/if_tap.c 189866 2009-03-16 03:11:02Z scf $
 * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $
 */

#include "opt_compat.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/poll.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/queue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/if_types.h>

#include <netinet/in.h>

#include <net/if_tapvar.h>
#include <net/if_tap.h>


#define CDEV_NAME	"tap"
#define TAPDEBUG	if (tapdebug) printf

#define TAP		"tap"
#define VMNET		"vmnet"
#define TAPMAXUNIT	0x7fff
#define VMNET_DEV_MASK	CLONE_FLAG0
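/*
 * A vmnet device is a tap device with the CLONE_FLAG0 bit set in its
 * unit number; masking the unit with TAPMAXUNIT recovers the actual
 * interface unit (see tapcreate()).
 */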

/* module */
static int		tapmodevent(module_t, int, void *);

/* device */
static void		tapclone(void *, struct ucred *, char *, int,
			    struct cdev **);
static void		tapcreate(struct cdev *);

/* network interface */
static void		tapifstart(struct ifnet *);
static int		tapifioctl(struct ifnet *, u_long, caddr_t);
static void		tapifinit(void *);

static int		tap_clone_create(struct if_clone *, int, caddr_t);
static void		tap_clone_destroy(struct ifnet *);
static int		vmnet_clone_create(struct if_clone *, int, caddr_t);
static void		vmnet_clone_destroy(struct ifnet *);

IFC_SIMPLE_DECLARE(tap, 0);
IFC_SIMPLE_DECLARE(vmnet, 0);
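/*
 * The declarations above provide the tap_cloner and vmnet_cloner
 * descriptors that are attached/detached in tapmodevent() and back
 * "ifconfig tapN/vmnetN create" requests.
 */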

/* character device */
static d_open_t		tapopen;
static d_close_t	tapclose;
static d_read_t		tapread;
static d_write_t	tapwrite;
static d_ioctl_t	tapioctl;
static d_poll_t		tappoll;
static d_kqfilter_t	tapkqfilter;

/* kqueue(2) */
static int		tapkqread(struct knote *, long);
static int		tapkqwrite(struct knote *, long);
static void		tapkqdetach(struct knote *);

static struct filterops	tap_read_filterops = {
	.f_isfd =	1,
	.f_attach =	NULL,
	.f_detach =	tapkqdetach,
	.f_event =	tapkqread,
};

static struct filterops	tap_write_filterops = {
	.f_isfd =	1,
	.f_attach =	NULL,
	.f_detach =	tapkqdetach,
	.f_event =	tapkqwrite,
};

static struct cdevsw	tap_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT | D_NEEDMINOR,
	.d_open =	tapopen,
	.d_close =	tapclose,
	.d_read =	tapread,
	.d_write =	tapwrite,
	.d_ioctl =	tapioctl,
	.d_poll =	tappoll,
	.d_name =	CDEV_NAME,
	.d_kqfilter =	tapkqfilter,
};
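/*
 * D_NEEDGIANT: the character device entry points above are still called
 * with the Giant lock held; per-softc state is additionally protected by
 * tp->tap_mtx and the global device list by tapmtx.
 */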

/*
 * All global variables in if_tap.c are locked with tapmtx, with the
 * exception of tapdebug, which is accessed unlocked; tapclones is
 * static at runtime.
 */
static struct mtx		tapmtx;
static int			tapdebug = 0;        /* debug flag   */
static int			tapuopen = 0;        /* allow user open() */
static int			tapuponopen = 0;    /* IFF_UP on open() */
static int			tapdclone = 1;	/* enable devfs cloning */
static SLIST_HEAD(, tap_softc)	taphead;             /* first device */
static struct clonedevs 	*tapclones;

MALLOC_DECLARE(M_TAP);
MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface");
SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, "");

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW, 0,
    "Ethernet tunnel software network interface");
SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tapuopen, 0,
	"Allow user to open /dev/tap (based on node permissions)");
SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0,
	"Bring interface up when /dev/tap is opened");
SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RW, &tapdclone, 0,
	"Enable legacy devfs interface creation");
SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tapdebug, 0, "");

TUNABLE_INT("net.link.tap.devfs_cloning", &tapdclone);

DEV_MODULE(if_tap, tapmodevent, NULL);

static int
tap_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	struct cdev *dev;
	int i;
	int extra;

	if (strcmp(ifc->ifc_name, VMNET) == 0)
		extra = VMNET_DEV_MASK;
	else
		extra = 0;

	/* find any existing device, or allocate new unit number */
	i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, extra);
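	/*
	 * A non-zero return means no cdev exists for this unit yet, so
	 * create and reference a fresh one here.
	 */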
	if (i) {
		dev = make_dev(&tap_cdevsw, unit | extra,
		     UID_ROOT, GID_WHEEL, 0600, "%s%d", ifc->ifc_name, unit);
		if (dev != NULL) {
			dev_ref(dev);
			dev->si_flags |= SI_CHEAPCLONE;
		}
	}

	tapcreate(dev);
	return (0);
}

/* vmnet devices are tap devices in disguise */
static int
vmnet_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
	return tap_clone_create(ifc, unit, params);
}

static void
tap_destroy(struct tap_softc *tp)
{
	struct ifnet *ifp = tp->tap_ifp;
	int s;

	/* Unlocked read. */
	KASSERT(!(tp->tap_flags & TAP_OPEN),
		("%s flags is out of sync", ifp->if_xname));

	knlist_destroy(&tp->tap_rsel.si_note);
	destroy_dev(tp->tap_dev);
	s = splimp();
	ether_ifdetach(ifp);
	if_free_type(ifp, IFT_ETHER);
	splx(s);

	mtx_destroy(&tp->tap_mtx);
	free(tp, M_TAP);
}

static void
tap_clone_destroy(struct ifnet *ifp)
{
	struct tap_softc *tp = ifp->if_softc;

	mtx_lock(&tapmtx);
	SLIST_REMOVE(&taphead, tp, tap_softc, tap_next);
	mtx_unlock(&tapmtx);
	tap_destroy(tp);
}

/* vmnet devices are tap devices in disguise */
static void
vmnet_clone_destroy(struct ifnet *ifp)
{
	tap_clone_destroy(ifp);
}

/*
 * tapmodevent
 *
 * module event handler
 */
static int
tapmodevent(module_t mod, int type, void *data)
{
	static eventhandler_tag	 eh_tag = NULL;
	struct tap_softc	*tp = NULL;
	struct ifnet		*ifp = NULL;

	switch (type) {
	case MOD_LOAD:

		/* initialize device */

		mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF);
		SLIST_INIT(&taphead);

		clone_setup(&tapclones);
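		/*
		 * The dev_clone handler (tapclone) creates /dev/tapN and
		 * /dev/vmnetN nodes on demand at devfs lookup time, while
		 * interface cloning via ifconfig goes through the if_clone
		 * handlers attached just below.
		 */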
		eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000);
		if (eh_tag == NULL) {
			clone_cleanup(&tapclones);
			mtx_destroy(&tapmtx);
			return (ENOMEM);
		}
		if_clone_attach(&tap_cloner);
		if_clone_attach(&vmnet_cloner);
		return (0);

	case MOD_UNLOAD:
		/*
		 * The EBUSY algorithm here can't quite atomically
		 * guarantee that this is race-free since we have to
		 * release the tap mtx to deregister the clone handler.
		 */
		mtx_lock(&tapmtx);
		SLIST_FOREACH(tp, &taphead, tap_next) {
			mtx_lock(&tp->tap_mtx);
			if (tp->tap_flags & TAP_OPEN) {
				mtx_unlock(&tp->tap_mtx);
				mtx_unlock(&tapmtx);
				return (EBUSY);
			}
			mtx_unlock(&tp->tap_mtx);
		}
		mtx_unlock(&tapmtx);

		EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
		if_clone_detach(&tap_cloner);
		if_clone_detach(&vmnet_cloner);

		mtx_lock(&tapmtx);
		while ((tp = SLIST_FIRST(&taphead)) != NULL) {
			SLIST_REMOVE_HEAD(&taphead, tap_next);
			mtx_unlock(&tapmtx);

			ifp = tp->tap_ifp;

			TAPDEBUG("detaching %s\n", ifp->if_xname);

			tap_destroy(tp);
			mtx_lock(&tapmtx);
		}
		mtx_unlock(&tapmtx);
		clone_cleanup(&tapclones);

		mtx_destroy(&tapmtx);

		break;

	default:
		return (EOPNOTSUPP);
	}

	return (0);
} /* tapmodevent */


/*
 * DEVFS handler
 *
 * We need to support two kinds of devices - tap and vmnet
 */
static void
tapclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev)
{
	char		devname[SPECNAMELEN + 1];
	int		i, unit, append_unit;
	int		extra;

	if (*dev != NULL)
		return;

	if (!tapdclone ||
	    (!tapuopen && priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0))
		return;

	unit = 0;
	append_unit = 0;
	extra = 0;

	/* We're interested in only tap/vmnet devices. */
	if (strcmp(name, TAP) == 0) {
		unit = -1;
	} else if (strcmp(name, VMNET) == 0) {
		unit = -1;
		extra = VMNET_DEV_MASK;
	} else if (dev_stdclone(name, NULL, TAP, &unit) != 1) {
		if (dev_stdclone(name, NULL, VMNET, &unit) != 1) {
			return;
		} else {
			extra = VMNET_DEV_MASK;
		}
	}

	if (unit == -1)
		append_unit = 1;

	/* find any existing device, or allocate new unit number */
	i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra);
	if (i) {
		if (append_unit) {
			/*
			 * We were passed 'tap' or 'vmnet', with no unit
			 * specified, so we'll need to append it now.
			 */
			namelen = snprintf(devname, sizeof(devname), "%s%d", name,
			    unit);
			name = devname;
		}

		*dev = make_dev(&tap_cdevsw, unit | extra,
		     UID_ROOT, GID_WHEEL, 0600, "%s", name);
		if (*dev != NULL) {
			dev_ref(*dev);
			(*dev)->si_flags |= SI_CHEAPCLONE;
		}
	}

	if_clone_create(name, namelen, NULL);
} /* tapclone */


/*
 * tapcreate
 *
 * to create interface
 */
static void
tapcreate(struct cdev *dev)
{
	struct ifnet		*ifp = NULL;
	struct tap_softc	*tp = NULL;
	unsigned short		 macaddr_hi;
	uint32_t		 macaddr_mid;
	int			 unit, s;
	char			*name = NULL;
	u_char			eaddr[6];

	dev->si_flags &= ~SI_CHEAPCLONE;

	/* allocate driver storage and create device */
	tp = malloc(sizeof(*tp), M_TAP, M_WAITOK | M_ZERO);
	mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF);
	mtx_lock(&tapmtx);
	SLIST_INSERT_HEAD(&taphead, tp, tap_next);
	mtx_unlock(&tapmtx);

	unit = dev2unit(dev);

	/* select device: tap or vmnet */
	if (unit & VMNET_DEV_MASK) {
		name = VMNET;
		tp->tap_flags |= TAP_VMNET;
	} else
		name = TAP;

	unit &= TAPMAXUNIT;

	TAPDEBUG("tapcreate(%s%d). minor = %#x\n", name, unit, dev2unit(dev));

	/* generate fake MAC address: 00 bd xx xx xx unit_no */
	macaddr_hi = htons(0x00bd);
	macaddr_mid = (uint32_t) ticks;
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
	eaddr[5] = (u_char)unit;
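	/*
	 * The ticks value fills eaddr[2..5]; the last byte is then
	 * overwritten with the unit number, so the address is stable per
	 * unit within a boot but differs across reboots.
	 */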

	/* fill the rest and attach interface */
	ifp = tp->tap_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s%d: can not if_alloc()", name, unit);
	ifp->if_softc = tp;
	if_initname(ifp, name, unit);
	ifp->if_init = tapifinit;
	ifp->if_start = tapifstart;
	ifp->if_ioctl = tapifioctl;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;

	dev->si_drv1 = tp;
	tp->tap_dev = dev;

	s = splimp();
	ether_ifattach(ifp, eaddr);
	splx(s);

	mtx_lock(&tp->tap_mtx);
	tp->tap_flags |= TAP_INITED;
	mtx_unlock(&tp->tap_mtx);

	knlist_init(&tp->tap_rsel.si_note, NULL, NULL, NULL, NULL);

	TAPDEBUG("interface %s is created. minor = %#x\n",
		ifp->if_xname, dev2unit(dev));
} /* tapcreate */


/*
 * tapopen
 *
 * to open tunnel. must be superuser
 */
static int
tapopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct tap_softc	*tp = NULL;
	struct ifnet		*ifp = NULL;
	int			 error, s;

	if (tapuopen == 0) {
		error = priv_check(td, PRIV_NET_TAP);
		if (error)
			return (error);
	}

	if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT)
		return (ENXIO);

	tp = dev->si_drv1;

	mtx_lock(&tp->tap_mtx);
	if (tp->tap_flags & TAP_OPEN) {
		mtx_unlock(&tp->tap_mtx);
		return (EBUSY);
	}

	bcopy(IF_LLADDR(tp->tap_ifp), tp->ether_addr, sizeof(tp->ether_addr));
	tp->tap_pid = td->td_proc->p_pid;
	tp->tap_flags |= TAP_OPEN;
	ifp = tp->tap_ifp;
	mtx_unlock(&tp->tap_mtx);

	s = splimp();
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (tapuponopen)
		ifp->if_flags |= IFF_UP;
	splx(s);

	TAPDEBUG("%s is open. minor = %#x\n", ifp->if_xname, dev2unit(dev));

	return (0);
} /* tapopen */


/*
 * tapclose
 *
 * close the device - mark i/f down & delete routing info
 */
static int
tapclose(struct cdev *dev, int foo, int bar, struct thread *td)
{
	struct ifaddr		*ifa;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	int			s;

	/* junk all pending output */
	IF_DRAIN(&ifp->if_snd);

	/*
	 * do not bring the interface down, and do not do anything with
	 * the interface, if we are in VMnet mode. just close the device.
	 */

	mtx_lock(&tp->tap_mtx);
	if (((tp->tap_flags & TAP_VMNET) == 0) && (ifp->if_flags & IFF_UP)) {
		mtx_unlock(&tp->tap_mtx);
		s = splimp();
		if_down(ifp);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				rtinit(ifa, (int)RTM_DELETE, 0);
			}
			if_purgeaddrs(ifp);
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
		}
		splx(s);
	} else
		mtx_unlock(&tp->tap_mtx);

	funsetown(&tp->tap_sigio);
	selwakeuppri(&tp->tap_rsel, PZERO+1);
	KNOTE_UNLOCKED(&tp->tap_rsel.si_note, 0);

	mtx_lock(&tp->tap_mtx);
	tp->tap_flags &= ~TAP_OPEN;
	tp->tap_pid = 0;
	mtx_unlock(&tp->tap_mtx);

	TAPDEBUG("%s is closed. minor = %#x\n",
		ifp->if_xname, dev2unit(dev));

	return (0);
} /* tapclose */


/*
 * tapifinit
 *
 * network interface initialization function
 */
static void
tapifinit(void *xtp)
{
	struct tap_softc	*tp = (struct tap_softc *)xtp;
	struct ifnet		*ifp = tp->tap_ifp;

	TAPDEBUG("initializing %s\n", ifp->if_xname);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* attempt to start output */
	tapifstart(ifp);
} /* tapifinit */


/*
 * tapifioctl
 *
 * Process an ioctl request on network interface
 */
static int
tapifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct tap_softc	*tp = ifp->if_softc;
	struct ifreq		*ifr = (struct ifreq *)data;
	struct ifstat		*ifs = NULL;
	int			 s, dummy;

	switch (cmd) {
		case SIOCSIFFLAGS: /* XXX -- just like vmnet does */
		case SIOCADDMULTI:
		case SIOCDELMULTI:
			break;

		case SIOCSIFMTU:
			s = splimp();
			ifp->if_mtu = ifr->ifr_mtu;
			splx(s);
			break;

		case SIOCGIFSTATUS:
			s = splimp();
			ifs = (struct ifstat *)data;
			dummy = strlen(ifs->ascii);
			mtx_lock(&tp->tap_mtx);
			if (tp->tap_pid != 0 && dummy < sizeof(ifs->ascii))
				snprintf(ifs->ascii + dummy,
					sizeof(ifs->ascii) - dummy,
					"\tOpened by PID %d\n", tp->tap_pid);
			mtx_unlock(&tp->tap_mtx);
			splx(s);
			break;

		default:
			s = splimp();
			dummy = ether_ioctl(ifp, cmd, data);
			splx(s);
			return (dummy);
			/* NOT REACHED */
	}

	return (0);
} /* tapifioctl */


/*
 * tapifstart
 *
 * queue packets from higher level ready to put out
 */
static void
tapifstart(struct ifnet *ifp)
{
	struct tap_softc	*tp = ifp->if_softc;
	int			 s;

	TAPDEBUG("%s starting\n", ifp->if_xname);

	/*
	 * do not junk pending output if we are in VMnet mode.
	 * XXX: can this do any harm because of queue overflow?
	 */

	mtx_lock(&tp->tap_mtx);
	if (((tp->tap_flags & TAP_VMNET) == 0) &&
	    ((tp->tap_flags & TAP_READY) != TAP_READY)) {
		struct mbuf	*m = NULL;

		mtx_unlock(&tp->tap_mtx);

		/* Unlocked read. */
		TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname,
		    tp->tap_flags);

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				m_freem(m);
			ifp->if_oerrors ++;
		} while (m != NULL);
		splx(s);

		return;
	}
	mtx_unlock(&tp->tap_mtx);

	s = splimp();
	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

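	/*
	 * Packets are not transmitted here; they stay on if_snd until a
	 * userland reader picks them up via read(2), so "starting" just
	 * means waking up anyone waiting on the queue.
	 */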
	if (ifp->if_snd.ifq_len != 0) {
		mtx_lock(&tp->tap_mtx);
		if (tp->tap_flags & TAP_RWAIT) {
			tp->tap_flags &= ~TAP_RWAIT;
			wakeup(tp);
		}

		if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) {
			mtx_unlock(&tp->tap_mtx);
			pgsigio(&tp->tap_sigio, SIGIO, 0);
		} else
			mtx_unlock(&tp->tap_mtx);

		selwakeuppri(&tp->tap_rsel, PZERO+1);
		KNOTE_UNLOCKED(&tp->tap_rsel.si_note, 0);
		ifp->if_opackets ++; /* obytes are counted in ether_output */
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	splx(s);
} /* tapifstart */


/*
 * tapioctl
 *
 * the cdevsw interface is now pretty minimal
 */
static int
tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	struct tapinfo		*tapp = NULL;
	int			 s;
	int			 f;
#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4)
	int			 ival;
#endif

	switch (cmd) {
		case TAPSIFINFO:
			s = splimp();
			tapp = (struct tapinfo *)data;
			ifp->if_mtu = tapp->mtu;
			ifp->if_type = tapp->type;
			ifp->if_baudrate = tapp->baudrate;
			splx(s);
			break;

		case TAPGIFINFO:
			tapp = (struct tapinfo *)data;
			tapp->mtu = ifp->if_mtu;
			tapp->type = ifp->if_type;
			tapp->baudrate = ifp->if_baudrate;
			break;

		case TAPSDEBUG:
			tapdebug = *(int *)data;
			break;

		case TAPGDEBUG:
			*(int *)data = tapdebug;
			break;

		case TAPGIFNAME: {
			struct ifreq	*ifr = (struct ifreq *) data;

			strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
			} break;

		case FIONBIO:
			break;

		case FIOASYNC:
			s = splimp();
			mtx_lock(&tp->tap_mtx);
			if (*(int *)data)
				tp->tap_flags |= TAP_ASYNC;
			else
				tp->tap_flags &= ~TAP_ASYNC;
			mtx_unlock(&tp->tap_mtx);
			splx(s);
			break;

		case FIONREAD:
			s = splimp();
			if (ifp->if_snd.ifq_head) {
				struct mbuf	*mb = ifp->if_snd.ifq_head;

				for(*(int *)data = 0;mb != NULL;mb = mb->m_next)
					*(int *)data += mb->m_len;
			} else
				*(int *)data = 0;
			splx(s);
			break;

		case FIOSETOWN:
			return (fsetown(*(int *)data, &tp->tap_sigio));

		case FIOGETOWN:
			*(int *)data = fgetown(&tp->tap_sigio);
			return (0);

		/* this is deprecated, FIOSETOWN should be used instead */
		case TIOCSPGRP:
			return (fsetown(-(*(int *)data), &tp->tap_sigio));

		/* this is deprecated, FIOGETOWN should be used instead */
		case TIOCGPGRP:
			*(int *)data = -fgetown(&tp->tap_sigio);
			return (0);

		/* VMware/VMnet port ioctl's */

		case SIOCGIFFLAGS:	/* get ifnet flags */
			bcopy(&ifp->if_flags, data, sizeof(ifp->if_flags));
			break;

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
    defined(COMPAT_FREEBSD4)
		case _IO('V', 0):
			ival = IOCPARM_IVAL(data);
			data = (caddr_t)&ival;
			/* FALLTHROUGH */
#endif
		case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
			f = *(int *)data;
			f &= 0x0fff;
			f &= ~IFF_CANTCHANGE;
			f |= IFF_UP;

			s = splimp();
			ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE);
			splx(s);
			break;

		case OSIOCGIFADDR:	/* get MAC address of the remote side */
		case SIOCGIFADDR:
			mtx_lock(&tp->tap_mtx);
			bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
			mtx_unlock(&tp->tap_mtx);
			break;

		case SIOCSIFADDR:	/* set MAC address of the remote side */
			mtx_lock(&tp->tap_mtx);
			bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
			mtx_unlock(&tp->tap_mtx);
			break;

		default:
			return (ENOTTY);
	}
	return (0);
} /* tapioctl */


/*
 * tapread
 *
 * the cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read
 */
static int
tapread(struct cdev *dev, struct uio *uio, int flag)
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	struct mbuf		*m = NULL;
	int			 error = 0, len, s;

	TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, dev2unit(dev));

	mtx_lock(&tp->tap_mtx);
	if ((tp->tap_flags & TAP_READY) != TAP_READY) {
		mtx_unlock(&tp->tap_mtx);

		/* Unlocked read. */
		TAPDEBUG("%s not ready. minor = %#x, tap_flags = 0x%x\n",
			ifp->if_xname, dev2unit(dev), tp->tap_flags);

		return (EHOSTDOWN);
	}

	tp->tap_flags &= ~TAP_RWAIT;
	mtx_unlock(&tp->tap_mtx);

	/* sleep until we get a packet */
	do {
		s = splimp();
		IF_DEQUEUE(&ifp->if_snd, m);
		splx(s);

		if (m == NULL) {
			if (flag & O_NONBLOCK)
				return (EWOULDBLOCK);

			mtx_lock(&tp->tap_mtx);
			tp->tap_flags |= TAP_RWAIT;
			mtx_unlock(&tp->tap_mtx);
			error = tsleep(tp,PCATCH|(PZERO+1),"taprd",0);
			if (error)
				return (error);
		}
	} while (m == NULL);

	/* feed packet to bpf */
	BPF_MTAP(ifp, m);

	/* xfer packet to user space */
	while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) {
		len = min(uio->uio_resid, m->m_len);
		if (len == 0)
			break;

		error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

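	/*
	 * If the user buffer was too small for the whole frame, whatever
	 * is left of the mbuf chain is dropped (the frame is truncated).
	 */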
	if (m != NULL) {
		TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname,
			dev2unit(dev));
		m_freem(m);
	}

	return (error);
} /* tapread */


/*
 * tapwrite
 *
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
static int
tapwrite(struct cdev *dev, struct uio *uio, int flag)
{
	struct ether_header	*eh;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	struct mbuf		*m;

	TAPDEBUG("%s writing, minor = %#x\n",
		ifp->if_xname, dev2unit(dev));

	if (uio->uio_resid == 0)
		return (0);

	if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) {
		TAPDEBUG("%s invalid packet len = %d, minor = %#x\n",
			ifp->if_xname, uio->uio_resid, dev2unit(dev));

		return (EIO);
	}

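	/*
	 * ETHER_ALIGN leaves two bytes of leading pad so that the payload
	 * following the 14-byte Ethernet header ends up 32-bit aligned.
	 */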
	if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, ETHER_ALIGN,
	    M_PKTHDR)) == NULL) {
		ifp->if_ierrors ++;
		return (ENOBUFS);
	}

	m->m_pkthdr.rcvif = ifp;

	/*
	 * Only pass a unicast frame to ether_input(), if it would actually
	 * have been received by non-virtual hardware.
	 */
	if (m->m_len < sizeof(struct ether_header)) {
		m_freem(m);
		return (0);
	}
	eh = mtod(m, struct ether_header *);

	if (eh && (ifp->if_flags & IFF_PROMISC) == 0 &&
	    !ETHER_IS_MULTICAST(eh->ether_dhost) &&
	    bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) {
		m_freem(m);
		return (0);
	}

	/* Pass packet up to parent. */
	(*ifp->if_input)(ifp, m);
	ifp->if_ipackets ++; /* ibytes are counted in parent */

	return (0);
} /* tapwrite */


/*
 * tappoll
 *
 * the poll interface, this is only useful on reads
 * really. the write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it
 */
static int
tappoll(struct cdev *dev, int events, struct thread *td)
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	int			 s, revents = 0;

	TAPDEBUG("%s polling, minor = %#x\n",
		ifp->if_xname, dev2unit(dev));

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (ifp->if_snd.ifq_len > 0) {
			TAPDEBUG("%s have data in queue. len = %d, " \
				"minor = %#x\n", ifp->if_xname,
				ifp->if_snd.ifq_len, dev2unit(dev));

			revents |= (events & (POLLIN | POLLRDNORM));
		} else {
			TAPDEBUG("%s waiting for data, minor = %#x\n",
				ifp->if_xname, dev2unit(dev));

			selrecord(td, &tp->tap_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= (events & (POLLOUT | POLLWRNORM));

	splx(s);
	return (revents);
} /* tappoll */


/*
 * tap_kqfilter
 *
 * support for kevent() system call
 */
static int
tapkqfilter(struct cdev *dev, struct knote *kn)
{
	int			 s;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;

	s = splimp();
	switch (kn->kn_filter) {
	case EVFILT_READ:
		TAPDEBUG("%s kqfilter: EVFILT_READ, minor = %#x\n",
			ifp->if_xname, dev2unit(dev));
		kn->kn_fop = &tap_read_filterops;
		break;

	case EVFILT_WRITE:
		TAPDEBUG("%s kqfilter: EVFILT_WRITE, minor = %#x\n",
			ifp->if_xname, dev2unit(dev));
		kn->kn_fop = &tap_write_filterops;
		break;

	default:
		TAPDEBUG("%s kqfilter: invalid filter, minor = %#x\n",
			ifp->if_xname, dev2unit(dev));
		splx(s);
		return (EINVAL);
		/* NOT REACHED */
	}
	splx(s);

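	/*
	 * Both read and write knotes hang off the same knlist
	 * (tap_rsel.si_note); the write filter is always ready anyway.
	 */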
	kn->kn_hook = (caddr_t) dev;
	knlist_add(&tp->tap_rsel.si_note, kn, 0);

	return (0);
} /* tapkqfilter */


/*
 * tap_kqread
 *
 * Return true if there is data in the interface queue
 */
static int
tapkqread(struct knote *kn, long hint)
{
	int			 ret, s;
	struct cdev		*dev = (struct cdev *)(kn->kn_hook);
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;

	s = splimp();
	if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) {
		TAPDEBUG("%s have data in queue. len = %d, minor = %#x\n",
			ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
		ret = 1;
	} else {
		TAPDEBUG("%s waiting for data, minor = %#x\n",
			ifp->if_xname, dev2unit(dev));
		ret = 0;
	}
	splx(s);

	return (ret);
} /* tapkqread */


/*
 * tap_kqwrite
 *
 * Always can write. Return the MTU in kn->data
 */
static int
tapkqwrite(struct knote *kn, long hint)
{
	int			 s;
	struct tap_softc	*tp = ((struct cdev *) kn->kn_hook)->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;

	s = splimp();
	kn->kn_data = ifp->if_mtu;
	splx(s);

	return (1);
} /* tapkqwrite */


static void
tapkqdetach(struct knote *kn)
{
	struct tap_softc	*tp = ((struct cdev *) kn->kn_hook)->si_drv1;

	knlist_remove(&tp->tap_rsel.si_note, kn, 0);
} /* tapkqdetach */

