if_tap.c revision 260394
1/*-
2 * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * BASED ON:
27 * -------------------------------------------------------------------------
28 *
29 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
30 * Nottingham University 1987.
31 */
32
33/*
34 * $FreeBSD: head/sys/net/if_tap.c 260394 2014-01-07 15:59:33Z melifaro $
35 * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $
36 */
37
38#include "opt_compat.h"
39#include "opt_inet.h"
40
41#include <sys/param.h>
42#include <sys/conf.h>
43#include <sys/fcntl.h>
44#include <sys/filio.h>
45#include <sys/jail.h>
46#include <sys/kernel.h>
47#include <sys/malloc.h>
48#include <sys/mbuf.h>
49#include <sys/module.h>
50#include <sys/poll.h>
51#include <sys/priv.h>
52#include <sys/proc.h>
53#include <sys/selinfo.h>
54#include <sys/signalvar.h>
55#include <sys/socket.h>
56#include <sys/sockio.h>
57#include <sys/sysctl.h>
58#include <sys/systm.h>
59#include <sys/ttycom.h>
60#include <sys/uio.h>
61#include <sys/queue.h>
62
63#include <net/bpf.h>
64#include <net/ethernet.h>
65#include <net/if.h>
66#include <net/if_var.h>
67#include <net/if_clone.h>
68#include <net/if_dl.h>
69#include <net/if_media.h>
70#include <net/if_types.h>
71#include <net/route.h>
72#include <net/vnet.h>
73
74#include <netinet/in.h>
75
76#include <net/if_tapvar.h>
77#include <net/if_tap.h>
78
79
80#define CDEV_NAME	"tap"
81#define TAPDEBUG	if (tapdebug) printf
82
83static const char tapname[] = "tap";
84static const char vmnetname[] = "vmnet";
85#define TAPMAXUNIT	0x7fff
86#define VMNET_DEV_MASK	CLONE_FLAG0
87
88/* module */
89static int		tapmodevent(module_t, int, void *);
90
91/* device */
92static void		tapclone(void *, struct ucred *, char *, int,
93			    struct cdev **);
94static void		tapcreate(struct cdev *);
95
96/* network interface */
97static void		tapifstart(struct ifnet *);
98static int		tapifioctl(struct ifnet *, u_long, caddr_t);
99static void		tapifinit(void *);
100
101static int		tap_clone_create(struct if_clone *, int, caddr_t);
102static void		tap_clone_destroy(struct ifnet *);
103static struct if_clone *tap_cloner;
104static int		vmnet_clone_create(struct if_clone *, int, caddr_t);
105static void		vmnet_clone_destroy(struct ifnet *);
106static struct if_clone *vmnet_cloner;
107
108/* character device */
109static d_open_t		tapopen;
110static d_close_t	tapclose;
111static d_read_t		tapread;
112static d_write_t	tapwrite;
113static d_ioctl_t	tapioctl;
114static d_poll_t		tappoll;
115static d_kqfilter_t	tapkqfilter;
116
117/* kqueue(2) */
118static int		tapkqread(struct knote *, long);
119static int		tapkqwrite(struct knote *, long);
120static void		tapkqdetach(struct knote *);
121
122static struct filterops	tap_read_filterops = {
123	.f_isfd =	1,
124	.f_attach =	NULL,
125	.f_detach =	tapkqdetach,
126	.f_event =	tapkqread,
127};
128
129static struct filterops	tap_write_filterops = {
130	.f_isfd =	1,
131	.f_attach =	NULL,
132	.f_detach =	tapkqdetach,
133	.f_event =	tapkqwrite,
134};
135
136static struct cdevsw	tap_cdevsw = {
137	.d_version =	D_VERSION,
138	.d_flags =	D_NEEDMINOR,
139	.d_open =	tapopen,
140	.d_close =	tapclose,
141	.d_read =	tapread,
142	.d_write =	tapwrite,
143	.d_ioctl =	tapioctl,
144	.d_poll =	tappoll,
145	.d_name =	CDEV_NAME,
146	.d_kqfilter =	tapkqfilter,
147};
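
/*
 * A minimal userland sketch (illustrative only, not part of the driver;
 * error handling omitted and the unit name "tap0" is just an example) of
 * how these entry points are normally exercised:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/socket.h>
 *	#include <net/if.h>
 *	#include <net/if_tap.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/tap0", O_RDWR);	// tapopen()
 *	struct ifreq ifr;
 *	ioctl(fd, TAPGIFNAME, &ifr);		// tapioctl(): interface name
 *	char frame[TAPMRU];
 *	ssize_t n = read(fd, frame, sizeof(frame)); // tapread(): one frame
 *	(void)write(fd, frame, (size_t)n);	// tapwrite(): one full frame
 *	close(fd);				// tapclose()
 */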
148
149/*
150 * All global variables in if_tap.c are locked with tapmtx, with the
151 * exception of tapdebug, which is accessed unlocked; tapclones is
152 * static at runtime.
153 */
154static struct mtx		tapmtx;
155static int			tapdebug = 0;        /* debug flag   */
156static int			tapuopen = 0;        /* allow user open() */
157static int			tapuponopen = 0;    /* IFF_UP on open() */
158static int			tapdclone = 1;	/* enable devfs cloning */
159static SLIST_HEAD(, tap_softc)	taphead;             /* first device */
160static struct clonedevs 	*tapclones;
161
162MALLOC_DECLARE(M_TAP);
163MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface");
164SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, "");
165
166SYSCTL_DECL(_net_link);
167static SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW, 0,
168    "Ethernet tunnel software network interface");
169SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tapuopen, 0,
170	"Allow user to open /dev/tap (based on node permissions)");
171SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0,
172	"Bring interface up when /dev/tap is opened");
173SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RW, &tapdclone, 0,
174	"Enable legacy devfs interface creation");
175SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tapdebug, 0, "");
176
177TUNABLE_INT("net.link.tap.devfs_cloning", &tapdclone);
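
/*
 * These knobs are normally adjusted from userland, e.g.
 *
 *	sysctl net.link.tap.user_open=1		(permit non-root open())
 *	sysctl net.link.tap.up_on_open=1	(set IFF_UP on open())
 *
 * and, being a tunable as well, net.link.tap.devfs_cloning may also be
 * preset from /boot/loader.conf.
 */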
178
179DEV_MODULE(if_tap, tapmodevent, NULL);
180
181static int
182tap_clone_create(struct if_clone *ifc, int unit, caddr_t params)
183{
184	struct cdev *dev;
185	int i;
186
187	/* Find any existing device, or allocate new unit number. */
188	i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, 0);
189	if (i) {
190		dev = make_dev(&tap_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600,
191		    "%s%d", tapname, unit);
192	}
193
194	tapcreate(dev);
195	return (0);
196}
197
198/* vmnet devices are tap devices in disguise */
199static int
200vmnet_clone_create(struct if_clone *ifc, int unit, caddr_t params)
201{
202	struct cdev *dev;
203	int i;
204
205	/* Find any existing device, or allocate new unit number. */
206	i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, VMNET_DEV_MASK);
207	if (i) {
208		dev = make_dev(&tap_cdevsw, unit | VMNET_DEV_MASK, UID_ROOT,
209		    GID_WHEEL, 0600, "%s%d", vmnetname, unit);
210	}
211
212	tapcreate(dev);
213	return (0);
214}
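
/*
 * Both cloners are normally driven by ifconfig(8), e.g. "ifconfig tap0
 * create" or "ifconfig vmnet0 create"; the matching control device
 * (/dev/tap0, /dev/vmnet0) is created as a side effect via tapcreate().
 */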
215
216static void
217tap_destroy(struct tap_softc *tp)
218{
219	struct ifnet *ifp = tp->tap_ifp;
220
221	CURVNET_SET(ifp->if_vnet);
222	destroy_dev(tp->tap_dev);
223	seldrain(&tp->tap_rsel);
224	knlist_clear(&tp->tap_rsel.si_note, 0);
225	knlist_destroy(&tp->tap_rsel.si_note);
226	ether_ifdetach(ifp);
227	if_free(ifp);
228
229	mtx_destroy(&tp->tap_mtx);
230	free(tp, M_TAP);
231	CURVNET_RESTORE();
232}
233
234static void
235tap_clone_destroy(struct ifnet *ifp)
236{
237	struct tap_softc *tp = ifp->if_softc;
238
239	mtx_lock(&tapmtx);
240	SLIST_REMOVE(&taphead, tp, tap_softc, tap_next);
241	mtx_unlock(&tapmtx);
242	tap_destroy(tp);
243}
244
245/* vmnet devices are tap devices in disguise */
246static void
247vmnet_clone_destroy(struct ifnet *ifp)
248{
249	tap_clone_destroy(ifp);
250}
251
252/*
253 * tapmodevent
254 *
255 * module event handler
256 */
257static int
258tapmodevent(module_t mod, int type, void *data)
259{
260	static eventhandler_tag	 eh_tag = NULL;
261	struct tap_softc	*tp = NULL;
262	struct ifnet		*ifp = NULL;
263
264	switch (type) {
265	case MOD_LOAD:
266
267		/* initialize device */
268
269		mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF);
270		SLIST_INIT(&taphead);
271
272		clone_setup(&tapclones);
273		eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000);
274		if (eh_tag == NULL) {
275			clone_cleanup(&tapclones);
276			mtx_destroy(&tapmtx);
277			return (ENOMEM);
278		}
279		tap_cloner = if_clone_simple(tapname, tap_clone_create,
280		    tap_clone_destroy, 0);
281		vmnet_cloner = if_clone_simple(vmnetname, vmnet_clone_create,
282		    vmnet_clone_destroy, 0);
283		return (0);
284
285	case MOD_UNLOAD:
286		/*
287		 * The EBUSY algorithm here can't quite atomically
288		 * guarantee that this is race-free since we have to
289		 * release the tap mtx to deregister the clone handler.
290		 */
291		mtx_lock(&tapmtx);
292		SLIST_FOREACH(tp, &taphead, tap_next) {
293			mtx_lock(&tp->tap_mtx);
294			if (tp->tap_flags & TAP_OPEN) {
295				mtx_unlock(&tp->tap_mtx);
296				mtx_unlock(&tapmtx);
297				return (EBUSY);
298			}
299			mtx_unlock(&tp->tap_mtx);
300		}
301		mtx_unlock(&tapmtx);
302
303		EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
304		if_clone_detach(tap_cloner);
305		if_clone_detach(vmnet_cloner);
306		drain_dev_clone_events();
307
308		mtx_lock(&tapmtx);
309		while ((tp = SLIST_FIRST(&taphead)) != NULL) {
310			SLIST_REMOVE_HEAD(&taphead, tap_next);
311			mtx_unlock(&tapmtx);
312
313			ifp = tp->tap_ifp;
314
315			TAPDEBUG("detaching %s\n", ifp->if_xname);
316
317			tap_destroy(tp);
318			mtx_lock(&tapmtx);
319		}
320		mtx_unlock(&tapmtx);
321		clone_cleanup(&tapclones);
322
323		mtx_destroy(&tapmtx);
324
325		break;
326
327	default:
328		return (EOPNOTSUPP);
329	}
330
331	return (0);
332} /* tapmodevent */
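
/*
 * The module is normally handled with kldload(8)/kldunload(8); per the
 * MOD_UNLOAD case above, "kldunload if_tap" fails with EBUSY for as long
 * as any tap or vmnet control device is held open.
 */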
333
334
335/*
336 * DEVFS handler
337 *
338 * We need to support two kinds of devices: tap and vmnet.
339 */
340static void
341tapclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev)
342{
343	char		devname[SPECNAMELEN + 1];
344	int		i, unit, append_unit;
345	int		extra;
346
347	if (*dev != NULL)
348		return;
349
350	if (!tapdclone ||
351	    (!tapuopen && priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0))
352		return;
353
354	unit = 0;
355	append_unit = 0;
356	extra = 0;
357
358	/* We're interested in only tap/vmnet devices. */
359	if (strcmp(name, tapname) == 0) {
360		unit = -1;
361	} else if (strcmp(name, vmnetname) == 0) {
362		unit = -1;
363		extra = VMNET_DEV_MASK;
364	} else if (dev_stdclone(name, NULL, tapname, &unit) != 1) {
365		if (dev_stdclone(name, NULL, vmnetname, &unit) != 1) {
366			return;
367		} else {
368			extra = VMNET_DEV_MASK;
369		}
370	}
371
372	if (unit == -1)
373		append_unit = 1;
374
375	CURVNET_SET(CRED_TO_VNET(cred));
376	/* find any existing device, or allocate new unit number */
377	i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra);
378	if (i) {
379		if (append_unit) {
380			/*
381			 * We were passed 'tap' or 'vmnet' with no unit specified,
382			 * so we'll need to append it now.
383			 */
384			namelen = snprintf(devname, sizeof(devname), "%s%d", name,
385			    unit);
386			name = devname;
387		}
388
389		*dev = make_dev_credf(MAKEDEV_REF, &tap_cdevsw, unit | extra,
390		     cred, UID_ROOT, GID_WHEEL, 0600, "%s", name);
391	}
392
393	if_clone_create(name, namelen, NULL);
394	CURVNET_RESTORE();
395} /* tapclone */
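
/*
 * With net.link.tap.devfs_cloning enabled (and, for unprivileged users,
 * net.link.tap.user_open), simply opening /dev/tap or /dev/vmnet yields
 * the next free unit, while opening /dev/tapN (e.g. /dev/tap5) creates
 * that specific unit; in both cases the matching network interface is
 * created through if_clone_create() above.
 */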
396
397
398/*
399 * tapcreate
400 *
401 * create the network interface for the given control device
402 */
403static void
404tapcreate(struct cdev *dev)
405{
406	struct ifnet		*ifp = NULL;
407	struct tap_softc	*tp = NULL;
408	unsigned short		 macaddr_hi;
409	uint32_t		 macaddr_mid;
410	int			 unit;
411	const char		*name = NULL;
412	u_char			eaddr[6];
413
414	/* allocate driver storage and create device */
415	tp = malloc(sizeof(*tp), M_TAP, M_WAITOK | M_ZERO);
416	mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF);
417	mtx_lock(&tapmtx);
418	SLIST_INSERT_HEAD(&taphead, tp, tap_next);
419	mtx_unlock(&tapmtx);
420
421	unit = dev2unit(dev);
422
423	/* select device: tap or vmnet */
424	if (unit & VMNET_DEV_MASK) {
425		name = vmnetname;
426		tp->tap_flags |= TAP_VMNET;
427	} else
428		name = tapname;
429
430	unit &= TAPMAXUNIT;
431
432	TAPDEBUG("tapcreate(%s%d). minor = %#x\n", name, unit, dev2unit(dev));
433
434	/* generate fake MAC address: 00 bd xx xx xx unit_no */
435	macaddr_hi = htons(0x00bd);
436	macaddr_mid = (uint32_t) ticks;
437	bcopy(&macaddr_hi, eaddr, sizeof(short));
438	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
439	eaddr[5] = (u_char)unit;
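
	/*
	 * The resulting address is 00:bd:<three bytes taken from the ticks
	 * counter>:<unit>; it only has to look plausible, uniqueness is not
	 * guaranteed.
	 */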
440
441	/* fill the rest and attach interface */
442	ifp = tp->tap_ifp = if_alloc(IFT_ETHER);
443	if (ifp == NULL)
444		panic("%s%d: can not if_alloc()", name, unit);
445	ifp->if_softc = tp;
446	if_initname(ifp, name, unit);
447	ifp->if_init = tapifinit;
448	ifp->if_start = tapifstart;
449	ifp->if_ioctl = tapifioctl;
450	ifp->if_mtu = ETHERMTU;
451	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
452	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
453	ifp->if_capabilities |= IFCAP_LINKSTATE;
454	ifp->if_capenable |= IFCAP_LINKSTATE;
455
456	dev->si_drv1 = tp;
457	tp->tap_dev = dev;
458
459	ether_ifattach(ifp, eaddr);
460
461	mtx_lock(&tp->tap_mtx);
462	tp->tap_flags |= TAP_INITED;
463	mtx_unlock(&tp->tap_mtx);
464
465	knlist_init_mtx(&tp->tap_rsel.si_note, &tp->tap_mtx);
466
467	TAPDEBUG("interface %s is created. minor = %#x\n",
468		ifp->if_xname, dev2unit(dev));
469} /* tapcreate */
470
471
472/*
473 * tapopen
474 *
475 * open the tap device; must be superuser unless net.link.tap.user_open is set
476 */
477static int
478tapopen(struct cdev *dev, int flag, int mode, struct thread *td)
479{
480	struct tap_softc	*tp = NULL;
481	struct ifnet		*ifp = NULL;
482	int			 error;
483
484	if (tapuopen == 0) {
485		error = priv_check(td, PRIV_NET_TAP);
486		if (error)
487			return (error);
488	}
489
490	if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT)
491		return (ENXIO);
492
493	tp = dev->si_drv1;
494
495	mtx_lock(&tp->tap_mtx);
496	if (tp->tap_flags & TAP_OPEN) {
497		mtx_unlock(&tp->tap_mtx);
498		return (EBUSY);
499	}
500
501	bcopy(IF_LLADDR(tp->tap_ifp), tp->ether_addr, sizeof(tp->ether_addr));
502	tp->tap_pid = td->td_proc->p_pid;
503	tp->tap_flags |= TAP_OPEN;
504	ifp = tp->tap_ifp;
505
506	ifp->if_drv_flags |= IFF_DRV_RUNNING;
507	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
508	if (tapuponopen)
509		ifp->if_flags |= IFF_UP;
510	if_link_state_change(ifp, LINK_STATE_UP);
511	mtx_unlock(&tp->tap_mtx);
512
513	TAPDEBUG("%s is open. minor = %#x\n", ifp->if_xname, dev2unit(dev));
514
515	return (0);
516} /* tapopen */
517
518
519/*
520 * tapclose
521 *
522 * close the device - mark i/f down & delete routing info
523 */
524static int
525tapclose(struct cdev *dev, int foo, int bar, struct thread *td)
526{
527	struct ifaddr		*ifa;
528	struct tap_softc	*tp = dev->si_drv1;
529	struct ifnet		*ifp = tp->tap_ifp;
530
531	/* junk all pending output */
532	mtx_lock(&tp->tap_mtx);
533	CURVNET_SET(ifp->if_vnet);
534	IF_DRAIN(&ifp->if_snd);
535
536	/*
537	 * Do not bring the interface down and do not do anything else with
538	 * the interface if we are in VMnet mode; just close the device.
539	 */
540
541	if (((tp->tap_flags & TAP_VMNET) == 0) && (ifp->if_flags & IFF_UP)) {
542		mtx_unlock(&tp->tap_mtx);
543		if_down(ifp);
544		mtx_lock(&tp->tap_mtx);
545		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
546			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
547			mtx_unlock(&tp->tap_mtx);
548			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
549				rtinit(ifa, (int)RTM_DELETE, 0);
550			}
551			if_purgeaddrs(ifp);
552			mtx_lock(&tp->tap_mtx);
553		}
554	}
555
556	if_link_state_change(ifp, LINK_STATE_DOWN);
557	CURVNET_RESTORE();
558
559	funsetown(&tp->tap_sigio);
560	selwakeuppri(&tp->tap_rsel, PZERO+1);
561	KNOTE_LOCKED(&tp->tap_rsel.si_note, 0);
562
563	tp->tap_flags &= ~TAP_OPEN;
564	tp->tap_pid = 0;
565	mtx_unlock(&tp->tap_mtx);
566
567	TAPDEBUG("%s is closed. minor = %#x\n",
568		ifp->if_xname, dev2unit(dev));
569
570	return (0);
571} /* tapclose */
572
573
574/*
575 * tapifinit
576 *
577 * network interface initialization function
578 */
579static void
580tapifinit(void *xtp)
581{
582	struct tap_softc	*tp = (struct tap_softc *)xtp;
583	struct ifnet		*ifp = tp->tap_ifp;
584
585	TAPDEBUG("initializing %s\n", ifp->if_xname);
586
587	mtx_lock(&tp->tap_mtx);
588	ifp->if_drv_flags |= IFF_DRV_RUNNING;
589	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
590	mtx_unlock(&tp->tap_mtx);
591
592	/* attempt to start output */
593	tapifstart(ifp);
594} /* tapifinit */
595
596
597/*
598 * tapifioctl
599 *
600 * Process an ioctl request on network interface
601 */
602static int
603tapifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
604{
605	struct tap_softc	*tp = ifp->if_softc;
606	struct ifreq		*ifr = (struct ifreq *)data;
607	struct ifstat		*ifs = NULL;
608	struct ifmediareq	*ifmr = NULL;
609	int			 dummy, error = 0;
610
611	switch (cmd) {
612		case SIOCSIFFLAGS: /* XXX -- just like vmnet does */
613		case SIOCADDMULTI:
614		case SIOCDELMULTI:
615			break;
616
617		case SIOCGIFMEDIA:
618			ifmr = (struct ifmediareq *)data;
619			dummy = ifmr->ifm_count;
620			ifmr->ifm_count = 1;
621			ifmr->ifm_status = IFM_AVALID;
622			ifmr->ifm_active = IFM_ETHER;
623			if (tp->tap_flags & TAP_OPEN)
624				ifmr->ifm_status |= IFM_ACTIVE;
625			ifmr->ifm_current = ifmr->ifm_active;
626			if (dummy >= 1) {
627				int media = IFM_ETHER;
628				error = copyout(&media, ifmr->ifm_ulist,
629				    sizeof(int));
630			}
631			break;
632
633		case SIOCSIFMTU:
634			ifp->if_mtu = ifr->ifr_mtu;
635			break;
636
637		case SIOCGIFSTATUS:
638			ifs = (struct ifstat *)data;
639			mtx_lock(&tp->tap_mtx);
640			if (tp->tap_pid != 0)
641				snprintf(ifs->ascii, sizeof(ifs->ascii),
642					"\tOpened by PID %d\n", tp->tap_pid);
643			else
644				ifs->ascii[0] = '\0';
645			mtx_unlock(&tp->tap_mtx);
646			break;
647
648		default:
649			error = ether_ioctl(ifp, cmd, data);
650			break;
651	}
652
653	return (error);
654} /* tapifioctl */
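
/*
 * A practical consequence of the SIOCGIFMEDIA handling above: ifconfig(8)
 * will typically report "status: active" for a tap interface only while
 * some process holds the control device open.
 */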
655
656
657/*
658 * tapifstart
659 *
660 * queue packets from higher level ready to put out
661 */
662static void
663tapifstart(struct ifnet *ifp)
664{
665	struct tap_softc	*tp = ifp->if_softc;
666
667	TAPDEBUG("%s starting\n", ifp->if_xname);
668
669	/*
670	 * do not junk pending output if we are in VMnet mode.
671	 * XXX: can this do any harm because of queue overflow?
672	 */
673
674	mtx_lock(&tp->tap_mtx);
675	if (((tp->tap_flags & TAP_VMNET) == 0) &&
676	    ((tp->tap_flags & TAP_READY) != TAP_READY)) {
677		struct mbuf *m;
678
679		/* Unlocked read. */
680		TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname,
681		    tp->tap_flags);
682
683		for (;;) {
684			IF_DEQUEUE(&ifp->if_snd, m);
685			if (m != NULL) {
686				m_freem(m);
687				ifp->if_oerrors++;
688			} else
689				break;
690		}
691		mtx_unlock(&tp->tap_mtx);
692
693		return;
694	}
695
696	ifp->if_drv_flags |= IFF_DRV_OACTIVE;
697
698	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
699		if (tp->tap_flags & TAP_RWAIT) {
700			tp->tap_flags &= ~TAP_RWAIT;
701			wakeup(tp);
702		}
703
704		if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) {
705			mtx_unlock(&tp->tap_mtx);
706			pgsigio(&tp->tap_sigio, SIGIO, 0);
707			mtx_lock(&tp->tap_mtx);
708		}
709
710		selwakeuppri(&tp->tap_rsel, PZERO+1);
711		KNOTE_LOCKED(&tp->tap_rsel.si_note, 0);
712		ifp->if_opackets ++; /* obytes are counted in ether_output */
713	}
714
715	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
716	mtx_unlock(&tp->tap_mtx);
717} /* tapifstart */
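
/*
 * Note that tapifstart() does not dequeue anything itself outside of the
 * not-ready error path: frames queued by the stack stay on if_snd until
 * tapread() picks them up; the wakeup/selwakeup/KNOTE/SIGIO calls above
 * merely notify a sleeping or polling reader.
 */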
718
719
720/*
721 * tapioctl
722 *
723 * the cdevsw interface is now pretty minimal
724 */
725static int
726tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
727{
728	struct tap_softc	*tp = dev->si_drv1;
729	struct ifnet		*ifp = tp->tap_ifp;
730	struct tapinfo		*tapp = NULL;
731	int			 f;
732#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
733    defined(COMPAT_FREEBSD4)
734	int			 ival;
735#endif
736
737	switch (cmd) {
738		case TAPSIFINFO:
739			tapp = (struct tapinfo *)data;
740			mtx_lock(&tp->tap_mtx);
741			ifp->if_mtu = tapp->mtu;
742			ifp->if_type = tapp->type;
743			ifp->if_baudrate = tapp->baudrate;
744			mtx_unlock(&tp->tap_mtx);
745			break;
746
747		case TAPGIFINFO:
748			tapp = (struct tapinfo *)data;
749			mtx_lock(&tp->tap_mtx);
750			tapp->mtu = ifp->if_mtu;
751			tapp->type = ifp->if_type;
752			tapp->baudrate = ifp->if_baudrate;
753			mtx_unlock(&tp->tap_mtx);
754			break;
755
756		case TAPSDEBUG:
757			tapdebug = *(int *)data;
758			break;
759
760		case TAPGDEBUG:
761			*(int *)data = tapdebug;
762			break;
763
764		case TAPGIFNAME: {
765			struct ifreq	*ifr = (struct ifreq *) data;
766
767			strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
768			} break;
769
770		case FIONBIO:
771			break;
772
773		case FIOASYNC:
774			mtx_lock(&tp->tap_mtx);
775			if (*(int *)data)
776				tp->tap_flags |= TAP_ASYNC;
777			else
778				tp->tap_flags &= ~TAP_ASYNC;
779			mtx_unlock(&tp->tap_mtx);
780			break;
781
782		case FIONREAD:
783			if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
784				struct mbuf *mb;
785
786				IFQ_LOCK(&ifp->if_snd);
787				IFQ_POLL_NOLOCK(&ifp->if_snd, mb);
788				for (*(int *)data = 0; mb != NULL;
789				     mb = mb->m_next)
790					*(int *)data += mb->m_len;
791				IFQ_UNLOCK(&ifp->if_snd);
792			} else
793				*(int *)data = 0;
794			break;
795
796		case FIOSETOWN:
797			return (fsetown(*(int *)data, &tp->tap_sigio));
798
799		case FIOGETOWN:
800			*(int *)data = fgetown(&tp->tap_sigio);
801			return (0);
802
803		/* this is deprecated, FIOSETOWN should be used instead */
804		case TIOCSPGRP:
805			return (fsetown(-(*(int *)data), &tp->tap_sigio));
806
807		/* this is deprecated, FIOGETOWN should be used instead */
808		case TIOCGPGRP:
809			*(int *)data = -fgetown(&tp->tap_sigio);
810			return (0);
811
812		/* VMware/VMnet port ioctl's */
813
814#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
815    defined(COMPAT_FREEBSD4)
816		case _IO('V', 0):
817			ival = IOCPARM_IVAL(data);
818			data = (caddr_t)&ival;
819			/* FALLTHROUGH */
820#endif
821		case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
822			f = *(int *)data;
823			f &= 0x0fff;
824			f &= ~IFF_CANTCHANGE;
825			f |= IFF_UP;
826
827			mtx_lock(&tp->tap_mtx);
828			ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE);
829			mtx_unlock(&tp->tap_mtx);
830			break;
831
832		case SIOCGIFADDR:	/* get MAC address of the remote side */
833			mtx_lock(&tp->tap_mtx);
834			bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
835			mtx_unlock(&tp->tap_mtx);
836			break;
837
838		case SIOCSIFADDR:	/* set MAC address of the remote side */
839			mtx_lock(&tp->tap_mtx);
840			bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
841			mtx_unlock(&tp->tap_mtx);
842			break;
843
844		default:
845			return (ENOTTY);
846	}
847	return (0);
848} /* tapioctl */
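
/*
 * A small userland sketch (illustrative only; "fd" is an already opened
 * control descriptor and struct tapinfo comes from net/if_tap.h) of the
 * driver-specific ioctls handled above:
 *
 *	struct tapinfo ti;
 *	ioctl(fd, TAPGIFINFO, &ti);	// current mtu/type/baudrate
 *	ti.baudrate = 10 * 1000000;	// pretend to be a 10Mb/s link
 *	ioctl(fd, TAPSIFINFO, &ti);
 *
 *	int on = 1;
 *	ioctl(fd, FIONBIO, &on);	// non-blocking reads
 *	ioctl(fd, FIOASYNC, &on);	// with FIOSETOWN: SIGIO on new frames
 */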
849
850
851/*
852 * tapread
853 *
854 * the cdevsw read interface - reads a packet at a time, or at
855 * least as much of a packet as can be read
856 */
857static int
858tapread(struct cdev *dev, struct uio *uio, int flag)
859{
860	struct tap_softc	*tp = dev->si_drv1;
861	struct ifnet		*ifp = tp->tap_ifp;
862	struct mbuf		*m = NULL;
863	int			 error = 0, len;
864
865	TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, dev2unit(dev));
866
867	mtx_lock(&tp->tap_mtx);
868	if ((tp->tap_flags & TAP_READY) != TAP_READY) {
869		mtx_unlock(&tp->tap_mtx);
870
871		/* Unlocked read. */
872		TAPDEBUG("%s not ready. minor = %#x, tap_flags = 0x%x\n",
873			ifp->if_xname, dev2unit(dev), tp->tap_flags);
874
875		return (EHOSTDOWN);
876	}
877
878	tp->tap_flags &= ~TAP_RWAIT;
879
880	/* sleep until we get a packet */
881	do {
882		IF_DEQUEUE(&ifp->if_snd, m);
883
884		if (m == NULL) {
885			if (flag & O_NONBLOCK) {
886				mtx_unlock(&tp->tap_mtx);
887				return (EWOULDBLOCK);
888			}
889
890			tp->tap_flags |= TAP_RWAIT;
891			error = mtx_sleep(tp, &tp->tap_mtx, PCATCH | (PZERO + 1),
892			    "taprd", 0);
893			if (error) {
894				mtx_unlock(&tp->tap_mtx);
895				return (error);
896			}
897		}
898	} while (m == NULL);
899	mtx_unlock(&tp->tap_mtx);
900
901	/* feed packet to bpf */
902	BPF_MTAP(ifp, m);
903
904	/* xfer packet to user space */
905	while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) {
906		len = min(uio->uio_resid, m->m_len);
907		if (len == 0)
908			break;
909
910		error = uiomove(mtod(m, void *), len, uio);
911		m = m_free(m);
912	}
913
914	if (m != NULL) {
915		TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname,
916			dev2unit(dev));
917		m_freem(m);
918	}
919
920	return (error);
921} /* tapread */
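
/*
 * Note for readers of the control device: each read() returns at most one
 * frame and whatever does not fit into the supplied buffer is dropped (see
 * above), so the buffer should be at least the interface MTU plus Ethernet
 * header in size; TAPMRU is a safe upper bound.
 */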
922
923
924/*
925 * tapwrite
926 *
927 * the cdevsw write interface - an atomic write is a packet - or else!
928 */
929static int
930tapwrite(struct cdev *dev, struct uio *uio, int flag)
931{
932	struct ether_header	*eh;
933	struct tap_softc	*tp = dev->si_drv1;
934	struct ifnet		*ifp = tp->tap_ifp;
935	struct mbuf		*m;
936
937	TAPDEBUG("%s writing, minor = %#x\n",
938		ifp->if_xname, dev2unit(dev));
939
940	if (uio->uio_resid == 0)
941		return (0);
942
943	if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) {
944		TAPDEBUG("%s invalid packet len = %zd, minor = %#x\n",
945			ifp->if_xname, uio->uio_resid, dev2unit(dev));
946
947		return (EIO);
948	}
949
950	if ((m = m_uiotombuf(uio, M_NOWAIT, 0, ETHER_ALIGN,
951	    M_PKTHDR)) == NULL) {
952		ifp->if_ierrors ++;
953		return (ENOBUFS);
954	}
955
956	m->m_pkthdr.rcvif = ifp;
957
958	/*
959	 * Only pass a unicast frame to ether_input() if it would actually
960	 * have been received by non-virtual hardware.
961	 */
962	if (m->m_len < sizeof(struct ether_header)) {
963		m_freem(m);
964		return (0);
965	}
966	eh = mtod(m, struct ether_header *);
967
968	if (eh && (ifp->if_flags & IFF_PROMISC) == 0 &&
969	    !ETHER_IS_MULTICAST(eh->ether_dhost) &&
970	    bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) {
971		m_freem(m);
972		return (0);
973	}
974
975	/* Pass packet up to parent. */
976	CURVNET_SET(ifp->if_vnet);
977	(*ifp->if_input)(ifp, m);
978	CURVNET_RESTORE();
979	ifp->if_ipackets ++; /* ibytes are counted in parent */
980
981	return (0);
982} /* tapwrite */
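
/*
 * The companion rule for writers: each write() must carry exactly one
 * complete Ethernet frame of at most TAPMRU bytes, and unicast frames not
 * addressed to the interface are silently dropped (with success returned)
 * unless the interface is in promiscuous mode, per the check above.
 */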
983
984
985/*
986 * tappoll
987 *
988 * the poll interface; this is really only useful for reads.
989 * The write detect always returns true: writes never block
990 * anyway, they either accept the packet or drop it.
991 */
992static int
993tappoll(struct cdev *dev, int events, struct thread *td)
994{
995	struct tap_softc	*tp = dev->si_drv1;
996	struct ifnet		*ifp = tp->tap_ifp;
997	int			 revents = 0;
998
999	TAPDEBUG("%s polling, minor = %#x\n",
1000		ifp->if_xname, dev2unit(dev));
1001
1002	if (events & (POLLIN | POLLRDNORM)) {
1003		IFQ_LOCK(&ifp->if_snd);
1004		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
1005			TAPDEBUG("%s have data in queue. len = %d, " \
1006				"minor = %#x\n", ifp->if_xname,
1007				ifp->if_snd.ifq_len, dev2unit(dev));
1008
1009			revents |= (events & (POLLIN | POLLRDNORM));
1010		} else {
1011			TAPDEBUG("%s waiting for data, minor = %#x\n",
1012				ifp->if_xname, dev2unit(dev));
1013
1014			selrecord(td, &tp->tap_rsel);
1015		}
1016		IFQ_UNLOCK(&ifp->if_snd);
1017	}
1018
1019	if (events & (POLLOUT | POLLWRNORM))
1020		revents |= (events & (POLLOUT | POLLWRNORM));
1021
1022	return (revents);
1023} /* tappoll */
1024
1025
1026/*
1027 * tap_kqfilter
1028 *
1029 * support for kevent() system call
1030 */
1031static int
1032tapkqfilter(struct cdev *dev, struct knote *kn)
1033{
1034	struct tap_softc	*tp = dev->si_drv1;
1035	struct ifnet		*ifp = tp->tap_ifp;
1036
1037	switch (kn->kn_filter) {
1038	case EVFILT_READ:
1039		TAPDEBUG("%s kqfilter: EVFILT_READ, minor = %#x\n",
1040			ifp->if_xname, dev2unit(dev));
1041		kn->kn_fop = &tap_read_filterops;
1042		break;
1043
1044	case EVFILT_WRITE:
1045		TAPDEBUG("%s kqfilter: EVFILT_WRITE, minor = %#x\n",
1046			ifp->if_xname, dev2unit(dev));
1047		kn->kn_fop = &tap_write_filterops;
1048		break;
1049
1050	default:
1051		TAPDEBUG("%s kqfilter: invalid filter, minor = %#x\n",
1052			ifp->if_xname, dev2unit(dev));
1053		return (EINVAL);
1054		/* NOT REACHED */
1055	}
1056
1057	kn->kn_hook = tp;
1058	knlist_add(&tp->tap_rsel.si_note, kn, 0);
1059
1060	return (0);
1061} /* tapkqfilter */
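
/*
 * Hypothetical kevent(2) usage against an open control descriptor "fd"
 * (sketch only, error handling omitted):
 *
 *	#include <sys/event.h>
 *
 *	int kq = kqueue();
 *	struct kevent ev;
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// ev.data: frames queued
 */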
1062
1063
1064/*
1065 * tap_kqread
1066 *
1067 * Return true if there is data in the interface queue
1068 */
1069static int
1070tapkqread(struct knote *kn, long hint)
1071{
1072	int			 ret;
1073	struct tap_softc	*tp = kn->kn_hook;
1074	struct cdev		*dev = tp->tap_dev;
1075	struct ifnet		*ifp = tp->tap_ifp;
1076
1077	if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) {
1078		TAPDEBUG("%s have data in queue. len = %d, minor = %#x\n",
1079			ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
1080		ret = 1;
1081	} else {
1082		TAPDEBUG("%s waiting for data, minor = %#x\n",
1083			ifp->if_xname, dev2unit(dev));
1084		ret = 0;
1085	}
1086
1087	return (ret);
1088} /* tapkqread */
1089
1090
1091/*
1092 * tap_kqwrite
1093 *
1094 * Always writable. Return the MTU in kn->kn_data.
1095 */
1096static int
1097tapkqwrite(struct knote *kn, long hint)
1098{
1099	struct tap_softc	*tp = kn->kn_hook;
1100	struct ifnet		*ifp = tp->tap_ifp;
1101
1102	kn->kn_data = ifp->if_mtu;
1103
1104	return (1);
1105} /* tapkqwrite */
1106
1107
1108static void
1109tapkqdetach(struct knote *kn)
1110{
1111	struct tap_softc	*tp = kn->kn_hook;
1112
1113	knlist_remove(&tp->tap_rsel.si_note, kn, 0);
1114} /* tapkqdetach */
1115
1116