/*-
 * Copyright (C) 1999-2000 by Maksim Yevmenkin <m_evmenkin@yahoo.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * BASED ON:
 * -------------------------------------------------------------------------
 *
 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk>
 * Nottingham University 1987.
 */

/*
 * $FreeBSD: head/sys/net/if_tap.c 148868 2005-08-08 19:55:32Z rwatson $
 * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $
 */

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/filio.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/poll.h>
#include <sys/proc.h>
#include <sys/selinfo.h>
#include <sys/signalvar.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/ttycom.h>
#include <sys/uio.h>
#include <sys/queue.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <net/route.h>
#include <net/if_types.h>

#include <netinet/in.h>

#include <net/if_tapvar.h>
#include <net/if_tap.h>


#define CDEV_NAME	"tap"
#define TAPDEBUG	if (tapdebug) printf

#define TAP		"tap"
#define VMNET		"vmnet"
#define TAPMAXUNIT	0x7fff
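/* vmnet devices are marked by setting CLONE_FLAG0 in the cloned unit number */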
#define VMNET_DEV_MASK	CLONE_FLAG0

/* module */
static int		tapmodevent(module_t, int, void *);

/* device */
static void		tapclone(void *, struct ucred *, char *, int,
			    struct cdev **);
static void		tapcreate(struct cdev *);

/* network interface */
static void		tapifstart(struct ifnet *);
static int		tapifioctl(struct ifnet *, u_long, caddr_t);
static void		tapifinit(void *);

/* character device */
static d_open_t		tapopen;
static d_close_t	tapclose;
static d_read_t		tapread;
static d_write_t	tapwrite;
static d_ioctl_t	tapioctl;
static d_poll_t		tappoll;

static struct cdevsw	tap_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	D_PSEUDO | D_NEEDGIANT,
	.d_open =	tapopen,
	.d_close =	tapclose,
	.d_read =	tapread,
	.d_write =	tapwrite,
	.d_ioctl =	tapioctl,
	.d_poll =	tappoll,
	.d_name =	CDEV_NAME,
};

/*
 * All global variables in if_tap.c are locked with tapmtx, with the
 * exception of tapdebug, which is accessed unlocked; tapclones is
 * static at runtime.
 */
static struct mtx		tapmtx;
static int			tapdebug = 0;		/* debug flag */
static int			tapuopen = 0;		/* allow user open() */
static SLIST_HEAD(, tap_softc)	taphead;		/* first device */
static struct clonedevs		*tapclones;

MALLOC_DECLARE(M_TAP);
MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface");
SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, "");

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW, 0,
    "Ethernet tunnel software network interface");
SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tapuopen, 0,
    "Allow user to open /dev/tap (based on node permissions)");
SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tapdebug, 0, "");

DEV_MODULE(if_tap, tapmodevent, NULL);

/*
 * tapmodevent
 *
 * module event handler
 */
static int
tapmodevent(mod, type, data)
	module_t	 mod;
	int		 type;
	void		*data;
{
	static eventhandler_tag	 eh_tag = NULL;
	struct tap_softc	*tp = NULL;
	struct ifnet		*ifp = NULL;
	int			 s;

	switch (type) {
	case MOD_LOAD:

		/* initialize device */

		mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF);
		SLIST_INIT(&taphead);

		clone_setup(&tapclones);
		eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000);
		if (eh_tag == NULL) {
			clone_cleanup(&tapclones);
			mtx_destroy(&tapmtx);
			return (ENOMEM);
		}
		return (0);

	case MOD_UNLOAD:
		/*
		 * The EBUSY algorithm here can't quite atomically
		 * guarantee that this is race-free since we have to
		 * release the tap mtx to deregister the clone handler.
		 */
		mtx_lock(&tapmtx);
		SLIST_FOREACH(tp, &taphead, tap_next) {
			mtx_lock(&tp->tap_mtx);
			if (tp->tap_flags & TAP_OPEN) {
				mtx_unlock(&tp->tap_mtx);
				mtx_unlock(&tapmtx);
				return (EBUSY);
			}
			mtx_unlock(&tp->tap_mtx);
		}
		mtx_unlock(&tapmtx);

		EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);

		mtx_lock(&tapmtx);
		while ((tp = SLIST_FIRST(&taphead)) != NULL) {
			SLIST_REMOVE_HEAD(&taphead, tap_next);
			mtx_unlock(&tapmtx);

			ifp = tp->tap_ifp;

			TAPDEBUG("detaching %s\n", ifp->if_xname);

			/* Unlocked read. */
			KASSERT(!(tp->tap_flags & TAP_OPEN),
			    ("%s flags is out of sync", ifp->if_xname));

			destroy_dev(tp->tap_dev);
			s = splimp();
			ether_ifdetach(ifp);
			if_free_type(ifp, IFT_ETHER);
			splx(s);

			mtx_destroy(&tp->tap_mtx);
			free(tp, M_TAP);
			mtx_lock(&tapmtx);
		}
		mtx_unlock(&tapmtx);
		clone_cleanup(&tapclones);

		mtx_destroy(&tapmtx);

		break;

	default:
		return (EOPNOTSUPP);
	}

	return (0);
} /* tapmodevent */


/*
 * DEVFS handler
 *
 * We need to support two kinds of devices - tap and vmnet
 */
static void
tapclone(arg, cred, name, namelen, dev)
	void		*arg;
	struct ucred	*cred;
	char		*name;
	int		 namelen;
	struct cdev	**dev;
{
	u_int		extra;
	int		i, unit;
	char		*device_name = name;

	if (*dev != NULL)
		return;

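	/*
	 * A bare "tap" or "vmnet" name allocates the next free unit;
	 * otherwise parse a trailing unit number out of the name.
	 */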
	device_name = TAP;
	extra = 0;
	if (strcmp(name, TAP) == 0) {
		unit = -1;
	} else if (strcmp(name, VMNET) == 0) {
		device_name = VMNET;
		extra = VMNET_DEV_MASK;
		unit = -1;
	} else if (dev_stdclone(name, NULL, device_name, &unit) != 1) {
		device_name = VMNET;
		extra = VMNET_DEV_MASK;
		if (dev_stdclone(name, NULL, device_name, &unit) != 1)
			return;
	}

	/* find any existing device, or allocate new unit number */
	i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra);
	if (i) {
		*dev = make_dev(&tap_cdevsw, unit2minor(unit | extra),
		    UID_ROOT, GID_WHEEL, 0600, "%s%d", device_name, unit);
		if (*dev != NULL) {
			dev_ref(*dev);
			(*dev)->si_flags |= SI_CHEAPCLONE;
		}
	}
} /* tapclone */


/*
 * tapcreate
 *
 * create the interface
 */
static void
tapcreate(dev)
	struct cdev	*dev;
{
	struct ifnet		*ifp = NULL;
	struct tap_softc	*tp = NULL;
	unsigned short		 macaddr_hi;
	int			 unit, s;
	char			*name = NULL;
	u_char			 eaddr[6];

	dev->si_flags &= ~SI_CHEAPCLONE;

	/* allocate driver storage and create device */
	MALLOC(tp, struct tap_softc *, sizeof(*tp), M_TAP, M_WAITOK | M_ZERO);
	mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF);
	mtx_lock(&tapmtx);
	SLIST_INSERT_HEAD(&taphead, tp, tap_next);
	mtx_unlock(&tapmtx);

	unit = dev2unit(dev);

	/* select device: tap or vmnet */
	if (unit & VMNET_DEV_MASK) {
		name = VMNET;
		tp->tap_flags |= TAP_VMNET;
	} else
		name = TAP;

	unit &= TAPMAXUNIT;

	TAPDEBUG("tapcreate(%s%d). minor = %#x\n", name, unit, minor(dev));

	/* generate fake MAC address: 00 bd xx xx xx unit_no */
	macaddr_hi = htons(0x00bd);
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&ticks, &eaddr[2], sizeof(long));
	eaddr[5] = (u_char)unit;

	/* fill the rest and attach interface */
	ifp = tp->tap_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s%d: can not if_alloc()", name, unit);
	ifp->if_softc = tp;
	if_initname(ifp, name, unit);
	ifp->if_init = tapifinit;
	ifp->if_start = tapifstart;
	ifp->if_ioctl = tapifioctl;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = ifqmaxlen;

	dev->si_drv1 = tp;
	tp->tap_dev = dev;

	s = splimp();
	ether_ifattach(ifp, eaddr);
	splx(s);

	mtx_lock(&tp->tap_mtx);
	tp->tap_flags |= TAP_INITED;
	mtx_unlock(&tp->tap_mtx);

	TAPDEBUG("interface %s is created. minor = %#x\n",
	    ifp->if_xname, minor(dev));
} /* tapcreate */


/*
 * tapopen
 *
 * open the tunnel device. must be superuser unless net.link.tap.user_open is set
 */
static int
tapopen(dev, flag, mode, td)
	struct cdev	*dev;
	int		 flag;
	int		 mode;
	struct thread	*td;
{
	struct tap_softc	*tp = NULL;
	struct ifnet		*ifp = NULL;
	int			 s;

	if (tapuopen == 0 && suser(td) != 0)
		return (EPERM);

	if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT)
		return (ENXIO);

	/*
	 * XXXRW: Non-atomic test-and-set of si_drv1.  Currently protected
	 * by Giant, but the race actually exists under memory pressure as
	 * well even when running with Giant, as malloc() may sleep.
	 */
	tp = dev->si_drv1;
	if (tp == NULL) {
		tapcreate(dev);
		tp = dev->si_drv1;
	}

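	/* only one opener at a time: fail with EBUSY if the device is already open */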
	mtx_lock(&tp->tap_mtx);
	if (tp->tap_flags & TAP_OPEN) {
		mtx_unlock(&tp->tap_mtx);
		return (EBUSY);
	}

	bcopy(IFP2ENADDR(tp->tap_ifp), tp->ether_addr, sizeof(tp->ether_addr));
	tp->tap_pid = td->td_proc->p_pid;
	tp->tap_flags |= TAP_OPEN;
	ifp = tp->tap_ifp;
	mtx_unlock(&tp->tap_mtx);

	s = splimp();
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);

	TAPDEBUG("%s is open. minor = %#x\n", ifp->if_xname, minor(dev));

	return (0);
} /* tapopen */


/*
 * tapclose
 *
 * close the device - mark i/f down & delete routing info
 */
static int
tapclose(dev, foo, bar, td)
	struct cdev	*dev;
	int		 foo;
	int		 bar;
	struct thread	*td;
{
	struct ifaddr		*ifa;
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	int			 s;

	/* junk all pending output */
	IF_DRAIN(&ifp->if_snd);

	/*
	 * Do not bring the interface down, and do not do anything else
	 * with it, if we are in VMnet mode; just close the device.
	 */

	mtx_lock(&tp->tap_mtx);
	if (((tp->tap_flags & TAP_VMNET) == 0) && (ifp->if_flags & IFF_UP)) {
		mtx_unlock(&tp->tap_mtx);
		s = splimp();
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				rtinit(ifa, (int)RTM_DELETE, 0);
			}
			if_purgeaddrs(ifp);
			ifp->if_flags &= ~IFF_RUNNING;
		}
		splx(s);
	} else
		mtx_unlock(&tp->tap_mtx);

	funsetown(&tp->tap_sigio);
	selwakeuppri(&tp->tap_rsel, PZERO+1);

	mtx_lock(&tp->tap_mtx);
	tp->tap_flags &= ~TAP_OPEN;
	tp->tap_pid = 0;
	mtx_unlock(&tp->tap_mtx);

	TAPDEBUG("%s is closed. minor = %#x\n",
	    ifp->if_xname, minor(dev));

	return (0);
} /* tapclose */


/*
 * tapifinit
 *
 * network interface initialization function
 */
static void
tapifinit(xtp)
	void	*xtp;
{
	struct tap_softc	*tp = (struct tap_softc *)xtp;
	struct ifnet		*ifp = tp->tap_ifp;

	TAPDEBUG("initializing %s\n", ifp->if_xname);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	/* attempt to start output */
	tapifstart(ifp);
} /* tapifinit */


/*
 * tapifioctl
 *
 * Process an ioctl request on network interface
 */
static int
tapifioctl(ifp, cmd, data)
	struct ifnet	*ifp;
	u_long		 cmd;
	caddr_t		 data;
{
	struct tap_softc	*tp = (struct tap_softc *)(ifp->if_softc);
	struct ifstat		*ifs = NULL;
	int			 s, dummy;

	switch (cmd) {
	case SIOCSIFFLAGS: /* XXX -- just like vmnet does */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCGIFSTATUS:
		s = splimp();
		ifs = (struct ifstat *)data;
		dummy = strlen(ifs->ascii);
		mtx_lock(&tp->tap_mtx);
		if (tp->tap_pid != 0 && dummy < sizeof(ifs->ascii))
			snprintf(ifs->ascii + dummy,
			    sizeof(ifs->ascii) - dummy,
			    "\tOpened by PID %d\n", tp->tap_pid);
		mtx_unlock(&tp->tap_mtx);
		splx(s);
		break;

	default:
		s = splimp();
		dummy = ether_ioctl(ifp, cmd, data);
		splx(s);
		return (dummy);
	}

	return (0);
} /* tapifioctl */


/*
 * tapifstart
 *
 * queue packets from higher level ready to put out
 */
static void
tapifstart(ifp)
	struct ifnet	*ifp;
{
	struct tap_softc	*tp = ifp->if_softc;
	int			 s;

	TAPDEBUG("%s starting\n", ifp->if_xname);

	/*
	 * do not junk pending output if we are in VMnet mode.
	 * XXX: can this do any harm because of queue overflow?
	 */

	mtx_lock(&tp->tap_mtx);
	if (((tp->tap_flags & TAP_VMNET) == 0) &&
	    ((tp->tap_flags & TAP_READY) != TAP_READY)) {
		struct mbuf	*m = NULL;

		mtx_unlock(&tp->tap_mtx);

		/* Unlocked read. */
		TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname,
		    tp->tap_flags);

		s = splimp();
		do {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL) {
				m_freem(m);
				ifp->if_oerrors++;	/* count each junked packet */
			}
		} while (m != NULL);
		splx(s);

		return;
	}
	mtx_unlock(&tp->tap_mtx);

	s = splimp();
	ifp->if_flags |= IFF_OACTIVE;

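	/* if packets are queued, wake a sleeping reader, post SIGIO, and notify selectors */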
	if (ifp->if_snd.ifq_len != 0) {
		mtx_lock(&tp->tap_mtx);
		if (tp->tap_flags & TAP_RWAIT) {
			tp->tap_flags &= ~TAP_RWAIT;
			wakeup(tp);
		}

		if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) {
			mtx_unlock(&tp->tap_mtx);
			pgsigio(&tp->tap_sigio, SIGIO, 0);
		} else
			mtx_unlock(&tp->tap_mtx);

		selwakeuppri(&tp->tap_rsel, PZERO+1);
		ifp->if_opackets++; /* obytes are counted in ether_output */
	}

	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
} /* tapifstart */


/*
 * tapioctl
 *
 * the cdevsw interface is now pretty minimal
 */
static int
tapioctl(dev, cmd, data, flag, td)
	struct cdev	*dev;
	u_long		 cmd;
	caddr_t		 data;
	int		 flag;
	struct thread	*td;
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	struct tapinfo		*tapp = NULL;
	int			 s;
	int			 f;

	switch (cmd) {
	case TAPSIFINFO:
		s = splimp();
		tapp = (struct tapinfo *)data;
		ifp->if_mtu = tapp->mtu;
		ifp->if_type = tapp->type;
		ifp->if_baudrate = tapp->baudrate;
		splx(s);
		break;

	case TAPGIFINFO:
		tapp = (struct tapinfo *)data;
		tapp->mtu = ifp->if_mtu;
		tapp->type = ifp->if_type;
		tapp->baudrate = ifp->if_baudrate;
		break;

	case TAPSDEBUG:
		tapdebug = *(int *)data;
		break;

	case TAPGDEBUG:
		*(int *)data = tapdebug;
		break;

	case FIONBIO:
		break;

	case FIOASYNC:
		s = splimp();
		mtx_lock(&tp->tap_mtx);
		if (*(int *)data)
			tp->tap_flags |= TAP_ASYNC;
		else
			tp->tap_flags &= ~TAP_ASYNC;
		mtx_unlock(&tp->tap_mtx);
		splx(s);
		break;

	case FIONREAD:
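		/* report the number of bytes in the packet at the head of the send queue */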
		s = splimp();
		if (ifp->if_snd.ifq_head) {
			struct mbuf	*mb = ifp->if_snd.ifq_head;

			for (*(int *)data = 0; mb != NULL; mb = mb->m_next)
				*(int *)data += mb->m_len;
		} else
			*(int *)data = 0;
		splx(s);
		break;

	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tap_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tap_sigio);
		return (0);

	/* this is deprecated, FIOSETOWN should be used instead */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tap_sigio));

	/* this is deprecated, FIOGETOWN should be used instead */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tap_sigio);
		return (0);

	/* VMware/VMnet port ioctl's */

	case SIOCGIFFLAGS:	/* get ifnet flags */
		bcopy(&ifp->if_flags, data, sizeof(ifp->if_flags));
		break;

	case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
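		/*
		 * Strip flags the caller may not change, force the interface
		 * up, and preserve the kernel-owned IFF_CANTCHANGE bits.
		 */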
		f = *(int *)data;
		f &= 0x0fff;
		f &= ~IFF_CANTCHANGE;
		f |= IFF_UP;

		s = splimp();
		ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE);
		splx(s);
		break;

	case OSIOCGIFADDR:	/* get MAC address of the remote side */
	case SIOCGIFADDR:
		mtx_lock(&tp->tap_mtx);
		bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
		mtx_unlock(&tp->tap_mtx);
		break;

	case SIOCSIFADDR:	/* set MAC address of the remote side */
		mtx_lock(&tp->tap_mtx);
		bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
		mtx_unlock(&tp->tap_mtx);
		break;

	default:
		return (ENOTTY);
	}
	return (0);
} /* tapioctl */


/*
 * tapread
 *
 * the cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read
 */
static int
tapread(dev, uio, flag)
	struct cdev	*dev;
	struct uio	*uio;
	int		 flag;
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	struct mbuf		*m = NULL;
	int			 error = 0, len, s;

	TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, minor(dev));

	mtx_lock(&tp->tap_mtx);
	if ((tp->tap_flags & TAP_READY) != TAP_READY) {
		mtx_unlock(&tp->tap_mtx);

		/* Unlocked read. */
		TAPDEBUG("%s not ready. minor = %#x, tap_flags = 0x%x\n",
		    ifp->if_xname, minor(dev), tp->tap_flags);

		return (EHOSTDOWN);
	}

	tp->tap_flags &= ~TAP_RWAIT;
	mtx_unlock(&tp->tap_mtx);

	/* sleep until we get a packet */
	do {
		s = splimp();
		IF_DEQUEUE(&ifp->if_snd, m);
		splx(s);

		if (m == NULL) {
			if (flag & O_NONBLOCK)
				return (EWOULDBLOCK);

			mtx_lock(&tp->tap_mtx);
			tp->tap_flags |= TAP_RWAIT;
			mtx_unlock(&tp->tap_mtx);
			error = tsleep(tp, PCATCH | (PZERO + 1), "taprd", 0);
			if (error)
				return (error);
		}
	} while (m == NULL);

	/* feed packet to bpf */
	BPF_MTAP(ifp, m);

	/* xfer packet to user space */
	while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) {
		len = min(uio->uio_resid, m->m_len);
		if (len == 0)
			break;

		error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	if (m != NULL) {
		TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname,
		    minor(dev));
		m_freem(m);
	}

	return (error);
} /* tapread */


/*
 * tapwrite
 *
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
static int
tapwrite(dev, uio, flag)
	struct cdev	*dev;
	struct uio	*uio;
	int		 flag;
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	struct mbuf		*m;

	TAPDEBUG("%s writing, minor = %#x\n",
	    ifp->if_xname, minor(dev));

	if (uio->uio_resid == 0)
		return (0);

	if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) {
		TAPDEBUG("%s invalid packet len = %d, minor = %#x\n",
		    ifp->if_xname, uio->uio_resid, minor(dev));

		return (EIO);
	}

	/* copy the packet from user space into an mbuf chain */
	if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, ETHER_ALIGN)) == NULL) {
		ifp->if_ierrors++;
		return (ENOBUFS);
	}

	m->m_pkthdr.rcvif = ifp;

	/* Pass packet up to parent. */
	(*ifp->if_input)(ifp, m);
	ifp->if_ipackets++; /* ibytes are counted in parent */

	return (0);
} /* tapwrite */


/*
 * tappoll
 *
 * the poll interface, this is only useful on reads
 * really. the write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it
 */
static int
tappoll(dev, events, td)
	struct cdev	*dev;
	int		 events;
	struct thread	*td;
{
	struct tap_softc	*tp = dev->si_drv1;
	struct ifnet		*ifp = tp->tap_ifp;
	int			 s, revents = 0;

	TAPDEBUG("%s polling, minor = %#x\n",
	    ifp->if_xname, minor(dev));

	s = splimp();
	if (events & (POLLIN | POLLRDNORM)) {
		if (ifp->if_snd.ifq_len > 0) {
			TAPDEBUG("%s have data in queue. len = %d, " \
			    "minor = %#x\n", ifp->if_xname,
			    ifp->if_snd.ifq_len, minor(dev));

			revents |= (events & (POLLIN | POLLRDNORM));
		} else {
			TAPDEBUG("%s waiting for data, minor = %#x\n",
			    ifp->if_xname, minor(dev));

			selrecord(td, &tp->tap_rsel);
		}
	}

	if (events & (POLLOUT | POLLWRNORM))
		revents |= (events & (POLLOUT | POLLWRNORM));

	splx(s);
	return (revents);
} /* tappoll */