/* if_tun.c — FreeBSD revision 147611 */
1/* $NetBSD: if_tun.c,v 1.14 1994/06/29 06:36:25 cgd Exp $ */ 2 3/*- 4 * Copyright (c) 1988, Julian Onions <jpo@cs.nott.ac.uk> 5 * Nottingham University 1987. 6 * 7 * This source may be freely distributed, however I would be interested 8 * in any changes that are made. 9 * 10 * This driver takes packets off the IP i/f and hands them up to a 11 * user process to have its wicked way with. This driver has it's 12 * roots in a similar driver written by Phil Cockcroft (formerly) at 13 * UCL. This driver is based much more on read/write/poll mode of 14 * operation though. 15 * 16 * $FreeBSD: head/sys/net/if_tun.c 147611 2005-06-26 18:11:11Z dwmalone $ 17 */ 18 19#include "opt_atalk.h" 20#include "opt_inet.h" 21#include "opt_inet6.h" 22#include "opt_ipx.h" 23#include "opt_mac.h" 24 25#include <sys/param.h> 26#include <sys/proc.h> 27#include <sys/systm.h> 28#include <sys/mac.h> 29#include <sys/mbuf.h> 30#include <sys/module.h> 31#include <sys/socket.h> 32#include <sys/fcntl.h> 33#include <sys/filio.h> 34#include <sys/sockio.h> 35#include <sys/ttycom.h> 36#include <sys/poll.h> 37#include <sys/selinfo.h> 38#include <sys/signalvar.h> 39#include <sys/filedesc.h> 40#include <sys/kernel.h> 41#include <sys/sysctl.h> 42#include <sys/conf.h> 43#include <sys/uio.h> 44#include <sys/malloc.h> 45#include <sys/random.h> 46 47#include <net/if.h> 48#include <net/if_types.h> 49#include <net/netisr.h> 50#include <net/route.h> 51#ifdef INET 52#include <netinet/in.h> 53#endif 54#include <net/bpf.h> 55#include <net/if_tun.h> 56 57#include <sys/queue.h> 58 59/* 60 * tun_list is protected by global tunmtx. Other mutable fields are 61 * protected by tun->tun_mtx, or by their owning subsystem. tun_dev is 62 * static for the duration of a tunnel interface. 
63 */ 64struct tun_softc { 65 TAILQ_ENTRY(tun_softc) tun_list; 66 struct cdev *tun_dev; 67 u_short tun_flags; /* misc flags */ 68#define TUN_OPEN 0x0001 69#define TUN_INITED 0x0002 70#define TUN_RCOLL 0x0004 71#define TUN_IASET 0x0008 72#define TUN_DSTADDR 0x0010 73#define TUN_LMODE 0x0020 74#define TUN_RWAIT 0x0040 75#define TUN_ASYNC 0x0080 76#define TUN_IFHEAD 0x0100 77 78#define TUN_READY (TUN_OPEN | TUN_INITED) 79 80 /* 81 * XXXRW: tun_pid is used to exclusively lock /dev/tun. Is this 82 * actually needed? Can we just return EBUSY if already open? 83 * Problem is that this involved inherent races when a tun device 84 * is handed off from one process to another, as opposed to just 85 * being slightly stale informationally. 86 */ 87 pid_t tun_pid; /* owning pid */ 88 struct ifnet *tun_ifp; /* the interface */ 89 struct sigio *tun_sigio; /* information for async I/O */ 90 struct selinfo tun_rsel; /* read select */ 91 struct mtx tun_mtx; /* protect mutable softc fields */ 92}; 93#define TUN2IFP(sc) ((sc)->tun_ifp) 94 95#define TUNDEBUG if (tundebug) if_printf 96#define TUNNAME "tun" 97 98/* 99 * All mutable global variables in if_tun are locked using tunmtx, with 100 * the exception of tundebug, which is used unlocked, and tunclones, 101 * which is static after setup. 
102 */ 103static struct mtx tunmtx; 104static MALLOC_DEFINE(M_TUN, TUNNAME, "Tunnel Interface"); 105static int tundebug = 0; 106static struct clonedevs *tunclones; 107static TAILQ_HEAD(,tun_softc) tunhead = TAILQ_HEAD_INITIALIZER(tunhead); 108SYSCTL_INT(_debug, OID_AUTO, if_tun_debug, CTLFLAG_RW, &tundebug, 0, ""); 109 110static void tunclone(void *arg, char *name, int namelen, struct cdev **dev); 111static void tuncreate(struct cdev *dev); 112static int tunifioctl(struct ifnet *, u_long, caddr_t); 113static int tuninit(struct ifnet *); 114static int tunmodevent(module_t, int, void *); 115static int tunoutput(struct ifnet *, struct mbuf *, struct sockaddr *, 116 struct rtentry *rt); 117static void tunstart(struct ifnet *); 118 119static d_open_t tunopen; 120static d_close_t tunclose; 121static d_read_t tunread; 122static d_write_t tunwrite; 123static d_ioctl_t tunioctl; 124static d_poll_t tunpoll; 125 126static struct cdevsw tun_cdevsw = { 127 .d_version = D_VERSION, 128 .d_flags = D_PSEUDO | D_NEEDGIANT, 129 .d_open = tunopen, 130 .d_close = tunclose, 131 .d_read = tunread, 132 .d_write = tunwrite, 133 .d_ioctl = tunioctl, 134 .d_poll = tunpoll, 135 .d_name = TUNNAME, 136}; 137 138static void 139tunclone(void *arg, char *name, int namelen, struct cdev **dev) 140{ 141 int u, i; 142 143 if (*dev != NULL) 144 return; 145 146 if (strcmp(name, TUNNAME) == 0) { 147 u = -1; 148 } else if (dev_stdclone(name, NULL, TUNNAME, &u) != 1) 149 return; /* Don't recognise the name */ 150 if (u != -1 && u > IF_MAXUNIT) 151 return; /* Unit number too high */ 152 153 /* find any existing device, or allocate new unit number */ 154 i = clone_create(&tunclones, &tun_cdevsw, &u, dev, 0); 155 if (i) { 156 /* No preexisting struct cdev *, create one */ 157 *dev = make_dev(&tun_cdevsw, unit2minor(u), 158 UID_UUCP, GID_DIALER, 0600, "tun%d", u); 159 if (*dev != NULL) { 160 dev_ref(*dev); 161 (*dev)->si_flags |= SI_CHEAPCLONE; 162 } 163 } 164} 165 166static void 167tun_destroy(struct 
tun_softc *tp) 168{ 169 struct cdev *dev; 170 171 /* Unlocked read. */ 172 KASSERT((tp->tun_flags & TUN_OPEN) == 0, 173 ("tununits is out of sync - unit %d", TUN2IFP(tp)->if_dunit)); 174 175 dev = tp->tun_dev; 176 bpfdetach(TUN2IFP(tp)); 177 if_detach(TUN2IFP(tp)); 178 if_free(TUN2IFP(tp)); 179 destroy_dev(dev); 180 mtx_destroy(&tp->tun_mtx); 181 free(tp, M_TUN); 182} 183 184static int 185tunmodevent(module_t mod, int type, void *data) 186{ 187 static eventhandler_tag tag; 188 struct tun_softc *tp; 189 190 switch (type) { 191 case MOD_LOAD: 192 mtx_init(&tunmtx, "tunmtx", NULL, MTX_DEF); 193 clone_setup(&tunclones); 194 tag = EVENTHANDLER_REGISTER(dev_clone, tunclone, 0, 1000); 195 if (tag == NULL) 196 return (ENOMEM); 197 break; 198 case MOD_UNLOAD: 199 EVENTHANDLER_DEREGISTER(dev_clone, tag); 200 201 mtx_lock(&tunmtx); 202 while ((tp = TAILQ_FIRST(&tunhead)) != NULL) { 203 TAILQ_REMOVE(&tunhead, tp, tun_list); 204 mtx_unlock(&tunmtx); 205 tun_destroy(tp); 206 mtx_lock(&tunmtx); 207 } 208 mtx_unlock(&tunmtx); 209 clone_cleanup(&tunclones); 210 mtx_destroy(&tunmtx); 211 break; 212 default: 213 return EOPNOTSUPP; 214 } 215 return 0; 216} 217 218static moduledata_t tun_mod = { 219 "if_tun", 220 tunmodevent, 221 0 222}; 223 224DECLARE_MODULE(if_tun, tun_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 225 226static void 227tunstart(struct ifnet *ifp) 228{ 229 struct tun_softc *tp = ifp->if_softc; 230 struct mbuf *m; 231 232 if (ALTQ_IS_ENABLED(&ifp->if_snd)) { 233 IFQ_LOCK(&ifp->if_snd); 234 IFQ_POLL_NOLOCK(&ifp->if_snd, m); 235 if (m == NULL) { 236 IFQ_UNLOCK(&ifp->if_snd); 237 return; 238 } 239 IFQ_UNLOCK(&ifp->if_snd); 240 } 241 242 mtx_lock(&tp->tun_mtx); 243 if (tp->tun_flags & TUN_RWAIT) { 244 tp->tun_flags &= ~TUN_RWAIT; 245 wakeup(tp); 246 } 247 if (tp->tun_flags & TUN_ASYNC && tp->tun_sigio) { 248 mtx_unlock(&tp->tun_mtx); 249 pgsigio(&tp->tun_sigio, SIGIO, 0); 250 } else 251 mtx_unlock(&tp->tun_mtx); 252 selwakeuppri(&tp->tun_rsel, PZERO + 1); 253} 254 255/* XXX: should 
return an error code so it can fail. */ 256static void 257tuncreate(struct cdev *dev) 258{ 259 struct tun_softc *sc; 260 struct ifnet *ifp; 261 262 dev->si_flags &= ~SI_CHEAPCLONE; 263 264 MALLOC(sc, struct tun_softc *, sizeof(*sc), M_TUN, M_WAITOK | M_ZERO); 265 mtx_init(&sc->tun_mtx, "tun_mtx", NULL, MTX_DEF); 266 sc->tun_flags = TUN_INITED; 267 sc->tun_dev = dev; 268 mtx_lock(&tunmtx); 269 TAILQ_INSERT_TAIL(&tunhead, sc, tun_list); 270 mtx_unlock(&tunmtx); 271 272 ifp = sc->tun_ifp = if_alloc(IFT_PPP); 273 if (ifp == NULL) 274 panic("%s%d: failed to if_alloc() interface.\n", 275 TUNNAME, dev2unit(dev)); 276 if_initname(ifp, TUNNAME, dev2unit(dev)); 277 ifp->if_mtu = TUNMTU; 278 ifp->if_ioctl = tunifioctl; 279 ifp->if_output = tunoutput; 280 ifp->if_start = tunstart; 281 ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; 282 ifp->if_softc = sc; 283 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 284 ifp->if_snd.ifq_drv_maxlen = 0; 285 IFQ_SET_READY(&ifp->if_snd); 286 287 if_attach(ifp); 288 bpfattach(ifp, DLT_NULL, sizeof(u_int32_t)); 289 dev->si_drv1 = sc; 290} 291 292static int 293tunopen(struct cdev *dev, int flag, int mode, struct thread *td) 294{ 295 struct ifnet *ifp; 296 struct tun_softc *tp; 297 298 /* 299 * XXXRW: Non-atomic test and set of dev->si_drv1 requires 300 * synchronization. 301 */ 302 tp = dev->si_drv1; 303 if (!tp) { 304 tuncreate(dev); 305 tp = dev->si_drv1; 306 } 307 308 /* 309 * XXXRW: This use of tun_pid is subject to error due to the 310 * fact that a reference to the tunnel can live beyond the 311 * death of the process that created it. Can we replace this 312 * with a simple busy flag? 
313 */ 314 mtx_lock(&tp->tun_mtx); 315 if (tp->tun_pid != 0 && tp->tun_pid != td->td_proc->p_pid) { 316 mtx_unlock(&tp->tun_mtx); 317 return (EBUSY); 318 } 319 tp->tun_pid = td->td_proc->p_pid; 320 321 tp->tun_flags |= TUN_OPEN; 322 mtx_unlock(&tp->tun_mtx); 323 ifp = TUN2IFP(tp); 324 TUNDEBUG(ifp, "open\n"); 325 326 return (0); 327} 328 329/* 330 * tunclose - close the device - mark i/f down & delete 331 * routing info 332 */ 333static int 334tunclose(struct cdev *dev, int foo, int bar, struct thread *td) 335{ 336 struct tun_softc *tp; 337 struct ifnet *ifp; 338 int s; 339 340 tp = dev->si_drv1; 341 ifp = TUN2IFP(tp); 342 343 mtx_lock(&tp->tun_mtx); 344 tp->tun_flags &= ~TUN_OPEN; 345 tp->tun_pid = 0; 346 347 /* 348 * junk all pending output 349 */ 350 s = splimp(); 351 IFQ_PURGE(&ifp->if_snd); 352 splx(s); 353 mtx_unlock(&tp->tun_mtx); 354 355 if (ifp->if_flags & IFF_UP) { 356 s = splimp(); 357 if_down(ifp); 358 splx(s); 359 } 360 361 if (ifp->if_flags & IFF_RUNNING) { 362 struct ifaddr *ifa; 363 364 s = splimp(); 365 /* find internet addresses and delete routes */ 366 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) 367 if (ifa->ifa_addr->sa_family == AF_INET) 368 /* Unlocked read. */ 369 rtinit(ifa, (int)RTM_DELETE, 370 tp->tun_flags & TUN_DSTADDR ? RTF_HOST : 0); 371 ifp->if_flags &= ~IFF_RUNNING; 372 splx(s); 373 } 374 375 funsetown(&tp->tun_sigio); 376 selwakeuppri(&tp->tun_rsel, PZERO + 1); 377 TUNDEBUG (ifp, "closed\n"); 378 return (0); 379} 380 381static int 382tuninit(struct ifnet *ifp) 383{ 384 struct tun_softc *tp = ifp->if_softc; 385 struct ifaddr *ifa; 386 int error = 0; 387 388 TUNDEBUG(ifp, "tuninit\n"); 389 390 ifp->if_flags |= IFF_UP | IFF_RUNNING; 391 getmicrotime(&ifp->if_lastchange); 392 393 for (ifa = TAILQ_FIRST(&ifp->if_addrhead); ifa; 394 ifa = TAILQ_NEXT(ifa, ifa_link)) { 395 if (ifa->ifa_addr == NULL) 396 error = EFAULT; 397 /* XXX: Should maybe return straight off? 
*/ 398 else { 399#ifdef INET 400 if (ifa->ifa_addr->sa_family == AF_INET) { 401 struct sockaddr_in *si; 402 403 si = (struct sockaddr_in *)ifa->ifa_addr; 404 mtx_lock(&tp->tun_mtx); 405 if (si->sin_addr.s_addr) 406 tp->tun_flags |= TUN_IASET; 407 408 si = (struct sockaddr_in *)ifa->ifa_dstaddr; 409 if (si && si->sin_addr.s_addr) 410 tp->tun_flags |= TUN_DSTADDR; 411 mtx_unlock(&tp->tun_mtx); 412 } 413#endif 414 } 415 } 416 return (error); 417} 418 419/* 420 * Process an ioctl request. 421 */ 422static int 423tunifioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 424{ 425 struct ifreq *ifr = (struct ifreq *)data; 426 struct tun_softc *tp = ifp->if_softc; 427 struct ifstat *ifs; 428 int error = 0, s; 429 430 s = splimp(); 431 switch(cmd) { 432 case SIOCGIFSTATUS: 433 ifs = (struct ifstat *)data; 434 mtx_lock(&tp->tun_mtx); 435 if (tp->tun_pid) 436 sprintf(ifs->ascii + strlen(ifs->ascii), 437 "\tOpened by PID %d\n", tp->tun_pid); 438 mtx_unlock(&tp->tun_mtx); 439 break; 440 case SIOCSIFADDR: 441 error = tuninit(ifp); 442 TUNDEBUG(ifp, "address set, error=%d\n", error); 443 break; 444 case SIOCSIFDSTADDR: 445 error = tuninit(ifp); 446 TUNDEBUG(ifp, "destination address set, error=%d\n", error); 447 break; 448 case SIOCSIFMTU: 449 ifp->if_mtu = ifr->ifr_mtu; 450 TUNDEBUG(ifp, "mtu set\n"); 451 break; 452 case SIOCSIFFLAGS: 453 case SIOCADDMULTI: 454 case SIOCDELMULTI: 455 break; 456 default: 457 error = EINVAL; 458 } 459 splx(s); 460 return (error); 461} 462 463/* 464 * tunoutput - queue packets from higher level ready to put out. 
465 */ 466static int 467tunoutput( 468 struct ifnet *ifp, 469 struct mbuf *m0, 470 struct sockaddr *dst, 471 struct rtentry *rt) 472{ 473 struct tun_softc *tp = ifp->if_softc; 474 u_short cached_tun_flags; 475 int error; 476 u_int32_t af; 477 478 TUNDEBUG (ifp, "tunoutput\n"); 479 480#ifdef MAC 481 error = mac_check_ifnet_transmit(ifp, m0); 482 if (error) { 483 m_freem(m0); 484 return (error); 485 } 486#endif 487 488 /* Could be unlocked read? */ 489 mtx_lock(&tp->tun_mtx); 490 cached_tun_flags = tp->tun_flags; 491 mtx_unlock(&tp->tun_mtx); 492 if ((cached_tun_flags & TUN_READY) != TUN_READY) { 493 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags); 494 m_freem (m0); 495 return (EHOSTDOWN); 496 } 497 498 if ((ifp->if_flags & IFF_UP) != IFF_UP) { 499 m_freem (m0); 500 return (EHOSTDOWN); 501 } 502 503 /* BPF writes need to be handled specially. */ 504 if (dst->sa_family == AF_UNSPEC) { 505 bcopy(dst->sa_data, &af, sizeof(af)); 506 dst->sa_family = af; 507 } 508 509 if (ifp->if_bpf) { 510 af = dst->sa_family; 511 bpf_mtap2(ifp->if_bpf, &af, sizeof(af), m0); 512 } 513 514 /* prepend sockaddr? 
this may abort if the mbuf allocation fails */ 515 if (cached_tun_flags & TUN_LMODE) { 516 /* allocate space for sockaddr */ 517 M_PREPEND(m0, dst->sa_len, M_DONTWAIT); 518 519 /* if allocation failed drop packet */ 520 if (m0 == NULL) { 521 ifp->if_iqdrops++; 522 ifp->if_oerrors++; 523 return (ENOBUFS); 524 } else { 525 bcopy(dst, m0->m_data, dst->sa_len); 526 } 527 } 528 529 if (cached_tun_flags & TUN_IFHEAD) { 530 /* Prepend the address family */ 531 M_PREPEND(m0, 4, M_DONTWAIT); 532 533 /* if allocation failed drop packet */ 534 if (m0 == NULL) { 535 ifp->if_iqdrops++; 536 ifp->if_oerrors++; 537 return (ENOBUFS); 538 } else 539 *(u_int32_t *)m0->m_data = htonl(dst->sa_family); 540 } else { 541#ifdef INET 542 if (dst->sa_family != AF_INET) 543#endif 544 { 545 m_freem(m0); 546 return (EAFNOSUPPORT); 547 } 548 } 549 550 IFQ_HANDOFF(ifp, m0, error); 551 if (error) { 552 ifp->if_collisions++; 553 return (ENOBUFS); 554 } 555 ifp->if_opackets++; 556 return (0); 557} 558 559/* 560 * the cdevsw interface is now pretty minimal. 
561 */ 562static int 563tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) 564{ 565 int s; 566 int error; 567 struct tun_softc *tp = dev->si_drv1; 568 struct tuninfo *tunp; 569 570 switch (cmd) { 571 case TUNSIFINFO: 572 tunp = (struct tuninfo *)data; 573 if (tunp->mtu < IF_MINMTU) 574 return (EINVAL); 575 if (TUN2IFP(tp)->if_mtu != tunp->mtu 576 && (error = suser(td)) != 0) 577 return (error); 578 TUN2IFP(tp)->if_mtu = tunp->mtu; 579 TUN2IFP(tp)->if_type = tunp->type; 580 TUN2IFP(tp)->if_baudrate = tunp->baudrate; 581 break; 582 case TUNGIFINFO: 583 tunp = (struct tuninfo *)data; 584 tunp->mtu = TUN2IFP(tp)->if_mtu; 585 tunp->type = TUN2IFP(tp)->if_type; 586 tunp->baudrate = TUN2IFP(tp)->if_baudrate; 587 break; 588 case TUNSDEBUG: 589 tundebug = *(int *)data; 590 break; 591 case TUNGDEBUG: 592 *(int *)data = tundebug; 593 break; 594 case TUNSLMODE: 595 mtx_lock(&tp->tun_mtx); 596 if (*(int *)data) { 597 tp->tun_flags |= TUN_LMODE; 598 tp->tun_flags &= ~TUN_IFHEAD; 599 } else 600 tp->tun_flags &= ~TUN_LMODE; 601 mtx_unlock(&tp->tun_mtx); 602 break; 603 case TUNSIFHEAD: 604 mtx_lock(&tp->tun_mtx); 605 if (*(int *)data) { 606 tp->tun_flags |= TUN_IFHEAD; 607 tp->tun_flags &= ~TUN_LMODE; 608 } else 609 tp->tun_flags &= ~TUN_IFHEAD; 610 mtx_unlock(&tp->tun_mtx); 611 break; 612 case TUNGIFHEAD: 613 /* Could be unlocked read? */ 614 mtx_lock(&tp->tun_mtx); 615 *(int *)data = (tp->tun_flags & TUN_IFHEAD) ? 
1 : 0; 616 mtx_unlock(&tp->tun_mtx); 617 break; 618 case TUNSIFMODE: 619 /* deny this if UP */ 620 if (TUN2IFP(tp)->if_flags & IFF_UP) 621 return(EBUSY); 622 623 switch (*(int *)data & ~IFF_MULTICAST) { 624 case IFF_POINTOPOINT: 625 case IFF_BROADCAST: 626 TUN2IFP(tp)->if_flags &= 627 ~(IFF_BROADCAST|IFF_POINTOPOINT|IFF_MULTICAST); 628 TUN2IFP(tp)->if_flags |= *(int *)data; 629 break; 630 default: 631 return(EINVAL); 632 } 633 break; 634 case TUNSIFPID: 635 mtx_lock(&tp->tun_mtx); 636 tp->tun_pid = curthread->td_proc->p_pid; 637 mtx_unlock(&tp->tun_mtx); 638 break; 639 case FIONBIO: 640 break; 641 case FIOASYNC: 642 mtx_lock(&tp->tun_mtx); 643 if (*(int *)data) 644 tp->tun_flags |= TUN_ASYNC; 645 else 646 tp->tun_flags &= ~TUN_ASYNC; 647 mtx_unlock(&tp->tun_mtx); 648 break; 649 case FIONREAD: 650 s = splimp(); 651 if (!IFQ_IS_EMPTY(&TUN2IFP(tp)->if_snd)) { 652 struct mbuf *mb; 653 IFQ_LOCK(&TUN2IFP(tp)->if_snd); 654 IFQ_POLL_NOLOCK(&TUN2IFP(tp)->if_snd, mb); 655 for( *(int *)data = 0; mb != 0; mb = mb->m_next) 656 *(int *)data += mb->m_len; 657 IFQ_UNLOCK(&TUN2IFP(tp)->if_snd); 658 } else 659 *(int *)data = 0; 660 splx(s); 661 break; 662 case FIOSETOWN: 663 return (fsetown(*(int *)data, &tp->tun_sigio)); 664 665 case FIOGETOWN: 666 *(int *)data = fgetown(&tp->tun_sigio); 667 return (0); 668 669 /* This is deprecated, FIOSETOWN should be used instead. */ 670 case TIOCSPGRP: 671 return (fsetown(-(*(int *)data), &tp->tun_sigio)); 672 673 /* This is deprecated, FIOGETOWN should be used instead. */ 674 case TIOCGPGRP: 675 *(int *)data = -fgetown(&tp->tun_sigio); 676 return (0); 677 678 default: 679 return (ENOTTY); 680 } 681 return (0); 682} 683 684/* 685 * The cdevsw read interface - reads a packet at a time, or at 686 * least as much of a packet as can be read. 
687 */ 688static int 689tunread(struct cdev *dev, struct uio *uio, int flag) 690{ 691 struct tun_softc *tp = dev->si_drv1; 692 struct ifnet *ifp = TUN2IFP(tp); 693 struct mbuf *m; 694 int error=0, len, s; 695 696 TUNDEBUG (ifp, "read\n"); 697 mtx_lock(&tp->tun_mtx); 698 if ((tp->tun_flags & TUN_READY) != TUN_READY) { 699 mtx_unlock(&tp->tun_mtx); 700 TUNDEBUG (ifp, "not ready 0%o\n", tp->tun_flags); 701 return (EHOSTDOWN); 702 } 703 704 tp->tun_flags &= ~TUN_RWAIT; 705 mtx_unlock(&tp->tun_mtx); 706 707 s = splimp(); 708 do { 709 IFQ_DEQUEUE(&ifp->if_snd, m); 710 if (m == NULL) { 711 if (flag & O_NONBLOCK) { 712 splx(s); 713 return (EWOULDBLOCK); 714 } 715 mtx_lock(&tp->tun_mtx); 716 tp->tun_flags |= TUN_RWAIT; 717 mtx_unlock(&tp->tun_mtx); 718 if((error = tsleep(tp, PCATCH | (PZERO + 1), 719 "tunread", 0)) != 0) { 720 splx(s); 721 return (error); 722 } 723 } 724 } while (m == NULL); 725 splx(s); 726 727 while (m && uio->uio_resid > 0 && error == 0) { 728 len = min(uio->uio_resid, m->m_len); 729 if (len != 0) 730 error = uiomove(mtod(m, void *), len, uio); 731 m = m_free(m); 732 } 733 734 if (m) { 735 TUNDEBUG(ifp, "Dropping mbuf\n"); 736 m_freem(m); 737 } 738 return (error); 739} 740 741/* 742 * the cdevsw write interface - an atomic write is a packet - or else! 
743 */ 744static int 745tunwrite(struct cdev *dev, struct uio *uio, int flag) 746{ 747 struct tun_softc *tp = dev->si_drv1; 748 struct ifnet *ifp = TUN2IFP(tp); 749 struct mbuf *m; 750 int error = 0; 751 uint32_t family; 752 int isr; 753 754 TUNDEBUG(ifp, "tunwrite\n"); 755 756 if ((ifp->if_flags & IFF_UP) != IFF_UP) 757 /* ignore silently */ 758 return (0); 759 760 if (uio->uio_resid == 0) 761 return (0); 762 763 if (uio->uio_resid < 0 || uio->uio_resid > TUNMRU) { 764 TUNDEBUG(ifp, "len=%d!\n", uio->uio_resid); 765 return (EIO); 766 } 767 768 if ((m = m_uiotombuf(uio, M_DONTWAIT, 0, 0)) == NULL) { 769 ifp->if_ierrors++; 770 return (error); 771 } 772 773 m->m_pkthdr.rcvif = ifp; 774#ifdef MAC 775 mac_create_mbuf_from_ifnet(ifp, m); 776#endif 777 778 /* Could be unlocked read? */ 779 mtx_lock(&tp->tun_mtx); 780 if (tp->tun_flags & TUN_IFHEAD) { 781 mtx_unlock(&tp->tun_mtx); 782 if (m->m_len < sizeof(family) && 783 (m = m_pullup(m, sizeof(family))) == NULL) 784 return (ENOBUFS); 785 family = ntohl(*mtod(m, u_int32_t *)); 786 m_adj(m, sizeof(family)); 787 } else { 788 mtx_unlock(&tp->tun_mtx); 789 family = AF_INET; 790 } 791 792 BPF_MTAP2(ifp, &family, sizeof(family), m); 793 794 switch (family) { 795#ifdef INET 796 case AF_INET: 797 isr = NETISR_IP; 798 break; 799#endif 800#ifdef INET6 801 case AF_INET6: 802 isr = NETISR_IPV6; 803 break; 804#endif 805#ifdef IPX 806 case AF_IPX: 807 isr = NETISR_IPX; 808 break; 809#endif 810#ifdef NETATALK 811 case AF_APPLETALK: 812 isr = NETISR_ATALK2; 813 break; 814#endif 815 default: 816 m_freem(m); 817 return (EAFNOSUPPORT); 818 } 819 /* First chunk of an mbuf contains good junk */ 820 if (harvest.point_to_point) 821 random_harvest(m, 16, 3, 0, RANDOM_NET); 822 ifp->if_ibytes += m->m_pkthdr.len; 823 ifp->if_ipackets++; 824 netisr_dispatch(isr, m); 825 return (0); 826} 827 828/* 829 * tunpoll - the poll interface, this is only useful on reads 830 * really. 
The write detect always returns true, write never blocks 831 * anyway, it either accepts the packet or drops it. 832 */ 833static int 834tunpoll(struct cdev *dev, int events, struct thread *td) 835{ 836 int s; 837 struct tun_softc *tp = dev->si_drv1; 838 struct ifnet *ifp = TUN2IFP(tp); 839 int revents = 0; 840 struct mbuf *m; 841 842 s = splimp(); 843 TUNDEBUG(ifp, "tunpoll\n"); 844 845 if (events & (POLLIN | POLLRDNORM)) { 846 IFQ_LOCK(&ifp->if_snd); 847 IFQ_POLL_NOLOCK(&ifp->if_snd, m); 848 if (m != NULL) { 849 TUNDEBUG(ifp, "tunpoll q=%d\n", ifp->if_snd.ifq_len); 850 revents |= events & (POLLIN | POLLRDNORM); 851 } else { 852 TUNDEBUG(ifp, "tunpoll waiting\n"); 853 selrecord(td, &tp->tun_rsel); 854 } 855 IFQ_UNLOCK(&ifp->if_snd); 856 } 857 if (events & (POLLOUT | POLLWRNORM)) 858 revents |= events & (POLLOUT | POLLWRNORM); 859 860 splx(s); 861 return (revents); 862} 863