35 * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $ 36 */ 37 38#include "opt_compat.h" 39#include "opt_inet.h" 40 41#include <sys/param.h> 42#include <sys/conf.h> 43#include <sys/fcntl.h> 44#include <sys/filio.h> 45#include <sys/jail.h> 46#include <sys/kernel.h> 47#include <sys/malloc.h> 48#include <sys/mbuf.h> 49#include <sys/module.h> 50#include <sys/poll.h> 51#include <sys/priv.h> 52#include <sys/proc.h> 53#include <sys/selinfo.h> 54#include <sys/signalvar.h> 55#include <sys/socket.h> 56#include <sys/sockio.h> 57#include <sys/sysctl.h> 58#include <sys/systm.h> 59#include <sys/ttycom.h> 60#include <sys/uio.h> 61#include <sys/queue.h> 62 63#include <net/bpf.h> 64#include <net/ethernet.h> 65#include <net/if.h> 66#include <net/if_var.h> 67#include <net/if_clone.h> 68#include <net/if_dl.h> 69#include <net/if_media.h> 70#include <net/if_types.h> 71#include <net/route.h> 72#include <net/vnet.h> 73 74#include <netinet/in.h> 75 76#include <net/if_tapvar.h> 77#include <net/if_tap.h> 78 79 80#define CDEV_NAME "tap" 81#define TAPDEBUG if (tapdebug) printf 82 83static const char tapname[] = "tap"; 84static const char vmnetname[] = "vmnet"; 85#define TAPMAXUNIT 0x7fff 86#define VMNET_DEV_MASK CLONE_FLAG0 87 88/* module */ 89static int tapmodevent(module_t, int, void *); 90 91/* device */ 92static void tapclone(void *, struct ucred *, char *, int, 93 struct cdev **); 94static void tapcreate(struct cdev *); 95 96/* network interface */ 97static void tapifstart(struct ifnet *); 98static int tapifioctl(struct ifnet *, u_long, caddr_t); 99static void tapifinit(void *); 100 101static int tap_clone_create(struct if_clone *, int, caddr_t); 102static void tap_clone_destroy(struct ifnet *); 103static struct if_clone *tap_cloner; 104static int vmnet_clone_create(struct if_clone *, int, caddr_t); 105static void vmnet_clone_destroy(struct ifnet *); 106static struct if_clone *vmnet_cloner; 107 108/* character device */ 109static d_open_t tapopen; 110static d_close_t tapclose; 111static 
d_read_t tapread; 112static d_write_t tapwrite; 113static d_ioctl_t tapioctl; 114static d_poll_t tappoll; 115static d_kqfilter_t tapkqfilter; 116 117/* kqueue(2) */ 118static int tapkqread(struct knote *, long); 119static int tapkqwrite(struct knote *, long); 120static void tapkqdetach(struct knote *); 121 122static struct filterops tap_read_filterops = { 123 .f_isfd = 1, 124 .f_attach = NULL, 125 .f_detach = tapkqdetach, 126 .f_event = tapkqread, 127}; 128 129static struct filterops tap_write_filterops = { 130 .f_isfd = 1, 131 .f_attach = NULL, 132 .f_detach = tapkqdetach, 133 .f_event = tapkqwrite, 134}; 135 136static struct cdevsw tap_cdevsw = { 137 .d_version = D_VERSION, 138 .d_flags = D_NEEDMINOR, 139 .d_open = tapopen, 140 .d_close = tapclose, 141 .d_read = tapread, 142 .d_write = tapwrite, 143 .d_ioctl = tapioctl, 144 .d_poll = tappoll, 145 .d_name = CDEV_NAME, 146 .d_kqfilter = tapkqfilter, 147}; 148 149/* 150 * All global variables in if_tap.c are locked with tapmtx, with the 151 * exception of tapdebug, which is accessed unlocked; tapclones is 152 * static at runtime. 
153 */ 154static struct mtx tapmtx; 155static int tapdebug = 0; /* debug flag */ 156static int tapuopen = 0; /* allow user open() */ 157static int tapuponopen = 0; /* IFF_UP on open() */ 158static int tapdclone = 1; /* enable devfs cloning */ 159static SLIST_HEAD(, tap_softc) taphead; /* first device */ 160static struct clonedevs *tapclones; 161 162MALLOC_DECLARE(M_TAP); 163MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface"); 164SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, ""); 165 166SYSCTL_DECL(_net_link); 167static SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW, 0, 168 "Ethernet tunnel software network interface"); 169SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tapuopen, 0, 170 "Allow user to open /dev/tap (based on node permissions)"); 171SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0, 172 "Bring interface up when /dev/tap is opened"); 173SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &tapdclone, 0, 174 "Enable legacy devfs interface creation"); 175SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tapdebug, 0, ""); 176 177DEV_MODULE(if_tap, tapmodevent, NULL);
| 35 * $Id: if_tap.c,v 0.21 2000/07/23 21:46:02 max Exp $ 36 */ 37 38#include "opt_compat.h" 39#include "opt_inet.h" 40 41#include <sys/param.h> 42#include <sys/conf.h> 43#include <sys/fcntl.h> 44#include <sys/filio.h> 45#include <sys/jail.h> 46#include <sys/kernel.h> 47#include <sys/malloc.h> 48#include <sys/mbuf.h> 49#include <sys/module.h> 50#include <sys/poll.h> 51#include <sys/priv.h> 52#include <sys/proc.h> 53#include <sys/selinfo.h> 54#include <sys/signalvar.h> 55#include <sys/socket.h> 56#include <sys/sockio.h> 57#include <sys/sysctl.h> 58#include <sys/systm.h> 59#include <sys/ttycom.h> 60#include <sys/uio.h> 61#include <sys/queue.h> 62 63#include <net/bpf.h> 64#include <net/ethernet.h> 65#include <net/if.h> 66#include <net/if_var.h> 67#include <net/if_clone.h> 68#include <net/if_dl.h> 69#include <net/if_media.h> 70#include <net/if_types.h> 71#include <net/route.h> 72#include <net/vnet.h> 73 74#include <netinet/in.h> 75 76#include <net/if_tapvar.h> 77#include <net/if_tap.h> 78 79 80#define CDEV_NAME "tap" 81#define TAPDEBUG if (tapdebug) printf 82 83static const char tapname[] = "tap"; 84static const char vmnetname[] = "vmnet"; 85#define TAPMAXUNIT 0x7fff 86#define VMNET_DEV_MASK CLONE_FLAG0 87 88/* module */ 89static int tapmodevent(module_t, int, void *); 90 91/* device */ 92static void tapclone(void *, struct ucred *, char *, int, 93 struct cdev **); 94static void tapcreate(struct cdev *); 95 96/* network interface */ 97static void tapifstart(struct ifnet *); 98static int tapifioctl(struct ifnet *, u_long, caddr_t); 99static void tapifinit(void *); 100 101static int tap_clone_create(struct if_clone *, int, caddr_t); 102static void tap_clone_destroy(struct ifnet *); 103static struct if_clone *tap_cloner; 104static int vmnet_clone_create(struct if_clone *, int, caddr_t); 105static void vmnet_clone_destroy(struct ifnet *); 106static struct if_clone *vmnet_cloner; 107 108/* character device */ 109static d_open_t tapopen; 110static d_close_t tapclose; 
111static d_read_t tapread; 112static d_write_t tapwrite; 113static d_ioctl_t tapioctl; 114static d_poll_t tappoll; 115static d_kqfilter_t tapkqfilter; 116 117/* kqueue(2) */ 118static int tapkqread(struct knote *, long); 119static int tapkqwrite(struct knote *, long); 120static void tapkqdetach(struct knote *); 121 122static struct filterops tap_read_filterops = { 123 .f_isfd = 1, 124 .f_attach = NULL, 125 .f_detach = tapkqdetach, 126 .f_event = tapkqread, 127}; 128 129static struct filterops tap_write_filterops = { 130 .f_isfd = 1, 131 .f_attach = NULL, 132 .f_detach = tapkqdetach, 133 .f_event = tapkqwrite, 134}; 135 136static struct cdevsw tap_cdevsw = { 137 .d_version = D_VERSION, 138 .d_flags = D_NEEDMINOR, 139 .d_open = tapopen, 140 .d_close = tapclose, 141 .d_read = tapread, 142 .d_write = tapwrite, 143 .d_ioctl = tapioctl, 144 .d_poll = tappoll, 145 .d_name = CDEV_NAME, 146 .d_kqfilter = tapkqfilter, 147}; 148 149/* 150 * All global variables in if_tap.c are locked with tapmtx, with the 151 * exception of tapdebug, which is accessed unlocked; tapclones is 152 * static at runtime. 
153 */ 154static struct mtx tapmtx; 155static int tapdebug = 0; /* debug flag */ 156static int tapuopen = 0; /* allow user open() */ 157static int tapuponopen = 0; /* IFF_UP on open() */ 158static int tapdclone = 1; /* enable devfs cloning */ 159static SLIST_HEAD(, tap_softc) taphead; /* first device */ 160static struct clonedevs *tapclones; 161 162MALLOC_DECLARE(M_TAP); 163MALLOC_DEFINE(M_TAP, CDEV_NAME, "Ethernet tunnel interface"); 164SYSCTL_INT(_debug, OID_AUTO, if_tap_debug, CTLFLAG_RW, &tapdebug, 0, ""); 165 166SYSCTL_DECL(_net_link); 167static SYSCTL_NODE(_net_link, OID_AUTO, tap, CTLFLAG_RW, 0, 168 "Ethernet tunnel software network interface"); 169SYSCTL_INT(_net_link_tap, OID_AUTO, user_open, CTLFLAG_RW, &tapuopen, 0, 170 "Allow user to open /dev/tap (based on node permissions)"); 171SYSCTL_INT(_net_link_tap, OID_AUTO, up_on_open, CTLFLAG_RW, &tapuponopen, 0, 172 "Bring interface up when /dev/tap is opened"); 173SYSCTL_INT(_net_link_tap, OID_AUTO, devfs_cloning, CTLFLAG_RWTUN, &tapdclone, 0, 174 "Enable legacy devfs interface creation"); 175SYSCTL_INT(_net_link_tap, OID_AUTO, debug, CTLFLAG_RW, &tapdebug, 0, ""); 176 177DEV_MODULE(if_tap, tapmodevent, NULL);
|
178 179static int 180tap_clone_create(struct if_clone *ifc, int unit, caddr_t params) 181{ 182 struct cdev *dev; 183 int i; 184 185 /* Find any existing device, or allocate new unit number. */ 186 i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, 0); 187 if (i) { 188 dev = make_dev(&tap_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, 189 "%s%d", tapname, unit); 190 } 191 192 tapcreate(dev); 193 return (0); 194} 195 196/* vmnet devices are tap devices in disguise */ 197static int 198vmnet_clone_create(struct if_clone *ifc, int unit, caddr_t params) 199{ 200 struct cdev *dev; 201 int i; 202 203 /* Find any existing device, or allocate new unit number. */ 204 i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, VMNET_DEV_MASK); 205 if (i) { 206 dev = make_dev(&tap_cdevsw, unit | VMNET_DEV_MASK, UID_ROOT, 207 GID_WHEEL, 0600, "%s%d", vmnetname, unit); 208 } 209 210 tapcreate(dev); 211 return (0); 212} 213 214static void 215tap_destroy(struct tap_softc *tp) 216{ 217 struct ifnet *ifp = tp->tap_ifp; 218 219 CURVNET_SET(ifp->if_vnet); 220 destroy_dev(tp->tap_dev); 221 seldrain(&tp->tap_rsel); 222 knlist_clear(&tp->tap_rsel.si_note, 0); 223 knlist_destroy(&tp->tap_rsel.si_note); 224 ether_ifdetach(ifp); 225 if_free(ifp); 226 227 mtx_destroy(&tp->tap_mtx); 228 free(tp, M_TAP); 229 CURVNET_RESTORE(); 230} 231 232static void 233tap_clone_destroy(struct ifnet *ifp) 234{ 235 struct tap_softc *tp = ifp->if_softc; 236 237 mtx_lock(&tapmtx); 238 SLIST_REMOVE(&taphead, tp, tap_softc, tap_next); 239 mtx_unlock(&tapmtx); 240 tap_destroy(tp); 241} 242 243/* vmnet devices are tap devices in disguise */ 244static void 245vmnet_clone_destroy(struct ifnet *ifp) 246{ 247 tap_clone_destroy(ifp); 248} 249 250/* 251 * tapmodevent 252 * 253 * module event handler 254 */ 255static int 256tapmodevent(module_t mod, int type, void *data) 257{ 258 static eventhandler_tag eh_tag = NULL; 259 struct tap_softc *tp = NULL; 260 struct ifnet *ifp = NULL; 261 262 switch (type) { 263 case MOD_LOAD: 264 265 
/* intitialize device */ 266 267 mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF); 268 SLIST_INIT(&taphead); 269 270 clone_setup(&tapclones); 271 eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000); 272 if (eh_tag == NULL) { 273 clone_cleanup(&tapclones); 274 mtx_destroy(&tapmtx); 275 return (ENOMEM); 276 } 277 tap_cloner = if_clone_simple(tapname, tap_clone_create, 278 tap_clone_destroy, 0); 279 vmnet_cloner = if_clone_simple(vmnetname, vmnet_clone_create, 280 vmnet_clone_destroy, 0); 281 return (0); 282 283 case MOD_UNLOAD: 284 /* 285 * The EBUSY algorithm here can't quite atomically 286 * guarantee that this is race-free since we have to 287 * release the tap mtx to deregister the clone handler. 288 */ 289 mtx_lock(&tapmtx); 290 SLIST_FOREACH(tp, &taphead, tap_next) { 291 mtx_lock(&tp->tap_mtx); 292 if (tp->tap_flags & TAP_OPEN) { 293 mtx_unlock(&tp->tap_mtx); 294 mtx_unlock(&tapmtx); 295 return (EBUSY); 296 } 297 mtx_unlock(&tp->tap_mtx); 298 } 299 mtx_unlock(&tapmtx); 300 301 EVENTHANDLER_DEREGISTER(dev_clone, eh_tag); 302 if_clone_detach(tap_cloner); 303 if_clone_detach(vmnet_cloner); 304 drain_dev_clone_events(); 305 306 mtx_lock(&tapmtx); 307 while ((tp = SLIST_FIRST(&taphead)) != NULL) { 308 SLIST_REMOVE_HEAD(&taphead, tap_next); 309 mtx_unlock(&tapmtx); 310 311 ifp = tp->tap_ifp; 312 313 TAPDEBUG("detaching %s\n", ifp->if_xname); 314 315 tap_destroy(tp); 316 mtx_lock(&tapmtx); 317 } 318 mtx_unlock(&tapmtx); 319 clone_cleanup(&tapclones); 320 321 mtx_destroy(&tapmtx); 322 323 break; 324 325 default: 326 return (EOPNOTSUPP); 327 } 328 329 return (0); 330} /* tapmodevent */ 331 332 333/* 334 * DEVFS handler 335 * 336 * We need to support two kind of devices - tap and vmnet 337 */ 338static void 339tapclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev) 340{ 341 char devname[SPECNAMELEN + 1]; 342 int i, unit, append_unit; 343 int extra; 344 345 if (*dev != NULL) 346 return; 347 348 if (!tapdclone || 349 (!tapuopen && 
priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0)) 350 return; 351 352 unit = 0; 353 append_unit = 0; 354 extra = 0; 355 356 /* We're interested in only tap/vmnet devices. */ 357 if (strcmp(name, tapname) == 0) { 358 unit = -1; 359 } else if (strcmp(name, vmnetname) == 0) { 360 unit = -1; 361 extra = VMNET_DEV_MASK; 362 } else if (dev_stdclone(name, NULL, tapname, &unit) != 1) { 363 if (dev_stdclone(name, NULL, vmnetname, &unit) != 1) { 364 return; 365 } else { 366 extra = VMNET_DEV_MASK; 367 } 368 } 369 370 if (unit == -1) 371 append_unit = 1; 372 373 CURVNET_SET(CRED_TO_VNET(cred)); 374 /* find any existing device, or allocate new unit number */ 375 i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra); 376 if (i) { 377 if (append_unit) { 378 /* 379 * We were passed 'tun' or 'tap', with no unit specified 380 * so we'll need to append it now. 381 */ 382 namelen = snprintf(devname, sizeof(devname), "%s%d", name, 383 unit); 384 name = devname; 385 } 386 387 *dev = make_dev_credf(MAKEDEV_REF, &tap_cdevsw, unit | extra, 388 cred, UID_ROOT, GID_WHEEL, 0600, "%s", name); 389 } 390 391 if_clone_create(name, namelen, NULL); 392 CURVNET_RESTORE(); 393} /* tapclone */ 394 395 396/* 397 * tapcreate 398 * 399 * to create interface 400 */ 401static void 402tapcreate(struct cdev *dev) 403{ 404 struct ifnet *ifp = NULL; 405 struct tap_softc *tp = NULL; 406 unsigned short macaddr_hi; 407 uint32_t macaddr_mid; 408 int unit; 409 const char *name = NULL; 410 u_char eaddr[6]; 411 412 /* allocate driver storage and create device */ 413 tp = malloc(sizeof(*tp), M_TAP, M_WAITOK | M_ZERO); 414 mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF); 415 mtx_lock(&tapmtx); 416 SLIST_INSERT_HEAD(&taphead, tp, tap_next); 417 mtx_unlock(&tapmtx); 418 419 unit = dev2unit(dev); 420 421 /* select device: tap or vmnet */ 422 if (unit & VMNET_DEV_MASK) { 423 name = vmnetname; 424 tp->tap_flags |= TAP_VMNET; 425 } else 426 name = tapname; 427 428 unit &= TAPMAXUNIT; 429 430 
TAPDEBUG("tapcreate(%s%d). minor = %#x\n", name, unit, dev2unit(dev)); 431 432 /* generate fake MAC address: 00 bd xx xx xx unit_no */ 433 macaddr_hi = htons(0x00bd); 434 macaddr_mid = (uint32_t) ticks; 435 bcopy(&macaddr_hi, eaddr, sizeof(short)); 436 bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t)); 437 eaddr[5] = (u_char)unit; 438 439 /* fill the rest and attach interface */ 440 ifp = tp->tap_ifp = if_alloc(IFT_ETHER); 441 if (ifp == NULL) 442 panic("%s%d: can not if_alloc()", name, unit); 443 ifp->if_softc = tp; 444 if_initname(ifp, name, unit); 445 ifp->if_init = tapifinit; 446 ifp->if_start = tapifstart; 447 ifp->if_ioctl = tapifioctl; 448 ifp->if_mtu = ETHERMTU; 449 ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST); 450 IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); 451 ifp->if_capabilities |= IFCAP_LINKSTATE; 452 ifp->if_capenable |= IFCAP_LINKSTATE; 453 454 dev->si_drv1 = tp; 455 tp->tap_dev = dev; 456 457 ether_ifattach(ifp, eaddr); 458 459 mtx_lock(&tp->tap_mtx); 460 tp->tap_flags |= TAP_INITED; 461 mtx_unlock(&tp->tap_mtx); 462 463 knlist_init_mtx(&tp->tap_rsel.si_note, &tp->tap_mtx); 464 465 TAPDEBUG("interface %s is created. minor = %#x\n", 466 ifp->if_xname, dev2unit(dev)); 467} /* tapcreate */ 468 469 470/* 471 * tapopen 472 * 473 * to open tunnel. 
must be superuser 474 */ 475static int 476tapopen(struct cdev *dev, int flag, int mode, struct thread *td) 477{ 478 struct tap_softc *tp = NULL; 479 struct ifnet *ifp = NULL; 480 int error; 481 482 if (tapuopen == 0) { 483 error = priv_check(td, PRIV_NET_TAP); 484 if (error) 485 return (error); 486 } 487 488 if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT) 489 return (ENXIO); 490 491 tp = dev->si_drv1; 492 493 mtx_lock(&tp->tap_mtx); 494 if (tp->tap_flags & TAP_OPEN) { 495 mtx_unlock(&tp->tap_mtx); 496 return (EBUSY); 497 } 498 499 bcopy(IF_LLADDR(tp->tap_ifp), tp->ether_addr, sizeof(tp->ether_addr)); 500 tp->tap_pid = td->td_proc->p_pid; 501 tp->tap_flags |= TAP_OPEN; 502 ifp = tp->tap_ifp; 503 504 ifp->if_drv_flags |= IFF_DRV_RUNNING; 505 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 506 if (tapuponopen) 507 ifp->if_flags |= IFF_UP; 508 if_link_state_change(ifp, LINK_STATE_UP); 509 mtx_unlock(&tp->tap_mtx); 510 511 TAPDEBUG("%s is open. minor = %#x\n", ifp->if_xname, dev2unit(dev)); 512 513 return (0); 514} /* tapopen */ 515 516 517/* 518 * tapclose 519 * 520 * close the device - mark i/f down & delete routing info 521 */ 522static int 523tapclose(struct cdev *dev, int foo, int bar, struct thread *td) 524{ 525 struct ifaddr *ifa; 526 struct tap_softc *tp = dev->si_drv1; 527 struct ifnet *ifp = tp->tap_ifp; 528 529 /* junk all pending output */ 530 mtx_lock(&tp->tap_mtx); 531 CURVNET_SET(ifp->if_vnet); 532 IF_DRAIN(&ifp->if_snd); 533 534 /* 535 * Do not bring the interface down, and do not anything with 536 * interface, if we are in VMnet mode. Just close the device. 
537 */ 538 if (((tp->tap_flags & TAP_VMNET) == 0) && 539 (ifp->if_flags & (IFF_UP | IFF_LINK0)) == IFF_UP) { 540 mtx_unlock(&tp->tap_mtx); 541 if_down(ifp); 542 mtx_lock(&tp->tap_mtx); 543 if (ifp->if_drv_flags & IFF_DRV_RUNNING) { 544 ifp->if_drv_flags &= ~IFF_DRV_RUNNING; 545 mtx_unlock(&tp->tap_mtx); 546 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 547 rtinit(ifa, (int)RTM_DELETE, 0); 548 } 549 if_purgeaddrs(ifp); 550 mtx_lock(&tp->tap_mtx); 551 } 552 } 553 554 if_link_state_change(ifp, LINK_STATE_DOWN); 555 CURVNET_RESTORE(); 556 557 funsetown(&tp->tap_sigio); 558 selwakeuppri(&tp->tap_rsel, PZERO+1); 559 KNOTE_LOCKED(&tp->tap_rsel.si_note, 0); 560 561 tp->tap_flags &= ~TAP_OPEN; 562 tp->tap_pid = 0; 563 mtx_unlock(&tp->tap_mtx); 564 565 TAPDEBUG("%s is closed. minor = %#x\n", 566 ifp->if_xname, dev2unit(dev)); 567 568 return (0); 569} /* tapclose */ 570 571 572/* 573 * tapifinit 574 * 575 * network interface initialization function 576 */ 577static void 578tapifinit(void *xtp) 579{ 580 struct tap_softc *tp = (struct tap_softc *)xtp; 581 struct ifnet *ifp = tp->tap_ifp; 582 583 TAPDEBUG("initializing %s\n", ifp->if_xname); 584 585 mtx_lock(&tp->tap_mtx); 586 ifp->if_drv_flags |= IFF_DRV_RUNNING; 587 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 588 mtx_unlock(&tp->tap_mtx); 589 590 /* attempt to start output */ 591 tapifstart(ifp); 592} /* tapifinit */ 593 594 595/* 596 * tapifioctl 597 * 598 * Process an ioctl request on network interface 599 */ 600static int 601tapifioctl(struct ifnet *ifp, u_long cmd, caddr_t data) 602{ 603 struct tap_softc *tp = ifp->if_softc; 604 struct ifreq *ifr = (struct ifreq *)data; 605 struct ifstat *ifs = NULL; 606 struct ifmediareq *ifmr = NULL; 607 int dummy, error = 0; 608 609 switch (cmd) { 610 case SIOCSIFFLAGS: /* XXX -- just like vmnet does */ 611 case SIOCADDMULTI: 612 case SIOCDELMULTI: 613 break; 614 615 case SIOCGIFMEDIA: 616 ifmr = (struct ifmediareq *)data; 617 dummy = ifmr->ifm_count; 618 ifmr->ifm_count = 1; 619 
ifmr->ifm_status = IFM_AVALID; 620 ifmr->ifm_active = IFM_ETHER; 621 if (tp->tap_flags & TAP_OPEN) 622 ifmr->ifm_status |= IFM_ACTIVE; 623 ifmr->ifm_current = ifmr->ifm_active; 624 if (dummy >= 1) { 625 int media = IFM_ETHER; 626 error = copyout(&media, ifmr->ifm_ulist, 627 sizeof(int)); 628 } 629 break; 630 631 case SIOCSIFMTU: 632 ifp->if_mtu = ifr->ifr_mtu; 633 break; 634 635 case SIOCGIFSTATUS: 636 ifs = (struct ifstat *)data; 637 mtx_lock(&tp->tap_mtx); 638 if (tp->tap_pid != 0) 639 snprintf(ifs->ascii, sizeof(ifs->ascii), 640 "\tOpened by PID %d\n", tp->tap_pid); 641 else 642 ifs->ascii[0] = '\0'; 643 mtx_unlock(&tp->tap_mtx); 644 break; 645 646 default: 647 error = ether_ioctl(ifp, cmd, data); 648 break; 649 } 650 651 return (error); 652} /* tapifioctl */ 653 654 655/* 656 * tapifstart 657 * 658 * queue packets from higher level ready to put out 659 */ 660static void 661tapifstart(struct ifnet *ifp) 662{ 663 struct tap_softc *tp = ifp->if_softc; 664 665 TAPDEBUG("%s starting\n", ifp->if_xname); 666 667 /* 668 * do not junk pending output if we are in VMnet mode. 669 * XXX: can this do any harm because of queue overflow? 670 */ 671 672 mtx_lock(&tp->tap_mtx); 673 if (((tp->tap_flags & TAP_VMNET) == 0) && 674 ((tp->tap_flags & TAP_READY) != TAP_READY)) { 675 struct mbuf *m; 676 677 /* Unlocked read. 
*/ 678 TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname, 679 tp->tap_flags); 680 681 for (;;) { 682 IF_DEQUEUE(&ifp->if_snd, m); 683 if (m != NULL) { 684 m_freem(m); 685 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); 686 } else 687 break; 688 } 689 mtx_unlock(&tp->tap_mtx); 690 691 return; 692 } 693 694 ifp->if_drv_flags |= IFF_DRV_OACTIVE; 695 696 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 697 if (tp->tap_flags & TAP_RWAIT) { 698 tp->tap_flags &= ~TAP_RWAIT; 699 wakeup(tp); 700 } 701 702 if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) { 703 mtx_unlock(&tp->tap_mtx); 704 pgsigio(&tp->tap_sigio, SIGIO, 0); 705 mtx_lock(&tp->tap_mtx); 706 } 707 708 selwakeuppri(&tp->tap_rsel, PZERO+1); 709 KNOTE_LOCKED(&tp->tap_rsel.si_note, 0); 710 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* obytes are counted in ether_output */ 711 } 712 713 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; 714 mtx_unlock(&tp->tap_mtx); 715} /* tapifstart */ 716 717 718/* 719 * tapioctl 720 * 721 * the cdevsw interface is now pretty minimal 722 */ 723static int 724tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) 725{ 726 struct ifreq ifr; 727 struct tap_softc *tp = dev->si_drv1; 728 struct ifnet *ifp = tp->tap_ifp; 729 struct tapinfo *tapp = NULL; 730 int f; 731 int error; 732#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ 733 defined(COMPAT_FREEBSD4) 734 int ival; 735#endif 736 737 switch (cmd) { 738 case TAPSIFINFO: 739 tapp = (struct tapinfo *)data; 740 if (ifp->if_type != tapp->type) 741 return (EPROTOTYPE); 742 mtx_lock(&tp->tap_mtx); 743 if (ifp->if_mtu != tapp->mtu) { 744 strncpy(ifr.ifr_name, if_name(ifp), IFNAMSIZ); 745 ifr.ifr_mtu = tapp->mtu; 746 CURVNET_SET(ifp->if_vnet); 747 error = ifhwioctl(SIOCSIFMTU, ifp, 748 (caddr_t)&ifr, td); 749 CURVNET_RESTORE(); 750 if (error) { 751 mtx_unlock(&tp->tap_mtx); 752 return (error); 753 } 754 } 755 ifp->if_baudrate = tapp->baudrate; 756 mtx_unlock(&tp->tap_mtx); 757 break; 758 759 case TAPGIFINFO: 
760 tapp = (struct tapinfo *)data; 761 mtx_lock(&tp->tap_mtx); 762 tapp->mtu = ifp->if_mtu; 763 tapp->type = ifp->if_type; 764 tapp->baudrate = ifp->if_baudrate; 765 mtx_unlock(&tp->tap_mtx); 766 break; 767 768 case TAPSDEBUG: 769 tapdebug = *(int *)data; 770 break; 771 772 case TAPGDEBUG: 773 *(int *)data = tapdebug; 774 break; 775 776 case TAPGIFNAME: { 777 struct ifreq *ifr = (struct ifreq *) data; 778 779 strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ); 780 } break; 781 782 case FIONBIO: 783 break; 784 785 case FIOASYNC: 786 mtx_lock(&tp->tap_mtx); 787 if (*(int *)data) 788 tp->tap_flags |= TAP_ASYNC; 789 else 790 tp->tap_flags &= ~TAP_ASYNC; 791 mtx_unlock(&tp->tap_mtx); 792 break; 793 794 case FIONREAD: 795 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 796 struct mbuf *mb; 797 798 IFQ_LOCK(&ifp->if_snd); 799 IFQ_POLL_NOLOCK(&ifp->if_snd, mb); 800 for (*(int *)data = 0; mb != NULL; 801 mb = mb->m_next) 802 *(int *)data += mb->m_len; 803 IFQ_UNLOCK(&ifp->if_snd); 804 } else 805 *(int *)data = 0; 806 break; 807 808 case FIOSETOWN: 809 return (fsetown(*(int *)data, &tp->tap_sigio)); 810 811 case FIOGETOWN: 812 *(int *)data = fgetown(&tp->tap_sigio); 813 return (0); 814 815 /* this is deprecated, FIOSETOWN should be used instead */ 816 case TIOCSPGRP: 817 return (fsetown(-(*(int *)data), &tp->tap_sigio)); 818 819 /* this is deprecated, FIOGETOWN should be used instead */ 820 case TIOCGPGRP: 821 *(int *)data = -fgetown(&tp->tap_sigio); 822 return (0); 823 824 /* VMware/VMnet port ioctl's */ 825 826#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \ 827 defined(COMPAT_FREEBSD4) 828 case _IO('V', 0): 829 ival = IOCPARM_IVAL(data); 830 data = (caddr_t)&ival; 831 /* FALLTHROUGH */ 832#endif 833 case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */ 834 f = *(int *)data; 835 f &= 0x0fff; 836 f &= ~IFF_CANTCHANGE; 837 f |= IFF_UP; 838 839 mtx_lock(&tp->tap_mtx); 840 ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE); 841 mtx_unlock(&tp->tap_mtx); 842 break; 843 844 
case SIOCGIFADDR: /* get MAC address of the remote side */ 845 mtx_lock(&tp->tap_mtx); 846 bcopy(tp->ether_addr, data, sizeof(tp->ether_addr)); 847 mtx_unlock(&tp->tap_mtx); 848 break; 849 850 case SIOCSIFADDR: /* set MAC address of the remote side */ 851 mtx_lock(&tp->tap_mtx); 852 bcopy(data, tp->ether_addr, sizeof(tp->ether_addr)); 853 mtx_unlock(&tp->tap_mtx); 854 break; 855 856 default: 857 return (ENOTTY); 858 } 859 return (0); 860} /* tapioctl */ 861 862 863/* 864 * tapread 865 * 866 * the cdevsw read interface - reads a packet at a time, or at 867 * least as much of a packet as can be read 868 */ 869static int 870tapread(struct cdev *dev, struct uio *uio, int flag) 871{ 872 struct tap_softc *tp = dev->si_drv1; 873 struct ifnet *ifp = tp->tap_ifp; 874 struct mbuf *m = NULL; 875 int error = 0, len; 876 877 TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, dev2unit(dev)); 878 879 mtx_lock(&tp->tap_mtx); 880 if ((tp->tap_flags & TAP_READY) != TAP_READY) { 881 mtx_unlock(&tp->tap_mtx); 882 883 /* Unlocked read. */ 884 TAPDEBUG("%s not ready. 
minor = %#x, tap_flags = 0x%x\n", 885 ifp->if_xname, dev2unit(dev), tp->tap_flags); 886 887 return (EHOSTDOWN); 888 } 889 890 tp->tap_flags &= ~TAP_RWAIT; 891 892 /* sleep until we get a packet */ 893 do { 894 IF_DEQUEUE(&ifp->if_snd, m); 895 896 if (m == NULL) { 897 if (flag & O_NONBLOCK) { 898 mtx_unlock(&tp->tap_mtx); 899 return (EWOULDBLOCK); 900 } 901 902 tp->tap_flags |= TAP_RWAIT; 903 error = mtx_sleep(tp, &tp->tap_mtx, PCATCH | (PZERO + 1), 904 "taprd", 0); 905 if (error) { 906 mtx_unlock(&tp->tap_mtx); 907 return (error); 908 } 909 } 910 } while (m == NULL); 911 mtx_unlock(&tp->tap_mtx); 912 913 /* feed packet to bpf */ 914 BPF_MTAP(ifp, m); 915 916 /* xfer packet to user space */ 917 while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) { 918 len = min(uio->uio_resid, m->m_len); 919 if (len == 0) 920 break; 921 922 error = uiomove(mtod(m, void *), len, uio); 923 m = m_free(m); 924 } 925 926 if (m != NULL) { 927 TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname, 928 dev2unit(dev)); 929 m_freem(m); 930 } 931 932 return (error); 933} /* tapread */ 934 935 936/* 937 * tapwrite 938 * 939 * the cdevsw write interface - an atomic write is a packet - or else! 
940 */ 941static int 942tapwrite(struct cdev *dev, struct uio *uio, int flag) 943{ 944 struct ether_header *eh; 945 struct tap_softc *tp = dev->si_drv1; 946 struct ifnet *ifp = tp->tap_ifp; 947 struct mbuf *m; 948 949 TAPDEBUG("%s writing, minor = %#x\n", 950 ifp->if_xname, dev2unit(dev)); 951 952 if (uio->uio_resid == 0) 953 return (0); 954 955 if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) { 956 TAPDEBUG("%s invalid packet len = %zd, minor = %#x\n", 957 ifp->if_xname, uio->uio_resid, dev2unit(dev)); 958 959 return (EIO); 960 } 961 962 if ((m = m_uiotombuf(uio, M_NOWAIT, 0, ETHER_ALIGN, 963 M_PKTHDR)) == NULL) { 964 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); 965 return (ENOBUFS); 966 } 967 968 m->m_pkthdr.rcvif = ifp; 969 970 /* 971 * Only pass a unicast frame to ether_input(), if it would actually 972 * have been received by non-virtual hardware. 973 */ 974 if (m->m_len < sizeof(struct ether_header)) { 975 m_freem(m); 976 return (0); 977 } 978 eh = mtod(m, struct ether_header *); 979 980 if (eh && (ifp->if_flags & IFF_PROMISC) == 0 && 981 !ETHER_IS_MULTICAST(eh->ether_dhost) && 982 bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) { 983 m_freem(m); 984 return (0); 985 } 986 987 /* Pass packet up to parent. */ 988 CURVNET_SET(ifp->if_vnet); 989 (*ifp->if_input)(ifp, m); 990 CURVNET_RESTORE(); 991 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* ibytes are counted in parent */ 992 993 return (0); 994} /* tapwrite */ 995 996 997/* 998 * tappoll 999 * 1000 * the poll interface, this is only useful on reads 1001 * really. 
the write detect always returns true, write never blocks 1002 * anyway, it either accepts the packet or drops it 1003 */ 1004static int 1005tappoll(struct cdev *dev, int events, struct thread *td) 1006{ 1007 struct tap_softc *tp = dev->si_drv1; 1008 struct ifnet *ifp = tp->tap_ifp; 1009 int revents = 0; 1010 1011 TAPDEBUG("%s polling, minor = %#x\n", 1012 ifp->if_xname, dev2unit(dev)); 1013 1014 if (events & (POLLIN | POLLRDNORM)) { 1015 IFQ_LOCK(&ifp->if_snd); 1016 if (!IFQ_IS_EMPTY(&ifp->if_snd)) { 1017 TAPDEBUG("%s have data in queue. len = %d, " \ 1018 "minor = %#x\n", ifp->if_xname, 1019 ifp->if_snd.ifq_len, dev2unit(dev)); 1020 1021 revents |= (events & (POLLIN | POLLRDNORM)); 1022 } else { 1023 TAPDEBUG("%s waiting for data, minor = %#x\n", 1024 ifp->if_xname, dev2unit(dev)); 1025 1026 selrecord(td, &tp->tap_rsel); 1027 } 1028 IFQ_UNLOCK(&ifp->if_snd); 1029 } 1030 1031 if (events & (POLLOUT | POLLWRNORM)) 1032 revents |= (events & (POLLOUT | POLLWRNORM)); 1033 1034 return (revents); 1035} /* tappoll */ 1036 1037 1038/* 1039 * tap_kqfilter 1040 * 1041 * support for kevent() system call 1042 */ 1043static int 1044tapkqfilter(struct cdev *dev, struct knote *kn) 1045{ 1046 struct tap_softc *tp = dev->si_drv1; 1047 struct ifnet *ifp = tp->tap_ifp; 1048 1049 switch (kn->kn_filter) { 1050 case EVFILT_READ: 1051 TAPDEBUG("%s kqfilter: EVFILT_READ, minor = %#x\n", 1052 ifp->if_xname, dev2unit(dev)); 1053 kn->kn_fop = &tap_read_filterops; 1054 break; 1055 1056 case EVFILT_WRITE: 1057 TAPDEBUG("%s kqfilter: EVFILT_WRITE, minor = %#x\n", 1058 ifp->if_xname, dev2unit(dev)); 1059 kn->kn_fop = &tap_write_filterops; 1060 break; 1061 1062 default: 1063 TAPDEBUG("%s kqfilter: invalid filter, minor = %#x\n", 1064 ifp->if_xname, dev2unit(dev)); 1065 return (EINVAL); 1066 /* NOT REACHED */ 1067 } 1068 1069 kn->kn_hook = tp; 1070 knlist_add(&tp->tap_rsel.si_note, kn, 0); 1071 1072 return (0); 1073} /* tapkqfilter */ 1074 1075 1076/* 1077 * tap_kqread 1078 * 1079 * Return true if 
there is data in the interface queue 1080 */ 1081static int 1082tapkqread(struct knote *kn, long hint) 1083{ 1084 int ret; 1085 struct tap_softc *tp = kn->kn_hook; 1086 struct cdev *dev = tp->tap_dev; 1087 struct ifnet *ifp = tp->tap_ifp; 1088 1089 if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) { 1090 TAPDEBUG("%s have data in queue. len = %d, minor = %#x\n", 1091 ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev)); 1092 ret = 1; 1093 } else { 1094 TAPDEBUG("%s waiting for data, minor = %#x\n", 1095 ifp->if_xname, dev2unit(dev)); 1096 ret = 0; 1097 } 1098 1099 return (ret); 1100} /* tapkqread */ 1101 1102 1103/* 1104 * tap_kqwrite 1105 * 1106 * Always can write. Return the MTU in kn->data 1107 */ 1108static int 1109tapkqwrite(struct knote *kn, long hint) 1110{ 1111 struct tap_softc *tp = kn->kn_hook; 1112 struct ifnet *ifp = tp->tap_ifp; 1113 1114 kn->kn_data = ifp->if_mtu; 1115 1116 return (1); 1117} /* tapkqwrite */ 1118 1119 1120static void 1121tapkqdetach(struct knote *kn) 1122{ 1123 struct tap_softc *tp = kn->kn_hook; 1124 1125 knlist_remove(&tp->tap_rsel.si_note, kn, 0); 1126} /* tapkqdetach */ 1127
| 179 180static int 181tap_clone_create(struct if_clone *ifc, int unit, caddr_t params) 182{ 183 struct cdev *dev; 184 int i; 185 186 /* Find any existing device, or allocate new unit number. */ 187 i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, 0); 188 if (i) { 189 dev = make_dev(&tap_cdevsw, unit, UID_ROOT, GID_WHEEL, 0600, 190 "%s%d", tapname, unit); 191 } 192 193 tapcreate(dev); 194 return (0); 195} 196 197/* vmnet devices are tap devices in disguise */ 198static int 199vmnet_clone_create(struct if_clone *ifc, int unit, caddr_t params) 200{ 201 struct cdev *dev; 202 int i; 203 204 /* Find any existing device, or allocate new unit number. */ 205 i = clone_create(&tapclones, &tap_cdevsw, &unit, &dev, VMNET_DEV_MASK); 206 if (i) { 207 dev = make_dev(&tap_cdevsw, unit | VMNET_DEV_MASK, UID_ROOT, 208 GID_WHEEL, 0600, "%s%d", vmnetname, unit); 209 } 210 211 tapcreate(dev); 212 return (0); 213} 214 215static void 216tap_destroy(struct tap_softc *tp) 217{ 218 struct ifnet *ifp = tp->tap_ifp; 219 220 CURVNET_SET(ifp->if_vnet); 221 destroy_dev(tp->tap_dev); 222 seldrain(&tp->tap_rsel); 223 knlist_clear(&tp->tap_rsel.si_note, 0); 224 knlist_destroy(&tp->tap_rsel.si_note); 225 ether_ifdetach(ifp); 226 if_free(ifp); 227 228 mtx_destroy(&tp->tap_mtx); 229 free(tp, M_TAP); 230 CURVNET_RESTORE(); 231} 232 233static void 234tap_clone_destroy(struct ifnet *ifp) 235{ 236 struct tap_softc *tp = ifp->if_softc; 237 238 mtx_lock(&tapmtx); 239 SLIST_REMOVE(&taphead, tp, tap_softc, tap_next); 240 mtx_unlock(&tapmtx); 241 tap_destroy(tp); 242} 243 244/* vmnet devices are tap devices in disguise */ 245static void 246vmnet_clone_destroy(struct ifnet *ifp) 247{ 248 tap_clone_destroy(ifp); 249} 250 251/* 252 * tapmodevent 253 * 254 * module event handler 255 */ 256static int 257tapmodevent(module_t mod, int type, void *data) 258{ 259 static eventhandler_tag eh_tag = NULL; 260 struct tap_softc *tp = NULL; 261 struct ifnet *ifp = NULL; 262 263 switch (type) { 264 case MOD_LOAD: 265 
		/* initialize device */

		mtx_init(&tapmtx, "tapmtx", NULL, MTX_DEF);
		SLIST_INIT(&taphead);

		clone_setup(&tapclones);
		/* Hook devfs lookups so "tapN"/"vmnetN" opens auto-clone. */
		eh_tag = EVENTHANDLER_REGISTER(dev_clone, tapclone, 0, 1000);
		if (eh_tag == NULL) {
			clone_cleanup(&tapclones);
			mtx_destroy(&tapmtx);
			return (ENOMEM);
		}
		tap_cloner = if_clone_simple(tapname, tap_clone_create,
		    tap_clone_destroy, 0);
		vmnet_cloner = if_clone_simple(vmnetname, vmnet_clone_create,
		    vmnet_clone_destroy, 0);
		return (0);

	case MOD_UNLOAD:
		/*
		 * The EBUSY algorithm here can't quite atomically
		 * guarantee that this is race-free since we have to
		 * release the tap mtx to deregister the clone handler.
		 */
		mtx_lock(&tapmtx);
		SLIST_FOREACH(tp, &taphead, tap_next) {
			mtx_lock(&tp->tap_mtx);
			if (tp->tap_flags & TAP_OPEN) {
				/* Any open device keeps the module busy. */
				mtx_unlock(&tp->tap_mtx);
				mtx_unlock(&tapmtx);
				return (EBUSY);
			}
			mtx_unlock(&tp->tap_mtx);
		}
		mtx_unlock(&tapmtx);

		EVENTHANDLER_DEREGISTER(dev_clone, eh_tag);
		if_clone_detach(tap_cloner);
		if_clone_detach(vmnet_cloner);
		drain_dev_clone_events();

		mtx_lock(&tapmtx);
		while ((tp = SLIST_FIRST(&taphead)) != NULL) {
			SLIST_REMOVE_HEAD(&taphead, tap_next);
			/* tap_destroy() may sleep; drop the list lock. */
			mtx_unlock(&tapmtx);

			ifp = tp->tap_ifp;

			TAPDEBUG("detaching %s\n", ifp->if_xname);

			tap_destroy(tp);
			mtx_lock(&tapmtx);
		}
		mtx_unlock(&tapmtx);
		clone_cleanup(&tapclones);

		mtx_destroy(&tapmtx);

		break;

	default:
		return (EOPNOTSUPP);
	}

	return (0);
} /* tapmodevent */


/*
 * DEVFS handler
 *
 * We need to support two kinds of devices - tap and vmnet
 */
static void
tapclone(void *arg, struct ucred *cred, char *name, int namelen, struct cdev **dev)
{
	char devname[SPECNAMELEN + 1];
	int i, unit, append_unit;
	int extra;

	/* Another dev_clone handler already resolved this name. */
	if (*dev != NULL)
		return;

	/*
	 * Bail unless devfs cloning is enabled (tapdclone) and the caller
	 * either may open unprivileged (tapuopen) or holds the interface
	 * creation privilege.
	 */
	if (!tapdclone ||
	    (!tapuopen &&
	    priv_check_cred(cred, PRIV_NET_IFCREATE, 0) != 0))
		return;

	unit = 0;
	append_unit = 0;
	extra = 0;

	/* We're interested in only tap/vmnet devices. */
	if (strcmp(name, tapname) == 0) {
		unit = -1;
	} else if (strcmp(name, vmnetname) == 0) {
		unit = -1;
		extra = VMNET_DEV_MASK;
	} else if (dev_stdclone(name, NULL, tapname, &unit) != 1) {
		if (dev_stdclone(name, NULL, vmnetname, &unit) != 1) {
			return;
		} else {
			extra = VMNET_DEV_MASK;
		}
	}

	/* A bare "tap"/"vmnet" open gets the allocated unit appended. */
	if (unit == -1)
		append_unit = 1;

	CURVNET_SET(CRED_TO_VNET(cred));
	/* find any existing device, or allocate new unit number */
	i = clone_create(&tapclones, &tap_cdevsw, &unit, dev, extra);
	if (i) {
		if (append_unit) {
			/*
			 * We were passed 'tun' or 'tap', with no unit specified
			 * so we'll need to append it now.
			 */
			namelen = snprintf(devname, sizeof(devname), "%s%d", name,
			    unit);
			name = devname;
		}

		*dev = make_dev_credf(MAKEDEV_REF, &tap_cdevsw, unit | extra,
		    cred, UID_ROOT, GID_WHEEL, 0600, "%s", name);
	}

	if_clone_create(name, namelen, NULL);
	CURVNET_RESTORE();
} /* tapclone */


/*
 * tapcreate
 *
 * to create interface
 */
static void
tapcreate(struct cdev *dev)
{
	struct ifnet *ifp = NULL;
	struct tap_softc *tp = NULL;
	unsigned short macaddr_hi;
	uint32_t macaddr_mid;
	int unit;
	const char *name = NULL;
	u_char eaddr[6];

	/* allocate driver storage and create device */
	tp = malloc(sizeof(*tp), M_TAP, M_WAITOK | M_ZERO);
	mtx_init(&tp->tap_mtx, "tap_mtx", NULL, MTX_DEF);
	mtx_lock(&tapmtx);
	SLIST_INSERT_HEAD(&taphead, tp, tap_next);
	mtx_unlock(&tapmtx);

	unit = dev2unit(dev);

	/* select device: tap or vmnet */
	if (unit & VMNET_DEV_MASK) {
		name = vmnetname;
		tp->tap_flags |= TAP_VMNET;
	} else
		name = tapname;

	/* Strip the vmnet flag bit; the low bits are the real unit. */
	unit &= TAPMAXUNIT;

	TAPDEBUG("tapcreate(%s%d). minor = %#x\n", name, unit, dev2unit(dev));

	/* generate fake MAC address: 00 bd xx xx xx unit_no */
	macaddr_hi = htons(0x00bd);
	macaddr_mid = (uint32_t) ticks;
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
	eaddr[5] = (u_char)unit;

	/* fill the rest and attach interface */
	ifp = tp->tap_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL)
		panic("%s%d: can not if_alloc()", name, unit);
	ifp->if_softc = tp;
	if_initname(ifp, name, unit);
	ifp->if_init = tapifinit;
	ifp->if_start = tapifstart;
	ifp->if_ioctl = tapifioctl;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_capabilities |= IFCAP_LINKSTATE;
	ifp->if_capenable |= IFCAP_LINKSTATE;

	/* Cross-link cdev and softc for the cdevsw entry points. */
	dev->si_drv1 = tp;
	tp->tap_dev = dev;

	ether_ifattach(ifp, eaddr);

	mtx_lock(&tp->tap_mtx);
	tp->tap_flags |= TAP_INITED;
	mtx_unlock(&tp->tap_mtx);

	/* The knote list shares the softc mutex with the rest of the state. */
	knlist_init_mtx(&tp->tap_rsel.si_note, &tp->tap_mtx);

	TAPDEBUG("interface %s is created. minor = %#x\n",
	    ifp->if_xname, dev2unit(dev));
} /* tapcreate */


/*
 * tapopen
 *
 * to open tunnel.
 must be superuser
 */
static int
tapopen(struct cdev *dev, int flag, int mode, struct thread *td)
{
	struct tap_softc *tp = NULL;
	struct ifnet *ifp = NULL;
	int error;

	/* Unless unprivileged opens are allowed, require PRIV_NET_TAP. */
	if (tapuopen == 0) {
		error = priv_check(td, PRIV_NET_TAP);
		if (error)
			return (error);
	}

	if ((dev2unit(dev) & CLONE_UNITMASK) > TAPMAXUNIT)
		return (ENXIO);

	tp = dev->si_drv1;

	/* Exclusive open: only one descriptor may hold the device. */
	mtx_lock(&tp->tap_mtx);
	if (tp->tap_flags & TAP_OPEN) {
		mtx_unlock(&tp->tap_mtx);
		return (EBUSY);
	}

	/* Remember the current MAC for SIOCGIFADDR/SIOCSIFADDR. */
	bcopy(IF_LLADDR(tp->tap_ifp), tp->ether_addr, sizeof(tp->ether_addr));
	tp->tap_pid = td->td_proc->p_pid;
	tp->tap_flags |= TAP_OPEN;
	ifp = tp->tap_ifp;

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if (tapuponopen)
		ifp->if_flags |= IFF_UP;
	if_link_state_change(ifp, LINK_STATE_UP);
	mtx_unlock(&tp->tap_mtx);

	TAPDEBUG("%s is open. minor = %#x\n", ifp->if_xname, dev2unit(dev));

	return (0);
} /* tapopen */


/*
 * tapclose
 *
 * close the device - mark i/f down & delete routing info
 */
static int
tapclose(struct cdev *dev, int foo, int bar, struct thread *td)
{
	struct ifaddr *ifa;
	struct tap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = tp->tap_ifp;

	/* junk all pending output */
	mtx_lock(&tp->tap_mtx);
	CURVNET_SET(ifp->if_vnet);
	IF_DRAIN(&ifp->if_snd);

	/*
	 * Do not bring the interface down, and do not do anything with
	 * the interface, if we are in VMnet mode. Just close the device.
	 */
	if (((tp->tap_flags & TAP_VMNET) == 0) &&
	    (ifp->if_flags & (IFF_UP | IFF_LINK0)) == IFF_UP) {
		/* if_down() may sleep; drop the softc lock around it. */
		mtx_unlock(&tp->tap_mtx);
		if_down(ifp);
		mtx_lock(&tp->tap_mtx);
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			mtx_unlock(&tp->tap_mtx);
			/* Drop routes and addresses bound to the ifnet. */
			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
				rtinit(ifa, (int)RTM_DELETE, 0);
			}
			if_purgeaddrs(ifp);
			mtx_lock(&tp->tap_mtx);
		}
	}

	if_link_state_change(ifp, LINK_STATE_DOWN);
	CURVNET_RESTORE();

	/* Clear async ownership and wake any sleeper/selector/knote. */
	funsetown(&tp->tap_sigio);
	selwakeuppri(&tp->tap_rsel, PZERO+1);
	KNOTE_LOCKED(&tp->tap_rsel.si_note, 0);

	tp->tap_flags &= ~TAP_OPEN;
	tp->tap_pid = 0;
	mtx_unlock(&tp->tap_mtx);

	TAPDEBUG("%s is closed. minor = %#x\n",
	    ifp->if_xname, dev2unit(dev));

	return (0);
} /* tapclose */


/*
 * tapifinit
 *
 * network interface initialization function
 */
static void
tapifinit(void *xtp)
{
	struct tap_softc *tp = (struct tap_softc *)xtp;
	struct ifnet *ifp = tp->tap_ifp;

	TAPDEBUG("initializing %s\n", ifp->if_xname);

	mtx_lock(&tp->tap_mtx);
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	mtx_unlock(&tp->tap_mtx);

	/* attempt to start output */
	tapifstart(ifp);
} /* tapifinit */


/*
 * tapifioctl
 *
 * Process an ioctl request on network interface
 */
static int
tapifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct tap_softc *tp = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifstat *ifs = NULL;
	struct ifmediareq *ifmr = NULL;
	int dummy, error = 0;

	switch (cmd) {
	case SIOCSIFFLAGS: /* XXX -- just like vmnet does */
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		break;

	case SIOCGIFMEDIA:
		/*
		 * Report one fixed Ethernet media type; "active" tracks
		 * whether the control device is currently open.
		 */
		ifmr = (struct ifmediareq *)data;
		dummy = ifmr->ifm_count;
		ifmr->ifm_count = 1;
		ifmr->ifm_status = IFM_AVALID;
		ifmr->ifm_active = IFM_ETHER;
		if (tp->tap_flags & TAP_OPEN)
			ifmr->ifm_status |= IFM_ACTIVE;
		ifmr->ifm_current = ifmr->ifm_active;
		/* Copy the single-entry media list out if the caller asked. */
		if (dummy >= 1) {
			int media = IFM_ETHER;
			error = copyout(&media, ifmr->ifm_ulist,
			    sizeof(int));
		}
		break;

	case SIOCSIFMTU:
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		mtx_lock(&tp->tap_mtx);
		if (tp->tap_pid != 0)
			snprintf(ifs->ascii, sizeof(ifs->ascii),
			    "\tOpened by PID %d\n", tp->tap_pid);
		else
			ifs->ascii[0] = '\0';
		mtx_unlock(&tp->tap_mtx);
		break;

	default:
		/* Everything else gets the generic Ethernet treatment. */
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
} /* tapifioctl */


/*
 * tapifstart
 *
 * queue packets from higher level ready to put out
 */
static void
tapifstart(struct ifnet *ifp)
{
	struct tap_softc *tp = ifp->if_softc;

	TAPDEBUG("%s starting\n", ifp->if_xname);

	/*
	 * do not junk pending output if we are in VMnet mode.
	 * XXX: can this do any harm because of queue overflow?
	 */

	mtx_lock(&tp->tap_mtx);
	if (((tp->tap_flags & TAP_VMNET) == 0) &&
	    ((tp->tap_flags & TAP_READY) != TAP_READY)) {
		struct mbuf *m;

		/* Unlocked read.
 */
		TAPDEBUG("%s not ready, tap_flags = 0x%x\n", ifp->if_xname,
		    tp->tap_flags);

		/* Not ready: drain and count everything queued as errors. */
		for (;;) {
			IF_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL) {
				m_freem(m);
				if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			} else
				break;
		}
		mtx_unlock(&tp->tap_mtx);

		return;
	}

	ifp->if_drv_flags |= IFF_DRV_OACTIVE;

	if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
		/* Wake a blocked tapread(), if there is one. */
		if (tp->tap_flags & TAP_RWAIT) {
			tp->tap_flags &= ~TAP_RWAIT;
			wakeup(tp);
		}

		/* pgsigio() may sleep; drop the softc lock around it. */
		if ((tp->tap_flags & TAP_ASYNC) && (tp->tap_sigio != NULL)) {
			mtx_unlock(&tp->tap_mtx);
			pgsigio(&tp->tap_sigio, SIGIO, 0);
			mtx_lock(&tp->tap_mtx);
		}

		selwakeuppri(&tp->tap_rsel, PZERO+1);
		KNOTE_LOCKED(&tp->tap_rsel.si_note, 0);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); /* obytes are counted in ether_output */
	}

	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	mtx_unlock(&tp->tap_mtx);
} /* tapifstart */


/*
 * tapioctl
 *
 * the cdevsw interface is now pretty minimal
 */
static int
tapioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
{
	struct ifreq ifr;
	struct tap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = tp->tap_ifp;
	struct tapinfo *tapp = NULL;
	int f;
	int error;
#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
	defined(COMPAT_FREEBSD4)
	int ival;
#endif

	switch (cmd) {
	case TAPSIFINFO:
		tapp = (struct tapinfo *)data;
		if (ifp->if_type != tapp->type)
			return (EPROTOTYPE);
		mtx_lock(&tp->tap_mtx);
		if (ifp->if_mtu != tapp->mtu) {
			/* Route the MTU change through the ifnet layer. */
			strncpy(ifr.ifr_name, if_name(ifp), IFNAMSIZ);
			ifr.ifr_mtu = tapp->mtu;
			CURVNET_SET(ifp->if_vnet);
			error = ifhwioctl(SIOCSIFMTU, ifp,
			    (caddr_t)&ifr, td);
			CURVNET_RESTORE();
			if (error) {
				mtx_unlock(&tp->tap_mtx);
				return (error);
			}
		}
		ifp->if_baudrate = tapp->baudrate;
		mtx_unlock(&tp->tap_mtx);
		break;

	case TAPGIFINFO:
		tapp = (struct tapinfo *)data;
		mtx_lock(&tp->tap_mtx);
		tapp->mtu = ifp->if_mtu;
		tapp->type = ifp->if_type;
		tapp->baudrate = ifp->if_baudrate;
		mtx_unlock(&tp->tap_mtx);
		break;

	case TAPSDEBUG:
		tapdebug = *(int *)data;
		break;

	case TAPGDEBUG:
		*(int *)data = tapdebug;
		break;

	case TAPGIFNAME: {
		struct ifreq *ifr = (struct ifreq *) data;

		strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
	} break;

	case FIONBIO:
		break;

	case FIOASYNC:
		mtx_lock(&tp->tap_mtx);
		if (*(int *)data)
			tp->tap_flags |= TAP_ASYNC;
		else
			tp->tap_flags &= ~TAP_ASYNC;
		mtx_unlock(&tp->tap_mtx);
		break;

	case FIONREAD:
		/* Report the byte length of the next queued packet only. */
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			struct mbuf *mb;

			IFQ_LOCK(&ifp->if_snd);
			IFQ_POLL_NOLOCK(&ifp->if_snd, mb);
			for (*(int *)data = 0; mb != NULL;
			     mb = mb->m_next)
				*(int *)data += mb->m_len;
			IFQ_UNLOCK(&ifp->if_snd);
		} else
			*(int *)data = 0;
		break;

	case FIOSETOWN:
		return (fsetown(*(int *)data, &tp->tap_sigio));

	case FIOGETOWN:
		*(int *)data = fgetown(&tp->tap_sigio);
		return (0);

	/* this is deprecated, FIOSETOWN should be used instead */
	case TIOCSPGRP:
		return (fsetown(-(*(int *)data), &tp->tap_sigio));

	/* this is deprecated, FIOGETOWN should be used instead */
	case TIOCGPGRP:
		*(int *)data = -fgetown(&tp->tap_sigio);
		return (0);

	/* VMware/VMnet port ioctl's */

#if defined(COMPAT_FREEBSD6) || defined(COMPAT_FREEBSD5) || \
	defined(COMPAT_FREEBSD4)
	case _IO('V', 0):
		/* Legacy encoding carried the value in the command word. */
		ival = IOCPARM_IVAL(data);
		data = (caddr_t)&ival;
		/* FALLTHROUGH */
#endif
	case VMIO_SIOCSIFFLAGS: /* VMware/VMnet SIOCSIFFLAGS */
		/* Sanitize user flags, keep the unchangeable ones, force UP. */
		f = *(int *)data;
		f &= 0x0fff;
		f &= ~IFF_CANTCHANGE;
		f |= IFF_UP;

		mtx_lock(&tp->tap_mtx);
		ifp->if_flags = f | (ifp->if_flags & IFF_CANTCHANGE);
		mtx_unlock(&tp->tap_mtx);
		break;

	case SIOCGIFADDR:	/* get MAC address of the remote side */
		mtx_lock(&tp->tap_mtx);
		bcopy(tp->ether_addr, data, sizeof(tp->ether_addr));
		mtx_unlock(&tp->tap_mtx);
		break;

	case SIOCSIFADDR:	/* set MAC address of the remote side */
		mtx_lock(&tp->tap_mtx);
		bcopy(data, tp->ether_addr, sizeof(tp->ether_addr));
		mtx_unlock(&tp->tap_mtx);
		break;

	default:
		return (ENOTTY);
	}
	return (0);
} /* tapioctl */


/*
 * tapread
 *
 * the cdevsw read interface - reads a packet at a time, or at
 * least as much of a packet as can be read
 */
static int
tapread(struct cdev *dev, struct uio *uio, int flag)
{
	struct tap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = tp->tap_ifp;
	struct mbuf *m = NULL;
	int error = 0, len;

	TAPDEBUG("%s reading, minor = %#x\n", ifp->if_xname, dev2unit(dev));

	/* Interface must be both UP and open (TAP_READY) to read. */
	mtx_lock(&tp->tap_mtx);
	if ((tp->tap_flags & TAP_READY) != TAP_READY) {
		mtx_unlock(&tp->tap_mtx);

		/* Unlocked read. */
		TAPDEBUG("%s not ready.
 minor = %#x, tap_flags = 0x%x\n",
		    ifp->if_xname, dev2unit(dev), tp->tap_flags);

		return (EHOSTDOWN);
	}

	tp->tap_flags &= ~TAP_RWAIT;

	/* sleep until we get a packet */
	do {
		IF_DEQUEUE(&ifp->if_snd, m);

		if (m == NULL) {
			if (flag & O_NONBLOCK) {
				mtx_unlock(&tp->tap_mtx);
				return (EWOULDBLOCK);
			}

			/* tapifstart() wakes us via wakeup(tp). */
			tp->tap_flags |= TAP_RWAIT;
			error = mtx_sleep(tp, &tp->tap_mtx, PCATCH | (PZERO + 1),
			    "taprd", 0);
			if (error) {
				mtx_unlock(&tp->tap_mtx);
				return (error);
			}
		}
	} while (m == NULL);
	mtx_unlock(&tp->tap_mtx);

	/* feed packet to bpf */
	BPF_MTAP(ifp, m);

	/* xfer packet to user space */
	while ((m != NULL) && (uio->uio_resid > 0) && (error == 0)) {
		len = min(uio->uio_resid, m->m_len);
		if (len == 0)
			break;

		error = uiomove(mtod(m, void *), len, uio);
		m = m_free(m);
	}

	/* Whatever did not fit into the user buffer is dropped. */
	if (m != NULL) {
		TAPDEBUG("%s dropping mbuf, minor = %#x\n", ifp->if_xname,
		    dev2unit(dev));
		m_freem(m);
	}

	return (error);
} /* tapread */


/*
 * tapwrite
 *
 * the cdevsw write interface - an atomic write is a packet - or else!
 */
static int
tapwrite(struct cdev *dev, struct uio *uio, int flag)
{
	struct ether_header *eh;
	struct tap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = tp->tap_ifp;
	struct mbuf *m;

	TAPDEBUG("%s writing, minor = %#x\n",
	    ifp->if_xname, dev2unit(dev));

	if (uio->uio_resid == 0)
		return (0);

	/* One write is one frame; reject anything larger than TAPMRU. */
	if ((uio->uio_resid < 0) || (uio->uio_resid > TAPMRU)) {
		TAPDEBUG("%s invalid packet len = %zd, minor = %#x\n",
		    ifp->if_xname, uio->uio_resid, dev2unit(dev));

		return (EIO);
	}

	/* Copy the user frame into an mbuf chain, payload ETHER_ALIGN'ed. */
	if ((m = m_uiotombuf(uio, M_NOWAIT, 0, ETHER_ALIGN,
	    M_PKTHDR)) == NULL) {
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		return (ENOBUFS);
	}

	m->m_pkthdr.rcvif = ifp;

	/*
	 * Only pass a unicast frame to ether_input(), if it would actually
	 * have been received by non-virtual hardware.
	 */
	if (m->m_len < sizeof(struct ether_header)) {
		m_freem(m);
		return (0);
	}
	eh = mtod(m, struct ether_header *);

	/* Silently drop unicast frames not addressed to this interface. */
	if (eh && (ifp->if_flags & IFF_PROMISC) == 0 &&
	    !ETHER_IS_MULTICAST(eh->ether_dhost) &&
	    bcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) != 0) {
		m_freem(m);
		return (0);
	}

	/* Pass packet up to parent. */
	CURVNET_SET(ifp->if_vnet);
	(*ifp->if_input)(ifp, m);
	CURVNET_RESTORE();
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); /* ibytes are counted in parent */

	return (0);
} /* tapwrite */


/*
 * tappoll
 *
 * the poll interface, this is only useful on reads
 * really.
 the write detect always returns true, write never blocks
 * anyway, it either accepts the packet or drops it
 */
static int
tappoll(struct cdev *dev, int events, struct thread *td)
{
	struct tap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = tp->tap_ifp;
	int revents = 0;

	TAPDEBUG("%s polling, minor = %#x\n",
	    ifp->if_xname, dev2unit(dev));

	if (events & (POLLIN | POLLRDNORM)) {
		IFQ_LOCK(&ifp->if_snd);
		if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
			TAPDEBUG("%s have data in queue. len = %d, " \
			    "minor = %#x\n", ifp->if_xname,
			    ifp->if_snd.ifq_len, dev2unit(dev));

			revents |= (events & (POLLIN | POLLRDNORM));
		} else {
			TAPDEBUG("%s waiting for data, minor = %#x\n",
			    ifp->if_xname, dev2unit(dev));

			/* Register so tapifstart()'s selwakeup finds us. */
			selrecord(td, &tp->tap_rsel);
		}
		IFQ_UNLOCK(&ifp->if_snd);
	}

	/* Writes never block. */
	if (events & (POLLOUT | POLLWRNORM))
		revents |= (events & (POLLOUT | POLLWRNORM));

	return (revents);
} /* tappoll */


/*
 * tap_kqfilter
 *
 * support for kevent() system call
 */
static int
tapkqfilter(struct cdev *dev, struct knote *kn)
{
	struct tap_softc *tp = dev->si_drv1;
	struct ifnet *ifp = tp->tap_ifp;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		TAPDEBUG("%s kqfilter: EVFILT_READ, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		kn->kn_fop = &tap_read_filterops;
		break;

	case EVFILT_WRITE:
		TAPDEBUG("%s kqfilter: EVFILT_WRITE, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		kn->kn_fop = &tap_write_filterops;
		break;

	default:
		TAPDEBUG("%s kqfilter: invalid filter, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		return (EINVAL);
		/* NOT REACHED */
	}

	/* Both filters share the read-side knlist on tap_rsel. */
	kn->kn_hook = tp;
	knlist_add(&tp->tap_rsel.si_note, kn, 0);

	return (0);
} /* tapkqfilter */


/*
 * tap_kqread
 *
 * Return true if
there is data in the interface queue
 */
static int
tapkqread(struct knote *kn, long hint)
{
	int ret;
	struct tap_softc *tp = kn->kn_hook;
	struct cdev *dev = tp->tap_dev;
	struct ifnet *ifp = tp->tap_ifp;

	/*
	 * Report the interface send queue depth in kn_data; the knote
	 * fires whenever at least one packet is queued for the reader.
	 * NOTE(review): ifq_len is read without the IFQ lock here —
	 * presumably an accepted benign race; confirm against callers.
	 */
	if ((kn->kn_data = ifp->if_snd.ifq_len) > 0) {
		TAPDEBUG("%s have data in queue. len = %d, minor = %#x\n",
		    ifp->if_xname, ifp->if_snd.ifq_len, dev2unit(dev));
		ret = 1;
	} else {
		TAPDEBUG("%s waiting for data, minor = %#x\n",
		    ifp->if_xname, dev2unit(dev));
		ret = 0;
	}

	return (ret);
} /* tapkqread */


/*
 * tap_kqwrite
 *
 * Always can write. Return the MTU in kn->data
 */
static int
tapkqwrite(struct knote *kn, long hint)
{
	struct tap_softc *tp = kn->kn_hook;
	struct ifnet *ifp = tp->tap_ifp;

	/* Writes never block; advertise the interface MTU as the size. */
	kn->kn_data = ifp->if_mtu;

	return (1);
} /* tapkqwrite */


/* Detach a knote from the softc's note list (undoes tapkqfilter()). */
static void
tapkqdetach(struct knote *kn)
{
	struct tap_softc *tp = kn->kn_hook;

	knlist_remove(&tp->tap_rsel.si_note, kn, 0);
} /* tapkqdetach */
|