if.c revision 194620
1/*- 2 * Copyright (c) 1980, 1986, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * @(#)if.c 8.5 (Berkeley) 1/9/95 30 * $FreeBSD: head/sys/net/if.c 194620 2009-06-22 10:27:20Z bz $ 31 */ 32 33#include "opt_compat.h" 34#include "opt_inet6.h" 35#include "opt_inet.h" 36#include "opt_carp.h" 37 38#include <sys/param.h> 39#include <sys/types.h> 40#include <sys/conf.h> 41#include <sys/malloc.h> 42#include <sys/sbuf.h> 43#include <sys/bus.h> 44#include <sys/mbuf.h> 45#include <sys/systm.h> 46#include <sys/priv.h> 47#include <sys/proc.h> 48#include <sys/socket.h> 49#include <sys/socketvar.h> 50#include <sys/protosw.h> 51#include <sys/kernel.h> 52#include <sys/lock.h> 53#include <sys/refcount.h> 54#include <sys/module.h> 55#include <sys/rwlock.h> 56#include <sys/sockio.h> 57#include <sys/syslog.h> 58#include <sys/sysctl.h> 59#include <sys/taskqueue.h> 60#include <sys/domain.h> 61#include <sys/jail.h> 62#include <sys/vimage.h> 63#include <machine/stdarg.h> 64#include <vm/uma.h> 65 66#include <net/if.h> 67#include <net/if_arp.h> 68#include <net/if_clone.h> 69#include <net/if_dl.h> 70#include <net/if_types.h> 71#include <net/if_var.h> 72#include <net/radix.h> 73#include <net/route.h> 74#include <net/vnet.h> 75 76#if defined(INET) || defined(INET6) 77/*XXX*/ 78#include <netinet/in.h> 79#include <netinet/in_var.h> 80#ifdef INET6 81#include <netinet6/in6_var.h> 82#include <netinet6/in6_ifattach.h> 83#endif 84#endif 85#ifdef INET 86#include <netinet/if_ether.h> 87#include <netinet/vinet.h> 88#endif 89#if defined(INET) || defined(INET6) 90#ifdef DEV_CARP 91#include <netinet/ip_carp.h> 92#endif 93#endif 94 95#include <security/mac/mac_framework.h> 96 97#ifndef VIMAGE 98#ifndef VIMAGE_GLOBALS 99struct vnet_net vnet_net_0; 100#endif 101#endif 102 103static int slowtimo_started; 104 105SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers"); 106SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management"); 107 108/* Log link state change events */ 109static int log_link_state_change = 1; 110 111SYSCTL_INT(_net_link, OID_AUTO, 
log_link_state_change, CTLFLAG_RW, 112 &log_link_state_change, 0, 113 "log interface link state change events"); 114 115void (*bstp_linkstate_p)(struct ifnet *ifp, int state); 116void (*ng_ether_link_state_p)(struct ifnet *ifp, int state); 117void (*lagg_linkstate_p)(struct ifnet *ifp, int state); 118 119struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL; 120 121/* 122 * XXX: Style; these should be sorted alphabetically, and unprototyped 123 * static functions should be prototyped. Currently they are sorted by 124 * declaration order. 125 */ 126static void if_attachdomain(void *); 127static void if_attachdomain1(struct ifnet *); 128static int ifconf(u_long, caddr_t); 129static void if_freemulti(struct ifmultiaddr *); 130static void if_init(void *); 131static void if_check(void *); 132static void if_route(struct ifnet *, int flag, int fam); 133static int if_setflag(struct ifnet *, int, int, int *, int); 134static void if_slowtimo(void *); 135static int if_transmit(struct ifnet *ifp, struct mbuf *m); 136static void if_unroute(struct ifnet *, int flag, int fam); 137static void link_rtrequest(int, struct rtentry *, struct rt_addrinfo *); 138static int if_rtdel(struct radix_node *, void *); 139static int ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *); 140static int if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int); 141static void do_link_state_change(void *, int); 142static int if_getgroup(struct ifgroupreq *, struct ifnet *); 143static int if_getgroupmembers(struct ifgroupreq *); 144static void if_delgroups(struct ifnet *); 145static void if_attach_internal(struct ifnet *, int); 146static void if_detach_internal(struct ifnet *, int); 147 148#ifdef INET6 149/* 150 * XXX: declare here to avoid to include many inet6 related files.. 151 * should be more generalized? 
152 */ 153extern void nd6_setmtu(struct ifnet *); 154#endif 155 156static int vnet_net_iattach(const void *); 157#ifdef VIMAGE 158static int vnet_net_idetach(const void *); 159#endif 160 161#ifdef VIMAGE_GLOBALS 162struct ifnethead ifnet; /* depend on static init XXX */ 163struct ifgrouphead ifg_head; 164int if_index; 165static int if_indexlim; 166/* Table of ifnet/cdev by index. Locked with ifnet_lock. */ 167static struct ifindex_entry *ifindex_table; 168static struct knlist ifklist; 169#endif 170 171int ifqmaxlen = IFQ_MAXLEN; 172struct rwlock ifnet_lock; 173static if_com_alloc_t *if_com_alloc[256]; 174static if_com_free_t *if_com_free[256]; 175 176static void filt_netdetach(struct knote *kn); 177static int filt_netdev(struct knote *kn, long hint); 178 179static struct filterops netdev_filtops = 180 { 1, NULL, filt_netdetach, filt_netdev }; 181 182#ifndef VIMAGE_GLOBALS 183static struct vnet_symmap vnet_net_symmap[] = { 184 VNET_SYMMAP(net, ifnet), 185 VNET_SYMMAP(net, rt_tables), 186 VNET_SYMMAP(net, rtstat), 187 VNET_SYMMAP(net, rttrash), 188 VNET_SYMMAP_END 189}; 190 191static const vnet_modinfo_t vnet_net_modinfo = { 192 .vmi_id = VNET_MOD_NET, 193 .vmi_name = "net", 194 .vmi_size = sizeof(struct vnet_net), 195 .vmi_symmap = vnet_net_symmap, 196 .vmi_iattach = vnet_net_iattach, 197#ifdef VIMAGE 198 .vmi_idetach = vnet_net_idetach 199#endif 200}; 201#endif /* !VIMAGE_GLOBALS */ 202 203/* 204 * System initialization 205 */ 206SYSINIT(interfaces, SI_SUB_INIT_IF, SI_ORDER_FIRST, if_init, NULL); 207SYSINIT(interface_check, SI_SUB_PROTO_IF, SI_ORDER_FIRST, if_check, NULL); 208 209MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals"); 210MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address"); 211MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address"); 212 213struct ifnet * 214ifnet_byindex_locked(u_short idx) 215{ 216 INIT_VNET_NET(curvnet); 217 218 if (idx > V_if_index) 219 return (NULL); 220 return (V_ifindex_table[idx].ife_ifnet); 221} 222 
223struct ifnet * 224ifnet_byindex(u_short idx) 225{ 226 struct ifnet *ifp; 227 228 IFNET_RLOCK(); 229 ifp = ifnet_byindex_locked(idx); 230 IFNET_RUNLOCK(); 231 return (ifp); 232} 233 234struct ifnet * 235ifnet_byindex_ref(u_short idx) 236{ 237 struct ifnet *ifp; 238 239 IFNET_RLOCK(); 240 ifp = ifnet_byindex_locked(idx); 241 if (ifp == NULL || (ifp->if_flags & IFF_DYING)) { 242 IFNET_RUNLOCK(); 243 return (NULL); 244 } 245 if_ref(ifp); 246 IFNET_RUNLOCK(); 247 return (ifp); 248} 249 250static void 251ifnet_setbyindex(u_short idx, struct ifnet *ifp) 252{ 253 INIT_VNET_NET(curvnet); 254 255 IFNET_WLOCK_ASSERT(); 256 257 V_ifindex_table[idx].ife_ifnet = ifp; 258} 259 260struct ifaddr * 261ifaddr_byindex(u_short idx) 262{ 263 struct ifaddr *ifa; 264 265 IFNET_RLOCK(); 266 ifa = ifnet_byindex_locked(idx)->if_addr; 267 IFNET_RUNLOCK(); 268 return (ifa); 269} 270 271struct cdev * 272ifdev_byindex(u_short idx) 273{ 274 INIT_VNET_NET(curvnet); 275 struct cdev *cdev; 276 277 IFNET_RLOCK(); 278 cdev = V_ifindex_table[idx].ife_dev; 279 IFNET_RUNLOCK(); 280 return (cdev); 281} 282 283static void 284ifdev_setbyindex(u_short idx, struct cdev *cdev) 285{ 286 INIT_VNET_NET(curvnet); 287 288 IFNET_WLOCK(); 289 V_ifindex_table[idx].ife_dev = cdev; 290 IFNET_WUNLOCK(); 291} 292 293static d_open_t netopen; 294static d_close_t netclose; 295static d_ioctl_t netioctl; 296static d_kqfilter_t netkqfilter; 297 298static struct cdevsw net_cdevsw = { 299 .d_version = D_VERSION, 300 .d_flags = D_NEEDGIANT, 301 .d_open = netopen, 302 .d_close = netclose, 303 .d_ioctl = netioctl, 304 .d_name = "net", 305 .d_kqfilter = netkqfilter, 306}; 307 308static int 309netopen(struct cdev *dev, int flag, int mode, struct thread *td) 310{ 311 return (0); 312} 313 314static int 315netclose(struct cdev *dev, int flags, int fmt, struct thread *td) 316{ 317 return (0); 318} 319 320static int 321netioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) 322{ 323 struct ifnet *ifp; 324 int 
error, idx; 325 326 /* only support interface specific ioctls */ 327 if (IOCGROUP(cmd) != 'i') 328 return (EOPNOTSUPP); 329 idx = dev2unit(dev); 330 if (idx == 0) { 331 /* 332 * special network device, not interface. 333 */ 334 if (cmd == SIOCGIFCONF) 335 return (ifconf(cmd, data)); /* XXX remove cmd */ 336#ifdef __amd64__ 337 if (cmd == SIOCGIFCONF32) 338 return (ifconf(cmd, data)); /* XXX remove cmd */ 339#endif 340 return (EOPNOTSUPP); 341 } 342 343 ifp = ifnet_byindex(idx); 344 if (ifp == NULL) 345 return (ENXIO); 346 347 error = ifhwioctl(cmd, ifp, data, td); 348 if (error == ENOIOCTL) 349 error = EOPNOTSUPP; 350 return (error); 351} 352 353static int 354netkqfilter(struct cdev *dev, struct knote *kn) 355{ 356 INIT_VNET_NET(curvnet); 357 struct knlist *klist; 358 struct ifnet *ifp; 359 int idx; 360 361 switch (kn->kn_filter) { 362 case EVFILT_NETDEV: 363 kn->kn_fop = &netdev_filtops; 364 break; 365 default: 366 return (EINVAL); 367 } 368 369 idx = dev2unit(dev); 370 if (idx == 0) { 371 klist = &V_ifklist; 372 } else { 373 ifp = ifnet_byindex(idx); 374 if (ifp == NULL) 375 return (1); 376 klist = &ifp->if_klist; 377 } 378 379 kn->kn_hook = (caddr_t)klist; 380 381 knlist_add(klist, kn, 0); 382 383 return (0); 384} 385 386static void 387filt_netdetach(struct knote *kn) 388{ 389 struct knlist *klist = (struct knlist *)kn->kn_hook; 390 391 knlist_remove(klist, kn, 0); 392} 393 394static int 395filt_netdev(struct knote *kn, long hint) 396{ 397 struct knlist *klist = (struct knlist *)kn->kn_hook; 398 399 /* 400 * Currently NOTE_EXIT is abused to indicate device detach. 401 */ 402 if (hint == NOTE_EXIT) { 403 kn->kn_data = NOTE_LINKINV; 404 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 405 knlist_remove_inevent(klist, kn); 406 return (1); 407 } 408 if (hint != 0) 409 kn->kn_data = hint; /* current status */ 410 if (kn->kn_sfflags & hint) 411 kn->kn_fflags |= hint; 412 return (kn->kn_fflags != 0); 413} 414 415/* 416 * Network interface utility routines. 
417 * 418 * Routines with ifa_ifwith* names take sockaddr *'s as 419 * parameters. 420 */ 421 422/* ARGSUSED*/ 423static void 424if_init(void *dummy __unused) 425{ 426 427#ifndef VIMAGE_GLOBALS 428 vnet_mod_register(&vnet_net_modinfo); 429#else 430 vnet_net_iattach(NULL); 431#endif 432 433 IFNET_LOCK_INIT(); 434 ifdev_setbyindex(0, make_dev(&net_cdevsw, 0, UID_ROOT, GID_WHEEL, 435 0600, "network")); 436 if_clone_init(); 437} 438 439static int 440vnet_net_iattach(const void *unused __unused) 441{ 442 INIT_VNET_NET(curvnet); 443 444 V_if_index = 0; 445 V_ifindex_table = NULL; 446 V_if_indexlim = 8; 447 448 TAILQ_INIT(&V_ifnet); 449 TAILQ_INIT(&V_ifg_head); 450 knlist_init_mtx(&V_ifklist, NULL); 451 if_grow(); /* create initial table */ 452 453 return (0); 454} 455 456#ifdef VIMAGE 457static int 458vnet_net_idetach(const void *unused __unused) 459{ 460 INIT_VNET_NET(curvnet); 461 462 VNET_ASSERT(TAILQ_EMPTY(&V_ifnet)); 463 VNET_ASSERT(TAILQ_EMPTY(&V_ifg_head)); 464 VNET_ASSERT(SLIST_EMPTY(&V_ifklist.kl_list)); 465 466 free((caddr_t)V_ifindex_table, M_IFNET); 467 468 return (0); 469} 470#endif 471 472void 473if_grow(void) 474{ 475 INIT_VNET_NET(curvnet); 476 u_int n; 477 struct ifindex_entry *e; 478 479 V_if_indexlim <<= 1; 480 n = V_if_indexlim * sizeof(*e); 481 e = malloc(n, M_IFNET, M_WAITOK | M_ZERO); 482 if (V_ifindex_table != NULL) { 483 memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2); 484 free((caddr_t)V_ifindex_table, M_IFNET); 485 } 486 V_ifindex_table = e; 487} 488 489static void 490if_check(void *dummy __unused) 491{ 492 493 /* 494 * If at least one interface added during boot uses 495 * if_watchdog then start the timer. 496 */ 497 if (slowtimo_started) 498 if_slowtimo(0); 499} 500 501/* 502 * Allocate a struct ifnet and an index for an interface. A layer 2 503 * common structure will also be allocated if an allocation routine is 504 * registered for the passed type. 
505 */ 506struct ifnet * 507if_alloc(u_char type) 508{ 509 INIT_VNET_NET(curvnet); 510 struct ifnet *ifp; 511 512 ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO); 513 514 /* 515 * Try to find an empty slot below if_index. If we fail, take 516 * the next slot. 517 * 518 * XXX: should be locked! 519 */ 520 for (ifp->if_index = 1; ifp->if_index <= V_if_index; ifp->if_index++) { 521 if (ifnet_byindex(ifp->if_index) == NULL) 522 break; 523 } 524 /* Catch if_index overflow. */ 525 if (ifp->if_index < 1) { 526 free(ifp, M_IFNET); 527 return (NULL); 528 } 529 if (ifp->if_index > V_if_index) 530 V_if_index = ifp->if_index; 531 if (V_if_index >= V_if_indexlim) 532 if_grow(); 533 534 ifp->if_type = type; 535 ifp->if_alloctype = type; 536 537 if (if_com_alloc[type] != NULL) { 538 ifp->if_l2com = if_com_alloc[type](type, ifp); 539 if (ifp->if_l2com == NULL) { 540 free(ifp, M_IFNET); 541 return (NULL); 542 } 543 } 544 545 IF_ADDR_LOCK_INIT(ifp); 546 TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp); 547 ifp->if_afdata_initialized = 0; 548 IF_AFDATA_LOCK_INIT(ifp); 549 TAILQ_INIT(&ifp->if_addrhead); 550 TAILQ_INIT(&ifp->if_prefixhead); 551 TAILQ_INIT(&ifp->if_multiaddrs); 552 TAILQ_INIT(&ifp->if_groups); 553 knlist_init_mtx(&ifp->if_klist, NULL); 554#ifdef MAC 555 mac_ifnet_init(ifp); 556#endif 557 ifq_init(&ifp->if_snd, ifp); 558 559 refcount_init(&ifp->if_refcount, 1); /* Index reference. */ 560 IFNET_WLOCK(); 561 ifnet_setbyindex(ifp->if_index, ifp); 562 IFNET_WUNLOCK(); 563 return (ifp); 564} 565 566/* 567 * Do the actual work of freeing a struct ifnet, associated index, and layer 568 * 2 common structure. This call is made when the last reference to an 569 * interface is released. 
570 */ 571static void 572if_free_internal(struct ifnet *ifp) 573{ 574 INIT_VNET_NET(curvnet); /* ifp->if_vnet is already NULL here */ 575 576 KASSERT((ifp->if_flags & IFF_DYING), 577 ("if_free_internal: interface not dying")); 578 579 IFNET_WLOCK(); 580 KASSERT(ifp == ifnet_byindex_locked(ifp->if_index), 581 ("%s: freeing unallocated ifnet", ifp->if_xname)); 582 583 ifnet_setbyindex(ifp->if_index, NULL); 584 while (V_if_index > 0 && ifnet_byindex_locked(V_if_index) == NULL) 585 V_if_index--; 586 IFNET_WUNLOCK(); 587 588 if (if_com_free[ifp->if_alloctype] != NULL) 589 if_com_free[ifp->if_alloctype](ifp->if_l2com, 590 ifp->if_alloctype); 591 592#ifdef MAC 593 mac_ifnet_destroy(ifp); 594#endif /* MAC */ 595 KNOTE_UNLOCKED(&ifp->if_klist, NOTE_EXIT); 596 knlist_clear(&ifp->if_klist, 0); 597 knlist_destroy(&ifp->if_klist); 598 IF_AFDATA_DESTROY(ifp); 599 IF_ADDR_LOCK_DESTROY(ifp); 600 ifq_delete(&ifp->if_snd); 601 free(ifp, M_IFNET); 602} 603 604/* 605 * This version should only be called by intefaces that switch their type 606 * after calling if_alloc(). if_free_type() will go away again now that we 607 * have if_alloctype to cache the original allocation type. For now, assert 608 * that they match, since we require that in practice. 609 */ 610void 611if_free_type(struct ifnet *ifp, u_char type) 612{ 613 614 KASSERT(ifp->if_alloctype == type, 615 ("if_free_type: type (%d) != alloctype (%d)", type, 616 ifp->if_alloctype)); 617 618 ifp->if_flags |= IFF_DYING; /* XXX: Locking */ 619 if (!refcount_release(&ifp->if_refcount)) 620 return; 621 if_free_internal(ifp); 622} 623 624/* 625 * This is the normal version of if_free(), used by device drivers to free a 626 * detached network interface. The contents of if_free_type() will move into 627 * here when if_free_type() goes away. 
628 */ 629void 630if_free(struct ifnet *ifp) 631{ 632 633 if_free_type(ifp, ifp->if_alloctype); 634} 635 636/* 637 * Interfaces to keep an ifnet type-stable despite the possibility of the 638 * driver calling if_free(). If there are additional references, we defer 639 * freeing the underlying data structure. 640 */ 641void 642if_ref(struct ifnet *ifp) 643{ 644 645 /* We don't assert the ifnet list lock here, but arguably should. */ 646 refcount_acquire(&ifp->if_refcount); 647} 648 649void 650if_rele(struct ifnet *ifp) 651{ 652 653 if (!refcount_release(&ifp->if_refcount)) 654 return; 655 if_free_internal(ifp); 656} 657 658void 659ifq_init(struct ifaltq *ifq, struct ifnet *ifp) 660{ 661 662 mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF); 663 664 if (ifq->ifq_maxlen == 0) 665 ifq->ifq_maxlen = ifqmaxlen; 666 667 ifq->altq_type = 0; 668 ifq->altq_disc = NULL; 669 ifq->altq_flags &= ALTQF_CANTCHANGE; 670 ifq->altq_tbr = NULL; 671 ifq->altq_ifp = ifp; 672} 673 674void 675ifq_delete(struct ifaltq *ifq) 676{ 677 mtx_destroy(&ifq->ifq_mtx); 678} 679 680/* 681 * Perform generic interface initalization tasks and attach the interface 682 * to the list of "active" interfaces. If vmove flag is set on entry 683 * to if_attach_internal(), perform only a limited subset of initialization 684 * tasks, given that we are moving from one vnet to another an ifnet which 685 * has already been fully initialized. 686 * 687 * XXX: 688 * - The decision to return void and thus require this function to 689 * succeed is questionable. 690 * - We should probably do more sanity checking. For instance we don't 691 * do anything to insure if_xname is unique or non-empty. 
692 */ 693void 694if_attach(struct ifnet *ifp) 695{ 696 697 if_attach_internal(ifp, 0); 698} 699 700static void 701if_attach_internal(struct ifnet *ifp, int vmove) 702{ 703 INIT_VNET_NET(curvnet); 704 unsigned socksize, ifasize; 705 int namelen, masklen; 706 struct sockaddr_dl *sdl; 707 struct ifaddr *ifa; 708 709 if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index)) 710 panic ("%s: BUG: if_attach called without if_alloc'd input()\n", 711 ifp->if_xname); 712 713#ifdef VIMAGE 714 ifp->if_vnet = curvnet; 715 if (ifp->if_home_vnet == NULL) 716 ifp->if_home_vnet = curvnet; 717#endif 718 719 if_addgroup(ifp, IFG_ALL); 720 721 getmicrotime(&ifp->if_lastchange); 722 ifp->if_data.ifi_epoch = time_uptime; 723 ifp->if_data.ifi_datalen = sizeof(struct if_data); 724 725 KASSERT((ifp->if_transmit == NULL && ifp->if_qflush == NULL) || 726 (ifp->if_transmit != NULL && ifp->if_qflush != NULL), 727 ("transmit and qflush must both either be set or both be NULL")); 728 if (ifp->if_transmit == NULL) { 729 ifp->if_transmit = if_transmit; 730 ifp->if_qflush = if_qflush; 731 } 732 733 if (!vmove) { 734#ifdef MAC 735 mac_ifnet_create(ifp); 736#endif 737 738 if (IS_DEFAULT_VNET(curvnet)) { 739 ifdev_setbyindex(ifp->if_index, make_dev(&net_cdevsw, 740 ifp->if_index, UID_ROOT, GID_WHEEL, 0600, "%s/%s", 741 net_cdevsw.d_name, ifp->if_xname)); 742 make_dev_alias(ifdev_byindex(ifp->if_index), "%s%d", 743 net_cdevsw.d_name, ifp->if_index); 744 } 745 746 /* 747 * Create a Link Level name for this device. 748 */ 749 namelen = strlen(ifp->if_xname); 750 /* 751 * Always save enough space for any possiable name so we 752 * can do a rename in place later. 
753 */ 754 masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ; 755 socksize = masklen + ifp->if_addrlen; 756 if (socksize < sizeof(*sdl)) 757 socksize = sizeof(*sdl); 758 socksize = roundup2(socksize, sizeof(long)); 759 ifasize = sizeof(*ifa) + 2 * socksize; 760 ifa = malloc(ifasize, M_IFADDR, M_WAITOK | M_ZERO); 761 ifa_init(ifa); 762 sdl = (struct sockaddr_dl *)(ifa + 1); 763 sdl->sdl_len = socksize; 764 sdl->sdl_family = AF_LINK; 765 bcopy(ifp->if_xname, sdl->sdl_data, namelen); 766 sdl->sdl_nlen = namelen; 767 sdl->sdl_index = ifp->if_index; 768 sdl->sdl_type = ifp->if_type; 769 ifp->if_addr = ifa; 770 ifa->ifa_ifp = ifp; 771 ifa->ifa_rtrequest = link_rtrequest; 772 ifa->ifa_addr = (struct sockaddr *)sdl; 773 sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl); 774 ifa->ifa_netmask = (struct sockaddr *)sdl; 775 sdl->sdl_len = masklen; 776 while (namelen != 0) 777 sdl->sdl_data[--namelen] = 0xff; 778 TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link); 779 /* Reliably crash if used uninitialized. */ 780 ifp->if_broadcastaddr = NULL; 781 } 782 783 IFNET_WLOCK(); 784 TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link); 785#ifdef VIMAGE 786 curvnet->ifcnt++; 787#endif 788 IFNET_WUNLOCK(); 789 790 if (domain_init_status >= 2) 791 if_attachdomain1(ifp); 792 793 EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp); 794 if (IS_DEFAULT_VNET(curvnet)) 795 devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL); 796 797 /* Announce the interface. */ 798 rt_ifannouncemsg(ifp, IFAN_ARRIVAL); 799 800 if (!vmove && ifp->if_watchdog != NULL) { 801 if_printf(ifp, 802 "WARNING: using obsoleted if_watchdog interface\n"); 803 804 /* 805 * Note that we need if_slowtimo(). If this happens after 806 * boot, then call if_slowtimo() directly. 
807 */ 808 if (atomic_cmpset_int(&slowtimo_started, 0, 1) && !cold) 809 if_slowtimo(0); 810 } 811} 812 813static void 814if_attachdomain(void *dummy) 815{ 816 INIT_VNET_NET(curvnet); 817 struct ifnet *ifp; 818 int s; 819 820 s = splnet(); 821 TAILQ_FOREACH(ifp, &V_ifnet, if_link) 822 if_attachdomain1(ifp); 823 splx(s); 824} 825SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND, 826 if_attachdomain, NULL); 827 828static void 829if_attachdomain1(struct ifnet *ifp) 830{ 831 struct domain *dp; 832 int s; 833 834 s = splnet(); 835 836 /* 837 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we 838 * cannot lock ifp->if_afdata initialization, entirely. 839 */ 840 if (IF_AFDATA_TRYLOCK(ifp) == 0) { 841 splx(s); 842 return; 843 } 844 if (ifp->if_afdata_initialized >= domain_init_status) { 845 IF_AFDATA_UNLOCK(ifp); 846 splx(s); 847 printf("if_attachdomain called more than once on %s\n", 848 ifp->if_xname); 849 return; 850 } 851 ifp->if_afdata_initialized = domain_init_status; 852 IF_AFDATA_UNLOCK(ifp); 853 854 /* address family dependent data region */ 855 bzero(ifp->if_afdata, sizeof(ifp->if_afdata)); 856 for (dp = domains; dp; dp = dp->dom_next) { 857 if (dp->dom_ifattach) 858 ifp->if_afdata[dp->dom_family] = 859 (*dp->dom_ifattach)(ifp); 860 } 861 862 splx(s); 863} 864 865/* 866 * Remove any unicast or broadcast network addresses from an interface. 867 */ 868void 869if_purgeaddrs(struct ifnet *ifp) 870{ 871 struct ifaddr *ifa, *next; 872 873 TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) { 874 if (ifa->ifa_addr->sa_family == AF_LINK) 875 continue; 876#ifdef INET 877 /* XXX: Ugly!! 
ad hoc just for INET */ 878 if (ifa->ifa_addr->sa_family == AF_INET) { 879 struct ifaliasreq ifr; 880 881 bzero(&ifr, sizeof(ifr)); 882 ifr.ifra_addr = *ifa->ifa_addr; 883 if (ifa->ifa_dstaddr) 884 ifr.ifra_broadaddr = *ifa->ifa_dstaddr; 885 if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp, 886 NULL) == 0) 887 continue; 888 } 889#endif /* INET */ 890#ifdef INET6 891 if (ifa->ifa_addr->sa_family == AF_INET6) { 892 in6_purgeaddr(ifa); 893 /* ifp_addrhead is already updated */ 894 continue; 895 } 896#endif /* INET6 */ 897 TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); 898 ifa_free(ifa); 899 } 900} 901 902/* 903 * Remove any multicast network addresses from an interface. 904 */ 905void 906if_purgemaddrs(struct ifnet *ifp) 907{ 908 struct ifmultiaddr *ifma; 909 struct ifmultiaddr *next; 910 911 IF_ADDR_LOCK(ifp); 912 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next) 913 if_delmulti_locked(ifp, ifma, 1); 914 IF_ADDR_UNLOCK(ifp); 915} 916 917/* 918 * Detach an interface, removing it from the list of "active" interfaces. 919 * If vmove flag is set on entry to if_detach_internal(), perform only a 920 * limited subset of cleanup tasks, given that we are moving an ifnet from 921 * one vnet to another, where it must be fully operational. 922 * 923 * XXXRW: There are some significant questions about event ordering, and 924 * how to prevent things from starting to use the interface during detach. 
925 */ 926void 927if_detach(struct ifnet *ifp) 928{ 929 930 if_detach_internal(ifp, 0); 931} 932 933static void 934if_detach_internal(struct ifnet *ifp, int vmove) 935{ 936 INIT_VNET_NET(ifp->if_vnet); 937 struct ifaddr *ifa; 938 struct radix_node_head *rnh; 939 int i, j; 940 struct domain *dp; 941 struct ifnet *iter; 942 int found = 0; 943 944 IFNET_WLOCK(); 945 TAILQ_FOREACH(iter, &V_ifnet, if_link) 946 if (iter == ifp) { 947 TAILQ_REMOVE(&V_ifnet, ifp, if_link); 948 found = 1; 949 break; 950 } 951#ifdef VIMAGE 952 if (found) 953 curvnet->ifcnt--; 954#endif 955 IFNET_WUNLOCK(); 956 if (!found) { 957 if (vmove) 958 panic("interface not in it's own ifnet list"); 959 else 960 return; /* XXX this should panic as well? */ 961 } 962 963 /* 964 * Remove/wait for pending events. 965 */ 966 taskqueue_drain(taskqueue_swi, &ifp->if_linktask); 967 968 /* 969 * Remove routes and flush queues. 970 */ 971 if_down(ifp); 972#ifdef ALTQ 973 if (ALTQ_IS_ENABLED(&ifp->if_snd)) 974 altq_disable(&ifp->if_snd); 975 if (ALTQ_IS_ATTACHED(&ifp->if_snd)) 976 altq_detach(&ifp->if_snd); 977#endif 978 979 if_purgeaddrs(ifp); 980 981#ifdef INET 982 in_ifdetach(ifp); 983#endif 984 985#ifdef INET6 986 /* 987 * Remove all IPv6 kernel structs related to ifp. This should be done 988 * before removing routing entries below, since IPv6 interface direct 989 * routes are expected to be removed by the IPv6-specific kernel API. 990 * Otherwise, the kernel will detect some inconsistency and bark it. 991 */ 992 in6_ifdetach(ifp); 993#endif 994 if_purgemaddrs(ifp); 995 996 if (!vmove) { 997 /* 998 * Prevent further calls into the device driver via ifnet. 999 */ 1000 if_dead(ifp); 1001 1002 /* 1003 * Remove link ifaddr pointer and maybe decrement if_index. 1004 * Clean up all addresses. 1005 */ 1006 ifp->if_addr = NULL; 1007 if (IS_DEFAULT_VNET(curvnet)) 1008 destroy_dev(ifdev_byindex(ifp->if_index)); 1009 ifdev_setbyindex(ifp->if_index, NULL); 1010 1011 /* We can now free link ifaddr. 
*/ 1012 if (!TAILQ_EMPTY(&ifp->if_addrhead)) { 1013 ifa = TAILQ_FIRST(&ifp->if_addrhead); 1014 TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link); 1015 ifa_free(ifa); 1016 } 1017 } 1018 1019 /* 1020 * Delete all remaining routes using this interface 1021 * Unfortuneatly the only way to do this is to slog through 1022 * the entire routing table looking for routes which point 1023 * to this interface...oh well... 1024 */ 1025 for (i = 1; i <= AF_MAX; i++) { 1026 for (j = 0; j < rt_numfibs; j++) { 1027 rnh = rt_tables_get_rnh(j, i); 1028 if (rnh == NULL) 1029 continue; 1030 RADIX_NODE_HEAD_LOCK(rnh); 1031 (void) rnh->rnh_walktree(rnh, if_rtdel, ifp); 1032 RADIX_NODE_HEAD_UNLOCK(rnh); 1033 } 1034 } 1035 1036 /* Announce that the interface is gone. */ 1037 rt_ifannouncemsg(ifp, IFAN_DEPARTURE); 1038 EVENTHANDLER_INVOKE(ifnet_departure_event, ifp); 1039 if (IS_DEFAULT_VNET(curvnet)) 1040 devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL); 1041 if_delgroups(ifp); 1042 1043 IF_AFDATA_LOCK(ifp); 1044 for (dp = domains; dp; dp = dp->dom_next) { 1045 if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family]) 1046 (*dp->dom_ifdetach)(ifp, 1047 ifp->if_afdata[dp->dom_family]); 1048 } 1049 ifp->if_afdata_initialized = 0; 1050 IF_AFDATA_UNLOCK(ifp); 1051} 1052 1053#ifdef VIMAGE 1054/* 1055 * if_vmove() performs a limited version of if_detach() in current 1056 * vnet and if_attach()es the ifnet to the vnet specified as 2nd arg. 1057 * An attempt is made to shrink if_index in current vnet, find an 1058 * unused if_index in target vnet and calls if_grow() if necessary, 1059 * and finally find an unused if_xname for the target vnet. 1060 */ 1061void 1062if_vmove(struct ifnet *ifp, struct vnet *new_vnet) 1063{ 1064 1065 /* 1066 * Detach from current vnet, but preserve LLADDR info, do not 1067 * mark as dead etc. so that the ifnet can be reattached later. 
1068 */ 1069 if_detach_internal(ifp, 1); 1070 1071 /* 1072 * Unlink the ifnet from ifindex_table[] in current vnet, 1073 * and shrink the if_index for that vnet if possible. 1074 * do / while construct below is needed to confine the scope 1075 * of INIT_VNET_NET(). 1076 */ 1077 { 1078 INIT_VNET_NET(curvnet); 1079 1080 IFNET_WLOCK(); 1081 ifnet_setbyindex(ifp->if_index, NULL); 1082 while (V_if_index > 0 && \ 1083 ifnet_byindex_locked(V_if_index) == NULL) 1084 V_if_index--; 1085 IFNET_WUNLOCK(); 1086 }; 1087 1088 /* 1089 * Switch to the context of the target vnet. 1090 */ 1091 CURVNET_SET_QUIET(new_vnet); 1092 INIT_VNET_NET(new_vnet); 1093 1094 /* 1095 * Try to find an empty slot below if_index. If we fail, take 1096 * the next slot. 1097 */ 1098 IFNET_WLOCK(); 1099 for (ifp->if_index = 1; ifp->if_index <= V_if_index; ifp->if_index++) { 1100 if (ifnet_byindex_locked(ifp->if_index) == NULL) 1101 break; 1102 } 1103 /* Catch if_index overflow. */ 1104 if (ifp->if_index < 1) 1105 panic("if_index overflow"); 1106 1107 if (ifp->if_index > V_if_index) 1108 V_if_index = ifp->if_index; 1109 if (V_if_index >= V_if_indexlim) 1110 if_grow(); 1111 ifnet_setbyindex(ifp->if_index, ifp); 1112 IFNET_WUNLOCK(); 1113 1114 if_attach_internal(ifp, 1); 1115 1116 CURVNET_RESTORE(); 1117} 1118#endif /* VIMAGE */ 1119 1120/* 1121 * Add a group to an interface 1122 */ 1123int 1124if_addgroup(struct ifnet *ifp, const char *groupname) 1125{ 1126 INIT_VNET_NET(ifp->if_vnet); 1127 struct ifg_list *ifgl; 1128 struct ifg_group *ifg = NULL; 1129 struct ifg_member *ifgm; 1130 1131 if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' && 1132 groupname[strlen(groupname) - 1] <= '9') 1133 return (EINVAL); 1134 1135 IFNET_WLOCK(); 1136 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1137 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) { 1138 IFNET_WUNLOCK(); 1139 return (EEXIST); 1140 } 1141 1142 if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP, 1143 M_NOWAIT)) == NULL) { 
1144 IFNET_WUNLOCK(); 1145 return (ENOMEM); 1146 } 1147 1148 if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member), 1149 M_TEMP, M_NOWAIT)) == NULL) { 1150 free(ifgl, M_TEMP); 1151 IFNET_WUNLOCK(); 1152 return (ENOMEM); 1153 } 1154 1155 TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) 1156 if (!strcmp(ifg->ifg_group, groupname)) 1157 break; 1158 1159 if (ifg == NULL) { 1160 if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group), 1161 M_TEMP, M_NOWAIT)) == NULL) { 1162 free(ifgl, M_TEMP); 1163 free(ifgm, M_TEMP); 1164 IFNET_WUNLOCK(); 1165 return (ENOMEM); 1166 } 1167 strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group)); 1168 ifg->ifg_refcnt = 0; 1169 TAILQ_INIT(&ifg->ifg_members); 1170 EVENTHANDLER_INVOKE(group_attach_event, ifg); 1171 TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next); 1172 } 1173 1174 ifg->ifg_refcnt++; 1175 ifgl->ifgl_group = ifg; 1176 ifgm->ifgm_ifp = ifp; 1177 1178 IF_ADDR_LOCK(ifp); 1179 TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next); 1180 TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next); 1181 IF_ADDR_UNLOCK(ifp); 1182 1183 IFNET_WUNLOCK(); 1184 1185 EVENTHANDLER_INVOKE(group_change_event, groupname); 1186 1187 return (0); 1188} 1189 1190/* 1191 * Remove a group from an interface 1192 */ 1193int 1194if_delgroup(struct ifnet *ifp, const char *groupname) 1195{ 1196 INIT_VNET_NET(ifp->if_vnet); 1197 struct ifg_list *ifgl; 1198 struct ifg_member *ifgm; 1199 1200 IFNET_WLOCK(); 1201 TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) 1202 if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) 1203 break; 1204 if (ifgl == NULL) { 1205 IFNET_WUNLOCK(); 1206 return (ENOENT); 1207 } 1208 1209 IF_ADDR_LOCK(ifp); 1210 TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next); 1211 IF_ADDR_UNLOCK(ifp); 1212 1213 TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next) 1214 if (ifgm->ifgm_ifp == ifp) 1215 break; 1216 1217 if (ifgm != NULL) { 1218 TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next); 1219 free(ifgm, M_TEMP); 1220 } 

	/* Last member gone: tear down the group itself. */
	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
		TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
		EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
		free(ifgl->ifgl_group, M_TEMP);
	}
	IFNET_WUNLOCK();

	free(ifgl, M_TEMP);

	EVENTHANDLER_INVOKE(group_change_event, groupname);

	return (0);
}

/*
 * Remove an interface from all groups it is a member of, e.g. at
 * interface detach time.
 */
static void
if_delgroups(struct ifnet *ifp)
{
	INIT_VNET_NET(ifp->if_vnet);
	struct ifg_list *ifgl;
	struct ifg_member *ifgm;
	char groupname[IFNAMSIZ];

	IFNET_WLOCK();
	while (!TAILQ_EMPTY(&ifp->if_groups)) {
		ifgl = TAILQ_FIRST(&ifp->if_groups);

		/*
		 * Copy the name out first: the group may be freed below,
		 * but the change event still needs the name.
		 */
		strlcpy(groupname, ifgl->ifgl_group->ifg_group, IFNAMSIZ);

		IF_ADDR_LOCK(ifp);
		TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
		IF_ADDR_UNLOCK(ifp);

		TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
			if (ifgm->ifgm_ifp == ifp)
				break;

		if (ifgm != NULL) {
			TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm,
			    ifgm_next);
			free(ifgm, M_TEMP);
		}

		if (--ifgl->ifgl_group->ifg_refcnt == 0) {
			TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
			EVENTHANDLER_INVOKE(group_detach_event,
			    ifgl->ifgl_group);
			free(ifgl->ifgl_group, M_TEMP);
		}

		/*
		 * Drop IFNET_WLOCK across the event handler invocation, then
		 * re-take it before looking at the next membership.
		 */
		IFNET_WUNLOCK();

		free(ifgl, M_TEMP);

		EVENTHANDLER_INVOKE(group_change_event, groupname);

		IFNET_WLOCK();
	}
	IFNET_WUNLOCK();
}

/*
 * Stores all groups from an interface in memory pointed
 * to by data.
 *
 * With ifgr_len == 0 this is a size query: it reports the buffer size
 * needed for a subsequent call.  Otherwise the group names are copied
 * out to the user buffer at ifgr_groups, at most ifgr_len bytes.
 */
static int
if_getgroup(struct ifgroupreq *data, struct ifnet *ifp)
{
	int			 len, error;
	struct ifg_list		*ifgl;
	struct ifg_req		 ifgrq, *ifgp;
	struct ifgroupreq	*ifgr = data;

	if (ifgr->ifgr_len == 0) {
		/* Size query only. */
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
			ifgr->ifgr_len += sizeof(struct ifg_req);
		IF_ADDR_UNLOCK(ifp);
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	/* XXX: wire */
	IF_ADDR_LOCK(ifp);
	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
		if (len < sizeof(ifgrq)) {
			IF_ADDR_UNLOCK(ifp);
			return (EINVAL);
		}
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
		    sizeof(ifgrq.ifgrq_group));
		/*
		 * NOTE(review): copyout() while holding IF_ADDR_LOCK can
		 * fault/sleep; the "XXX: wire" above suggests this is a
		 * known deficiency -- confirm.
		 */
		if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
			IF_ADDR_UNLOCK(ifp);
			return (error);
		}
		len -= sizeof(ifgrq);
		ifgp++;
	}
	IF_ADDR_UNLOCK(ifp);

	return (0);
}

/*
 * Stores all members of a group in memory pointed to by data.
 *
 * Same two-phase protocol as if_getgroup(): ifgr_len == 0 is a size
 * query, otherwise member interface names are copied out.
 */
static int
if_getgroupmembers(struct ifgroupreq *data)
{
	INIT_VNET_NET(curvnet);
	struct ifgroupreq	*ifgr = data;
	struct ifg_group	*ifg;
	struct ifg_member	*ifgm;
	struct ifg_req		 ifgrq, *ifgp;
	int			 len, error;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
			break;
	if (ifg == NULL) {
		IFNET_RUNLOCK();
		return (ENOENT);
	}

	if (ifgr->ifgr_len == 0) {
		/* Size query only. */
		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
			ifgr->ifgr_len += sizeof(ifgrq);
		IFNET_RUNLOCK();
		return (0);
	}

	len = ifgr->ifgr_len;
	ifgp = ifgr->ifgr_groups;
	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
		if (len < sizeof(ifgrq)) {
			IFNET_RUNLOCK();
			return (EINVAL);
		}
		bzero(&ifgrq, sizeof ifgrq);
		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
		    sizeof(ifgrq.ifgrq_member));
		if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
			IFNET_RUNLOCK();
			return (error);
		}
		len -= sizeof(ifgrq);
		ifgp++;
	}
	IFNET_RUNLOCK();

	return (0);
}

/*
 * Delete Routes for a Network Interface
 *
 * Called for each routing entry via the rnh->rnh_walktree() call above
 * to delete all route entries referencing a detaching network interface.
 *
 * Arguments:
 *	rn	pointer to node in the routing table
 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
 *
 * Returns:
 *	0	successful
 *	errno	failed - reason indicated
 *
 */
static int
if_rtdel(struct radix_node *rn, void *arg)
{
	struct rtentry	*rt = (struct rtentry *)rn;
	struct ifnet	*ifp = arg;
	int		err;

	if (rt->rt_ifp == ifp) {

		/*
		 * Protect (sorta) against walktree recursion problems
		 * with cloned routes
		 */
		if ((rt->rt_flags & RTF_UP) == 0)
			return (0);

		err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway,
				rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED,
				(struct rtentry **) NULL, rt->rt_fibnum);
		if (err) {
			/* Deletion failure is logged but not fatal. */
			log(LOG_WARNING, "if_rtdel: error %d\n", err);
		}
	}

	return (0);
}

/*
 * Reference count functions for ifaddrs.
 */

/* Initialize an ifaddr: mutex plus a reference count starting at 1. */
void
ifa_init(struct ifaddr *ifa)
{

	mtx_init(&ifa->ifa_mtx, "ifaddr", NULL, MTX_DEF);
	refcount_init(&ifa->ifa_refcnt, 1);
}

/* Acquire an additional reference on an ifaddr. */
void
ifa_ref(struct ifaddr *ifa)
{

	refcount_acquire(&ifa->ifa_refcnt);
}

/* Release a reference; the last release destroys and frees the ifaddr. */
void
ifa_free(struct ifaddr *ifa)
{

	if (refcount_release(&ifa->ifa_refcnt)) {
		mtx_destroy(&ifa->ifa_mtx);
		free(ifa, M_IFADDR);
	}
}

/*
 * XXX: Because sockaddr_dl has deeper structure than the sockaddr
 * structs used to represent other address families, it is necessary
 * to perform a different comparison.
 */

/* Byte-wise equality of two sockaddrs, bounded by a1's sa_len. */
#define	sa_equal(a1, a2)	\
	(bcmp((a1), (a2), ((a1))->sa_len) == 0)

/* Equality of two link-level sockaddrs: same sdl_len, same LLADDR bytes. */
#define	sa_dl_equal(a1, a2)	\
	((((struct sockaddr_dl *)(a1))->sdl_len ==			\
	 ((struct sockaddr_dl *)(a2))->sdl_len) &&			\
	 (bcmp(LLADDR((struct sockaddr_dl *)(a1)),			\
	       LLADDR((struct sockaddr_dl *)(a2)),			\
	       ((struct sockaddr_dl *)(a1))->sdl_alen) == 0))

/*
 * Locate an interface based on a complete address.
 *
 * Returns the matching ifaddr, or NULL if no interface has this address
 * (or broadcast address).  NOTE(review): the returned ifaddr is not
 * referenced here -- caller lifetime rules apply; confirm against callers.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithaddr(struct sockaddr *addr)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if (sa_equal(addr, ifa->ifa_addr)) {
				IF_ADDR_UNLOCK(ifp);
				goto done;
			}
			/* IP6 doesn't have broadcast */
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr)) {
				IF_ADDR_UNLOCK(ifp);
				goto done;
			}
		}
		IF_ADDR_UNLOCK(ifp);
	}
	ifa = NULL;
done:
	IFNET_RUNLOCK();
	return (ifa);
}

/*
 * Locate an interface based on the broadcast address.
 *
 * Returns the matching ifaddr, or NULL if no broadcast-capable
 * interface carries this broadcast address.
 */
/* ARGSUSED */
struct ifaddr *
ifa_ifwithbroadaddr(struct sockaddr *addr)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			if ((ifp->if_flags & IFF_BROADCAST) &&
			    ifa->ifa_broadaddr &&
			    ifa->ifa_broadaddr->sa_len != 0 &&
			    sa_equal(ifa->ifa_broadaddr, addr)) {
				IF_ADDR_UNLOCK(ifp);
				goto done;
			}
		}
		IF_ADDR_UNLOCK(ifp);
	}
	ifa = NULL;
done:
	IFNET_RUNLOCK();
	return (ifa);
}

/*
 * Locate the point to point interface with a given destination address.
 *
 * Only IFF_POINTOPOINT interfaces are considered; returns NULL if no
 * such interface has addr as its destination.
 */
/*ARGSUSED*/
struct ifaddr *
ifa_ifwithdstaddr(struct sockaddr *addr)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;
	struct ifaddr *ifa;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
			continue;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != addr->sa_family)
				continue;
			/* ifa_dstaddr may legitimately be NULL. */
			if (ifa->ifa_dstaddr != NULL &&
			    sa_equal(addr, ifa->ifa_dstaddr)) {
				IF_ADDR_UNLOCK(ifp);
				goto done;
			}
		}
		IF_ADDR_UNLOCK(ifp);
	}
	ifa = NULL;
done:
	IFNET_RUNLOCK();
	return (ifa);
}

/*
 * Find an interface on a specific network. If many, choice
 * is most specific found.
 */
struct ifaddr *
ifa_ifwithnet(struct sockaddr *addr)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifaddr *ifa_maybe = (struct ifaddr *) 0;
	u_int af = addr->sa_family;
	char *addr_data = addr->sa_data, *cplim;

	/*
	 * AF_LINK addresses can be looked up directly by their index number,
	 * so do that if we can.
	 */
	if (af == AF_LINK) {
		struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
		if (sdl->sdl_index && sdl->sdl_index <= V_if_index)
			return (ifaddr_byindex(sdl->sdl_index));
	}

	/*
	 * Scan though each interface, looking for ones that have
	 * addresses in this address family.
	 */
	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			char *cp, *cp2, *cp3;

			if (ifa->ifa_addr->sa_family != af)
next:				continue;
			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
				/*
				 * This is a bit broken as it doesn't
				 * take into account that the remote end may
				 * be a single node in the network we are
				 * looking for.
				 * The trouble is that we don't know the
				 * netmask for the remote end.
				 */
				if (ifa->ifa_dstaddr != NULL &&
				    sa_equal(addr, ifa->ifa_dstaddr)) {
					IF_ADDR_UNLOCK(ifp);
					goto done;
				}
			} else {
				/*
				 * if we have a special address handler,
				 * then use it instead of the generic one.
				 */
				if (ifa->ifa_claim_addr) {
					if ((*ifa->ifa_claim_addr)(ifa, addr)) {
						IF_ADDR_UNLOCK(ifp);
						goto done;
					}
					continue;
				}

				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
1637 * (A byte at a time) 1638 */ 1639 if (ifa->ifa_netmask == 0) 1640 continue; 1641 cp = addr_data; 1642 cp2 = ifa->ifa_addr->sa_data; 1643 cp3 = ifa->ifa_netmask->sa_data; 1644 cplim = ifa->ifa_netmask->sa_len 1645 + (char *)ifa->ifa_netmask; 1646 while (cp3 < cplim) 1647 if ((*cp++ ^ *cp2++) & *cp3++) 1648 goto next; /* next address! */ 1649 /* 1650 * If the netmask of what we just found 1651 * is more specific than what we had before 1652 * (if we had one) then remember the new one 1653 * before continuing to search 1654 * for an even better one. 1655 */ 1656 if (ifa_maybe == 0 || 1657 rn_refines((caddr_t)ifa->ifa_netmask, 1658 (caddr_t)ifa_maybe->ifa_netmask)) 1659 ifa_maybe = ifa; 1660 } 1661 } 1662 IF_ADDR_UNLOCK(ifp); 1663 } 1664 ifa = ifa_maybe; 1665done: 1666 IFNET_RUNLOCK(); 1667 return (ifa); 1668} 1669 1670/* 1671 * Find an interface address specific to an interface best matching 1672 * a given address. 1673 */ 1674struct ifaddr * 1675ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp) 1676{ 1677 struct ifaddr *ifa; 1678 char *cp, *cp2, *cp3; 1679 char *cplim; 1680 struct ifaddr *ifa_maybe = 0; 1681 u_int af = addr->sa_family; 1682 1683 if (af >= AF_MAX) 1684 return (0); 1685 IF_ADDR_LOCK(ifp); 1686 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { 1687 if (ifa->ifa_addr->sa_family != af) 1688 continue; 1689 if (ifa_maybe == 0) 1690 ifa_maybe = ifa; 1691 if (ifa->ifa_netmask == 0) { 1692 if (sa_equal(addr, ifa->ifa_addr) || 1693 (ifa->ifa_dstaddr && 1694 sa_equal(addr, ifa->ifa_dstaddr))) 1695 goto done; 1696 continue; 1697 } 1698 if (ifp->if_flags & IFF_POINTOPOINT) { 1699 if (sa_equal(addr, ifa->ifa_dstaddr)) 1700 goto done; 1701 } else { 1702 cp = addr->sa_data; 1703 cp2 = ifa->ifa_addr->sa_data; 1704 cp3 = ifa->ifa_netmask->sa_data; 1705 cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask; 1706 for (; cp3 < cplim; cp3++) 1707 if ((*cp++ ^ *cp2++) & *cp3) 1708 break; 1709 if (cp3 == cplim) 1710 goto done; 1711 } 1712 } 1713 ifa = 
ifa_maybe; 1714done: 1715 IF_ADDR_UNLOCK(ifp); 1716 return (ifa); 1717} 1718 1719#include <net/route.h> 1720#include <net/if_llatbl.h> 1721 1722/* 1723 * Default action when installing a route with a Link Level gateway. 1724 * Lookup an appropriate real ifa to point to. 1725 * This should be moved to /sys/net/link.c eventually. 1726 */ 1727static void 1728link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info) 1729{ 1730 struct ifaddr *ifa, *oifa; 1731 struct sockaddr *dst; 1732 struct ifnet *ifp; 1733 1734 RT_LOCK_ASSERT(rt); 1735 1736 if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) || 1737 ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0)) 1738 return; 1739 ifa = ifaof_ifpforaddr(dst, ifp); 1740 if (ifa) { 1741 ifa_ref(ifa); /* XXX */ 1742 oifa = rt->rt_ifa; 1743 rt->rt_ifa = ifa; 1744 ifa_free(oifa); 1745 if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest) 1746 ifa->ifa_rtrequest(cmd, rt, info); 1747 } 1748} 1749 1750/* 1751 * Mark an interface down and notify protocols of 1752 * the transition. 1753 * NOTE: must be called at splnet or eqivalent. 1754 */ 1755static void 1756if_unroute(struct ifnet *ifp, int flag, int fam) 1757{ 1758 struct ifaddr *ifa; 1759 1760 KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP")); 1761 1762 ifp->if_flags &= ~flag; 1763 getmicrotime(&ifp->if_lastchange); 1764 TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) 1765 if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family)) 1766 pfctlinput(PRC_IFDOWN, ifa->ifa_addr); 1767 ifp->if_qflush(ifp); 1768 1769#if defined(INET) || defined(INET6) 1770#ifdef DEV_CARP 1771 if (ifp->if_carp) 1772 carp_carpdev_state(ifp->if_carp); 1773#endif 1774#endif 1775 rt_ifmsg(ifp); 1776} 1777 1778/* 1779 * Mark an interface up and notify protocols of 1780 * the transition. 1781 * NOTE: must be called at splnet or eqivalent. 
 */
static void
if_route(struct ifnet *ifp, int flag, int fam)
{
	struct ifaddr *ifa;

	KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP"));

	ifp->if_flags |= flag;
	getmicrotime(&ifp->if_lastchange);
	/* Notify the matching protocol(s) of the transition. */
	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
			pfctlinput(PRC_IFUP, ifa->ifa_addr);
#if defined(INET) || defined(INET6)
#ifdef DEV_CARP
	if (ifp->if_carp)
		carp_carpdev_state(ifp->if_carp);
#endif
#endif
	rt_ifmsg(ifp);
#ifdef INET6
	in6_if_up(ifp);
#endif
}

void	(*vlan_link_state_p)(struct ifnet *, int);	/* XXX: private from if_vlan */
void	(*vlan_trunk_cap_p)(struct ifnet *);		/* XXX: private from if_vlan */

/*
 * Handle a change in the interface link state. To avoid LORs
 * between driver lock and upper layer locks, as well as possible
 * recursions, we post event to taskqueue, and all job
 * is done in static do_link_state_change().
 */
void
if_link_state_change(struct ifnet *ifp, int link_state)
{
	/* Return if state hasn't changed. */
	if (ifp->if_link_state == link_state)
		return;

	ifp->if_link_state = link_state;

	/* Defer the actual notification work to the softint taskqueue. */
	taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask);
}

/*
 * Taskqueue handler: propagate a link state change to routing socket,
 * kqueue listeners, vlan/netgraph/carp/bridge/lagg consumers, devd and
 * the system log.  "pending" counts coalesced enqueues.
 */
static void
do_link_state_change(void *arg, int pending)
{
	struct ifnet *ifp = (struct ifnet *)arg;
	/*
	 * NOTE(review): if_link_state is re-read here; rapid toggles may
	 * have been coalesced, and only the latest state is reported.
	 */
	int link_state = ifp->if_link_state;
	int link;
	CURVNET_SET(ifp->if_vnet);

	/* Notify that the link state has changed. */
	rt_ifmsg(ifp);
	if (link_state == LINK_STATE_UP)
		link = NOTE_LINKUP;
	else if (link_state == LINK_STATE_DOWN)
		link = NOTE_LINKDOWN;
	else
		link = NOTE_LINKINV;
	KNOTE_UNLOCKED(&ifp->if_klist, link);
	if (ifp->if_vlantrunk != NULL)
		(*vlan_link_state_p)(ifp, link);

	if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) &&
	    IFP2AC(ifp)->ac_netgraph != NULL)
		(*ng_ether_link_state_p)(ifp, link_state);
#if defined(INET) || defined(INET6)
#ifdef DEV_CARP
	if (ifp->if_carp)
		carp_carpdev_state(ifp->if_carp);
#endif
#endif
	if (ifp->if_bridge) {
		KASSERT(bstp_linkstate_p != NULL,("if_bridge bstp not loaded!"));
		(*bstp_linkstate_p)(ifp, link_state);
	}
	if (ifp->if_lagg) {
		KASSERT(lagg_linkstate_p != NULL,("if_lagg not loaded!"));
		(*lagg_linkstate_p)(ifp, link_state);
	}

	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("IFNET", ifp->if_xname,
		    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN",
		    NULL);
	if (pending > 1)
		if_printf(ifp, "%d link states coalesced\n", pending);
	if (log_link_state_change)
		log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname,
		    (link_state == LINK_STATE_UP) ? "UP" : "DOWN" );
	CURVNET_RESTORE();
}

/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_down(struct ifnet *ifp)
{

	if_unroute(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
void
if_up(struct ifnet *ifp)
{

	if_route(ifp, IFF_UP, AF_UNSPEC);
}

/*
 * Flush an interface queue.
 *
 * Frees every mbuf chain on the interface send queue (purging ALTQ
 * state first if it is enabled) and resets the queue to empty.
 */
void
if_qflush(struct ifnet *ifp)
{
	struct mbuf *m, *n;
	struct ifaltq *ifq;

	ifq = &ifp->if_snd;
	IFQ_LOCK(ifq);
#ifdef ALTQ
	if (ALTQ_IS_ENABLED(ifq))
		ALTQ_PURGE(ifq);
#endif
	n = ifq->ifq_head;
	while ((m = n) != 0) {
		n = m->m_act;
		m_freem(m);
	}
	ifq->ifq_head = 0;
	ifq->ifq_tail = 0;
	ifq->ifq_len = 0;
	IFQ_UNLOCK(ifq);
}

/*
 * Handle interface watchdog timer routines.  Called
 * from softclock, we decrement timers (if set) and
 * call the appropriate interface routine on expiration.
 *
 * XXXRW: Note that because timeouts run with Giant, if_watchdog() is called
 * holding Giant.
 */
static void
if_slowtimo(void *arg)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ifnet *ifp;
	int s = splimp();

	IFNET_RLOCK();
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		INIT_VNET_NET(vnet_iter);
		TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
			/* Fire only when the timer counts down to zero. */
			if (ifp->if_timer == 0 || --ifp->if_timer)
				continue;
			if (ifp->if_watchdog)
				(*ifp->if_watchdog)(ifp);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();
	IFNET_RUNLOCK();
	splx(s);
	/* Re-arm ourselves; runs IFNET_SLOWHZ times per second. */
	timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ);
}

/*
 * Map interface name to interface structure pointer, with or without
 * returning a reference.
 */

/*
 * Look up an ifnet by name, skipping interfaces marked IFF_DYING,
 * and return it with a reference held (caller must if_rele()).
 */
struct ifnet *
ifunit_ref(const char *name)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0 &&
		    !(ifp->if_flags & IFF_DYING))
			break;
	}
	if (ifp != NULL)
		if_ref(ifp);
	IFNET_RUNLOCK();
	return (ifp);
}

/* Look up an ifnet by name without taking a reference. */
struct ifnet *
ifunit(const char *name)
{
	INIT_VNET_NET(curvnet);
	struct ifnet *ifp;

	IFNET_RLOCK();
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
			break;
	}
	IFNET_RUNLOCK();
	return (ifp);
}

/*
 * Hardware specific interface ioctls.
 *
 * Handles the generic get/set ioctls on an ifnet; returns ENOIOCTL for
 * commands it does not understand so ifioctl() can pass them on to the
 * protocol and the driver.
 */
static int
ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
{
	struct ifreq *ifr;
	struct ifstat *ifs;
	int error = 0;
	int new_flags, temp_flags;
	size_t namelen, onamelen;
	char new_name[IFNAMSIZ];
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	ifr = (struct ifreq *)data;
	switch (cmd) {
	case SIOCGIFINDEX:
		ifr->ifr_index = ifp->if_index;
		break;

	case SIOCGIFFLAGS:
		/* if_flags and if_drv_flags are packed into one 32-bit word. */
		temp_flags = ifp->if_flags | ifp->if_drv_flags;
		ifr->ifr_flags = temp_flags & 0xffff;
		ifr->ifr_flagshigh = temp_flags >> 16;
		break;

	case SIOCGIFCAP:
		ifr->ifr_reqcap = ifp->if_capabilities;
		ifr->ifr_curcap = ifp->if_capenable;
		break;

#ifdef MAC
	case SIOCGIFMAC:
		error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp);
		break;
#endif

	case SIOCGIFMETRIC:
		ifr->ifr_metric = ifp->if_metric;
		break;

	case SIOCGIFMTU:
		ifr->ifr_mtu = ifp->if_mtu;
		break;

	case SIOCGIFPHYS:
		ifr->ifr_phys = ifp->if_physical;
		break;

	case SIOCSIFFLAGS:
		error = priv_check(td, PRIV_NET_SETIFFLAGS);
		if (error)
			return (error);
		/*
		 * Currently, no driver owned flags pass the IFF_CANTCHANGE
		 * check, so we don't need special handling here yet.
		 */
		new_flags = (ifr->ifr_flags & 0xffff) |
		    (ifr->ifr_flagshigh << 16);
		if (ifp->if_flags & IFF_SMART) {
			/* Smart drivers twiddle their own routes */
		} else if (ifp->if_flags & IFF_UP &&
		    (new_flags & IFF_UP) == 0) {
			int s = splimp();
			if_down(ifp);
			splx(s);
		} else if (new_flags & IFF_UP &&
		    (ifp->if_flags & IFF_UP) == 0) {
			int s = splimp();
			if_up(ifp);
			splx(s);
		}
		/* See if permanently promiscuous mode bit is about to flip */
		if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
			if (new_flags & IFF_PPROMISC)
				ifp->if_flags |= IFF_PROMISC;
			else if (ifp->if_pcount == 0)
				ifp->if_flags &= ~IFF_PROMISC;
			log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
			    ifp->if_xname,
			    (new_flags & IFF_PPROMISC) ? "enabled" : "disabled");
		}
		/* Keep the IFF_CANTCHANGE bits, take the rest from userland. */
		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
			(new_flags &~ IFF_CANTCHANGE);
		if (ifp->if_ioctl) {
			(void) (*ifp->if_ioctl)(ifp, cmd, data);
		}
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFCAP:
		error = priv_check(td, PRIV_NET_SETIFCAP);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		/* Only capabilities the driver advertises may be enabled. */
		if (ifr->ifr_reqcap & ~ifp->if_capabilities)
			return (EINVAL);
		error = (*ifp->if_ioctl)(ifp, cmd, data);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

#ifdef MAC
	case SIOCSIFMAC:
		error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp);
		break;
#endif

	case SIOCSIFNAME:
		error = priv_check(td, PRIV_NET_SETIFNAME);
		if (error)
			return (error);
		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
		if (error != 0)
			return (error);
		if (new_name[0] == '\0')
			return (EINVAL);
		if (ifunit(new_name) != NULL)
			return (EEXIST);

		/* Announce the departure of the interface. */
		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
		EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);

		log(LOG_INFO, "%s: changing name to '%s'\n",
		    ifp->if_xname, new_name);

		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
		ifa = ifp->if_addr;
		IFA_LOCK(ifa);
		/* Patch the new name into the link-level sockaddr. */
		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
		namelen = strlen(new_name);
		onamelen = sdl->sdl_nlen;
		/*
		 * Move the address if needed.  This is safe because we
		 * allocate space for a name of length IFNAMSIZ when we
		 * create this in if_attach().
		 */
		if (namelen != onamelen) {
			bcopy(sdl->sdl_data + onamelen,
			    sdl->sdl_data + namelen, sdl->sdl_alen);
		}
		bcopy(new_name, sdl->sdl_data, namelen);
		sdl->sdl_nlen = namelen;
		/* The netmask's name bytes are all-ones over the name. */
		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
		bzero(sdl->sdl_data, onamelen);
		while (namelen != 0)
			sdl->sdl_data[--namelen] = 0xff;
		IFA_UNLOCK(ifa);

		EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
		/* Announce the return of the interface. */
		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
		break;

#ifdef VIMAGE
	case SIOCSIFVNET:
		error = priv_check(td, PRIV_NET_SETIFVNET);
		if (error)
			return (error);
		error = vi_if_move(td, ifp, ifr->ifr_name, ifr->ifr_jid, NULL);
		break;
#endif

	case SIOCSIFMETRIC:
		error = priv_check(td, PRIV_NET_SETIFMETRIC);
		if (error)
			return (error);
		ifp->if_metric = ifr->ifr_metric;
		getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYS:
		error = priv_check(td, PRIV_NET_SETIFPHYS);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		error = (*ifp->if_ioctl)(ifp, cmd, data);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFMTU:
	{
		u_long oldmtu = ifp->if_mtu;

		error = priv_check(td, PRIV_NET_SETIFMTU);
		if (error)
			return (error);
		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
			return (EINVAL);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		error = (*ifp->if_ioctl)(ifp, cmd, data);
		if (error == 0) {
			getmicrotime(&ifp->if_lastchange);
			rt_ifmsg(ifp);
		}
		/*
		 * If the link MTU changed, do network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu) {
#ifdef INET6
			nd6_setmtu(ifp);
#endif
		}
		break;
	}

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if (cmd == SIOCADDMULTI)
			error = priv_check(td, PRIV_NET_ADDMULTI);
		else
			error = priv_check(td, PRIV_NET_DELMULTI);
		if (error)
			return (error);

		/* Don't allow group membership on non-multicast interfaces. */
		if ((ifp->if_flags & IFF_MULTICAST) == 0)
			return (EOPNOTSUPP);

		/* Don't let users screw up protocols' entries. */
		if (ifr->ifr_addr.sa_family != AF_LINK)
			return (EINVAL);

		if (cmd == SIOCADDMULTI) {
			struct ifmultiaddr *ifma;

			/*
			 * Userland is only permitted to join groups once
			 * via the if_addmulti() KPI, because it cannot hold
			 * struct ifmultiaddr * between calls. It may also
			 * lose a race while we check if the membership
			 * already exists.
			 */
			IF_ADDR_LOCK(ifp);
			ifma = if_findmulti(ifp, &ifr->ifr_addr);
			IF_ADDR_UNLOCK(ifp);
			if (ifma != NULL)
				error = EADDRINUSE;
			else
				error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
		} else {
			error = if_delmulti(ifp, &ifr->ifr_addr);
		}
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCSIFPHYADDR:
	case SIOCDIFPHYADDR:
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
#endif
	case SIOCSLIFPHYADDR:
	case SIOCSIFMEDIA:
	case SIOCSIFGENERIC:
		error = priv_check(td, PRIV_NET_HWIOCTL);
		if (error)
			return (error);
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		error = (*ifp->if_ioctl)(ifp, cmd, data);
		if (error == 0)
			getmicrotime(&ifp->if_lastchange);
		break;

	case SIOCGIFSTATUS:
		ifs = (struct ifstat *)data;
		ifs->ascii[0] = '\0';
		/* FALLTHROUGH: the driver fills in the status text. */
	case SIOCGIFPSRCADDR:
	case SIOCGIFPDSTADDR:
	case SIOCGLIFPHYADDR:
	case SIOCGIFMEDIA:
	case SIOCGIFGENERIC:
		if (ifp->if_ioctl == NULL)
			return (EOPNOTSUPP);
		error = (*ifp->if_ioctl)(ifp, cmd, data);
		break;

	case SIOCSIFLLADDR:
		error = priv_check(td, PRIV_NET_SETLLADDR);
		if (error)
			return (error);
		error = if_setlladdr(ifp,
		    ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
		break;

	case SIOCAIFGROUP:
	{
		struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;

		error = priv_check(td, PRIV_NET_ADDIFGROUP);
		if (error)
			return (error);
		if ((error = if_addgroup(ifp, ifgr->ifgr_group)))
			return (error);
		break;
	}

	case SIOCGIFGROUP:
		if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp)))
			return (error);
		break;

	case SIOCDIFGROUP:
	{
		struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;

		error = priv_check(td, PRIV_NET_DELIFGROUP);
		if (error)
			return (error);
		if ((error = if_delgroup(ifp, ifgr->ifgr_group)))
			return (error);
		break;
	}

	default:
		/* Not handled here; caller passes it on to protocol/driver. */
		error = ENOIOCTL;
		break;
	}
	return (error);
}

/*
 * Interface ioctls.
 *
 * Top-level dispatcher: handles the interface-less commands directly,
 * then resolves ifr_name to an ifnet and tries ifhwioctl(); whatever
 * comes back ENOIOCTL is passed to the socket's protocol and, as a
 * last resort, to the driver (with 4.3BSD compat translation when
 * COMPAT_43 is configured).
 */
int
ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
{
	struct ifnet *ifp;
	struct ifreq *ifr;
	int error;
	int oif_flags;

	switch (cmd) {
	case SIOCGIFCONF:
	case OSIOCGIFCONF:
#ifdef __amd64__
	case SIOCGIFCONF32:
#endif
		return (ifconf(cmd, data));
	}
	ifr = (struct ifreq *)data;

	switch (cmd) {
#ifdef VIMAGE
	case SIOCSIFRVNET:
		error = priv_check(td, PRIV_NET_SETIFVNET);
		if (error)
			return (error);
		return (vi_if_move(td, NULL, ifr->ifr_name, ifr->ifr_jid,
		    NULL));
	/*
	 * XXX vnet creation will be implemented through the new jail
	 * framework - this is just a temporary hack for testing the
	 * vnet create / destroy mechanisms.
	 */
	case SIOCSIFVIMAGE:
		error = vi_if_move(td, NULL, NULL, 0, (struct vi_req *) data);
		return (error);
	case SIOCSPVIMAGE:
	case SIOCGPVIMAGE:
		error = vi_td_ioctl(cmd, (struct vi_req *) data, td);
		return (error);
#endif
	case SIOCIFCREATE:
	case SIOCIFCREATE2:
		error = priv_check(td, PRIV_NET_IFCREATE);
		if (error)
			return (error);
		return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
			cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
	case SIOCIFDESTROY:
		error = priv_check(td, PRIV_NET_IFDESTROY);
		if (error)
			return (error);
		return if_clone_destroy(ifr->ifr_name);

	case SIOCIFGCLONERS:
		return (if_clone_list((struct if_clonereq *)data));
	case SIOCGIFGMEMB:
		return (if_getgroupmembers((struct ifgroupreq *)data));
	}

	/* All remaining commands operate on a named interface. */
	ifp = ifunit_ref(ifr->ifr_name);
	if (ifp == NULL)
		return (ENXIO);

	error = ifhwioctl(cmd, ifp, data, td);
	if (error != ENOIOCTL) {
		if_rele(ifp);
		return (error);
	}

	oif_flags = ifp->if_flags;
	if (so->so_proto == NULL) {
		if_rele(ifp);
		return (EOPNOTSUPP);
	}
#ifndef COMPAT_43
	error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
								 data,
								 ifp, td));
	if (error == EOPNOTSUPP && ifp != NULL && ifp->if_ioctl != NULL)
		error = (*ifp->if_ioctl)(ifp, cmd, data);
#else
	{
		/*
		 * 4.3BSD compatibility: translate old-style commands and
		 * sockaddr layout (no sa_len) before handing down, and
		 * convert the result back afterwards.
		 */
		u_long ocmd = cmd;

		switch (cmd) {

		case SIOCSIFDSTADDR:
		case SIOCSIFADDR:
		case SIOCSIFBRDADDR:
		case SIOCSIFNETMASK:
#if BYTE_ORDER != BIG_ENDIAN
			if (ifr->ifr_addr.sa_family == 0 &&
			    ifr->ifr_addr.sa_len < 16) {
				ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
				ifr->ifr_addr.sa_len = 16;
			}
#else
			if (ifr->ifr_addr.sa_len == 0)
				ifr->ifr_addr.sa_len = 16;
#endif
			break;

		case OSIOCGIFADDR:
			cmd = SIOCGIFADDR;
			break;

		case OSIOCGIFDSTADDR:
			cmd = SIOCGIFDSTADDR;
			break;

		case OSIOCGIFBRDADDR:
			cmd = SIOCGIFBRDADDR;
			break;

		case OSIOCGIFNETMASK:
			cmd = SIOCGIFNETMASK;
		}
		error = ((*so->so_proto->pr_usrreqs->pru_control)(so,
								  cmd,
								  data,
								  ifp, td));
		if (error == EOPNOTSUPP && ifp != NULL &&
		    ifp->if_ioctl != NULL)
			error = (*ifp->if_ioctl)(ifp, cmd, data);
		switch (ocmd) {

		case OSIOCGIFADDR:
		case OSIOCGIFDSTADDR:
		case OSIOCGIFBRDADDR:
		case OSIOCGIFNETMASK:
			/* Old ABI: first u_short is the family, no sa_len. */
			*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;

		}
	}
#endif /* COMPAT_43 */

	if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
#ifdef INET6
		if (ifp->if_flags & IFF_UP) {
			int s = splimp();
			in6_if_up(ifp);
			splx(s);
		}
#endif
	}
	if_rele(ifp);
	return (error);
}

/*
 * The code common to handling reference counted flags,
 * e.g., in ifpromisc() and if_allmulti().
 * The "pflag" argument can specify a permanent mode flag to check,
 * such as IFF_PPROMISC for promiscuous mode; should be 0 if none.
 *
 * Only to be used on stack-owned flags, not driver-owned flags.
 */
static int
if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
{
	struct ifreq ifr;
	int error;
	int oldflags, oldcount;

	/* Sanity checks to catch programming errors */
	KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
	    ("%s: setting driver-owned flag %d", __func__, flag));

	if (onswitch)
		KASSERT(*refcount >= 0,
		    ("%s: increment negative refcount %d for flag %d",
		    __func__, *refcount, flag));
	else
		KASSERT(*refcount > 0,
		    ("%s: decrement non-positive refcount %d for flag %d",
		    __func__, *refcount, flag));

	/* In case this mode is permanent, just touch refcount */
	if (ifp->if_flags & pflag) {
		*refcount += onswitch ? 1 : -1;
		return (0);
	}

	/* Save ifnet parameters for if_ioctl() may fail */
	oldcount = *refcount;
	oldflags = ifp->if_flags;

	/*
	 * See if we aren't the only and touching refcount is enough.
	 * Actually toggle interface flag if we are the first or last.
2520 */ 2521 if (onswitch) { 2522 if ((*refcount)++) 2523 return (0); 2524 ifp->if_flags |= flag; 2525 } else { 2526 if (--(*refcount)) 2527 return (0); 2528 ifp->if_flags &= ~flag; 2529 } 2530 2531 /* Call down the driver since we've changed interface flags */ 2532 if (ifp->if_ioctl == NULL) { 2533 error = EOPNOTSUPP; 2534 goto recover; 2535 } 2536 ifr.ifr_flags = ifp->if_flags & 0xffff; 2537 ifr.ifr_flagshigh = ifp->if_flags >> 16; 2538 error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr); 2539 if (error) 2540 goto recover; 2541 /* Notify userland that interface flags have changed */ 2542 rt_ifmsg(ifp); 2543 return (0); 2544 2545recover: 2546 /* Recover after driver error */ 2547 *refcount = oldcount; 2548 ifp->if_flags = oldflags; 2549 return (error); 2550} 2551 2552/* 2553 * Set/clear promiscuous mode on interface ifp based on the truth value 2554 * of pswitch. The calls are reference counted so that only the first 2555 * "on" request actually has an effect, as does the final "off" request. 2556 * Results are undefined if the "off" and "on" requests are not matched. 2557 */ 2558int 2559ifpromisc(struct ifnet *ifp, int pswitch) 2560{ 2561 int error; 2562 int oldflags = ifp->if_flags; 2563 2564 error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC, 2565 &ifp->if_pcount, pswitch); 2566 /* If promiscuous mode status has changed, log a message */ 2567 if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC)) 2568 log(LOG_INFO, "%s: promiscuous mode %s\n", 2569 ifp->if_xname, 2570 (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled"); 2571 return (error); 2572} 2573 2574/* 2575 * Return interface configuration 2576 * of system. List may be used 2577 * in later ioctl's (above) to get 2578 * other information. 
 */
/*ARGSUSED*/
static int
ifconf(u_long cmd, caddr_t data)
{
	INIT_VNET_NET(curvnet);
	struct ifconf *ifc = (struct ifconf *)data;
#ifdef __amd64__
	struct ifconf32 *ifc32 = (struct ifconf32 *)data;
	struct ifconf ifc_swab;	/* local 64-bit view of a 32-bit request */
#endif
	struct ifnet *ifp;
	struct ifaddr *ifa;
	struct ifreq ifr;
	struct sbuf *sb;
	int error, full = 0, valid_len, max_len;

#ifdef __amd64__
	/* Thunk a 32-bit SIOCGIFCONF32 request into a native struct ifconf. */
	if (cmd == SIOCGIFCONF32) {
		ifc_swab.ifc_len = ifc32->ifc_len;
		ifc_swab.ifc_buf = (caddr_t)(uintptr_t)ifc32->ifc_buf;
		ifc = &ifc_swab;
	}
#endif
	/* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
	max_len = MAXPHYS - 1;

	/* Prevent hostile input from being able to crash the system */
	if (ifc->ifc_len <= 0)
		return (EINVAL);

	/*
	 * Build the result in a fixed-size sbuf.  On the first pass
	 * "max_len" accumulates the space the full answer would need; if the
	 * sbuf overflowed (valid_len != max_len) and we have not yet hit the
	 * user-imposed limit ("full"), loop back and retry with the larger
	 * size.
	 */
again:
	if (ifc->ifc_len <= max_len) {
		max_len = ifc->ifc_len;
		full = 1;
	}
	sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN);
	max_len = 0;
	valid_len = 0;

	IFNET_RLOCK();		/* could sleep XXX */
	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		int addrs;

		/*
		 * Zero the ifr_name buffer to make sure we don't
		 * disclose the contents of the stack.
		 */
		memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name));

		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
		    >= sizeof(ifr.ifr_name)) {
			sbuf_delete(sb);
			IFNET_RUNLOCK();
			return (ENAMETOOLONG);
		}

		addrs = 0;
		IF_ADDR_LOCK(ifp);
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			struct sockaddr *sa = ifa->ifa_addr;

			/* Skip addresses the calling jail may not see. */
			if (prison_if(curthread->td_ucred, sa) != 0)
				continue;
			addrs++;
#ifdef COMPAT_43
			if (cmd == OSIOCGIFCONF) {
				/* Old-style sockaddr: 16-bit family, no len. */
				struct osockaddr *osa =
					 (struct osockaddr *)&ifr.ifr_addr;
				ifr.ifr_addr = *sa;
				osa->sa_family = sa->sa_family;
				sbuf_bcat(sb, &ifr, sizeof(ifr));
				max_len += sizeof(ifr);
			} else
#endif
			if (sa->sa_len <= sizeof(*sa)) {
				/* Fits inline in ifr_addr. */
				ifr.ifr_addr = *sa;
				sbuf_bcat(sb, &ifr, sizeof(ifr));
				max_len += sizeof(ifr);
			} else {
				/*
				 * Oversized sockaddr: emit the ifreq header
				 * followed by the variable-length sockaddr.
				 */
				sbuf_bcat(sb, &ifr,
				    offsetof(struct ifreq, ifr_addr));
				max_len += offsetof(struct ifreq, ifr_addr);
				sbuf_bcat(sb, sa, sa->sa_len);
				max_len += sa->sa_len;
			}

			if (!sbuf_overflowed(sb))
				valid_len = sbuf_len(sb);
		}
		IF_ADDR_UNLOCK(ifp);
		if (addrs == 0) {
			/* Addressless interface: emit one entry, zero addr. */
			bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr));
			sbuf_bcat(sb, &ifr, sizeof(ifr));
			max_len += sizeof(ifr);

			if (!sbuf_overflowed(sb))
				valid_len = sbuf_len(sb);
		}
	}
	IFNET_RUNLOCK();

	/*
	 * If we didn't allocate enough space (uncommon), try again.  If
	 * we have already allocated as much space as we are allowed,
	 * return what we've got.
	 */
	if (valid_len != max_len && !full) {
		sbuf_delete(sb);
		goto again;
	}

	ifc->ifc_len = valid_len;
#ifdef __amd64__
	if (cmd == SIOCGIFCONF32)
		ifc32->ifc_len = valid_len;
#endif
	sbuf_finish(sb);
	error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len);
	sbuf_delete(sb);
	return (error);
}

/*
 * Just like ifpromisc(), but for all-multicast-reception mode.
 */
int
if_allmulti(struct ifnet *ifp, int onswitch)
{

	return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
}

/*
 * Look up a multicast address on the interface's list.
 * AF_LINK addresses compare by sockaddr_dl contents (sa_dl_equal);
 * everything else compares the sockaddr bytes (sa_equal).
 * Returns NULL if not found.  Caller must hold the address lock.
 */
struct ifmultiaddr *
if_findmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;

	IF_ADDR_LOCK_ASSERT(ifp);

	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (sa->sa_family == AF_LINK) {
			if (sa_dl_equal(ifma->ifma_addr, sa))
				break;
		} else {
			if (sa_equal(ifma->ifma_addr, sa))
				break;
		}
	}

	return ifma;
}

/*
 * Allocate a new ifmultiaddr and initialize based on passed arguments.  We
 * make copies of passed sockaddrs.  The ifmultiaddr will not be added to
 * the ifnet multicast address list here, so the caller must do that and
 * other setup work (such as notifying the device driver).  The reference
 * count is initialized to 1.
 * Returns NULL on allocation failure (nothing is leaked in that case).
 */
static struct ifmultiaddr *
if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
    int mflags)
{
	struct ifmultiaddr *ifma;
	struct sockaddr *dupsa;

	ifma = malloc(sizeof *ifma, M_IFMADDR, mflags |
	    M_ZERO);
	if (ifma == NULL)
		return (NULL);

	/* Private copy of the protocol-layer address. */
	dupsa = malloc(sa->sa_len, M_IFMADDR, mflags);
	if (dupsa == NULL) {
		free(ifma, M_IFMADDR);
		return (NULL);
	}
	bcopy(sa, dupsa, sa->sa_len);
	ifma->ifma_addr = dupsa;

	ifma->ifma_ifp = ifp;
	ifma->ifma_refcount = 1;
	ifma->ifma_protospec = NULL;

	if (llsa == NULL) {
		ifma->ifma_lladdr = NULL;
		return (ifma);
	}

	/* Private copy of the link-layer address, if one was supplied. */
	dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags);
	if (dupsa == NULL) {
		free(ifma->ifma_addr, M_IFMADDR);
		free(ifma, M_IFMADDR);
		return (NULL);
	}
	bcopy(llsa, dupsa, llsa->sa_len);
	ifma->ifma_lladdr = dupsa;

	return (ifma);
}

/*
 * if_freemulti: free ifmultiaddr structure and possibly attached related
 * addresses.  The caller is responsible for implementing reference
 * counting, notifying the driver, handling routing messages, and releasing
 * any dependent link layer state.
 */
static void
if_freemulti(struct ifmultiaddr *ifma)
{

	KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d",
	    ifma->ifma_refcount));
	KASSERT(ifma->ifma_protospec == NULL,
	    ("if_freemulti: protospec not NULL"));

	if (ifma->ifma_lladdr != NULL)
		free(ifma->ifma_lladdr, M_IFMADDR);
	free(ifma->ifma_addr, M_IFMADDR);
	free(ifma, M_IFMADDR);
}

/*
 * Register an additional multicast address with a network interface.
 *
 * - If the address is already present, bump the reference count on the
 *   address and return.
 * - If the address is not link-layer, look up a link layer address.
 * - Allocate address structures for one or both addresses, and attach to the
 *   multicast address list on the interface.  If automatically adding a link
 *   layer address, the protocol address will own a reference to the link
 *   layer address, to be freed when it is freed.
 * - Notify the network device driver of an addition to the multicast address
 *   list.
 *
 * 'sa' points to caller-owned memory with the desired multicast address.
 *
 * 'retifma' will be used to return a pointer to the resulting multicast
 * address reference, if desired.
 *
 * Returns 0 on success, or an errno (e.g. ENOMEM, or whatever
 * if_resolvemulti reports) on failure.
 */
int
if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
    struct ifmultiaddr **retifma)
{
	struct ifmultiaddr *ifma, *ll_ifma;
	struct sockaddr *llsa;
	int error;

	/*
	 * If the address is already present, return a new reference to it;
	 * otherwise, allocate storage and set up a new address.
	 */
	IF_ADDR_LOCK(ifp);
	ifma = if_findmulti(ifp, sa);
	if (ifma != NULL) {
		ifma->ifma_refcount++;
		if (retifma != NULL)
			*retifma = ifma;
		IF_ADDR_UNLOCK(ifp);
		return (0);
	}

	/*
	 * The address isn't already present; resolve the protocol address
	 * into a link layer address, and then look that up, bump its
	 * refcount or allocate an ifma for that also.  If 'llsa' was
	 * returned, we will need to free it later.
	 */
	llsa = NULL;
	ll_ifma = NULL;
	if (ifp->if_resolvemulti != NULL) {
		error = ifp->if_resolvemulti(ifp, &llsa, sa);
		if (error)
			goto unlock_out;
	}

	/*
	 * Allocate the new address.  Don't hook it up yet, as we may also
	 * need to allocate a link layer multicast address.
	 * M_NOWAIT: we hold the address lock and must not sleep.
	 */
	ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT);
	if (ifma == NULL) {
		error = ENOMEM;
		goto free_llsa_out;
	}

	/*
	 * If a link layer address is found, we'll need to see if it's
	 * already present in the address list, or allocate is as well.
	 * When this block finishes, the link layer address will be on the
	 * list.
	 */
	if (llsa != NULL) {
		ll_ifma = if_findmulti(ifp, llsa);
		if (ll_ifma == NULL) {
			ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT);
			if (ll_ifma == NULL) {
				/* Drop our sole reference and free 'ifma'. */
				--ifma->ifma_refcount;
				if_freemulti(ifma);
				error = ENOMEM;
				goto free_llsa_out;
			}
			TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma,
			    ifma_link);
		} else
			ll_ifma->ifma_refcount++;
		ifma->ifma_llifma = ll_ifma;
	}

	/*
	 * We now have a new multicast address, ifma, and possibly a new or
	 * referenced link layer address.  Add the primary address to the
	 * ifnet address list.
	 */
	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);

	if (retifma != NULL)
		*retifma = ifma;

	/*
	 * Must generate the message while holding the lock so that 'ifma'
	 * pointer is still valid.
	 */
	rt_newmaddrmsg(RTM_NEWMADDR, ifma);
	IF_ADDR_UNLOCK(ifp);

	/*
	 * We are certain we have added something, so call down to the
	 * interface to let them know about it.
	 */
	if (ifp->if_ioctl != NULL) {
		(void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0);
	}

	/* if_resolvemulti() allocated llsa on our behalf; release it. */
	if (llsa != NULL)
		free(llsa, M_IFMADDR);

	return (0);

free_llsa_out:
	if (llsa != NULL)
		free(llsa, M_IFMADDR);

unlock_out:
	IF_ADDR_UNLOCK(ifp);
	return (error);
}

/*
 * Delete a multicast group membership by network-layer group address.
 *
 * Returns ENOENT if the entry could not be found.  If ifp no longer
 * exists, results are undefined.
 * This entry point should only be used
 * from subsystems which do appropriate locking to hold ifp for the
 * duration of the call.
 * Network-layer protocol domains must use if_delmulti_ifma().
 */
int
if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
{
	struct ifmultiaddr *ifma;
	int lastref;
#ifdef INVARIANTS
	/*
	 * Debug build only: verify the caller's ifp is still on the global
	 * interface list; panic via KASSERT if it has gone away.
	 */
	struct ifnet *oifp;
	INIT_VNET_NET(ifp->if_vnet);

	IFNET_RLOCK();
	TAILQ_FOREACH(oifp, &V_ifnet, if_link)
		if (ifp == oifp)
			break;
	if (ifp != oifp)
		ifp = NULL;
	IFNET_RUNLOCK();

	KASSERT(ifp != NULL, ("%s: ifnet went away", __func__));
#endif
	if (ifp == NULL)
		return (ENOENT);

	IF_ADDR_LOCK(ifp);
	lastref = 0;
	ifma = if_findmulti(ifp, sa);
	if (ifma != NULL)
		lastref = if_delmulti_locked(ifp, ifma, 0);
	IF_ADDR_UNLOCK(ifp);

	if (ifma == NULL)
		return (ENOENT);

	/* Last reference dropped: tell the driver to update its filter. */
	if (lastref && ifp->if_ioctl != NULL) {
		(void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
	}

	return (0);
}

/*
 * Delete a multicast group membership by group membership pointer.
 * Network-layer protocol domains must use this routine.
 *
 * It is safe to call this routine if the ifp disappeared.
 */
void
if_delmulti_ifma(struct ifmultiaddr *ifma)
{
#ifdef DIAGNOSTIC
	INIT_VNET_NET(curvnet);
#endif
	struct ifnet *ifp;
	int lastref;

	ifp = ifma->ifma_ifp;
#ifdef DIAGNOSTIC
	/* Debug build: report (and tolerate) a vanished ifnet. */
	if (ifp == NULL) {
		printf("%s: ifma_ifp seems to be detached\n", __func__);
	} else {
		struct ifnet *oifp;

		IFNET_RLOCK();
		TAILQ_FOREACH(oifp, &V_ifnet, if_link)
			if (ifp == oifp)
				break;
		if (ifp != oifp) {
			printf("%s: ifnet %p disappeared\n", __func__, ifp);
			ifp = NULL;
		}
		IFNET_RUNLOCK();
	}
#endif
	/*
	 * If and only if the ifnet instance exists: Acquire the address lock.
	 */
	if (ifp != NULL)
		IF_ADDR_LOCK(ifp);

	lastref = if_delmulti_locked(ifp, ifma, 0);

	if (ifp != NULL) {
		/*
		 * If and only if the ifnet instance exists:
		 * Release the address lock.
		 * If the group was left: update the hardware hash filter.
		 */
		IF_ADDR_UNLOCK(ifp);
		if (lastref && ifp->if_ioctl != NULL) {
			(void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
		}
	}
}

/*
 * Perform deletion of network-layer and/or link-layer multicast address.
 *
 * Return 0 if the reference count was decremented.
 * Return 1 if the final reference was released, indicating that the
 * hardware hash filter should be reprogrammed.
 */
static int
if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching)
{
	struct ifmultiaddr *ll_ifma;

	if (ifp != NULL && ifma->ifma_ifp != NULL) {
		KASSERT(ifma->ifma_ifp == ifp,
		    ("%s: inconsistent ifp %p", __func__, ifp));
		IF_ADDR_LOCK_ASSERT(ifp);
	}

	/* From here on, trust the ifma's own back-pointer (may be NULL). */
	ifp = ifma->ifma_ifp;

	/*
	 * If the ifnet is detaching, null out references to ifnet,
	 * so that upper protocol layers will notice, and not attempt
	 * to obtain locks for an ifnet which no longer exists. The
	 * routing socket announcement must happen before the ifnet
	 * instance is detached from the system.
	 */
	if (detaching) {
#ifdef DIAGNOSTIC
		printf("%s: detaching ifnet instance %p\n", __func__, ifp);
#endif
		/*
		 * ifp may already be nulled out if we are being reentered
		 * to delete the ll_ifma.
		 */
		if (ifp != NULL) {
			rt_newmaddrmsg(RTM_DELMADDR, ifma);
			ifma->ifma_ifp = NULL;
		}
	}

	if (--ifma->ifma_refcount > 0)
		return 0;

	/*
	 * If this ifma is a network-layer ifma, a link-layer ifma may
	 * have been associated with it.  Release it first if so.
	 */
	ll_ifma = ifma->ifma_llifma;
	if (ll_ifma != NULL) {
		KASSERT(ifma->ifma_lladdr != NULL,
		    ("%s: llifma w/o lladdr", __func__));
		if (detaching)
			ll_ifma->ifma_ifp = NULL;	/* XXX */
		if (--ll_ifma->ifma_refcount == 0) {
			if (ifp != NULL) {
				TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma,
				    ifma_link);
			}
			if_freemulti(ll_ifma);
		}
	}

	if (ifp != NULL)
		TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);

	if_freemulti(ifma);

	/*
	 * The last reference to this instance of struct ifmultiaddr
	 * was released; the hardware should be notified of this change.
	 */
	return 1;
}

/*
 * Set the link layer address on an interface.
 *
 * At this time we only support certain types of interfaces,
 * and we don't allow the length of the address to change.
 */
int
if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
{
	struct sockaddr_dl *sdl;
	struct ifaddr *ifa;
	struct ifreq ifr;

	ifa = ifp->if_addr;
	if (ifa == NULL)
		return (EINVAL);
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	if (sdl == NULL)
		return (EINVAL);
	if (len != sdl->sdl_alen)	/* don't allow length to change */
		return (EINVAL);
	switch (ifp->if_type) {
	case IFT_ETHER:
	case IFT_FDDI:
	case IFT_XETHER:
	case IFT_ISO88025:
	case IFT_L2VLAN:
	case IFT_BRIDGE:
	case IFT_ARCNET:
	case IFT_IEEE8023ADLAG:
	case IFT_IEEE80211:
		bcopy(lladdr, LLADDR(sdl), len);
		break;
	default:
		return (ENODEV);
	}
	/*
	 * If the interface is already up, we need
	 * to re-init it in order to reprogram its
	 * address filter.
	 */
	if ((ifp->if_flags & IFF_UP) != 0) {
		if (ifp->if_ioctl) {
			/* Bounce the interface: down, then back up. */
			ifp->if_flags &= ~IFF_UP;
			ifr.ifr_flags = ifp->if_flags & 0xffff;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
			ifp->if_flags |= IFF_UP;
			ifr.ifr_flags = ifp->if_flags & 0xffff;
			ifr.ifr_flagshigh = ifp->if_flags >> 16;
			(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
		}
#ifdef INET
		/*
		 * Also send gratuitous ARPs to notify other nodes about
		 * the address change.
		 */
		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family == AF_INET)
				arp_ifinit(ifp, ifa);
		}
#endif
	}
	return (0);
}

/*
 * The name argument must be a pointer to storage which will last as
 * long as the interface does.  For physical devices, the result of
 * device_get_name(dev) is a good choice and for pseudo-devices a
 * static string works well.
 */
void
if_initname(struct ifnet *ifp, const char *name, int unit)
{
	ifp->if_dname = name;
	ifp->if_dunit = unit;
	if (unit != IF_DUNIT_NONE)
		snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
	else
		strlcpy(ifp->if_xname, name, IFNAMSIZ);
}

/*
 * printf() prefixed with the interface name; returns the number of
 * characters written.
 */
int
if_printf(struct ifnet *ifp, const char * fmt, ...)
{
	va_list ap;
	int retval;

	retval = printf("%s: ", ifp->if_xname);
	va_start(ap, fmt);
	retval += vprintf(fmt, ap);
	va_end(ap);
	return (retval);
}

/*
 * Invoke the interface's transmit-start routine.
 */
void
if_start(struct ifnet *ifp)
{

	(*(ifp)->if_start)(ifp);
}

/*
 * Backwards compatibility interface for drivers
 * that have not implemented it
 */
static int
if_transmit(struct ifnet *ifp, struct mbuf *m)
{
	int error;

	IFQ_HANDOFF(ifp, m, error);
	return (error);
}

/*
 * Enqueue mbuf 'm' on 'ifq' and, if the interface is not already active,
 * kick its start routine.  'adjust' is added to the byte count accounted
 * to if_obytes.  Returns 1 if the packet was queued, 0 if it was dropped
 * (and freed) because the queue was full.
 */
int
if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
{
	int active = 0;

	IF_LOCK(ifq);
	if (_IF_QFULL(ifq)) {
		/* Queue full: account the drop and free the mbuf. */
		_IF_DROP(ifq);
		IF_UNLOCK(ifq);
		m_freem(m);
		return (0);
	}
	if (ifp != NULL) {
		/* Update output statistics before handing the packet off. */
		ifp->if_obytes += m->m_pkthdr.len + adjust;
		if (m->m_flags & (M_BCAST|M_MCAST))
			ifp->if_omcasts++;
		active = ifp->if_drv_flags & IFF_DRV_OACTIVE;
	}
	_IF_ENQUEUE(ifq, m);
	IF_UNLOCK(ifq);
	/* Only poke the driver if it was idle; it drains the queue itself. */
	if (ifp != NULL && !active)
		(*(ifp)->if_start)(ifp);
	return (1);
}

/*
 * Register per-if_type common-layer alloc/free routines.
 * Each type may be registered at most once (enforced by KASSERT).
 */
void
if_register_com_alloc(u_char type,
    if_com_alloc_t *a, if_com_free_t *f)
{

	KASSERT(if_com_alloc[type] == NULL,
	    ("if_register_com_alloc: %d already registered", type));
	KASSERT(if_com_free[type] == NULL,
	    ("if_register_com_alloc: %d free already registered", type));

	if_com_alloc[type] = a;
	if_com_free[type] = f;
}

/*
 * Undo if_register_com_alloc() for the given interface type.
 */
void
if_deregister_com_alloc(u_char type)
{

	KASSERT(if_com_alloc[type] != NULL,
	    ("if_deregister_com_alloc: %d not registered", type));
	KASSERT(if_com_free[type] != NULL,
	    ("if_deregister_com_alloc: %d free not registered", type));
	if_com_alloc[type] = NULL;
	if_com_free[type] = NULL;
}