/*
 * Copyright (c) 2000-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mcache.h>

#include <dev/random/randomdev.h>

#include <kern/zalloc.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>

/* Lock group and attribute for mld_mtx */
static lck_attr_t *mld_mtx_attr;
static lck_grp_t *mld_mtx_grp;
static lck_grp_attr_t *mld_mtx_grp_attr;

/*
 * Locking and reference counting:
 *
 * mld_mtx mainly protects mli_head.  In cases where both mld_mtx and
 * in6_multihead_lock must be held, the former must be acquired first in order
 * to maintain lock ordering.  It is not a requirement that mld_mtx be
 * acquired first before in6_multihead_lock, but in case both must be acquired
 * in succession, the correct lock ordering must be followed.
 *
 * Instead of walking the if_multiaddrs list at the interface and returning
 * the ifma_protospec value of a matching entry, we search the global list
 * of in6_multi records and find it that way; this is done with in6_multihead
 * lock held.  Doing so avoids the race condition issues that many other BSDs
 * suffer from (therefore in our implementation, ifma_protospec will never be
 * NULL for as long as the in6_multi is valid.)
 *
 * The above creates a requirement for the in6_multi to stay in the
 * in6_multihead list even after the final MLD leave (in MLDv2 mode) until
 * it no longer needs to be retransmitted (this is not required for MLDv1.)
 * In order to handle this, the request and reference counts of the
 * in6_multi are bumped up when the state changes to MLD_LEAVING_MEMBER,
 * and later dropped in the timeout handler.  Each in6_multi holds a
 * reference to the underlying mld_ifinfo.
 *
 * Thus, the permitted lock order is:
 *
 *	mld_mtx, in6_multihead_lock, inm6_lock, mli_lock
 *
 * Any may be taken independently, but if any are held at the same time,
 * the above lock order must be followed.
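 *
 * An informal sketch of the acquisition pattern this order implies,
 * when several of these locks are needed at once:
 *
 *	MLD_LOCK();
 *	in6_multihead_lock_shared();
 *	IN6M_LOCK(inm);
 *	MLI_LOCK(mli);
 *	...
 *	MLI_UNLOCK(mli);
 *	IN6M_UNLOCK(inm);
 *	in6_multihead_lock_done();
 *	MLD_UNLOCK();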
 */
static decl_lck_mtx_data(, mld_mtx);

SLIST_HEAD(mld_in6m_relhead, in6_multi);

static void	mli_initvar(struct mld_ifinfo *, struct ifnet *, int);
static struct mld_ifinfo *mli_alloc(int);
static void	mli_free(struct mld_ifinfo *);
static void	mli_delete(const struct ifnet *, struct mld_in6m_relhead *);
static void	mld_dispatch_packet(struct mbuf *);
static void	mld_final_leave(struct in6_multi *, struct mld_ifinfo *,
		    struct mld_tparams *);
static int	mld_handle_state_change(struct in6_multi *,
		    struct mld_ifinfo *, struct mld_tparams *);
static int	mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
		    struct mld_tparams *, const int);
#ifdef MLD_DEBUG
static const char *	mld_rec_type_to_str(const int);
#endif
static uint32_t	mld_set_version(struct mld_ifinfo *, const int);
static void	mld_flush_relq(struct mld_ifinfo *, struct mld_in6m_relhead *);
static void	mld_dispatch_queue(struct mld_ifinfo *, struct ifqueue *, int);
static int	mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
		    /*const*/ struct mld_hdr *);
static int	mld_v1_input_report(struct ifnet *, struct mbuf *,
		    const struct ip6_hdr *, /*const*/ struct mld_hdr *);
static void	mld_v1_process_group_timer(struct in6_multi *, const int);
static void	mld_v1_process_querier_timers(struct mld_ifinfo *);
static int	mld_v1_transmit_report(struct in6_multi *, const int);
static uint32_t	mld_v1_update_group(struct in6_multi *, const int);
static void	mld_v2_cancel_link_timers(struct mld_ifinfo *);
static uint32_t	mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
		mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int	mld_v2_enqueue_filter_change(struct ifqueue *,
		    struct in6_multi *);
static int	mld_v2_enqueue_group_record(struct ifqueue *,
		    struct in6_multi *, const int, const int, const int,
		    const int);
static int	mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
		    struct mbuf *, const int, const int);
static int	mld_v2_merge_state_changes(struct in6_multi *,
		    struct ifqueue *);
static void	mld_v2_process_group_timers(struct mld_ifinfo *,
		    struct ifqueue *, struct ifqueue *,
		    struct in6_multi *, const int);
static int	mld_v2_process_group_query(struct in6_multi *,
		    int, struct mbuf *, const int);
static int	sysctl_mld_gsr SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS;
static int	sysctl_mld_v2enable SYSCTL_HANDLER_ARGS;

static int mld_timeout_run;		/* MLD timer is scheduled to run */
static void mld_timeout(void *);
static void mld_sched_timeout(void);

/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
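 * (RFC 2710 defines MLDv1; RFC 3590 updates it with source address
 * selection rules; RFC 3810 defines MLDv2.)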
 */
static struct timeval mld_gsrdelay = {10, 0};
static LIST_HEAD(, mld_ifinfo) mli_head;

static int querier_present_timers_running6;
static int interface_timers_running6;
static int state_change_timers_running6;
static int current_state_timers_running6;

/*
 * Subsystem lock macros.
 */
#define	MLD_LOCK()			\
	lck_mtx_lock(&mld_mtx)
#define	MLD_LOCK_ASSERT_HELD()		\
	lck_mtx_assert(&mld_mtx, LCK_MTX_ASSERT_OWNED)
#define	MLD_LOCK_ASSERT_NOTHELD()	\
	lck_mtx_assert(&mld_mtx, LCK_MTX_ASSERT_NOTOWNED)
#define	MLD_UNLOCK()			\
	lck_mtx_unlock(&mld_mtx)

#define	MLD_ADD_DETACHED_IN6M(_head, _in6m) {				\
	SLIST_INSERT_HEAD(_head, _in6m, in6m_dtle);			\
}

#define	MLD_REMOVE_DETACHED_IN6M(_head) {				\
	struct in6_multi *_in6m, *_inm_tmp;				\
	SLIST_FOREACH_SAFE(_in6m, _head, in6m_dtle, _inm_tmp) {		\
		SLIST_REMOVE(_head, _in6m, in6_multi, in6m_dtle);	\
		IN6M_REMREF(_in6m);					\
	}								\
	VERIFY(SLIST_EMPTY(_head));					\
}

#define	MLI_ZONE_MAX		64		/* maximum elements in zone */
#define	MLI_ZONE_NAME		"mld_ifinfo"	/* zone name */

static unsigned int mli_size;			/* size of zone element */
static struct zone *mli_zone;			/* zone for mld_ifinfo */

SYSCTL_DECL(_net_inet6);	/* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW | CTLFLAG_LOCKED, 0,
    "IPv6 Multicast Listener Discovery");
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_gsrdelay.tv_sec, 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");

SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_LOCKED,
    sysctl_mld_ifinfo, "Per-interface MLDv2 state");

static int	mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v1enable, 0, "Enable fallback to MLDv1");

static int	mld_v2enable = 1;
SYSCTL_PROC(_net_inet6_mld, OID_AUTO, v2enable,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_v2enable, 0, sysctl_mld_v2enable, "I",
    "Enable MLDv2 (debug purposes only)");

static int	mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW | CTLFLAG_LOCKED,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");

#ifdef MLD_DEBUG
int mld_debug = 0;
SYSCTL_INT(_net_inet6_mld, OID_AUTO,
	debug, CTLFLAG_RW | CTLFLAG_LOCKED, &mld_debug, 0, "");
#endif

/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
	struct ip6_hbh		hbh;
	struct ip6_opt		pad;
	struct ip6_opt_router	ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
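 *
 * (Informal sketch of the resulting wire layout, per RFC 2711: the
 * hop-by-hop extension header carries a PadN option followed by the
 * Router Alert option whose 16-bit value 0 means "datagram contains an
 * MLD message"; ip6or_len is the option length less the two
 * type/length octets, i.e. IP6OPT_RTALERT_LEN - 2.)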
 */
static struct mld_raopt mld_ra = {
	.hbh = { 0, 0 },
	.pad = { .ip6o_type = IP6OPT_PADN, 0 },
	.ra = {
		.ip6or_type = (u_int8_t)IP6OPT_ROUTER_ALERT,
		.ip6or_len = (u_int8_t)(IP6OPT_RTALERT_LEN - 2),
		.ip6or_value = {((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
		    (IP6OPT_RTALERT_MLD & 0xFF) }
	}
};
static struct ip6_pktopts mld_po;

/* Store MLDv2 record count in the module private scratch space */
#define	vt_nrecs	pkt_mpriv.__mpriv_u.__mpriv32[0].__mpriv32_u.__val16[0]

static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
{
	m->m_pkthdr.rcvif = ifp;
}

static __inline void
mld_scrub_context(struct mbuf *m)
{
	m->m_pkthdr.rcvif = NULL;
}

/*
 * Restore context from a queued output chain.
 * Return saved ifp.
 */
static __inline struct ifnet *
mld_restore_context(struct mbuf *m)
{
	return (m->m_pkthdr.rcvif);
}

/*
 * Retrieve or set threshold between group-source queries in seconds.
 */
static int
sysctl_mld_gsr SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int i;

	MLD_LOCK();

	i = mld_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	mld_gsrdelay.tv_sec = i;

out_locked:
	MLD_UNLOCK();
	return (error);
}

/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 */
static int
sysctl_mld_ifinfo SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp)
	int *name;
	int error;
	u_int namelen;
	struct ifnet *ifp;
	struct mld_ifinfo *mli;
	struct mld_ifinfo_u mli_u;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != USER_ADDR_NULL)
		return (EPERM);

	if (namelen != 1)
		return (EINVAL);

	MLD_LOCK();

	if (name[0] <= 0 || name[0] > (u_int)if_index) {
		error = ENOENT;
		goto out_locked;
	}

	error = ENOENT;

	ifnet_head_lock_shared();
	ifp = ifindex2ifnet[name[0]];
	ifnet_head_done();
	if (ifp == NULL)
		goto out_locked;

	bzero(&mli_u, sizeof (mli_u));

	LIST_FOREACH(mli, &mli_head, mli_link) {
		MLI_LOCK(mli);
		if (ifp != mli->mli_ifp) {
			MLI_UNLOCK(mli);
			continue;
		}

		mli_u.mli_ifindex = mli->mli_ifp->if_index;
		mli_u.mli_version = mli->mli_version;
		mli_u.mli_v1_timer = mli->mli_v1_timer;
		mli_u.mli_v2_timer = mli->mli_v2_timer;
		mli_u.mli_flags = mli->mli_flags;
		mli_u.mli_rv = mli->mli_rv;
		mli_u.mli_qi = mli->mli_qi;
		mli_u.mli_qri = mli->mli_qri;
		mli_u.mli_uri = mli->mli_uri;
		MLI_UNLOCK(mli);

		error = SYSCTL_OUT(req, &mli_u, sizeof (mli_u));
		break;
	}

out_locked:
	MLD_UNLOCK();
	return (error);
}

static int
sysctl_mld_v2enable SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int error;
	int i;
	struct mld_ifinfo *mli;
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLD_LOCK();

	i = mld_v2enable;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < 0 || i > 1) {
		error = EINVAL;
		goto out_locked;
	}

	mld_v2enable = i;
	/*
	 * If we enabled v2, the state transition will take care of upgrading
	 * the MLD version back to v2.  Otherwise, we have to explicitly
	 * downgrade.
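	 * (For example, setting net.inet6.mld.v2enable to 0 via sysctl(8)
	 * runs the loop below and moves every interface to MLD_VERSION_1.)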
	 * Note that this functionality is to be used for debugging.
	 */
	if (mld_v2enable == 1)
		goto out_locked;

	LIST_FOREACH(mli, &mli_head, mli_link) {
		MLI_LOCK(mli);
		if (mld_set_version(mli, MLD_VERSION_1) > 0)
			mtp.qpt = 1;
		MLI_UNLOCK(mli);
	}

out_locked:
	MLD_UNLOCK();

	mld_set_timeout(&mtp);

	return (error);
}

/*
 * Dispatch an entire queue of pending packet chains.
 *
 * Must not be called with in6m_lock held.
 */
static void
mld_dispatch_queue(struct mld_ifinfo *mli, struct ifqueue *ifq, int limit)
{
	struct mbuf *m;

	if (mli != NULL)
		MLI_LOCK_ASSERT_HELD(mli);

	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m == NULL)
			break;
		MLD_PRINTF(("%s: dispatch 0x%llx from 0x%llx\n", __func__,
		    (uint64_t)VM_KERNEL_ADDRPERM(ifq),
		    (uint64_t)VM_KERNEL_ADDRPERM(m)));
		if (mli != NULL)
			MLI_UNLOCK(mli);
		mld_dispatch_packet(m);
		if (mli != NULL)
			MLI_LOCK(mli);
		if (--limit == 0)
			break;
	}

	if (mli != NULL)
		MLI_LOCK_ASSERT_HELD(mli);
}

/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for ALL-HOSTS (ff02::1)
 * and node-local addresses.  However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline__ int
mld_is_addr_reported(const struct in6_addr *addr)
{

	VERIFY(IN6_IS_ADDR_MULTICAST(addr));

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
		return (0);

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
		struct in6_addr tmp = *addr;
		in6_clearscope(&tmp);
		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
			return (0);
	}

	return (1);
}

/*
 * Attach MLD when PF_INET6 is attached to an interface.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp, int how)
{
	struct mld_ifinfo *mli;

	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mli = mli_alloc(how);
	if (mli == NULL)
		return (NULL);

	MLD_LOCK();

	MLI_LOCK(mli);
	mli_initvar(mli, ifp, 0);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_ADDREF_LOCKED(mli); /* hold a reference for caller */
	MLI_UNLOCK(mli);
	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);

	MLD_UNLOCK();

	MLD_PRINTF(("%s: allocate mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	return (mli);
}

/*
 * Attach MLD when PF_INET6 is reattached to an interface.  Caller is
 * expected to have an outstanding reference to the mli.
 */
void
mld_domifreattach(struct mld_ifinfo *mli)
{
	struct ifnet *ifp;

	MLD_LOCK();

	MLI_LOCK(mli);
	VERIFY(!(mli->mli_debug & IFD_ATTACHED));
	ifp = mli->mli_ifp;
	VERIFY(ifp != NULL);
	mli_initvar(mli, ifp, 1);
	mli->mli_debug |= IFD_ATTACHED;
	MLI_ADDREF_LOCKED(mli); /* hold a reference for mli_head */
	MLI_UNLOCK(mli);
	ifnet_lock_shared(ifp);
	mld6_initsilent(ifp, mli);
	ifnet_lock_done(ifp);

	LIST_INSERT_HEAD(&mli_head, mli, mli_link);

	MLD_UNLOCK();

	MLD_PRINTF(("%s: reattached mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
}

/*
 * Hook for domifdetach.
 */
void
mld_domifdetach(struct ifnet *ifp)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	MLD_PRINTF(("%s: called for ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	MLD_LOCK();
	mli_delete(ifp, (struct mld_in6m_relhead *)&in6m_dthead);
	MLD_UNLOCK();

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
}

/*
 * Called at interface detach time.  Note that we only flush all deferred
 * responses and record releases; all remaining inm records and their source
 * entries related to this interface are left intact, in order to handle
 * the reattach case.
 */
static void
mli_delete(const struct ifnet *ifp, struct mld_in6m_relhead *in6m_dthead)
{
	struct mld_ifinfo *mli, *tmli;

	MLD_LOCK_ASSERT_HELD();

	LIST_FOREACH_SAFE(mli, &mli_head, mli_link, tmli) {
		MLI_LOCK(mli);
		if (mli->mli_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			IF_DRAIN(&mli->mli_gq);
			IF_DRAIN(&mli->mli_v1q);
			mld_flush_relq(mli, in6m_dthead);
			VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
			mli->mli_debug &= ~IFD_ATTACHED;
			MLI_UNLOCK(mli);

			LIST_REMOVE(mli, mli_link);
			MLI_REMREF(mli); /* release mli_head reference */
			return;
		}
		MLI_UNLOCK(mli);
	}
	panic("%s: mld_ifinfo not found for ifp %p(%s)\n", __func__,
	    ifp, ifp->if_xname);
}

__private_extern__ void
mld6_initsilent(struct ifnet *ifp, struct mld_ifinfo *mli)
{
	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_OWNED);

	MLI_LOCK_ASSERT_NOTHELD(mli);
	MLI_LOCK(mli);
	if (!(ifp->if_flags & IFF_MULTICAST) &&
	    (ifp->if_eflags & (IFEF_IPV6_ND6ALT|IFEF_LOCALNET_PRIVATE)))
		mli->mli_flags |= MLIF_SILENT;
	else
		mli->mli_flags &= ~MLIF_SILENT;
	MLI_UNLOCK(mli);
}

static void
mli_initvar(struct mld_ifinfo *mli, struct ifnet *ifp, int reattach)
{
	MLI_LOCK_ASSERT_HELD(mli);

	mli->mli_ifp = ifp;
	if (mld_v2enable)
		mli->mli_version = MLD_VERSION_2;
	else
		mli->mli_version = MLD_VERSION_1;
	mli->mli_flags = 0;
	mli->mli_rv = MLD_RV_INIT;
	mli->mli_qi = MLD_QI_INIT;
	mli->mli_qri = MLD_QRI_INIT;
	mli->mli_uri = MLD_URI_INIT;

	if (mld_use_allow)
		mli->mli_flags |= MLIF_USEALLOW;
	if (!reattach)
		SLIST_INIT(&mli->mli_relinmhead);

	/*
	 * Responses to general queries are subject to bounds.
	 */
	mli->mli_gq.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
	mli->mli_v1q.ifq_maxlen = MLD_MAX_RESPONSE_PACKETS;
}

static struct mld_ifinfo *
mli_alloc(int how)
{
	struct mld_ifinfo *mli;

	mli = (how == M_WAITOK) ?
	    zalloc(mli_zone) : zalloc_noblock(mli_zone);
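	/*
	 * (Informal note: with M_WAITOK the zalloc() call may block,
	 * while zalloc_noblock() may instead fail and return NULL,
	 * which is handled below.)
	 */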
	if (mli != NULL) {
		bzero(mli, mli_size);
		lck_mtx_init(&mli->mli_lock, mld_mtx_grp, mld_mtx_attr);
		mli->mli_debug |= IFD_ALLOC;
	}
	return (mli);
}

static void
mli_free(struct mld_ifinfo *mli)
{
	MLI_LOCK(mli);
	if (mli->mli_debug & IFD_ATTACHED) {
		panic("%s: attached mli=%p is being freed", __func__, mli);
		/* NOTREACHED */
	} else if (mli->mli_ifp != NULL) {
		panic("%s: ifp not NULL for mli=%p", __func__, mli);
		/* NOTREACHED */
	} else if (!(mli->mli_debug & IFD_ALLOC)) {
		panic("%s: mli %p cannot be freed", __func__, mli);
		/* NOTREACHED */
	} else if (mli->mli_refcnt != 0) {
		panic("%s: non-zero refcnt mli=%p", __func__, mli);
		/* NOTREACHED */
	}
	mli->mli_debug &= ~IFD_ALLOC;
	MLI_UNLOCK(mli);

	lck_mtx_destroy(&mli->mli_lock, mld_mtx_grp);
	zfree(mli_zone, mli);
}

void
mli_addref(struct mld_ifinfo *mli, int locked)
{
	if (!locked)
		MLI_LOCK_SPIN(mli);
	else
		MLI_LOCK_ASSERT_HELD(mli);

	if (++mli->mli_refcnt == 0) {
		panic("%s: mli=%p wraparound refcnt", __func__, mli);
		/* NOTREACHED */
	}
	if (!locked)
		MLI_UNLOCK(mli);
}

void
mli_remref(struct mld_ifinfo *mli)
{
	SLIST_HEAD(, in6_multi) in6m_dthead;
	struct ifnet *ifp;

	MLI_LOCK_SPIN(mli);

	if (mli->mli_refcnt == 0) {
		panic("%s: mli=%p negative refcnt", __func__, mli);
		/* NOTREACHED */
	}

	--mli->mli_refcnt;
	if (mli->mli_refcnt > 0) {
		MLI_UNLOCK(mli);
		return;
	}

	ifp = mli->mli_ifp;
	mli->mli_ifp = NULL;
	IF_DRAIN(&mli->mli_gq);
	IF_DRAIN(&mli->mli_v1q);
	SLIST_INIT(&in6m_dthead);
	mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
	VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
	MLI_UNLOCK(mli);

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);

	MLD_PRINTF(("%s: freeing mld_ifinfo for ifp 0x%llx(%s)\n",
	    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mli_free(mli);
}

/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr.  This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int err = 0, is_general_query;
	uint16_t timer;
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v1enable) {
		MLD_PRINTF(("%s: ignore v1 query %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&mld->mld_addr),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		MLD_PRINTF(("%s: ignore v1 query src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	/*
	 * Do address field validation upfront before we accept
	 * the query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * MLDv1 General Query.
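		 * (General Queries are addressed to ff02::1, the
		 * link-scope all-nodes group, which is what
		 * in6addr_linklocal_allnodes holds.)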
		 * If this was not sent to the all-nodes group, ignore it.
		 */
		struct in6_addr dst;

		dst = ip6->ip6_dst;
		in6_clearscope(&dst);
		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes)) {
			err = EINVAL;
			goto done;
		}
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks.
		 */
		in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	/*
	 * Switch to MLDv1 host compatibility mode.
	 */
	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	MLI_LOCK(mli);
	mtp.qpt = mld_set_version(mli, MLD_VERSION_1);
	MLI_UNLOCK(mli);

	timer = ntohs(mld->mld_maxdelay) / MLD_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	if (is_general_query) {
		struct in6_multistep step;

		MLD_PRINTF(("%s: process v1 general query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			IN6M_LOCK(inm);
			if (inm->in6m_ifp == ifp)
				mtp.cst += mld_v1_update_group(inm, timer);
			IN6M_UNLOCK(inm);
			IN6_NEXT_MULTI(step, inm);
		}
		in6_multihead_lock_done();
	} else {
		/*
		 * MLDv1 Group-Specific Query.
		 * If this is a group-specific MLDv1 query, we need only
		 * look up the single group to process it.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();

		if (inm != NULL) {
			IN6M_LOCK(inm);
			MLD_PRINTF(("%s: process v1 query %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
			mtp.cst = mld_v1_update_group(inm, timer);
			IN6M_UNLOCK(inm);
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
		}
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
	}
done:
	mld_set_timeout(&mtp);

	return (err);
}

/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2.  If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
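 *
 * (MLD_RANDOM_DELAY(timer) is assumed here to return a uniformly
 * distributed value in [1, timer], so each member picks its own slot
 * within the querier's advertised maximum response delay.)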
 */
static uint32_t
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
	IN6M_LOCK_ASSERT_HELD(inm);

	MLD_PRINTF(("%s: %s/%s timer=%d\n", __func__,
	    ip6_sprintf(&inm->in6m_addr),
	    if_name(inm->in6m_ifp), timer));

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (inm->in6m_timer != 0 &&
		    inm->in6m_timer <= timer) {
			MLD_PRINTF(("%s: REPORTING and timer running, "
			    "skipping.\n", __func__));
			break;
		}
		/* FALLTHROUGH */
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
		MLD_PRINTF(("%s: ->REPORTING\n", __func__));
		inm->in6m_state = MLD_REPORTING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		break;
	case MLD_SLEEPING_MEMBER:
		MLD_PRINTF(("%s: ->AWAKENING\n", __func__));
		inm->in6m_state = MLD_AWAKENING_MEMBER;
		break;
	case MLD_LEAVING_MEMBER:
		break;
	}

	return (inm->in6m_timer);
}

/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
	struct mld_ifinfo *mli;
	struct mldv2_query *mld;
	struct in6_multi *inm;
	uint32_t maxdelay, nsrc, qqi;
	int err = 0, is_general_query;
	uint16_t timer;
	uint8_t qrv;
	struct mld_tparams mtp = { 0, 0, 0, 0 };

	MLD_LOCK_ASSERT_NOTHELD();

	is_general_query = 0;

	if (!mld_v2enable) {
		MLD_PRINTF(("%s: ignore v2 query %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		MLD_PRINTF(("%s: ignore v2 query src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		goto done;
	}

	MLD_PRINTF(("%s: input v2 query on ifp 0x%llx(%s)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

	maxdelay = ntohs(mld->mld_maxdelay);	/* in 1/10ths of a second */
	if (maxdelay >= 32768) {
		maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
		    (MLD_MRC_EXP(maxdelay) + 3);
	}
	timer = maxdelay / MLD_TIMER_SCALE;
	if (timer == 0)
		timer = 1;
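
	/*
	 * An informal worked example of the Maximum Response Code
	 * decoding above (RFC 3810, Section 5.1.3): codes >= 32768 are
	 * floating point, with a 3-bit exponent and a 12-bit mantissa:
	 *
	 *	maxdelay = (mantissa | 0x1000) << (exponent + 3)
	 *
	 * e.g. 0x8000 (mantissa 0, exponent 0) decodes to
	 * 0x1000 << 3 = 32768.
	 */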

	qrv = MLD_QRV(mld->mld_misc);
	if (qrv < 2) {
		MLD_PRINTF(("%s: clamping qrv %d to %d\n", __func__,
		    qrv, MLD_RV_INIT));
		qrv = MLD_RV_INIT;
	}

	qqi = mld->mld_qqi;
	if (qqi >= 128) {
		qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
		    (MLD_QQIC_EXP(mld->mld_qqi) + 3);
	}

	nsrc = ntohs(mld->mld_numsrc);
	if (nsrc > MLD_MAX_GS_SOURCES) {
		err = EMSGSIZE;
		goto done;
	}
	if (icmp6len < sizeof(struct mldv2_query) +
	    (nsrc * sizeof(struct in6_addr))) {
		err = EMSGSIZE;
		goto done;
	}

	/*
	 * Do further input validation upfront to avoid resetting timers
	 * should we need to discard this query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */
		if (nsrc > 0) {
			err = EINVAL;
			goto done;
		}
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks (due to KAME
		 * locking lameness).  We own this mbuf chain just now.
		 */
		in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	mli = MLD_IFINFO(ifp);
	VERIFY(mli != NULL);

	MLI_LOCK(mli);
	/*
	 * Discard the v2 query if we're in Compatibility Mode.
	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
	 * until the Old Version Querier Present timer expires.
	 */
	if (mli->mli_version != MLD_VERSION_2) {
		MLI_UNLOCK(mli);
		goto done;
	}

	mtp.qpt = mld_set_version(mli, MLD_VERSION_2);
	mli->mli_rv = qrv;
	mli->mli_qi = qqi;
	mli->mli_qri = MAX(timer, MLD_QRI_MIN);

	MLD_PRINTF(("%s: qrv %d qi %d qri %d\n", __func__, mli->mli_rv,
	    mli->mli_qi, mli->mli_qri));

	if (is_general_query) {
		/*
		 * MLDv2 General Query.
		 *
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 *
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		MLD_PRINTF(("%s: process v2 general query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
			mtp.it = mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
		}
		MLI_UNLOCK(mli);
	} else {
		MLI_UNLOCK(mli);
		/*
		 * MLDv2 Group-specific or Group-and-source-specific Query.
		 *
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
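		 * The throttle window defaults to mld_gsrdelay (10
		 * seconds) and may be tuned through the
		 * net.inet6.mld.gsrdelay sysctl declared earlier in this
		 * file.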
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		in6_multihead_lock_shared();
		IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
		in6_multihead_lock_done();
		if (inm == NULL)
			goto done;

		IN6M_LOCK(inm);
		if (nsrc > 0) {
			if (!ratecheck(&inm->in6m_lastgsrtv,
			    &mld_gsrdelay)) {
				MLD_PRINTF(("%s: GS query throttled.\n",
				    __func__));
				IN6M_UNLOCK(inm);
				IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
				goto done;
			}
		}
		MLD_PRINTF(("%s: process v2 group query on ifp 0x%llx(%s)\n",
		    __func__, (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		MLI_LOCK(mli);
		mtp.it = mli->mli_v2_timer;
		MLI_UNLOCK(mli);
		if (mtp.it == 0 || mtp.it >= timer) {
			(void) mld_v2_process_group_query(inm, timer, m, off);
			mtp.cst = inm->in6m_timer;
		}
		IN6M_UNLOCK(inm);
		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
	}
done:
	if (mtp.it > 0) {
		MLD_PRINTF(("%s: v2 general query response scheduled in "
		    "T+%d seconds on ifp 0x%llx(%s)\n", __func__, mtp.it,
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
	}
	mld_set_timeout(&mtp);

	return (err);
}

/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred.  Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, int timer, struct mbuf *m0,
    const int off)
{
	struct mldv2_query *mld;
	int retval;
	uint16_t nsrc;

	IN6M_LOCK_ASSERT_HELD(inm);

	retval = 0;
	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LEAVING_MEMBER:
		return (retval);
	case MLD_REPORTING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(mld->mld_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
			in6m_clear_recorded(inm);
			timer = min(inm->in6m_timer, timer);
		}
		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
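	 * The pending group-specific query subsumes this one: only the
	 * delay is tightened (to the smaller of the two timers), and no
	 * per-source state is recorded.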
	 */
	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->in6m_timer, timer);
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources.  If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 */
	if (inm->in6m_nsrc > 0) {
		struct mbuf *m;
		uint8_t *sp;
		int i, nrecorded;
		int soff;

		m = m0;
		soff = off + sizeof(struct mldv2_query);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++) {
			sp = mtod(m, uint8_t *) + soff;
			retval = in6m_record_source(inm,
			    (const struct in6_addr *)(void *)sp);
			if (retval < 0)
				break;
			nrecorded += retval;
			soff += sizeof(struct in6_addr);
			if (soff >= m->m_len) {
				soff = soff - m->m_len;
				m = m->m_next;
				if (m == NULL)
					break;
			}
		}
		if (nrecorded > 0) {
			MLD_PRINTF(("%s: schedule response to SG query\n",
			    __func__));
			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		}
	}

	return (retval);
}

/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr.  This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, struct mbuf *m,
    const struct ip6_hdr *ip6, /*const*/ struct mld_hdr *mld)
{
	struct in6_addr src, dst;
	struct in6_ifaddr *ia;
	struct in6_multi *inm;

	if (!mld_v1enable) {
		MLD_PRINTF(("%s: ignore v1 report %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&mld->mld_addr),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		return (0);
	}

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (m->m_pkthdr.pkt_flags & PKTF_LOOP))
		return (0);

	/*
	 * MLDv1 reports must originate from a host's link-local address,
	 * or the unspecified address (when booting).
	 */
	src = ip6->ip6_src;
	in6_clearscope(&src);
	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
		MLD_PRINTF(("%s: ignore v1 report src %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_src),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		return (EINVAL);
	}

	/*
	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
	 * group, and must be directed to the group itself.
	 */
	dst = ip6->ip6_dst;
	in6_clearscope(&dst);
	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
		MLD_PRINTF(("%s: ignore v1 report dst %s on ifp 0x%llx(%s)\n",
		    __func__, ip6_sprintf(&ip6->ip6_dst),
		    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
		return (EINVAL);
	}

	/*
	 * Make sure we don't hear our own membership report, as fast
	 * leave requires knowing that we are the only member of a
	 * group.
	 * Assume we used the link-local address if available,
	 * otherwise look for ::.
	 *
	 * XXX Note that scope ID comparison is needed for the address
	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
	 * performed for the on-wire address.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	if (ia != NULL) {
		IFA_LOCK(&ia->ia_ifa);
		if ((IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia)))) {
			IFA_UNLOCK(&ia->ia_ifa);
			IFA_REMREF(&ia->ia_ifa);
			return (0);
		}
		IFA_UNLOCK(&ia->ia_ifa);
		IFA_REMREF(&ia->ia_ifa);
	} else if (IN6_IS_ADDR_UNSPECIFIED(&src)) {
		return (0);
	}

	MLD_PRINTF(("%s: process v1 report %s on ifp 0x%llx(%s)\n",
	    __func__, ip6_sprintf(&mld->mld_addr),
	    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));

	/*
	 * Embed scope ID of receiving interface in MLD query for lookup
	 * whilst we don't hold other locks (due to KAME locking lameness).
	 */
	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
		in6_setscope(&mld->mld_addr, ifp, NULL);

	/*
	 * MLDv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	in6_multihead_lock_shared();
	IN6_LOOKUP_MULTI(&mld->mld_addr, ifp, inm);
	in6_multihead_lock_done();

	if (inm != NULL) {
		struct mld_ifinfo *mli;

		IN6M_LOCK(inm);
		mli = inm->in6m_mli;
		VERIFY(mli != NULL);

		MLI_LOCK(mli);
		/*
		 * If we are in MLDv2 host mode, do not allow the
		 * other host's MLDv1 report to suppress our reports.
		 */
		if (mli->mli_version == MLD_VERSION_2) {
			MLI_UNLOCK(mli);
			IN6M_UNLOCK(inm);
			IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
			goto out;
		}
		MLI_UNLOCK(mli);

		inm->in6m_timer = 0;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_SLEEPING_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_AWAKENING_MEMBER:
			MLD_PRINTF(("%s: report suppressed for %s on "
			    "ifp 0x%llx(%s)\n", __func__,
			    ip6_sprintf(&mld->mld_addr),
			    (uint64_t)VM_KERNEL_ADDRPERM(ifp), if_name(ifp)));
			/* FALLTHROUGH */
		case MLD_LAZY_MEMBER:
			inm->in6m_state = MLD_LAZY_MEMBER;
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
		IN6M_UNLOCK(inm);
		IN6M_REMREF(inm); /* from IN6_LOOKUP_MULTI */
	}

out:
	/* XXX Clear embedded scope ID as userland won't expect it. */
	in6_clearscope(&mld->mld_addr);

	return (0);
}

/*
 * MLD input path.
 *
 * Assume query messages which fit in a single ICMPv6 message header
 * have been pulled up.
 * Assume that userland will want to see the message, even if it
 * otherwise fails kernel input validation; do not free it.
 * Pullup may however free the mbuf chain m if it fails.
 *
 * Return IPPROTO_DONE if we freed m.  Otherwise, return 0.
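 *
 * Dispatch below is keyed on mld_type: listener queries are handed to
 * the v1 or v2 query handler depending on the ICMPv6 length
 * (sizeof(struct mld_hdr) versus sizeof(struct mldv2_query)), and v1
 * reports are handed to mld_v1_input_report().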
 */
int
mld_input(struct mbuf *m, int off, int icmp6len)
{
	struct ifnet *ifp;
	struct ip6_hdr *ip6;
	struct mld_hdr *mld;
	int mldlen;

	MLD_PRINTF(("%s: called w/mbuf (0x%llx,%d)\n", __func__,
	    (uint64_t)VM_KERNEL_ADDRPERM(m), off));

	ifp = m->m_pkthdr.rcvif;

	ip6 = mtod(m, struct ip6_hdr *);

	/* Pullup to appropriate size. */
	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
	if (mld->mld_type == MLD_LISTENER_QUERY &&
	    icmp6len >= sizeof(struct mldv2_query)) {
		mldlen = sizeof(struct mldv2_query);
	} else {
		mldlen = sizeof(struct mld_hdr);
	}
	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
	if (mld == NULL) {
		icmp6stat.icp6s_badlen++;
		return (IPPROTO_DONE);
	}

	/*
	 * Userland needs to see all of this traffic for implementing
	 * the endpoint discovery portion of multicast routing.
	 */
	switch (mld->mld_type) {
	case MLD_LISTENER_QUERY:
		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
		if (icmp6len == sizeof(struct mld_hdr)) {
			if (mld_v1_input_query(ifp, ip6, mld) != 0)
				return (0);
		} else if (icmp6len >= sizeof(struct mldv2_query)) {
			if (mld_v2_input_query(ifp, ip6, m, off,
			    icmp6len) != 0)
				return (0);
		}
		break;
	case MLD_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		if (mld_v1_input_report(ifp, m, ip6, mld) != 0)
			return (0);
		break;
	case MLDV2_LISTENER_REPORT:
		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
		break;
	case MLD_LISTENER_DONE:
		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
		break;
	default:
		break;
	}

	return (0);
}

/*
 * Schedule MLD timer based on various parameters; caller must ensure that
 * lock ordering is maintained as this routine acquires MLD global lock.
 */
void
mld_set_timeout(struct mld_tparams *mtp)
{
	MLD_LOCK_ASSERT_NOTHELD();
	VERIFY(mtp != NULL);

	if (mtp->qpt != 0 || mtp->it != 0 || mtp->cst != 0 || mtp->sct != 0) {
		MLD_LOCK();
		if (mtp->qpt != 0)
			querier_present_timers_running6 = 1;
		if (mtp->it != 0)
			interface_timers_running6 = 1;
		if (mtp->cst != 0)
			current_state_timers_running6 = 1;
		if (mtp->sct != 0)
			state_change_timers_running6 = 1;
		mld_sched_timeout();
		MLD_UNLOCK();
	}
}

/*
 * MLD6 timer handler (per 1 second).
 */
static void
mld_timeout(void *arg)
{
#pragma unused(arg)
	struct ifqueue scq;	/* State-change packets */
	struct ifqueue qrq;	/* Query response packets */
	struct ifnet *ifp;
	struct mld_ifinfo *mli;
	struct in6_multi *inm;
	int uri_sec = 0;
	SLIST_HEAD(, in6_multi) in6m_dthead;

	SLIST_INIT(&in6m_dthead);

	/*
	 * Update coarse-grained networking timestamp (in sec.); the idea
	 * is to piggy-back on the timeout callout to update the counter
	 * returnable via net_uptime().
	 */
	net_update_uptime();

	MLD_LOCK();

	MLD_PRINTF(("%s: qpt %d, it %d, cst %d, sct %d\n", __func__,
	    querier_present_timers_running6, interface_timers_running6,
	    current_state_timers_running6, state_change_timers_running6));

	/*
	 * MLDv1 querier present timer processing.
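	 * A v1 querier present on the link arms mli_v1_timer, the "Older
	 * Version Querier Present" timeout that mld_set_version() computes
	 * as (Robustness Variable * Query Interval) + Query Response
	 * Interval (RFC 3810, Section 9.12); when it hits zero,
	 * mld_v1_process_querier_timers() reverts the link to MLDv2.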
	 */
	if (querier_present_timers_running6) {
		querier_present_timers_running6 = 0;
		LIST_FOREACH(mli, &mli_head, mli_link) {
			MLI_LOCK(mli);
			mld_v1_process_querier_timers(mli);
			if (mli->mli_v1_timer > 0)
				querier_present_timers_running6 = 1;
			MLI_UNLOCK(mli);
		}
	}

	/*
	 * MLDv2 General Query response timer processing.
	 */
	if (interface_timers_running6) {
		MLD_PRINTF(("%s: interface timers running\n", __func__));
		interface_timers_running6 = 0;
		LIST_FOREACH(mli, &mli_head, mli_link) {
			MLI_LOCK(mli);
			if (mli->mli_version != MLD_VERSION_2) {
				MLI_UNLOCK(mli);
				continue;
			}
			if (mli->mli_v2_timer == 0) {
				/* Do nothing. */
			} else if (--mli->mli_v2_timer == 0) {
				if (mld_v2_dispatch_general_query(mli) > 0)
					interface_timers_running6 = 1;
			} else {
				interface_timers_running6 = 1;
			}
			MLI_UNLOCK(mli);
		}
	}

	if (!current_state_timers_running6 &&
	    !state_change_timers_running6)
		goto out_locked;

	current_state_timers_running6 = 0;
	state_change_timers_running6 = 0;

	MLD_PRINTF(("%s: state change timers running\n", __func__));

	memset(&qrq, 0, sizeof(struct ifqueue));
	qrq.ifq_maxlen = MLD_MAX_G_GS_PACKETS;

	memset(&scq, 0, sizeof(struct ifqueue));
	scq.ifq_maxlen = MLD_MAX_STATE_CHANGE_PACKETS;

	/*
	 * MLD host report and state-change timer processing.
	 * Note: Processing a v2 group timer may remove a node.
	 */
	LIST_FOREACH(mli, &mli_head, mli_link) {
		struct in6_multistep step;

		MLI_LOCK(mli);
		ifp = mli->mli_ifp;
		uri_sec = MLD_RANDOM_DELAY(mli->mli_uri);
		MLI_UNLOCK(mli);

		in6_multihead_lock_shared();
		IN6_FIRST_MULTI(step, inm);
		while (inm != NULL) {
			IN6M_LOCK(inm);
			if (inm->in6m_ifp != ifp)
				goto next;

			MLI_LOCK(mli);
			switch (mli->mli_version) {
			case MLD_VERSION_1:
				mld_v1_process_group_timer(inm,
				    mli->mli_version);
				break;
			case MLD_VERSION_2:
				mld_v2_process_group_timers(mli, &qrq,
				    &scq, inm, uri_sec);
				break;
			}
			MLI_UNLOCK(mli);
next:
			IN6M_UNLOCK(inm);
			IN6_NEXT_MULTI(step, inm);
		}
		in6_multihead_lock_done();

		MLI_LOCK(mli);
		if (mli->mli_version == MLD_VERSION_1) {
			mld_dispatch_queue(mli, &mli->mli_v1q, 0);
		} else if (mli->mli_version == MLD_VERSION_2) {
			MLI_UNLOCK(mli);
			mld_dispatch_queue(NULL, &qrq, 0);
			mld_dispatch_queue(NULL, &scq, 0);
			VERIFY(qrq.ifq_len == 0);
			VERIFY(scq.ifq_len == 0);
			MLI_LOCK(mli);
		}
		/*
		 * In case there are still any pending membership reports
		 * which didn't get drained at version change time.
		 */
		IF_DRAIN(&mli->mli_v1q);
		/*
		 * Release all deferred inm records, and drain any locally
		 * enqueued packets; do it even if the current MLD version
		 * for the link is no longer MLDv2, in order to handle the
		 * version change case.
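		 * (Anything moved onto in6m_dthead here has its final
		 * IN6M_REMREF issued by MLD_REMOVE_DETACHED_IN6M at the
		 * bottom of this routine, after all locks are dropped.)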
		 */
		mld_flush_relq(mli, (struct mld_in6m_relhead *)&in6m_dthead);
		VERIFY(SLIST_EMPTY(&mli->mli_relinmhead));
		MLI_UNLOCK(mli);

		IF_DRAIN(&qrq);
		IF_DRAIN(&scq);
	}

out_locked:
	/* re-arm the timer if there's work to do */
	mld_timeout_run = 0;
	mld_sched_timeout();
	MLD_UNLOCK();

	/* Now that we've dropped all locks, release detached records */
	MLD_REMOVE_DETACHED_IN6M(&in6m_dthead);
}

static void
mld_sched_timeout(void)
{
	MLD_LOCK_ASSERT_HELD();

	if (!mld_timeout_run &&
	    (querier_present_timers_running6 || current_state_timers_running6 ||
	    interface_timers_running6 || state_change_timers_running6)) {
		mld_timeout_run = 1;
		timeout(mld_timeout, NULL, hz);
	}
}

/*
 * Free the in6_multi reference(s) for this MLD lifecycle.
 *
 * Caller must be holding mli_lock.
 */
static void
mld_flush_relq(struct mld_ifinfo *mli, struct mld_in6m_relhead *in6m_dthead)
{
	struct in6_multi *inm;

again:
	MLI_LOCK_ASSERT_HELD(mli);
	inm = SLIST_FIRST(&mli->mli_relinmhead);
	if (inm != NULL) {
		int lastref;

		SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
		MLI_UNLOCK(mli);

		in6_multihead_lock_exclusive();
		IN6M_LOCK(inm);
		VERIFY(inm->in6m_nrelecnt != 0);
		inm->in6m_nrelecnt--;
		lastref = in6_multi_detach(inm);
		VERIFY(!lastref || (!(inm->in6m_debug & IFD_ATTACHED) &&
		    inm->in6m_reqcnt == 0));
		IN6M_UNLOCK(inm);
		in6_multihead_lock_done();
		/* from mli_relinmhead */
		IN6M_REMREF(inm);
		/* from in6_multihead_list */
		if (lastref) {
			/*
			 * Defer releasing our final reference, as we
			 * are holding the MLD lock at this point, and
			 * we could end up with locking issues later on
			 * (while issuing SIOCDELMULTI) when this is the
			 * final reference count.  Let the caller do it
			 * when it is safe.
			 */
			MLD_ADD_DETACHED_IN6M(in6m_dthead, inm);
		}
		MLI_LOCK(mli);
		goto again;
	}
}

/*
 * Update host report group timer.
 * Will update the global pending timer flags.
 */
static void
mld_v1_process_group_timer(struct in6_multi *inm, const int mld_version)
{
#pragma unused(mld_version)
	int report_timer_expired;

	MLD_LOCK_ASSERT_HELD();
	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(inm->in6m_mli);

	if (inm->in6m_timer == 0) {
		report_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		report_timer_expired = 1;
	} else {
		current_state_timers_running6 = 1;
		/* caller will schedule timer */
		return;
	}

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (report_timer_expired) {
			inm->in6m_state = MLD_IDLE_MEMBER;
			(void) mld_v1_transmit_report(inm,
			    MLD_LISTENER_REPORT);
			IN6M_LOCK_ASSERT_HELD(inm);
			MLI_LOCK_ASSERT_HELD(inm->in6m_mli);
		}
		break;
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_LEAVING_MEMBER:
		break;
	}
}

/*
 * Update a group's timers for MLDv2.
 * Will update the global pending timer flags.
 * Note: Unlocked read from mli.
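 * (uri_sec is the caller's jittered Unsolicited Report Interval, in
 * seconds; it reseeds in6m_sctimer when further state-change
 * retransmissions remain pending.)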
 */
static void
mld_v2_process_group_timers(struct mld_ifinfo *mli,
    struct ifqueue *qrq, struct ifqueue *scq,
    struct in6_multi *inm, const int uri_sec)
{
	int query_response_timer_expired;
	int state_change_retransmit_timer_expired;

	MLD_LOCK_ASSERT_HELD();
	IN6M_LOCK_ASSERT_HELD(inm);
	MLI_LOCK_ASSERT_HELD(mli);
	VERIFY(mli == inm->in6m_mli);

	query_response_timer_expired = 0;
	state_change_retransmit_timer_expired = 0;

	/*
	 * During a transition from compatibility mode back to MLDv2,
	 * a group record in REPORTING state may still have its group
	 * timer active.  This is a no-op in this function; it is easier
	 * to deal with it here than to complicate the timeout path.
	 */
	if (inm->in6m_timer == 0) {
		query_response_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		query_response_timer_expired = 1;
	} else {
		current_state_timers_running6 = 1;
		/* caller will schedule timer */
	}

	if (inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 0;
	} else if (--inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 1;
	} else {
		state_change_timers_running6 = 1;
		/* caller will schedule timer */
	}

	/* We are in timer callback, so be quick about it. */
	if (!state_change_retransmit_timer_expired &&
	    !query_response_timer_expired)
		return;

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
		break;
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		/*
		 * Respond to a previously pending Group-Specific
		 * or Group-and-Source-Specific query by enqueueing
		 * the appropriate Current-State report for
		 * immediate transmission.
		 */
		if (query_response_timer_expired) {
			int retval;

			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
			    0);
			MLD_PRINTF(("%s: enqueue record = %d\n",
			    __func__, retval));
			inm->in6m_state = MLD_REPORTING_MEMBER;
			in6m_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case MLD_REPORTING_MEMBER:
	case MLD_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->in6m_scrv > 0) {
				inm->in6m_sctimer = uri_sec;
				state_change_timers_running6 = 1;
				/* caller will schedule timer */
			}
			/*
			 * Retransmit the previously computed state-change
			 * report.  If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void) mld_v2_merge_state_changes(inm, scq);

			in6m_commit(inm);
			MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__,
			    ip6_sprintf(&inm->in6m_addr),
			    if_name(inm->in6m_ifp)));

			/*
			 * If we are leaving the group for good, make sure
			 * we release MLD's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
1934 */ 1935 if (inm->in6m_state == MLD_LEAVING_MEMBER && 1936 inm->in6m_scrv == 0) { 1937 inm->in6m_state = MLD_NOT_MEMBER; 1938 /* 1939 * A reference has already been held in 1940 * mld_final_leave() for this inm, so 1941 * no need to hold another one. We also 1942 * bumped up its request count then, so 1943 * that it stays in in6_multihead. Both 1944 * of them will be released when it is 1945 * dequeued later on. 1946 */ 1947 VERIFY(inm->in6m_nrelecnt != 0); 1948 SLIST_INSERT_HEAD(&mli->mli_relinmhead, 1949 inm, in6m_nrele); 1950 } 1951 } 1952 break; 1953 } 1954} 1955 1956/* 1957 * Switch to a different version on the given interface, 1958 * as per RFC 3810, Section 9.12. 1959 */ 1960static uint32_t 1961mld_set_version(struct mld_ifinfo *mli, const int mld_version) 1962{ 1963 int old_version_timer; 1964 1965 MLI_LOCK_ASSERT_HELD(mli); 1966 1967 MLD_PRINTF(("%s: switching to v%d on ifp 0x%llx(%s)\n", __func__, 1968 mld_version, (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), 1969 if_name(mli->mli_ifp))); 1970 1971 if (mld_version == MLD_VERSION_1) { 1972 /* 1973 * Compute the "Older Version Querier Present" timer as per 1974 * Section 9.12, in seconds. 1975 */ 1976 old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri; 1977 mli->mli_v1_timer = old_version_timer; 1978 } 1979 1980 if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) { 1981 mli->mli_version = MLD_VERSION_1; 1982 mld_v2_cancel_link_timers(mli); 1983 } 1984 1985 MLI_LOCK_ASSERT_HELD(mli); 1986 1987 return (mli->mli_v1_timer); 1988} 1989 1990/* 1991 * Cancel pending MLDv2 timers for the given link and all groups 1992 * joined on it; state-change, general-query, and group-query timers. 1993 * 1994 * Only ever called on a transition from v2 to Compatibility mode. Kill 1995 * the timers stone dead (this may be expensive for large N groups); they 1996 * will be restarted if Compatibility Mode deems that they must be due to 1997 * query processing. 1998 */ 1999static void 2000mld_v2_cancel_link_timers(struct mld_ifinfo *mli) 2001{ 2002 struct ifnet *ifp; 2003 struct in6_multi *inm; 2004 struct in6_multistep step; 2005 2006 MLI_LOCK_ASSERT_HELD(mli); 2007 2008 MLD_PRINTF(("%s: cancel v2 timers on ifp 0x%llx(%s)\n", __func__, 2009 (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), if_name(mli->mli_ifp))); 2010 2011 /* 2012 * Stop the v2 General Query Response on this link stone dead. 2013 * If the timer is woken up due to interface_timers_running6, 2014 * the flag will be cleared if there are no pending link timers. 2015 */ 2016 mli->mli_v2_timer = 0; 2017 2018 /* 2019 * Now clear the current-state and state-change report timers 2020 * for all memberships scoped to this link. 2021 */ 2022 ifp = mli->mli_ifp; 2023 MLI_UNLOCK(mli); 2024 2025 in6_multihead_lock_shared(); 2026 IN6_FIRST_MULTI(step, inm); 2027 while (inm != NULL) { 2028 IN6M_LOCK(inm); 2029 if (inm->in6m_ifp != ifp) 2030 goto next; 2031 2032 switch (inm->in6m_state) { 2033 case MLD_NOT_MEMBER: 2034 case MLD_SILENT_MEMBER: 2035 case MLD_IDLE_MEMBER: 2036 case MLD_LAZY_MEMBER: 2037 case MLD_SLEEPING_MEMBER: 2038 case MLD_AWAKENING_MEMBER: 2039 /* 2040 * These states are either not relevant in v2 mode, 2041 * or are unreported. Do nothing. 2042 */ 2043 break; 2044 case MLD_LEAVING_MEMBER: 2045 /* 2046 * If we are leaving the group and switching 2047 * version, we need to release the final 2048 * reference held for issuing the INCLUDE {}. 2049 * During mld_final_leave(), we bumped up both the 2050 * request and reference counts.
Since we cannot 2051 * call in6_multi_detach() here, defer this task to 2052 * the timer routine. 2053 */ 2054 VERIFY(inm->in6m_nrelecnt != 0); 2055 MLI_LOCK(mli); 2056 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, 2057 in6m_nrele); 2058 MLI_UNLOCK(mli); 2059 /* FALLTHROUGH */ 2060 case MLD_G_QUERY_PENDING_MEMBER: 2061 case MLD_SG_QUERY_PENDING_MEMBER: 2062 in6m_clear_recorded(inm); 2063 /* FALLTHROUGH */ 2064 case MLD_REPORTING_MEMBER: 2065 inm->in6m_state = MLD_REPORTING_MEMBER; 2066 break; 2067 } 2068 /* 2069 * Always clear state-change and group report timers. 2070 * Free any pending MLDv2 state-change records. 2071 */ 2072 inm->in6m_sctimer = 0; 2073 inm->in6m_timer = 0; 2074 IF_DRAIN(&inm->in6m_scq); 2075next: 2076 IN6M_UNLOCK(inm); 2077 IN6_NEXT_MULTI(step, inm); 2078 } 2079 in6_multihead_lock_done(); 2080 2081 MLI_LOCK(mli); 2082} 2083 2084/* 2085 * Update the Older Version Querier Present timers for a link. 2086 * See Section 9.12 of RFC 3810. 2087 */ 2088static void 2089mld_v1_process_querier_timers(struct mld_ifinfo *mli) 2090{ 2091 MLI_LOCK_ASSERT_HELD(mli); 2092 2093 if (mld_v2enable && mli->mli_version != MLD_VERSION_2 && 2094 --mli->mli_v1_timer == 0) { 2095 /* 2096 * MLDv1 Querier Present timer expired; revert to MLDv2. 2097 */ 2098 MLD_PRINTF(("%s: transition from v%d -> v%d on 0x%llx(%s)\n", 2099 __func__, mli->mli_version, MLD_VERSION_2, 2100 (uint64_t)VM_KERNEL_ADDRPERM(mli->mli_ifp), 2101 if_name(mli->mli_ifp))); 2102 mli->mli_version = MLD_VERSION_2; 2103 } 2104} 2105 2106/* 2107 * Transmit an MLDv1 report immediately. 2108 */ 2109static int 2110mld_v1_transmit_report(struct in6_multi *in6m, const int type) 2111{ 2112 struct ifnet *ifp; 2113 struct in6_ifaddr *ia; 2114 struct ip6_hdr *ip6; 2115 struct mbuf *mh, *md; 2116 struct mld_hdr *mld; 2117 int error = 0; 2118 2119 IN6M_LOCK_ASSERT_HELD(in6m); 2120 MLI_LOCK_ASSERT_HELD(in6m->in6m_mli); 2121 2122 ifp = in6m->in6m_ifp; 2123 /* ia may be NULL if link-local address is tentative. */ 2124 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); 2125 2126 MGETHDR(mh, M_DONTWAIT, MT_HEADER); 2127 if (mh == NULL) { 2128 if (ia != NULL) 2129 IFA_REMREF(&ia->ia_ifa); 2130 return (ENOMEM); 2131 } 2132 MGET(md, M_DONTWAIT, MT_DATA); 2133 if (md == NULL) { 2134 m_free(mh); 2135 if (ia != NULL) 2136 IFA_REMREF(&ia->ia_ifa); 2137 return (ENOMEM); 2138 } 2139 mh->m_next = md; 2140 2141 /* 2142 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so 2143 * that ether_output() does not need to allocate another mbuf 2144 * for the header in the most common case. 2145 */ 2146 MH_ALIGN(mh, sizeof(struct ip6_hdr)); 2147 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr); 2148 mh->m_len = sizeof(struct ip6_hdr); 2149 2150 ip6 = mtod(mh, struct ip6_hdr *); 2151 ip6->ip6_flow = 0; 2152 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; 2153 ip6->ip6_vfc |= IPV6_VERSION; 2154 ip6->ip6_nxt = IPPROTO_ICMPV6; 2155 if (ia != NULL) 2156 IFA_LOCK(&ia->ia_ifa); 2157 ip6->ip6_src = ia ? 
ia->ia_addr.sin6_addr : in6addr_any; 2158 if (ia != NULL) { 2159 IFA_UNLOCK(&ia->ia_ifa); 2160 IFA_REMREF(&ia->ia_ifa); 2161 ia = NULL; 2162 } 2163 ip6->ip6_dst = in6m->in6m_addr; 2164 2165 md->m_len = sizeof(struct mld_hdr); 2166 mld = mtod(md, struct mld_hdr *); 2167 mld->mld_type = type; 2168 mld->mld_code = 0; 2169 mld->mld_cksum = 0; 2170 mld->mld_maxdelay = 0; 2171 mld->mld_reserved = 0; 2172 mld->mld_addr = in6m->in6m_addr; 2173 in6_clearscope(&mld->mld_addr); 2174 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, 2175 sizeof(struct ip6_hdr), sizeof(struct mld_hdr)); 2176 2177 mld_save_context(mh, ifp); 2178 mh->m_flags |= M_MLDV1; 2179 2180 /* 2181 * Because at this point we are possibly holding 2182 * in6_multihead_lock in shared or exclusive mode, we can't call 2183 * mld_dispatch_packet() here since that will eventually call 2184 * ip6_output(), which will try to lock in6_multihead_lock and cause 2185 * a deadlock. 2186 * Instead we defer the work to the mld_timeout() thread, thus 2187 * avoiding unlocking in6_multihead_lock here. 2188 */ 2189 if (IF_QFULL(&in6m->in6m_mli->mli_v1q)) { 2190 MLD_PRINTF(("%s: v1 outbound queue full\n", __func__)); 2191 error = ENOMEM; 2192 m_freem(mh); 2193 } else { 2194 IF_ENQUEUE(&in6m->in6m_mli->mli_v1q, mh); 2195 VERIFY(error == 0); 2196 } 2197 2198 return (error); 2199} 2200 2201/* 2202 * Process a state change from the upper layer for the given IPv6 group. 2203 * 2204 * Each socket holds a reference on the in6_multi in its own ip6_moptions. 2205 * The socket layer will have made the necessary updates to the group 2206 * state; it is now up to MLD to issue a state change report if there 2207 * has been any change between T0 (when the last state-change was issued) 2208 * and T1 (now). 2209 * 2210 * We use the MLDv2 state machine at group level. The MLD module 2211 * however makes the decision as to which MLD protocol version to speak. 2212 * A state change *from* INCLUDE {} always means an initial join. 2213 * A state change *to* INCLUDE {} always means a final leave. 2214 * 2215 * If delay is non-zero, and the state change is an initial multicast 2216 * join, the state change report will be delayed by 'delay' ticks 2217 * in units of seconds if MLDv1 is active on the link; otherwise 2218 * the initial MLDv2 state change report will be delayed by whichever 2219 * is sooner, a pending state-change timer or delay itself. 2220 */ 2221int 2222mld_change_state(struct in6_multi *inm, struct mld_tparams *mtp, 2223 const int delay) 2224{ 2225 struct mld_ifinfo *mli; 2226 struct ifnet *ifp; 2227 int error = 0; 2228 2229 VERIFY(mtp != NULL); 2230 bzero(mtp, sizeof (*mtp)); 2231 2232 IN6M_LOCK_ASSERT_HELD(inm); 2233 VERIFY(inm->in6m_mli != NULL); 2234 MLI_LOCK_ASSERT_NOTHELD(inm->in6m_mli); 2235 2236 /* 2237 * Try to detect if the upper layer just asked us to change state 2238 * for an interface which has now gone away. 2239 */ 2240 VERIFY(inm->in6m_ifma != NULL); 2241 ifp = inm->in6m_ifma->ifma_ifp; 2242 /* 2243 * Sanity check that netinet6's notion of ifp is the same as net's. 2244 */ 2245 VERIFY(inm->in6m_ifp == ifp); 2246 2247 mli = MLD_IFINFO(ifp); 2248 VERIFY(mli != NULL); 2249 2250 /* 2251 * If we detect a state transition to or from MCAST_UNDEFINED 2252 * for this group, then we are starting or finishing an MLD 2253 * life cycle for this group.
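 *
 * Concrete example of the dispatch below (modes assumed for
 * illustration): a first join moves the group from iss_fmode ==
 * MCAST_UNDEFINED at T0 to MCAST_EXCLUDE at T1, so mld_initial_join()
 * runs; the last leave moves T1 back to MCAST_UNDEFINED, so
 * mld_final_leave() runs; a source-list-only change such as
 * INCLUDE {a} -> INCLUDE {a,b} leaves the mode untouched and falls
 * through to mld_handle_state_change().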
2254 */ 2255 if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) { 2256 MLD_PRINTF(("%s: inm transition %d -> %d\n", __func__, 2257 inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode)); 2258 if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) { 2259 MLD_PRINTF(("%s: initial join\n", __func__)); 2260 error = mld_initial_join(inm, mli, mtp, delay); 2261 goto out; 2262 } else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) { 2263 MLD_PRINTF(("%s: final leave\n", __func__)); 2264 mld_final_leave(inm, mli, mtp); 2265 goto out; 2266 } 2267 } else { 2268 MLD_PRINTF(("%s: filter set change\n", __func__)); 2269 } 2270 2271 error = mld_handle_state_change(inm, mli, mtp); 2272out: 2273 return (error); 2274} 2275 2276/* 2277 * Perform the initial join for an MLD group. 2278 * 2279 * When joining a group: 2280 * If the group should have its MLD traffic suppressed, do nothing. 2281 * MLDv1 starts sending MLDv1 host membership reports. 2282 * MLDv2 will schedule an MLDv2 state-change report containing the 2283 * initial state of the membership. 2284 * 2285 * If the delay argument is non-zero, then we must delay sending the 2286 * initial state change for delay ticks (in units of seconds). 2287 */ 2288static int 2289mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli, 2290 struct mld_tparams *mtp, const int delay) 2291{ 2292 struct ifnet *ifp; 2293 struct ifqueue *ifq; 2294 int error, retval, syncstates; 2295 int odelay; 2296 2297 IN6M_LOCK_ASSERT_HELD(inm); 2298 MLI_LOCK_ASSERT_NOTHELD(mli); 2299 VERIFY(mtp != NULL); 2300 2301 MLD_PRINTF(("%s: initial join %s on ifp 0x%llx(%s)\n", 2302 __func__, ip6_sprintf(&inm->in6m_addr), 2303 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp), 2304 if_name(inm->in6m_ifp))); 2305 2306 error = 0; 2307 syncstates = 1; 2308 2309 ifp = inm->in6m_ifp; 2310 2311 MLI_LOCK(mli); 2312 VERIFY(mli->mli_ifp == ifp); 2313 2314 /* 2315 * Groups joined on loopback or marked as 'not reported', 2316 * enter the MLD_SILENT_MEMBER state and 2317 * are never reported in any protocol exchanges. 2318 * All other groups enter the appropriate state machine 2319 * for the version in use on this link. 2320 * A link marked as MLIF_SILENT causes MLD to be completely 2321 * disabled for the link. 2322 */ 2323 if ((ifp->if_flags & IFF_LOOPBACK) || 2324 (mli->mli_flags & MLIF_SILENT) || 2325 !mld_is_addr_reported(&inm->in6m_addr)) { 2326 MLD_PRINTF(("%s: not kicking state machine for silent group\n", 2327 __func__)); 2328 inm->in6m_state = MLD_SILENT_MEMBER; 2329 inm->in6m_timer = 0; 2330 } else { 2331 /* 2332 * Deal with overlapping in6_multi lifecycle. 2333 * If this group was LEAVING, then make sure 2334 * we drop the reference we picked up to keep the 2335 * group around for the final INCLUDE {} enqueue. 2336 * Since we cannot call in6_multi_detach() here, 2337 * defer this task to the timer routine. 2338 */ 2339 if (mli->mli_version == MLD_VERSION_2 && 2340 inm->in6m_state == MLD_LEAVING_MEMBER) { 2341 VERIFY(inm->in6m_nrelecnt != 0); 2342 SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm, 2343 in6m_nrele); 2344 } 2345 2346 inm->in6m_state = MLD_REPORTING_MEMBER; 2347 2348 switch (mli->mli_version) { 2349 case MLD_VERSION_1: 2350 /* 2351 * If a delay was provided, only use it if 2352 * it is greater than the delay normally 2353 * used for an MLDv1 state change report, 2354 * and delay sending the initial MLDv1 report 2355 * by not transitioning to the IDLE state. 
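 *
 * Worked example (timer constants assumed): with an MLD_V1_MAX_RI
 * of 10 seconds, MLD_RANDOM_DELAY() yields an odelay in [1..10].
 * A caller-supplied delay of 15 then wins via max(delay, odelay)
 * and the report waits 15 seconds; with delay == 0 the report is
 * transmitted immediately and in6m_timer is armed with odelay.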
2356 */ 2357 odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI); 2358 if (delay) { 2359 inm->in6m_timer = max(delay, odelay); 2360 mtp->cst = 1; 2361 } else { 2362 inm->in6m_state = MLD_IDLE_MEMBER; 2363 error = mld_v1_transmit_report(inm, 2364 MLD_LISTENER_REPORT); 2365 2366 IN6M_LOCK_ASSERT_HELD(inm); 2367 MLI_LOCK_ASSERT_HELD(mli); 2368 2369 if (error == 0) { 2370 inm->in6m_timer = odelay; 2371 mtp->cst = 1; 2372 } 2373 } 2374 break; 2375 2376 case MLD_VERSION_2: 2377 /* 2378 * Defer update of T0 to T1, until the first copy 2379 * of the state change has been transmitted. 2380 */ 2381 syncstates = 0; 2382 2383 /* 2384 * Immediately enqueue a State-Change Report for 2385 * this interface, freeing any previous reports. 2386 * Don't kick the timers if there is nothing to do, 2387 * or if an error occurred. 2388 */ 2389 ifq = &inm->in6m_scq; 2390 IF_DRAIN(ifq); 2391 retval = mld_v2_enqueue_group_record(ifq, inm, 1, 2392 0, 0, (mli->mli_flags & MLIF_USEALLOW)); 2393 mtp->cst = (ifq->ifq_len > 0); 2394 MLD_PRINTF(("%s: enqueue record = %d\n", 2395 __func__, retval)); 2396 if (retval <= 0) { 2397 error = retval * -1; 2398 break; 2399 } 2400 2401 /* 2402 * Schedule transmission of pending state-change 2403 * report up to RV times for this link. The timer 2404 * will fire at the next mld_timeout (1 second), 2405 * giving us an opportunity to merge the reports. 2406 * 2407 * If a delay was provided to this function, only 2408 * use this delay if sooner than the existing one. 2409 */ 2410 VERIFY(mli->mli_rv > 1); 2411 inm->in6m_scrv = mli->mli_rv; 2412 if (delay) { 2413 if (inm->in6m_sctimer > 1) { 2414 inm->in6m_sctimer = 2415 min(inm->in6m_sctimer, delay); 2416 } else 2417 inm->in6m_sctimer = delay; 2418 } else { 2419 inm->in6m_sctimer = 1; 2420 } 2421 mtp->sct = 1; 2422 error = 0; 2423 break; 2424 } 2425 } 2426 MLI_UNLOCK(mli); 2427 2428 /* 2429 * Only update the T0 state if state change is atomic, 2430 * i.e. we don't need to wait for a timer to fire before we 2431 * can consider the state change to have been communicated. 2432 */ 2433 if (syncstates) { 2434 in6m_commit(inm); 2435 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__, 2436 ip6_sprintf(&inm->in6m_addr), 2437 if_name(inm->in6m_ifp))); 2438 } 2439 2440 return (error); 2441} 2442 2443/* 2444 * Issue an intermediate state change during the life-cycle.
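 *
 * For example (sketch; sources assumed): an MLDv2 link seeing a
 * filter change from INCLUDE {a} to INCLUDE {a,b} lands here; the
 * queued report carries ALLOW_NEW {b} and is retransmitted up to
 * Robustness Variable (mli_rv) times by the state-change timer.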
2445 */ 2446static int 2447mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli, 2448 struct mld_tparams *mtp) 2449{ 2450 struct ifnet *ifp; 2451 int retval = 0; 2452 2453 IN6M_LOCK_ASSERT_HELD(inm); 2454 MLI_LOCK_ASSERT_NOTHELD(mli); 2455 VERIFY(mtp != NULL); 2456 2457 MLD_PRINTF(("%s: state change for %s on ifp 0x%llx(%s)\n", 2458 __func__, ip6_sprintf(&inm->in6m_addr), 2459 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp), 2460 if_name(inm->in6m_ifp))); 2461 2462 ifp = inm->in6m_ifp; 2463 2464 MLI_LOCK(mli); 2465 VERIFY(mli->mli_ifp == ifp); 2466 2467 if ((ifp->if_flags & IFF_LOOPBACK) || 2468 (mli->mli_flags & MLIF_SILENT) || 2469 !mld_is_addr_reported(&inm->in6m_addr) || 2470 (mli->mli_version != MLD_VERSION_2)) { 2471 MLI_UNLOCK(mli); 2472 if (!mld_is_addr_reported(&inm->in6m_addr)) { 2473 MLD_PRINTF(("%s: not kicking state machine for silent " 2474 "group\n", __func__)); 2475 } 2476 MLD_PRINTF(("%s: nothing to do\n", __func__)); 2477 in6m_commit(inm); 2478 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__, 2479 ip6_sprintf(&inm->in6m_addr), 2480 if_name(inm->in6m_ifp))); 2481 goto done; 2482 } 2483 2484 IF_DRAIN(&inm->in6m_scq); 2485 2486 retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0, 2487 (mli->mli_flags & MLIF_USEALLOW)); 2488 mtp->cst = (inm->in6m_scq.ifq_len > 0); 2489 MLD_PRINTF(("%s: enqueue record = %d\n", __func__, retval)); 2490 if (retval <= 0) { 2491 MLI_UNLOCK(mli); 2492 retval *= -1; 2493 goto done; 2494 } else { 2495 retval = 0; 2496 } 2497 2498 /* 2499 * If record(s) were enqueued, start the state-change 2500 * report timer for this group. 2501 */ 2502 inm->in6m_scrv = mli->mli_rv; 2503 inm->in6m_sctimer = 1; 2504 mtp->sct = 1; 2505 MLI_UNLOCK(mli); 2506 2507done: 2508 return (retval); 2509} 2510 2511/* 2512 * Perform the final leave for a multicast address. 2513 * 2514 * When leaving a group: 2515 * MLDv1 sends a DONE message, if and only if we are the reporter. 2516 * MLDv2 enqueues a state-change report containing a transition 2517 * to INCLUDE {} for immediate transmission. 2518 */ 2519static void 2520mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli, 2521 struct mld_tparams *mtp) 2522{ 2523 int syncstates = 1; 2524 2525 IN6M_LOCK_ASSERT_HELD(inm); 2526 MLI_LOCK_ASSERT_NOTHELD(mli); 2527 VERIFY(mtp != NULL); 2528 2529 MLD_PRINTF(("%s: final leave %s on ifp 0x%llx(%s)\n", 2530 __func__, ip6_sprintf(&inm->in6m_addr), 2531 (uint64_t)VM_KERNEL_ADDRPERM(inm->in6m_ifp), 2532 if_name(inm->in6m_ifp))); 2533 2534 switch (inm->in6m_state) { 2535 case MLD_NOT_MEMBER: 2536 case MLD_SILENT_MEMBER: 2537 case MLD_LEAVING_MEMBER: 2538 /* Already leaving or left; do nothing. */ 2539 MLD_PRINTF(("%s: not kicking state machine for silent group\n", 2540 __func__)); 2541 break; 2542 case MLD_REPORTING_MEMBER: 2543 case MLD_IDLE_MEMBER: 2544 case MLD_G_QUERY_PENDING_MEMBER: 2545 case MLD_SG_QUERY_PENDING_MEMBER: 2546 MLI_LOCK(mli); 2547 if (mli->mli_version == MLD_VERSION_1) { 2548 if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER || 2549 inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) { 2550 panic("%s: MLDv2 state reached, not MLDv2 " 2551 "mode\n", __func__); 2552 /* NOTREACHED */ 2553 } 2554 /* schedule timer if enqueue is successful */ 2555 mtp->cst = (mld_v1_transmit_report(inm, 2556 MLD_LISTENER_DONE) == 0); 2557 2558 IN6M_LOCK_ASSERT_HELD(inm); 2559 MLI_LOCK_ASSERT_HELD(mli); 2560 2561 inm->in6m_state = MLD_NOT_MEMBER; 2562 } else if (mli->mli_version == MLD_VERSION_2) { 2563 /* 2564 * Stop group timer and all pending reports.
2565 * Immediately enqueue a state-change report 2566 * TO_IN {} to be sent on the next timeout, 2567 * giving us an opportunity to merge reports. 2568 */ 2569 IF_DRAIN(&inm->in6m_scq); 2570 inm->in6m_timer = 0; 2571 inm->in6m_scrv = mli->mli_rv; 2572 MLD_PRINTF(("%s: Leaving %s/%s with %d " 2573 "pending retransmissions.\n", __func__, 2574 ip6_sprintf(&inm->in6m_addr), 2575 if_name(inm->in6m_ifp), 2576 inm->in6m_scrv)); 2577 if (inm->in6m_scrv == 0) { 2578 inm->in6m_state = MLD_NOT_MEMBER; 2579 inm->in6m_sctimer = 0; 2580 } else { 2581 int retval; 2582 /* 2583 * Stick around in the in6_multihead list; 2584 * the final detach will be issued by 2585 * mld_v2_process_group_timers() when 2586 * the retransmit timer expires. 2587 */ 2588 IN6M_ADDREF_LOCKED(inm); 2589 VERIFY(inm->in6m_debug & IFD_ATTACHED); 2590 inm->in6m_reqcnt++; 2591 VERIFY(inm->in6m_reqcnt >= 1); 2592 inm->in6m_nrelecnt++; 2593 VERIFY(inm->in6m_nrelecnt != 0); 2594 2595 retval = mld_v2_enqueue_group_record( 2596 &inm->in6m_scq, inm, 1, 0, 0, 2597 (mli->mli_flags & MLIF_USEALLOW)); 2598 mtp->cst = (inm->in6m_scq.ifq_len > 0); 2599 KASSERT(retval != 0, 2600 ("%s: enqueue record = %d\n", __func__, 2601 retval)); 2602 2603 inm->in6m_state = MLD_LEAVING_MEMBER; 2604 inm->in6m_sctimer = 1; 2605 mtp->sct = 1; 2606 syncstates = 0; 2607 } 2608 } 2609 MLI_UNLOCK(mli); 2610 break; 2611 case MLD_LAZY_MEMBER: 2612 case MLD_SLEEPING_MEMBER: 2613 case MLD_AWAKENING_MEMBER: 2614 /* Our reports are suppressed; do nothing. */ 2615 break; 2616 } 2617 2618 if (syncstates) { 2619 in6m_commit(inm); 2620 MLD_PRINTF(("%s: T1 -> T0 for %s/%s\n", __func__, 2621 ip6_sprintf(&inm->in6m_addr), 2622 if_name(inm->in6m_ifp))); 2623 inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED; 2624 MLD_PRINTF(("%s: T1 now MCAST_UNDEFINED for 0x%llx/%s\n", 2625 __func__, (uint64_t)VM_KERNEL_ADDRPERM(&inm->in6m_addr), 2626 if_name(inm->in6m_ifp))); 2627 } 2628} 2629 2630/* 2631 * Enqueue an MLDv2 group record to the given output queue. 2632 * 2633 * If is_state_change is zero, a current-state record is appended. 2634 * If is_state_change is non-zero, a state-change report is appended. 2635 * 2636 * If is_group_query is non-zero, an mbuf packet chain is allocated. 2637 * If is_group_query is zero, and if there is a packet with free space 2638 * at the tail of the queue, it will be appended to providing there 2639 * is enough free space. 2640 * Otherwise a new mbuf packet chain is allocated. 2641 * 2642 * If is_source_query is non-zero, each source is checked to see if 2643 * it was recorded for a Group-Source query, and will be omitted if 2644 * it is not both in-mode and recorded. 2645 * 2646 * If use_block_allow is non-zero, state change reports for initial join 2647 * and final leave, on an inclusive mode group with a source list, will be 2648 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively. 2649 * 2650 * The function will attempt to allocate leading space in the packet 2651 * for the IPv6+ICMP headers to be prepended without fragmenting the chain. 2652 * 2653 * If successful the size of all data appended to the queue is returned, 2654 * otherwise an error code less than zero is returned, or zero if 2655 * no record(s) were appended. 
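 *
 * Callers are expected to fold the return value as in this sketch
 * (mirrors the handling in mld_initial_join() and
 * mld_handle_state_change()):
 *
 *	retval = mld_v2_enqueue_group_record(ifq, inm, 1, 0, 0, 0);
 *	if (retval <= 0)
 *		error = -retval;	// 0 = nothing to send, else errno
 *	else
 *		nbytes = retval;	// bytes appended to the queue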
2656 */ 2657static int 2658mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm, 2659 const int is_state_change, const int is_group_query, 2660 const int is_source_query, const int use_block_allow) 2661{ 2662 struct mldv2_record mr; 2663 struct mldv2_record *pmr; 2664 struct ifnet *ifp; 2665 struct ip6_msource *ims, *nims; 2666 struct mbuf *m0, *m, *md; 2667 int error, is_filter_list_change; 2668 int minrec0len, m0srcs, msrcs, nbytes, off; 2669 int record_has_sources; 2670 int now; 2671 int type; 2672 uint8_t mode; 2673 2674 IN6M_LOCK_ASSERT_HELD(inm); 2675 MLI_LOCK_ASSERT_HELD(inm->in6m_mli); 2676 2677 error = 0; 2678 ifp = inm->in6m_ifp; 2679 is_filter_list_change = 0; 2680 m = NULL; 2681 m0 = NULL; 2682 m0srcs = 0; 2683 msrcs = 0; 2684 nbytes = 0; 2685 nims = NULL; 2686 record_has_sources = 1; 2687 pmr = NULL; 2688 type = MLD_DO_NOTHING; 2689 mode = inm->in6m_st[1].iss_fmode; 2690 2691 /* 2692 * If we did not transition out of ASM mode during t0->t1, 2693 * and there are no source nodes to process, we can skip 2694 * the generation of source records. 2695 */ 2696 if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 && 2697 inm->in6m_nsrc == 0) 2698 record_has_sources = 0; 2699 2700 if (is_state_change) { 2701 /* 2702 * Queue a state change record. 2703 * If the mode did not change, and there are non-ASM 2704 * listeners or source filters present, 2705 * we potentially need to issue two records for the group. 2706 * If there are ASM listeners, and there was no filter 2707 * mode transition of any kind, do nothing. 2708 * 2709 * If we are transitioning to MCAST_UNDEFINED, we need 2710 * not send any sources. A transition to/from this state is 2711 * considered inclusive with some special treatment. 2712 * 2713 * If we are rewriting initial joins/leaves to use 2714 * ALLOW/BLOCK, and the group's membership is inclusive, 2715 * we need to send sources in all cases. 2716 */ 2717 if (mode != inm->in6m_st[0].iss_fmode) { 2718 if (mode == MCAST_EXCLUDE) { 2719 MLD_PRINTF(("%s: change to EXCLUDE\n", 2720 __func__)); 2721 type = MLD_CHANGE_TO_EXCLUDE_MODE; 2722 } else { 2723 MLD_PRINTF(("%s: change to INCLUDE\n", 2724 __func__)); 2725 if (use_block_allow) { 2726 /* 2727 * XXX 2728 * Here we're interested in state 2729 * edges either direction between 2730 * MCAST_UNDEFINED and MCAST_INCLUDE. 2731 * Perhaps we should just check 2732 * the group state, rather than 2733 * the filter mode. 2734 */ 2735 if (mode == MCAST_UNDEFINED) { 2736 type = MLD_BLOCK_OLD_SOURCES; 2737 } else { 2738 type = MLD_ALLOW_NEW_SOURCES; 2739 } 2740 } else { 2741 type = MLD_CHANGE_TO_INCLUDE_MODE; 2742 if (mode == MCAST_UNDEFINED) 2743 record_has_sources = 0; 2744 } 2745 } 2746 } else { 2747 if (record_has_sources) { 2748 is_filter_list_change = 1; 2749 } else { 2750 type = MLD_DO_NOTHING; 2751 } 2752 } 2753 } else { 2754 /* 2755 * Queue a current state record. 2756 */ 2757 if (mode == MCAST_EXCLUDE) { 2758 type = MLD_MODE_IS_EXCLUDE; 2759 } else if (mode == MCAST_INCLUDE) { 2760 type = MLD_MODE_IS_INCLUDE; 2761 VERIFY(inm->in6m_st[1].iss_asm == 0); 2762 } 2763 } 2764 2765 /* 2766 * Generate the filter list changes using a separate function. 
2767 */ 2768 if (is_filter_list_change) 2769 return (mld_v2_enqueue_filter_change(ifq, inm)); 2770 2771 if (type == MLD_DO_NOTHING) { 2772 MLD_PRINTF(("%s: nothing to do for %s/%s\n", 2773 __func__, ip6_sprintf(&inm->in6m_addr), 2774 if_name(inm->in6m_ifp))); 2775 return (0); 2776 } 2777 2778 /* 2779 * If any sources are present, we must be able to fit at least 2780 * one in the trailing space of the tail packet's mbuf, 2781 * ideally more. 2782 */ 2783 minrec0len = sizeof(struct mldv2_record); 2784 if (record_has_sources) 2785 minrec0len += sizeof(struct in6_addr); 2786 MLD_PRINTF(("%s: queueing %s for %s/%s\n", __func__, 2787 mld_rec_type_to_str(type), 2788 ip6_sprintf(&inm->in6m_addr), 2789 if_name(inm->in6m_ifp))); 2790 2791 /* 2792 * Check if we have a packet in the tail of the queue for this 2793 * group into which the first group record for this group will fit. 2794 * Otherwise allocate a new packet. 2795 * Always allocate leading space for IP6+RA+ICMPV6+REPORT. 2796 * Note: Group records for G/GSR query responses MUST be sent 2797 * in their own packet. 2798 */ 2799 m0 = ifq->ifq_tail; 2800 if (!is_group_query && 2801 m0 != NULL && 2802 (m0->m_pkthdr.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) && 2803 (m0->m_pkthdr.len + minrec0len) < 2804 (ifp->if_mtu - MLD_MTUSPACE)) { 2805 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2806 sizeof(struct mldv2_record)) / 2807 sizeof(struct in6_addr); 2808 m = m0; 2809 MLD_PRINTF(("%s: use existing packet\n", __func__)); 2810 } else { 2811 if (IF_QFULL(ifq)) { 2812 MLD_PRINTF(("%s: outbound queue full\n", __func__)); 2813 return (-ENOMEM); 2814 } 2815 m = NULL; 2816 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - 2817 sizeof(struct mldv2_record)) / sizeof(struct in6_addr); 2818 if (!is_state_change && !is_group_query) 2819 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2820 if (m == NULL) 2821 m = m_gethdr(M_DONTWAIT, MT_DATA); 2822 if (m == NULL) 2823 return (-ENOMEM); 2824 2825 mld_save_context(m, ifp); 2826 2827 MLD_PRINTF(("%s: allocated first packet\n", __func__)); 2828 } 2829 2830 /* 2831 * Append group record. 2832 * If we have sources, we don't know how many yet. 2833 */ 2834 mr.mr_type = type; 2835 mr.mr_datalen = 0; 2836 mr.mr_numsrc = 0; 2837 mr.mr_addr = inm->in6m_addr; 2838 in6_clearscope(&mr.mr_addr); 2839 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { 2840 if (m != m0) 2841 m_freem(m); 2842 MLD_PRINTF(("%s: m_append() failed.\n", __func__)); 2843 return (-ENOMEM); 2844 } 2845 nbytes += sizeof(struct mldv2_record); 2846 2847 /* 2848 * Append as many sources as will fit in the first packet. 2849 * If we are appending to a new packet, the chain allocation 2850 * may potentially use clusters; use m_getptr() in this case. 2851 * If we are appending to an existing packet, we need to obtain 2852 * a pointer to the group record after m_append(), in case a new 2853 * mbuf was allocated. 2854 * 2855 * Only append sources which are in-mode at t1. If we are 2856 * transitioning to MCAST_UNDEFINED state on the group, and 2857 * use_block_allow is zero, do not include source entries. 2858 * Otherwise, we need to include this source in the report. 2859 * 2860 * Only report recorded sources in our filter set when responding 2861 * to a group-source query. 
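 *
 * Example of the skip rules below (sources assumed): an INCLUDE
 * {a,b} group answering a group-and-source query for {a,c} appends
 * only a, since a is in-mode at t1 and was recorded (im6s_stp != 0);
 * b is in-mode but was not recorded, and c is absent from the
 * source tree, so neither is appended.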
2862 */ 2863 if (record_has_sources) { 2864 if (m == m0) { 2865 md = m_last(m); 2866 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + 2867 md->m_len - nbytes); 2868 } else { 2869 md = m_getptr(m, 0, &off); 2870 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + 2871 off); 2872 } 2873 msrcs = 0; 2874 RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs, 2875 nims) { 2876 MLD_PRINTF(("%s: visit node %s\n", __func__, 2877 ip6_sprintf(&ims->im6s_addr))); 2878 now = im6s_get_mode(inm, ims, 1); 2879 MLD_PRINTF(("%s: node is %d\n", __func__, now)); 2880 if ((now != mode) || 2881 (now == mode && 2882 (!use_block_allow && mode == MCAST_UNDEFINED))) { 2883 MLD_PRINTF(("%s: skip node\n", __func__)); 2884 continue; 2885 } 2886 if (is_source_query && ims->im6s_stp == 0) { 2887 MLD_PRINTF(("%s: skip unrecorded node\n", 2888 __func__)); 2889 continue; 2890 } 2891 MLD_PRINTF(("%s: append node\n", __func__)); 2892 if (!m_append(m, sizeof(struct in6_addr), 2893 (void *)&ims->im6s_addr)) { 2894 if (m != m0) 2895 m_freem(m); 2896 MLD_PRINTF(("%s: m_append() failed.\n", 2897 __func__)); 2898 return (-ENOMEM); 2899 } 2900 nbytes += sizeof(struct in6_addr); 2901 ++msrcs; 2902 if (msrcs == m0srcs) 2903 break; 2904 } 2905 MLD_PRINTF(("%s: msrcs is %d this packet\n", __func__, 2906 msrcs)); 2907 pmr->mr_numsrc = htons(msrcs); 2908 nbytes += (msrcs * sizeof(struct in6_addr)); 2909 } 2910 2911 if (is_source_query && msrcs == 0) { 2912 MLD_PRINTF(("%s: no recorded sources to report\n", __func__)); 2913 if (m != m0) 2914 m_freem(m); 2915 return (0); 2916 } 2917 2918 /* 2919 * We are good to go with first packet. 2920 */ 2921 if (m != m0) { 2922 MLD_PRINTF(("%s: enqueueing first packet\n", __func__)); 2923 m->m_pkthdr.vt_nrecs = 1; 2924 IF_ENQUEUE(ifq, m); 2925 } else { 2926 m->m_pkthdr.vt_nrecs++; 2927 } 2928 /* 2929 * No further work needed if no source list in packet(s). 2930 */ 2931 if (!record_has_sources) 2932 return (nbytes); 2933 2934 /* 2935 * Whilst sources remain to be announced, we need to allocate 2936 * a new packet and fill out as many sources as will fit. 2937 * Always try for a cluster first. 
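 *
 * Worked sizing example (header sizes assumed): on a 1500-byte MTU
 * link, MLD_MTUSPACE covers the IPv6 header (40 bytes), the
 * hop-by-hop Router Alert option (8) and the v2 report header (8),
 * so each follow-up packet holds m0srcs = (1500 - 56 - 20) / 16 =
 * 89 sources after its 20-byte group record header.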
2938 */ 2939 while (nims != NULL) { 2940 if (IF_QFULL(ifq)) { 2941 MLD_PRINTF(("%s: outbound queue full\n", __func__)); 2942 return (-ENOMEM); 2943 } 2944 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2945 if (m == NULL) 2946 m = m_gethdr(M_DONTWAIT, MT_DATA); 2947 if (m == NULL) 2948 return (-ENOMEM); 2949 mld_save_context(m, ifp); 2950 md = m_getptr(m, 0, &off); 2951 pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off); 2952 MLD_PRINTF(("%s: allocated next packet\n", __func__)); 2953 2954 if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) { 2955 if (m != m0) 2956 m_freem(m); 2957 MLD_PRINTF(("%s: m_append() failed.\n", __func__)); 2958 return (-ENOMEM); 2959 } 2960 m->m_pkthdr.vt_nrecs = 1; 2961 nbytes += sizeof(struct mldv2_record); 2962 2963 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - 2964 sizeof(struct mldv2_record)) / sizeof(struct in6_addr); 2965 2966 msrcs = 0; 2967 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) { 2968 MLD_PRINTF(("%s: visit node %s\n", 2969 __func__, ip6_sprintf(&ims->im6s_addr))); 2970 now = im6s_get_mode(inm, ims, 1); 2971 if ((now != mode) || 2972 (now == mode && 2973 (!use_block_allow && mode == MCAST_UNDEFINED))) { 2974 MLD_PRINTF(("%s: skip node\n", __func__)); 2975 continue; 2976 } 2977 if (is_source_query && ims->im6s_stp == 0) { 2978 MLD_PRINTF(("%s: skip unrecorded node\n", 2979 __func__)); 2980 continue; 2981 } 2982 MLD_PRINTF(("%s: append node\n", __func__)); 2983 if (!m_append(m, sizeof(struct in6_addr), 2984 (void *)&ims->im6s_addr)) { 2985 if (m != m0) 2986 m_freem(m); 2987 MLD_PRINTF(("%s: m_append() failed.\n", 2988 __func__)); 2989 return (-ENOMEM); 2990 } 2991 ++msrcs; 2992 if (msrcs == m0srcs) 2993 break; 2994 } 2995 pmr->mr_numsrc = htons(msrcs); 2996 nbytes += (msrcs * sizeof(struct in6_addr)); 2997 2998 MLD_PRINTF(("%s: enqueueing next packet\n", __func__)); 2999 IF_ENQUEUE(ifq, m); 3000 } 3001 3002 return (nbytes); 3003} 3004 3005/* 3006 * Type used to mark record pass completion. 3007 * We exploit the fact we can cast to this easily from the 3008 * current filter modes on each ip_msource node. 3009 */ 3010typedef enum { 3011 REC_NONE = 0x00, /* MCAST_UNDEFINED */ 3012 REC_ALLOW = 0x01, /* MCAST_INCLUDE */ 3013 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ 3014 REC_FULL = REC_ALLOW | REC_BLOCK 3015} rectype_t; 3016 3017/* 3018 * Enqueue an MLDv2 filter list change to the given output queue. 3019 * 3020 * Source list filter state is held in an RB-tree. When the filter list 3021 * for a group is changed without changing its mode, we need to compute 3022 * the deltas between T0 and T1 for each source in the filter set, 3023 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. 3024 * 3025 * As we may potentially queue two record types, and the entire R-B tree 3026 * needs to be walked at once, we break this out into its own function 3027 * so we can generate a tightly packed queue of packets. 3028 * 3029 * XXX This could be written to only use one tree walk, although that makes 3030 * serializing into the mbuf chains a bit harder. For now we do two walks 3031 * which makes things easier on us, and it may or may not be harder on 3032 * the L2 cache. 3033 * 3034 * If successful the size of all data appended to the queue is returned, 3035 * otherwise an error code less than zero is returned, or zero if 3036 * no record(s) were appended. 
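 *
 * Delta example (sources assumed): a change from INCLUDE {a,b} at
 * t0 to INCLUDE {b,c} at t1 queues ALLOW_NEW {c} on one pass and
 * BLOCK_OLD {a} on the other; b is unchanged (t0 mode == t1 mode)
 * and is skipped on both passes.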
3037 */ 3038static int 3039mld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm) 3040{ 3041 static const int MINRECLEN = 3042 sizeof(struct mldv2_record) + sizeof(struct in6_addr); 3043 struct ifnet *ifp; 3044 struct mldv2_record mr; 3045 struct mldv2_record *pmr; 3046 struct ip6_msource *ims, *nims; 3047 struct mbuf *m, *m0, *md; 3048 int m0srcs, nbytes, npbytes, off, rsrcs, schanged; 3049 int nallow, nblock; 3050 uint8_t mode, now, then; 3051 rectype_t crt, drt, nrt; 3052 3053 IN6M_LOCK_ASSERT_HELD(inm); 3054 3055 if (inm->in6m_nsrc == 0 || 3056 (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0)) 3057 return (0); 3058 3059 ifp = inm->in6m_ifp; /* interface */ 3060 mode = inm->in6m_st[1].iss_fmode; /* filter mode at t1 */ 3061 crt = REC_NONE; /* current group record type */ 3062 drt = REC_NONE; /* mask of completed group record types */ 3063 nrt = REC_NONE; /* record type for current node */ 3064 m0srcs = 0; /* # source which will fit in current mbuf chain */ 3065 npbytes = 0; /* # of bytes appended this packet */ 3066 nbytes = 0; /* # of bytes appended to group's state-change queue */ 3067 rsrcs = 0; /* # sources encoded in current record */ 3068 schanged = 0; /* # nodes encoded in overall filter change */ 3069 nallow = 0; /* # of source entries in ALLOW_NEW */ 3070 nblock = 0; /* # of source entries in BLOCK_OLD */ 3071 nims = NULL; /* next tree node pointer */ 3072 3073 /* 3074 * For each possible filter record mode. 3075 * The first kind of source we encounter tells us which 3076 * is the first kind of record we start appending. 3077 * If a node transitioned to UNDEFINED at t1, its mode is treated 3078 * as the inverse of the group's filter mode. 3079 */ 3080 while (drt != REC_FULL) { 3081 do { 3082 m0 = ifq->ifq_tail; 3083 if (m0 != NULL && 3084 (m0->m_pkthdr.vt_nrecs + 1 <= 3085 MLD_V2_REPORT_MAXRECS) && 3086 (m0->m_pkthdr.len + MINRECLEN) < 3087 (ifp->if_mtu - MLD_MTUSPACE)) { 3088 m = m0; 3089 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 3090 sizeof(struct mldv2_record)) / 3091 sizeof(struct in6_addr); 3092 MLD_PRINTF(("%s: use previous packet\n", 3093 __func__)); 3094 } else { 3095 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3096 if (m == NULL) 3097 m = m_gethdr(M_DONTWAIT, MT_DATA); 3098 if (m == NULL) { 3099 MLD_PRINTF(("%s: m_get*() failed\n", 3100 __func__)); 3101 return (-ENOMEM); 3102 } 3103 m->m_pkthdr.vt_nrecs = 0; 3104 mld_save_context(m, ifp); 3105 m0srcs = (ifp->if_mtu - MLD_MTUSPACE - 3106 sizeof(struct mldv2_record)) / 3107 sizeof(struct in6_addr); 3108 npbytes = 0; 3109 MLD_PRINTF(("%s: allocated new packet\n", 3110 __func__)); 3111 } 3112 /* 3113 * Append the MLD group record header to the 3114 * current packet's data area. 3115 * Recalculate pointer to free space for next 3116 * group record, in case m_append() allocated 3117 * a new mbuf or cluster. 
3118 */ 3119 memset(&mr, 0, sizeof(mr)); 3120 mr.mr_addr = inm->in6m_addr; 3121 in6_clearscope(&mr.mr_addr); 3122 if (!m_append(m, sizeof(mr), (void *)&mr)) { 3123 if (m != m0) 3124 m_freem(m); 3125 MLD_PRINTF(("%s: m_append() failed\n", 3126 __func__)); 3127 return (-ENOMEM); 3128 } 3129 npbytes += sizeof(struct mldv2_record); 3130 if (m != m0) { 3131 /* new packet; offset in chain */ 3132 md = m_getptr(m, npbytes - 3133 sizeof(struct mldv2_record), &off); 3134 pmr = (struct mldv2_record *)(mtod(md, 3135 uint8_t *) + off); 3136 } else { 3137 /* current packet; offset from last append */ 3138 md = m_last(m); 3139 pmr = (struct mldv2_record *)(mtod(md, 3140 uint8_t *) + md->m_len - 3141 sizeof(struct mldv2_record)); 3142 } 3143 /* 3144 * Begin walking the tree for this record type 3145 * pass, or continue from where we left off 3146 * previously if we had to allocate a new packet. 3147 * Only report deltas in-mode at t1. 3148 * We need not report included sources as allowed 3149 * if we are in inclusive mode on the group, 3150 * however the converse is not true. 3151 */ 3152 rsrcs = 0; 3153 if (nims == NULL) { 3154 nims = RB_MIN(ip6_msource_tree, 3155 &inm->in6m_srcs); 3156 } 3157 RB_FOREACH_FROM(ims, ip6_msource_tree, nims) { 3158 MLD_PRINTF(("%s: visit node %s\n", __func__, 3159 ip6_sprintf(&ims->im6s_addr))); 3160 now = im6s_get_mode(inm, ims, 1); 3161 then = im6s_get_mode(inm, ims, 0); 3162 MLD_PRINTF(("%s: mode: t0 %d, t1 %d\n", 3163 __func__, then, now)); 3164 if (now == then) { 3165 MLD_PRINTF(("%s: skip unchanged\n", 3166 __func__)); 3167 continue; 3168 } 3169 if (mode == MCAST_EXCLUDE && 3170 now == MCAST_INCLUDE) { 3171 MLD_PRINTF(("%s: skip IN src on EX " 3172 "group\n", __func__)); 3173 continue; 3174 } 3175 nrt = (rectype_t)now; 3176 if (nrt == REC_NONE) 3177 nrt = (rectype_t)(~mode & REC_FULL); 3178 if (schanged++ == 0) { 3179 crt = nrt; 3180 } else if (crt != nrt) 3181 continue; 3182 if (!m_append(m, sizeof(struct in6_addr), 3183 (void *)&ims->im6s_addr)) { 3184 if (m != m0) 3185 m_freem(m); 3186 MLD_PRINTF(("%s: m_append() failed\n", 3187 __func__)); 3188 return (-ENOMEM); 3189 } 3190 nallow += !!(crt == REC_ALLOW); 3191 nblock += !!(crt == REC_BLOCK); 3192 if (++rsrcs == m0srcs) 3193 break; 3194 } 3195 /* 3196 * If we did not append any tree nodes on this 3197 * pass, back out of allocations. 3198 */ 3199 if (rsrcs == 0) { 3200 npbytes -= sizeof(struct mldv2_record); 3201 if (m != m0) { 3202 MLD_PRINTF(("%s: m_free(m)\n", 3203 __func__)); 3204 m_freem(m); 3205 } else { 3206 MLD_PRINTF(("%s: m_adj(m, -mr)\n", 3207 __func__)); 3208 m_adj(m, -((int)sizeof( 3209 struct mldv2_record))); 3210 } 3211 continue; 3212 } 3213 npbytes += (rsrcs * sizeof(struct in6_addr)); 3214 if (crt == REC_ALLOW) 3215 pmr->mr_type = MLD_ALLOW_NEW_SOURCES; 3216 else if (crt == REC_BLOCK) 3217 pmr->mr_type = MLD_BLOCK_OLD_SOURCES; 3218 pmr->mr_numsrc = htons(rsrcs); 3219 /* 3220 * Count the new group record, and enqueue this 3221 * packet if it wasn't already queued. 
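 *
 * Once the inner walk exhausts the tree, the completed record type
 * is folded into drt and crt flips to the other type (e.g.
 * REC_ALLOW -> REC_BLOCK), so the outer while (drt != REC_FULL)
 * loop makes at most two passes.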
3222 */ 3223 m->m_pkthdr.vt_nrecs++; 3224 if (m != m0) 3225 IF_ENQUEUE(ifq, m); 3226 nbytes += npbytes; 3227 } while (nims != NULL); 3228 drt |= crt; 3229 crt = (~crt & REC_FULL); 3230 } 3231 3232 MLD_PRINTF(("%s: queued %d ALLOW_NEW, %d BLOCK_OLD\n", __func__, 3233 nallow, nblock)); 3234 3235 return (nbytes); 3236} 3237 3238static int 3239mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq) 3240{ 3241 struct ifqueue *gq; 3242 struct mbuf *m; /* pending state-change */ 3243 struct mbuf *m0; /* copy of pending state-change */ 3244 struct mbuf *mt; /* last state-change in packet */ 3245 struct mbuf *n; 3246 int docopy, domerge; 3247 u_int recslen; 3248 3249 IN6M_LOCK_ASSERT_HELD(inm); 3250 3251 docopy = 0; 3252 domerge = 0; 3253 recslen = 0; 3254 3255 /* 3256 * If there are further pending retransmissions, make a writable 3257 * copy of each queued state-change message before merging. 3258 */ 3259 if (inm->in6m_scrv > 0) 3260 docopy = 1; 3261 3262 gq = &inm->in6m_scq; 3263#ifdef MLD_DEBUG 3264 if (gq->ifq_head == NULL) { 3265 MLD_PRINTF(("%s: WARNING: queue for inm 0x%llx is empty\n", 3266 __func__, (uint64_t)VM_KERNEL_ADDRPERM(inm))); 3267 } 3268#endif 3269 3270 /* 3271 * Use IF_REMQUEUE() instead of IF_DEQUEUE() below, since the 3272 * packet might not always be at the head of the ifqueue. 3273 */ 3274 m = gq->ifq_head; 3275 while (m != NULL) { 3276 /* 3277 * Only merge the report into the current packet if 3278 * there is sufficient space to do so; an MLDv2 report 3279 * packet may only contain 65,535 group records. 3280 * Always use a simple mbuf chain concatenation to do this, 3281 * as large state changes for single groups may have 3282 * allocated clusters. 3283 */ 3284 domerge = 0; 3285 mt = ifscq->ifq_tail; 3286 if (mt != NULL) { 3287 recslen = m_length(m); 3288 3289 if ((mt->m_pkthdr.vt_nrecs + 3290 m->m_pkthdr.vt_nrecs <= 3291 MLD_V2_REPORT_MAXRECS) && 3292 (mt->m_pkthdr.len + recslen <= 3293 (inm->in6m_ifp->if_mtu - MLD_MTUSPACE))) 3294 domerge = 1; 3295 } 3296 3297 if (!domerge && IF_QFULL(gq)) { 3298 MLD_PRINTF(("%s: outbound queue full, skipping whole " 3299 "packet 0x%llx\n", __func__, 3300 (uint64_t)VM_KERNEL_ADDRPERM(m))); 3301 n = m->m_nextpkt; 3302 if (!docopy) { 3303 IF_REMQUEUE(gq, m); 3304 m_freem(m); 3305 } 3306 m = n; 3307 continue; 3308 } 3309 3310 if (!docopy) { 3311 MLD_PRINTF(("%s: dequeueing 0x%llx\n", __func__, 3312 (uint64_t)VM_KERNEL_ADDRPERM(m))); 3313 n = m->m_nextpkt; 3314 IF_REMQUEUE(gq, m); 3315 m0 = m; 3316 m = n; 3317 } else { 3318 MLD_PRINTF(("%s: copying 0x%llx\n", __func__, 3319 (uint64_t)VM_KERNEL_ADDRPERM(m))); 3320 m0 = m_dup(m, M_NOWAIT); 3321 if (m0 == NULL) 3322 return (ENOMEM); 3323 m0->m_nextpkt = NULL; 3324 m = m->m_nextpkt; 3325 } 3326 3327 if (!domerge) { 3328 MLD_PRINTF(("%s: queueing 0x%llx to ifscq 0x%llx\n", 3329 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m0), 3330 (uint64_t)VM_KERNEL_ADDRPERM(ifscq))); 3331 IF_ENQUEUE(ifscq, m0); 3332 } else { 3333 struct mbuf *mtl; /* last mbuf of packet mt */ 3334 3335 MLD_PRINTF(("%s: merging 0x%llx with ifscq tail " 3336 "0x%llx\n", __func__, 3337 (uint64_t)VM_KERNEL_ADDRPERM(m0), 3338 (uint64_t)VM_KERNEL_ADDRPERM(mt))); 3339 3340 mtl = m_last(mt); 3341 m0->m_flags &= ~M_PKTHDR; 3342 mt->m_pkthdr.len += recslen; 3343 mt->m_pkthdr.vt_nrecs += 3344 m0->m_pkthdr.vt_nrecs; 3345 3346 mtl->m_next = m0; 3347 } 3348 } 3349 3350 return (0); 3351} 3352 3353/* 3354 * Respond to a pending MLDv2 General Query.
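 *
 * Flow sketch (burst constants assumed from mld6_var.h): each
 * reportable membership on the link is moved to REPORTING state and
 * a current-state record is queued; at most MLD_MAX_RESPONSE_BURST
 * packets are dispatched per pass, and any backlog re-arms
 * mli_v2_timer so the remainder slews out on later 1-second ticks.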
3355 */ 3356static uint32_t 3357mld_v2_dispatch_general_query(struct mld_ifinfo *mli) 3358{ 3359 struct ifnet *ifp; 3360 struct in6_multi *inm; 3361 struct in6_multistep step; 3362 int retval; 3363 3364 MLI_LOCK_ASSERT_HELD(mli); 3365 3366 VERIFY(mli->mli_version == MLD_VERSION_2); 3367 3368 ifp = mli->mli_ifp; 3369 MLI_UNLOCK(mli); 3370 3371 in6_multihead_lock_shared(); 3372 IN6_FIRST_MULTI(step, inm); 3373 while (inm != NULL) { 3374 IN6M_LOCK(inm); 3375 if (inm->in6m_ifp != ifp) 3376 goto next; 3377 3378 switch (inm->in6m_state) { 3379 case MLD_NOT_MEMBER: 3380 case MLD_SILENT_MEMBER: 3381 break; 3382 case MLD_REPORTING_MEMBER: 3383 case MLD_IDLE_MEMBER: 3384 case MLD_LAZY_MEMBER: 3385 case MLD_SLEEPING_MEMBER: 3386 case MLD_AWAKENING_MEMBER: 3387 inm->in6m_state = MLD_REPORTING_MEMBER; 3388 MLI_LOCK(mli); 3389 retval = mld_v2_enqueue_group_record(&mli->mli_gq, 3390 inm, 0, 0, 0, 0); 3391 MLI_UNLOCK(mli); 3392 MLD_PRINTF(("%s: enqueue record = %d\n", 3393 __func__, retval)); 3394 break; 3395 case MLD_G_QUERY_PENDING_MEMBER: 3396 case MLD_SG_QUERY_PENDING_MEMBER: 3397 case MLD_LEAVING_MEMBER: 3398 break; 3399 } 3400next: 3401 IN6M_UNLOCK(inm); 3402 IN6_NEXT_MULTI(step, inm); 3403 } 3404 in6_multihead_lock_done(); 3405 3406 MLI_LOCK(mli); 3407 mld_dispatch_queue(mli, &mli->mli_gq, MLD_MAX_RESPONSE_BURST); 3408 MLI_LOCK_ASSERT_HELD(mli); 3409 3410 /* 3411 * Slew transmission of bursts over 1 second intervals. 3412 */ 3413 if (mli->mli_gq.ifq_head != NULL) { 3414 mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY( 3415 MLD_RESPONSE_BURST_INTERVAL); 3416 } 3417 3418 return (mli->mli_v2_timer); 3419} 3420 3421/* 3422 * Transmit the next pending message in the output queue. 3423 * 3424 * Must not be called with in6m_lock or mli_lock held. 3425 */ 3426static void 3427mld_dispatch_packet(struct mbuf *m) 3428{ 3429 struct ip6_moptions *im6o; 3430 struct ifnet *ifp; 3431 struct ifnet *oifp = NULL; 3432 struct mbuf *m0; 3433 struct mbuf *md; 3434 struct ip6_hdr *ip6; 3435 struct mld_hdr *mld; 3436 int error; 3437 int off; 3438 int type; 3439 3440 MLD_PRINTF(("%s: transmit 0x%llx\n", __func__, 3441 (uint64_t)VM_KERNEL_ADDRPERM(m))); 3442 3443 /* 3444 * Check if the ifnet is still attached. 3445 */ 3446 ifp = mld_restore_context(m); 3447 if (ifp == NULL || !ifnet_is_attached(ifp, 0)) { 3448 MLD_PRINTF(("%s: dropped 0x%llx as ifindex %u went away.\n", 3449 __func__, (uint64_t)VM_KERNEL_ADDRPERM(m), 3450 (u_int)if_index)); 3451 m_freem(m); 3452 ip6stat.ip6s_noroute++; 3453 return; 3454 } 3455 3456 im6o = ip6_allocmoptions(M_WAITOK); 3457 if (im6o == NULL) { 3458 m_freem(m); 3459 return; 3460 } 3461 3462 im6o->im6o_multicast_hlim = 1; 3463 im6o->im6o_multicast_loop = 0; 3464 im6o->im6o_multicast_ifp = ifp; 3465 3466 if (m->m_flags & M_MLDV1) { 3467 m0 = m; 3468 } else { 3469 m0 = mld_v2_encap_report(ifp, m); 3470 if (m0 == NULL) { 3471 MLD_PRINTF(("%s: dropped 0x%llx\n", __func__, 3472 (uint64_t)VM_KERNEL_ADDRPERM(m))); 3473 /* 3474 * mld_v2_encap_report() has already freed our mbuf. 3475 */ 3476 IM6O_REMREF(im6o); 3477 ip6stat.ip6s_odropped++; 3478 return; 3479 } 3480 } 3481 3482 mld_scrub_context(m0); 3483 m->m_flags &= ~(M_PROTOFLAGS); 3484 m0->m_pkthdr.rcvif = lo_ifp; 3485 3486 ip6 = mtod(m0, struct ip6_hdr *); 3487 (void) in6_setscope(&ip6->ip6_dst, ifp, NULL); 3488 3489 /* 3490 * Retrieve the ICMPv6 type before handoff to ip6_output(), 3491 * so we can bump the stats.
3492 */ 3493 md = m_getptr(m0, sizeof(struct ip6_hdr), &off); 3494 mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off); 3495 type = mld->mld_type; 3496 3497 if (ifp->if_eflags & IFEF_TXSTART) { 3498 /* 3499 * Use control service class if the outgoing 3500 * interface supports transmit-start model. 3501 */ 3502 (void) m_set_service_class(m0, MBUF_SC_CTL); 3503 } 3504 3505 error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, im6o, 3506 &oifp, NULL); 3507 3508 IM6O_REMREF(im6o); 3509 3510 if (error) { 3511 MLD_PRINTF(("%s: ip6_output(0x%llx) = %d\n", __func__, 3512 (uint64_t)VM_KERNEL_ADDRPERM(m0), error)); 3513 if (oifp != NULL) 3514 ifnet_release(oifp); 3515 return; 3516 } 3517 3518 icmp6stat.icp6s_outhist[type]++; 3519 if (oifp != NULL) { 3520 icmp6_ifstat_inc(oifp, ifs6_out_msg); 3521 switch (type) { 3522 case MLD_LISTENER_REPORT: 3523 case MLDV2_LISTENER_REPORT: 3524 icmp6_ifstat_inc(oifp, ifs6_out_mldreport); 3525 break; 3526 case MLD_LISTENER_DONE: 3527 icmp6_ifstat_inc(oifp, ifs6_out_mlddone); 3528 break; 3529 } 3530 ifnet_release(oifp); 3531 } 3532} 3533 3534/* 3535 * Encapsulate an MLDv2 report. 3536 * 3537 * KAME IPv6 requires that hop-by-hop options be passed separately, 3538 * and that the IPv6 header be prepended in a separate mbuf. 3539 * 3540 * Returns a pointer to the new mbuf chain head, or NULL if the 3541 * allocation failed. 3542 */ 3543static struct mbuf * 3544mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m) 3545{ 3546 struct mbuf *mh; 3547 struct mldv2_report *mld; 3548 struct ip6_hdr *ip6; 3549 struct in6_ifaddr *ia; 3550 int mldreclen; 3551 3552 VERIFY(m->m_flags & M_PKTHDR); 3553 3554 /* 3555 * RFC3590: OK to send as :: or tentative during DAD. 3556 */ 3557 ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST); 3558 if (ia == NULL) 3559 MLD_PRINTF(("%s: warning: ia is NULL\n", __func__)); 3560 3561 MGETHDR(mh, M_DONTWAIT, MT_HEADER); 3562 if (mh == NULL) { 3563 if (ia != NULL) 3564 IFA_REMREF(&ia->ia_ifa); 3565 m_freem(m); 3566 return (NULL); 3567 } 3568 MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report)); 3569 3570 mldreclen = m_length(m); 3571 MLD_PRINTF(("%s: mldreclen is %d\n", __func__, mldreclen)); 3572 3573 mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report); 3574 mh->m_pkthdr.len = sizeof(struct ip6_hdr) + 3575 sizeof(struct mldv2_report) + mldreclen; 3576 3577 ip6 = mtod(mh, struct ip6_hdr *); 3578 ip6->ip6_flow = 0; 3579 ip6->ip6_vfc &= ~IPV6_VERSION_MASK; 3580 ip6->ip6_vfc |= IPV6_VERSION; 3581 ip6->ip6_nxt = IPPROTO_ICMPV6; 3582 if (ia != NULL) 3583 IFA_LOCK(&ia->ia_ifa); 3584 ip6->ip6_src = ia ? 
ia->ia_addr.sin6_addr : in6addr_any; 3585 if (ia != NULL) { 3586 IFA_UNLOCK(&ia->ia_ifa); 3587 IFA_REMREF(&ia->ia_ifa); 3588 ia = NULL; 3589 } 3590 ip6->ip6_dst = in6addr_linklocal_allv2routers; 3591 /* scope ID will be set in netisr */ 3592 3593 mld = (struct mldv2_report *)(ip6 + 1); 3594 mld->mld_type = MLDV2_LISTENER_REPORT; 3595 mld->mld_code = 0; 3596 mld->mld_cksum = 0; 3597 mld->mld_v2_reserved = 0; 3598 mld->mld_v2_numrecs = htons(m->m_pkthdr.vt_nrecs); 3599 m->m_pkthdr.vt_nrecs = 0; 3600 m->m_flags &= ~M_PKTHDR; 3601 3602 mh->m_next = m; 3603 mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6, 3604 sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen); 3605 return (mh); 3606} 3607 3608#ifdef MLD_DEBUG 3609static const char * 3610mld_rec_type_to_str(const int type) 3611{ 3612 switch (type) { 3613 case MLD_CHANGE_TO_EXCLUDE_MODE: 3614 return "TO_EX"; 3615 break; 3616 case MLD_CHANGE_TO_INCLUDE_MODE: 3617 return "TO_IN"; 3618 break; 3619 case MLD_MODE_IS_EXCLUDE: 3620 return "MODE_EX"; 3621 break; 3622 case MLD_MODE_IS_INCLUDE: 3623 return "MODE_IN"; 3624 break; 3625 case MLD_ALLOW_NEW_SOURCES: 3626 return "ALLOW_NEW"; 3627 break; 3628 case MLD_BLOCK_OLD_SOURCES: 3629 return "BLOCK_OLD"; 3630 break; 3631 default: 3632 break; 3633 } 3634 return "unknown"; 3635} 3636#endif 3637 3638void 3639mld_init(void) 3640{ 3641 3642 MLD_PRINTF(("%s: initializing\n", __func__)); 3643 3644 /* Setup lock group and attribute for mld_mtx */ 3645 mld_mtx_grp_attr = lck_grp_attr_alloc_init(); 3646 mld_mtx_grp = lck_grp_alloc_init("mld_mtx", mld_mtx_grp_attr); 3647 mld_mtx_attr = lck_attr_alloc_init(); 3648 lck_mtx_init(&mld_mtx, mld_mtx_grp, mld_mtx_attr); 3649 3650 ip6_initpktopts(&mld_po); 3651 mld_po.ip6po_hlim = 1; 3652 mld_po.ip6po_hbh = &mld_ra.hbh; 3653 mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER; 3654 mld_po.ip6po_flags = IP6PO_DONTFRAG; 3655 LIST_INIT(&mli_head); 3656 3657 mli_size = sizeof (struct mld_ifinfo); 3658 mli_zone = zinit(mli_size, MLI_ZONE_MAX * mli_size, 3659 0, MLI_ZONE_NAME); 3660 if (mli_zone == NULL) { 3661 panic("%s: failed allocating %s", __func__, MLI_ZONE_NAME); 3662 /* NOTREACHED */ 3663 } 3664 zone_change(mli_zone, Z_EXPAND, TRUE); 3665 zone_change(mli_zone, Z_CALLERACCT, FALSE); 3666} 3667
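/*
 * Illustrative usage sketch (not part of the build): callers such as
 * the in6_mc_join()/in6_mc_leave() paths are expected to collect
 * deferred timer requests in a struct mld_tparams and kick the MLD
 * timer only after their own locks are dropped, roughly as follows
 * (the locking shown is an assumption of this sketch):
 *
 *	struct mld_tparams mtp;
 *	int error;
 *
 *	IN6M_LOCK(inm);
 *	error = mld_change_state(inm, &mtp, 0);
 *	IN6M_UNLOCK(inm);
 *	mld_set_timeout(&mtp);	// arms mld_timeout() via mld_sched_timeout()
 */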