igmp.c revision 194760
1/*- 2 * Copyright (c) 2007-2009 Bruce Simpson. 3 * Copyright (c) 1988 Stephen Deering. 4 * Copyright (c) 1992, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Stephen Deering of Stanford University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93 35 */ 36 37/* 38 * Internet Group Management Protocol (IGMP) routines. 39 * [RFC1112, RFC2236, RFC3376] 40 * 41 * Written by Steve Deering, Stanford, May 1988. 42 * Modified by Rosen Sharma, Stanford, Aug 1994. 43 * Modified by Bill Fenner, Xerox PARC, Feb 1995. 44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995. 45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson. 
46 * 47 * MULTICAST Revision: 3.5.1.4 48 */ 49 50#include <sys/cdefs.h> 51__FBSDID("$FreeBSD: head/sys/netinet/igmp.c 194760 2009-06-23 20:19:09Z rwatson $"); 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <sys/module.h> 56#include <sys/malloc.h> 57#include <sys/mbuf.h> 58#include <sys/socket.h> 59#include <sys/protosw.h> 60#include <sys/kernel.h> 61#include <sys/sysctl.h> 62#include <sys/vimage.h> 63#include <sys/ktr.h> 64#include <sys/condvar.h> 65 66#include <net/if.h> 67#include <net/netisr.h> 68#include <net/vnet.h> 69 70#include <netinet/in.h> 71#include <netinet/in_var.h> 72#include <netinet/in_systm.h> 73#include <netinet/ip.h> 74#include <netinet/ip_var.h> 75#include <netinet/ip_options.h> 76#include <netinet/igmp.h> 77#include <netinet/igmp_var.h> 78#include <netinet/vinet.h> 79 80#include <machine/in_cksum.h> 81 82#include <security/mac/mac_framework.h> 83 84#ifndef KTR_IGMPV3 85#define KTR_IGMPV3 KTR_INET 86#endif 87 88static struct igmp_ifinfo * 89 igi_alloc_locked(struct ifnet *); 90static void igi_delete_locked(const struct ifnet *); 91static void igmp_dispatch_queue(struct ifqueue *, int, const int); 92static void igmp_fasttimo_vnet(void); 93static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *); 94static int igmp_handle_state_change(struct in_multi *, 95 struct igmp_ifinfo *); 96static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *); 97static int igmp_input_v1_query(struct ifnet *, const struct ip *, 98 const struct igmp *); 99static int igmp_input_v2_query(struct ifnet *, const struct ip *, 100 const struct igmp *); 101static int igmp_input_v3_query(struct ifnet *, const struct ip *, 102 /*const*/ struct igmpv3 *); 103static int igmp_input_v3_group_query(struct in_multi *, 104 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *); 105static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *, 106 /*const*/ struct igmp *); 107static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *, 108 /*const*/ struct igmp *); 109static void igmp_intr(struct mbuf *); 110static int igmp_isgroupreported(const struct in_addr); 111static struct mbuf * 112 igmp_ra_alloc(void); 113#ifdef KTR 114static char * igmp_rec_type_to_str(const int); 115#endif 116static void igmp_set_version(struct igmp_ifinfo *, const int); 117static void igmp_slowtimo_vnet(void); 118static void igmp_sysinit(void); 119static int igmp_v1v2_queue_report(struct in_multi *, const int); 120static void igmp_v1v2_process_group_timer(struct in_multi *, const int); 121static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *); 122static void igmp_v2_update_group(struct in_multi *, const int); 123static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *); 124static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *); 125static struct mbuf * 126 igmp_v3_encap_report(struct ifnet *, struct mbuf *); 127static int igmp_v3_enqueue_group_record(struct ifqueue *, 128 struct in_multi *, const int, const int, const int); 129static int igmp_v3_enqueue_filter_change(struct ifqueue *, 130 struct in_multi *); 131static void igmp_v3_process_group_timers(struct igmp_ifinfo *, 132 struct ifqueue *, struct ifqueue *, struct in_multi *, 133 const int); 134static int igmp_v3_merge_state_changes(struct in_multi *, 135 struct ifqueue *); 136static void igmp_v3_suppress_group_record(struct in_multi *); 137static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS); 138static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS); 139static int 
sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS); 140 141static vnet_attach_fn vnet_igmp_iattach; 142static vnet_detach_fn vnet_igmp_idetach; 143 144static const struct netisr_handler igmp_nh = { 145 .nh_name = "igmp", 146 .nh_handler = igmp_intr, 147 .nh_proto = NETISR_IGMP, 148 .nh_policy = NETISR_POLICY_SOURCE, 149}; 150 151/* 152 * System-wide globals. 153 * 154 * Unlocked access to these is OK, except for the global IGMP output 155 * queue. The IGMP subsystem lock ends up being system-wide for the moment, 156 * because all VIMAGEs have to share a global output queue, as netisrs 157 * themselves are not virtualized. 158 * 159 * Locking: 160 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK. 161 * Any may be taken independently; if any are held at the same 162 * time, the above lock order must be followed. 163 * * All output is delegated to the netisr. 164 * Now that Giant has been eliminated, the netisr may be inlined. 165 * * IN_MULTI_LOCK covers in_multi. 166 * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file, 167 * including the output queue. 168 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of 169 * per-link state iterators. 170 * * igmp_ifinfo is valid as long as PF_INET is attached to the interface, 171 * therefore it is not refcounted. 172 * We allow unlocked reads of igmp_ifinfo when accessed via in_multi. 173 * 174 * Reference counting 175 * * IGMP acquires its own reference every time an in_multi is passed to 176 * it and the group is being joined for the first time. 177 * * IGMP releases its reference(s) on in_multi in a deferred way, 178 * because the operations which process the release run as part of 179 * a loop whose control variables are directly affected by the release 180 * (that, and not recursing on the IF_ADDR_LOCK). 181 * 182 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds 183 * to a vnet in ifp->if_vnet. 184 * 185 * SMPng: XXX We may potentially race operations on ifma_protospec. 186 * The problem is that we currently lack a clean way of taking the 187 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing, 188 * as anything which modifies ifma needs to be covered by that lock. 189 * So check for ifma_protospec being NULL before proceeding. 190 */ 191struct mtx igmp_mtx; 192 193struct mbuf *m_raopt; /* Router Alert option */ 194MALLOC_DEFINE(M_IGMP, "igmp", "igmp state"); 195 196/* 197 * VIMAGE-wide globals. 198 * 199 * The IGMPv3 timers themselves need to run per-image, however, 200 * protosw timers run globally (see tcp). 201 * An ifnet can only be in one vimage at a time, and the loopback 202 * ifnet, loif, is itself virtualized. 203 * It would otherwise be possible to seriously hose IGMP state, 204 * and create inconsistencies in upstream multicast routing, if you have 205 * multiple VIMAGEs running on the same link joining different multicast 206 * groups, UNLESS the "primary IP address" is different. This is because 207 * IGMP for IPv4 does not force link-local addresses to be used for each 208 * node, unlike MLD for IPv6. 209 * Obviously the IGMPv3 per-interface state has per-vimage granularity 210 * also as a result. 211 * 212 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection 213 * policy to control the address used by IGMP on the link. 
214 */ 215#ifdef VIMAGE_GLOBALS 216int interface_timers_running; /* IGMPv3 general query response */ 217int state_change_timers_running; /* IGMPv3 state-change retransmit */ 218int current_state_timers_running; /* IGMPv1/v2 host report; 219 * IGMPv3 g/sg query response */ 220 221LIST_HEAD(, igmp_ifinfo) igi_head; 222struct igmpstat igmpstat; 223struct timeval igmp_gsrdelay; 224 225int igmp_recvifkludge; 226int igmp_sendra; 227int igmp_sendlocal; 228int igmp_v1enable; 229int igmp_v2enable; 230int igmp_legacysupp; 231int igmp_default_version; 232#endif /* VIMAGE_GLOBALS */ 233 234/* 235 * Virtualized sysctls. 236 */ 237SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_igmp, IGMPCTL_STATS, stats, 238 CTLFLAG_RW, igmpstat, igmpstat, ""); 239SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, recvifkludge, 240 CTLFLAG_RW, igmp_recvifkludge, 0, 241 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address"); 242SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, sendra, 243 CTLFLAG_RW, igmp_sendra, 0, 244 "Send IP Router Alert option in IGMPv2/v3 messages"); 245SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, sendlocal, 246 CTLFLAG_RW, igmp_sendlocal, 0, 247 "Send IGMP membership reports for 224.0.0.0/24 groups"); 248SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, v1enable, 249 CTLFLAG_RW, igmp_v1enable, 0, 250 "Enable backwards compatibility with IGMPv1"); 251SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, v2enable, 252 CTLFLAG_RW, igmp_v2enable, 0, 253 "Enable backwards compatibility with IGMPv2"); 254SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, legacysupp, 255 CTLFLAG_RW, igmp_legacysupp, 0, 256 "Allow v1/v2 reports to suppress v3 group responses"); 257SYSCTL_V_PROC(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, default_version, 258 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, igmp_default_version, 0, 259 sysctl_igmp_default_version, "I", 260 "Default version of IGMP to run on each interface"); 261SYSCTL_V_PROC(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, gsrdelay, 262 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, igmp_gsrdelay.tv_sec, 0, 263 sysctl_igmp_gsr, "I", 264 "Rate limit for IGMPv3 Group-and-Source queries in seconds"); 265 266/* 267 * Non-virtualized sysctls. 268 */ 269SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE, 270 sysctl_igmp_ifinfo, "Per-interface IGMPv3 state"); 271 272static __inline void 273igmp_save_context(struct mbuf *m, struct ifnet *ifp) 274{ 275 276#ifdef VIMAGE 277 m->m_pkthdr.header = ifp->if_vnet; 278#endif /* VIMAGE */ 279 m->m_pkthdr.flowid = ifp->if_index; 280} 281 282static __inline void 283igmp_scrub_context(struct mbuf *m) 284{ 285 286 m->m_pkthdr.header = NULL; 287 m->m_pkthdr.flowid = 0; 288} 289 290#ifdef KTR 291static __inline char * 292inet_ntoa_haddr(in_addr_t haddr) 293{ 294 struct in_addr ia; 295 296 ia.s_addr = htonl(haddr); 297 return (inet_ntoa(ia)); 298} 299#endif 300 301/* 302 * Restore context from a queued IGMP output chain. 303 * Return saved ifindex. 304 * 305 * VIMAGE: The assertion is there to make sure that we 306 * actually called CURVNET_SET() with what's in the mbuf chain. 307 */ 308static __inline uint32_t 309igmp_restore_context(struct mbuf *m) 310{ 311 312#ifdef notyet 313#if defined(VIMAGE) && defined(INVARIANTS) 314 KASSERT(curvnet == (m->m_pkthdr.header), 315 ("%s: called when curvnet was not restored", __func__)); 316#endif 317#endif 318 return (m->m_pkthdr.flowid); 319} 320 321/* 322 * Retrieve or set default IGMP version. 
323 * 324 * VIMAGE: Assume curvnet set by caller. 325 * SMPng: NOTE: Serialized by IGMP lock. 326 */ 327static int 328sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS) 329{ 330 INIT_VNET_INET(curvnet); 331 int error; 332 int new; 333 334 error = sysctl_wire_old_buffer(req, sizeof(int)); 335 if (error) 336 return (error); 337 338 IGMP_LOCK(); 339 340 new = V_igmp_default_version; 341 342 error = sysctl_handle_int(oidp, &new, 0, req); 343 if (error || !req->newptr) 344 goto out_locked; 345 346 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) { 347 error = EINVAL; 348 goto out_locked; 349 } 350 351 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d", 352 V_igmp_default_version, new); 353 354 V_igmp_default_version = new; 355 356out_locked: 357 IGMP_UNLOCK(); 358 return (error); 359} 360 361/* 362 * Retrieve or set threshold between group-source queries in seconds. 363 * 364 * VIMAGE: Assume curvnet set by caller. 365 * SMPng: NOTE: Serialized by IGMP lock. 366 */ 367static int 368sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS) 369{ 370 INIT_VNET_INET(curvnet); 371 int error; 372 int i; 373 374 error = sysctl_wire_old_buffer(req, sizeof(int)); 375 if (error) 376 return (error); 377 378 IGMP_LOCK(); 379 380 i = V_igmp_gsrdelay.tv_sec; 381 382 error = sysctl_handle_int(oidp, &i, 0, req); 383 if (error || !req->newptr) 384 goto out_locked; 385 386 if (i < -1 || i >= 60) { 387 error = EINVAL; 388 goto out_locked; 389 } 390 391 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d", 392 V_igmp_gsrdelay.tv_sec, i); 393 V_igmp_gsrdelay.tv_sec = i; 394 395out_locked: 396 IGMP_UNLOCK(); 397 return (error); 398} 399 400/* 401 * Expose struct igmp_ifinfo to userland, keyed by ifindex. 402 * For use by ifmcstat(8). 403 * 404 * SMPng: NOTE: Does an unlocked ifindex space read. 405 * VIMAGE: Assume curvnet set by caller. The node handler itself 406 * is not directly virtualized. 407 */ 408static int 409sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS) 410{ 411 INIT_VNET_NET(curvnet); 412 INIT_VNET_INET(curvnet); 413 int *name; 414 int error; 415 u_int namelen; 416 struct ifnet *ifp; 417 struct igmp_ifinfo *igi; 418 419 name = (int *)arg1; 420 namelen = arg2; 421 422 if (req->newptr != NULL) 423 return (EPERM); 424 425 if (namelen != 1) 426 return (EINVAL); 427 428 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo)); 429 if (error) 430 return (error); 431 432 IN_MULTI_LOCK(); 433 IGMP_LOCK(); 434 435 if (name[0] <= 0 || name[0] > V_if_index) { 436 error = ENOENT; 437 goto out_locked; 438 } 439 440 error = ENOENT; 441 442 ifp = ifnet_byindex(name[0]); 443 if (ifp == NULL) 444 goto out_locked; 445 446 LIST_FOREACH(igi, &V_igi_head, igi_link) { 447 if (ifp == igi->igi_ifp) { 448 error = SYSCTL_OUT(req, igi, 449 sizeof(struct igmp_ifinfo)); 450 break; 451 } 452 } 453 454out_locked: 455 IGMP_UNLOCK(); 456 IN_MULTI_UNLOCK(); 457 return (error); 458} 459 460/* 461 * Dispatch an entire queue of pending packet chains 462 * using the netisr. 463 * VIMAGE: Assumes the vnet pointer has been set. 464 */ 465static void 466igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop) 467{ 468 struct mbuf *m; 469 470 for (;;) { 471 _IF_DEQUEUE(ifq, m); 472 if (m == NULL) 473 break; 474 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m); 475 if (loop) 476 m->m_flags |= M_IGMP_LOOP; 477 netisr_dispatch(NETISR_IGMP, m); 478 if (--limit == 0) 479 break; 480 } 481} 482 483/* 484 * Filter outgoing IGMP report state by group. 485 * 486 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1). 
487 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are 488 * disabled for all groups in the 224.0.0.0/24 link-local scope. However, 489 * this may break certain IGMP snooping switches which rely on the old 490 * report behaviour. 491 * 492 * Return zero if the given group is one for which IGMP reports 493 * should be suppressed, or non-zero if reports should be issued. 494 */ 495static __inline int 496igmp_isgroupreported(const struct in_addr addr) 497{ 498 INIT_VNET_INET(curvnet); 499 500 if (in_allhosts(addr) || 501 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) 502 return (0); 503 504 return (1); 505} 506 507/* 508 * Construct a Router Alert option to use in outgoing packets. 509 */ 510static struct mbuf * 511igmp_ra_alloc(void) 512{ 513 struct mbuf *m; 514 struct ipoption *p; 515 516 MGET(m, M_DONTWAIT, MT_DATA); 517 p = mtod(m, struct ipoption *); 518 p->ipopt_dst.s_addr = INADDR_ANY; 519 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */ 520 p->ipopt_list[1] = 0x04; /* 4 bytes long */ 521 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */ 522 p->ipopt_list[3] = 0x00; /* pad byte */ 523 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1]; 524 525 return (m); 526} 527 528/* 529 * Attach IGMP when PF_INET is attached to an interface. 530 */ 531struct igmp_ifinfo * 532igmp_domifattach(struct ifnet *ifp) 533{ 534 struct igmp_ifinfo *igi; 535 536 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 537 __func__, ifp, ifp->if_xname); 538 539 IGMP_LOCK(); 540 541 igi = igi_alloc_locked(ifp); 542 if (!(ifp->if_flags & IFF_MULTICAST)) 543 igi->igi_flags |= IGIF_SILENT; 544 545 IGMP_UNLOCK(); 546 547 return (igi); 548} 549 550/* 551 * VIMAGE: assume curvnet set by caller. 552 */ 553static struct igmp_ifinfo * 554igi_alloc_locked(/*const*/ struct ifnet *ifp) 555{ 556 INIT_VNET_INET(ifp->if_vnet); 557 struct igmp_ifinfo *igi; 558 559 IGMP_LOCK_ASSERT(); 560 561 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO); 562 if (igi == NULL) 563 goto out; 564 565 igi->igi_ifp = ifp; 566 igi->igi_version = V_igmp_default_version; 567 igi->igi_flags = 0; 568 igi->igi_rv = IGMP_RV_INIT; 569 igi->igi_qi = IGMP_QI_INIT; 570 igi->igi_qri = IGMP_QRI_INIT; 571 igi->igi_uri = IGMP_URI_INIT; 572 573 SLIST_INIT(&igi->igi_relinmhead); 574 575 /* 576 * Responses to general queries are subject to bounds. 577 */ 578 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS); 579 580 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link); 581 582 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)", 583 ifp, ifp->if_xname); 584 585out: 586 return (igi); 587} 588 589/* 590 * Hook for ifdetach. 591 * 592 * NOTE: Some finalization tasks need to run before the protocol domain 593 * is detached, but also before the link layer does its cleanup. 594 * 595 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK(). 596 * XXX This is also bitten by unlocked ifma_protospec access. 
597 */ 598void 599igmp_ifdetach(struct ifnet *ifp) 600{ 601 struct igmp_ifinfo *igi; 602 struct ifmultiaddr *ifma; 603 struct in_multi *inm, *tinm; 604 605 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp, 606 ifp->if_xname); 607 608 IGMP_LOCK(); 609 610 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 611 if (igi->igi_version == IGMP_VERSION_3) { 612 IF_ADDR_LOCK(ifp); 613 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 614 if (ifma->ifma_addr->sa_family != AF_INET || 615 ifma->ifma_protospec == NULL) 616 continue; 617#if 0 618 KASSERT(ifma->ifma_protospec != NULL, 619 ("%s: ifma_protospec is NULL", __func__)); 620#endif 621 inm = (struct in_multi *)ifma->ifma_protospec; 622 if (inm->inm_state == IGMP_LEAVING_MEMBER) { 623 SLIST_INSERT_HEAD(&igi->igi_relinmhead, 624 inm, inm_nrele); 625 } 626 inm_clear_recorded(inm); 627 } 628 IF_ADDR_UNLOCK(ifp); 629 /* 630 * Free the in_multi reference(s) for this IGMP lifecycle. 631 */ 632 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, 633 tinm) { 634 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele); 635 inm_release_locked(inm); 636 } 637 } 638 639 IGMP_UNLOCK(); 640} 641 642/* 643 * Hook for domifdetach. 644 */ 645void 646igmp_domifdetach(struct ifnet *ifp) 647{ 648 struct igmp_ifinfo *igi; 649 650 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 651 __func__, ifp, ifp->if_xname); 652 653 IGMP_LOCK(); 654 655 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 656 igi_delete_locked(ifp); 657 658 IGMP_UNLOCK(); 659} 660 661static void 662igi_delete_locked(const struct ifnet *ifp) 663{ 664 INIT_VNET_INET(ifp->if_vnet); 665 struct igmp_ifinfo *igi, *tigi; 666 667 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)", 668 __func__, ifp, ifp->if_xname); 669 670 IGMP_LOCK_ASSERT(); 671 672 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) { 673 if (igi->igi_ifp == ifp) { 674 /* 675 * Free deferred General Query responses. 676 */ 677 _IF_DRAIN(&igi->igi_gq); 678 679 LIST_REMOVE(igi, igi_link); 680 681 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead), 682 ("%s: there are dangling in_multi references", 683 __func__)); 684 685 free(igi, M_IGMP); 686 return; 687 } 688 } 689 690#ifdef INVARIANTS 691 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp); 692#endif 693} 694 695/* 696 * Process a received IGMPv1 query. 697 * Return non-zero if the message should be dropped. 698 * 699 * VIMAGE: The curvnet pointer is derived from the input ifp. 700 */ 701static int 702igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip, 703 const struct igmp *igmp) 704{ 705 INIT_VNET_INET(ifp->if_vnet); 706 struct ifmultiaddr *ifma; 707 struct igmp_ifinfo *igi; 708 struct in_multi *inm; 709 710 /* 711 * IGMPv1 Host Mmembership Queries SHOULD always be addressed to 712 * 224.0.0.1. They are always treated as General Queries. 713 * igmp_group is always ignored. Do not drop it as a userland 714 * daemon may wish to see it. 715 * XXX SMPng: unlocked increments in igmpstat assumed atomic. 
716 */ 717 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) { 718 IGMPSTAT_INC(igps_rcv_badqueries); 719 return (0); 720 } 721 IGMPSTAT_INC(igps_rcv_gen_queries); 722 723 IN_MULTI_LOCK(); 724 IGMP_LOCK(); 725 726 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 727 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 728 729 if (igi->igi_flags & IGIF_LOOPBACK) { 730 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)", 731 ifp, ifp->if_xname); 732 goto out_locked; 733 } 734 735 /* 736 * Switch to IGMPv1 host compatibility mode. 737 */ 738 igmp_set_version(igi, IGMP_VERSION_1); 739 740 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname); 741 742 /* 743 * Start the timers in all of our group records 744 * for the interface on which the query arrived, 745 * except those which are already running. 746 */ 747 IF_ADDR_LOCK(ifp); 748 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 749 if (ifma->ifma_addr->sa_family != AF_INET || 750 ifma->ifma_protospec == NULL) 751 continue; 752 inm = (struct in_multi *)ifma->ifma_protospec; 753 if (inm->inm_timer != 0) 754 continue; 755 switch (inm->inm_state) { 756 case IGMP_NOT_MEMBER: 757 case IGMP_SILENT_MEMBER: 758 break; 759 case IGMP_G_QUERY_PENDING_MEMBER: 760 case IGMP_SG_QUERY_PENDING_MEMBER: 761 case IGMP_REPORTING_MEMBER: 762 case IGMP_IDLE_MEMBER: 763 case IGMP_LAZY_MEMBER: 764 case IGMP_SLEEPING_MEMBER: 765 case IGMP_AWAKENING_MEMBER: 766 inm->inm_state = IGMP_REPORTING_MEMBER; 767 inm->inm_timer = IGMP_RANDOM_DELAY( 768 IGMP_V1V2_MAX_RI * PR_FASTHZ); 769 V_current_state_timers_running = 1; 770 break; 771 case IGMP_LEAVING_MEMBER: 772 break; 773 } 774 } 775 IF_ADDR_UNLOCK(ifp); 776 777out_locked: 778 IGMP_UNLOCK(); 779 IN_MULTI_UNLOCK(); 780 781 return (0); 782} 783 784/* 785 * Process a received IGMPv2 general or group-specific query. 786 */ 787static int 788igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, 789 const struct igmp *igmp) 790{ 791 INIT_VNET_INET(ifp->if_vnet); 792 struct ifmultiaddr *ifma; 793 struct igmp_ifinfo *igi; 794 struct in_multi *inm; 795 int is_general_query; 796 uint16_t timer; 797 798 is_general_query = 0; 799 800 /* 801 * Validate address fields upfront. 802 * XXX SMPng: unlocked increments in igmpstat assumed atomic. 803 */ 804 if (in_nullhost(igmp->igmp_group)) { 805 /* 806 * IGMPv2 General Query. 807 * If this was not sent to the all-hosts group, ignore it. 808 */ 809 if (!in_allhosts(ip->ip_dst)) 810 return (0); 811 IGMPSTAT_INC(igps_rcv_gen_queries); 812 is_general_query = 1; 813 } else { 814 /* IGMPv2 Group-Specific Query. */ 815 IGMPSTAT_INC(igps_rcv_group_queries); 816 } 817 818 IN_MULTI_LOCK(); 819 IGMP_LOCK(); 820 821 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 822 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 823 824 if (igi->igi_flags & IGIF_LOOPBACK) { 825 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)", 826 ifp, ifp->if_xname); 827 goto out_locked; 828 } 829 830 /* 831 * Ignore v2 query if in v1 Compatibility Mode. 832 */ 833 if (igi->igi_version == IGMP_VERSION_1) 834 goto out_locked; 835 836 igmp_set_version(igi, IGMP_VERSION_2); 837 838 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE; 839 if (timer == 0) 840 timer = 1; 841 842 if (is_general_query) { 843 /* 844 * For each reporting group joined on this 845 * interface, kick the report timer. 
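		 * The timer value was derived above from the query's Max Resp
		 * Time (in fasttimo ticks) and is jittered per group by
		 * igmp_v2_update_group() to avoid synchronized report bursts.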
846 */ 847 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)", 848 ifp, ifp->if_xname); 849 IF_ADDR_LOCK(ifp); 850 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 851 if (ifma->ifma_addr->sa_family != AF_INET || 852 ifma->ifma_protospec == NULL) 853 continue; 854 inm = (struct in_multi *)ifma->ifma_protospec; 855 igmp_v2_update_group(inm, timer); 856 } 857 IF_ADDR_UNLOCK(ifp); 858 } else { 859 /* 860 * Group-specific IGMPv2 query, we need only 861 * look up the single group to process it. 862 */ 863 inm = inm_lookup(ifp, igmp->igmp_group); 864 if (inm != NULL) { 865 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)", 866 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 867 igmp_v2_update_group(inm, timer); 868 } 869 } 870 871out_locked: 872 IGMP_UNLOCK(); 873 IN_MULTI_UNLOCK(); 874 875 return (0); 876} 877 878/* 879 * Update the report timer on a group in response to an IGMPv2 query. 880 * 881 * If we are becoming the reporting member for this group, start the timer. 882 * If we already are the reporting member for this group, and timer is 883 * below the threshold, reset it. 884 * 885 * We may be updating the group for the first time since we switched 886 * to IGMPv3. If we are, then we must clear any recorded source lists, 887 * and transition to REPORTING state; the group timer is overloaded 888 * for group and group-source query responses. 889 * 890 * Unlike IGMPv3, the delay per group should be jittered 891 * to avoid bursts of IGMPv2 reports. 892 */ 893static void 894igmp_v2_update_group(struct in_multi *inm, const int timer) 895{ 896 INIT_VNET_INET(curvnet); 897 898 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__, 899 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer); 900 901 IN_MULTI_LOCK_ASSERT(); 902 903 switch (inm->inm_state) { 904 case IGMP_NOT_MEMBER: 905 case IGMP_SILENT_MEMBER: 906 break; 907 case IGMP_REPORTING_MEMBER: 908 if (inm->inm_timer != 0 && 909 inm->inm_timer <= timer) { 910 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, " 911 "skipping.", __func__); 912 break; 913 } 914 /* FALLTHROUGH */ 915 case IGMP_SG_QUERY_PENDING_MEMBER: 916 case IGMP_G_QUERY_PENDING_MEMBER: 917 case IGMP_IDLE_MEMBER: 918 case IGMP_LAZY_MEMBER: 919 case IGMP_AWAKENING_MEMBER: 920 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__); 921 inm->inm_state = IGMP_REPORTING_MEMBER; 922 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 923 V_current_state_timers_running = 1; 924 break; 925 case IGMP_SLEEPING_MEMBER: 926 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__); 927 inm->inm_state = IGMP_AWAKENING_MEMBER; 928 break; 929 case IGMP_LEAVING_MEMBER: 930 break; 931 } 932} 933 934/* 935 * Process a received IGMPv3 general, group-specific or 936 * group-and-source-specific query. 937 * Assumes m has already been pulled up to the full IGMP message length. 938 * Return 0 if successful, otherwise an appropriate error code is returned. 
939 */ 940static int 941igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, 942 /*const*/ struct igmpv3 *igmpv3) 943{ 944 INIT_VNET_INET(ifp->if_vnet); 945 struct igmp_ifinfo *igi; 946 struct in_multi *inm; 947 int is_general_query; 948 uint32_t maxresp, nsrc, qqi; 949 uint16_t timer; 950 uint8_t qrv; 951 952 is_general_query = 0; 953 954 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname); 955 956 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */ 957 if (maxresp >= 128) { 958 maxresp = IGMP_MANT(igmpv3->igmp_code) << 959 (IGMP_EXP(igmpv3->igmp_code) + 3); 960 } 961 962 /* 963 * Robustness must never be less than 2 for on-wire IGMPv3. 964 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make 965 * an exception for interfaces whose IGMPv3 state changes 966 * are redirected to loopback (e.g. MANET). 967 */ 968 qrv = IGMP_QRV(igmpv3->igmp_misc); 969 if (qrv < 2) { 970 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__, 971 qrv, IGMP_RV_INIT); 972 qrv = IGMP_RV_INIT; 973 } 974 975 qqi = igmpv3->igmp_qqi; 976 if (qqi >= 128) { 977 qqi = IGMP_MANT(igmpv3->igmp_qqi) << 978 (IGMP_EXP(igmpv3->igmp_qqi) + 3); 979 } 980 981 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE; 982 if (timer == 0) 983 timer = 1; 984 985 nsrc = ntohs(igmpv3->igmp_numsrc); 986 987 /* 988 * Validate address fields and versions upfront before 989 * accepting v3 query. 990 * XXX SMPng: Unlocked access to igmpstat counters here. 991 */ 992 if (in_nullhost(igmpv3->igmp_group)) { 993 /* 994 * IGMPv3 General Query. 995 * 996 * General Queries SHOULD be directed to 224.0.0.1. 997 * A general query with a source list has undefined 998 * behaviour; discard it. 999 */ 1000 IGMPSTAT_INC(igps_rcv_gen_queries); 1001 if (!in_allhosts(ip->ip_dst) || nsrc > 0) { 1002 IGMPSTAT_INC(igps_rcv_badqueries); 1003 return (0); 1004 } 1005 is_general_query = 1; 1006 } else { 1007 /* Group or group-source specific query. */ 1008 if (nsrc == 0) 1009 IGMPSTAT_INC(igps_rcv_group_queries); 1010 else 1011 IGMPSTAT_INC(igps_rcv_gsr_queries); 1012 } 1013 1014 IN_MULTI_LOCK(); 1015 IGMP_LOCK(); 1016 1017 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 1018 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 1019 1020 if (igi->igi_flags & IGIF_LOOPBACK) { 1021 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)", 1022 ifp, ifp->if_xname); 1023 goto out_locked; 1024 } 1025 1026 /* 1027 * Discard the v3 query if we're in Compatibility Mode. 1028 * The RFC is not obviously worded that hosts need to stay in 1029 * compatibility mode until the Old Version Querier Present 1030 * timer expires. 1031 */ 1032 if (igi->igi_version != IGMP_VERSION_3) { 1033 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)", 1034 igi->igi_version, ifp, ifp->if_xname); 1035 goto out_locked; 1036 } 1037 1038 igmp_set_version(igi, IGMP_VERSION_3); 1039 igi->igi_rv = qrv; 1040 igi->igi_qi = qqi; 1041 igi->igi_qri = maxresp; 1042 1043 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi, 1044 maxresp); 1045 1046 if (is_general_query) { 1047 /* 1048 * Schedule a current-state report on this ifp for 1049 * all groups, possibly containing source lists. 1050 * If there is a pending General Query response 1051 * scheduled earlier than the selected delay, do 1052 * not schedule any other reports. 1053 * Otherwise, reset the interface timer. 
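		 * The interface timer (igi_v3_timer) runs in fasttimo ticks
		 * and is jittered by IGMP_RANDOM_DELAY() so that responses
		 * are spread over the query's Max Resp Time.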
		 */
		CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
			igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
			V_interface_timers_running = 1;
		}
	} else {
		/*
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		inm = inm_lookup(ifp, igmpv3->igmp_group);
		if (inm == NULL)
			goto out_locked;
		if (nsrc > 0) {
			if (!ratecheck(&inm->inm_lastgsrtv,
			    &V_igmp_gsrdelay)) {
				CTR1(KTR_IGMPV3, "%s: GS query throttled.",
				    __func__);
				IGMPSTAT_INC(igps_drop_gsr_queries);
				goto out_locked;
			}
		}
		CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
		    inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
			igmp_input_v3_group_query(inm, igi, timer, igmpv3);
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
	INIT_VNET_INET(curvnet);
	int retval;
	uint16_t nsrc;

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	retval = 0;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LEAVING_MEMBER:
		return (retval);
		break;
	case IGMP_REPORTING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
		    inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
			inm_clear_recorded(inm);
			timer = min(inm->inm_timer, timer);
		}
		inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->inm_timer, timer);
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 * FIXME: Handling source lists larger than 1 mbuf requires that
	 * we pass the mbuf chain pointer down to this function, and use
	 * m_getptr() to walk the chain.
	 */
	if (inm->inm_nsrc > 0) {
		const struct in_addr *ap;
		int i, nrecorded;

		ap = (const struct in_addr *)(igmpv3 + 1);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++, ap++) {
			retval = inm_record_source(inm, ap->s_addr);
			if (retval < 0)
				break;
			nrecorded += retval;
		}
		if (nrecorded > 0) {
			CTR1(KTR_IGMPV3,
			    "%s: schedule response to SG query", __func__);
			inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(timer);
			V_current_state_timers_running = 1;
		}
	}

	return (retval);
}

/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	INIT_VNET_INET(ifp->if_vnet);
	struct in_ifaddr *ia;
	struct in_multi *inm;

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		IFP_TO_IA(ifp, ia);
		if (ia != NULL) {
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
			ifa_free(&ia->ia_ifa);
		}
	}

	CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
	    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);

	/*
	 * IGMPv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, stop our group timer and transition to the 'lazy' state.
	 */
	IN_MULTI_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifinfo *igi;

		igi = inm->inm_igi;
		if (igi == NULL) {
			KASSERT(igi != NULL,
			    ("%s: no igi for ifp %p", __func__, ifp));
			goto out_locked;
		}

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv1 report to suppress our reports
		 * unless explicitly configured to do so.
1268 */ 1269 if (igi->igi_version == IGMP_VERSION_3) { 1270 if (V_igmp_legacysupp) 1271 igmp_v3_suppress_group_record(inm); 1272 goto out_locked; 1273 } 1274 1275 inm->inm_timer = 0; 1276 1277 switch (inm->inm_state) { 1278 case IGMP_NOT_MEMBER: 1279 case IGMP_SILENT_MEMBER: 1280 break; 1281 case IGMP_IDLE_MEMBER: 1282 case IGMP_LAZY_MEMBER: 1283 case IGMP_AWAKENING_MEMBER: 1284 CTR3(KTR_IGMPV3, 1285 "report suppressed for %s on ifp %p(%s)", 1286 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1287 case IGMP_SLEEPING_MEMBER: 1288 inm->inm_state = IGMP_SLEEPING_MEMBER; 1289 break; 1290 case IGMP_REPORTING_MEMBER: 1291 CTR3(KTR_IGMPV3, 1292 "report suppressed for %s on ifp %p(%s)", 1293 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1294 if (igi->igi_version == IGMP_VERSION_1) 1295 inm->inm_state = IGMP_LAZY_MEMBER; 1296 else if (igi->igi_version == IGMP_VERSION_2) 1297 inm->inm_state = IGMP_SLEEPING_MEMBER; 1298 break; 1299 case IGMP_G_QUERY_PENDING_MEMBER: 1300 case IGMP_SG_QUERY_PENDING_MEMBER: 1301 case IGMP_LEAVING_MEMBER: 1302 break; 1303 } 1304 } 1305 1306out_locked: 1307 IN_MULTI_UNLOCK(); 1308 1309 return (0); 1310} 1311 1312/* 1313 * Process a received IGMPv2 host membership report. 1314 * 1315 * NOTE: 0.0.0.0 workaround breaks const correctness. 1316 */ 1317static int 1318igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip, 1319 /*const*/ struct igmp *igmp) 1320{ 1321 INIT_VNET_INET(ifp->if_vnet); 1322 struct in_ifaddr *ia; 1323 struct in_multi *inm; 1324 1325 /* 1326 * Make sure we don't hear our own membership report. Fast 1327 * leave requires knowing that we are the only member of a 1328 * group. 1329 */ 1330 IFP_TO_IA(ifp, ia); 1331 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) { 1332 ifa_free(&ia->ia_ifa); 1333 return (0); 1334 } 1335 1336 IGMPSTAT_INC(igps_rcv_reports); 1337 1338 if (ifp->if_flags & IFF_LOOPBACK) { 1339 if (ia != NULL) 1340 ifa_free(&ia->ia_ifa); 1341 return (0); 1342 } 1343 1344 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) || 1345 !in_hosteq(igmp->igmp_group, ip->ip_dst)) { 1346 if (ia != NULL) 1347 ifa_free(&ia->ia_ifa); 1348 IGMPSTAT_INC(igps_rcv_badreports); 1349 return (EINVAL); 1350 } 1351 1352 /* 1353 * RFC 3376, Section 4.2.13, 9.2, 9.3: 1354 * Booting clients may use the source address 0.0.0.0. Some 1355 * IGMP daemons may not know how to use IP_RECVIF to determine 1356 * the interface upon which this message was received. 1357 * Replace 0.0.0.0 with the subnet address if told to do so. 1358 */ 1359 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) { 1360 if (ia != NULL) 1361 ip->ip_src.s_addr = htonl(ia->ia_subnet); 1362 } 1363 if (ia != NULL) 1364 ifa_free(&ia->ia_ifa); 1365 1366 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)", 1367 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1368 1369 /* 1370 * IGMPv2 report suppression. 1371 * If we are a member of this group, and our membership should be 1372 * reported, and our group timer is pending or about to be reset, 1373 * stop our group timer by transitioning to the 'lazy' state. 1374 */ 1375 IN_MULTI_LOCK(); 1376 inm = inm_lookup(ifp, igmp->igmp_group); 1377 if (inm != NULL) { 1378 struct igmp_ifinfo *igi; 1379 1380 igi = inm->inm_igi; 1381 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp)); 1382 1383 IGMPSTAT_INC(igps_rcv_ourreports); 1384 1385 /* 1386 * If we are in IGMPv3 host mode, do not allow the 1387 * other host's IGMPv1 report to suppress our reports 1388 * unless explicitly configured to do so. 
1389 */ 1390 if (igi->igi_version == IGMP_VERSION_3) { 1391 if (V_igmp_legacysupp) 1392 igmp_v3_suppress_group_record(inm); 1393 goto out_locked; 1394 } 1395 1396 inm->inm_timer = 0; 1397 1398 switch (inm->inm_state) { 1399 case IGMP_NOT_MEMBER: 1400 case IGMP_SILENT_MEMBER: 1401 case IGMP_SLEEPING_MEMBER: 1402 break; 1403 case IGMP_REPORTING_MEMBER: 1404 case IGMP_IDLE_MEMBER: 1405 case IGMP_AWAKENING_MEMBER: 1406 CTR3(KTR_IGMPV3, 1407 "report suppressed for %s on ifp %p(%s)", 1408 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1409 case IGMP_LAZY_MEMBER: 1410 inm->inm_state = IGMP_LAZY_MEMBER; 1411 break; 1412 case IGMP_G_QUERY_PENDING_MEMBER: 1413 case IGMP_SG_QUERY_PENDING_MEMBER: 1414 case IGMP_LEAVING_MEMBER: 1415 break; 1416 } 1417 } 1418 1419out_locked: 1420 IN_MULTI_UNLOCK(); 1421 1422 return (0); 1423} 1424 1425void 1426igmp_input(struct mbuf *m, int off) 1427{ 1428 int iphlen; 1429 struct ifnet *ifp; 1430 struct igmp *igmp; 1431 struct ip *ip; 1432 int igmplen; 1433 int minlen; 1434 int queryver; 1435 1436 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off); 1437 1438 ifp = m->m_pkthdr.rcvif; 1439 INIT_VNET_INET(ifp->if_vnet); 1440 1441 IGMPSTAT_INC(igps_rcv_total); 1442 1443 ip = mtod(m, struct ip *); 1444 iphlen = off; 1445 igmplen = ip->ip_len; 1446 1447 /* 1448 * Validate lengths. 1449 */ 1450 if (igmplen < IGMP_MINLEN) { 1451 IGMPSTAT_INC(igps_rcv_tooshort); 1452 m_freem(m); 1453 return; 1454 } 1455 1456 /* 1457 * Always pullup to the minimum size for v1/v2 or v3 1458 * to amortize calls to m_pullup(). 1459 */ 1460 minlen = iphlen; 1461 if (igmplen >= IGMP_V3_QUERY_MINLEN) 1462 minlen += IGMP_V3_QUERY_MINLEN; 1463 else 1464 minlen += IGMP_MINLEN; 1465 if ((m->m_flags & M_EXT || m->m_len < minlen) && 1466 (m = m_pullup(m, minlen)) == 0) { 1467 IGMPSTAT_INC(igps_rcv_tooshort); 1468 return; 1469 } 1470 ip = mtod(m, struct ip *); 1471 1472 if (ip->ip_ttl != 1) { 1473 IGMPSTAT_INC(igps_rcv_badttl); 1474 m_freem(m); 1475 return; 1476 } 1477 1478 /* 1479 * Validate checksum. 1480 */ 1481 m->m_data += iphlen; 1482 m->m_len -= iphlen; 1483 igmp = mtod(m, struct igmp *); 1484 if (in_cksum(m, igmplen)) { 1485 IGMPSTAT_INC(igps_rcv_badsum); 1486 m_freem(m); 1487 return; 1488 } 1489 m->m_data -= iphlen; 1490 m->m_len += iphlen; 1491 1492 switch (igmp->igmp_type) { 1493 case IGMP_HOST_MEMBERSHIP_QUERY: 1494 if (igmplen == IGMP_MINLEN) { 1495 if (igmp->igmp_code == 0) 1496 queryver = IGMP_VERSION_1; 1497 else 1498 queryver = IGMP_VERSION_2; 1499 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) { 1500 queryver = IGMP_VERSION_3; 1501 } else { 1502 IGMPSTAT_INC(igps_rcv_tooshort); 1503 m_freem(m); 1504 return; 1505 } 1506 1507 switch (queryver) { 1508 case IGMP_VERSION_1: 1509 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1510 if (!V_igmp_v1enable) 1511 break; 1512 if (igmp_input_v1_query(ifp, ip, igmp) != 0) { 1513 m_freem(m); 1514 return; 1515 } 1516 break; 1517 1518 case IGMP_VERSION_2: 1519 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1520 if (!V_igmp_v2enable) 1521 break; 1522 if (igmp_input_v2_query(ifp, ip, igmp) != 0) { 1523 m_freem(m); 1524 return; 1525 } 1526 break; 1527 1528 case IGMP_VERSION_3: { 1529 struct igmpv3 *igmpv3; 1530 uint16_t igmpv3len; 1531 uint16_t srclen; 1532 int nsrc; 1533 1534 IGMPSTAT_INC(igps_rcv_v3_queries); 1535 igmpv3 = (struct igmpv3 *)igmp; 1536 /* 1537 * Validate length based on source count. 
1538 */ 1539 nsrc = ntohs(igmpv3->igmp_numsrc); 1540 srclen = sizeof(struct in_addr) * nsrc; 1541 if (nsrc * sizeof(in_addr_t) > srclen) { 1542 IGMPSTAT_INC(igps_rcv_tooshort); 1543 return; 1544 } 1545 /* 1546 * m_pullup() may modify m, so pullup in 1547 * this scope. 1548 */ 1549 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN + 1550 srclen; 1551 if ((m->m_flags & M_EXT || 1552 m->m_len < igmpv3len) && 1553 (m = m_pullup(m, igmpv3len)) == NULL) { 1554 IGMPSTAT_INC(igps_rcv_tooshort); 1555 return; 1556 } 1557 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *) 1558 + iphlen); 1559 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) { 1560 m_freem(m); 1561 return; 1562 } 1563 } 1564 break; 1565 } 1566 break; 1567 1568 case IGMP_v1_HOST_MEMBERSHIP_REPORT: 1569 if (!V_igmp_v1enable) 1570 break; 1571 if (igmp_input_v1_report(ifp, ip, igmp) != 0) { 1572 m_freem(m); 1573 return; 1574 } 1575 break; 1576 1577 case IGMP_v2_HOST_MEMBERSHIP_REPORT: 1578 if (!V_igmp_v2enable) 1579 break; 1580 if (!ip_checkrouteralert(m)) 1581 IGMPSTAT_INC(igps_rcv_nora); 1582 if (igmp_input_v2_report(ifp, ip, igmp) != 0) { 1583 m_freem(m); 1584 return; 1585 } 1586 break; 1587 1588 case IGMP_v3_HOST_MEMBERSHIP_REPORT: 1589 /* 1590 * Hosts do not need to process IGMPv3 membership reports, 1591 * as report suppression is no longer required. 1592 */ 1593 if (!ip_checkrouteralert(m)) 1594 IGMPSTAT_INC(igps_rcv_nora); 1595 break; 1596 1597 default: 1598 break; 1599 } 1600 1601 /* 1602 * Pass all valid IGMP packets up to any process(es) listening on a 1603 * raw IGMP socket. 1604 */ 1605 rip_input(m, off); 1606} 1607 1608 1609/* 1610 * Fast timeout handler (global). 1611 * VIMAGE: Timeout handlers are expected to service all vimages. 1612 */ 1613void 1614igmp_fasttimo(void) 1615{ 1616 VNET_ITERATOR_DECL(vnet_iter); 1617 1618 VNET_LIST_RLOCK(); 1619 VNET_FOREACH(vnet_iter) { 1620 CURVNET_SET(vnet_iter); 1621 igmp_fasttimo_vnet(); 1622 CURVNET_RESTORE(); 1623 } 1624 VNET_LIST_RUNLOCK(); 1625} 1626 1627/* 1628 * Fast timeout handler (per-vnet). 1629 * Sends are shuffled off to a netisr to deal with Giant. 1630 * 1631 * VIMAGE: Assume caller has set up our curvnet. 1632 */ 1633static void 1634igmp_fasttimo_vnet(void) 1635{ 1636 INIT_VNET_INET(curvnet); 1637 struct ifqueue scq; /* State-change packets */ 1638 struct ifqueue qrq; /* Query response packets */ 1639 struct ifnet *ifp; 1640 struct igmp_ifinfo *igi; 1641 struct ifmultiaddr *ifma, *tifma; 1642 struct in_multi *inm; 1643 int loop, uri_fasthz; 1644 1645 loop = 0; 1646 uri_fasthz = 0; 1647 1648 /* 1649 * Quick check to see if any work needs to be done, in order to 1650 * minimize the overhead of fasttimo processing. 1651 * SMPng: XXX Unlocked reads. 1652 */ 1653 if (!V_current_state_timers_running && 1654 !V_interface_timers_running && 1655 !V_state_change_timers_running) 1656 return; 1657 1658 IN_MULTI_LOCK(); 1659 IGMP_LOCK(); 1660 1661 /* 1662 * IGMPv3 General Query response timer processing. 1663 */ 1664 if (V_interface_timers_running) { 1665 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__); 1666 1667 V_interface_timers_running = 0; 1668 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1669 if (igi->igi_v3_timer == 0) { 1670 /* Do nothing. 
*/ 1671 } else if (--igi->igi_v3_timer == 0) { 1672 igmp_v3_dispatch_general_query(igi); 1673 } else { 1674 V_interface_timers_running = 1; 1675 } 1676 } 1677 } 1678 1679 if (!V_current_state_timers_running && 1680 !V_state_change_timers_running) 1681 goto out_locked; 1682 1683 V_current_state_timers_running = 0; 1684 V_state_change_timers_running = 0; 1685 1686 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__); 1687 1688 /* 1689 * IGMPv1/v2/v3 host report and state-change timer processing. 1690 * Note: Processing a v3 group timer may remove a node. 1691 */ 1692 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1693 ifp = igi->igi_ifp; 1694 1695 if (igi->igi_version == IGMP_VERSION_3) { 1696 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 1697 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri * 1698 PR_FASTHZ); 1699 1700 memset(&qrq, 0, sizeof(struct ifqueue)); 1701 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS); 1702 1703 memset(&scq, 0, sizeof(struct ifqueue)); 1704 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS); 1705 } 1706 1707 IF_ADDR_LOCK(ifp); 1708 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, 1709 tifma) { 1710 if (ifma->ifma_addr->sa_family != AF_INET || 1711 ifma->ifma_protospec == NULL) 1712 continue; 1713 inm = (struct in_multi *)ifma->ifma_protospec; 1714 switch (igi->igi_version) { 1715 case IGMP_VERSION_1: 1716 case IGMP_VERSION_2: 1717 igmp_v1v2_process_group_timer(inm, 1718 igi->igi_version); 1719 break; 1720 case IGMP_VERSION_3: 1721 igmp_v3_process_group_timers(igi, &qrq, 1722 &scq, inm, uri_fasthz); 1723 break; 1724 } 1725 } 1726 IF_ADDR_UNLOCK(ifp); 1727 1728 if (igi->igi_version == IGMP_VERSION_3) { 1729 struct in_multi *tinm; 1730 1731 igmp_dispatch_queue(&qrq, 0, loop); 1732 igmp_dispatch_queue(&scq, 0, loop); 1733 1734 /* 1735 * Free the in_multi reference(s) for this 1736 * IGMP lifecycle. 1737 */ 1738 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, 1739 inm_nrele, tinm) { 1740 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, 1741 inm_nrele); 1742 inm_release_locked(inm); 1743 } 1744 } 1745 } 1746 1747out_locked: 1748 IGMP_UNLOCK(); 1749 IN_MULTI_UNLOCK(); 1750} 1751 1752/* 1753 * Update host report group timer for IGMPv1/v2. 1754 * Will update the global pending timer flags. 1755 */ 1756static void 1757igmp_v1v2_process_group_timer(struct in_multi *inm, const int version) 1758{ 1759 INIT_VNET_INET(curvnet); 1760 int report_timer_expired; 1761 1762 IN_MULTI_LOCK_ASSERT(); 1763 IGMP_LOCK_ASSERT(); 1764 1765 if (inm->inm_timer == 0) { 1766 report_timer_expired = 0; 1767 } else if (--inm->inm_timer == 0) { 1768 report_timer_expired = 1; 1769 } else { 1770 V_current_state_timers_running = 1; 1771 return; 1772 } 1773 1774 switch (inm->inm_state) { 1775 case IGMP_NOT_MEMBER: 1776 case IGMP_SILENT_MEMBER: 1777 case IGMP_IDLE_MEMBER: 1778 case IGMP_LAZY_MEMBER: 1779 case IGMP_SLEEPING_MEMBER: 1780 case IGMP_AWAKENING_MEMBER: 1781 break; 1782 case IGMP_REPORTING_MEMBER: 1783 if (report_timer_expired) { 1784 inm->inm_state = IGMP_IDLE_MEMBER; 1785 (void)igmp_v1v2_queue_report(inm, 1786 (version == IGMP_VERSION_2) ? 1787 IGMP_v2_HOST_MEMBERSHIP_REPORT : 1788 IGMP_v1_HOST_MEMBERSHIP_REPORT); 1789 } 1790 break; 1791 case IGMP_G_QUERY_PENDING_MEMBER: 1792 case IGMP_SG_QUERY_PENDING_MEMBER: 1793 case IGMP_LEAVING_MEMBER: 1794 break; 1795 } 1796} 1797 1798/* 1799 * Update a group's timers for IGMPv3. 1800 * Will update the global pending timer flags. 1801 * Note: Unlocked read from igi. 
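 * The uri_fasthz argument is the link's Unsolicited Report Interval
 * (igi_uri), converted to fasttimo ticks and pre-jittered by the caller,
 * igmp_fasttimo_vnet().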
1802 */ 1803static void 1804igmp_v3_process_group_timers(struct igmp_ifinfo *igi, 1805 struct ifqueue *qrq, struct ifqueue *scq, 1806 struct in_multi *inm, const int uri_fasthz) 1807{ 1808 INIT_VNET_INET(curvnet); 1809 int query_response_timer_expired; 1810 int state_change_retransmit_timer_expired; 1811 1812 IN_MULTI_LOCK_ASSERT(); 1813 IGMP_LOCK_ASSERT(); 1814 1815 query_response_timer_expired = 0; 1816 state_change_retransmit_timer_expired = 0; 1817 1818 /* 1819 * During a transition from v1/v2 compatibility mode back to v3, 1820 * a group record in REPORTING state may still have its group 1821 * timer active. This is a no-op in this function; it is easier 1822 * to deal with it here than to complicate the slow-timeout path. 1823 */ 1824 if (inm->inm_timer == 0) { 1825 query_response_timer_expired = 0; 1826 } else if (--inm->inm_timer == 0) { 1827 query_response_timer_expired = 1; 1828 } else { 1829 V_current_state_timers_running = 1; 1830 } 1831 1832 if (inm->inm_sctimer == 0) { 1833 state_change_retransmit_timer_expired = 0; 1834 } else if (--inm->inm_sctimer == 0) { 1835 state_change_retransmit_timer_expired = 1; 1836 } else { 1837 V_state_change_timers_running = 1; 1838 } 1839 1840 /* We are in fasttimo, so be quick about it. */ 1841 if (!state_change_retransmit_timer_expired && 1842 !query_response_timer_expired) 1843 return; 1844 1845 switch (inm->inm_state) { 1846 case IGMP_NOT_MEMBER: 1847 case IGMP_SILENT_MEMBER: 1848 case IGMP_SLEEPING_MEMBER: 1849 case IGMP_LAZY_MEMBER: 1850 case IGMP_AWAKENING_MEMBER: 1851 case IGMP_IDLE_MEMBER: 1852 break; 1853 case IGMP_G_QUERY_PENDING_MEMBER: 1854 case IGMP_SG_QUERY_PENDING_MEMBER: 1855 /* 1856 * Respond to a previously pending Group-Specific 1857 * or Group-and-Source-Specific query by enqueueing 1858 * the appropriate Current-State report for 1859 * immediate transmission. 1860 */ 1861 if (query_response_timer_expired) { 1862 int retval; 1863 1864 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1, 1865 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)); 1866 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 1867 __func__, retval); 1868 inm->inm_state = IGMP_REPORTING_MEMBER; 1869 /* XXX Clear recorded sources for next time. */ 1870 inm_clear_recorded(inm); 1871 } 1872 /* FALLTHROUGH */ 1873 case IGMP_REPORTING_MEMBER: 1874 case IGMP_LEAVING_MEMBER: 1875 if (state_change_retransmit_timer_expired) { 1876 /* 1877 * State-change retransmission timer fired. 1878 * If there are any further pending retransmissions, 1879 * set the global pending state-change flag, and 1880 * reset the timer. 1881 */ 1882 if (--inm->inm_scrv > 0) { 1883 inm->inm_sctimer = uri_fasthz; 1884 V_state_change_timers_running = 1; 1885 } 1886 /* 1887 * Retransmit the previously computed state-change 1888 * report. If there are no further pending 1889 * retransmissions, the mbuf queue will be consumed. 1890 * Update T0 state to T1 as we have now sent 1891 * a state-change. 1892 */ 1893 (void)igmp_v3_merge_state_changes(inm, scq); 1894 1895 inm_commit(inm); 1896 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 1897 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 1898 1899 /* 1900 * If we are leaving the group for good, make sure 1901 * we release IGMP's reference to it. 1902 * This release must be deferred using a SLIST, 1903 * as we are called from a loop which traverses 1904 * the in_ifmultiaddr TAILQ. 
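			 * The deferred references are finally released by
			 * igmp_fasttimo_vnet() once IF_ADDR_LOCK has been
			 * dropped.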
			 */
			if (inm->inm_state == IGMP_LEAVING_MEMBER &&
			    inm->inm_scrv == 0) {
				inm->inm_state = IGMP_NOT_MEMBER;
				SLIST_INSERT_HEAD(&igi->igi_relinmhead,
				    inm, inm_nrele);
			}
		}
		break;
	}
}


/*
 * Suppress a group's pending response to a group or source/group query.
 *
 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
 * Do NOT update ST1/ST0 as this operation merely suppresses
 * the currently pending group record.
 * Do NOT suppress the response to a general query. It is possible but
 * it would require adding another state or flag.
 */
static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{

	IN_MULTI_LOCK_ASSERT();

	KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
	    ("%s: not IGMPv3 mode on link", __func__));

	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
		return;

	if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
		inm_clear_recorded(inm);

	inm->inm_timer = 0;
	inm->inm_state = IGMP_REPORTING_MEMBER;
}

/*
 * Switch to a different IGMP version on the given interface,
 * as per Section 7.2.1.
 */
static void
igmp_set_version(struct igmp_ifinfo *igi, const int version)
{
	int old_version_timer;

	IGMP_LOCK_ASSERT();

	CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
	    version, igi->igi_ifp, igi->igi_ifp->if_xname);

	if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 8.12.
		 */
		old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
		old_version_timer *= PR_SLOWHZ;

		if (version == IGMP_VERSION_1) {
			igi->igi_v1_timer = old_version_timer;
			igi->igi_v2_timer = 0;
		} else if (version == IGMP_VERSION_2) {
			igi->igi_v1_timer = 0;
			igi->igi_v2_timer = old_version_timer;
		}
	}

	if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
		if (igi->igi_version != IGMP_VERSION_2) {
			igi->igi_version = IGMP_VERSION_2;
			igmp_v3_cancel_link_timers(igi);
		}
	} else if (igi->igi_v1_timer > 0) {
		if (igi->igi_version != IGMP_VERSION_1) {
			igi->igi_version = IGMP_VERSION_1;
			igmp_v3_cancel_link_timers(igi);
		}
	}
}

/*
 * Cancel pending IGMPv3 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 *
 * Only ever called on a transition from v3 to Compatibility mode. Kill
 * the timers stone dead (this may be expensive for large N groups); they
 * will be restarted if Compatibility Mode deems that they must be due to
 * query processing.
 */
static void
igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
{
	struct ifmultiaddr *ifma;
	struct ifnet *ifp;
	struct in_multi *inm;

	CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
	    igi->igi_ifp, igi->igi_ifp->if_xname);

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	/*
	 * Stop the v3 General Query Response on this link stone dead.
	 * If fasttimo is woken up due to V_interface_timers_running,
	 * the flag will be cleared if there are no pending link timers.
2017 */ 2018 igi->igi_v3_timer = 0; 2019 2020 /* 2021 * Now clear the current-state and state-change report timers 2022 * for all memberships scoped to this link. 2023 */ 2024 ifp = igi->igi_ifp; 2025 IF_ADDR_LOCK(ifp); 2026 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2027 if (ifma->ifma_addr->sa_family != AF_INET || 2028 ifma->ifma_protospec == NULL) 2029 continue; 2030 inm = (struct in_multi *)ifma->ifma_protospec; 2031 switch (inm->inm_state) { 2032 case IGMP_NOT_MEMBER: 2033 case IGMP_SILENT_MEMBER: 2034 case IGMP_IDLE_MEMBER: 2035 case IGMP_LAZY_MEMBER: 2036 case IGMP_SLEEPING_MEMBER: 2037 case IGMP_AWAKENING_MEMBER: 2038 /* 2039 * These states are either not relevant in v3 mode, 2040 * or are unreported. Do nothing. 2041 */ 2042 break; 2043 case IGMP_LEAVING_MEMBER: 2044 /* 2045 * If we are leaving the group and switching to 2046 * compatibility mode, we need to release the final 2047 * reference held for issuing the INCLUDE {}, and 2048 * transition to REPORTING to ensure the host leave 2049 * message is sent upstream to the old querier -- 2050 * transition to NOT would lose the leave and race. 2051 * 2052 * SMPNG: Must drop and re-acquire IF_ADDR_LOCK 2053 * around inm_release_locked(), as it is not 2054 * a recursive mutex. 2055 */ 2056 IF_ADDR_UNLOCK(ifp); 2057 inm_release_locked(inm); 2058 IF_ADDR_LOCK(ifp); 2059 /* FALLTHROUGH */ 2060 case IGMP_G_QUERY_PENDING_MEMBER: 2061 case IGMP_SG_QUERY_PENDING_MEMBER: 2062 inm_clear_recorded(inm); 2063 /* FALLTHROUGH */ 2064 case IGMP_REPORTING_MEMBER: 2065 inm->inm_state = IGMP_REPORTING_MEMBER; 2066 break; 2067 } 2068 /* 2069 * Always clear state-change and group report timers. 2070 * Free any pending IGMPv3 state-change records. 2071 */ 2072 inm->inm_sctimer = 0; 2073 inm->inm_timer = 0; 2074 _IF_DRAIN(&inm->inm_scq); 2075 } 2076 IF_ADDR_UNLOCK(ifp); 2077} 2078 2079/* 2080 * Update the Older Version Querier Present timers for a link. 2081 * See Section 7.2.1 of RFC 3376. 2082 */ 2083static void 2084igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi) 2085{ 2086 INIT_VNET_INET(curvnet); 2087 2088 IGMP_LOCK_ASSERT(); 2089 2090 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) { 2091 /* 2092 * IGMPv1 and IGMPv2 Querier Present timers expired. 2093 * 2094 * Revert to IGMPv3. 2095 */ 2096 if (igi->igi_version != IGMP_VERSION_3) { 2097 CTR5(KTR_IGMPV3, 2098 "%s: transition from v%d -> v%d on %p(%s)", 2099 __func__, igi->igi_version, IGMP_VERSION_3, 2100 igi->igi_ifp, igi->igi_ifp->if_xname); 2101 igi->igi_version = IGMP_VERSION_3; 2102 } 2103 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 2104 /* 2105 * IGMPv1 Querier Present timer expired, 2106 * IGMPv2 Querier Present timer running. 2107 * If IGMPv2 was disabled since last timeout, 2108 * revert to IGMPv3. 2109 * If IGMPv2 is enabled, revert to IGMPv2. 2110 */ 2111 if (!V_igmp_v2enable) { 2112 CTR5(KTR_IGMPV3, 2113 "%s: transition from v%d -> v%d on %p(%s)", 2114 __func__, igi->igi_version, IGMP_VERSION_3, 2115 igi->igi_ifp, igi->igi_ifp->if_xname); 2116 igi->igi_v2_timer = 0; 2117 igi->igi_version = IGMP_VERSION_3; 2118 } else { 2119 --igi->igi_v2_timer; 2120 if (igi->igi_version != IGMP_VERSION_2) { 2121 CTR5(KTR_IGMPV3, 2122 "%s: transition from v%d -> v%d on %p(%s)", 2123 __func__, igi->igi_version, IGMP_VERSION_2, 2124 igi->igi_ifp, igi->igi_ifp->if_xname); 2125 igi->igi_version = IGMP_VERSION_2; 2126 } 2127 } 2128 } else if (igi->igi_v1_timer > 0) { 2129 /* 2130 * IGMPv1 Querier Present timer running. 2131 * Stop IGMPv2 timer if running. 
         *
         * If IGMPv1 was disabled since last timeout,
         * revert to IGMPv3.
         * If IGMPv1 is enabled, reset IGMPv2 timer if running.
         */
        if (!V_igmp_v1enable) {
            CTR5(KTR_IGMPV3,
                "%s: transition from v%d -> v%d on %p(%s)",
                __func__, igi->igi_version, IGMP_VERSION_3,
                igi->igi_ifp, igi->igi_ifp->if_xname);
            igi->igi_v1_timer = 0;
            igi->igi_version = IGMP_VERSION_3;
        } else {
            --igi->igi_v1_timer;
        }
        if (igi->igi_v2_timer > 0) {
            CTR3(KTR_IGMPV3,
                "%s: cancel v2 timer on %p(%s)",
                __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
            igi->igi_v2_timer = 0;
        }
    }
}

/*
 * Global slowtimo handler.
 * VIMAGE: Timeout handlers are expected to service all vimages.
 */
void
igmp_slowtimo(void)
{
    VNET_ITERATOR_DECL(vnet_iter);

    VNET_LIST_RLOCK();
    VNET_FOREACH(vnet_iter) {
        CURVNET_SET(vnet_iter);
        igmp_slowtimo_vnet();
        CURVNET_RESTORE();
    }
    VNET_LIST_RUNLOCK();
}

/*
 * Per-vnet slowtimo handler.
 */
static void
igmp_slowtimo_vnet(void)
{
    INIT_VNET_INET(curvnet);
    struct igmp_ifinfo *igi;

    IGMP_LOCK();

    LIST_FOREACH(igi, &V_igi_head, igi_link) {
        igmp_v1v2_process_querier_timers(igi);
    }

    IGMP_UNLOCK();
}

/*
 * Dispatch an IGMPv1/v2 host report or leave message.
 * These are always small enough to fit inside a single mbuf.
 */
static int
igmp_v1v2_queue_report(struct in_multi *inm, const int type)
{
    struct ifnet *ifp;
    struct igmp *igmp;
    struct ip *ip;
    struct mbuf *m;

    IN_MULTI_LOCK_ASSERT();
    IGMP_LOCK_ASSERT();

    ifp = inm->inm_ifp;

    MGETHDR(m, M_DONTWAIT, MT_DATA);
    if (m == NULL)
        return (ENOMEM);
    MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));

    m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);

    m->m_data += sizeof(struct ip);
    m->m_len = sizeof(struct igmp);

    igmp = mtod(m, struct igmp *);
    igmp->igmp_type = type;
    igmp->igmp_code = 0;
    igmp->igmp_group = inm->inm_addr;
    igmp->igmp_cksum = 0;
    igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));

    m->m_data -= sizeof(struct ip);
    m->m_len += sizeof(struct ip);

    ip = mtod(m, struct ip *);
    ip->ip_tos = 0;
    ip->ip_len = sizeof(struct ip) + sizeof(struct igmp);
    ip->ip_off = 0;
    ip->ip_p = IPPROTO_IGMP;
    ip->ip_src.s_addr = INADDR_ANY;

    if (type == IGMP_HOST_LEAVE_MESSAGE)
        ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
    else
        ip->ip_dst = inm->inm_addr;

    igmp_save_context(m, ifp);

    m->m_flags |= M_IGMPV2;
    if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
        m->m_flags |= M_IGMP_LOOP;

    CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
    netisr_dispatch(NETISR_IGMP, m);

    return (0);
}

/*
 * Process a state change from the upper layer for the given IPv4 group.
 *
 * Each socket holds a reference on the in_multi in its own ip_moptions.
 * The socket layer will have made the necessary updates to the group
 * state; it is now up to IGMP to issue a state change report if there
 * has been any change between T0 (when the last state-change was issued)
 * and T1 (now).
 *
 * We use the IGMPv3 state machine at group level.
The IGMP module 2263 * however makes the decision as to which IGMP protocol version to speak. 2264 * A state change *from* INCLUDE {} always means an initial join. 2265 * A state change *to* INCLUDE {} always means a final leave. 2266 * 2267 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can 2268 * save ourselves a bunch of work; any exclusive mode groups need not 2269 * compute source filter lists. 2270 * 2271 * VIMAGE: curvnet should have been set by caller, as this routine 2272 * is called from the socket option handlers. 2273 */ 2274int 2275igmp_change_state(struct in_multi *inm) 2276{ 2277 struct igmp_ifinfo *igi; 2278 struct ifnet *ifp; 2279 int error; 2280 2281 IN_MULTI_LOCK_ASSERT(); 2282 2283 error = 0; 2284 2285 /* 2286 * Try to detect if the upper layer just asked us to change state 2287 * for an interface which has now gone away. 2288 */ 2289 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); 2290 ifp = inm->inm_ifma->ifma_ifp; 2291 if (ifp != NULL) { 2292 /* 2293 * Sanity check that netinet's notion of ifp is the 2294 * same as net's. 2295 */ 2296 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); 2297 } 2298 2299 IGMP_LOCK(); 2300 2301 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 2302 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 2303 2304 /* 2305 * If we detect a state transition to or from MCAST_UNDEFINED 2306 * for this group, then we are starting or finishing an IGMP 2307 * life cycle for this group. 2308 */ 2309 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) { 2310 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__, 2311 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode); 2312 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) { 2313 CTR1(KTR_IGMPV3, "%s: initial join", __func__); 2314 error = igmp_initial_join(inm, igi); 2315 goto out_locked; 2316 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) { 2317 CTR1(KTR_IGMPV3, "%s: final leave", __func__); 2318 igmp_final_leave(inm, igi); 2319 goto out_locked; 2320 } 2321 } else { 2322 CTR1(KTR_IGMPV3, "%s: filter set change", __func__); 2323 } 2324 2325 error = igmp_handle_state_change(inm, igi); 2326 2327out_locked: 2328 IGMP_UNLOCK(); 2329 return (error); 2330} 2331 2332/* 2333 * Perform the initial join for an IGMP group. 2334 * 2335 * When joining a group: 2336 * If the group should have its IGMP traffic suppressed, do nothing. 2337 * IGMPv1 starts sending IGMPv1 host membership reports. 2338 * IGMPv2 starts sending IGMPv2 host membership reports. 2339 * IGMPv3 will schedule an IGMPv3 state-change report containing the 2340 * initial state of the membership. 2341 */ 2342static int 2343igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi) 2344{ 2345 INIT_VNET_INET(curvnet); 2346 struct ifnet *ifp; 2347 struct ifqueue *ifq; 2348 int error, retval, syncstates; 2349 2350 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)", 2351 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2352 inm->inm_ifp->if_xname); 2353 2354 error = 0; 2355 syncstates = 1; 2356 2357 ifp = inm->inm_ifp; 2358 2359 IN_MULTI_LOCK_ASSERT(); 2360 IGMP_LOCK_ASSERT(); 2361 2362 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2363 2364 /* 2365 * Groups joined on loopback or marked as 'not reported', 2366 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and 2367 * are never reported in any IGMP protocol exchanges. 2368 * All other groups enter the appropriate IGMP state machine 2369 * for the version in use on this link. 
2370 * A link marked as IGIF_SILENT causes IGMP to be completely 2371 * disabled for the link. 2372 */ 2373 if ((ifp->if_flags & IFF_LOOPBACK) || 2374 (igi->igi_flags & IGIF_SILENT) || 2375 !igmp_isgroupreported(inm->inm_addr)) { 2376 CTR1(KTR_IGMPV3, 2377"%s: not kicking state machine for silent group", __func__); 2378 inm->inm_state = IGMP_SILENT_MEMBER; 2379 inm->inm_timer = 0; 2380 } else { 2381 /* 2382 * Deal with overlapping in_multi lifecycle. 2383 * If this group was LEAVING, then make sure 2384 * we drop the reference we picked up to keep the 2385 * group around for the final INCLUDE {} enqueue. 2386 */ 2387 if (igi->igi_version == IGMP_VERSION_3 && 2388 inm->inm_state == IGMP_LEAVING_MEMBER) 2389 inm_release_locked(inm); 2390 2391 inm->inm_state = IGMP_REPORTING_MEMBER; 2392 2393 switch (igi->igi_version) { 2394 case IGMP_VERSION_1: 2395 case IGMP_VERSION_2: 2396 inm->inm_state = IGMP_IDLE_MEMBER; 2397 error = igmp_v1v2_queue_report(inm, 2398 (igi->igi_version == IGMP_VERSION_2) ? 2399 IGMP_v2_HOST_MEMBERSHIP_REPORT : 2400 IGMP_v1_HOST_MEMBERSHIP_REPORT); 2401 if (error == 0) { 2402 inm->inm_timer = IGMP_RANDOM_DELAY( 2403 IGMP_V1V2_MAX_RI * PR_FASTHZ); 2404 V_current_state_timers_running = 1; 2405 } 2406 break; 2407 2408 case IGMP_VERSION_3: 2409 /* 2410 * Defer update of T0 to T1, until the first copy 2411 * of the state change has been transmitted. 2412 */ 2413 syncstates = 0; 2414 2415 /* 2416 * Immediately enqueue a State-Change Report for 2417 * this interface, freeing any previous reports. 2418 * Don't kick the timers if there is nothing to do, 2419 * or if an error occurred. 2420 */ 2421 ifq = &inm->inm_scq; 2422 _IF_DRAIN(ifq); 2423 retval = igmp_v3_enqueue_group_record(ifq, inm, 1, 2424 0, 0); 2425 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 2426 __func__, retval); 2427 if (retval <= 0) { 2428 error = retval * -1; 2429 break; 2430 } 2431 2432 /* 2433 * Schedule transmission of pending state-change 2434 * report up to RV times for this link. The timer 2435 * will fire at the next igmp_fasttimo (~200ms), 2436 * giving us an opportunity to merge the reports. 2437 */ 2438 if (igi->igi_flags & IGIF_LOOPBACK) { 2439 inm->inm_scrv = 1; 2440 } else { 2441 KASSERT(igi->igi_rv > 1, 2442 ("%s: invalid robustness %d", __func__, 2443 igi->igi_rv)); 2444 inm->inm_scrv = igi->igi_rv; 2445 } 2446 inm->inm_sctimer = 1; 2447 V_state_change_timers_running = 1; 2448 2449 error = 0; 2450 break; 2451 } 2452 } 2453 2454 /* 2455 * Only update the T0 state if state change is atomic, 2456 * i.e. we don't need to wait for a timer to fire before we 2457 * can consider the state change to have been communicated. 2458 */ 2459 if (syncstates) { 2460 inm_commit(inm); 2461 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2462 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2463 } 2464 2465 return (error); 2466} 2467 2468/* 2469 * Issue an intermediate state change during the IGMP life-cycle. 
2470 */ 2471static int 2472igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi) 2473{ 2474 INIT_VNET_INET(curvnet); 2475 struct ifnet *ifp; 2476 int retval; 2477 2478 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)", 2479 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2480 inm->inm_ifp->if_xname); 2481 2482 ifp = inm->inm_ifp; 2483 2484 IN_MULTI_LOCK_ASSERT(); 2485 IGMP_LOCK_ASSERT(); 2486 2487 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2488 2489 if ((ifp->if_flags & IFF_LOOPBACK) || 2490 (igi->igi_flags & IGIF_SILENT) || 2491 !igmp_isgroupreported(inm->inm_addr) || 2492 (igi->igi_version != IGMP_VERSION_3)) { 2493 if (!igmp_isgroupreported(inm->inm_addr)) { 2494 CTR1(KTR_IGMPV3, 2495"%s: not kicking state machine for silent group", __func__); 2496 } 2497 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__); 2498 inm_commit(inm); 2499 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2500 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2501 return (0); 2502 } 2503 2504 _IF_DRAIN(&inm->inm_scq); 2505 2506 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0); 2507 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval); 2508 if (retval <= 0) 2509 return (-retval); 2510 2511 /* 2512 * If record(s) were enqueued, start the state-change 2513 * report timer for this group. 2514 */ 2515 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv); 2516 inm->inm_sctimer = 1; 2517 V_state_change_timers_running = 1; 2518 2519 return (0); 2520} 2521 2522/* 2523 * Perform the final leave for an IGMP group. 2524 * 2525 * When leaving a group: 2526 * IGMPv1 does nothing. 2527 * IGMPv2 sends a host leave message, if and only if we are the reporter. 2528 * IGMPv3 enqueues a state-change report containing a transition 2529 * to INCLUDE {} for immediate transmission. 2530 */ 2531static void 2532igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi) 2533{ 2534 INIT_VNET_INET(curvnet); 2535 int syncstates; 2536 2537 syncstates = 1; 2538 2539 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)", 2540 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2541 inm->inm_ifp->if_xname); 2542 2543 IN_MULTI_LOCK_ASSERT(); 2544 IGMP_LOCK_ASSERT(); 2545 2546 switch (inm->inm_state) { 2547 case IGMP_NOT_MEMBER: 2548 case IGMP_SILENT_MEMBER: 2549 case IGMP_LEAVING_MEMBER: 2550 /* Already leaving or left; do nothing. */ 2551 CTR1(KTR_IGMPV3, 2552"%s: not kicking state machine for silent group", __func__); 2553 break; 2554 case IGMP_REPORTING_MEMBER: 2555 case IGMP_IDLE_MEMBER: 2556 case IGMP_G_QUERY_PENDING_MEMBER: 2557 case IGMP_SG_QUERY_PENDING_MEMBER: 2558 if (igi->igi_version == IGMP_VERSION_2) { 2559#ifdef INVARIANTS 2560 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER || 2561 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) 2562 panic("%s: IGMPv3 state reached, not IGMPv3 mode", 2563 __func__); 2564#endif 2565 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE); 2566 inm->inm_state = IGMP_NOT_MEMBER; 2567 } else if (igi->igi_version == IGMP_VERSION_3) { 2568 /* 2569 * Stop group timer and all pending reports. 2570 * Immediately enqueue a state-change report 2571 * TO_IN {} to be sent on the next fast timeout, 2572 * giving us an opportunity to merge reports. 
2573 */ 2574 _IF_DRAIN(&inm->inm_scq); 2575 inm->inm_timer = 0; 2576 if (igi->igi_flags & IGIF_LOOPBACK) { 2577 inm->inm_scrv = 1; 2578 } else { 2579 inm->inm_scrv = igi->igi_rv; 2580 } 2581 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d " 2582 "pending retransmissions.", __func__, 2583 inet_ntoa(inm->inm_addr), 2584 inm->inm_ifp->if_xname, inm->inm_scrv); 2585 if (inm->inm_scrv == 0) { 2586 inm->inm_state = IGMP_NOT_MEMBER; 2587 inm->inm_sctimer = 0; 2588 } else { 2589 int retval; 2590 2591 inm_acquire_locked(inm); 2592 2593 retval = igmp_v3_enqueue_group_record( 2594 &inm->inm_scq, inm, 1, 0, 0); 2595 KASSERT(retval != 0, 2596 ("%s: enqueue record = %d", __func__, 2597 retval)); 2598 2599 inm->inm_state = IGMP_LEAVING_MEMBER; 2600 inm->inm_sctimer = 1; 2601 V_state_change_timers_running = 1; 2602 syncstates = 0; 2603 } 2604 break; 2605 } 2606 break; 2607 case IGMP_LAZY_MEMBER: 2608 case IGMP_SLEEPING_MEMBER: 2609 case IGMP_AWAKENING_MEMBER: 2610 /* Our reports are suppressed; do nothing. */ 2611 break; 2612 } 2613 2614 if (syncstates) { 2615 inm_commit(inm); 2616 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2617 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2618 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 2619 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s", 2620 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2621 } 2622} 2623 2624/* 2625 * Enqueue an IGMPv3 group record to the given output queue. 2626 * 2627 * XXX This function could do with having the allocation code 2628 * split out, and the multiple-tree-walks coalesced into a single 2629 * routine as has been done in igmp_v3_enqueue_filter_change(). 2630 * 2631 * If is_state_change is zero, a current-state record is appended. 2632 * If is_state_change is non-zero, a state-change report is appended. 2633 * 2634 * If is_group_query is non-zero, an mbuf packet chain is allocated. 2635 * If is_group_query is zero, and if there is a packet with free space 2636 * at the tail of the queue, it will be appended to providing there 2637 * is enough free space. 2638 * Otherwise a new mbuf packet chain is allocated. 2639 * 2640 * If is_source_query is non-zero, each source is checked to see if 2641 * it was recorded for a Group-Source query, and will be omitted if 2642 * it is not both in-mode and recorded. 2643 * 2644 * The function will attempt to allocate leading space in the packet 2645 * for the IP/IGMP header to be prepended without fragmenting the chain. 2646 * 2647 * If successful the size of all data appended to the queue is returned, 2648 * otherwise an error code less than zero is returned, or zero if 2649 * no record(s) were appended. 
2650 */ 2651static int 2652igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, 2653 const int is_state_change, const int is_group_query, 2654 const int is_source_query) 2655{ 2656 struct igmp_grouprec ig; 2657 struct igmp_grouprec *pig; 2658 struct ifnet *ifp; 2659 struct ip_msource *ims, *nims; 2660 struct mbuf *m0, *m, *md; 2661 int error, is_filter_list_change; 2662 int minrec0len, m0srcs, msrcs, nbytes, off; 2663 int record_has_sources; 2664 int now; 2665 int type; 2666 in_addr_t naddr; 2667 uint8_t mode; 2668 2669 IN_MULTI_LOCK_ASSERT(); 2670 2671 error = 0; 2672 ifp = inm->inm_ifp; 2673 is_filter_list_change = 0; 2674 m = NULL; 2675 m0 = NULL; 2676 m0srcs = 0; 2677 msrcs = 0; 2678 nbytes = 0; 2679 nims = NULL; 2680 record_has_sources = 1; 2681 pig = NULL; 2682 type = IGMP_DO_NOTHING; 2683 mode = inm->inm_st[1].iss_fmode; 2684 2685 /* 2686 * If we did not transition out of ASM mode during t0->t1, 2687 * and there are no source nodes to process, we can skip 2688 * the generation of source records. 2689 */ 2690 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 && 2691 inm->inm_nsrc == 0) 2692 record_has_sources = 0; 2693 2694 if (is_state_change) { 2695 /* 2696 * Queue a state change record. 2697 * If the mode did not change, and there are non-ASM 2698 * listeners or source filters present, 2699 * we potentially need to issue two records for the group. 2700 * If we are transitioning to MCAST_UNDEFINED, we need 2701 * not send any sources. 2702 * If there are ASM listeners, and there was no filter 2703 * mode transition of any kind, do nothing. 2704 */ 2705 if (mode != inm->inm_st[0].iss_fmode) { 2706 if (mode == MCAST_EXCLUDE) { 2707 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE", 2708 __func__); 2709 type = IGMP_CHANGE_TO_EXCLUDE_MODE; 2710 } else { 2711 CTR1(KTR_IGMPV3, "%s: change to INCLUDE", 2712 __func__); 2713 type = IGMP_CHANGE_TO_INCLUDE_MODE; 2714 if (mode == MCAST_UNDEFINED) 2715 record_has_sources = 0; 2716 } 2717 } else { 2718 if (record_has_sources) { 2719 is_filter_list_change = 1; 2720 } else { 2721 type = IGMP_DO_NOTHING; 2722 } 2723 } 2724 } else { 2725 /* 2726 * Queue a current state record. 2727 */ 2728 if (mode == MCAST_EXCLUDE) { 2729 type = IGMP_MODE_IS_EXCLUDE; 2730 } else if (mode == MCAST_INCLUDE) { 2731 type = IGMP_MODE_IS_INCLUDE; 2732 KASSERT(inm->inm_st[1].iss_asm == 0, 2733 ("%s: inm %p is INCLUDE but ASM count is %d", 2734 __func__, inm, inm->inm_st[1].iss_asm)); 2735 } 2736 } 2737 2738 /* 2739 * Generate the filter list changes using a separate function. 2740 */ 2741 if (is_filter_list_change) 2742 return (igmp_v3_enqueue_filter_change(ifq, inm)); 2743 2744 if (type == IGMP_DO_NOTHING) { 2745 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s", 2746 __func__, inet_ntoa(inm->inm_addr), 2747 inm->inm_ifp->if_xname); 2748 return (0); 2749 } 2750 2751 /* 2752 * If any sources are present, we must be able to fit at least 2753 * one in the trailing space of the tail packet's mbuf, 2754 * ideally more. 2755 */ 2756 minrec0len = sizeof(struct igmp_grouprec); 2757 if (record_has_sources) 2758 minrec0len += sizeof(in_addr_t); 2759 2760 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__, 2761 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr), 2762 inm->inm_ifp->if_xname); 2763 2764 /* 2765 * Check if we have a packet in the tail of the queue for this 2766 * group into which the first group record for this group will fit. 2767 * Otherwise allocate a new packet. 2768 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT. 
2769 * Note: Group records for G/GSR query responses MUST be sent 2770 * in their own packet. 2771 */ 2772 m0 = ifq->ifq_tail; 2773 if (!is_group_query && 2774 m0 != NULL && 2775 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && 2776 (m0->m_pkthdr.len + minrec0len) < 2777 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 2778 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2779 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2780 m = m0; 2781 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__); 2782 } else { 2783 if (_IF_QFULL(ifq)) { 2784 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2785 return (-ENOMEM); 2786 } 2787 m = NULL; 2788 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2789 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2790 if (!is_state_change && !is_group_query) { 2791 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2792 if (m) 2793 m->m_data += IGMP_LEADINGSPACE; 2794 } 2795 if (m == NULL) { 2796 m = m_gethdr(M_DONTWAIT, MT_DATA); 2797 if (m) 2798 MH_ALIGN(m, IGMP_LEADINGSPACE); 2799 } 2800 if (m == NULL) 2801 return (-ENOMEM); 2802 2803 igmp_save_context(m, ifp); 2804 2805 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__); 2806 } 2807 2808 /* 2809 * Append group record. 2810 * If we have sources, we don't know how many yet. 2811 */ 2812 ig.ig_type = type; 2813 ig.ig_datalen = 0; 2814 ig.ig_numsrc = 0; 2815 ig.ig_group = inm->inm_addr; 2816 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2817 if (m != m0) 2818 m_freem(m); 2819 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2820 return (-ENOMEM); 2821 } 2822 nbytes += sizeof(struct igmp_grouprec); 2823 2824 /* 2825 * Append as many sources as will fit in the first packet. 2826 * If we are appending to a new packet, the chain allocation 2827 * may potentially use clusters; use m_getptr() in this case. 2828 * If we are appending to an existing packet, we need to obtain 2829 * a pointer to the group record after m_append(), in case a new 2830 * mbuf was allocated. 2831 * Only append sources which are in-mode at t1. If we are 2832 * transitioning to MCAST_UNDEFINED state on the group, do not 2833 * include source entries. 2834 * Only report recorded sources in our filter set when responding 2835 * to a group-source query. 
2836 */ 2837 if (record_has_sources) { 2838 if (m == m0) { 2839 md = m_last(m); 2840 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2841 md->m_len - nbytes); 2842 } else { 2843 md = m_getptr(m, 0, &off); 2844 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2845 off); 2846 } 2847 msrcs = 0; 2848 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) { 2849 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2850 inet_ntoa_haddr(ims->ims_haddr)); 2851 now = ims_get_mode(inm, ims, 1); 2852 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now); 2853 if ((now != mode) || 2854 (now == mode && mode == MCAST_UNDEFINED)) { 2855 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2856 continue; 2857 } 2858 if (is_source_query && ims->ims_stp == 0) { 2859 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2860 __func__); 2861 continue; 2862 } 2863 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2864 naddr = htonl(ims->ims_haddr); 2865 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2866 if (m != m0) 2867 m_freem(m); 2868 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2869 __func__); 2870 return (-ENOMEM); 2871 } 2872 nbytes += sizeof(in_addr_t); 2873 ++msrcs; 2874 if (msrcs == m0srcs) 2875 break; 2876 } 2877 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__, 2878 msrcs); 2879 pig->ig_numsrc = htons(msrcs); 2880 nbytes += (msrcs * sizeof(in_addr_t)); 2881 } 2882 2883 if (is_source_query && msrcs == 0) { 2884 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__); 2885 if (m != m0) 2886 m_freem(m); 2887 return (0); 2888 } 2889 2890 /* 2891 * We are good to go with first packet. 2892 */ 2893 if (m != m0) { 2894 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__); 2895 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2896 _IF_ENQUEUE(ifq, m); 2897 } else 2898 m->m_pkthdr.PH_vt.vt_nrecs++; 2899 2900 /* 2901 * No further work needed if no source list in packet(s). 2902 */ 2903 if (!record_has_sources) 2904 return (nbytes); 2905 2906 /* 2907 * Whilst sources remain to be announced, we need to allocate 2908 * a new packet and fill out as many sources as will fit. 2909 * Always try for a cluster first. 
2910 */ 2911 while (nims != NULL) { 2912 if (_IF_QFULL(ifq)) { 2913 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2914 return (-ENOMEM); 2915 } 2916 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2917 if (m) 2918 m->m_data += IGMP_LEADINGSPACE; 2919 if (m == NULL) { 2920 m = m_gethdr(M_DONTWAIT, MT_DATA); 2921 if (m) 2922 MH_ALIGN(m, IGMP_LEADINGSPACE); 2923 } 2924 if (m == NULL) 2925 return (-ENOMEM); 2926 igmp_save_context(m, ifp); 2927 md = m_getptr(m, 0, &off); 2928 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off); 2929 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__); 2930 2931 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2932 if (m != m0) 2933 m_freem(m); 2934 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2935 return (-ENOMEM); 2936 } 2937 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2938 nbytes += sizeof(struct igmp_grouprec); 2939 2940 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2941 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2942 2943 msrcs = 0; 2944 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 2945 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2946 inet_ntoa_haddr(ims->ims_haddr)); 2947 now = ims_get_mode(inm, ims, 1); 2948 if ((now != mode) || 2949 (now == mode && mode == MCAST_UNDEFINED)) { 2950 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2951 continue; 2952 } 2953 if (is_source_query && ims->ims_stp == 0) { 2954 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2955 __func__); 2956 continue; 2957 } 2958 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2959 naddr = htonl(ims->ims_haddr); 2960 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2961 if (m != m0) 2962 m_freem(m); 2963 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2964 __func__); 2965 return (-ENOMEM); 2966 } 2967 ++msrcs; 2968 if (msrcs == m0srcs) 2969 break; 2970 } 2971 pig->ig_numsrc = htons(msrcs); 2972 nbytes += (msrcs * sizeof(in_addr_t)); 2973 2974 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__); 2975 _IF_ENQUEUE(ifq, m); 2976 } 2977 2978 return (nbytes); 2979} 2980 2981/* 2982 * Type used to mark record pass completion. 2983 * We exploit the fact we can cast to this easily from the 2984 * current filter modes on each ip_msource node. 2985 */ 2986typedef enum { 2987 REC_NONE = 0x00, /* MCAST_UNDEFINED */ 2988 REC_ALLOW = 0x01, /* MCAST_INCLUDE */ 2989 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ 2990 REC_FULL = REC_ALLOW | REC_BLOCK 2991} rectype_t; 2992 2993/* 2994 * Enqueue an IGMPv3 filter list change to the given output queue. 2995 * 2996 * Source list filter state is held in an RB-tree. When the filter list 2997 * for a group is changed without changing its mode, we need to compute 2998 * the deltas between T0 and T1 for each source in the filter set, 2999 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. 3000 * 3001 * As we may potentially queue two record types, and the entire R-B tree 3002 * needs to be walked at once, we break this out into its own function 3003 * so we can generate a tightly packed queue of packets. 3004 * 3005 * XXX This could be written to only use one tree walk, although that makes 3006 * serializing into the mbuf chains a bit harder. For now we do two walks 3007 * which makes things easier on us, and it may or may not be harder on 3008 * the L2 cache. 3009 * 3010 * If successful the size of all data appended to the queue is returned, 3011 * otherwise an error code less than zero is returned, or zero if 3012 * no record(s) were appended. 
3013 */ 3014static int 3015igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) 3016{ 3017 static const int MINRECLEN = 3018 sizeof(struct igmp_grouprec) + sizeof(in_addr_t); 3019 struct ifnet *ifp; 3020 struct igmp_grouprec ig; 3021 struct igmp_grouprec *pig; 3022 struct ip_msource *ims, *nims; 3023 struct mbuf *m, *m0, *md; 3024 in_addr_t naddr; 3025 int m0srcs, nbytes, npbytes, off, rsrcs, schanged; 3026 int nallow, nblock; 3027 uint8_t mode, now, then; 3028 rectype_t crt, drt, nrt; 3029 3030 IN_MULTI_LOCK_ASSERT(); 3031 3032 if (inm->inm_nsrc == 0 || 3033 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) 3034 return (0); 3035 3036 ifp = inm->inm_ifp; /* interface */ 3037 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */ 3038 crt = REC_NONE; /* current group record type */ 3039 drt = REC_NONE; /* mask of completed group record types */ 3040 nrt = REC_NONE; /* record type for current node */ 3041 m0srcs = 0; /* # source which will fit in current mbuf chain */ 3042 nbytes = 0; /* # of bytes appended to group's state-change queue */ 3043 npbytes = 0; /* # of bytes appended this packet */ 3044 rsrcs = 0; /* # sources encoded in current record */ 3045 schanged = 0; /* # nodes encoded in overall filter change */ 3046 nallow = 0; /* # of source entries in ALLOW_NEW */ 3047 nblock = 0; /* # of source entries in BLOCK_OLD */ 3048 nims = NULL; /* next tree node pointer */ 3049 3050 /* 3051 * For each possible filter record mode. 3052 * The first kind of source we encounter tells us which 3053 * is the first kind of record we start appending. 3054 * If a node transitioned to UNDEFINED at t1, its mode is treated 3055 * as the inverse of the group's filter mode. 3056 */ 3057 while (drt != REC_FULL) { 3058 do { 3059 m0 = ifq->ifq_tail; 3060 if (m0 != NULL && 3061 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= 3062 IGMP_V3_REPORT_MAXRECS) && 3063 (m0->m_pkthdr.len + MINRECLEN) < 3064 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 3065 m = m0; 3066 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 3067 sizeof(struct igmp_grouprec)) / 3068 sizeof(in_addr_t); 3069 CTR1(KTR_IGMPV3, 3070 "%s: use previous packet", __func__); 3071 } else { 3072 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3073 if (m) 3074 m->m_data += IGMP_LEADINGSPACE; 3075 if (m == NULL) { 3076 m = m_gethdr(M_DONTWAIT, MT_DATA); 3077 if (m) 3078 MH_ALIGN(m, IGMP_LEADINGSPACE); 3079 } 3080 if (m == NULL) { 3081 CTR1(KTR_IGMPV3, 3082 "%s: m_get*() failed", __func__); 3083 return (-ENOMEM); 3084 } 3085 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3086 igmp_save_context(m, ifp); 3087 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 3088 sizeof(struct igmp_grouprec)) / 3089 sizeof(in_addr_t); 3090 npbytes = 0; 3091 CTR1(KTR_IGMPV3, 3092 "%s: allocated new packet", __func__); 3093 } 3094 /* 3095 * Append the IGMP group record header to the 3096 * current packet's data area. 3097 * Recalculate pointer to free space for next 3098 * group record, in case m_append() allocated 3099 * a new mbuf or cluster. 
             */
            memset(&ig, 0, sizeof(ig));
            ig.ig_group = inm->inm_addr;
            if (!m_append(m, sizeof(ig), (void *)&ig)) {
                if (m != m0)
                    m_freem(m);
                CTR1(KTR_IGMPV3,
                    "%s: m_append() failed", __func__);
                return (-ENOMEM);
            }
            npbytes += sizeof(struct igmp_grouprec);
            if (m != m0) {
                /* new packet; offset in chain */
                md = m_getptr(m, npbytes -
                    sizeof(struct igmp_grouprec), &off);
                pig = (struct igmp_grouprec *)(mtod(md,
                    uint8_t *) + off);
            } else {
                /* current packet; offset from last append */
                md = m_last(m);
                pig = (struct igmp_grouprec *)(mtod(md,
                    uint8_t *) + md->m_len -
                    sizeof(struct igmp_grouprec));
            }
            /*
             * Begin walking the tree for this record type
             * pass, or continue from where we left off
             * previously if we had to allocate a new packet.
             * Only report deltas in-mode at t1.
             * We need not report included sources as allowed
             * if we are in inclusive mode on the group,
             * however the converse is not true.
             */
            rsrcs = 0;
            if (nims == NULL)
                nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
            RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
                CTR2(KTR_IGMPV3, "%s: visit node %s",
                    __func__, inet_ntoa_haddr(ims->ims_haddr));
                now = ims_get_mode(inm, ims, 1);
                then = ims_get_mode(inm, ims, 0);
                CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
                    __func__, then, now);
                if (now == then) {
                    CTR1(KTR_IGMPV3,
                        "%s: skip unchanged", __func__);
                    continue;
                }
                if (mode == MCAST_EXCLUDE &&
                    now == MCAST_INCLUDE) {
                    CTR1(KTR_IGMPV3,
                        "%s: skip IN src on EX group",
                        __func__);
                    continue;
                }
                nrt = (rectype_t)now;
                if (nrt == REC_NONE)
                    nrt = (rectype_t)(~mode & REC_FULL);
                if (schanged++ == 0) {
                    crt = nrt;
                } else if (crt != nrt)
                    continue;
                naddr = htonl(ims->ims_haddr);
                if (!m_append(m, sizeof(in_addr_t),
                    (void *)&naddr)) {
                    if (m != m0)
                        m_freem(m);
                    CTR1(KTR_IGMPV3,
                        "%s: m_append() failed", __func__);
                    return (-ENOMEM);
                }
                nallow += !!(crt == REC_ALLOW);
                nblock += !!(crt == REC_BLOCK);
                if (++rsrcs == m0srcs)
                    break;
            }
            /*
             * If we did not append any tree nodes on this
             * pass, back out of allocations.
             */
            if (rsrcs == 0) {
                npbytes -= sizeof(struct igmp_grouprec);
                if (m != m0) {
                    CTR1(KTR_IGMPV3,
                        "%s: m_free(m)", __func__);
                    m_freem(m);
                } else {
                    CTR1(KTR_IGMPV3,
                        "%s: m_adj(m, -ig)", __func__);
                    m_adj(m, -((int)sizeof(
                        struct igmp_grouprec)));
                }
                continue;
            }
            npbytes += (rsrcs * sizeof(in_addr_t));
            if (crt == REC_ALLOW)
                pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
            else if (crt == REC_BLOCK)
                pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
            pig->ig_numsrc = htons(rsrcs);
            /*
             * Count the new group record, and enqueue this
             * packet if it wasn't already queued.
             */
            m->m_pkthdr.PH_vt.vt_nrecs++;
            if (m != m0)
                _IF_ENQUEUE(ifq, m);
            nbytes += npbytes;
        } while (nims != NULL);
        drt |= crt;
        crt = (~crt & REC_FULL);
    }

    CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
        nallow, nblock);

    return (nbytes);
}

static int
igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
{
    struct ifqueue *gq;
    struct mbuf *m;     /* pending state-change */
    struct mbuf *m0;    /* copy of pending state-change */
    struct mbuf *mt;    /* last state-change in packet */
    int docopy, domerge;
    u_int recslen;

    docopy = 0;
    domerge = 0;
    recslen = 0;

    IN_MULTI_LOCK_ASSERT();
    IGMP_LOCK_ASSERT();

    /*
     * If there are further pending retransmissions, make a writable
     * copy of each queued state-change message before merging.
     */
    if (inm->inm_scrv > 0)
        docopy = 1;

    gq = &inm->inm_scq;
#ifdef KTR
    if (gq->ifq_head == NULL) {
        CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
            __func__, inm);
    }
#endif

    m = gq->ifq_head;
    while (m != NULL) {
        /*
         * Only merge the report into the current packet if
         * there is sufficient space to do so; an IGMPv3 report
         * packet may only contain 65,535 group records.
         * Always use a simple mbuf chain concatenation to do this,
         * as large state changes for single groups may have
         * allocated clusters.
         */
        domerge = 0;
        mt = ifscq->ifq_tail;
        if (mt != NULL) {
            recslen = m_length(m, NULL);

            if ((mt->m_pkthdr.PH_vt.vt_nrecs +
                m->m_pkthdr.PH_vt.vt_nrecs <=
                IGMP_V3_REPORT_MAXRECS) &&
                (mt->m_pkthdr.len + recslen <=
                (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
                domerge = 1;
        }

        if (!domerge && _IF_QFULL(gq)) {
            CTR2(KTR_IGMPV3,
                "%s: outbound queue full, skipping whole packet %p",
                __func__, m);
            mt = m->m_nextpkt;
            if (!docopy)
                m_freem(m);
            m = mt;
            continue;
        }

        if (!docopy) {
            CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
            _IF_DEQUEUE(gq, m0);
            m = m0->m_nextpkt;
        } else {
            CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
            m0 = m_dup(m, M_NOWAIT);
            if (m0 == NULL)
                return (ENOMEM);
            m0->m_nextpkt = NULL;
            m = m->m_nextpkt;
        }

        if (!domerge) {
            CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)",
                __func__, m0, ifscq);
            _IF_ENQUEUE(ifscq, m0);
        } else {
            struct mbuf *mtl;   /* last mbuf of packet mt */

            CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)",
                __func__, m0, mt);

            mtl = m_last(mt);
            m0->m_flags &= ~M_PKTHDR;
            mt->m_pkthdr.len += recslen;
            mt->m_pkthdr.PH_vt.vt_nrecs +=
                m0->m_pkthdr.PH_vt.vt_nrecs;

            mtl->m_next = m0;
        }
    }

    return (0);
}

/*
 * Respond to a pending IGMPv3 General Query.
3323 */ 3324static void 3325igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi) 3326{ 3327 INIT_VNET_INET(curvnet); 3328 struct ifmultiaddr *ifma, *tifma; 3329 struct ifnet *ifp; 3330 struct in_multi *inm; 3331 int retval, loop; 3332 3333 IN_MULTI_LOCK_ASSERT(); 3334 IGMP_LOCK_ASSERT(); 3335 3336 KASSERT(igi->igi_version == IGMP_VERSION_3, 3337 ("%s: called when version %d", __func__, igi->igi_version)); 3338 3339 ifp = igi->igi_ifp; 3340 3341 IF_ADDR_LOCK(ifp); 3342 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, tifma) { 3343 if (ifma->ifma_addr->sa_family != AF_INET || 3344 ifma->ifma_protospec == NULL) 3345 continue; 3346 3347 inm = (struct in_multi *)ifma->ifma_protospec; 3348 KASSERT(ifp == inm->inm_ifp, 3349 ("%s: inconsistent ifp", __func__)); 3350 3351 switch (inm->inm_state) { 3352 case IGMP_NOT_MEMBER: 3353 case IGMP_SILENT_MEMBER: 3354 break; 3355 case IGMP_REPORTING_MEMBER: 3356 case IGMP_IDLE_MEMBER: 3357 case IGMP_LAZY_MEMBER: 3358 case IGMP_SLEEPING_MEMBER: 3359 case IGMP_AWAKENING_MEMBER: 3360 inm->inm_state = IGMP_REPORTING_MEMBER; 3361 retval = igmp_v3_enqueue_group_record(&igi->igi_gq, 3362 inm, 0, 0, 0); 3363 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 3364 __func__, retval); 3365 break; 3366 case IGMP_G_QUERY_PENDING_MEMBER: 3367 case IGMP_SG_QUERY_PENDING_MEMBER: 3368 case IGMP_LEAVING_MEMBER: 3369 break; 3370 } 3371 } 3372 IF_ADDR_UNLOCK(ifp); 3373 3374 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 3375 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop); 3376 3377 /* 3378 * Slew transmission of bursts over 500ms intervals. 3379 */ 3380 if (igi->igi_gq.ifq_head != NULL) { 3381 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY( 3382 IGMP_RESPONSE_BURST_INTERVAL); 3383 V_interface_timers_running = 1; 3384 } 3385} 3386 3387/* 3388 * Transmit the next pending IGMP message in the output queue. 3389 * 3390 * We get called from netisr_processqueue(). A mutex private to igmpoq 3391 * will be acquired and released around this routine. 3392 * 3393 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis. 3394 * MRT: Nothing needs to be done, as IGMP traffic is always local to 3395 * a link and uses a link-scope multicast address. 3396 */ 3397static void 3398igmp_intr(struct mbuf *m) 3399{ 3400 struct ip_moptions imo; 3401 struct ifnet *ifp; 3402 struct mbuf *ipopts, *m0; 3403 int error; 3404 uint32_t ifindex; 3405 3406 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m); 3407 3408 /* 3409 * Set VNET image pointer from enqueued mbuf chain 3410 * before doing anything else. Whilst we use interface 3411 * indexes to guard against interface detach, they are 3412 * unique to each VIMAGE and must be retrieved. 3413 */ 3414 CURVNET_SET((struct vnet *)(m->m_pkthdr.header)); 3415 INIT_VNET_NET(curvnet); 3416 INIT_VNET_INET(curvnet); 3417 ifindex = igmp_restore_context(m); 3418 3419 /* 3420 * Check if the ifnet still exists. This limits the scope of 3421 * any race in the absence of a global ifp lock for low cost 3422 * (an array lookup). 3423 */ 3424 ifp = ifnet_byindex(ifindex); 3425 if (ifp == NULL) { 3426 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.", 3427 __func__, m, ifindex); 3428 m_freem(m); 3429 IPSTAT_INC(ips_noroute); 3430 goto out; 3431 } 3432 3433 ipopts = V_igmp_sendra ? 
m_raopt : NULL; 3434 3435 imo.imo_multicast_ttl = 1; 3436 imo.imo_multicast_vif = -1; 3437 imo.imo_multicast_loop = (V_ip_mrouter != NULL); 3438 3439 /* 3440 * If the user requested that IGMP traffic be explicitly 3441 * redirected to the loopback interface (e.g. they are running a 3442 * MANET interface and the routing protocol needs to see the 3443 * updates), handle this now. 3444 */ 3445 if (m->m_flags & M_IGMP_LOOP) 3446 imo.imo_multicast_ifp = V_loif; 3447 else 3448 imo.imo_multicast_ifp = ifp; 3449 3450 if (m->m_flags & M_IGMPV2) { 3451 m0 = m; 3452 } else { 3453 m0 = igmp_v3_encap_report(ifp, m); 3454 if (m0 == NULL) { 3455 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m); 3456 m_freem(m); 3457 IPSTAT_INC(ips_odropped); 3458 goto out; 3459 } 3460 } 3461 3462 igmp_scrub_context(m0); 3463 m->m_flags &= ~(M_PROTOFLAGS); 3464 m0->m_pkthdr.rcvif = V_loif; 3465#ifdef MAC 3466 mac_netinet_igmp_send(ifp, m0); 3467#endif 3468 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL); 3469 if (error) { 3470 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error); 3471 goto out; 3472 } 3473 3474 IGMPSTAT_INC(igps_snd_reports); 3475 3476out: 3477 /* 3478 * We must restore the existing vnet pointer before 3479 * continuing as we are run from netisr context. 3480 */ 3481 CURVNET_RESTORE(); 3482} 3483 3484/* 3485 * Encapsulate an IGMPv3 report. 3486 * 3487 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf 3488 * chain has already had its IP/IGMPv3 header prepended. In this case 3489 * the function will not attempt to prepend; the lengths and checksums 3490 * will however be re-computed. 3491 * 3492 * Returns a pointer to the new mbuf chain head, or NULL if the 3493 * allocation failed. 3494 */ 3495static struct mbuf * 3496igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) 3497{ 3498 INIT_VNET_INET(curvnet); 3499 struct igmp_report *igmp; 3500 struct ip *ip; 3501 int hdrlen, igmpreclen; 3502 3503 KASSERT((m->m_flags & M_PKTHDR), 3504 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m)); 3505 3506 igmpreclen = m_length(m, NULL); 3507 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report); 3508 3509 if (m->m_flags & M_IGMPV3_HDR) { 3510 igmpreclen -= hdrlen; 3511 } else { 3512 M_PREPEND(m, hdrlen, M_DONTWAIT); 3513 if (m == NULL) 3514 return (NULL); 3515 m->m_flags |= M_IGMPV3_HDR; 3516 } 3517 3518 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen); 3519 3520 m->m_data += sizeof(struct ip); 3521 m->m_len -= sizeof(struct ip); 3522 3523 igmp = mtod(m, struct igmp_report *); 3524 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT; 3525 igmp->ir_rsv1 = 0; 3526 igmp->ir_rsv2 = 0; 3527 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs); 3528 igmp->ir_cksum = 0; 3529 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen); 3530 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3531 3532 m->m_data -= sizeof(struct ip); 3533 m->m_len += sizeof(struct ip); 3534 3535 ip = mtod(m, struct ip *); 3536 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL; 3537 ip->ip_len = hdrlen + igmpreclen; 3538 ip->ip_off = IP_DF; 3539 ip->ip_p = IPPROTO_IGMP; 3540 ip->ip_sum = 0; 3541 3542 ip->ip_src.s_addr = INADDR_ANY; 3543 3544 if (m->m_flags & M_IGMP_LOOP) { 3545 struct in_ifaddr *ia; 3546 3547 IFP_TO_IA(ifp, ia); 3548 if (ia != NULL) { 3549 ip->ip_src = ia->ia_addr.sin_addr; 3550 ifa_free(&ia->ia_ifa); 3551 } 3552 } 3553 3554 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP); 3555 3556 return (m); 3557} 3558 3559#ifdef KTR 3560static char * 3561igmp_rec_type_to_str(const int type) 3562{ 3563 3564 switch 
(type) { 3565 case IGMP_CHANGE_TO_EXCLUDE_MODE: 3566 return "TO_EX"; 3567 break; 3568 case IGMP_CHANGE_TO_INCLUDE_MODE: 3569 return "TO_IN"; 3570 break; 3571 case IGMP_MODE_IS_EXCLUDE: 3572 return "MODE_EX"; 3573 break; 3574 case IGMP_MODE_IS_INCLUDE: 3575 return "MODE_IN"; 3576 break; 3577 case IGMP_ALLOW_NEW_SOURCES: 3578 return "ALLOW_NEW"; 3579 break; 3580 case IGMP_BLOCK_OLD_SOURCES: 3581 return "BLOCK_OLD"; 3582 break; 3583 default: 3584 break; 3585 } 3586 return "unknown"; 3587} 3588#endif 3589 3590static void 3591igmp_sysinit(void) 3592{ 3593 3594 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3595 3596 IGMP_LOCK_INIT(); 3597 3598 m_raopt = igmp_ra_alloc(); 3599 3600 netisr_register(&igmp_nh); 3601} 3602 3603static void 3604igmp_sysuninit(void) 3605{ 3606 3607 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3608 3609 netisr_unregister(&igmp_nh); 3610 3611 m_free(m_raopt); 3612 m_raopt = NULL; 3613 3614 IGMP_LOCK_DESTROY(); 3615} 3616 3617/* 3618 * Initialize an IGMPv3 instance. 3619 * VIMAGE: Assumes curvnet set by caller and called per vimage. 3620 */ 3621static int 3622vnet_igmp_iattach(const void *unused __unused) 3623{ 3624 INIT_VNET_INET(curvnet); 3625 3626 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3627 3628 LIST_INIT(&V_igi_head); 3629 3630 V_current_state_timers_running = 0; 3631 V_state_change_timers_running = 0; 3632 V_interface_timers_running = 0; 3633 3634 /* 3635 * Initialize sysctls to default values. 3636 */ 3637 V_igmp_recvifkludge = 1; 3638 V_igmp_sendra = 1; 3639 V_igmp_sendlocal = 1; 3640 V_igmp_v1enable = 1; 3641 V_igmp_v2enable = 1; 3642 V_igmp_legacysupp = 0; 3643 V_igmp_default_version = IGMP_VERSION_3; 3644 V_igmp_gsrdelay.tv_sec = 10; 3645 V_igmp_gsrdelay.tv_usec = 0; 3646 3647 memset(&V_igmpstat, 0, sizeof(struct igmpstat)); 3648 V_igmpstat.igps_version = IGPS_VERSION_3; 3649 V_igmpstat.igps_len = sizeof(struct igmpstat); 3650 3651 return (0); 3652} 3653 3654static int 3655vnet_igmp_idetach(const void *unused __unused) 3656{ 3657#ifdef INVARIANTS 3658 INIT_VNET_INET(curvnet); 3659#endif 3660 3661 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3662 3663 KASSERT(LIST_EMPTY(&V_igi_head), 3664 ("%s: igi list not empty; ifnets not detached?", __func__)); 3665 3666 return (0); 3667} 3668 3669#ifndef VIMAGE_GLOBALS 3670static vnet_modinfo_t vnet_igmp_modinfo = { 3671 .vmi_id = VNET_MOD_IGMP, 3672 .vmi_name = "igmp", 3673 .vmi_dependson = VNET_MOD_INET, 3674 .vmi_iattach = vnet_igmp_iattach, 3675 .vmi_idetach = vnet_igmp_idetach 3676}; 3677#endif 3678 3679static int 3680igmp_modevent(module_t mod, int type, void *unused __unused) 3681{ 3682 3683 switch (type) { 3684 case MOD_LOAD: 3685 igmp_sysinit(); 3686#ifndef VIMAGE_GLOBALS 3687 vnet_mod_register(&vnet_igmp_modinfo); 3688#else 3689 vnet_igmp_iattach(NULL); 3690#endif 3691 break; 3692 case MOD_UNLOAD: 3693#ifndef VIMAGE_GLOBALS 3694 vnet_mod_deregister(&vnet_igmp_modinfo); 3695#else 3696 vnet_igmp_idetach(NULL); 3697#endif 3698 igmp_sysuninit(); 3699 break; 3700 default: 3701 return (EOPNOTSUPP); 3702 } 3703 return (0); 3704} 3705 3706static moduledata_t igmp_mod = { 3707 "igmp", 3708 igmp_modevent, 3709 0 3710}; 3711DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3712
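
/*
 * Editor's sketch (not part of igmp.c): the per-group handling in
 * igmp_v3_process_group_timers() earlier in this file uses one fixed
 * countdown idiom per timer: zero means idle, a decrement that reaches
 * zero fires the action now, and anything still pending re-arms a global
 * "keep the fast timeout running" flag.  The names below are hypothetical
 * stand-ins for inm_timer and V_current_state_timers_running.
 */
#include <stdbool.h>

struct sketch_group {
    int timer;                      /* fast-timeout ticks until a pending response fires */
};

static bool sketch_timers_running;  /* stand-in for the per-vnet "running" flag */

/* Returns true exactly on the tick where the timer expires. */
static bool
sketch_countdown(struct sketch_group *g)
{
    if (g->timer == 0)
        return (false);             /* nothing pending */
    if (--g->timer == 0)
        return (true);              /* fire on this tick */
    sketch_timers_running = true;   /* still pending: keep fasttimo armed */
    return (false);
}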
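
/*
 * Editor's sketch (not part of igmp.c): igmp_set_version() computes the
 * "Older Version Querier Present" timeout of RFC 3376 Section 8.12 as
 * Robustness Variable * Query Interval + Query Response Interval, then
 * converts seconds into slow-timeout ticks.  PR_SLOWHZ is not shown in
 * this excerpt; 2 ticks per second is assumed below for illustration.
 */
#include <stdio.h>

#define SKETCH_PR_SLOWHZ 2  /* assumed: slow timeouts per second */

static int
sketch_old_querier_present_ticks(int rv, int qi_secs, int qri_secs)
{
    return ((rv * qi_secs + qri_secs) * SKETCH_PR_SLOWHZ);
}

int
main(void)
{
    /* RFC 3376 defaults: RV=2, QI=125s, QRI=10s -> 260s -> 520 ticks. */
    printf("%d ticks\n", sketch_old_querier_present_ticks(2, 125, 10));
    return (0);
}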
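
/*
 * Editor's sketch (not part of igmp.c): the slow-timeout path in
 * igmp_v1v2_process_querier_timers() reduces, as far as the operating
 * version is concerned, to the decision below: a running v1 querier
 * timer (with v1 allowed) wins over v2, a running v2 timer (with v2
 * allowed) wins over v3, and otherwise the link reverts to IGMPv3.
 * The real function also decrements and clears the timers; this only
 * summarizes the version choice.
 */
static int
sketch_effective_version(int v1_timer, int v2_timer, int v1enable, int v2enable)
{
    if (v1_timer > 0 && v1enable)
        return (1);
    if (v2_timer > 0 && v2enable)
        return (2);
    return (3);
}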
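
/*
 * Editor's sketch (not part of igmp.c): igmp_v1v2_queue_report() emits
 * the fixed 8-byte IGMPv1/v2 message shown below, checksummed with the
 * standard Internet checksum (RFC 1071) and addressed to the group
 * itself, or to 224.0.0.2 (all-routers) for a leave.  This standalone
 * version uses a flat struct instead of an mbuf; the 0x16/0x17 type
 * codes are the RFC 2236 values for a v2 report and a leave message.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Fixed 8-byte IGMPv1/v2 message (RFC 1112 / RFC 2236). */
struct sketch_igmp {
    uint8_t  type;      /* 0x16 v2 report, 0x17 leave */
    uint8_t  code;      /* unused in host reports */
    uint16_t cksum;     /* Internet checksum over the 8 bytes */
    uint32_t group;     /* group address, network byte order */
};

/* RFC 1071 checksum; len must be even (true for this message). */
static uint16_t
sketch_cksum(const void *buf, size_t len)
{
    const uint16_t *w = buf;
    uint32_t sum = 0;

    while (len > 1) {
        sum += *w++;
        len -= 2;
    }
    sum = (sum >> 16) + (sum & 0xffff);
    sum += (sum >> 16);
    return ((uint16_t)~sum);
}

static void
sketch_build_v2_report(struct sketch_igmp *msg, uint32_t group_hbo, int leave)
{
    memset(msg, 0, sizeof(*msg));
    msg->type = leave ? 0x17 : 0x16;
    msg->group = htonl(group_hbo);
    msg->cksum = sketch_cksum(msg, sizeof(*msg));
    /* Destination IP: 224.0.0.2 for a leave, the group itself otherwise. */
}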
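
/*
 * Editor's sketch (not part of igmp.c): igmp_change_state() classifies a
 * request by comparing the group's filter mode at T0 and T1, where
 * MCAST_UNDEFINED marks "no membership".  The enum values here are
 * illustrative; only the comparisons mirror the code above.
 */
enum sketch_fmode  { SKF_UNDEFINED, SKF_INCLUDE, SKF_EXCLUDE };
enum sketch_action { SKA_INITIAL_JOIN, SKA_FINAL_LEAVE, SKA_MODE_CHANGE,
                     SKA_FILTER_CHANGE };

static enum sketch_action
sketch_classify(enum sketch_fmode t0, enum sketch_fmode t1)
{
    if (t0 != t1) {
        if (t0 == SKF_UNDEFINED)
            return (SKA_INITIAL_JOIN);   /* first local listener appeared */
        if (t1 == SKF_UNDEFINED)
            return (SKA_FINAL_LEAVE);    /* last local listener went away */
        return (SKA_MODE_CHANGE);        /* e.g. INCLUDE <-> EXCLUDE */
    }
    return (SKA_FILTER_CHANGE);          /* same mode, source list changed */
}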
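
/*
 * Editor's sketch (not part of igmp.c): on a v1/v2 link the initial
 * report scheduled by igmp_initial_join() is re-armed after a random
 * delay inside the maximum response interval, expressed in fast-timeout
 * ticks.  Neither IGMP_V1V2_MAX_RI nor PR_FASTHZ appears in this
 * excerpt; 10 seconds and 5 ticks per second are assumed in the usage
 * note below.
 */
#include <stdlib.h>

static int
sketch_random_report_ticks(int max_ri_secs, int fasthz)
{
    return (1 + rand() % (max_ri_secs * fasthz));   /* cf. IGMP_RANDOM_DELAY() */
}

/* e.g. sketch_random_report_ticks(10, 5) yields 1..50 ticks (0.2s..10s). */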
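
/*
 * Editor's sketch (not part of igmp.c): the record type chosen by
 * igmp_v3_enqueue_group_record() comes from RFC 3376 Section 4.2.12.
 * This simplification treats the filter mode as a boolean
 * include/exclude and ignores the MCAST_UNDEFINED (leave) case, which
 * the real code reports as a TO_IN record carrying no sources.
 */
enum sketch_rectype {
    SK_DO_NOTHING        = 0,   /* local sentinel, never on the wire */
    SK_MODE_IS_INCLUDE   = 1,
    SK_MODE_IS_EXCLUDE   = 2,
    SK_CHANGE_TO_INCLUDE = 3,
    SK_CHANGE_TO_EXCLUDE = 4,
    SK_ALLOW_NEW_SOURCES = 5,
    SK_BLOCK_OLD_SOURCES = 6
};

/*
 * A current-state record reports the mode at t1; a state-change record
 * reports the new mode only if the mode flipped, otherwise the work is
 * handed to the filter-change path (ALLOW_NEW/BLOCK_OLD deltas).
 */
static enum sketch_rectype
sketch_pick_record_type(int is_state_change, int exclude_t0, int exclude_t1)
{
    if (!is_state_change)
        return (exclude_t1 ? SK_MODE_IS_EXCLUDE : SK_MODE_IS_INCLUDE);
    if (exclude_t0 != exclude_t1)
        return (exclude_t1 ? SK_CHANGE_TO_EXCLUDE : SK_CHANGE_TO_INCLUDE);
    return (SK_DO_NOTHING);     /* same mode: emit filter-change records */
}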
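
/*
 * Editor's sketch (not part of igmp.c): the m0srcs arithmetic in the
 * enqueue routines asks how many 4-byte sources fit in one report
 * packet.  The leading space reserved for IP + Router Alert + IGMPv3
 * report headers (IGMP_LEADINGSPACE) is not defined in this excerpt;
 * 20 + 4 + 8 = 32 bytes is assumed in the example, and 8 bytes is the
 * fixed part of one group record.
 */
#include <stdint.h>

static int
sketch_sources_per_packet(unsigned mtu, unsigned leading)
{
    return ((int)((mtu - leading - 8) / sizeof(uint32_t)));
}

/* e.g. sketch_sources_per_packet(1500, 32) == 365 for an Ethernet MTU. */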
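
/*
 * Editor's sketch (not part of igmp.c): igmp_v3_enqueue_filter_change()
 * turns per-source mode deltas between t0 and t1 into ALLOW_NEW and
 * BLOCK_OLD sets.  This flattened version works over an array instead
 * of the RB-tree: unchanged sources are skipped, include-mode sources
 * on an exclude-mode group are skipped, and a source that became
 * undefined is reported as the inverse of the group's filter mode.
 * The field names and the 0/1/2 mode encoding are assumptions.
 */
#include <stddef.h>

struct sketch_src {
    unsigned addr;      /* source address, host byte order */
    int mode_t0;        /* 0 = undefined, 1 = include, 2 = exclude */
    int mode_t1;
};

static void
sketch_partition_deltas(const struct sketch_src *src, size_t n,
    int group_mode_t1, unsigned *allow, size_t *nallow,
    unsigned *block, size_t *nblock)
{
    size_t i;

    *nallow = *nblock = 0;
    for (i = 0; i < n; i++) {
        int now = src[i].mode_t1;

        if (now == src[i].mode_t0)
            continue;                           /* unchanged: no delta */
        if (group_mode_t1 == 2 && now == 1)
            continue;                           /* IN source on EX group */
        if (now == 0)
            now = (group_mode_t1 == 2) ? 1 : 2; /* invert the group mode */
        if (now == 1)
            allow[(*nallow)++] = src[i].addr;   /* newly permitted */
        else
            block[(*nblock)++] = src[i].addr;   /* newly blocked */
    }
}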
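
/*
 * Editor's sketch (not part of igmp.c): igmp_v3_dispatch_general_query()
 * sends at most IGMP_MAX_RESPONSE_BURST packets per pass and, if the
 * queue is still non-empty, re-arms the interface timer with a random
 * delay inside the burst interval.  Those constants are not shown in
 * this excerpt, so burst size and interval are plain parameters here.
 */
#include <stdlib.h>

/* Returns the remaining backlog; *next_timer is the re-arm delay in ticks. */
static int
sketch_slew_burst(int backlog, int burst, int interval_ticks, int *next_timer)
{
    int sent = (backlog < burst) ? backlog : burst;

    backlog -= sent;
    if (backlog > 0)
        *next_timer = 1 + rand() % interval_ticks;  /* cf. IGMP_RANDOM_DELAY() */
    else
        *next_timer = 0;
    return (backlog);
}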
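
/*
 * Editor's sketch (not part of igmp.c): igmp_v3_encap_report() prepends
 * the 8-byte IGMPv3 report header below, counts the queued group records
 * in the numgrps field, checksums the header plus all records, and
 * addresses the packet to 224.0.0.22 with TOS set to Internetwork
 * Control.  The layout follows RFC 3376 Section 4.2; the checksum itself
 * is left to the caller here (see the RFC 1071 sketch above).
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct sketch_igmp_report {
    uint8_t  type;          /* 0x22, v3 membership report */
    uint8_t  rsv1;
    uint16_t cksum;         /* over this header plus every group record */
    uint16_t rsv2;
    uint16_t numgrps;       /* count of group records that follow */
};

static void
sketch_fill_report_hdr(struct sketch_igmp_report *r, uint16_t nrecs)
{
    memset(r, 0, sizeof(*r));
    r->type = 0x22;
    r->numgrps = htons(nrecs);
    /* cksum stays 0 here; compute it over the header and all records. */
}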