igmp.c revision 194739
1/*- 2 * Copyright (c) 2007-2009 Bruce Simpson. 3 * Copyright (c) 1988 Stephen Deering. 4 * Copyright (c) 1992, 1993 5 * The Regents of the University of California. All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * Stephen Deering of Stanford University. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93 35 */ 36 37/* 38 * Internet Group Management Protocol (IGMP) routines. 39 * [RFC1112, RFC2236, RFC3376] 40 * 41 * Written by Steve Deering, Stanford, May 1988. 42 * Modified by Rosen Sharma, Stanford, Aug 1994. 43 * Modified by Bill Fenner, Xerox PARC, Feb 1995. 44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995. 45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson. 46 * 47 * MULTICAST Revision: 3.5.1.4 48 */ 49 50#include <sys/cdefs.h> 51__FBSDID("$FreeBSD: head/sys/netinet/igmp.c 194739 2009-06-23 17:03:45Z bz $"); 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <sys/module.h> 56#include <sys/malloc.h> 57#include <sys/mbuf.h> 58#include <sys/socket.h> 59#include <sys/protosw.h> 60#include <sys/kernel.h> 61#include <sys/sysctl.h> 62#include <sys/vimage.h> 63#include <sys/ktr.h> 64#include <sys/condvar.h> 65 66#include <net/if.h> 67#include <net/netisr.h> 68#include <net/vnet.h> 69 70#include <netinet/in.h> 71#include <netinet/in_var.h> 72#include <netinet/in_systm.h> 73#include <netinet/ip.h> 74#include <netinet/ip_var.h> 75#include <netinet/ip_options.h> 76#include <netinet/igmp.h> 77#include <netinet/igmp_var.h> 78#include <netinet/vinet.h> 79 80#include <machine/in_cksum.h> 81 82#include <security/mac/mac_framework.h> 83 84#ifndef KTR_IGMPV3 85#define KTR_IGMPV3 KTR_INET 86#endif 87 88static struct igmp_ifinfo * 89 igi_alloc_locked(struct ifnet *); 90static void igi_delete_locked(const struct ifnet *); 91static void igmp_dispatch_queue(struct ifqueue *, int, const int); 92static void igmp_fasttimo_vnet(void); 93static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *); 94static int igmp_handle_state_change(struct in_multi *, 95 struct igmp_ifinfo *); 96static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *); 97static int igmp_input_v1_query(struct ifnet *, const struct ip *, 98 const struct igmp *); 99static int igmp_input_v2_query(struct ifnet *, const struct ip *, 100 const struct igmp *); 101static int igmp_input_v3_query(struct ifnet *, const struct ip *, 102 /*const*/ struct igmpv3 *); 103static int igmp_input_v3_group_query(struct in_multi *, 104 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *); 105static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *, 106 /*const*/ struct igmp *); 107static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *, 108 /*const*/ struct igmp *); 109static void igmp_intr(struct mbuf *); 110static int igmp_isgroupreported(const struct in_addr); 111static struct mbuf * 112 igmp_ra_alloc(void); 113#ifdef KTR 114static char * igmp_rec_type_to_str(const int); 115#endif 116static void igmp_set_version(struct igmp_ifinfo *, const int); 117static void igmp_slowtimo_vnet(void); 118static void igmp_sysinit(void); 119static int igmp_v1v2_queue_report(struct in_multi *, const int); 120static void igmp_v1v2_process_group_timer(struct in_multi *, const int); 121static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *); 122static void igmp_v2_update_group(struct in_multi *, const int); 123static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *); 124static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *); 125static struct mbuf * 126 igmp_v3_encap_report(struct ifnet *, struct mbuf *); 127static int igmp_v3_enqueue_group_record(struct ifqueue *, 128 struct in_multi *, const int, const int, const int); 129static int igmp_v3_enqueue_filter_change(struct ifqueue *, 130 struct in_multi *); 131static void igmp_v3_process_group_timers(struct igmp_ifinfo *, 132 struct ifqueue *, struct ifqueue *, struct in_multi *, 133 const int); 134static int igmp_v3_merge_state_changes(struct in_multi *, 135 struct ifqueue *); 136static void igmp_v3_suppress_group_record(struct in_multi *); 137static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS); 138static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS); 139static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS); 140 141static vnet_attach_fn vnet_igmp_iattach; 142static vnet_detach_fn vnet_igmp_idetach; 143 144static const struct netisr_handler igmp_nh = { 145 .nh_name = "igmp", 146 .nh_handler = igmp_intr, 147 .nh_proto = NETISR_IGMP, 148 .nh_policy = NETISR_POLICY_SOURCE, 149}; 150 151/* 152 * System-wide globals. 153 * 154 * Unlocked access to these is OK, except for the global IGMP output 155 * queue. The IGMP subsystem lock ends up being system-wide for the moment, 156 * because all VIMAGEs have to share a global output queue, as netisrs 157 * themselves are not virtualized. 158 * 159 * Locking: 160 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK. 161 * Any may be taken independently; if any are held at the same 162 * time, the above lock order must be followed. 163 * * All output is delegated to the netisr. 164 * Now that Giant has been eliminated, the netisr may be inlined. 165 * * IN_MULTI_LOCK covers in_multi. 166 * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file, 167 * including the output queue. 168 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of 169 * per-link state iterators. 170 * * igmp_ifinfo is valid as long as PF_INET is attached to the interface, 171 * therefore it is not refcounted. 172 * We allow unlocked reads of igmp_ifinfo when accessed via in_multi. 173 * 174 * Reference counting 175 * * IGMP acquires its own reference every time an in_multi is passed to 176 * it and the group is being joined for the first time. 177 * * IGMP releases its reference(s) on in_multi in a deferred way, 178 * because the operations which process the release run as part of 179 * a loop whose control variables are directly affected by the release 180 * (that, and not recursing on the IF_ADDR_LOCK). 181 * 182 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds 183 * to a vnet in ifp->if_vnet. 184 * 185 * SMPng: XXX We may potentially race operations on ifma_protospec. 186 * The problem is that we currently lack a clean way of taking the 187 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing, 188 * as anything which modifies ifma needs to be covered by that lock. 189 * So check for ifma_protospec being NULL before proceeding. 190 */ 191struct mtx igmp_mtx; 192 193struct mbuf *m_raopt; /* Router Alert option */ 194MALLOC_DEFINE(M_IGMP, "igmp", "igmp state"); 195 196/* 197 * VIMAGE-wide globals. 198 * 199 * The IGMPv3 timers themselves need to run per-image, however, 200 * protosw timers run globally (see tcp). 201 * An ifnet can only be in one vimage at a time, and the loopback 202 * ifnet, loif, is itself virtualized. 203 * It would otherwise be possible to seriously hose IGMP state, 204 * and create inconsistencies in upstream multicast routing, if you have 205 * multiple VIMAGEs running on the same link joining different multicast 206 * groups, UNLESS the "primary IP address" is different. This is because 207 * IGMP for IPv4 does not force link-local addresses to be used for each 208 * node, unlike MLD for IPv6. 209 * Obviously the IGMPv3 per-interface state has per-vimage granularity 210 * also as a result. 211 * 212 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection 213 * policy to control the address used by IGMP on the link. 214 */ 215#ifdef VIMAGE_GLOBALS 216int interface_timers_running; /* IGMPv3 general query response */ 217int state_change_timers_running; /* IGMPv3 state-change retransmit */ 218int current_state_timers_running; /* IGMPv1/v2 host report; 219 * IGMPv3 g/sg query response */ 220 221LIST_HEAD(, igmp_ifinfo) igi_head; 222struct igmpstat igmpstat; 223struct timeval igmp_gsrdelay; 224 225int igmp_recvifkludge; 226int igmp_sendra; 227int igmp_sendlocal; 228int igmp_v1enable; 229int igmp_v2enable; 230int igmp_legacysupp; 231int igmp_default_version; 232#endif /* VIMAGE_GLOBALS */ 233 234/* 235 * Virtualized sysctls. 236 */ 237SYSCTL_V_STRUCT(V_NET, vnet_inet, _net_inet_igmp, IGMPCTL_STATS, stats, 238 CTLFLAG_RW, igmpstat, igmpstat, ""); 239SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, recvifkludge, 240 CTLFLAG_RW, igmp_recvifkludge, 0, 241 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address"); 242SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, sendra, 243 CTLFLAG_RW, igmp_sendra, 0, 244 "Send IP Router Alert option in IGMPv2/v3 messages"); 245SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, sendlocal, 246 CTLFLAG_RW, igmp_sendlocal, 0, 247 "Send IGMP membership reports for 224.0.0.0/24 groups"); 248SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, v1enable, 249 CTLFLAG_RW, igmp_v1enable, 0, 250 "Enable backwards compatibility with IGMPv1"); 251SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, v2enable, 252 CTLFLAG_RW, igmp_v2enable, 0, 253 "Enable backwards compatibility with IGMPv2"); 254SYSCTL_V_INT(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, legacysupp, 255 CTLFLAG_RW, igmp_legacysupp, 0, 256 "Allow v1/v2 reports to suppress v3 group responses"); 257SYSCTL_V_PROC(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, default_version, 258 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, igmp_default_version, 0, 259 sysctl_igmp_default_version, "I", 260 "Default version of IGMP to run on each interface"); 261SYSCTL_V_PROC(V_NET, vnet_inet, _net_inet_igmp, OID_AUTO, gsrdelay, 262 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, igmp_gsrdelay.tv_sec, 0, 263 sysctl_igmp_gsr, "I", 264 "Rate limit for IGMPv3 Group-and-Source queries in seconds"); 265 266/* 267 * Non-virtualized sysctls. 268 */ 269SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, CTLFLAG_RD | CTLFLAG_MPSAFE, 270 sysctl_igmp_ifinfo, "Per-interface IGMPv3 state"); 271 272static __inline void 273igmp_save_context(struct mbuf *m, struct ifnet *ifp) 274{ 275 276#ifdef VIMAGE 277 m->m_pkthdr.header = ifp->if_vnet; 278#endif /* VIMAGE */ 279 m->m_pkthdr.flowid = ifp->if_index; 280} 281 282static __inline void 283igmp_scrub_context(struct mbuf *m) 284{ 285 286 m->m_pkthdr.header = NULL; 287 m->m_pkthdr.flowid = 0; 288} 289 290#ifdef KTR 291static __inline char * 292inet_ntoa_haddr(in_addr_t haddr) 293{ 294 struct in_addr ia; 295 296 ia.s_addr = htonl(haddr); 297 return (inet_ntoa(ia)); 298} 299#endif 300 301/* 302 * Restore context from a queued IGMP output chain. 303 * Return saved ifindex. 304 * 305 * VIMAGE: The assertion is there to make sure that we 306 * actually called CURVNET_SET() with what's in the mbuf chain. 307 */ 308static __inline uint32_t 309igmp_restore_context(struct mbuf *m) 310{ 311 312#ifdef notyet 313#if defined(VIMAGE) && defined(INVARIANTS) 314 KASSERT(curvnet == (m->m_pkthdr.header), 315 ("%s: called when curvnet was not restored", __func__)); 316#endif 317#endif 318 return (m->m_pkthdr.flowid); 319} 320 321/* 322 * Retrieve or set default IGMP version. 323 * 324 * VIMAGE: Assume curvnet set by caller. 325 * SMPng: NOTE: Serialized by IGMP lock. 326 */ 327static int 328sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS) 329{ 330 INIT_VNET_INET(curvnet); 331 int error; 332 int new; 333 334 error = sysctl_wire_old_buffer(req, sizeof(int)); 335 if (error) 336 return (error); 337 338 IGMP_LOCK(); 339 340 new = V_igmp_default_version; 341 342 error = sysctl_handle_int(oidp, &new, 0, req); 343 if (error || !req->newptr) 344 goto out_locked; 345 346 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) { 347 error = EINVAL; 348 goto out_locked; 349 } 350 351 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d", 352 V_igmp_default_version, new); 353 354 V_igmp_default_version = new; 355 356out_locked: 357 IGMP_UNLOCK(); 358 return (error); 359} 360 361/* 362 * Retrieve or set threshold between group-source queries in seconds. 363 * 364 * VIMAGE: Assume curvnet set by caller. 365 * SMPng: NOTE: Serialized by IGMP lock. 366 */ 367static int 368sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS) 369{ 370 INIT_VNET_INET(curvnet); 371 int error; 372 int i; 373 374 error = sysctl_wire_old_buffer(req, sizeof(int)); 375 if (error) 376 return (error); 377 378 IGMP_LOCK(); 379 380 i = V_igmp_gsrdelay.tv_sec; 381 382 error = sysctl_handle_int(oidp, &i, 0, req); 383 if (error || !req->newptr) 384 goto out_locked; 385 386 if (i < -1 || i >= 60) { 387 error = EINVAL; 388 goto out_locked; 389 } 390 391 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d", 392 V_igmp_gsrdelay.tv_sec, i); 393 V_igmp_gsrdelay.tv_sec = i; 394 395out_locked: 396 IGMP_UNLOCK(); 397 return (error); 398} 399 400/* 401 * Expose struct igmp_ifinfo to userland, keyed by ifindex. 402 * For use by ifmcstat(8). 403 * 404 * SMPng: NOTE: Does an unlocked ifindex space read. 405 * VIMAGE: Assume curvnet set by caller. The node handler itself 406 * is not directly virtualized. 407 */ 408static int 409sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS) 410{ 411 INIT_VNET_NET(curvnet); 412 INIT_VNET_INET(curvnet); 413 int *name; 414 int error; 415 u_int namelen; 416 struct ifnet *ifp; 417 struct igmp_ifinfo *igi; 418 419 name = (int *)arg1; 420 namelen = arg2; 421 422 if (req->newptr != NULL) 423 return (EPERM); 424 425 if (namelen != 1) 426 return (EINVAL); 427 428 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo)); 429 if (error) 430 return (error); 431 432 IN_MULTI_LOCK(); 433 IGMP_LOCK(); 434 435 if (name[0] <= 0 || name[0] > V_if_index) { 436 error = ENOENT; 437 goto out_locked; 438 } 439 440 error = ENOENT; 441 442 ifp = ifnet_byindex(name[0]); 443 if (ifp == NULL) 444 goto out_locked; 445 446 LIST_FOREACH(igi, &V_igi_head, igi_link) { 447 if (ifp == igi->igi_ifp) { 448 error = SYSCTL_OUT(req, igi, 449 sizeof(struct igmp_ifinfo)); 450 break; 451 } 452 } 453 454out_locked: 455 IGMP_UNLOCK(); 456 IN_MULTI_UNLOCK(); 457 return (error); 458} 459 460/* 461 * Dispatch an entire queue of pending packet chains 462 * using the netisr. 463 * VIMAGE: Assumes the vnet pointer has been set. 464 */ 465static void 466igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop) 467{ 468 struct mbuf *m; 469 470 for (;;) { 471 _IF_DEQUEUE(ifq, m); 472 if (m == NULL) 473 break; 474 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m); 475 if (loop) 476 m->m_flags |= M_IGMP_LOOP; 477 netisr_dispatch(NETISR_IGMP, m); 478 if (--limit == 0) 479 break; 480 } 481} 482 483/* 484 * Filter outgoing IGMP report state by group. 485 * 486 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1). 487 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are 488 * disabled for all groups in the 224.0.0.0/24 link-local scope. However, 489 * this may break certain IGMP snooping switches which rely on the old 490 * report behaviour. 491 * 492 * Return zero if the given group is one for which IGMP reports 493 * should be suppressed, or non-zero if reports should be issued. 494 */ 495static __inline int 496igmp_isgroupreported(const struct in_addr addr) 497{ 498 INIT_VNET_INET(curvnet); 499 500 if (in_allhosts(addr) || 501 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) 502 return (0); 503 504 return (1); 505} 506 507/* 508 * Construct a Router Alert option to use in outgoing packets. 509 */ 510static struct mbuf * 511igmp_ra_alloc(void) 512{ 513 struct mbuf *m; 514 struct ipoption *p; 515 516 MGET(m, M_DONTWAIT, MT_DATA); 517 p = mtod(m, struct ipoption *); 518 p->ipopt_dst.s_addr = INADDR_ANY; 519 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */ 520 p->ipopt_list[1] = 0x04; /* 4 bytes long */ 521 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */ 522 p->ipopt_list[3] = 0x00; /* pad byte */ 523 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1]; 524 525 return (m); 526} 527 528/* 529 * Attach IGMP when PF_INET is attached to an interface. 530 */ 531struct igmp_ifinfo * 532igmp_domifattach(struct ifnet *ifp) 533{ 534 struct igmp_ifinfo *igi; 535 536 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 537 __func__, ifp, ifp->if_xname); 538 539 IGMP_LOCK(); 540 541 igi = igi_alloc_locked(ifp); 542 if (!(ifp->if_flags & IFF_MULTICAST)) 543 igi->igi_flags |= IGIF_SILENT; 544 545 IGMP_UNLOCK(); 546 547 return (igi); 548} 549 550/* 551 * VIMAGE: assume curvnet set by caller. 552 */ 553static struct igmp_ifinfo * 554igi_alloc_locked(/*const*/ struct ifnet *ifp) 555{ 556 INIT_VNET_INET(ifp->if_vnet); 557 struct igmp_ifinfo *igi; 558 559 IGMP_LOCK_ASSERT(); 560 561 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO); 562 if (igi == NULL) 563 goto out; 564 565 igi->igi_ifp = ifp; 566 igi->igi_version = V_igmp_default_version; 567 igi->igi_flags = 0; 568 igi->igi_rv = IGMP_RV_INIT; 569 igi->igi_qi = IGMP_QI_INIT; 570 igi->igi_qri = IGMP_QRI_INIT; 571 igi->igi_uri = IGMP_URI_INIT; 572 573 SLIST_INIT(&igi->igi_relinmhead); 574 575 /* 576 * Responses to general queries are subject to bounds. 577 */ 578 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS); 579 580 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link); 581 582 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)", 583 ifp, ifp->if_xname); 584 585out: 586 return (igi); 587} 588 589/* 590 * Hook for ifdetach. 591 * 592 * NOTE: Some finalization tasks need to run before the protocol domain 593 * is detached, but also before the link layer does its cleanup. 594 * 595 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK(). 596 * XXX This is also bitten by unlocked ifma_protospec access. 597 */ 598void 599igmp_ifdetach(struct ifnet *ifp) 600{ 601 struct igmp_ifinfo *igi; 602 struct ifmultiaddr *ifma; 603 struct in_multi *inm, *tinm; 604 605 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp, 606 ifp->if_xname); 607 608 IGMP_LOCK(); 609 610 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 611 if (igi->igi_version == IGMP_VERSION_3) { 612 IF_ADDR_LOCK(ifp); 613 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 614 if (ifma->ifma_addr->sa_family != AF_INET || 615 ifma->ifma_protospec == NULL) 616 continue; 617#if 0 618 KASSERT(ifma->ifma_protospec != NULL, 619 ("%s: ifma_protospec is NULL", __func__)); 620#endif 621 inm = (struct in_multi *)ifma->ifma_protospec; 622 if (inm->inm_state == IGMP_LEAVING_MEMBER) { 623 SLIST_INSERT_HEAD(&igi->igi_relinmhead, 624 inm, inm_nrele); 625 } 626 inm_clear_recorded(inm); 627 } 628 IF_ADDR_UNLOCK(ifp); 629 /* 630 * Free the in_multi reference(s) for this IGMP lifecycle. 631 */ 632 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, 633 tinm) { 634 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele); 635 inm_release_locked(inm); 636 } 637 } 638 639 IGMP_UNLOCK(); 640} 641 642/* 643 * Hook for domifdetach. 644 */ 645void 646igmp_domifdetach(struct ifnet *ifp) 647{ 648 struct igmp_ifinfo *igi; 649 650 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 651 __func__, ifp, ifp->if_xname); 652 653 IGMP_LOCK(); 654 655 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 656 igi_delete_locked(ifp); 657 658 IGMP_UNLOCK(); 659} 660 661static void 662igi_delete_locked(const struct ifnet *ifp) 663{ 664 INIT_VNET_INET(ifp->if_vnet); 665 struct igmp_ifinfo *igi, *tigi; 666 667 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)", 668 __func__, ifp, ifp->if_xname); 669 670 IGMP_LOCK_ASSERT(); 671 672 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) { 673 if (igi->igi_ifp == ifp) { 674 /* 675 * Free deferred General Query responses. 676 */ 677 _IF_DRAIN(&igi->igi_gq); 678 679 LIST_REMOVE(igi, igi_link); 680 681 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead), 682 ("%s: there are dangling in_multi references", 683 __func__)); 684 685 free(igi, M_IGMP); 686 return; 687 } 688 } 689 690#ifdef INVARIANTS 691 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp); 692#endif 693} 694 695/* 696 * Process a received IGMPv1 query. 697 * Return non-zero if the message should be dropped. 698 * 699 * VIMAGE: The curvnet pointer is derived from the input ifp. 700 */ 701static int 702igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip, 703 const struct igmp *igmp) 704{ 705 INIT_VNET_INET(ifp->if_vnet); 706 struct ifmultiaddr *ifma; 707 struct igmp_ifinfo *igi; 708 struct in_multi *inm; 709 710 /* 711 * IGMPv1 Host Mmembership Queries SHOULD always be addressed to 712 * 224.0.0.1. They are always treated as General Queries. 713 * igmp_group is always ignored. Do not drop it as a userland 714 * daemon may wish to see it. 715 * XXX SMPng: unlocked increments in igmpstat assumed atomic. 716 */ 717 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) { 718 IGMPSTAT_INC(igps_rcv_badqueries); 719 return (0); 720 } 721 IGMPSTAT_INC(igps_rcv_gen_queries); 722 723 IN_MULTI_LOCK(); 724 IGMP_LOCK(); 725 726 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 727 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 728 729 if (igi->igi_flags & IGIF_LOOPBACK) { 730 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)", 731 ifp, ifp->if_xname); 732 goto out_locked; 733 } 734 735 /* 736 * Switch to IGMPv1 host compatibility mode. 737 */ 738 igmp_set_version(igi, IGMP_VERSION_1); 739 740 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname); 741 742 /* 743 * Start the timers in all of our group records 744 * for the interface on which the query arrived, 745 * except those which are already running. 746 */ 747 IF_ADDR_LOCK(ifp); 748 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 749 if (ifma->ifma_addr->sa_family != AF_INET || 750 ifma->ifma_protospec == NULL) 751 continue; 752 inm = (struct in_multi *)ifma->ifma_protospec; 753 if (inm->inm_timer != 0) 754 continue; 755 switch (inm->inm_state) { 756 case IGMP_NOT_MEMBER: 757 case IGMP_SILENT_MEMBER: 758 break; 759 case IGMP_G_QUERY_PENDING_MEMBER: 760 case IGMP_SG_QUERY_PENDING_MEMBER: 761 case IGMP_REPORTING_MEMBER: 762 case IGMP_IDLE_MEMBER: 763 case IGMP_LAZY_MEMBER: 764 case IGMP_SLEEPING_MEMBER: 765 case IGMP_AWAKENING_MEMBER: 766 inm->inm_state = IGMP_REPORTING_MEMBER; 767 inm->inm_timer = IGMP_RANDOM_DELAY( 768 IGMP_V1V2_MAX_RI * PR_FASTHZ); 769 V_current_state_timers_running = 1; 770 break; 771 case IGMP_LEAVING_MEMBER: 772 break; 773 } 774 } 775 IF_ADDR_UNLOCK(ifp); 776 777out_locked: 778 IGMP_UNLOCK(); 779 IN_MULTI_UNLOCK(); 780 781 return (0); 782} 783 784/* 785 * Process a received IGMPv2 general or group-specific query. 786 */ 787static int 788igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip, 789 const struct igmp *igmp) 790{ 791 INIT_VNET_INET(ifp->if_vnet); 792 struct ifmultiaddr *ifma; 793 struct igmp_ifinfo *igi; 794 struct in_multi *inm; 795 int is_general_query; 796 uint16_t timer; 797 798 is_general_query = 0; 799 800 /* 801 * Validate address fields upfront. 802 * XXX SMPng: unlocked increments in igmpstat assumed atomic. 803 */ 804 if (in_nullhost(igmp->igmp_group)) { 805 /* 806 * IGMPv2 General Query. 807 * If this was not sent to the all-hosts group, ignore it. 808 */ 809 if (!in_allhosts(ip->ip_dst)) 810 return (0); 811 IGMPSTAT_INC(igps_rcv_gen_queries); 812 is_general_query = 1; 813 } else { 814 /* IGMPv2 Group-Specific Query. */ 815 IGMPSTAT_INC(igps_rcv_group_queries); 816 } 817 818 IN_MULTI_LOCK(); 819 IGMP_LOCK(); 820 821 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 822 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 823 824 if (igi->igi_flags & IGIF_LOOPBACK) { 825 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)", 826 ifp, ifp->if_xname); 827 goto out_locked; 828 } 829 830 /* 831 * Ignore v2 query if in v1 Compatibility Mode. 832 */ 833 if (igi->igi_version == IGMP_VERSION_1) 834 goto out_locked; 835 836 igmp_set_version(igi, IGMP_VERSION_2); 837 838 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE; 839 if (timer == 0) 840 timer = 1; 841 842 if (is_general_query) { 843 /* 844 * For each reporting group joined on this 845 * interface, kick the report timer. 846 */ 847 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)", 848 ifp, ifp->if_xname); 849 IF_ADDR_LOCK(ifp); 850 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 851 if (ifma->ifma_addr->sa_family != AF_INET || 852 ifma->ifma_protospec == NULL) 853 continue; 854 inm = (struct in_multi *)ifma->ifma_protospec; 855 igmp_v2_update_group(inm, timer); 856 } 857 IF_ADDR_UNLOCK(ifp); 858 } else { 859 /* 860 * Group-specific IGMPv2 query, we need only 861 * look up the single group to process it. 862 */ 863 inm = inm_lookup(ifp, igmp->igmp_group); 864 if (inm != NULL) { 865 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)", 866 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 867 igmp_v2_update_group(inm, timer); 868 } 869 } 870 871out_locked: 872 IGMP_UNLOCK(); 873 IN_MULTI_UNLOCK(); 874 875 return (0); 876} 877 878/* 879 * Update the report timer on a group in response to an IGMPv2 query. 880 * 881 * If we are becoming the reporting member for this group, start the timer. 882 * If we already are the reporting member for this group, and timer is 883 * below the threshold, reset it. 884 * 885 * We may be updating the group for the first time since we switched 886 * to IGMPv3. If we are, then we must clear any recorded source lists, 887 * and transition to REPORTING state; the group timer is overloaded 888 * for group and group-source query responses. 889 * 890 * Unlike IGMPv3, the delay per group should be jittered 891 * to avoid bursts of IGMPv2 reports. 892 */ 893static void 894igmp_v2_update_group(struct in_multi *inm, const int timer) 895{ 896 INIT_VNET_INET(curvnet); 897 898 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__, 899 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer); 900 901 IN_MULTI_LOCK_ASSERT(); 902 903 switch (inm->inm_state) { 904 case IGMP_NOT_MEMBER: 905 case IGMP_SILENT_MEMBER: 906 break; 907 case IGMP_REPORTING_MEMBER: 908 if (inm->inm_timer != 0 && 909 inm->inm_timer <= timer) { 910 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, " 911 "skipping.", __func__); 912 break; 913 } 914 /* FALLTHROUGH */ 915 case IGMP_SG_QUERY_PENDING_MEMBER: 916 case IGMP_G_QUERY_PENDING_MEMBER: 917 case IGMP_IDLE_MEMBER: 918 case IGMP_LAZY_MEMBER: 919 case IGMP_AWAKENING_MEMBER: 920 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__); 921 inm->inm_state = IGMP_REPORTING_MEMBER; 922 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 923 V_current_state_timers_running = 1; 924 break; 925 case IGMP_SLEEPING_MEMBER: 926 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__); 927 inm->inm_state = IGMP_AWAKENING_MEMBER; 928 break; 929 case IGMP_LEAVING_MEMBER: 930 break; 931 } 932} 933 934/* 935 * Process a received IGMPv3 general, group-specific or 936 * group-and-source-specific query. 937 * Assumes m has already been pulled up to the full IGMP message length. 938 * Return 0 if successful, otherwise an appropriate error code is returned. 939 */ 940static int 941igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, 942 /*const*/ struct igmpv3 *igmpv3) 943{ 944 INIT_VNET_INET(ifp->if_vnet); 945 struct igmp_ifinfo *igi; 946 struct in_multi *inm; 947 int is_general_query; 948 uint32_t maxresp, nsrc, qqi; 949 uint16_t timer; 950 uint8_t qrv; 951 952 is_general_query = 0; 953 954 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname); 955 956 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */ 957 if (maxresp >= 128) { 958 maxresp = IGMP_MANT(igmpv3->igmp_code) << 959 (IGMP_EXP(igmpv3->igmp_code) + 3); 960 } 961 962 /* 963 * Robustness must never be less than 2 for on-wire IGMPv3. 964 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make 965 * an exception for interfaces whose IGMPv3 state changes 966 * are redirected to loopback (e.g. MANET). 967 */ 968 qrv = IGMP_QRV(igmpv3->igmp_misc); 969 if (qrv < 2) { 970 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__, 971 qrv, IGMP_RV_INIT); 972 qrv = IGMP_RV_INIT; 973 } 974 975 qqi = igmpv3->igmp_qqi; 976 if (qqi >= 128) { 977 qqi = IGMP_MANT(igmpv3->igmp_qqi) << 978 (IGMP_EXP(igmpv3->igmp_qqi) + 3); 979 } 980 981 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE; 982 if (timer == 0) 983 timer = 1; 984 985 nsrc = ntohs(igmpv3->igmp_numsrc); 986 987 /* 988 * Validate address fields and versions upfront before 989 * accepting v3 query. 990 * XXX SMPng: Unlocked access to igmpstat counters here. 991 */ 992 if (in_nullhost(igmpv3->igmp_group)) { 993 /* 994 * IGMPv3 General Query. 995 * 996 * General Queries SHOULD be directed to 224.0.0.1. 997 * A general query with a source list has undefined 998 * behaviour; discard it. 999 */ 1000 IGMPSTAT_INC(igps_rcv_gen_queries); 1001 if (!in_allhosts(ip->ip_dst) || nsrc > 0) { 1002 IGMPSTAT_INC(igps_rcv_badqueries); 1003 return (0); 1004 } 1005 is_general_query = 1; 1006 } else { 1007 /* Group or group-source specific query. */ 1008 if (nsrc == 0) 1009 IGMPSTAT_INC(igps_rcv_group_queries); 1010 else 1011 IGMPSTAT_INC(igps_rcv_gsr_queries); 1012 } 1013 1014 IN_MULTI_LOCK(); 1015 IGMP_LOCK(); 1016 1017 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 1018 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 1019 1020 if (igi->igi_flags & IGIF_LOOPBACK) { 1021 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)", 1022 ifp, ifp->if_xname); 1023 goto out_locked; 1024 } 1025 1026 /* 1027 * Discard the v3 query if we're in Compatibility Mode. 1028 * The RFC is not obviously worded that hosts need to stay in 1029 * compatibility mode until the Old Version Querier Present 1030 * timer expires. 1031 */ 1032 if (igi->igi_version != IGMP_VERSION_3) { 1033 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)", 1034 igi->igi_version, ifp, ifp->if_xname); 1035 goto out_locked; 1036 } 1037 1038 igmp_set_version(igi, IGMP_VERSION_3); 1039 igi->igi_rv = qrv; 1040 igi->igi_qi = qqi; 1041 igi->igi_qri = maxresp; 1042 1043 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi, 1044 maxresp); 1045 1046 if (is_general_query) { 1047 /* 1048 * Schedule a current-state report on this ifp for 1049 * all groups, possibly containing source lists. 1050 * If there is a pending General Query response 1051 * scheduled earlier than the selected delay, do 1052 * not schedule any other reports. 1053 * Otherwise, reset the interface timer. 1054 */ 1055 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)", 1056 ifp, ifp->if_xname); 1057 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) { 1058 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer); 1059 V_interface_timers_running = 1; 1060 } 1061 } else { 1062 /* 1063 * Group-source-specific queries are throttled on 1064 * a per-group basis to defeat denial-of-service attempts. 1065 * Queries for groups we are not a member of on this 1066 * link are simply ignored. 1067 */ 1068 inm = inm_lookup(ifp, igmpv3->igmp_group); 1069 if (inm == NULL) 1070 goto out_locked; 1071 if (nsrc > 0) { 1072 if (!ratecheck(&inm->inm_lastgsrtv, 1073 &V_igmp_gsrdelay)) { 1074 CTR1(KTR_IGMPV3, "%s: GS query throttled.", 1075 __func__); 1076 IGMPSTAT_INC(igps_drop_gsr_queries); 1077 goto out_locked; 1078 } 1079 } 1080 CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)", 1081 inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname); 1082 /* 1083 * If there is a pending General Query response 1084 * scheduled sooner than the selected delay, no 1085 * further report need be scheduled. 1086 * Otherwise, prepare to respond to the 1087 * group-specific or group-and-source query. 1088 */ 1089 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) 1090 igmp_input_v3_group_query(inm, igi, timer, igmpv3); 1091 } 1092 1093out_locked: 1094 IGMP_UNLOCK(); 1095 IN_MULTI_UNLOCK(); 1096 1097 return (0); 1098} 1099 1100/* 1101 * Process a recieved IGMPv3 group-specific or group-and-source-specific 1102 * query. 1103 * Return <0 if any error occured. Currently this is ignored. 1104 */ 1105static int 1106igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi, 1107 int timer, /*const*/ struct igmpv3 *igmpv3) 1108{ 1109 INIT_VNET_INET(curvnet); 1110 int retval; 1111 uint16_t nsrc; 1112 1113 IN_MULTI_LOCK_ASSERT(); 1114 IGMP_LOCK_ASSERT(); 1115 1116 retval = 0; 1117 1118 switch (inm->inm_state) { 1119 case IGMP_NOT_MEMBER: 1120 case IGMP_SILENT_MEMBER: 1121 case IGMP_SLEEPING_MEMBER: 1122 case IGMP_LAZY_MEMBER: 1123 case IGMP_AWAKENING_MEMBER: 1124 case IGMP_IDLE_MEMBER: 1125 case IGMP_LEAVING_MEMBER: 1126 return (retval); 1127 break; 1128 case IGMP_REPORTING_MEMBER: 1129 case IGMP_G_QUERY_PENDING_MEMBER: 1130 case IGMP_SG_QUERY_PENDING_MEMBER: 1131 break; 1132 } 1133 1134 nsrc = ntohs(igmpv3->igmp_numsrc); 1135 1136 /* 1137 * Deal with group-specific queries upfront. 1138 * If any group query is already pending, purge any recorded 1139 * source-list state if it exists, and schedule a query response 1140 * for this group-specific query. 1141 */ 1142 if (nsrc == 0) { 1143 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER || 1144 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) { 1145 inm_clear_recorded(inm); 1146 timer = min(inm->inm_timer, timer); 1147 } 1148 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER; 1149 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 1150 V_current_state_timers_running = 1; 1151 return (retval); 1152 } 1153 1154 /* 1155 * Deal with the case where a group-and-source-specific query has 1156 * been received but a group-specific query is already pending. 1157 */ 1158 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) { 1159 timer = min(inm->inm_timer, timer); 1160 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 1161 V_current_state_timers_running = 1; 1162 return (retval); 1163 } 1164 1165 /* 1166 * Finally, deal with the case where a group-and-source-specific 1167 * query has been received, where a response to a previous g-s-r 1168 * query exists, or none exists. 1169 * In this case, we need to parse the source-list which the Querier 1170 * has provided us with and check if we have any source list filter 1171 * entries at T1 for these sources. If we do not, there is no need 1172 * schedule a report and the query may be dropped. 1173 * If we do, we must record them and schedule a current-state 1174 * report for those sources. 1175 * FIXME: Handling source lists larger than 1 mbuf requires that 1176 * we pass the mbuf chain pointer down to this function, and use 1177 * m_getptr() to walk the chain. 1178 */ 1179 if (inm->inm_nsrc > 0) { 1180 const struct in_addr *ap; 1181 int i, nrecorded; 1182 1183 ap = (const struct in_addr *)(igmpv3 + 1); 1184 nrecorded = 0; 1185 for (i = 0; i < nsrc; i++, ap++) { 1186 retval = inm_record_source(inm, ap->s_addr); 1187 if (retval < 0) 1188 break; 1189 nrecorded += retval; 1190 } 1191 if (nrecorded > 0) { 1192 CTR1(KTR_IGMPV3, 1193 "%s: schedule response to SG query", __func__); 1194 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER; 1195 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 1196 V_current_state_timers_running = 1; 1197 } 1198 } 1199 1200 return (retval); 1201} 1202 1203/* 1204 * Process a received IGMPv1 host membership report. 1205 * 1206 * NOTE: 0.0.0.0 workaround breaks const correctness. 1207 */ 1208static int 1209igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip, 1210 /*const*/ struct igmp *igmp) 1211{ 1212 INIT_VNET_INET(ifp->if_vnet); 1213 struct in_ifaddr *ia; 1214 struct in_multi *inm; 1215 1216 IGMPSTAT_INC(igps_rcv_reports); 1217 1218 if (ifp->if_flags & IFF_LOOPBACK) 1219 return (0); 1220 1221 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr) || 1222 !in_hosteq(igmp->igmp_group, ip->ip_dst))) { 1223 IGMPSTAT_INC(igps_rcv_badreports); 1224 return (EINVAL); 1225 } 1226 1227 /* 1228 * RFC 3376, Section 4.2.13, 9.2, 9.3: 1229 * Booting clients may use the source address 0.0.0.0. Some 1230 * IGMP daemons may not know how to use IP_RECVIF to determine 1231 * the interface upon which this message was received. 1232 * Replace 0.0.0.0 with the subnet address if told to do so. 1233 */ 1234 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) { 1235 IFP_TO_IA(ifp, ia); 1236 if (ia != NULL) 1237 ip->ip_src.s_addr = htonl(ia->ia_subnet); 1238 } 1239 1240 CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)", 1241 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1242 1243 /* 1244 * IGMPv1 report suppression. 1245 * If we are a member of this group, and our membership should be 1246 * reported, stop our group timer and transition to the 'lazy' state. 1247 */ 1248 IN_MULTI_LOCK(); 1249 inm = inm_lookup(ifp, igmp->igmp_group); 1250 if (inm != NULL) { 1251 struct igmp_ifinfo *igi; 1252 1253 igi = inm->inm_igi; 1254 if (igi == NULL) { 1255 KASSERT(igi != NULL, 1256 ("%s: no igi for ifp %p", __func__, ifp)); 1257 goto out_locked; 1258 } 1259 1260 IGMPSTAT_INC(igps_rcv_ourreports); 1261 1262 /* 1263 * If we are in IGMPv3 host mode, do not allow the 1264 * other host's IGMPv1 report to suppress our reports 1265 * unless explicitly configured to do so. 1266 */ 1267 if (igi->igi_version == IGMP_VERSION_3) { 1268 if (V_igmp_legacysupp) 1269 igmp_v3_suppress_group_record(inm); 1270 goto out_locked; 1271 } 1272 1273 inm->inm_timer = 0; 1274 1275 switch (inm->inm_state) { 1276 case IGMP_NOT_MEMBER: 1277 case IGMP_SILENT_MEMBER: 1278 break; 1279 case IGMP_IDLE_MEMBER: 1280 case IGMP_LAZY_MEMBER: 1281 case IGMP_AWAKENING_MEMBER: 1282 CTR3(KTR_IGMPV3, 1283 "report suppressed for %s on ifp %p(%s)", 1284 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1285 case IGMP_SLEEPING_MEMBER: 1286 inm->inm_state = IGMP_SLEEPING_MEMBER; 1287 break; 1288 case IGMP_REPORTING_MEMBER: 1289 CTR3(KTR_IGMPV3, 1290 "report suppressed for %s on ifp %p(%s)", 1291 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1292 if (igi->igi_version == IGMP_VERSION_1) 1293 inm->inm_state = IGMP_LAZY_MEMBER; 1294 else if (igi->igi_version == IGMP_VERSION_2) 1295 inm->inm_state = IGMP_SLEEPING_MEMBER; 1296 break; 1297 case IGMP_G_QUERY_PENDING_MEMBER: 1298 case IGMP_SG_QUERY_PENDING_MEMBER: 1299 case IGMP_LEAVING_MEMBER: 1300 break; 1301 } 1302 } 1303 1304out_locked: 1305 IN_MULTI_UNLOCK(); 1306 1307 return (0); 1308} 1309 1310/* 1311 * Process a received IGMPv2 host membership report. 1312 * 1313 * NOTE: 0.0.0.0 workaround breaks const correctness. 1314 */ 1315static int 1316igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip, 1317 /*const*/ struct igmp *igmp) 1318{ 1319 INIT_VNET_INET(ifp->if_vnet); 1320 struct in_ifaddr *ia; 1321 struct in_multi *inm; 1322 1323 /* 1324 * Make sure we don't hear our own membership report. Fast 1325 * leave requires knowing that we are the only member of a 1326 * group. 1327 */ 1328 IFP_TO_IA(ifp, ia); 1329 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) 1330 return (0); 1331 1332 IGMPSTAT_INC(igps_rcv_reports); 1333 1334 if (ifp->if_flags & IFF_LOOPBACK) 1335 return (0); 1336 1337 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) || 1338 !in_hosteq(igmp->igmp_group, ip->ip_dst)) { 1339 IGMPSTAT_INC(igps_rcv_badreports); 1340 return (EINVAL); 1341 } 1342 1343 /* 1344 * RFC 3376, Section 4.2.13, 9.2, 9.3: 1345 * Booting clients may use the source address 0.0.0.0. Some 1346 * IGMP daemons may not know how to use IP_RECVIF to determine 1347 * the interface upon which this message was received. 1348 * Replace 0.0.0.0 with the subnet address if told to do so. 1349 */ 1350 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) { 1351 if (ia != NULL) 1352 ip->ip_src.s_addr = htonl(ia->ia_subnet); 1353 } 1354 1355 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)", 1356 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1357 1358 /* 1359 * IGMPv2 report suppression. 1360 * If we are a member of this group, and our membership should be 1361 * reported, and our group timer is pending or about to be reset, 1362 * stop our group timer by transitioning to the 'lazy' state. 1363 */ 1364 IN_MULTI_LOCK(); 1365 inm = inm_lookup(ifp, igmp->igmp_group); 1366 if (inm != NULL) { 1367 struct igmp_ifinfo *igi; 1368 1369 igi = inm->inm_igi; 1370 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp)); 1371 1372 IGMPSTAT_INC(igps_rcv_ourreports); 1373 1374 /* 1375 * If we are in IGMPv3 host mode, do not allow the 1376 * other host's IGMPv1 report to suppress our reports 1377 * unless explicitly configured to do so. 1378 */ 1379 if (igi->igi_version == IGMP_VERSION_3) { 1380 if (V_igmp_legacysupp) 1381 igmp_v3_suppress_group_record(inm); 1382 goto out_locked; 1383 } 1384 1385 inm->inm_timer = 0; 1386 1387 switch (inm->inm_state) { 1388 case IGMP_NOT_MEMBER: 1389 case IGMP_SILENT_MEMBER: 1390 case IGMP_SLEEPING_MEMBER: 1391 break; 1392 case IGMP_REPORTING_MEMBER: 1393 case IGMP_IDLE_MEMBER: 1394 case IGMP_AWAKENING_MEMBER: 1395 CTR3(KTR_IGMPV3, 1396 "report suppressed for %s on ifp %p(%s)", 1397 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1398 case IGMP_LAZY_MEMBER: 1399 inm->inm_state = IGMP_LAZY_MEMBER; 1400 break; 1401 case IGMP_G_QUERY_PENDING_MEMBER: 1402 case IGMP_SG_QUERY_PENDING_MEMBER: 1403 case IGMP_LEAVING_MEMBER: 1404 break; 1405 } 1406 } 1407 1408out_locked: 1409 IN_MULTI_UNLOCK(); 1410 1411 return (0); 1412} 1413 1414void 1415igmp_input(struct mbuf *m, int off) 1416{ 1417 int iphlen; 1418 struct ifnet *ifp; 1419 struct igmp *igmp; 1420 struct ip *ip; 1421 int igmplen; 1422 int minlen; 1423 int queryver; 1424 1425 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off); 1426 1427 ifp = m->m_pkthdr.rcvif; 1428 INIT_VNET_INET(ifp->if_vnet); 1429 1430 IGMPSTAT_INC(igps_rcv_total); 1431 1432 ip = mtod(m, struct ip *); 1433 iphlen = off; 1434 igmplen = ip->ip_len; 1435 1436 /* 1437 * Validate lengths. 1438 */ 1439 if (igmplen < IGMP_MINLEN) { 1440 IGMPSTAT_INC(igps_rcv_tooshort); 1441 m_freem(m); 1442 return; 1443 } 1444 1445 /* 1446 * Always pullup to the minimum size for v1/v2 or v3 1447 * to amortize calls to m_pullup(). 1448 */ 1449 minlen = iphlen; 1450 if (igmplen >= IGMP_V3_QUERY_MINLEN) 1451 minlen += IGMP_V3_QUERY_MINLEN; 1452 else 1453 minlen += IGMP_MINLEN; 1454 if ((m->m_flags & M_EXT || m->m_len < minlen) && 1455 (m = m_pullup(m, minlen)) == 0) { 1456 IGMPSTAT_INC(igps_rcv_tooshort); 1457 return; 1458 } 1459 ip = mtod(m, struct ip *); 1460 1461 if (ip->ip_ttl != 1) { 1462 IGMPSTAT_INC(igps_rcv_badttl); 1463 m_freem(m); 1464 return; 1465 } 1466 1467 /* 1468 * Validate checksum. 1469 */ 1470 m->m_data += iphlen; 1471 m->m_len -= iphlen; 1472 igmp = mtod(m, struct igmp *); 1473 if (in_cksum(m, igmplen)) { 1474 IGMPSTAT_INC(igps_rcv_badsum); 1475 m_freem(m); 1476 return; 1477 } 1478 m->m_data -= iphlen; 1479 m->m_len += iphlen; 1480 1481 switch (igmp->igmp_type) { 1482 case IGMP_HOST_MEMBERSHIP_QUERY: 1483 if (igmplen == IGMP_MINLEN) { 1484 if (igmp->igmp_code == 0) 1485 queryver = IGMP_VERSION_1; 1486 else 1487 queryver = IGMP_VERSION_2; 1488 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) { 1489 queryver = IGMP_VERSION_3; 1490 } else { 1491 IGMPSTAT_INC(igps_rcv_tooshort); 1492 m_freem(m); 1493 return; 1494 } 1495 1496 switch (queryver) { 1497 case IGMP_VERSION_1: 1498 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1499 if (!V_igmp_v1enable) 1500 break; 1501 if (igmp_input_v1_query(ifp, ip, igmp) != 0) { 1502 m_freem(m); 1503 return; 1504 } 1505 break; 1506 1507 case IGMP_VERSION_2: 1508 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1509 if (!V_igmp_v2enable) 1510 break; 1511 if (igmp_input_v2_query(ifp, ip, igmp) != 0) { 1512 m_freem(m); 1513 return; 1514 } 1515 break; 1516 1517 case IGMP_VERSION_3: { 1518 struct igmpv3 *igmpv3; 1519 uint16_t igmpv3len; 1520 uint16_t srclen; 1521 int nsrc; 1522 1523 IGMPSTAT_INC(igps_rcv_v3_queries); 1524 igmpv3 = (struct igmpv3 *)igmp; 1525 /* 1526 * Validate length based on source count. 1527 */ 1528 nsrc = ntohs(igmpv3->igmp_numsrc); 1529 srclen = sizeof(struct in_addr) * nsrc; 1530 if (nsrc * sizeof(in_addr_t) > srclen) { 1531 IGMPSTAT_INC(igps_rcv_tooshort); 1532 return; 1533 } 1534 /* 1535 * m_pullup() may modify m, so pullup in 1536 * this scope. 1537 */ 1538 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN + 1539 srclen; 1540 if ((m->m_flags & M_EXT || 1541 m->m_len < igmpv3len) && 1542 (m = m_pullup(m, igmpv3len)) == NULL) { 1543 IGMPSTAT_INC(igps_rcv_tooshort); 1544 return; 1545 } 1546 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *) 1547 + iphlen); 1548 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) { 1549 m_freem(m); 1550 return; 1551 } 1552 } 1553 break; 1554 } 1555 break; 1556 1557 case IGMP_v1_HOST_MEMBERSHIP_REPORT: 1558 if (!V_igmp_v1enable) 1559 break; 1560 if (igmp_input_v1_report(ifp, ip, igmp) != 0) { 1561 m_freem(m); 1562 return; 1563 } 1564 break; 1565 1566 case IGMP_v2_HOST_MEMBERSHIP_REPORT: 1567 if (!V_igmp_v2enable) 1568 break; 1569 if (!ip_checkrouteralert(m)) 1570 IGMPSTAT_INC(igps_rcv_nora); 1571 if (igmp_input_v2_report(ifp, ip, igmp) != 0) { 1572 m_freem(m); 1573 return; 1574 } 1575 break; 1576 1577 case IGMP_v3_HOST_MEMBERSHIP_REPORT: 1578 /* 1579 * Hosts do not need to process IGMPv3 membership reports, 1580 * as report suppression is no longer required. 1581 */ 1582 if (!ip_checkrouteralert(m)) 1583 IGMPSTAT_INC(igps_rcv_nora); 1584 break; 1585 1586 default: 1587 break; 1588 } 1589 1590 /* 1591 * Pass all valid IGMP packets up to any process(es) listening on a 1592 * raw IGMP socket. 1593 */ 1594 rip_input(m, off); 1595} 1596 1597 1598/* 1599 * Fast timeout handler (global). 1600 * VIMAGE: Timeout handlers are expected to service all vimages. 1601 */ 1602void 1603igmp_fasttimo(void) 1604{ 1605 VNET_ITERATOR_DECL(vnet_iter); 1606 1607 VNET_LIST_RLOCK(); 1608 VNET_FOREACH(vnet_iter) { 1609 CURVNET_SET(vnet_iter); 1610 igmp_fasttimo_vnet(); 1611 CURVNET_RESTORE(); 1612 } 1613 VNET_LIST_RUNLOCK(); 1614} 1615 1616/* 1617 * Fast timeout handler (per-vnet). 1618 * Sends are shuffled off to a netisr to deal with Giant. 1619 * 1620 * VIMAGE: Assume caller has set up our curvnet. 1621 */ 1622static void 1623igmp_fasttimo_vnet(void) 1624{ 1625 INIT_VNET_INET(curvnet); 1626 struct ifqueue scq; /* State-change packets */ 1627 struct ifqueue qrq; /* Query response packets */ 1628 struct ifnet *ifp; 1629 struct igmp_ifinfo *igi; 1630 struct ifmultiaddr *ifma, *tifma; 1631 struct in_multi *inm; 1632 int loop, uri_fasthz; 1633 1634 loop = 0; 1635 uri_fasthz = 0; 1636 1637 /* 1638 * Quick check to see if any work needs to be done, in order to 1639 * minimize the overhead of fasttimo processing. 1640 * SMPng: XXX Unlocked reads. 1641 */ 1642 if (!V_current_state_timers_running && 1643 !V_interface_timers_running && 1644 !V_state_change_timers_running) 1645 return; 1646 1647 IN_MULTI_LOCK(); 1648 IGMP_LOCK(); 1649 1650 /* 1651 * IGMPv3 General Query response timer processing. 1652 */ 1653 if (V_interface_timers_running) { 1654 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__); 1655 1656 V_interface_timers_running = 0; 1657 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1658 if (igi->igi_v3_timer == 0) { 1659 /* Do nothing. */ 1660 } else if (--igi->igi_v3_timer == 0) { 1661 igmp_v3_dispatch_general_query(igi); 1662 } else { 1663 V_interface_timers_running = 1; 1664 } 1665 } 1666 } 1667 1668 if (!V_current_state_timers_running && 1669 !V_state_change_timers_running) 1670 goto out_locked; 1671 1672 V_current_state_timers_running = 0; 1673 V_state_change_timers_running = 0; 1674 1675 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__); 1676 1677 /* 1678 * IGMPv1/v2/v3 host report and state-change timer processing. 1679 * Note: Processing a v3 group timer may remove a node. 1680 */ 1681 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1682 ifp = igi->igi_ifp; 1683 1684 if (igi->igi_version == IGMP_VERSION_3) { 1685 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 1686 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri * 1687 PR_FASTHZ); 1688 1689 memset(&qrq, 0, sizeof(struct ifqueue)); 1690 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS); 1691 1692 memset(&scq, 0, sizeof(struct ifqueue)); 1693 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS); 1694 } 1695 1696 IF_ADDR_LOCK(ifp); 1697 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, 1698 tifma) { 1699 if (ifma->ifma_addr->sa_family != AF_INET || 1700 ifma->ifma_protospec == NULL) 1701 continue; 1702 inm = (struct in_multi *)ifma->ifma_protospec; 1703 switch (igi->igi_version) { 1704 case IGMP_VERSION_1: 1705 case IGMP_VERSION_2: 1706 igmp_v1v2_process_group_timer(inm, 1707 igi->igi_version); 1708 break; 1709 case IGMP_VERSION_3: 1710 igmp_v3_process_group_timers(igi, &qrq, 1711 &scq, inm, uri_fasthz); 1712 break; 1713 } 1714 } 1715 IF_ADDR_UNLOCK(ifp); 1716 1717 if (igi->igi_version == IGMP_VERSION_3) { 1718 struct in_multi *tinm; 1719 1720 igmp_dispatch_queue(&qrq, 0, loop); 1721 igmp_dispatch_queue(&scq, 0, loop); 1722 1723 /* 1724 * Free the in_multi reference(s) for this 1725 * IGMP lifecycle. 1726 */ 1727 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, 1728 inm_nrele, tinm) { 1729 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, 1730 inm_nrele); 1731 inm_release_locked(inm); 1732 } 1733 } 1734 } 1735 1736out_locked: 1737 IGMP_UNLOCK(); 1738 IN_MULTI_UNLOCK(); 1739} 1740 1741/* 1742 * Update host report group timer for IGMPv1/v2. 1743 * Will update the global pending timer flags. 1744 */ 1745static void 1746igmp_v1v2_process_group_timer(struct in_multi *inm, const int version) 1747{ 1748 INIT_VNET_INET(curvnet); 1749 int report_timer_expired; 1750 1751 IN_MULTI_LOCK_ASSERT(); 1752 IGMP_LOCK_ASSERT(); 1753 1754 if (inm->inm_timer == 0) { 1755 report_timer_expired = 0; 1756 } else if (--inm->inm_timer == 0) { 1757 report_timer_expired = 1; 1758 } else { 1759 V_current_state_timers_running = 1; 1760 return; 1761 } 1762 1763 switch (inm->inm_state) { 1764 case IGMP_NOT_MEMBER: 1765 case IGMP_SILENT_MEMBER: 1766 case IGMP_IDLE_MEMBER: 1767 case IGMP_LAZY_MEMBER: 1768 case IGMP_SLEEPING_MEMBER: 1769 case IGMP_AWAKENING_MEMBER: 1770 break; 1771 case IGMP_REPORTING_MEMBER: 1772 if (report_timer_expired) { 1773 inm->inm_state = IGMP_IDLE_MEMBER; 1774 (void)igmp_v1v2_queue_report(inm, 1775 (version == IGMP_VERSION_2) ? 1776 IGMP_v2_HOST_MEMBERSHIP_REPORT : 1777 IGMP_v1_HOST_MEMBERSHIP_REPORT); 1778 } 1779 break; 1780 case IGMP_G_QUERY_PENDING_MEMBER: 1781 case IGMP_SG_QUERY_PENDING_MEMBER: 1782 case IGMP_LEAVING_MEMBER: 1783 break; 1784 } 1785} 1786 1787/* 1788 * Update a group's timers for IGMPv3. 1789 * Will update the global pending timer flags. 1790 * Note: Unlocked read from igi. 1791 */ 1792static void 1793igmp_v3_process_group_timers(struct igmp_ifinfo *igi, 1794 struct ifqueue *qrq, struct ifqueue *scq, 1795 struct in_multi *inm, const int uri_fasthz) 1796{ 1797 INIT_VNET_INET(curvnet); 1798 int query_response_timer_expired; 1799 int state_change_retransmit_timer_expired; 1800 1801 IN_MULTI_LOCK_ASSERT(); 1802 IGMP_LOCK_ASSERT(); 1803 1804 query_response_timer_expired = 0; 1805 state_change_retransmit_timer_expired = 0; 1806 1807 /* 1808 * During a transition from v1/v2 compatibility mode back to v3, 1809 * a group record in REPORTING state may still have its group 1810 * timer active. This is a no-op in this function; it is easier 1811 * to deal with it here than to complicate the slow-timeout path. 1812 */ 1813 if (inm->inm_timer == 0) { 1814 query_response_timer_expired = 0; 1815 } else if (--inm->inm_timer == 0) { 1816 query_response_timer_expired = 1; 1817 } else { 1818 V_current_state_timers_running = 1; 1819 } 1820 1821 if (inm->inm_sctimer == 0) { 1822 state_change_retransmit_timer_expired = 0; 1823 } else if (--inm->inm_sctimer == 0) { 1824 state_change_retransmit_timer_expired = 1; 1825 } else { 1826 V_state_change_timers_running = 1; 1827 } 1828 1829 /* We are in fasttimo, so be quick about it. */ 1830 if (!state_change_retransmit_timer_expired && 1831 !query_response_timer_expired) 1832 return; 1833 1834 switch (inm->inm_state) { 1835 case IGMP_NOT_MEMBER: 1836 case IGMP_SILENT_MEMBER: 1837 case IGMP_SLEEPING_MEMBER: 1838 case IGMP_LAZY_MEMBER: 1839 case IGMP_AWAKENING_MEMBER: 1840 case IGMP_IDLE_MEMBER: 1841 break; 1842 case IGMP_G_QUERY_PENDING_MEMBER: 1843 case IGMP_SG_QUERY_PENDING_MEMBER: 1844 /* 1845 * Respond to a previously pending Group-Specific 1846 * or Group-and-Source-Specific query by enqueueing 1847 * the appropriate Current-State report for 1848 * immediate transmission. 1849 */ 1850 if (query_response_timer_expired) { 1851 int retval; 1852 1853 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1, 1854 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)); 1855 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 1856 __func__, retval); 1857 inm->inm_state = IGMP_REPORTING_MEMBER; 1858 /* XXX Clear recorded sources for next time. */ 1859 inm_clear_recorded(inm); 1860 } 1861 /* FALLTHROUGH */ 1862 case IGMP_REPORTING_MEMBER: 1863 case IGMP_LEAVING_MEMBER: 1864 if (state_change_retransmit_timer_expired) { 1865 /* 1866 * State-change retransmission timer fired. 1867 * If there are any further pending retransmissions, 1868 * set the global pending state-change flag, and 1869 * reset the timer. 1870 */ 1871 if (--inm->inm_scrv > 0) { 1872 inm->inm_sctimer = uri_fasthz; 1873 V_state_change_timers_running = 1; 1874 } 1875 /* 1876 * Retransmit the previously computed state-change 1877 * report. If there are no further pending 1878 * retransmissions, the mbuf queue will be consumed. 1879 * Update T0 state to T1 as we have now sent 1880 * a state-change. 1881 */ 1882 (void)igmp_v3_merge_state_changes(inm, scq); 1883 1884 inm_commit(inm); 1885 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 1886 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 1887 1888 /* 1889 * If we are leaving the group for good, make sure 1890 * we release IGMP's reference to it. 1891 * This release must be deferred using a SLIST, 1892 * as we are called from a loop which traverses 1893 * the in_ifmultiaddr TAILQ. 1894 */ 1895 if (inm->inm_state == IGMP_LEAVING_MEMBER && 1896 inm->inm_scrv == 0) { 1897 inm->inm_state = IGMP_NOT_MEMBER; 1898 SLIST_INSERT_HEAD(&igi->igi_relinmhead, 1899 inm, inm_nrele); 1900 } 1901 } 1902 break; 1903 } 1904} 1905 1906 1907/* 1908 * Suppress a group's pending response to a group or source/group query. 1909 * 1910 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency. 1911 * Do NOT update ST1/ST0 as this operation merely suppresses 1912 * the currently pending group record. 1913 * Do NOT suppress the response to a general query. It is possible but 1914 * it would require adding another state or flag. 1915 */ 1916static void 1917igmp_v3_suppress_group_record(struct in_multi *inm) 1918{ 1919 1920 IN_MULTI_LOCK_ASSERT(); 1921 1922 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3, 1923 ("%s: not IGMPv3 mode on link", __func__)); 1924 1925 if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER || 1926 inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER) 1927 return; 1928 1929 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) 1930 inm_clear_recorded(inm); 1931 1932 inm->inm_timer = 0; 1933 inm->inm_state = IGMP_REPORTING_MEMBER; 1934} 1935 1936/* 1937 * Switch to a different IGMP version on the given interface, 1938 * as per Section 7.2.1. 1939 */ 1940static void 1941igmp_set_version(struct igmp_ifinfo *igi, const int version) 1942{ 1943 int old_version_timer; 1944 1945 IGMP_LOCK_ASSERT(); 1946 1947 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__, 1948 version, igi->igi_ifp, igi->igi_ifp->if_xname); 1949 1950 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) { 1951 /* 1952 * Compute the "Older Version Querier Present" timer as per 1953 * Section 8.12. 1954 */ 1955 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri; 1956 old_version_timer *= PR_SLOWHZ; 1957 1958 if (version == IGMP_VERSION_1) { 1959 igi->igi_v1_timer = old_version_timer; 1960 igi->igi_v2_timer = 0; 1961 } else if (version == IGMP_VERSION_2) { 1962 igi->igi_v1_timer = 0; 1963 igi->igi_v2_timer = old_version_timer; 1964 } 1965 } 1966 1967 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 1968 if (igi->igi_version != IGMP_VERSION_2) { 1969 igi->igi_version = IGMP_VERSION_2; 1970 igmp_v3_cancel_link_timers(igi); 1971 } 1972 } else if (igi->igi_v1_timer > 0) { 1973 if (igi->igi_version != IGMP_VERSION_1) { 1974 igi->igi_version = IGMP_VERSION_1; 1975 igmp_v3_cancel_link_timers(igi); 1976 } 1977 } 1978} 1979 1980/* 1981 * Cancel pending IGMPv3 timers for the given link and all groups 1982 * joined on it; state-change, general-query, and group-query timers. 1983 * 1984 * Only ever called on a transition from v3 to Compatibility mode. Kill 1985 * the timers stone dead (this may be expensive for large N groups), they 1986 * will be restarted if Compatibility Mode deems that they must be due to 1987 * query processing. 1988 */ 1989static void 1990igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) 1991{ 1992 struct ifmultiaddr *ifma; 1993 struct ifnet *ifp; 1994 struct in_multi *inm; 1995 1996 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__, 1997 igi->igi_ifp, igi->igi_ifp->if_xname); 1998 1999 IN_MULTI_LOCK_ASSERT(); 2000 IGMP_LOCK_ASSERT(); 2001 2002 /* 2003 * Stop the v3 General Query Response on this link stone dead. 2004 * If fasttimo is woken up due to V_interface_timers_running, 2005 * the flag will be cleared if there are no pending link timers. 2006 */ 2007 igi->igi_v3_timer = 0; 2008 2009 /* 2010 * Now clear the current-state and state-change report timers 2011 * for all memberships scoped to this link. 2012 */ 2013 ifp = igi->igi_ifp; 2014 IF_ADDR_LOCK(ifp); 2015 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2016 if (ifma->ifma_addr->sa_family != AF_INET || 2017 ifma->ifma_protospec == NULL) 2018 continue; 2019 inm = (struct in_multi *)ifma->ifma_protospec; 2020 switch (inm->inm_state) { 2021 case IGMP_NOT_MEMBER: 2022 case IGMP_SILENT_MEMBER: 2023 case IGMP_IDLE_MEMBER: 2024 case IGMP_LAZY_MEMBER: 2025 case IGMP_SLEEPING_MEMBER: 2026 case IGMP_AWAKENING_MEMBER: 2027 /* 2028 * These states are either not relevant in v3 mode, 2029 * or are unreported. Do nothing. 2030 */ 2031 break; 2032 case IGMP_LEAVING_MEMBER: 2033 /* 2034 * If we are leaving the group and switching to 2035 * compatibility mode, we need to release the final 2036 * reference held for issuing the INCLUDE {}, and 2037 * transition to REPORTING to ensure the host leave 2038 * message is sent upstream to the old querier -- 2039 * transition to NOT would lose the leave and race. 2040 * 2041 * SMPNG: Must drop and re-acquire IF_ADDR_LOCK 2042 * around inm_release_locked(), as it is not 2043 * a recursive mutex. 2044 */ 2045 IF_ADDR_UNLOCK(ifp); 2046 inm_release_locked(inm); 2047 IF_ADDR_LOCK(ifp); 2048 /* FALLTHROUGH */ 2049 case IGMP_G_QUERY_PENDING_MEMBER: 2050 case IGMP_SG_QUERY_PENDING_MEMBER: 2051 inm_clear_recorded(inm); 2052 /* FALLTHROUGH */ 2053 case IGMP_REPORTING_MEMBER: 2054 inm->inm_state = IGMP_REPORTING_MEMBER; 2055 break; 2056 } 2057 /* 2058 * Always clear state-change and group report timers. 2059 * Free any pending IGMPv3 state-change records. 2060 */ 2061 inm->inm_sctimer = 0; 2062 inm->inm_timer = 0; 2063 _IF_DRAIN(&inm->inm_scq); 2064 } 2065 IF_ADDR_UNLOCK(ifp); 2066} 2067 2068/* 2069 * Update the Older Version Querier Present timers for a link. 2070 * See Section 7.2.1 of RFC 3376. 2071 */ 2072static void 2073igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi) 2074{ 2075 INIT_VNET_INET(curvnet); 2076 2077 IGMP_LOCK_ASSERT(); 2078 2079 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) { 2080 /* 2081 * IGMPv1 and IGMPv2 Querier Present timers expired. 2082 * 2083 * Revert to IGMPv3. 2084 */ 2085 if (igi->igi_version != IGMP_VERSION_3) { 2086 CTR5(KTR_IGMPV3, 2087 "%s: transition from v%d -> v%d on %p(%s)", 2088 __func__, igi->igi_version, IGMP_VERSION_3, 2089 igi->igi_ifp, igi->igi_ifp->if_xname); 2090 igi->igi_version = IGMP_VERSION_3; 2091 } 2092 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 2093 /* 2094 * IGMPv1 Querier Present timer expired, 2095 * IGMPv2 Querier Present timer running. 2096 * If IGMPv2 was disabled since last timeout, 2097 * revert to IGMPv3. 2098 * If IGMPv2 is enabled, revert to IGMPv2. 2099 */ 2100 if (!V_igmp_v2enable) { 2101 CTR5(KTR_IGMPV3, 2102 "%s: transition from v%d -> v%d on %p(%s)", 2103 __func__, igi->igi_version, IGMP_VERSION_3, 2104 igi->igi_ifp, igi->igi_ifp->if_xname); 2105 igi->igi_v2_timer = 0; 2106 igi->igi_version = IGMP_VERSION_3; 2107 } else { 2108 --igi->igi_v2_timer; 2109 if (igi->igi_version != IGMP_VERSION_2) { 2110 CTR5(KTR_IGMPV3, 2111 "%s: transition from v%d -> v%d on %p(%s)", 2112 __func__, igi->igi_version, IGMP_VERSION_2, 2113 igi->igi_ifp, igi->igi_ifp->if_xname); 2114 igi->igi_version = IGMP_VERSION_2; 2115 } 2116 } 2117 } else if (igi->igi_v1_timer > 0) { 2118 /* 2119 * IGMPv1 Querier Present timer running. 2120 * Stop IGMPv2 timer if running. 2121 * 2122 * If IGMPv1 was disabled since last timeout, 2123 * revert to IGMPv3. 2124 * If IGMPv1 is enabled, reset IGMPv2 timer if running. 2125 */ 2126 if (!V_igmp_v1enable) { 2127 CTR5(KTR_IGMPV3, 2128 "%s: transition from v%d -> v%d on %p(%s)", 2129 __func__, igi->igi_version, IGMP_VERSION_3, 2130 igi->igi_ifp, igi->igi_ifp->if_xname); 2131 igi->igi_v1_timer = 0; 2132 igi->igi_version = IGMP_VERSION_3; 2133 } else { 2134 --igi->igi_v1_timer; 2135 } 2136 if (igi->igi_v2_timer > 0) { 2137 CTR3(KTR_IGMPV3, 2138 "%s: cancel v2 timer on %p(%s)", 2139 __func__, igi->igi_ifp, igi->igi_ifp->if_xname); 2140 igi->igi_v2_timer = 0; 2141 } 2142 } 2143} 2144 2145/* 2146 * Global slowtimo handler. 2147 * VIMAGE: Timeout handlers are expected to service all vimages. 2148 */ 2149void 2150igmp_slowtimo(void) 2151{ 2152 VNET_ITERATOR_DECL(vnet_iter); 2153 2154 VNET_LIST_RLOCK(); 2155 VNET_FOREACH(vnet_iter) { 2156 CURVNET_SET(vnet_iter); 2157 igmp_slowtimo_vnet(); 2158 CURVNET_RESTORE(); 2159 } 2160 VNET_LIST_RUNLOCK(); 2161} 2162 2163/* 2164 * Per-vnet slowtimo handler. 2165 */ 2166static void 2167igmp_slowtimo_vnet(void) 2168{ 2169 INIT_VNET_INET(curvnet); 2170 struct igmp_ifinfo *igi; 2171 2172 IGMP_LOCK(); 2173 2174 LIST_FOREACH(igi, &V_igi_head, igi_link) { 2175 igmp_v1v2_process_querier_timers(igi); 2176 } 2177 2178 IGMP_UNLOCK(); 2179} 2180 2181/* 2182 * Dispatch an IGMPv1/v2 host report or leave message. 2183 * These are always small enough to fit inside a single mbuf. 2184 */ 2185static int 2186igmp_v1v2_queue_report(struct in_multi *inm, const int type) 2187{ 2188 struct ifnet *ifp; 2189 struct igmp *igmp; 2190 struct ip *ip; 2191 struct mbuf *m; 2192 2193 IN_MULTI_LOCK_ASSERT(); 2194 IGMP_LOCK_ASSERT(); 2195 2196 ifp = inm->inm_ifp; 2197 2198 MGETHDR(m, M_DONTWAIT, MT_DATA); 2199 if (m == NULL) 2200 return (ENOMEM); 2201 MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp)); 2202 2203 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp); 2204 2205 m->m_data += sizeof(struct ip); 2206 m->m_len = sizeof(struct igmp); 2207 2208 igmp = mtod(m, struct igmp *); 2209 igmp->igmp_type = type; 2210 igmp->igmp_code = 0; 2211 igmp->igmp_group = inm->inm_addr; 2212 igmp->igmp_cksum = 0; 2213 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp)); 2214 2215 m->m_data -= sizeof(struct ip); 2216 m->m_len += sizeof(struct ip); 2217 2218 ip = mtod(m, struct ip *); 2219 ip->ip_tos = 0; 2220 ip->ip_len = sizeof(struct ip) + sizeof(struct igmp); 2221 ip->ip_off = 0; 2222 ip->ip_p = IPPROTO_IGMP; 2223 ip->ip_src.s_addr = INADDR_ANY; 2224 2225 if (type == IGMP_HOST_LEAVE_MESSAGE) 2226 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP); 2227 else 2228 ip->ip_dst = inm->inm_addr; 2229 2230 igmp_save_context(m, ifp); 2231 2232 m->m_flags |= M_IGMPV2; 2233 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK) 2234 m->m_flags |= M_IGMP_LOOP; 2235 2236 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m); 2237 netisr_dispatch(NETISR_IGMP, m); 2238 2239 return (0); 2240} 2241 2242/* 2243 * Process a state change from the upper layer for the given IPv4 group. 2244 * 2245 * Each socket holds a reference on the in_multi in its own ip_moptions. 2246 * The socket layer will have made the necessary updates to.the group 2247 * state, it is now up to IGMP to issue a state change report if there 2248 * has been any change between T0 (when the last state-change was issued) 2249 * and T1 (now). 2250 * 2251 * We use the IGMPv3 state machine at group level. The IGMP module 2252 * however makes the decision as to which IGMP protocol version to speak. 2253 * A state change *from* INCLUDE {} always means an initial join. 2254 * A state change *to* INCLUDE {} always means a final leave. 2255 * 2256 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can 2257 * save ourselves a bunch of work; any exclusive mode groups need not 2258 * compute source filter lists. 2259 * 2260 * VIMAGE: curvnet should have been set by caller, as this routine 2261 * is called from the socket option handlers. 2262 */ 2263int 2264igmp_change_state(struct in_multi *inm) 2265{ 2266 struct igmp_ifinfo *igi; 2267 struct ifnet *ifp; 2268 int error; 2269 2270 IN_MULTI_LOCK_ASSERT(); 2271 2272 error = 0; 2273 2274 /* 2275 * Try to detect if the upper layer just asked us to change state 2276 * for an interface which has now gone away. 2277 */ 2278 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); 2279 ifp = inm->inm_ifma->ifma_ifp; 2280 if (ifp != NULL) { 2281 /* 2282 * Sanity check that netinet's notion of ifp is the 2283 * same as net's. 2284 */ 2285 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); 2286 } 2287 2288 IGMP_LOCK(); 2289 2290 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 2291 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 2292 2293 /* 2294 * If we detect a state transition to or from MCAST_UNDEFINED 2295 * for this group, then we are starting or finishing an IGMP 2296 * life cycle for this group. 2297 */ 2298 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) { 2299 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__, 2300 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode); 2301 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) { 2302 CTR1(KTR_IGMPV3, "%s: initial join", __func__); 2303 error = igmp_initial_join(inm, igi); 2304 goto out_locked; 2305 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) { 2306 CTR1(KTR_IGMPV3, "%s: final leave", __func__); 2307 igmp_final_leave(inm, igi); 2308 goto out_locked; 2309 } 2310 } else { 2311 CTR1(KTR_IGMPV3, "%s: filter set change", __func__); 2312 } 2313 2314 error = igmp_handle_state_change(inm, igi); 2315 2316out_locked: 2317 IGMP_UNLOCK(); 2318 return (error); 2319} 2320 2321/* 2322 * Perform the initial join for an IGMP group. 2323 * 2324 * When joining a group: 2325 * If the group should have its IGMP traffic suppressed, do nothing. 2326 * IGMPv1 starts sending IGMPv1 host membership reports. 2327 * IGMPv2 starts sending IGMPv2 host membership reports. 2328 * IGMPv3 will schedule an IGMPv3 state-change report containing the 2329 * initial state of the membership. 2330 */ 2331static int 2332igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi) 2333{ 2334 INIT_VNET_INET(curvnet); 2335 struct ifnet *ifp; 2336 struct ifqueue *ifq; 2337 int error, retval, syncstates; 2338 2339 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)", 2340 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2341 inm->inm_ifp->if_xname); 2342 2343 error = 0; 2344 syncstates = 1; 2345 2346 ifp = inm->inm_ifp; 2347 2348 IN_MULTI_LOCK_ASSERT(); 2349 IGMP_LOCK_ASSERT(); 2350 2351 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2352 2353 /* 2354 * Groups joined on loopback or marked as 'not reported', 2355 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and 2356 * are never reported in any IGMP protocol exchanges. 2357 * All other groups enter the appropriate IGMP state machine 2358 * for the version in use on this link. 2359 * A link marked as IGIF_SILENT causes IGMP to be completely 2360 * disabled for the link. 2361 */ 2362 if ((ifp->if_flags & IFF_LOOPBACK) || 2363 (igi->igi_flags & IGIF_SILENT) || 2364 !igmp_isgroupreported(inm->inm_addr)) { 2365 CTR1(KTR_IGMPV3, 2366"%s: not kicking state machine for silent group", __func__); 2367 inm->inm_state = IGMP_SILENT_MEMBER; 2368 inm->inm_timer = 0; 2369 } else { 2370 /* 2371 * Deal with overlapping in_multi lifecycle. 2372 * If this group was LEAVING, then make sure 2373 * we drop the reference we picked up to keep the 2374 * group around for the final INCLUDE {} enqueue. 2375 */ 2376 if (igi->igi_version == IGMP_VERSION_3 && 2377 inm->inm_state == IGMP_LEAVING_MEMBER) 2378 inm_release_locked(inm); 2379 2380 inm->inm_state = IGMP_REPORTING_MEMBER; 2381 2382 switch (igi->igi_version) { 2383 case IGMP_VERSION_1: 2384 case IGMP_VERSION_2: 2385 inm->inm_state = IGMP_IDLE_MEMBER; 2386 error = igmp_v1v2_queue_report(inm, 2387 (igi->igi_version == IGMP_VERSION_2) ? 2388 IGMP_v2_HOST_MEMBERSHIP_REPORT : 2389 IGMP_v1_HOST_MEMBERSHIP_REPORT); 2390 if (error == 0) { 2391 inm->inm_timer = IGMP_RANDOM_DELAY( 2392 IGMP_V1V2_MAX_RI * PR_FASTHZ); 2393 V_current_state_timers_running = 1; 2394 } 2395 break; 2396 2397 case IGMP_VERSION_3: 2398 /* 2399 * Defer update of T0 to T1, until the first copy 2400 * of the state change has been transmitted. 2401 */ 2402 syncstates = 0; 2403 2404 /* 2405 * Immediately enqueue a State-Change Report for 2406 * this interface, freeing any previous reports. 2407 * Don't kick the timers if there is nothing to do, 2408 * or if an error occurred. 2409 */ 2410 ifq = &inm->inm_scq; 2411 _IF_DRAIN(ifq); 2412 retval = igmp_v3_enqueue_group_record(ifq, inm, 1, 2413 0, 0); 2414 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 2415 __func__, retval); 2416 if (retval <= 0) { 2417 error = retval * -1; 2418 break; 2419 } 2420 2421 /* 2422 * Schedule transmission of pending state-change 2423 * report up to RV times for this link. The timer 2424 * will fire at the next igmp_fasttimo (~200ms), 2425 * giving us an opportunity to merge the reports. 2426 */ 2427 if (igi->igi_flags & IGIF_LOOPBACK) { 2428 inm->inm_scrv = 1; 2429 } else { 2430 KASSERT(igi->igi_rv > 1, 2431 ("%s: invalid robustness %d", __func__, 2432 igi->igi_rv)); 2433 inm->inm_scrv = igi->igi_rv; 2434 } 2435 inm->inm_sctimer = 1; 2436 V_state_change_timers_running = 1; 2437 2438 error = 0; 2439 break; 2440 } 2441 } 2442 2443 /* 2444 * Only update the T0 state if state change is atomic, 2445 * i.e. we don't need to wait for a timer to fire before we 2446 * can consider the state change to have been communicated. 2447 */ 2448 if (syncstates) { 2449 inm_commit(inm); 2450 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2451 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2452 } 2453 2454 return (error); 2455} 2456 2457/* 2458 * Issue an intermediate state change during the IGMP life-cycle. 2459 */ 2460static int 2461igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi) 2462{ 2463 INIT_VNET_INET(curvnet); 2464 struct ifnet *ifp; 2465 int retval; 2466 2467 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)", 2468 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2469 inm->inm_ifp->if_xname); 2470 2471 ifp = inm->inm_ifp; 2472 2473 IN_MULTI_LOCK_ASSERT(); 2474 IGMP_LOCK_ASSERT(); 2475 2476 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2477 2478 if ((ifp->if_flags & IFF_LOOPBACK) || 2479 (igi->igi_flags & IGIF_SILENT) || 2480 !igmp_isgroupreported(inm->inm_addr) || 2481 (igi->igi_version != IGMP_VERSION_3)) { 2482 if (!igmp_isgroupreported(inm->inm_addr)) { 2483 CTR1(KTR_IGMPV3, 2484"%s: not kicking state machine for silent group", __func__); 2485 } 2486 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__); 2487 inm_commit(inm); 2488 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2489 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2490 return (0); 2491 } 2492 2493 _IF_DRAIN(&inm->inm_scq); 2494 2495 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0); 2496 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval); 2497 if (retval <= 0) 2498 return (-retval); 2499 2500 /* 2501 * If record(s) were enqueued, start the state-change 2502 * report timer for this group. 2503 */ 2504 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv); 2505 inm->inm_sctimer = 1; 2506 V_state_change_timers_running = 1; 2507 2508 return (0); 2509} 2510 2511/* 2512 * Perform the final leave for an IGMP group. 2513 * 2514 * When leaving a group: 2515 * IGMPv1 does nothing. 2516 * IGMPv2 sends a host leave message, if and only if we are the reporter. 2517 * IGMPv3 enqueues a state-change report containing a transition 2518 * to INCLUDE {} for immediate transmission. 2519 */ 2520static void 2521igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi) 2522{ 2523 INIT_VNET_INET(curvnet); 2524 int syncstates; 2525 2526 syncstates = 1; 2527 2528 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)", 2529 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2530 inm->inm_ifp->if_xname); 2531 2532 IN_MULTI_LOCK_ASSERT(); 2533 IGMP_LOCK_ASSERT(); 2534 2535 switch (inm->inm_state) { 2536 case IGMP_NOT_MEMBER: 2537 case IGMP_SILENT_MEMBER: 2538 case IGMP_LEAVING_MEMBER: 2539 /* Already leaving or left; do nothing. */ 2540 CTR1(KTR_IGMPV3, 2541"%s: not kicking state machine for silent group", __func__); 2542 break; 2543 case IGMP_REPORTING_MEMBER: 2544 case IGMP_IDLE_MEMBER: 2545 case IGMP_G_QUERY_PENDING_MEMBER: 2546 case IGMP_SG_QUERY_PENDING_MEMBER: 2547 if (igi->igi_version == IGMP_VERSION_2) { 2548#ifdef INVARIANTS 2549 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER || 2550 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) 2551 panic("%s: IGMPv3 state reached, not IGMPv3 mode", 2552 __func__); 2553#endif 2554 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE); 2555 inm->inm_state = IGMP_NOT_MEMBER; 2556 } else if (igi->igi_version == IGMP_VERSION_3) { 2557 /* 2558 * Stop group timer and all pending reports. 2559 * Immediately enqueue a state-change report 2560 * TO_IN {} to be sent on the next fast timeout, 2561 * giving us an opportunity to merge reports. 2562 */ 2563 _IF_DRAIN(&inm->inm_scq); 2564 inm->inm_timer = 0; 2565 if (igi->igi_flags & IGIF_LOOPBACK) { 2566 inm->inm_scrv = 1; 2567 } else { 2568 inm->inm_scrv = igi->igi_rv; 2569 } 2570 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d " 2571 "pending retransmissions.", __func__, 2572 inet_ntoa(inm->inm_addr), 2573 inm->inm_ifp->if_xname, inm->inm_scrv); 2574 if (inm->inm_scrv == 0) { 2575 inm->inm_state = IGMP_NOT_MEMBER; 2576 inm->inm_sctimer = 0; 2577 } else { 2578 int retval; 2579 2580 inm_acquire_locked(inm); 2581 2582 retval = igmp_v3_enqueue_group_record( 2583 &inm->inm_scq, inm, 1, 0, 0); 2584 KASSERT(retval != 0, 2585 ("%s: enqueue record = %d", __func__, 2586 retval)); 2587 2588 inm->inm_state = IGMP_LEAVING_MEMBER; 2589 inm->inm_sctimer = 1; 2590 V_state_change_timers_running = 1; 2591 syncstates = 0; 2592 } 2593 break; 2594 } 2595 break; 2596 case IGMP_LAZY_MEMBER: 2597 case IGMP_SLEEPING_MEMBER: 2598 case IGMP_AWAKENING_MEMBER: 2599 /* Our reports are suppressed; do nothing. */ 2600 break; 2601 } 2602 2603 if (syncstates) { 2604 inm_commit(inm); 2605 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2606 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2607 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 2608 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s", 2609 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2610 } 2611} 2612 2613/* 2614 * Enqueue an IGMPv3 group record to the given output queue. 2615 * 2616 * XXX This function could do with having the allocation code 2617 * split out, and the multiple-tree-walks coalesced into a single 2618 * routine as has been done in igmp_v3_enqueue_filter_change(). 2619 * 2620 * If is_state_change is zero, a current-state record is appended. 2621 * If is_state_change is non-zero, a state-change report is appended. 2622 * 2623 * If is_group_query is non-zero, an mbuf packet chain is allocated. 2624 * If is_group_query is zero, and if there is a packet with free space 2625 * at the tail of the queue, it will be appended to providing there 2626 * is enough free space. 2627 * Otherwise a new mbuf packet chain is allocated. 2628 * 2629 * If is_source_query is non-zero, each source is checked to see if 2630 * it was recorded for a Group-Source query, and will be omitted if 2631 * it is not both in-mode and recorded. 2632 * 2633 * The function will attempt to allocate leading space in the packet 2634 * for the IP/IGMP header to be prepended without fragmenting the chain. 2635 * 2636 * If successful the size of all data appended to the queue is returned, 2637 * otherwise an error code less than zero is returned, or zero if 2638 * no record(s) were appended. 2639 */ 2640static int 2641igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, 2642 const int is_state_change, const int is_group_query, 2643 const int is_source_query) 2644{ 2645 struct igmp_grouprec ig; 2646 struct igmp_grouprec *pig; 2647 struct ifnet *ifp; 2648 struct ip_msource *ims, *nims; 2649 struct mbuf *m0, *m, *md; 2650 int error, is_filter_list_change; 2651 int minrec0len, m0srcs, msrcs, nbytes, off; 2652 int record_has_sources; 2653 int now; 2654 int type; 2655 in_addr_t naddr; 2656 uint8_t mode; 2657 2658 IN_MULTI_LOCK_ASSERT(); 2659 2660 error = 0; 2661 ifp = inm->inm_ifp; 2662 is_filter_list_change = 0; 2663 m = NULL; 2664 m0 = NULL; 2665 m0srcs = 0; 2666 msrcs = 0; 2667 nbytes = 0; 2668 nims = NULL; 2669 record_has_sources = 1; 2670 pig = NULL; 2671 type = IGMP_DO_NOTHING; 2672 mode = inm->inm_st[1].iss_fmode; 2673 2674 /* 2675 * If we did not transition out of ASM mode during t0->t1, 2676 * and there are no source nodes to process, we can skip 2677 * the generation of source records. 2678 */ 2679 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 && 2680 inm->inm_nsrc == 0) 2681 record_has_sources = 0; 2682 2683 if (is_state_change) { 2684 /* 2685 * Queue a state change record. 2686 * If the mode did not change, and there are non-ASM 2687 * listeners or source filters present, 2688 * we potentially need to issue two records for the group. 2689 * If we are transitioning to MCAST_UNDEFINED, we need 2690 * not send any sources. 2691 * If there are ASM listeners, and there was no filter 2692 * mode transition of any kind, do nothing. 2693 */ 2694 if (mode != inm->inm_st[0].iss_fmode) { 2695 if (mode == MCAST_EXCLUDE) { 2696 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE", 2697 __func__); 2698 type = IGMP_CHANGE_TO_EXCLUDE_MODE; 2699 } else { 2700 CTR1(KTR_IGMPV3, "%s: change to INCLUDE", 2701 __func__); 2702 type = IGMP_CHANGE_TO_INCLUDE_MODE; 2703 if (mode == MCAST_UNDEFINED) 2704 record_has_sources = 0; 2705 } 2706 } else { 2707 if (record_has_sources) { 2708 is_filter_list_change = 1; 2709 } else { 2710 type = IGMP_DO_NOTHING; 2711 } 2712 } 2713 } else { 2714 /* 2715 * Queue a current state record. 2716 */ 2717 if (mode == MCAST_EXCLUDE) { 2718 type = IGMP_MODE_IS_EXCLUDE; 2719 } else if (mode == MCAST_INCLUDE) { 2720 type = IGMP_MODE_IS_INCLUDE; 2721 KASSERT(inm->inm_st[1].iss_asm == 0, 2722 ("%s: inm %p is INCLUDE but ASM count is %d", 2723 __func__, inm, inm->inm_st[1].iss_asm)); 2724 } 2725 } 2726 2727 /* 2728 * Generate the filter list changes using a separate function. 2729 */ 2730 if (is_filter_list_change) 2731 return (igmp_v3_enqueue_filter_change(ifq, inm)); 2732 2733 if (type == IGMP_DO_NOTHING) { 2734 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s", 2735 __func__, inet_ntoa(inm->inm_addr), 2736 inm->inm_ifp->if_xname); 2737 return (0); 2738 } 2739 2740 /* 2741 * If any sources are present, we must be able to fit at least 2742 * one in the trailing space of the tail packet's mbuf, 2743 * ideally more. 2744 */ 2745 minrec0len = sizeof(struct igmp_grouprec); 2746 if (record_has_sources) 2747 minrec0len += sizeof(in_addr_t); 2748 2749 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__, 2750 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr), 2751 inm->inm_ifp->if_xname); 2752 2753 /* 2754 * Check if we have a packet in the tail of the queue for this 2755 * group into which the first group record for this group will fit. 2756 * Otherwise allocate a new packet. 2757 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT. 2758 * Note: Group records for G/GSR query responses MUST be sent 2759 * in their own packet. 2760 */ 2761 m0 = ifq->ifq_tail; 2762 if (!is_group_query && 2763 m0 != NULL && 2764 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && 2765 (m0->m_pkthdr.len + minrec0len) < 2766 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 2767 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2768 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2769 m = m0; 2770 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__); 2771 } else { 2772 if (_IF_QFULL(ifq)) { 2773 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2774 return (-ENOMEM); 2775 } 2776 m = NULL; 2777 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2778 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2779 if (!is_state_change && !is_group_query) { 2780 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2781 if (m) 2782 m->m_data += IGMP_LEADINGSPACE; 2783 } 2784 if (m == NULL) { 2785 m = m_gethdr(M_DONTWAIT, MT_DATA); 2786 if (m) 2787 MH_ALIGN(m, IGMP_LEADINGSPACE); 2788 } 2789 if (m == NULL) 2790 return (-ENOMEM); 2791 2792 igmp_save_context(m, ifp); 2793 2794 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__); 2795 } 2796 2797 /* 2798 * Append group record. 2799 * If we have sources, we don't know how many yet. 2800 */ 2801 ig.ig_type = type; 2802 ig.ig_datalen = 0; 2803 ig.ig_numsrc = 0; 2804 ig.ig_group = inm->inm_addr; 2805 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2806 if (m != m0) 2807 m_freem(m); 2808 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2809 return (-ENOMEM); 2810 } 2811 nbytes += sizeof(struct igmp_grouprec); 2812 2813 /* 2814 * Append as many sources as will fit in the first packet. 2815 * If we are appending to a new packet, the chain allocation 2816 * may potentially use clusters; use m_getptr() in this case. 2817 * If we are appending to an existing packet, we need to obtain 2818 * a pointer to the group record after m_append(), in case a new 2819 * mbuf was allocated. 2820 * Only append sources which are in-mode at t1. If we are 2821 * transitioning to MCAST_UNDEFINED state on the group, do not 2822 * include source entries. 2823 * Only report recorded sources in our filter set when responding 2824 * to a group-source query. 2825 */ 2826 if (record_has_sources) { 2827 if (m == m0) { 2828 md = m_last(m); 2829 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2830 md->m_len - nbytes); 2831 } else { 2832 md = m_getptr(m, 0, &off); 2833 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2834 off); 2835 } 2836 msrcs = 0; 2837 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) { 2838 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2839 inet_ntoa_haddr(ims->ims_haddr)); 2840 now = ims_get_mode(inm, ims, 1); 2841 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now); 2842 if ((now != mode) || 2843 (now == mode && mode == MCAST_UNDEFINED)) { 2844 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2845 continue; 2846 } 2847 if (is_source_query && ims->ims_stp == 0) { 2848 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2849 __func__); 2850 continue; 2851 } 2852 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2853 naddr = htonl(ims->ims_haddr); 2854 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2855 if (m != m0) 2856 m_freem(m); 2857 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2858 __func__); 2859 return (-ENOMEM); 2860 } 2861 nbytes += sizeof(in_addr_t); 2862 ++msrcs; 2863 if (msrcs == m0srcs) 2864 break; 2865 } 2866 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__, 2867 msrcs); 2868 pig->ig_numsrc = htons(msrcs); 2869 nbytes += (msrcs * sizeof(in_addr_t)); 2870 } 2871 2872 if (is_source_query && msrcs == 0) { 2873 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__); 2874 if (m != m0) 2875 m_freem(m); 2876 return (0); 2877 } 2878 2879 /* 2880 * We are good to go with first packet. 2881 */ 2882 if (m != m0) { 2883 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__); 2884 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2885 _IF_ENQUEUE(ifq, m); 2886 } else 2887 m->m_pkthdr.PH_vt.vt_nrecs++; 2888 2889 /* 2890 * No further work needed if no source list in packet(s). 2891 */ 2892 if (!record_has_sources) 2893 return (nbytes); 2894 2895 /* 2896 * Whilst sources remain to be announced, we need to allocate 2897 * a new packet and fill out as many sources as will fit. 2898 * Always try for a cluster first. 2899 */ 2900 while (nims != NULL) { 2901 if (_IF_QFULL(ifq)) { 2902 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2903 return (-ENOMEM); 2904 } 2905 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2906 if (m) 2907 m->m_data += IGMP_LEADINGSPACE; 2908 if (m == NULL) { 2909 m = m_gethdr(M_DONTWAIT, MT_DATA); 2910 if (m) 2911 MH_ALIGN(m, IGMP_LEADINGSPACE); 2912 } 2913 if (m == NULL) 2914 return (-ENOMEM); 2915 igmp_save_context(m, ifp); 2916 md = m_getptr(m, 0, &off); 2917 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off); 2918 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__); 2919 2920 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2921 if (m != m0) 2922 m_freem(m); 2923 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2924 return (-ENOMEM); 2925 } 2926 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2927 nbytes += sizeof(struct igmp_grouprec); 2928 2929 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2930 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2931 2932 msrcs = 0; 2933 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 2934 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2935 inet_ntoa_haddr(ims->ims_haddr)); 2936 now = ims_get_mode(inm, ims, 1); 2937 if ((now != mode) || 2938 (now == mode && mode == MCAST_UNDEFINED)) { 2939 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2940 continue; 2941 } 2942 if (is_source_query && ims->ims_stp == 0) { 2943 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2944 __func__); 2945 continue; 2946 } 2947 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2948 naddr = htonl(ims->ims_haddr); 2949 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2950 if (m != m0) 2951 m_freem(m); 2952 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2953 __func__); 2954 return (-ENOMEM); 2955 } 2956 ++msrcs; 2957 if (msrcs == m0srcs) 2958 break; 2959 } 2960 pig->ig_numsrc = htons(msrcs); 2961 nbytes += (msrcs * sizeof(in_addr_t)); 2962 2963 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__); 2964 _IF_ENQUEUE(ifq, m); 2965 } 2966 2967 return (nbytes); 2968} 2969 2970/* 2971 * Type used to mark record pass completion. 2972 * We exploit the fact we can cast to this easily from the 2973 * current filter modes on each ip_msource node. 2974 */ 2975typedef enum { 2976 REC_NONE = 0x00, /* MCAST_UNDEFINED */ 2977 REC_ALLOW = 0x01, /* MCAST_INCLUDE */ 2978 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ 2979 REC_FULL = REC_ALLOW | REC_BLOCK 2980} rectype_t; 2981 2982/* 2983 * Enqueue an IGMPv3 filter list change to the given output queue. 2984 * 2985 * Source list filter state is held in an RB-tree. When the filter list 2986 * for a group is changed without changing its mode, we need to compute 2987 * the deltas between T0 and T1 for each source in the filter set, 2988 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. 2989 * 2990 * As we may potentially queue two record types, and the entire R-B tree 2991 * needs to be walked at once, we break this out into its own function 2992 * so we can generate a tightly packed queue of packets. 2993 * 2994 * XXX This could be written to only use one tree walk, although that makes 2995 * serializing into the mbuf chains a bit harder. For now we do two walks 2996 * which makes things easier on us, and it may or may not be harder on 2997 * the L2 cache. 2998 * 2999 * If successful the size of all data appended to the queue is returned, 3000 * otherwise an error code less than zero is returned, or zero if 3001 * no record(s) were appended. 3002 */ 3003static int 3004igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) 3005{ 3006 static const int MINRECLEN = 3007 sizeof(struct igmp_grouprec) + sizeof(in_addr_t); 3008 struct ifnet *ifp; 3009 struct igmp_grouprec ig; 3010 struct igmp_grouprec *pig; 3011 struct ip_msource *ims, *nims; 3012 struct mbuf *m, *m0, *md; 3013 in_addr_t naddr; 3014 int m0srcs, nbytes, npbytes, off, rsrcs, schanged; 3015 int nallow, nblock; 3016 uint8_t mode, now, then; 3017 rectype_t crt, drt, nrt; 3018 3019 IN_MULTI_LOCK_ASSERT(); 3020 3021 if (inm->inm_nsrc == 0 || 3022 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) 3023 return (0); 3024 3025 ifp = inm->inm_ifp; /* interface */ 3026 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */ 3027 crt = REC_NONE; /* current group record type */ 3028 drt = REC_NONE; /* mask of completed group record types */ 3029 nrt = REC_NONE; /* record type for current node */ 3030 m0srcs = 0; /* # source which will fit in current mbuf chain */ 3031 nbytes = 0; /* # of bytes appended to group's state-change queue */ 3032 npbytes = 0; /* # of bytes appended this packet */ 3033 rsrcs = 0; /* # sources encoded in current record */ 3034 schanged = 0; /* # nodes encoded in overall filter change */ 3035 nallow = 0; /* # of source entries in ALLOW_NEW */ 3036 nblock = 0; /* # of source entries in BLOCK_OLD */ 3037 nims = NULL; /* next tree node pointer */ 3038 3039 /* 3040 * For each possible filter record mode. 3041 * The first kind of source we encounter tells us which 3042 * is the first kind of record we start appending. 3043 * If a node transitioned to UNDEFINED at t1, its mode is treated 3044 * as the inverse of the group's filter mode. 3045 */ 3046 while (drt != REC_FULL) { 3047 do { 3048 m0 = ifq->ifq_tail; 3049 if (m0 != NULL && 3050 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= 3051 IGMP_V3_REPORT_MAXRECS) && 3052 (m0->m_pkthdr.len + MINRECLEN) < 3053 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 3054 m = m0; 3055 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 3056 sizeof(struct igmp_grouprec)) / 3057 sizeof(in_addr_t); 3058 CTR1(KTR_IGMPV3, 3059 "%s: use previous packet", __func__); 3060 } else { 3061 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3062 if (m) 3063 m->m_data += IGMP_LEADINGSPACE; 3064 if (m == NULL) { 3065 m = m_gethdr(M_DONTWAIT, MT_DATA); 3066 if (m) 3067 MH_ALIGN(m, IGMP_LEADINGSPACE); 3068 } 3069 if (m == NULL) { 3070 CTR1(KTR_IGMPV3, 3071 "%s: m_get*() failed", __func__); 3072 return (-ENOMEM); 3073 } 3074 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3075 igmp_save_context(m, ifp); 3076 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 3077 sizeof(struct igmp_grouprec)) / 3078 sizeof(in_addr_t); 3079 npbytes = 0; 3080 CTR1(KTR_IGMPV3, 3081 "%s: allocated new packet", __func__); 3082 } 3083 /* 3084 * Append the IGMP group record header to the 3085 * current packet's data area. 3086 * Recalculate pointer to free space for next 3087 * group record, in case m_append() allocated 3088 * a new mbuf or cluster. 3089 */ 3090 memset(&ig, 0, sizeof(ig)); 3091 ig.ig_group = inm->inm_addr; 3092 if (!m_append(m, sizeof(ig), (void *)&ig)) { 3093 if (m != m0) 3094 m_freem(m); 3095 CTR1(KTR_IGMPV3, 3096 "%s: m_append() failed", __func__); 3097 return (-ENOMEM); 3098 } 3099 npbytes += sizeof(struct igmp_grouprec); 3100 if (m != m0) { 3101 /* new packet; offset in c hain */ 3102 md = m_getptr(m, npbytes - 3103 sizeof(struct igmp_grouprec), &off); 3104 pig = (struct igmp_grouprec *)(mtod(md, 3105 uint8_t *) + off); 3106 } else { 3107 /* current packet; offset from last append */ 3108 md = m_last(m); 3109 pig = (struct igmp_grouprec *)(mtod(md, 3110 uint8_t *) + md->m_len - 3111 sizeof(struct igmp_grouprec)); 3112 } 3113 /* 3114 * Begin walking the tree for this record type 3115 * pass, or continue from where we left off 3116 * previously if we had to allocate a new packet. 3117 * Only report deltas in-mode at t1. 3118 * We need not report included sources as allowed 3119 * if we are in inclusive mode on the group, 3120 * however the converse is not true. 3121 */ 3122 rsrcs = 0; 3123 if (nims == NULL) 3124 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs); 3125 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 3126 CTR2(KTR_IGMPV3, "%s: visit node %s", 3127 __func__, inet_ntoa_haddr(ims->ims_haddr)); 3128 now = ims_get_mode(inm, ims, 1); 3129 then = ims_get_mode(inm, ims, 0); 3130 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d", 3131 __func__, then, now); 3132 if (now == then) { 3133 CTR1(KTR_IGMPV3, 3134 "%s: skip unchanged", __func__); 3135 continue; 3136 } 3137 if (mode == MCAST_EXCLUDE && 3138 now == MCAST_INCLUDE) { 3139 CTR1(KTR_IGMPV3, 3140 "%s: skip IN src on EX group", 3141 __func__); 3142 continue; 3143 } 3144 nrt = (rectype_t)now; 3145 if (nrt == REC_NONE) 3146 nrt = (rectype_t)(~mode & REC_FULL); 3147 if (schanged++ == 0) { 3148 crt = nrt; 3149 } else if (crt != nrt) 3150 continue; 3151 naddr = htonl(ims->ims_haddr); 3152 if (!m_append(m, sizeof(in_addr_t), 3153 (void *)&naddr)) { 3154 if (m != m0) 3155 m_freem(m); 3156 CTR1(KTR_IGMPV3, 3157 "%s: m_append() failed", __func__); 3158 return (-ENOMEM); 3159 } 3160 nallow += !!(crt == REC_ALLOW); 3161 nblock += !!(crt == REC_BLOCK); 3162 if (++rsrcs == m0srcs) 3163 break; 3164 } 3165 /* 3166 * If we did not append any tree nodes on this 3167 * pass, back out of allocations. 3168 */ 3169 if (rsrcs == 0) { 3170 npbytes -= sizeof(struct igmp_grouprec); 3171 if (m != m0) { 3172 CTR1(KTR_IGMPV3, 3173 "%s: m_free(m)", __func__); 3174 m_freem(m); 3175 } else { 3176 CTR1(KTR_IGMPV3, 3177 "%s: m_adj(m, -ig)", __func__); 3178 m_adj(m, -((int)sizeof( 3179 struct igmp_grouprec))); 3180 } 3181 continue; 3182 } 3183 npbytes += (rsrcs * sizeof(in_addr_t)); 3184 if (crt == REC_ALLOW) 3185 pig->ig_type = IGMP_ALLOW_NEW_SOURCES; 3186 else if (crt == REC_BLOCK) 3187 pig->ig_type = IGMP_BLOCK_OLD_SOURCES; 3188 pig->ig_numsrc = htons(rsrcs); 3189 /* 3190 * Count the new group record, and enqueue this 3191 * packet if it wasn't already queued. 3192 */ 3193 m->m_pkthdr.PH_vt.vt_nrecs++; 3194 if (m != m0) 3195 _IF_ENQUEUE(ifq, m); 3196 nbytes += npbytes; 3197 } while (nims != NULL); 3198 drt |= crt; 3199 crt = (~crt & REC_FULL); 3200 } 3201 3202 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__, 3203 nallow, nblock); 3204 3205 return (nbytes); 3206} 3207 3208static int 3209igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) 3210{ 3211 struct ifqueue *gq; 3212 struct mbuf *m; /* pending state-change */ 3213 struct mbuf *m0; /* copy of pending state-change */ 3214 struct mbuf *mt; /* last state-change in packet */ 3215 int docopy, domerge; 3216 u_int recslen; 3217 3218 docopy = 0; 3219 domerge = 0; 3220 recslen = 0; 3221 3222 IN_MULTI_LOCK_ASSERT(); 3223 IGMP_LOCK_ASSERT(); 3224 3225 /* 3226 * If there are further pending retransmissions, make a writable 3227 * copy of each queued state-change message before merging. 3228 */ 3229 if (inm->inm_scrv > 0) 3230 docopy = 1; 3231 3232 gq = &inm->inm_scq; 3233#ifdef KTR 3234 if (gq->ifq_head == NULL) { 3235 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty", 3236 __func__, inm); 3237 } 3238#endif 3239 3240 m = gq->ifq_head; 3241 while (m != NULL) { 3242 /* 3243 * Only merge the report into the current packet if 3244 * there is sufficient space to do so; an IGMPv3 report 3245 * packet may only contain 65,535 group records. 3246 * Always use a simple mbuf chain concatentation to do this, 3247 * as large state changes for single groups may have 3248 * allocated clusters. 3249 */ 3250 domerge = 0; 3251 mt = ifscq->ifq_tail; 3252 if (mt != NULL) { 3253 recslen = m_length(m, NULL); 3254 3255 if ((mt->m_pkthdr.PH_vt.vt_nrecs + 3256 m->m_pkthdr.PH_vt.vt_nrecs <= 3257 IGMP_V3_REPORT_MAXRECS) && 3258 (mt->m_pkthdr.len + recslen <= 3259 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE))) 3260 domerge = 1; 3261 } 3262 3263 if (!domerge && _IF_QFULL(gq)) { 3264 CTR2(KTR_IGMPV3, 3265 "%s: outbound queue full, skipping whole packet %p", 3266 __func__, m); 3267 mt = m->m_nextpkt; 3268 if (!docopy) 3269 m_freem(m); 3270 m = mt; 3271 continue; 3272 } 3273 3274 if (!docopy) { 3275 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m); 3276 _IF_DEQUEUE(gq, m0); 3277 m = m0->m_nextpkt; 3278 } else { 3279 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m); 3280 m0 = m_dup(m, M_NOWAIT); 3281 if (m0 == NULL) 3282 return (ENOMEM); 3283 m0->m_nextpkt = NULL; 3284 m = m->m_nextpkt; 3285 } 3286 3287 if (!domerge) { 3288 CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)", 3289 __func__, m0, ifscq); 3290 _IF_ENQUEUE(ifscq, m0); 3291 } else { 3292 struct mbuf *mtl; /* last mbuf of packet mt */ 3293 3294 CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)", 3295 __func__, m0, mt); 3296 3297 mtl = m_last(mt); 3298 m0->m_flags &= ~M_PKTHDR; 3299 mt->m_pkthdr.len += recslen; 3300 mt->m_pkthdr.PH_vt.vt_nrecs += 3301 m0->m_pkthdr.PH_vt.vt_nrecs; 3302 3303 mtl->m_next = m0; 3304 } 3305 } 3306 3307 return (0); 3308} 3309 3310/* 3311 * Respond to a pending IGMPv3 General Query. 3312 */ 3313static void 3314igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi) 3315{ 3316 INIT_VNET_INET(curvnet); 3317 struct ifmultiaddr *ifma, *tifma; 3318 struct ifnet *ifp; 3319 struct in_multi *inm; 3320 int retval, loop; 3321 3322 IN_MULTI_LOCK_ASSERT(); 3323 IGMP_LOCK_ASSERT(); 3324 3325 KASSERT(igi->igi_version == IGMP_VERSION_3, 3326 ("%s: called when version %d", __func__, igi->igi_version)); 3327 3328 ifp = igi->igi_ifp; 3329 3330 IF_ADDR_LOCK(ifp); 3331 TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, tifma) { 3332 if (ifma->ifma_addr->sa_family != AF_INET || 3333 ifma->ifma_protospec == NULL) 3334 continue; 3335 3336 inm = (struct in_multi *)ifma->ifma_protospec; 3337 KASSERT(ifp == inm->inm_ifp, 3338 ("%s: inconsistent ifp", __func__)); 3339 3340 switch (inm->inm_state) { 3341 case IGMP_NOT_MEMBER: 3342 case IGMP_SILENT_MEMBER: 3343 break; 3344 case IGMP_REPORTING_MEMBER: 3345 case IGMP_IDLE_MEMBER: 3346 case IGMP_LAZY_MEMBER: 3347 case IGMP_SLEEPING_MEMBER: 3348 case IGMP_AWAKENING_MEMBER: 3349 inm->inm_state = IGMP_REPORTING_MEMBER; 3350 retval = igmp_v3_enqueue_group_record(&igi->igi_gq, 3351 inm, 0, 0, 0); 3352 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 3353 __func__, retval); 3354 break; 3355 case IGMP_G_QUERY_PENDING_MEMBER: 3356 case IGMP_SG_QUERY_PENDING_MEMBER: 3357 case IGMP_LEAVING_MEMBER: 3358 break; 3359 } 3360 } 3361 IF_ADDR_UNLOCK(ifp); 3362 3363 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 3364 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop); 3365 3366 /* 3367 * Slew transmission of bursts over 500ms intervals. 3368 */ 3369 if (igi->igi_gq.ifq_head != NULL) { 3370 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY( 3371 IGMP_RESPONSE_BURST_INTERVAL); 3372 V_interface_timers_running = 1; 3373 } 3374} 3375 3376/* 3377 * Transmit the next pending IGMP message in the output queue. 3378 * 3379 * We get called from netisr_processqueue(). A mutex private to igmpoq 3380 * will be acquired and released around this routine. 3381 * 3382 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis. 3383 * MRT: Nothing needs to be done, as IGMP traffic is always local to 3384 * a link and uses a link-scope multicast address. 3385 */ 3386static void 3387igmp_intr(struct mbuf *m) 3388{ 3389 struct ip_moptions imo; 3390 struct ifnet *ifp; 3391 struct mbuf *ipopts, *m0; 3392 int error; 3393 uint32_t ifindex; 3394 3395 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m); 3396 3397 /* 3398 * Set VNET image pointer from enqueued mbuf chain 3399 * before doing anything else. Whilst we use interface 3400 * indexes to guard against interface detach, they are 3401 * unique to each VIMAGE and must be retrieved. 3402 */ 3403 CURVNET_SET((struct vnet *)(m->m_pkthdr.header)); 3404 INIT_VNET_NET(curvnet); 3405 INIT_VNET_INET(curvnet); 3406 ifindex = igmp_restore_context(m); 3407 3408 /* 3409 * Check if the ifnet still exists. This limits the scope of 3410 * any race in the absence of a global ifp lock for low cost 3411 * (an array lookup). 3412 */ 3413 ifp = ifnet_byindex(ifindex); 3414 if (ifp == NULL) { 3415 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.", 3416 __func__, m, ifindex); 3417 m_freem(m); 3418 IPSTAT_INC(ips_noroute); 3419 goto out; 3420 } 3421 3422 ipopts = V_igmp_sendra ? m_raopt : NULL; 3423 3424 imo.imo_multicast_ttl = 1; 3425 imo.imo_multicast_vif = -1; 3426 imo.imo_multicast_loop = (V_ip_mrouter != NULL); 3427 3428 /* 3429 * If the user requested that IGMP traffic be explicitly 3430 * redirected to the loopback interface (e.g. they are running a 3431 * MANET interface and the routing protocol needs to see the 3432 * updates), handle this now. 3433 */ 3434 if (m->m_flags & M_IGMP_LOOP) 3435 imo.imo_multicast_ifp = V_loif; 3436 else 3437 imo.imo_multicast_ifp = ifp; 3438 3439 if (m->m_flags & M_IGMPV2) { 3440 m0 = m; 3441 } else { 3442 m0 = igmp_v3_encap_report(ifp, m); 3443 if (m0 == NULL) { 3444 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m); 3445 m_freem(m); 3446 IPSTAT_INC(ips_odropped); 3447 goto out; 3448 } 3449 } 3450 3451 igmp_scrub_context(m0); 3452 m->m_flags &= ~(M_PROTOFLAGS); 3453 m0->m_pkthdr.rcvif = V_loif; 3454#ifdef MAC 3455 mac_netinet_igmp_send(ifp, m0); 3456#endif 3457 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL); 3458 if (error) { 3459 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error); 3460 goto out; 3461 } 3462 3463 IGMPSTAT_INC(igps_snd_reports); 3464 3465out: 3466 /* 3467 * We must restore the existing vnet pointer before 3468 * continuing as we are run from netisr context. 3469 */ 3470 CURVNET_RESTORE(); 3471} 3472 3473/* 3474 * Encapsulate an IGMPv3 report. 3475 * 3476 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf 3477 * chain has already had its IP/IGMPv3 header prepended. In this case 3478 * the function will not attempt to prepend; the lengths and checksums 3479 * will however be re-computed. 3480 * 3481 * Returns a pointer to the new mbuf chain head, or NULL if the 3482 * allocation failed. 3483 */ 3484static struct mbuf * 3485igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) 3486{ 3487 INIT_VNET_INET(curvnet); 3488 struct igmp_report *igmp; 3489 struct ip *ip; 3490 int hdrlen, igmpreclen; 3491 3492 KASSERT((m->m_flags & M_PKTHDR), 3493 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m)); 3494 3495 igmpreclen = m_length(m, NULL); 3496 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report); 3497 3498 if (m->m_flags & M_IGMPV3_HDR) { 3499 igmpreclen -= hdrlen; 3500 } else { 3501 M_PREPEND(m, hdrlen, M_DONTWAIT); 3502 if (m == NULL) 3503 return (NULL); 3504 m->m_flags |= M_IGMPV3_HDR; 3505 } 3506 3507 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen); 3508 3509 m->m_data += sizeof(struct ip); 3510 m->m_len -= sizeof(struct ip); 3511 3512 igmp = mtod(m, struct igmp_report *); 3513 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT; 3514 igmp->ir_rsv1 = 0; 3515 igmp->ir_rsv2 = 0; 3516 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs); 3517 igmp->ir_cksum = 0; 3518 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen); 3519 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3520 3521 m->m_data -= sizeof(struct ip); 3522 m->m_len += sizeof(struct ip); 3523 3524 ip = mtod(m, struct ip *); 3525 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL; 3526 ip->ip_len = hdrlen + igmpreclen; 3527 ip->ip_off = IP_DF; 3528 ip->ip_p = IPPROTO_IGMP; 3529 ip->ip_sum = 0; 3530 3531 ip->ip_src.s_addr = INADDR_ANY; 3532 3533 if (m->m_flags & M_IGMP_LOOP) { 3534 struct in_ifaddr *ia; 3535 3536 IFP_TO_IA(ifp, ia); 3537 if (ia != NULL) 3538 ip->ip_src = ia->ia_addr.sin_addr; 3539 } 3540 3541 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP); 3542 3543 return (m); 3544} 3545 3546#ifdef KTR 3547static char * 3548igmp_rec_type_to_str(const int type) 3549{ 3550 3551 switch (type) { 3552 case IGMP_CHANGE_TO_EXCLUDE_MODE: 3553 return "TO_EX"; 3554 break; 3555 case IGMP_CHANGE_TO_INCLUDE_MODE: 3556 return "TO_IN"; 3557 break; 3558 case IGMP_MODE_IS_EXCLUDE: 3559 return "MODE_EX"; 3560 break; 3561 case IGMP_MODE_IS_INCLUDE: 3562 return "MODE_IN"; 3563 break; 3564 case IGMP_ALLOW_NEW_SOURCES: 3565 return "ALLOW_NEW"; 3566 break; 3567 case IGMP_BLOCK_OLD_SOURCES: 3568 return "BLOCK_OLD"; 3569 break; 3570 default: 3571 break; 3572 } 3573 return "unknown"; 3574} 3575#endif 3576 3577static void 3578igmp_sysinit(void) 3579{ 3580 3581 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3582 3583 IGMP_LOCK_INIT(); 3584 3585 m_raopt = igmp_ra_alloc(); 3586 3587 netisr_register(&igmp_nh); 3588} 3589 3590static void 3591igmp_sysuninit(void) 3592{ 3593 3594 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3595 3596 netisr_unregister(&igmp_nh); 3597 3598 m_free(m_raopt); 3599 m_raopt = NULL; 3600 3601 IGMP_LOCK_DESTROY(); 3602} 3603 3604/* 3605 * Initialize an IGMPv3 instance. 3606 * VIMAGE: Assumes curvnet set by caller and called per vimage. 3607 */ 3608static int 3609vnet_igmp_iattach(const void *unused __unused) 3610{ 3611 INIT_VNET_INET(curvnet); 3612 3613 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3614 3615 LIST_INIT(&V_igi_head); 3616 3617 V_current_state_timers_running = 0; 3618 V_state_change_timers_running = 0; 3619 V_interface_timers_running = 0; 3620 3621 /* 3622 * Initialize sysctls to default values. 3623 */ 3624 V_igmp_recvifkludge = 1; 3625 V_igmp_sendra = 1; 3626 V_igmp_sendlocal = 1; 3627 V_igmp_v1enable = 1; 3628 V_igmp_v2enable = 1; 3629 V_igmp_legacysupp = 0; 3630 V_igmp_default_version = IGMP_VERSION_3; 3631 V_igmp_gsrdelay.tv_sec = 10; 3632 V_igmp_gsrdelay.tv_usec = 0; 3633 3634 memset(&V_igmpstat, 0, sizeof(struct igmpstat)); 3635 V_igmpstat.igps_version = IGPS_VERSION_3; 3636 V_igmpstat.igps_len = sizeof(struct igmpstat); 3637 3638 return (0); 3639} 3640 3641static int 3642vnet_igmp_idetach(const void *unused __unused) 3643{ 3644#ifdef INVARIANTS 3645 INIT_VNET_INET(curvnet); 3646#endif 3647 3648 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3649 3650 KASSERT(LIST_EMPTY(&V_igi_head), 3651 ("%s: igi list not empty; ifnets not detached?", __func__)); 3652 3653 return (0); 3654} 3655 3656#ifndef VIMAGE_GLOBALS 3657static vnet_modinfo_t vnet_igmp_modinfo = { 3658 .vmi_id = VNET_MOD_IGMP, 3659 .vmi_name = "igmp", 3660 .vmi_dependson = VNET_MOD_INET, 3661 .vmi_iattach = vnet_igmp_iattach, 3662 .vmi_idetach = vnet_igmp_idetach 3663}; 3664#endif 3665 3666static int 3667igmp_modevent(module_t mod, int type, void *unused __unused) 3668{ 3669 3670 switch (type) { 3671 case MOD_LOAD: 3672 igmp_sysinit(); 3673#ifndef VIMAGE_GLOBALS 3674 vnet_mod_register(&vnet_igmp_modinfo); 3675#else 3676 vnet_igmp_iattach(NULL); 3677#endif 3678 break; 3679 case MOD_UNLOAD: 3680#ifndef VIMAGE_GLOBALS 3681 vnet_mod_deregister(&vnet_igmp_modinfo); 3682#else 3683 vnet_igmp_idetach(NULL); 3684#endif 3685 igmp_sysuninit(); 3686 break; 3687 default: 3688 return (EOPNOTSUPP); 3689 } 3690 return (0); 3691} 3692 3693static moduledata_t igmp_mod = { 3694 "igmp", 3695 igmp_modevent, 3696 0 3697}; 3698DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3699