igmp.c revision 281233
/*-
 * Copyright (c) 2007-2009 Bruce Simpson.
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */

/*
 * Internet Group Management Protocol (IGMP) routines.
 * [RFC1112, RFC2236, RFC3376]
 *
 * Written by Steve Deering, Stanford, May 1988.
 * Modified by Rosen Sharma, Stanford, Aug 1994.
 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
46 * 47 * MULTICAST Revision: 3.5.1.4 48 */ 49 50#include <sys/cdefs.h> 51__FBSDID("$FreeBSD: releng/9.3/sys/netinet/igmp.c 281233 2015-04-07 20:21:23Z delphij $"); 52 53#include <sys/param.h> 54#include <sys/systm.h> 55#include <sys/module.h> 56#include <sys/malloc.h> 57#include <sys/mbuf.h> 58#include <sys/socket.h> 59#include <sys/protosw.h> 60#include <sys/kernel.h> 61#include <sys/sysctl.h> 62#include <sys/ktr.h> 63#include <sys/condvar.h> 64 65#include <net/if.h> 66#include <net/netisr.h> 67#include <net/vnet.h> 68 69#include <netinet/in.h> 70#include <netinet/in_var.h> 71#include <netinet/in_systm.h> 72#include <netinet/ip.h> 73#include <netinet/ip_var.h> 74#include <netinet/ip_options.h> 75#include <netinet/igmp.h> 76#include <netinet/igmp_var.h> 77 78#include <machine/in_cksum.h> 79 80#include <security/mac/mac_framework.h> 81 82#ifndef KTR_IGMPV3 83#define KTR_IGMPV3 KTR_INET 84#endif 85 86static struct igmp_ifinfo * 87 igi_alloc_locked(struct ifnet *); 88static void igi_delete_locked(const struct ifnet *); 89static void igmp_dispatch_queue(struct ifqueue *, int, const int); 90static void igmp_fasttimo_vnet(void); 91static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *); 92static int igmp_handle_state_change(struct in_multi *, 93 struct igmp_ifinfo *); 94static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *); 95static int igmp_input_v1_query(struct ifnet *, const struct ip *, 96 const struct igmp *); 97static int igmp_input_v2_query(struct ifnet *, const struct ip *, 98 const struct igmp *); 99static int igmp_input_v3_query(struct ifnet *, const struct ip *, 100 /*const*/ struct igmpv3 *); 101static int igmp_input_v3_group_query(struct in_multi *, 102 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *); 103static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *, 104 /*const*/ struct igmp *); 105static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *, 106 /*const*/ struct igmp *); 107static void igmp_intr(struct mbuf *); 108static int igmp_isgroupreported(const struct in_addr); 109static struct mbuf * 110 igmp_ra_alloc(void); 111#ifdef KTR 112static char * igmp_rec_type_to_str(const int); 113#endif 114static void igmp_set_version(struct igmp_ifinfo *, const int); 115static void igmp_slowtimo_vnet(void); 116static int igmp_v1v2_queue_report(struct in_multi *, const int); 117static void igmp_v1v2_process_group_timer(struct in_multi *, const int); 118static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *); 119static void igmp_v2_update_group(struct in_multi *, const int); 120static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *); 121static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *); 122static struct mbuf * 123 igmp_v3_encap_report(struct ifnet *, struct mbuf *); 124static int igmp_v3_enqueue_group_record(struct ifqueue *, 125 struct in_multi *, const int, const int, const int); 126static int igmp_v3_enqueue_filter_change(struct ifqueue *, 127 struct in_multi *); 128static void igmp_v3_process_group_timers(struct igmp_ifinfo *, 129 struct ifqueue *, struct ifqueue *, struct in_multi *, 130 const int); 131static int igmp_v3_merge_state_changes(struct in_multi *, 132 struct ifqueue *); 133static void igmp_v3_suppress_group_record(struct in_multi *); 134static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS); 135static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS); 136static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS); 137 138static const struct netisr_handler igmp_nh = { 139 .nh_name = "igmp", 140 
	.nh_handler = igmp_intr,
	.nh_proto = NETISR_IGMP,
	.nh_policy = NETISR_POLICY_SOURCE,
};

/*
 * System-wide globals.
 *
 * Unlocked access to these is OK, except for the global IGMP output
 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
 * because all VIMAGEs have to share a global output queue, as netisrs
 * themselves are not virtualized.
 *
 * Locking:
 *  * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
 *    Any may be taken independently; if any are held at the same
 *    time, the above lock order must be followed.
 *  * All output is delegated to the netisr.
 *    Now that Giant has been eliminated, the netisr may be inlined.
 *  * IN_MULTI_LOCK covers in_multi.
 *  * IGMP_LOCK covers igmp_ifinfo and any global variables in this file,
 *    including the output queue.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *  * igmp_ifinfo is valid as long as PF_INET is attached to the interface,
 *    therefore it is not refcounted.
 *    We allow unlocked reads of igmp_ifinfo when accessed via in_multi.
 *
 * Reference counting
 *  * IGMP acquires its own reference every time an in_multi is passed to
 *    it and the group is being joined for the first time.
 *  * IGMP releases its reference(s) on in_multi in a deferred way,
 *    because the operations which process the release run as part of
 *    a loop whose control variables are directly affected by the release
 *    (that, and not recursing on the IF_ADDR_LOCK).
 *
 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
 * to a vnet in ifp->if_vnet.
 *
 * SMPng: XXX We may potentially race operations on ifma_protospec.
 * The problem is that we currently lack a clean way of taking the
 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
 * as anything which modifies ifma needs to be covered by that lock.
 * So check for ifma_protospec being NULL before proceeding.
 */
struct mtx		 igmp_mtx;

struct mbuf		*m_raopt;		 /* Router Alert option */
static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");

/*
 * VIMAGE-wide globals.
 *
 * The IGMPv3 timers themselves need to run per-image, however,
 * protosw timers run globally (see tcp).
 * An ifnet can only be in one vimage at a time, and the loopback
 * ifnet, loif, is itself virtualized.
 * It would otherwise be possible to seriously hose IGMP state,
 * and create inconsistencies in upstream multicast routing, if you have
 * multiple VIMAGEs running on the same link joining different multicast
 * groups, UNLESS the "primary IP address" is different. This is because
 * IGMP for IPv4 does not force link-local addresses to be used for each
 * node, unlike MLD for IPv6.
 * Obviously the IGMPv3 per-interface state has per-vimage granularity
 * also as a result.
 *
 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
 * policy to control the address used by IGMP on the link.
208 */ 209static VNET_DEFINE(int, interface_timers_running); /* IGMPv3 general 210 * query response */ 211static VNET_DEFINE(int, state_change_timers_running); /* IGMPv3 state-change 212 * retransmit */ 213static VNET_DEFINE(int, current_state_timers_running); /* IGMPv1/v2 host 214 * report; IGMPv3 g/sg 215 * query response */ 216 217#define V_interface_timers_running VNET(interface_timers_running) 218#define V_state_change_timers_running VNET(state_change_timers_running) 219#define V_current_state_timers_running VNET(current_state_timers_running) 220 221static VNET_DEFINE(LIST_HEAD(, igmp_ifinfo), igi_head); 222static VNET_DEFINE(struct igmpstat, igmpstat) = { 223 .igps_version = IGPS_VERSION_3, 224 .igps_len = sizeof(struct igmpstat), 225}; 226static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0}; 227 228#define V_igi_head VNET(igi_head) 229#define V_igmpstat VNET(igmpstat) 230#define V_igmp_gsrdelay VNET(igmp_gsrdelay) 231 232static VNET_DEFINE(int, igmp_recvifkludge) = 1; 233static VNET_DEFINE(int, igmp_sendra) = 1; 234static VNET_DEFINE(int, igmp_sendlocal) = 1; 235static VNET_DEFINE(int, igmp_v1enable) = 1; 236static VNET_DEFINE(int, igmp_v2enable) = 1; 237static VNET_DEFINE(int, igmp_legacysupp); 238static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3; 239 240#define V_igmp_recvifkludge VNET(igmp_recvifkludge) 241#define V_igmp_sendra VNET(igmp_sendra) 242#define V_igmp_sendlocal VNET(igmp_sendlocal) 243#define V_igmp_v1enable VNET(igmp_v1enable) 244#define V_igmp_v2enable VNET(igmp_v2enable) 245#define V_igmp_legacysupp VNET(igmp_legacysupp) 246#define V_igmp_default_version VNET(igmp_default_version) 247 248/* 249 * Virtualized sysctls. 250 */ 251SYSCTL_VNET_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RW, 252 &VNET_NAME(igmpstat), igmpstat, ""); 253SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_RW, 254 &VNET_NAME(igmp_recvifkludge), 0, 255 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address"); 256SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_RW, 257 &VNET_NAME(igmp_sendra), 0, 258 "Send IP Router Alert option in IGMPv2/v3 messages"); 259SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_RW, 260 &VNET_NAME(igmp_sendlocal), 0, 261 "Send IGMP membership reports for 224.0.0.0/24 groups"); 262SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_RW, 263 &VNET_NAME(igmp_v1enable), 0, 264 "Enable backwards compatibility with IGMPv1"); 265SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_RW, 266 &VNET_NAME(igmp_v2enable), 0, 267 "Enable backwards compatibility with IGMPv2"); 268SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW, 269 &VNET_NAME(igmp_legacysupp), 0, 270 "Allow v1/v2 reports to suppress v3 group responses"); 271SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, default_version, 272 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 273 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I", 274 "Default version of IGMP to run on each interface"); 275SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, gsrdelay, 276 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 277 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I", 278 "Rate limit for IGMPv3 Group-and-Source queries in seconds"); 279 280/* 281 * Non-virtualized sysctls. 
282 */ 283static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo, 284 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo, 285 "Per-interface IGMPv3 state"); 286 287static __inline void 288igmp_save_context(struct mbuf *m, struct ifnet *ifp) 289{ 290 291#ifdef VIMAGE 292 m->m_pkthdr.header = ifp->if_vnet; 293#endif /* VIMAGE */ 294 m->m_pkthdr.flowid = ifp->if_index; 295} 296 297static __inline void 298igmp_scrub_context(struct mbuf *m) 299{ 300 301 m->m_pkthdr.header = NULL; 302 m->m_pkthdr.flowid = 0; 303} 304 305#ifdef KTR 306static __inline char * 307inet_ntoa_haddr(in_addr_t haddr) 308{ 309 struct in_addr ia; 310 311 ia.s_addr = htonl(haddr); 312 return (inet_ntoa(ia)); 313} 314#endif 315 316/* 317 * Restore context from a queued IGMP output chain. 318 * Return saved ifindex. 319 * 320 * VIMAGE: The assertion is there to make sure that we 321 * actually called CURVNET_SET() with what's in the mbuf chain. 322 */ 323static __inline uint32_t 324igmp_restore_context(struct mbuf *m) 325{ 326 327#ifdef notyet 328#if defined(VIMAGE) && defined(INVARIANTS) 329 KASSERT(curvnet == (m->m_pkthdr.header), 330 ("%s: called when curvnet was not restored", __func__)); 331#endif 332#endif 333 return (m->m_pkthdr.flowid); 334} 335 336/* 337 * Retrieve or set default IGMP version. 338 * 339 * VIMAGE: Assume curvnet set by caller. 340 * SMPng: NOTE: Serialized by IGMP lock. 341 */ 342static int 343sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS) 344{ 345 int error; 346 int new; 347 348 error = sysctl_wire_old_buffer(req, sizeof(int)); 349 if (error) 350 return (error); 351 352 IGMP_LOCK(); 353 354 new = V_igmp_default_version; 355 356 error = sysctl_handle_int(oidp, &new, 0, req); 357 if (error || !req->newptr) 358 goto out_locked; 359 360 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) { 361 error = EINVAL; 362 goto out_locked; 363 } 364 365 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d", 366 V_igmp_default_version, new); 367 368 V_igmp_default_version = new; 369 370out_locked: 371 IGMP_UNLOCK(); 372 return (error); 373} 374 375/* 376 * Retrieve or set threshold between group-source queries in seconds. 377 * 378 * VIMAGE: Assume curvnet set by caller. 379 * SMPng: NOTE: Serialized by IGMP lock. 380 */ 381static int 382sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS) 383{ 384 int error; 385 int i; 386 387 error = sysctl_wire_old_buffer(req, sizeof(int)); 388 if (error) 389 return (error); 390 391 IGMP_LOCK(); 392 393 i = V_igmp_gsrdelay.tv_sec; 394 395 error = sysctl_handle_int(oidp, &i, 0, req); 396 if (error || !req->newptr) 397 goto out_locked; 398 399 if (i < -1 || i >= 60) { 400 error = EINVAL; 401 goto out_locked; 402 } 403 404 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d", 405 V_igmp_gsrdelay.tv_sec, i); 406 V_igmp_gsrdelay.tv_sec = i; 407 408out_locked: 409 IGMP_UNLOCK(); 410 return (error); 411} 412 413/* 414 * Expose struct igmp_ifinfo to userland, keyed by ifindex. 415 * For use by ifmcstat(8). 416 * 417 * SMPng: NOTE: Does an unlocked ifindex space read. 418 * VIMAGE: Assume curvnet set by caller. The node handler itself 419 * is not directly virtualized. 
420 */ 421static int 422sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS) 423{ 424 int *name; 425 int error; 426 u_int namelen; 427 struct ifnet *ifp; 428 struct igmp_ifinfo *igi; 429 430 name = (int *)arg1; 431 namelen = arg2; 432 433 if (req->newptr != NULL) 434 return (EPERM); 435 436 if (namelen != 1) 437 return (EINVAL); 438 439 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo)); 440 if (error) 441 return (error); 442 443 IN_MULTI_LOCK(); 444 IGMP_LOCK(); 445 446 if (name[0] <= 0 || name[0] > V_if_index) { 447 error = ENOENT; 448 goto out_locked; 449 } 450 451 error = ENOENT; 452 453 ifp = ifnet_byindex(name[0]); 454 if (ifp == NULL) 455 goto out_locked; 456 457 LIST_FOREACH(igi, &V_igi_head, igi_link) { 458 if (ifp == igi->igi_ifp) { 459 error = SYSCTL_OUT(req, igi, 460 sizeof(struct igmp_ifinfo)); 461 break; 462 } 463 } 464 465out_locked: 466 IGMP_UNLOCK(); 467 IN_MULTI_UNLOCK(); 468 return (error); 469} 470 471/* 472 * Dispatch an entire queue of pending packet chains 473 * using the netisr. 474 * VIMAGE: Assumes the vnet pointer has been set. 475 */ 476static void 477igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop) 478{ 479 struct mbuf *m; 480 481 for (;;) { 482 _IF_DEQUEUE(ifq, m); 483 if (m == NULL) 484 break; 485 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, ifq, m); 486 if (loop) 487 m->m_flags |= M_IGMP_LOOP; 488 netisr_dispatch(NETISR_IGMP, m); 489 if (--limit == 0) 490 break; 491 } 492} 493 494/* 495 * Filter outgoing IGMP report state by group. 496 * 497 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1). 498 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are 499 * disabled for all groups in the 224.0.0.0/24 link-local scope. However, 500 * this may break certain IGMP snooping switches which rely on the old 501 * report behaviour. 502 * 503 * Return zero if the given group is one for which IGMP reports 504 * should be suppressed, or non-zero if reports should be issued. 505 */ 506static __inline int 507igmp_isgroupreported(const struct in_addr addr) 508{ 509 510 if (in_allhosts(addr) || 511 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr))))) 512 return (0); 513 514 return (1); 515} 516 517/* 518 * Construct a Router Alert option to use in outgoing packets. 519 */ 520static struct mbuf * 521igmp_ra_alloc(void) 522{ 523 struct mbuf *m; 524 struct ipoption *p; 525 526 MGET(m, M_DONTWAIT, MT_DATA); 527 p = mtod(m, struct ipoption *); 528 p->ipopt_dst.s_addr = INADDR_ANY; 529 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */ 530 p->ipopt_list[1] = 0x04; /* 4 bytes long */ 531 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */ 532 p->ipopt_list[3] = 0x00; /* pad byte */ 533 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1]; 534 535 return (m); 536} 537 538/* 539 * Attach IGMP when PF_INET is attached to an interface. 540 */ 541struct igmp_ifinfo * 542igmp_domifattach(struct ifnet *ifp) 543{ 544 struct igmp_ifinfo *igi; 545 546 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 547 __func__, ifp, ifp->if_xname); 548 549 IGMP_LOCK(); 550 551 igi = igi_alloc_locked(ifp); 552 if (!(ifp->if_flags & IFF_MULTICAST)) 553 igi->igi_flags |= IGIF_SILENT; 554 555 IGMP_UNLOCK(); 556 557 return (igi); 558} 559 560/* 561 * VIMAGE: assume curvnet set by caller. 
562 */ 563static struct igmp_ifinfo * 564igi_alloc_locked(/*const*/ struct ifnet *ifp) 565{ 566 struct igmp_ifinfo *igi; 567 568 IGMP_LOCK_ASSERT(); 569 570 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO); 571 if (igi == NULL) 572 goto out; 573 574 igi->igi_ifp = ifp; 575 igi->igi_version = V_igmp_default_version; 576 igi->igi_flags = 0; 577 igi->igi_rv = IGMP_RV_INIT; 578 igi->igi_qi = IGMP_QI_INIT; 579 igi->igi_qri = IGMP_QRI_INIT; 580 igi->igi_uri = IGMP_URI_INIT; 581 582 SLIST_INIT(&igi->igi_relinmhead); 583 584 /* 585 * Responses to general queries are subject to bounds. 586 */ 587 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS); 588 589 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link); 590 591 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)", 592 ifp, ifp->if_xname); 593 594out: 595 return (igi); 596} 597 598/* 599 * Hook for ifdetach. 600 * 601 * NOTE: Some finalization tasks need to run before the protocol domain 602 * is detached, but also before the link layer does its cleanup. 603 * 604 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK(). 605 * XXX This is also bitten by unlocked ifma_protospec access. 606 */ 607void 608igmp_ifdetach(struct ifnet *ifp) 609{ 610 struct igmp_ifinfo *igi; 611 struct ifmultiaddr *ifma; 612 struct in_multi *inm, *tinm; 613 614 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp, 615 ifp->if_xname); 616 617 IGMP_LOCK(); 618 619 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 620 if (igi->igi_version == IGMP_VERSION_3) { 621 IF_ADDR_RLOCK(ifp); 622 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 623 if (ifma->ifma_addr->sa_family != AF_INET || 624 ifma->ifma_protospec == NULL) 625 continue; 626#if 0 627 KASSERT(ifma->ifma_protospec != NULL, 628 ("%s: ifma_protospec is NULL", __func__)); 629#endif 630 inm = (struct in_multi *)ifma->ifma_protospec; 631 if (inm->inm_state == IGMP_LEAVING_MEMBER) { 632 SLIST_INSERT_HEAD(&igi->igi_relinmhead, 633 inm, inm_nrele); 634 } 635 inm_clear_recorded(inm); 636 } 637 IF_ADDR_RUNLOCK(ifp); 638 /* 639 * Free the in_multi reference(s) for this IGMP lifecycle. 640 */ 641 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, 642 tinm) { 643 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele); 644 inm_release_locked(inm); 645 } 646 } 647 648 IGMP_UNLOCK(); 649} 650 651/* 652 * Hook for domifdetach. 653 */ 654void 655igmp_domifdetach(struct ifnet *ifp) 656{ 657 struct igmp_ifinfo *igi; 658 659 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", 660 __func__, ifp, ifp->if_xname); 661 662 IGMP_LOCK(); 663 664 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 665 igi_delete_locked(ifp); 666 667 IGMP_UNLOCK(); 668} 669 670static void 671igi_delete_locked(const struct ifnet *ifp) 672{ 673 struct igmp_ifinfo *igi, *tigi; 674 675 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)", 676 __func__, ifp, ifp->if_xname); 677 678 IGMP_LOCK_ASSERT(); 679 680 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) { 681 if (igi->igi_ifp == ifp) { 682 /* 683 * Free deferred General Query responses. 684 */ 685 _IF_DRAIN(&igi->igi_gq); 686 687 LIST_REMOVE(igi, igi_link); 688 689 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead), 690 ("%s: there are dangling in_multi references", 691 __func__)); 692 693 free(igi, M_IGMP); 694 return; 695 } 696 } 697 698#ifdef INVARIANTS 699 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp); 700#endif 701} 702 703/* 704 * Process a received IGMPv1 query. 705 * Return non-zero if the message should be dropped. 
 *
 * VIMAGE: The curvnet pointer is derived from the input ifp.
 */
static int
igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifinfo *igi;
	struct in_multi *inm;

	/*
	 * IGMPv1 Host Membership Queries SHOULD always be addressed to
	 * 224.0.0.1. They are always treated as General Queries.
	 * igmp_group is always ignored. Do not drop it as a userland
	 * daemon may wish to see it.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
		IGMPSTAT_INC(igps_rcv_badqueries);
		return (0);
	}
	IGMPSTAT_INC(igps_rcv_gen_queries);

	IN_MULTI_LOCK();
	IGMP_LOCK();

	igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
	KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));

	if (igi->igi_flags & IGIF_LOOPBACK) {
		CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
		    ifp, ifp->if_xname);
		goto out_locked;
	}

	/*
	 * Switch to IGMPv1 host compatibility mode.
	 */
	igmp_set_version(igi, IGMP_VERSION_1);

	CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);

	/*
	 * Start the timers in all of our group records
	 * for the interface on which the query arrived,
	 * except those which are already running.
	 */
	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET ||
		    ifma->ifma_protospec == NULL)
			continue;
		inm = (struct in_multi *)ifma->ifma_protospec;
		if (inm->inm_timer != 0)
			continue;
		switch (inm->inm_state) {
		case IGMP_NOT_MEMBER:
		case IGMP_SILENT_MEMBER:
			break;
		case IGMP_G_QUERY_PENDING_MEMBER:
		case IGMP_SG_QUERY_PENDING_MEMBER:
		case IGMP_REPORTING_MEMBER:
		case IGMP_IDLE_MEMBER:
		case IGMP_LAZY_MEMBER:
		case IGMP_SLEEPING_MEMBER:
		case IGMP_AWAKENING_MEMBER:
			inm->inm_state = IGMP_REPORTING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(
			    IGMP_V1V2_MAX_RI * PR_FASTHZ);
			V_current_state_timers_running = 1;
			break;
		case IGMP_LEAVING_MEMBER:
			break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv2 general or group-specific query.
 */
static int
igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
    const struct igmp *igmp)
{
	struct ifmultiaddr *ifma;
	struct igmp_ifinfo *igi;
	struct in_multi *inm;
	int is_general_query;
	uint16_t timer;

	is_general_query = 0;

	/*
	 * Validate address fields upfront.
	 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
	 */
	if (in_nullhost(igmp->igmp_group)) {
		/*
		 * IGMPv2 General Query.
		 * If this was not sent to the all-hosts group, ignore it.
		 */
		if (!in_allhosts(ip->ip_dst))
			return (0);
		IGMPSTAT_INC(igps_rcv_gen_queries);
		is_general_query = 1;
	} else {
		/* IGMPv2 Group-Specific Query.
*/ 821 IGMPSTAT_INC(igps_rcv_group_queries); 822 } 823 824 IN_MULTI_LOCK(); 825 IGMP_LOCK(); 826 827 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 828 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 829 830 if (igi->igi_flags & IGIF_LOOPBACK) { 831 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)", 832 ifp, ifp->if_xname); 833 goto out_locked; 834 } 835 836 /* 837 * Ignore v2 query if in v1 Compatibility Mode. 838 */ 839 if (igi->igi_version == IGMP_VERSION_1) 840 goto out_locked; 841 842 igmp_set_version(igi, IGMP_VERSION_2); 843 844 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE; 845 if (timer == 0) 846 timer = 1; 847 848 if (is_general_query) { 849 /* 850 * For each reporting group joined on this 851 * interface, kick the report timer. 852 */ 853 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)", 854 ifp, ifp->if_xname); 855 IF_ADDR_RLOCK(ifp); 856 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 857 if (ifma->ifma_addr->sa_family != AF_INET || 858 ifma->ifma_protospec == NULL) 859 continue; 860 inm = (struct in_multi *)ifma->ifma_protospec; 861 igmp_v2_update_group(inm, timer); 862 } 863 IF_ADDR_RUNLOCK(ifp); 864 } else { 865 /* 866 * Group-specific IGMPv2 query, we need only 867 * look up the single group to process it. 868 */ 869 inm = inm_lookup(ifp, igmp->igmp_group); 870 if (inm != NULL) { 871 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)", 872 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 873 igmp_v2_update_group(inm, timer); 874 } 875 } 876 877out_locked: 878 IGMP_UNLOCK(); 879 IN_MULTI_UNLOCK(); 880 881 return (0); 882} 883 884/* 885 * Update the report timer on a group in response to an IGMPv2 query. 886 * 887 * If we are becoming the reporting member for this group, start the timer. 888 * If we already are the reporting member for this group, and timer is 889 * below the threshold, reset it. 890 * 891 * We may be updating the group for the first time since we switched 892 * to IGMPv3. If we are, then we must clear any recorded source lists, 893 * and transition to REPORTING state; the group timer is overloaded 894 * for group and group-source query responses. 895 * 896 * Unlike IGMPv3, the delay per group should be jittered 897 * to avoid bursts of IGMPv2 reports. 898 */ 899static void 900igmp_v2_update_group(struct in_multi *inm, const int timer) 901{ 902 903 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__, 904 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer); 905 906 IN_MULTI_LOCK_ASSERT(); 907 908 switch (inm->inm_state) { 909 case IGMP_NOT_MEMBER: 910 case IGMP_SILENT_MEMBER: 911 break; 912 case IGMP_REPORTING_MEMBER: 913 if (inm->inm_timer != 0 && 914 inm->inm_timer <= timer) { 915 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, " 916 "skipping.", __func__); 917 break; 918 } 919 /* FALLTHROUGH */ 920 case IGMP_SG_QUERY_PENDING_MEMBER: 921 case IGMP_G_QUERY_PENDING_MEMBER: 922 case IGMP_IDLE_MEMBER: 923 case IGMP_LAZY_MEMBER: 924 case IGMP_AWAKENING_MEMBER: 925 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__); 926 inm->inm_state = IGMP_REPORTING_MEMBER; 927 inm->inm_timer = IGMP_RANDOM_DELAY(timer); 928 V_current_state_timers_running = 1; 929 break; 930 case IGMP_SLEEPING_MEMBER: 931 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__); 932 inm->inm_state = IGMP_AWAKENING_MEMBER; 933 break; 934 case IGMP_LEAVING_MEMBER: 935 break; 936 } 937} 938 939/* 940 * Process a received IGMPv3 general, group-specific or 941 * group-and-source-specific query. 
942 * Assumes m has already been pulled up to the full IGMP message length. 943 * Return 0 if successful, otherwise an appropriate error code is returned. 944 */ 945static int 946igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip, 947 /*const*/ struct igmpv3 *igmpv3) 948{ 949 struct igmp_ifinfo *igi; 950 struct in_multi *inm; 951 int is_general_query; 952 uint32_t maxresp, nsrc, qqi; 953 uint16_t timer; 954 uint8_t qrv; 955 956 is_general_query = 0; 957 958 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname); 959 960 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */ 961 if (maxresp >= 128) { 962 maxresp = IGMP_MANT(igmpv3->igmp_code) << 963 (IGMP_EXP(igmpv3->igmp_code) + 3); 964 } 965 966 /* 967 * Robustness must never be less than 2 for on-wire IGMPv3. 968 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make 969 * an exception for interfaces whose IGMPv3 state changes 970 * are redirected to loopback (e.g. MANET). 971 */ 972 qrv = IGMP_QRV(igmpv3->igmp_misc); 973 if (qrv < 2) { 974 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__, 975 qrv, IGMP_RV_INIT); 976 qrv = IGMP_RV_INIT; 977 } 978 979 qqi = igmpv3->igmp_qqi; 980 if (qqi >= 128) { 981 qqi = IGMP_MANT(igmpv3->igmp_qqi) << 982 (IGMP_EXP(igmpv3->igmp_qqi) + 3); 983 } 984 985 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE; 986 if (timer == 0) 987 timer = 1; 988 989 nsrc = ntohs(igmpv3->igmp_numsrc); 990 991 /* 992 * Validate address fields and versions upfront before 993 * accepting v3 query. 994 * XXX SMPng: Unlocked access to igmpstat counters here. 995 */ 996 if (in_nullhost(igmpv3->igmp_group)) { 997 /* 998 * IGMPv3 General Query. 999 * 1000 * General Queries SHOULD be directed to 224.0.0.1. 1001 * A general query with a source list has undefined 1002 * behaviour; discard it. 1003 */ 1004 IGMPSTAT_INC(igps_rcv_gen_queries); 1005 if (!in_allhosts(ip->ip_dst) || nsrc > 0) { 1006 IGMPSTAT_INC(igps_rcv_badqueries); 1007 return (0); 1008 } 1009 is_general_query = 1; 1010 } else { 1011 /* Group or group-source specific query. */ 1012 if (nsrc == 0) 1013 IGMPSTAT_INC(igps_rcv_group_queries); 1014 else 1015 IGMPSTAT_INC(igps_rcv_gsr_queries); 1016 } 1017 1018 IN_MULTI_LOCK(); 1019 IGMP_LOCK(); 1020 1021 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 1022 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 1023 1024 if (igi->igi_flags & IGIF_LOOPBACK) { 1025 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)", 1026 ifp, ifp->if_xname); 1027 goto out_locked; 1028 } 1029 1030 /* 1031 * Discard the v3 query if we're in Compatibility Mode. 1032 * The RFC is not obviously worded that hosts need to stay in 1033 * compatibility mode until the Old Version Querier Present 1034 * timer expires. 1035 */ 1036 if (igi->igi_version != IGMP_VERSION_3) { 1037 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)", 1038 igi->igi_version, ifp, ifp->if_xname); 1039 goto out_locked; 1040 } 1041 1042 igmp_set_version(igi, IGMP_VERSION_3); 1043 igi->igi_rv = qrv; 1044 igi->igi_qi = qqi; 1045 igi->igi_qri = maxresp; 1046 1047 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi, 1048 maxresp); 1049 1050 if (is_general_query) { 1051 /* 1052 * Schedule a current-state report on this ifp for 1053 * all groups, possibly containing source lists. 1054 * If there is a pending General Query response 1055 * scheduled earlier than the selected delay, do 1056 * not schedule any other reports. 1057 * Otherwise, reset the interface timer. 
		 */
		CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
			igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
			V_interface_timers_running = 1;
		}
	} else {
		/*
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		inm = inm_lookup(ifp, igmpv3->igmp_group);
		if (inm == NULL)
			goto out_locked;
		if (nsrc > 0) {
			if (!ratecheck(&inm->inm_lastgsrtv,
			    &V_igmp_gsrdelay)) {
				CTR1(KTR_IGMPV3, "%s: GS query throttled.",
				    __func__);
				IGMPSTAT_INC(igps_drop_gsr_queries);
				goto out_locked;
			}
		}
		CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
		    inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
			igmp_input_v3_group_query(inm, igi, timer, igmpv3);
	}

out_locked:
	IGMP_UNLOCK();
	IN_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received IGMPv3 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
    int timer, /*const*/ struct igmpv3 *igmpv3)
{
	int retval;
	uint16_t nsrc;

	IN_MULTI_LOCK_ASSERT();
	IGMP_LOCK_ASSERT();

	retval = 0;

	switch (inm->inm_state) {
	case IGMP_NOT_MEMBER:
	case IGMP_SILENT_MEMBER:
	case IGMP_SLEEPING_MEMBER:
	case IGMP_LAZY_MEMBER:
	case IGMP_AWAKENING_MEMBER:
	case IGMP_IDLE_MEMBER:
	case IGMP_LEAVING_MEMBER:
		return (retval);
		break;
	case IGMP_REPORTING_MEMBER:
	case IGMP_G_QUERY_PENDING_MEMBER:
	case IGMP_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(igmpv3->igmp_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
		    inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
			inm_clear_recorded(inm);
			timer = min(inm->inm_timer, timer);
		}
		inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->inm_timer, timer);
		inm->inm_timer = IGMP_RANDOM_DELAY(timer);
		V_current_state_timers_running = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, where a response to a previous g-s-r
	 * query exists, or none exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 * FIXME: Handling source lists larger than 1 mbuf requires that
	 * we pass the mbuf chain pointer down to this function, and use
	 * m_getptr() to walk the chain.
	 */
	if (inm->inm_nsrc > 0) {
		const struct in_addr *ap;
		int i, nrecorded;

		ap = (const struct in_addr *)(igmpv3 + 1);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++, ap++) {
			retval = inm_record_source(inm, ap->s_addr);
			if (retval < 0)
				break;
			nrecorded += retval;
		}
		if (nrecorded > 0) {
			CTR1(KTR_IGMPV3,
			    "%s: schedule response to SG query", __func__);
			inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
			inm->inm_timer = IGMP_RANDOM_DELAY(timer);
			V_current_state_timers_running = 1;
		}
	}

	return (retval);
}

/*
 * Process a received IGMPv1 host membership report.
 *
 * NOTE: 0.0.0.0 workaround breaks const correctness.
 */
static int
igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
    /*const*/ struct igmp *igmp)
{
	struct in_ifaddr *ia;
	struct in_multi *inm;

	IGMPSTAT_INC(igps_rcv_reports);

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
	    !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
		IGMPSTAT_INC(igps_rcv_badreports);
		return (EINVAL);
	}

	/*
	 * RFC 3376, Section 4.2.13, 9.2, 9.3:
	 * Booting clients may use the source address 0.0.0.0. Some
	 * IGMP daemons may not know how to use IP_RECVIF to determine
	 * the interface upon which this message was received.
	 * Replace 0.0.0.0 with the subnet address if told to do so.
	 */
	if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
		IFP_TO_IA(ifp, ia);
		if (ia != NULL) {
			ip->ip_src.s_addr = htonl(ia->ia_subnet);
			ifa_free(&ia->ia_ifa);
		}
	}

	CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
	    inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);

	/*
	 * IGMPv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, stop our group timer and transition to the 'lazy' state.
	 */
	IN_MULTI_LOCK();
	inm = inm_lookup(ifp, igmp->igmp_group);
	if (inm != NULL) {
		struct igmp_ifinfo *igi;

		igi = inm->inm_igi;
		if (igi == NULL) {
			KASSERT(igi != NULL,
			    ("%s: no igi for ifp %p", __func__, ifp));
			goto out_locked;
		}

		IGMPSTAT_INC(igps_rcv_ourreports);

		/*
		 * If we are in IGMPv3 host mode, do not allow the
		 * other host's IGMPv1 report to suppress our reports
		 * unless explicitly configured to do so.
1270 */ 1271 if (igi->igi_version == IGMP_VERSION_3) { 1272 if (V_igmp_legacysupp) 1273 igmp_v3_suppress_group_record(inm); 1274 goto out_locked; 1275 } 1276 1277 inm->inm_timer = 0; 1278 1279 switch (inm->inm_state) { 1280 case IGMP_NOT_MEMBER: 1281 case IGMP_SILENT_MEMBER: 1282 break; 1283 case IGMP_IDLE_MEMBER: 1284 case IGMP_LAZY_MEMBER: 1285 case IGMP_AWAKENING_MEMBER: 1286 CTR3(KTR_IGMPV3, 1287 "report suppressed for %s on ifp %p(%s)", 1288 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1289 case IGMP_SLEEPING_MEMBER: 1290 inm->inm_state = IGMP_SLEEPING_MEMBER; 1291 break; 1292 case IGMP_REPORTING_MEMBER: 1293 CTR3(KTR_IGMPV3, 1294 "report suppressed for %s on ifp %p(%s)", 1295 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1296 if (igi->igi_version == IGMP_VERSION_1) 1297 inm->inm_state = IGMP_LAZY_MEMBER; 1298 else if (igi->igi_version == IGMP_VERSION_2) 1299 inm->inm_state = IGMP_SLEEPING_MEMBER; 1300 break; 1301 case IGMP_G_QUERY_PENDING_MEMBER: 1302 case IGMP_SG_QUERY_PENDING_MEMBER: 1303 case IGMP_LEAVING_MEMBER: 1304 break; 1305 } 1306 } 1307 1308out_locked: 1309 IN_MULTI_UNLOCK(); 1310 1311 return (0); 1312} 1313 1314/* 1315 * Process a received IGMPv2 host membership report. 1316 * 1317 * NOTE: 0.0.0.0 workaround breaks const correctness. 1318 */ 1319static int 1320igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip, 1321 /*const*/ struct igmp *igmp) 1322{ 1323 struct in_ifaddr *ia; 1324 struct in_multi *inm; 1325 1326 /* 1327 * Make sure we don't hear our own membership report. Fast 1328 * leave requires knowing that we are the only member of a 1329 * group. 1330 */ 1331 IFP_TO_IA(ifp, ia); 1332 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) { 1333 ifa_free(&ia->ia_ifa); 1334 return (0); 1335 } 1336 1337 IGMPSTAT_INC(igps_rcv_reports); 1338 1339 if (ifp->if_flags & IFF_LOOPBACK) { 1340 if (ia != NULL) 1341 ifa_free(&ia->ia_ifa); 1342 return (0); 1343 } 1344 1345 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) || 1346 !in_hosteq(igmp->igmp_group, ip->ip_dst)) { 1347 if (ia != NULL) 1348 ifa_free(&ia->ia_ifa); 1349 IGMPSTAT_INC(igps_rcv_badreports); 1350 return (EINVAL); 1351 } 1352 1353 /* 1354 * RFC 3376, Section 4.2.13, 9.2, 9.3: 1355 * Booting clients may use the source address 0.0.0.0. Some 1356 * IGMP daemons may not know how to use IP_RECVIF to determine 1357 * the interface upon which this message was received. 1358 * Replace 0.0.0.0 with the subnet address if told to do so. 1359 */ 1360 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) { 1361 if (ia != NULL) 1362 ip->ip_src.s_addr = htonl(ia->ia_subnet); 1363 } 1364 if (ia != NULL) 1365 ifa_free(&ia->ia_ifa); 1366 1367 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)", 1368 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1369 1370 /* 1371 * IGMPv2 report suppression. 1372 * If we are a member of this group, and our membership should be 1373 * reported, and our group timer is pending or about to be reset, 1374 * stop our group timer by transitioning to the 'lazy' state. 1375 */ 1376 IN_MULTI_LOCK(); 1377 inm = inm_lookup(ifp, igmp->igmp_group); 1378 if (inm != NULL) { 1379 struct igmp_ifinfo *igi; 1380 1381 igi = inm->inm_igi; 1382 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp)); 1383 1384 IGMPSTAT_INC(igps_rcv_ourreports); 1385 1386 /* 1387 * If we are in IGMPv3 host mode, do not allow the 1388 * other host's IGMPv1 report to suppress our reports 1389 * unless explicitly configured to do so. 
1390 */ 1391 if (igi->igi_version == IGMP_VERSION_3) { 1392 if (V_igmp_legacysupp) 1393 igmp_v3_suppress_group_record(inm); 1394 goto out_locked; 1395 } 1396 1397 inm->inm_timer = 0; 1398 1399 switch (inm->inm_state) { 1400 case IGMP_NOT_MEMBER: 1401 case IGMP_SILENT_MEMBER: 1402 case IGMP_SLEEPING_MEMBER: 1403 break; 1404 case IGMP_REPORTING_MEMBER: 1405 case IGMP_IDLE_MEMBER: 1406 case IGMP_AWAKENING_MEMBER: 1407 CTR3(KTR_IGMPV3, 1408 "report suppressed for %s on ifp %p(%s)", 1409 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname); 1410 case IGMP_LAZY_MEMBER: 1411 inm->inm_state = IGMP_LAZY_MEMBER; 1412 break; 1413 case IGMP_G_QUERY_PENDING_MEMBER: 1414 case IGMP_SG_QUERY_PENDING_MEMBER: 1415 case IGMP_LEAVING_MEMBER: 1416 break; 1417 } 1418 } 1419 1420out_locked: 1421 IN_MULTI_UNLOCK(); 1422 1423 return (0); 1424} 1425 1426void 1427igmp_input(struct mbuf *m, int off) 1428{ 1429 int iphlen; 1430 struct ifnet *ifp; 1431 struct igmp *igmp; 1432 struct ip *ip; 1433 int igmplen; 1434 int minlen; 1435 int queryver; 1436 1437 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, m, off); 1438 1439 ifp = m->m_pkthdr.rcvif; 1440 1441 IGMPSTAT_INC(igps_rcv_total); 1442 1443 ip = mtod(m, struct ip *); 1444 iphlen = off; 1445 igmplen = ip->ip_len; 1446 1447 /* 1448 * Validate lengths. 1449 */ 1450 if (igmplen < IGMP_MINLEN) { 1451 IGMPSTAT_INC(igps_rcv_tooshort); 1452 m_freem(m); 1453 return; 1454 } 1455 1456 /* 1457 * Always pullup to the minimum size for v1/v2 or v3 1458 * to amortize calls to m_pullup(). 1459 */ 1460 minlen = iphlen; 1461 if (igmplen >= IGMP_V3_QUERY_MINLEN) 1462 minlen += IGMP_V3_QUERY_MINLEN; 1463 else 1464 minlen += IGMP_MINLEN; 1465 if ((m->m_flags & M_EXT || m->m_len < minlen) && 1466 (m = m_pullup(m, minlen)) == 0) { 1467 IGMPSTAT_INC(igps_rcv_tooshort); 1468 return; 1469 } 1470 ip = mtod(m, struct ip *); 1471 1472 /* 1473 * Validate checksum. 1474 */ 1475 m->m_data += iphlen; 1476 m->m_len -= iphlen; 1477 igmp = mtod(m, struct igmp *); 1478 if (in_cksum(m, igmplen)) { 1479 IGMPSTAT_INC(igps_rcv_badsum); 1480 m_freem(m); 1481 return; 1482 } 1483 m->m_data -= iphlen; 1484 m->m_len += iphlen; 1485 1486 /* 1487 * IGMP control traffic is link-scope, and must have a TTL of 1. 1488 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception; 1489 * probe packets may come from beyond the LAN. 
1490 */ 1491 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) { 1492 IGMPSTAT_INC(igps_rcv_badttl); 1493 m_freem(m); 1494 return; 1495 } 1496 1497 switch (igmp->igmp_type) { 1498 case IGMP_HOST_MEMBERSHIP_QUERY: 1499 if (igmplen == IGMP_MINLEN) { 1500 if (igmp->igmp_code == 0) 1501 queryver = IGMP_VERSION_1; 1502 else 1503 queryver = IGMP_VERSION_2; 1504 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) { 1505 queryver = IGMP_VERSION_3; 1506 } else { 1507 IGMPSTAT_INC(igps_rcv_tooshort); 1508 m_freem(m); 1509 return; 1510 } 1511 1512 switch (queryver) { 1513 case IGMP_VERSION_1: 1514 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1515 if (!V_igmp_v1enable) 1516 break; 1517 if (igmp_input_v1_query(ifp, ip, igmp) != 0) { 1518 m_freem(m); 1519 return; 1520 } 1521 break; 1522 1523 case IGMP_VERSION_2: 1524 IGMPSTAT_INC(igps_rcv_v1v2_queries); 1525 if (!V_igmp_v2enable) 1526 break; 1527 if (igmp_input_v2_query(ifp, ip, igmp) != 0) { 1528 m_freem(m); 1529 return; 1530 } 1531 break; 1532 1533 case IGMP_VERSION_3: { 1534 struct igmpv3 *igmpv3; 1535 uint16_t igmpv3len; 1536 uint16_t nsrc; 1537 1538 IGMPSTAT_INC(igps_rcv_v3_queries); 1539 igmpv3 = (struct igmpv3 *)igmp; 1540 /* 1541 * Validate length based on source count. 1542 */ 1543 nsrc = ntohs(igmpv3->igmp_numsrc); 1544 if (nsrc * sizeof(in_addr_t) > 1545 UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) { 1546 IGMPSTAT_INC(igps_rcv_tooshort); 1547 return; 1548 } 1549 /* 1550 * m_pullup() may modify m, so pullup in 1551 * this scope. 1552 */ 1553 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN + 1554 sizeof(struct in_addr) * nsrc; 1555 if ((m->m_flags & M_EXT || 1556 m->m_len < igmpv3len) && 1557 (m = m_pullup(m, igmpv3len)) == NULL) { 1558 IGMPSTAT_INC(igps_rcv_tooshort); 1559 return; 1560 } 1561 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *) 1562 + iphlen); 1563 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) { 1564 m_freem(m); 1565 return; 1566 } 1567 } 1568 break; 1569 } 1570 break; 1571 1572 case IGMP_v1_HOST_MEMBERSHIP_REPORT: 1573 if (!V_igmp_v1enable) 1574 break; 1575 if (igmp_input_v1_report(ifp, ip, igmp) != 0) { 1576 m_freem(m); 1577 return; 1578 } 1579 break; 1580 1581 case IGMP_v2_HOST_MEMBERSHIP_REPORT: 1582 if (!V_igmp_v2enable) 1583 break; 1584 if (!ip_checkrouteralert(m)) 1585 IGMPSTAT_INC(igps_rcv_nora); 1586 if (igmp_input_v2_report(ifp, ip, igmp) != 0) { 1587 m_freem(m); 1588 return; 1589 } 1590 break; 1591 1592 case IGMP_v3_HOST_MEMBERSHIP_REPORT: 1593 /* 1594 * Hosts do not need to process IGMPv3 membership reports, 1595 * as report suppression is no longer required. 1596 */ 1597 if (!ip_checkrouteralert(m)) 1598 IGMPSTAT_INC(igps_rcv_nora); 1599 break; 1600 1601 default: 1602 break; 1603 } 1604 1605 /* 1606 * Pass all valid IGMP packets up to any process(es) listening on a 1607 * raw IGMP socket. 1608 */ 1609 rip_input(m, off); 1610} 1611 1612 1613/* 1614 * Fast timeout handler (global). 1615 * VIMAGE: Timeout handlers are expected to service all vimages. 1616 */ 1617void 1618igmp_fasttimo(void) 1619{ 1620 VNET_ITERATOR_DECL(vnet_iter); 1621 1622 VNET_LIST_RLOCK_NOSLEEP(); 1623 VNET_FOREACH(vnet_iter) { 1624 CURVNET_SET(vnet_iter); 1625 igmp_fasttimo_vnet(); 1626 CURVNET_RESTORE(); 1627 } 1628 VNET_LIST_RUNLOCK_NOSLEEP(); 1629} 1630 1631/* 1632 * Fast timeout handler (per-vnet). 1633 * Sends are shuffled off to a netisr to deal with Giant. 1634 * 1635 * VIMAGE: Assume caller has set up our curvnet. 
1636 */ 1637static void 1638igmp_fasttimo_vnet(void) 1639{ 1640 struct ifqueue scq; /* State-change packets */ 1641 struct ifqueue qrq; /* Query response packets */ 1642 struct ifnet *ifp; 1643 struct igmp_ifinfo *igi; 1644 struct ifmultiaddr *ifma; 1645 struct in_multi *inm; 1646 int loop, uri_fasthz; 1647 1648 loop = 0; 1649 uri_fasthz = 0; 1650 1651 /* 1652 * Quick check to see if any work needs to be done, in order to 1653 * minimize the overhead of fasttimo processing. 1654 * SMPng: XXX Unlocked reads. 1655 */ 1656 if (!V_current_state_timers_running && 1657 !V_interface_timers_running && 1658 !V_state_change_timers_running) 1659 return; 1660 1661 IN_MULTI_LOCK(); 1662 IGMP_LOCK(); 1663 1664 /* 1665 * IGMPv3 General Query response timer processing. 1666 */ 1667 if (V_interface_timers_running) { 1668 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__); 1669 1670 V_interface_timers_running = 0; 1671 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1672 if (igi->igi_v3_timer == 0) { 1673 /* Do nothing. */ 1674 } else if (--igi->igi_v3_timer == 0) { 1675 igmp_v3_dispatch_general_query(igi); 1676 } else { 1677 V_interface_timers_running = 1; 1678 } 1679 } 1680 } 1681 1682 if (!V_current_state_timers_running && 1683 !V_state_change_timers_running) 1684 goto out_locked; 1685 1686 V_current_state_timers_running = 0; 1687 V_state_change_timers_running = 0; 1688 1689 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__); 1690 1691 /* 1692 * IGMPv1/v2/v3 host report and state-change timer processing. 1693 * Note: Processing a v3 group timer may remove a node. 1694 */ 1695 LIST_FOREACH(igi, &V_igi_head, igi_link) { 1696 ifp = igi->igi_ifp; 1697 1698 if (igi->igi_version == IGMP_VERSION_3) { 1699 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 1700 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri * 1701 PR_FASTHZ); 1702 1703 memset(&qrq, 0, sizeof(struct ifqueue)); 1704 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS); 1705 1706 memset(&scq, 0, sizeof(struct ifqueue)); 1707 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS); 1708 } 1709 1710 IF_ADDR_RLOCK(ifp); 1711 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 1712 if (ifma->ifma_addr->sa_family != AF_INET || 1713 ifma->ifma_protospec == NULL) 1714 continue; 1715 inm = (struct in_multi *)ifma->ifma_protospec; 1716 switch (igi->igi_version) { 1717 case IGMP_VERSION_1: 1718 case IGMP_VERSION_2: 1719 igmp_v1v2_process_group_timer(inm, 1720 igi->igi_version); 1721 break; 1722 case IGMP_VERSION_3: 1723 igmp_v3_process_group_timers(igi, &qrq, 1724 &scq, inm, uri_fasthz); 1725 break; 1726 } 1727 } 1728 IF_ADDR_RUNLOCK(ifp); 1729 1730 if (igi->igi_version == IGMP_VERSION_3) { 1731 struct in_multi *tinm; 1732 1733 igmp_dispatch_queue(&qrq, 0, loop); 1734 igmp_dispatch_queue(&scq, 0, loop); 1735 1736 /* 1737 * Free the in_multi reference(s) for this 1738 * IGMP lifecycle. 1739 */ 1740 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, 1741 inm_nrele, tinm) { 1742 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, 1743 inm_nrele); 1744 inm_release_locked(inm); 1745 } 1746 } 1747 } 1748 1749out_locked: 1750 IGMP_UNLOCK(); 1751 IN_MULTI_UNLOCK(); 1752} 1753 1754/* 1755 * Update host report group timer for IGMPv1/v2. 1756 * Will update the global pending timer flags. 
1757 */ 1758static void 1759igmp_v1v2_process_group_timer(struct in_multi *inm, const int version) 1760{ 1761 int report_timer_expired; 1762 1763 IN_MULTI_LOCK_ASSERT(); 1764 IGMP_LOCK_ASSERT(); 1765 1766 if (inm->inm_timer == 0) { 1767 report_timer_expired = 0; 1768 } else if (--inm->inm_timer == 0) { 1769 report_timer_expired = 1; 1770 } else { 1771 V_current_state_timers_running = 1; 1772 return; 1773 } 1774 1775 switch (inm->inm_state) { 1776 case IGMP_NOT_MEMBER: 1777 case IGMP_SILENT_MEMBER: 1778 case IGMP_IDLE_MEMBER: 1779 case IGMP_LAZY_MEMBER: 1780 case IGMP_SLEEPING_MEMBER: 1781 case IGMP_AWAKENING_MEMBER: 1782 break; 1783 case IGMP_REPORTING_MEMBER: 1784 if (report_timer_expired) { 1785 inm->inm_state = IGMP_IDLE_MEMBER; 1786 (void)igmp_v1v2_queue_report(inm, 1787 (version == IGMP_VERSION_2) ? 1788 IGMP_v2_HOST_MEMBERSHIP_REPORT : 1789 IGMP_v1_HOST_MEMBERSHIP_REPORT); 1790 } 1791 break; 1792 case IGMP_G_QUERY_PENDING_MEMBER: 1793 case IGMP_SG_QUERY_PENDING_MEMBER: 1794 case IGMP_LEAVING_MEMBER: 1795 break; 1796 } 1797} 1798 1799/* 1800 * Update a group's timers for IGMPv3. 1801 * Will update the global pending timer flags. 1802 * Note: Unlocked read from igi. 1803 */ 1804static void 1805igmp_v3_process_group_timers(struct igmp_ifinfo *igi, 1806 struct ifqueue *qrq, struct ifqueue *scq, 1807 struct in_multi *inm, const int uri_fasthz) 1808{ 1809 int query_response_timer_expired; 1810 int state_change_retransmit_timer_expired; 1811 1812 IN_MULTI_LOCK_ASSERT(); 1813 IGMP_LOCK_ASSERT(); 1814 1815 query_response_timer_expired = 0; 1816 state_change_retransmit_timer_expired = 0; 1817 1818 /* 1819 * During a transition from v1/v2 compatibility mode back to v3, 1820 * a group record in REPORTING state may still have its group 1821 * timer active. This is a no-op in this function; it is easier 1822 * to deal with it here than to complicate the slow-timeout path. 1823 */ 1824 if (inm->inm_timer == 0) { 1825 query_response_timer_expired = 0; 1826 } else if (--inm->inm_timer == 0) { 1827 query_response_timer_expired = 1; 1828 } else { 1829 V_current_state_timers_running = 1; 1830 } 1831 1832 if (inm->inm_sctimer == 0) { 1833 state_change_retransmit_timer_expired = 0; 1834 } else if (--inm->inm_sctimer == 0) { 1835 state_change_retransmit_timer_expired = 1; 1836 } else { 1837 V_state_change_timers_running = 1; 1838 } 1839 1840 /* We are in fasttimo, so be quick about it. */ 1841 if (!state_change_retransmit_timer_expired && 1842 !query_response_timer_expired) 1843 return; 1844 1845 switch (inm->inm_state) { 1846 case IGMP_NOT_MEMBER: 1847 case IGMP_SILENT_MEMBER: 1848 case IGMP_SLEEPING_MEMBER: 1849 case IGMP_LAZY_MEMBER: 1850 case IGMP_AWAKENING_MEMBER: 1851 case IGMP_IDLE_MEMBER: 1852 break; 1853 case IGMP_G_QUERY_PENDING_MEMBER: 1854 case IGMP_SG_QUERY_PENDING_MEMBER: 1855 /* 1856 * Respond to a previously pending Group-Specific 1857 * or Group-and-Source-Specific query by enqueueing 1858 * the appropriate Current-State report for 1859 * immediate transmission. 1860 */ 1861 if (query_response_timer_expired) { 1862 int retval; 1863 1864 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1, 1865 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)); 1866 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 1867 __func__, retval); 1868 inm->inm_state = IGMP_REPORTING_MEMBER; 1869 /* XXX Clear recorded sources for next time. 
 */
			inm_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case IGMP_REPORTING_MEMBER:
	case IGMP_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->inm_scrv > 0) {
				inm->inm_sctimer = uri_fasthz;
				V_state_change_timers_running = 1;
			}
			/*
			 * Retransmit the previously computed state-change
			 * report. If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void)igmp_v3_merge_state_changes(inm, scq);

			inm_commit(inm);
			CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
			    inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);

			/*
			 * If we are leaving the group for good, make sure
			 * we release IGMP's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the in_ifmultiaddr TAILQ.
			 */
			if (inm->inm_state == IGMP_LEAVING_MEMBER &&
			    inm->inm_scrv == 0) {
				inm->inm_state = IGMP_NOT_MEMBER;
				SLIST_INSERT_HEAD(&igi->igi_relinmhead,
				    inm, inm_nrele);
			}
		}
		break;
	}
}


/*
 * Suppress a group's pending response to a group or source/group query.
 *
 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
 * Do NOT update ST1/ST0 as this operation merely suppresses
 * the currently pending group record.
 * Do NOT suppress the response to a general query. It is possible but
 * it would require adding another state or flag.
 */
static void
igmp_v3_suppress_group_record(struct in_multi *inm)
{

	IN_MULTI_LOCK_ASSERT();

	KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
	    ("%s: not IGMPv3 mode on link", __func__));

	/*
	 * Only a group or group-and-source query response can be
	 * suppressed; using || here would make the test always true
	 * and turn this function into a no-op.
	 */
	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
		return;

	if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
		inm_clear_recorded(inm);

	inm->inm_timer = 0;
	inm->inm_state = IGMP_REPORTING_MEMBER;
}

/*
 * Switch to a different IGMP version on the given interface,
 * as per Section 7.2.1.
 */
static void
igmp_set_version(struct igmp_ifinfo *igi, const int version)
{
	int old_version_timer;

	IGMP_LOCK_ASSERT();

	CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
	    version, igi->igi_ifp, igi->igi_ifp->if_xname);

	if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 8.12.
1965 */ 1966 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri; 1967 old_version_timer *= PR_SLOWHZ; 1968 1969 if (version == IGMP_VERSION_1) { 1970 igi->igi_v1_timer = old_version_timer; 1971 igi->igi_v2_timer = 0; 1972 } else if (version == IGMP_VERSION_2) { 1973 igi->igi_v1_timer = 0; 1974 igi->igi_v2_timer = old_version_timer; 1975 } 1976 } 1977 1978 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 1979 if (igi->igi_version != IGMP_VERSION_2) { 1980 igi->igi_version = IGMP_VERSION_2; 1981 igmp_v3_cancel_link_timers(igi); 1982 } 1983 } else if (igi->igi_v1_timer > 0) { 1984 if (igi->igi_version != IGMP_VERSION_1) { 1985 igi->igi_version = IGMP_VERSION_1; 1986 igmp_v3_cancel_link_timers(igi); 1987 } 1988 } 1989} 1990 1991/* 1992 * Cancel pending IGMPv3 timers for the given link and all groups 1993 * joined on it; state-change, general-query, and group-query timers. 1994 * 1995 * Only ever called on a transition from v3 to Compatibility mode. Kill 1996 * the timers stone dead (this may be expensive for large N groups), they 1997 * will be restarted if Compatibility Mode deems that they must be due to 1998 * query processing. 1999 */ 2000static void 2001igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi) 2002{ 2003 struct ifmultiaddr *ifma; 2004 struct ifnet *ifp; 2005 struct in_multi *inm, *tinm; 2006 2007 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__, 2008 igi->igi_ifp, igi->igi_ifp->if_xname); 2009 2010 IN_MULTI_LOCK_ASSERT(); 2011 IGMP_LOCK_ASSERT(); 2012 2013 /* 2014 * Stop the v3 General Query Response on this link stone dead. 2015 * If fasttimo is woken up due to V_interface_timers_running, 2016 * the flag will be cleared if there are no pending link timers. 2017 */ 2018 igi->igi_v3_timer = 0; 2019 2020 /* 2021 * Now clear the current-state and state-change report timers 2022 * for all memberships scoped to this link. 2023 */ 2024 ifp = igi->igi_ifp; 2025 IF_ADDR_RLOCK(ifp); 2026 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 2027 if (ifma->ifma_addr->sa_family != AF_INET || 2028 ifma->ifma_protospec == NULL) 2029 continue; 2030 inm = (struct in_multi *)ifma->ifma_protospec; 2031 switch (inm->inm_state) { 2032 case IGMP_NOT_MEMBER: 2033 case IGMP_SILENT_MEMBER: 2034 case IGMP_IDLE_MEMBER: 2035 case IGMP_LAZY_MEMBER: 2036 case IGMP_SLEEPING_MEMBER: 2037 case IGMP_AWAKENING_MEMBER: 2038 /* 2039 * These states are either not relevant in v3 mode, 2040 * or are unreported. Do nothing. 2041 */ 2042 break; 2043 case IGMP_LEAVING_MEMBER: 2044 /* 2045 * If we are leaving the group and switching to 2046 * compatibility mode, we need to release the final 2047 * reference held for issuing the INCLUDE {}, and 2048 * transition to REPORTING to ensure the host leave 2049 * message is sent upstream to the old querier -- 2050 * transition to NOT would lose the leave and race. 2051 */ 2052 SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele); 2053 /* FALLTHROUGH */ 2054 case IGMP_G_QUERY_PENDING_MEMBER: 2055 case IGMP_SG_QUERY_PENDING_MEMBER: 2056 inm_clear_recorded(inm); 2057 /* FALLTHROUGH */ 2058 case IGMP_REPORTING_MEMBER: 2059 inm->inm_state = IGMP_REPORTING_MEMBER; 2060 break; 2061 } 2062 /* 2063 * Always clear state-change and group report timers. 2064 * Free any pending IGMPv3 state-change records. 
2065 */ 2066 inm->inm_sctimer = 0; 2067 inm->inm_timer = 0; 2068 _IF_DRAIN(&inm->inm_scq); 2069 } 2070 IF_ADDR_RUNLOCK(ifp); 2071 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) { 2072 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele); 2073 inm_release_locked(inm); 2074 } 2075} 2076 2077/* 2078 * Update the Older Version Querier Present timers for a link. 2079 * See Section 7.2.1 of RFC 3376. 2080 */ 2081static void 2082igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi) 2083{ 2084 2085 IGMP_LOCK_ASSERT(); 2086 2087 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) { 2088 /* 2089 * IGMPv1 and IGMPv2 Querier Present timers expired. 2090 * 2091 * Revert to IGMPv3. 2092 */ 2093 if (igi->igi_version != IGMP_VERSION_3) { 2094 CTR5(KTR_IGMPV3, 2095 "%s: transition from v%d -> v%d on %p(%s)", 2096 __func__, igi->igi_version, IGMP_VERSION_3, 2097 igi->igi_ifp, igi->igi_ifp->if_xname); 2098 igi->igi_version = IGMP_VERSION_3; 2099 } 2100 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) { 2101 /* 2102 * IGMPv1 Querier Present timer expired, 2103 * IGMPv2 Querier Present timer running. 2104 * If IGMPv2 was disabled since last timeout, 2105 * revert to IGMPv3. 2106 * If IGMPv2 is enabled, revert to IGMPv2. 2107 */ 2108 if (!V_igmp_v2enable) { 2109 CTR5(KTR_IGMPV3, 2110 "%s: transition from v%d -> v%d on %p(%s)", 2111 __func__, igi->igi_version, IGMP_VERSION_3, 2112 igi->igi_ifp, igi->igi_ifp->if_xname); 2113 igi->igi_v2_timer = 0; 2114 igi->igi_version = IGMP_VERSION_3; 2115 } else { 2116 --igi->igi_v2_timer; 2117 if (igi->igi_version != IGMP_VERSION_2) { 2118 CTR5(KTR_IGMPV3, 2119 "%s: transition from v%d -> v%d on %p(%s)", 2120 __func__, igi->igi_version, IGMP_VERSION_2, 2121 igi->igi_ifp, igi->igi_ifp->if_xname); 2122 igi->igi_version = IGMP_VERSION_2; 2123 } 2124 } 2125 } else if (igi->igi_v1_timer > 0) { 2126 /* 2127 * IGMPv1 Querier Present timer running. 2128 * Stop IGMPv2 timer if running. 2129 * 2130 * If IGMPv1 was disabled since last timeout, 2131 * revert to IGMPv3. 2132 * If IGMPv1 is enabled, reset IGMPv2 timer if running. 2133 */ 2134 if (!V_igmp_v1enable) { 2135 CTR5(KTR_IGMPV3, 2136 "%s: transition from v%d -> v%d on %p(%s)", 2137 __func__, igi->igi_version, IGMP_VERSION_3, 2138 igi->igi_ifp, igi->igi_ifp->if_xname); 2139 igi->igi_v1_timer = 0; 2140 igi->igi_version = IGMP_VERSION_3; 2141 } else { 2142 --igi->igi_v1_timer; 2143 } 2144 if (igi->igi_v2_timer > 0) { 2145 CTR3(KTR_IGMPV3, 2146 "%s: cancel v2 timer on %p(%s)", 2147 __func__, igi->igi_ifp, igi->igi_ifp->if_xname); 2148 igi->igi_v2_timer = 0; 2149 } 2150 } 2151} 2152 2153/* 2154 * Global slowtimo handler. 2155 * VIMAGE: Timeout handlers are expected to service all vimages. 2156 */ 2157void 2158igmp_slowtimo(void) 2159{ 2160 VNET_ITERATOR_DECL(vnet_iter); 2161 2162 VNET_LIST_RLOCK_NOSLEEP(); 2163 VNET_FOREACH(vnet_iter) { 2164 CURVNET_SET(vnet_iter); 2165 igmp_slowtimo_vnet(); 2166 CURVNET_RESTORE(); 2167 } 2168 VNET_LIST_RUNLOCK_NOSLEEP(); 2169} 2170 2171/* 2172 * Per-vnet slowtimo handler. 2173 */ 2174static void 2175igmp_slowtimo_vnet(void) 2176{ 2177 struct igmp_ifinfo *igi; 2178 2179 IGMP_LOCK(); 2180 2181 LIST_FOREACH(igi, &V_igi_head, igi_link) { 2182 igmp_v1v2_process_querier_timers(igi); 2183 } 2184 2185 IGMP_UNLOCK(); 2186} 2187 2188/* 2189 * Dispatch an IGMPv1/v2 host report or leave message. 2190 * These are always small enough to fit inside a single mbuf. 
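 *
 * For reference, the message built here is just eight bytes on the
 * wire (RFC 1112/RFC 2236): a one-byte type (the report or leave
 * type passed in), a one-byte code (zero for these messages), a
 * two-byte checksum covering only those eight bytes, and the
 * four-byte group address.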
*/ 2192static int 2193igmp_v1v2_queue_report(struct in_multi *inm, const int type) 2194{ 2195 struct ifnet *ifp; 2196 struct igmp *igmp; 2197 struct ip *ip; 2198 struct mbuf *m; 2199 2200 IN_MULTI_LOCK_ASSERT(); 2201 IGMP_LOCK_ASSERT(); 2202 2203 ifp = inm->inm_ifp; 2204 2205 MGETHDR(m, M_DONTWAIT, MT_DATA); 2206 if (m == NULL) 2207 return (ENOMEM); 2208 MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp)); 2209 2210 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp); 2211 2212 m->m_data += sizeof(struct ip); 2213 m->m_len = sizeof(struct igmp); 2214 2215 igmp = mtod(m, struct igmp *); 2216 igmp->igmp_type = type; 2217 igmp->igmp_code = 0; 2218 igmp->igmp_group = inm->inm_addr; 2219 igmp->igmp_cksum = 0; 2220 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp)); 2221 2222 m->m_data -= sizeof(struct ip); 2223 m->m_len += sizeof(struct ip); 2224 2225 ip = mtod(m, struct ip *); 2226 ip->ip_tos = 0; 2227 ip->ip_len = sizeof(struct ip) + sizeof(struct igmp); 2228 ip->ip_off = 0; 2229 ip->ip_p = IPPROTO_IGMP; 2230 ip->ip_src.s_addr = INADDR_ANY; 2231 2232 if (type == IGMP_HOST_LEAVE_MESSAGE) 2233 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP); 2234 else 2235 ip->ip_dst = inm->inm_addr; 2236 2237 igmp_save_context(m, ifp); 2238 2239 m->m_flags |= M_IGMPV2; 2240 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK) 2241 m->m_flags |= M_IGMP_LOOP; 2242 2243 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m); 2244 netisr_dispatch(NETISR_IGMP, m); 2245 2246 return (0); 2247} 2248 2249/* 2250 * Process a state change from the upper layer for the given IPv4 group. 2251 * 2252 * Each socket holds a reference on the in_multi in its own ip_moptions. 2253 * The socket layer will have made the necessary updates to the group 2254 * state; it is now up to IGMP to issue a state change report if there 2255 * has been any change between T0 (when the last state-change was issued) 2256 * and T1 (now). 2257 * 2258 * We use the IGMPv3 state machine at group level. The IGMP module 2259 * however makes the decision as to which IGMP protocol version to speak. 2260 * A state change *from* INCLUDE {} always means an initial join. 2261 * A state change *to* INCLUDE {} always means a final leave. 2262 * 2263 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can 2264 * save ourselves a bunch of work; any exclusive mode groups need not 2265 * compute source filter lists. 2266 * 2267 * VIMAGE: curvnet should have been set by caller, as this routine 2268 * is called from the socket option handlers. 2269 */ 2270int 2271igmp_change_state(struct in_multi *inm) 2272{ 2273 struct igmp_ifinfo *igi; 2274 struct ifnet *ifp; 2275 int error; 2276 2277 IN_MULTI_LOCK_ASSERT(); 2278 2279 error = 0; 2280 2281 /* 2282 * Try to detect if the upper layer just asked us to change state 2283 * for an interface which has now gone away. 2284 */ 2285 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); 2286 ifp = inm->inm_ifma->ifma_ifp; 2287 /* 2288 * Sanity check that netinet's notion of ifp is the 2289 * same as net's. 2290 */ 2291 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); 2292 2293 IGMP_LOCK(); 2294 2295 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp; 2296 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp)); 2297 2298 /* 2299 * If we detect a state transition to or from MCAST_UNDEFINED 2300 * for this group, then we are starting or finishing an IGMP 2301 * life cycle for this group.
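 *
 * Informally, with T0 and T1 denoting the filter mode before and
 * after the change, the dispatch below reduces to:
 *
 *   T0 == MCAST_UNDEFINED                      -> initial join
 *   T1 == MCAST_UNDEFINED                      -> final leave
 *   anything else (mode or filter set change)  -> state-change report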
2302 */ 2303 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) { 2304 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__, 2305 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode); 2306 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) { 2307 CTR1(KTR_IGMPV3, "%s: initial join", __func__); 2308 error = igmp_initial_join(inm, igi); 2309 goto out_locked; 2310 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) { 2311 CTR1(KTR_IGMPV3, "%s: final leave", __func__); 2312 igmp_final_leave(inm, igi); 2313 goto out_locked; 2314 } 2315 } else { 2316 CTR1(KTR_IGMPV3, "%s: filter set change", __func__); 2317 } 2318 2319 error = igmp_handle_state_change(inm, igi); 2320 2321out_locked: 2322 IGMP_UNLOCK(); 2323 return (error); 2324} 2325 2326/* 2327 * Perform the initial join for an IGMP group. 2328 * 2329 * When joining a group: 2330 * If the group should have its IGMP traffic suppressed, do nothing. 2331 * IGMPv1 starts sending IGMPv1 host membership reports. 2332 * IGMPv2 starts sending IGMPv2 host membership reports. 2333 * IGMPv3 will schedule an IGMPv3 state-change report containing the 2334 * initial state of the membership. 2335 */ 2336static int 2337igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi) 2338{ 2339 struct ifnet *ifp; 2340 struct ifqueue *ifq; 2341 int error, retval, syncstates; 2342 2343 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)", 2344 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2345 inm->inm_ifp->if_xname); 2346 2347 error = 0; 2348 syncstates = 1; 2349 2350 ifp = inm->inm_ifp; 2351 2352 IN_MULTI_LOCK_ASSERT(); 2353 IGMP_LOCK_ASSERT(); 2354 2355 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2356 2357 /* 2358 * Groups joined on loopback or marked as 'not reported', 2359 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and 2360 * are never reported in any IGMP protocol exchanges. 2361 * All other groups enter the appropriate IGMP state machine 2362 * for the version in use on this link. 2363 * A link marked as IGIF_SILENT causes IGMP to be completely 2364 * disabled for the link. 2365 */ 2366 if ((ifp->if_flags & IFF_LOOPBACK) || 2367 (igi->igi_flags & IGIF_SILENT) || 2368 !igmp_isgroupreported(inm->inm_addr)) { 2369 CTR1(KTR_IGMPV3, 2370"%s: not kicking state machine for silent group", __func__); 2371 inm->inm_state = IGMP_SILENT_MEMBER; 2372 inm->inm_timer = 0; 2373 } else { 2374 /* 2375 * Deal with overlapping in_multi lifecycle. 2376 * If this group was LEAVING, then make sure 2377 * we drop the reference we picked up to keep the 2378 * group around for the final INCLUDE {} enqueue. 2379 */ 2380 if (igi->igi_version == IGMP_VERSION_3 && 2381 inm->inm_state == IGMP_LEAVING_MEMBER) 2382 inm_release_locked(inm); 2383 2384 inm->inm_state = IGMP_REPORTING_MEMBER; 2385 2386 switch (igi->igi_version) { 2387 case IGMP_VERSION_1: 2388 case IGMP_VERSION_2: 2389 inm->inm_state = IGMP_IDLE_MEMBER; 2390 error = igmp_v1v2_queue_report(inm, 2391 (igi->igi_version == IGMP_VERSION_2) ? 2392 IGMP_v2_HOST_MEMBERSHIP_REPORT : 2393 IGMP_v1_HOST_MEMBERSHIP_REPORT); 2394 if (error == 0) { 2395 inm->inm_timer = IGMP_RANDOM_DELAY( 2396 IGMP_V1V2_MAX_RI * PR_FASTHZ); 2397 V_current_state_timers_running = 1; 2398 } 2399 break; 2400 2401 case IGMP_VERSION_3: 2402 /* 2403 * Defer update of T0 to T1, until the first copy 2404 * of the state change has been transmitted. 2405 */ 2406 syncstates = 0; 2407 2408 /* 2409 * Immediately enqueue a State-Change Report for 2410 * this interface, freeing any previous reports. 
2411 * Don't kick the timers if there is nothing to do, 2412 * or if an error occurred. 2413 */ 2414 ifq = &inm->inm_scq; 2415 _IF_DRAIN(ifq); 2416 retval = igmp_v3_enqueue_group_record(ifq, inm, 1, 2417 0, 0); 2418 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 2419 __func__, retval); 2420 if (retval <= 0) { 2421 error = retval * -1; 2422 break; 2423 } 2424 2425 /* 2426 * Schedule transmission of pending state-change 2427 * report up to RV times for this link. The timer 2428 * will fire at the next igmp_fasttimo (~200ms), 2429 * giving us an opportunity to merge the reports. 2430 */ 2431 if (igi->igi_flags & IGIF_LOOPBACK) { 2432 inm->inm_scrv = 1; 2433 } else { 2434 KASSERT(igi->igi_rv > 1, 2435 ("%s: invalid robustness %d", __func__, 2436 igi->igi_rv)); 2437 inm->inm_scrv = igi->igi_rv; 2438 } 2439 inm->inm_sctimer = 1; 2440 V_state_change_timers_running = 1; 2441 2442 error = 0; 2443 break; 2444 } 2445 } 2446 2447 /* 2448 * Only update the T0 state if state change is atomic, 2449 * i.e. we don't need to wait for a timer to fire before we 2450 * can consider the state change to have been communicated. 2451 */ 2452 if (syncstates) { 2453 inm_commit(inm); 2454 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2455 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2456 } 2457 2458 return (error); 2459} 2460 2461/* 2462 * Issue an intermediate state change during the IGMP life-cycle. 2463 */ 2464static int 2465igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi) 2466{ 2467 struct ifnet *ifp; 2468 int retval; 2469 2470 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)", 2471 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2472 inm->inm_ifp->if_xname); 2473 2474 ifp = inm->inm_ifp; 2475 2476 IN_MULTI_LOCK_ASSERT(); 2477 IGMP_LOCK_ASSERT(); 2478 2479 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__)); 2480 2481 if ((ifp->if_flags & IFF_LOOPBACK) || 2482 (igi->igi_flags & IGIF_SILENT) || 2483 !igmp_isgroupreported(inm->inm_addr) || 2484 (igi->igi_version != IGMP_VERSION_3)) { 2485 if (!igmp_isgroupreported(inm->inm_addr)) { 2486 CTR1(KTR_IGMPV3, 2487"%s: not kicking state machine for silent group", __func__); 2488 } 2489 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__); 2490 inm_commit(inm); 2491 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2492 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2493 return (0); 2494 } 2495 2496 _IF_DRAIN(&inm->inm_scq); 2497 2498 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0); 2499 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval); 2500 if (retval <= 0) 2501 return (-retval); 2502 2503 /* 2504 * If record(s) were enqueued, start the state-change 2505 * report timer for this group. 2506 */ 2507 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv); 2508 inm->inm_sctimer = 1; 2509 V_state_change_timers_running = 1; 2510 2511 return (0); 2512} 2513 2514/* 2515 * Perform the final leave for an IGMP group. 2516 * 2517 * When leaving a group: 2518 * IGMPv1 does nothing. 2519 * IGMPv2 sends a host leave message, if and only if we are the reporter. 2520 * IGMPv3 enqueues a state-change report containing a transition 2521 * to INCLUDE {} for immediate transmission. 
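 *
 * For example, on an IGMPv3 link using the default robustness
 * variable of 2 (an assumption for illustration; igi_rv is what is
 * actually consulted below), the leave is a single
 * CHANGE_TO_INCLUDE_MODE record with no sources ("TO_IN {}"),
 * transmitted twice, one fast timeout apart, before IGMP's reference
 * on the group is finally released.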
2522 */ 2523static void 2524igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi) 2525{ 2526 int syncstates; 2527 2528 syncstates = 1; 2529 2530 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)", 2531 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp, 2532 inm->inm_ifp->if_xname); 2533 2534 IN_MULTI_LOCK_ASSERT(); 2535 IGMP_LOCK_ASSERT(); 2536 2537 switch (inm->inm_state) { 2538 case IGMP_NOT_MEMBER: 2539 case IGMP_SILENT_MEMBER: 2540 case IGMP_LEAVING_MEMBER: 2541 /* Already leaving or left; do nothing. */ 2542 CTR1(KTR_IGMPV3, 2543"%s: not kicking state machine for silent group", __func__); 2544 break; 2545 case IGMP_REPORTING_MEMBER: 2546 case IGMP_IDLE_MEMBER: 2547 case IGMP_G_QUERY_PENDING_MEMBER: 2548 case IGMP_SG_QUERY_PENDING_MEMBER: 2549 if (igi->igi_version == IGMP_VERSION_2) { 2550#ifdef INVARIANTS 2551 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER || 2552 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) 2553 panic("%s: IGMPv3 state reached, not IGMPv3 mode", 2554 __func__); 2555#endif 2556 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE); 2557 inm->inm_state = IGMP_NOT_MEMBER; 2558 } else if (igi->igi_version == IGMP_VERSION_3) { 2559 /* 2560 * Stop group timer and all pending reports. 2561 * Immediately enqueue a state-change report 2562 * TO_IN {} to be sent on the next fast timeout, 2563 * giving us an opportunity to merge reports. 2564 */ 2565 _IF_DRAIN(&inm->inm_scq); 2566 inm->inm_timer = 0; 2567 if (igi->igi_flags & IGIF_LOOPBACK) { 2568 inm->inm_scrv = 1; 2569 } else { 2570 inm->inm_scrv = igi->igi_rv; 2571 } 2572 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d " 2573 "pending retransmissions.", __func__, 2574 inet_ntoa(inm->inm_addr), 2575 inm->inm_ifp->if_xname, inm->inm_scrv); 2576 if (inm->inm_scrv == 0) { 2577 inm->inm_state = IGMP_NOT_MEMBER; 2578 inm->inm_sctimer = 0; 2579 } else { 2580 int retval; 2581 2582 inm_acquire_locked(inm); 2583 2584 retval = igmp_v3_enqueue_group_record( 2585 &inm->inm_scq, inm, 1, 0, 0); 2586 KASSERT(retval != 0, 2587 ("%s: enqueue record = %d", __func__, 2588 retval)); 2589 2590 inm->inm_state = IGMP_LEAVING_MEMBER; 2591 inm->inm_sctimer = 1; 2592 V_state_change_timers_running = 1; 2593 syncstates = 0; 2594 } 2595 break; 2596 } 2597 break; 2598 case IGMP_LAZY_MEMBER: 2599 case IGMP_SLEEPING_MEMBER: 2600 case IGMP_AWAKENING_MEMBER: 2601 /* Our reports are suppressed; do nothing. */ 2602 break; 2603 } 2604 2605 if (syncstates) { 2606 inm_commit(inm); 2607 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__, 2608 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2609 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 2610 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s", 2611 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname); 2612 } 2613} 2614 2615/* 2616 * Enqueue an IGMPv3 group record to the given output queue. 2617 * 2618 * XXX This function could do with having the allocation code 2619 * split out, and the multiple-tree-walks coalesced into a single 2620 * routine as has been done in igmp_v3_enqueue_filter_change(). 2621 * 2622 * If is_state_change is zero, a current-state record is appended. 2623 * If is_state_change is non-zero, a state-change report is appended. 2624 * 2625 * If is_group_query is non-zero, an mbuf packet chain is allocated. 2626 * If is_group_query is zero, and if there is a packet with free space 2627 * at the tail of the queue, it will be appended to providing there 2628 * is enough free space. 2629 * Otherwise a new mbuf packet chain is allocated. 
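 *
 * Each record appended uses the fixed RFC 3376 group record layout:
 * a one-byte record type, a one-byte auxiliary data length (always
 * zero here), a two-byte source count, the four-byte group address,
 * and then zero or more four-byte source addresses.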
2630 * 2631 * If is_source_query is non-zero, each source is checked to see if 2632 * it was recorded for a Group-Source query, and will be omitted if 2633 * it is not both in-mode and recorded. 2634 * 2635 * The function will attempt to allocate leading space in the packet 2636 * for the IP/IGMP header to be prepended without fragmenting the chain. 2637 * 2638 * If successful the size of all data appended to the queue is returned, 2639 * otherwise an error code less than zero is returned, or zero if 2640 * no record(s) were appended. 2641 */ 2642static int 2643igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm, 2644 const int is_state_change, const int is_group_query, 2645 const int is_source_query) 2646{ 2647 struct igmp_grouprec ig; 2648 struct igmp_grouprec *pig; 2649 struct ifnet *ifp; 2650 struct ip_msource *ims, *nims; 2651 struct mbuf *m0, *m, *md; 2652 int error, is_filter_list_change; 2653 int minrec0len, m0srcs, msrcs, nbytes, off; 2654 int record_has_sources; 2655 int now; 2656 int type; 2657 in_addr_t naddr; 2658 uint8_t mode; 2659 2660 IN_MULTI_LOCK_ASSERT(); 2661 2662 error = 0; 2663 ifp = inm->inm_ifp; 2664 is_filter_list_change = 0; 2665 m = NULL; 2666 m0 = NULL; 2667 m0srcs = 0; 2668 msrcs = 0; 2669 nbytes = 0; 2670 nims = NULL; 2671 record_has_sources = 1; 2672 pig = NULL; 2673 type = IGMP_DO_NOTHING; 2674 mode = inm->inm_st[1].iss_fmode; 2675 2676 /* 2677 * If we did not transition out of ASM mode during t0->t1, 2678 * and there are no source nodes to process, we can skip 2679 * the generation of source records. 2680 */ 2681 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 && 2682 inm->inm_nsrc == 0) 2683 record_has_sources = 0; 2684 2685 if (is_state_change) { 2686 /* 2687 * Queue a state change record. 2688 * If the mode did not change, and there are non-ASM 2689 * listeners or source filters present, 2690 * we potentially need to issue two records for the group. 2691 * If we are transitioning to MCAST_UNDEFINED, we need 2692 * not send any sources. 2693 * If there are ASM listeners, and there was no filter 2694 * mode transition of any kind, do nothing. 2695 */ 2696 if (mode != inm->inm_st[0].iss_fmode) { 2697 if (mode == MCAST_EXCLUDE) { 2698 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE", 2699 __func__); 2700 type = IGMP_CHANGE_TO_EXCLUDE_MODE; 2701 } else { 2702 CTR1(KTR_IGMPV3, "%s: change to INCLUDE", 2703 __func__); 2704 type = IGMP_CHANGE_TO_INCLUDE_MODE; 2705 if (mode == MCAST_UNDEFINED) 2706 record_has_sources = 0; 2707 } 2708 } else { 2709 if (record_has_sources) { 2710 is_filter_list_change = 1; 2711 } else { 2712 type = IGMP_DO_NOTHING; 2713 } 2714 } 2715 } else { 2716 /* 2717 * Queue a current state record. 2718 */ 2719 if (mode == MCAST_EXCLUDE) { 2720 type = IGMP_MODE_IS_EXCLUDE; 2721 } else if (mode == MCAST_INCLUDE) { 2722 type = IGMP_MODE_IS_INCLUDE; 2723 KASSERT(inm->inm_st[1].iss_asm == 0, 2724 ("%s: inm %p is INCLUDE but ASM count is %d", 2725 __func__, inm, inm->inm_st[1].iss_asm)); 2726 } 2727 } 2728 2729 /* 2730 * Generate the filter list changes using a separate function. 2731 */ 2732 if (is_filter_list_change) 2733 return (igmp_v3_enqueue_filter_change(ifq, inm)); 2734 2735 if (type == IGMP_DO_NOTHING) { 2736 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s", 2737 __func__, inet_ntoa(inm->inm_addr), 2738 inm->inm_ifp->if_xname); 2739 return (0); 2740 } 2741 2742 /* 2743 * If any sources are present, we must be able to fit at least 2744 * one in the trailing space of the tail packet's mbuf, 2745 * ideally more. 
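 *
 * As a rough sketch (assuming a 1500-byte Ethernet MTU and the usual
 * 32-byte IGMP_LEADINGSPACE of IP header, Router Alert option and
 * report header): a freshly allocated packet has room for
 * (1500 - 32 - 8) / 4 = 365 source addresses after the eight-byte
 * group record header, so "at least one" is rarely the limit.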
2746 */ 2747 minrec0len = sizeof(struct igmp_grouprec); 2748 if (record_has_sources) 2749 minrec0len += sizeof(in_addr_t); 2750 2751 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__, 2752 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr), 2753 inm->inm_ifp->if_xname); 2754 2755 /* 2756 * Check if we have a packet in the tail of the queue for this 2757 * group into which the first group record for this group will fit. 2758 * Otherwise allocate a new packet. 2759 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT. 2760 * Note: Group records for G/GSR query responses MUST be sent 2761 * in their own packet. 2762 */ 2763 m0 = ifq->ifq_tail; 2764 if (!is_group_query && 2765 m0 != NULL && 2766 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) && 2767 (m0->m_pkthdr.len + minrec0len) < 2768 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 2769 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 2770 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2771 m = m0; 2772 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__); 2773 } else { 2774 if (_IF_QFULL(ifq)) { 2775 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2776 return (-ENOMEM); 2777 } 2778 m = NULL; 2779 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2780 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2781 if (!is_state_change && !is_group_query) { 2782 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2783 if (m) 2784 m->m_data += IGMP_LEADINGSPACE; 2785 } 2786 if (m == NULL) { 2787 m = m_gethdr(M_DONTWAIT, MT_DATA); 2788 if (m) 2789 MH_ALIGN(m, IGMP_LEADINGSPACE); 2790 } 2791 if (m == NULL) 2792 return (-ENOMEM); 2793 2794 igmp_save_context(m, ifp); 2795 2796 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__); 2797 } 2798 2799 /* 2800 * Append group record. 2801 * If we have sources, we don't know how many yet. 2802 */ 2803 ig.ig_type = type; 2804 ig.ig_datalen = 0; 2805 ig.ig_numsrc = 0; 2806 ig.ig_group = inm->inm_addr; 2807 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2808 if (m != m0) 2809 m_freem(m); 2810 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2811 return (-ENOMEM); 2812 } 2813 nbytes += sizeof(struct igmp_grouprec); 2814 2815 /* 2816 * Append as many sources as will fit in the first packet. 2817 * If we are appending to a new packet, the chain allocation 2818 * may potentially use clusters; use m_getptr() in this case. 2819 * If we are appending to an existing packet, we need to obtain 2820 * a pointer to the group record after m_append(), in case a new 2821 * mbuf was allocated. 2822 * Only append sources which are in-mode at t1. If we are 2823 * transitioning to MCAST_UNDEFINED state on the group, do not 2824 * include source entries. 2825 * Only report recorded sources in our filter set when responding 2826 * to a group-source query. 
2827 */ 2828 if (record_has_sources) { 2829 if (m == m0) { 2830 md = m_last(m); 2831 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2832 md->m_len - nbytes); 2833 } else { 2834 md = m_getptr(m, 0, &off); 2835 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + 2836 off); 2837 } 2838 msrcs = 0; 2839 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) { 2840 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2841 inet_ntoa_haddr(ims->ims_haddr)); 2842 now = ims_get_mode(inm, ims, 1); 2843 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now); 2844 if ((now != mode) || 2845 (now == mode && mode == MCAST_UNDEFINED)) { 2846 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2847 continue; 2848 } 2849 if (is_source_query && ims->ims_stp == 0) { 2850 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2851 __func__); 2852 continue; 2853 } 2854 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2855 naddr = htonl(ims->ims_haddr); 2856 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2857 if (m != m0) 2858 m_freem(m); 2859 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2860 __func__); 2861 return (-ENOMEM); 2862 } 2863 nbytes += sizeof(in_addr_t); 2864 ++msrcs; 2865 if (msrcs == m0srcs) 2866 break; 2867 } 2868 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__, 2869 msrcs); 2870 pig->ig_numsrc = htons(msrcs); 2871 nbytes += (msrcs * sizeof(in_addr_t)); 2872 } 2873 2874 if (is_source_query && msrcs == 0) { 2875 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__); 2876 if (m != m0) 2877 m_freem(m); 2878 return (0); 2879 } 2880 2881 /* 2882 * We are good to go with first packet. 2883 */ 2884 if (m != m0) { 2885 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__); 2886 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2887 _IF_ENQUEUE(ifq, m); 2888 } else 2889 m->m_pkthdr.PH_vt.vt_nrecs++; 2890 2891 /* 2892 * No further work needed if no source list in packet(s). 2893 */ 2894 if (!record_has_sources) 2895 return (nbytes); 2896 2897 /* 2898 * Whilst sources remain to be announced, we need to allocate 2899 * a new packet and fill out as many sources as will fit. 2900 * Always try for a cluster first. 
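 *
 * Continuing the arithmetic sketched earlier: a group with, say,
 * 1000 in-mode sources on a 1500-byte MTU link would be emitted as
 * three packets carrying roughly 365, 365 and 270 sources, each
 * packet repeating the group record header with its own source
 * count (the first packet holds fewer if it is shared with records
 * queued earlier).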
2901 */ 2902 while (nims != NULL) { 2903 if (_IF_QFULL(ifq)) { 2904 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__); 2905 return (-ENOMEM); 2906 } 2907 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 2908 if (m) 2909 m->m_data += IGMP_LEADINGSPACE; 2910 if (m == NULL) { 2911 m = m_gethdr(M_DONTWAIT, MT_DATA); 2912 if (m) 2913 MH_ALIGN(m, IGMP_LEADINGSPACE); 2914 } 2915 if (m == NULL) 2916 return (-ENOMEM); 2917 igmp_save_context(m, ifp); 2918 md = m_getptr(m, 0, &off); 2919 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off); 2920 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__); 2921 2922 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) { 2923 if (m != m0) 2924 m_freem(m); 2925 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__); 2926 return (-ENOMEM); 2927 } 2928 m->m_pkthdr.PH_vt.vt_nrecs = 1; 2929 nbytes += sizeof(struct igmp_grouprec); 2930 2931 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 2932 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t); 2933 2934 msrcs = 0; 2935 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 2936 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__, 2937 inet_ntoa_haddr(ims->ims_haddr)); 2938 now = ims_get_mode(inm, ims, 1); 2939 if ((now != mode) || 2940 (now == mode && mode == MCAST_UNDEFINED)) { 2941 CTR1(KTR_IGMPV3, "%s: skip node", __func__); 2942 continue; 2943 } 2944 if (is_source_query && ims->ims_stp == 0) { 2945 CTR1(KTR_IGMPV3, "%s: skip unrecorded node", 2946 __func__); 2947 continue; 2948 } 2949 CTR1(KTR_IGMPV3, "%s: append node", __func__); 2950 naddr = htonl(ims->ims_haddr); 2951 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) { 2952 if (m != m0) 2953 m_freem(m); 2954 CTR1(KTR_IGMPV3, "%s: m_append() failed.", 2955 __func__); 2956 return (-ENOMEM); 2957 } 2958 ++msrcs; 2959 if (msrcs == m0srcs) 2960 break; 2961 } 2962 pig->ig_numsrc = htons(msrcs); 2963 nbytes += (msrcs * sizeof(in_addr_t)); 2964 2965 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__); 2966 _IF_ENQUEUE(ifq, m); 2967 } 2968 2969 return (nbytes); 2970} 2971 2972/* 2973 * Type used to mark record pass completion. 2974 * We exploit the fact we can cast to this easily from the 2975 * current filter modes on each ip_msource node. 2976 */ 2977typedef enum { 2978 REC_NONE = 0x00, /* MCAST_UNDEFINED */ 2979 REC_ALLOW = 0x01, /* MCAST_INCLUDE */ 2980 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */ 2981 REC_FULL = REC_ALLOW | REC_BLOCK 2982} rectype_t; 2983 2984/* 2985 * Enqueue an IGMPv3 filter list change to the given output queue. 2986 * 2987 * Source list filter state is held in an RB-tree. When the filter list 2988 * for a group is changed without changing its mode, we need to compute 2989 * the deltas between T0 and T1 for each source in the filter set, 2990 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records. 2991 * 2992 * As we may potentially queue two record types, and the entire R-B tree 2993 * needs to be walked at once, we break this out into its own function 2994 * so we can generate a tightly packed queue of packets. 2995 * 2996 * XXX This could be written to only use one tree walk, although that makes 2997 * serializing into the mbuf chains a bit harder. For now we do two walks 2998 * which makes things easier on us, and it may or may not be harder on 2999 * the L2 cache. 3000 * 3001 * If successful the size of all data appended to the queue is returned, 3002 * otherwise an error code less than zero is returned, or zero if 3003 * no record(s) were appended. 
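 *
 * For example, if an INCLUDE-mode group's source filter changes from
 * {A, B} at t0 to {B, C} at t1, this function emits ALLOW_NEW_SOURCES
 * {C} and BLOCK_OLD_SOURCES {A}; B is unchanged and is not reported.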
3004 */ 3005static int 3006igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm) 3007{ 3008 static const int MINRECLEN = 3009 sizeof(struct igmp_grouprec) + sizeof(in_addr_t); 3010 struct ifnet *ifp; 3011 struct igmp_grouprec ig; 3012 struct igmp_grouprec *pig; 3013 struct ip_msource *ims, *nims; 3014 struct mbuf *m, *m0, *md; 3015 in_addr_t naddr; 3016 int m0srcs, nbytes, npbytes, off, rsrcs, schanged; 3017 int nallow, nblock; 3018 uint8_t mode, now, then; 3019 rectype_t crt, drt, nrt; 3020 3021 IN_MULTI_LOCK_ASSERT(); 3022 3023 if (inm->inm_nsrc == 0 || 3024 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0)) 3025 return (0); 3026 3027 ifp = inm->inm_ifp; /* interface */ 3028 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */ 3029 crt = REC_NONE; /* current group record type */ 3030 drt = REC_NONE; /* mask of completed group record types */ 3031 nrt = REC_NONE; /* record type for current node */ 3032 m0srcs = 0; /* # source which will fit in current mbuf chain */ 3033 nbytes = 0; /* # of bytes appended to group's state-change queue */ 3034 npbytes = 0; /* # of bytes appended this packet */ 3035 rsrcs = 0; /* # sources encoded in current record */ 3036 schanged = 0; /* # nodes encoded in overall filter change */ 3037 nallow = 0; /* # of source entries in ALLOW_NEW */ 3038 nblock = 0; /* # of source entries in BLOCK_OLD */ 3039 nims = NULL; /* next tree node pointer */ 3040 3041 /* 3042 * For each possible filter record mode. 3043 * The first kind of source we encounter tells us which 3044 * is the first kind of record we start appending. 3045 * If a node transitioned to UNDEFINED at t1, its mode is treated 3046 * as the inverse of the group's filter mode. 3047 */ 3048 while (drt != REC_FULL) { 3049 do { 3050 m0 = ifq->ifq_tail; 3051 if (m0 != NULL && 3052 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= 3053 IGMP_V3_REPORT_MAXRECS) && 3054 (m0->m_pkthdr.len + MINRECLEN) < 3055 (ifp->if_mtu - IGMP_LEADINGSPACE)) { 3056 m = m0; 3057 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len - 3058 sizeof(struct igmp_grouprec)) / 3059 sizeof(in_addr_t); 3060 CTR1(KTR_IGMPV3, 3061 "%s: use previous packet", __func__); 3062 } else { 3063 m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR); 3064 if (m) 3065 m->m_data += IGMP_LEADINGSPACE; 3066 if (m == NULL) { 3067 m = m_gethdr(M_DONTWAIT, MT_DATA); 3068 if (m) 3069 MH_ALIGN(m, IGMP_LEADINGSPACE); 3070 } 3071 if (m == NULL) { 3072 CTR1(KTR_IGMPV3, 3073 "%s: m_get*() failed", __func__); 3074 return (-ENOMEM); 3075 } 3076 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3077 igmp_save_context(m, ifp); 3078 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE - 3079 sizeof(struct igmp_grouprec)) / 3080 sizeof(in_addr_t); 3081 npbytes = 0; 3082 CTR1(KTR_IGMPV3, 3083 "%s: allocated new packet", __func__); 3084 } 3085 /* 3086 * Append the IGMP group record header to the 3087 * current packet's data area. 3088 * Recalculate pointer to free space for next 3089 * group record, in case m_append() allocated 3090 * a new mbuf or cluster. 
3091 */ 3092 memset(&ig, 0, sizeof(ig)); 3093 ig.ig_group = inm->inm_addr; 3094 if (!m_append(m, sizeof(ig), (void *)&ig)) { 3095 if (m != m0) 3096 m_freem(m); 3097 CTR1(KTR_IGMPV3, 3098 "%s: m_append() failed", __func__); 3099 return (-ENOMEM); 3100 } 3101 npbytes += sizeof(struct igmp_grouprec); 3102 if (m != m0) { 3103 /* new packet; offset in c hain */ 3104 md = m_getptr(m, npbytes - 3105 sizeof(struct igmp_grouprec), &off); 3106 pig = (struct igmp_grouprec *)(mtod(md, 3107 uint8_t *) + off); 3108 } else { 3109 /* current packet; offset from last append */ 3110 md = m_last(m); 3111 pig = (struct igmp_grouprec *)(mtod(md, 3112 uint8_t *) + md->m_len - 3113 sizeof(struct igmp_grouprec)); 3114 } 3115 /* 3116 * Begin walking the tree for this record type 3117 * pass, or continue from where we left off 3118 * previously if we had to allocate a new packet. 3119 * Only report deltas in-mode at t1. 3120 * We need not report included sources as allowed 3121 * if we are in inclusive mode on the group, 3122 * however the converse is not true. 3123 */ 3124 rsrcs = 0; 3125 if (nims == NULL) 3126 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs); 3127 RB_FOREACH_FROM(ims, ip_msource_tree, nims) { 3128 CTR2(KTR_IGMPV3, "%s: visit node %s", 3129 __func__, inet_ntoa_haddr(ims->ims_haddr)); 3130 now = ims_get_mode(inm, ims, 1); 3131 then = ims_get_mode(inm, ims, 0); 3132 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d", 3133 __func__, then, now); 3134 if (now == then) { 3135 CTR1(KTR_IGMPV3, 3136 "%s: skip unchanged", __func__); 3137 continue; 3138 } 3139 if (mode == MCAST_EXCLUDE && 3140 now == MCAST_INCLUDE) { 3141 CTR1(KTR_IGMPV3, 3142 "%s: skip IN src on EX group", 3143 __func__); 3144 continue; 3145 } 3146 nrt = (rectype_t)now; 3147 if (nrt == REC_NONE) 3148 nrt = (rectype_t)(~mode & REC_FULL); 3149 if (schanged++ == 0) { 3150 crt = nrt; 3151 } else if (crt != nrt) 3152 continue; 3153 naddr = htonl(ims->ims_haddr); 3154 if (!m_append(m, sizeof(in_addr_t), 3155 (void *)&naddr)) { 3156 if (m != m0) 3157 m_freem(m); 3158 CTR1(KTR_IGMPV3, 3159 "%s: m_append() failed", __func__); 3160 return (-ENOMEM); 3161 } 3162 nallow += !!(crt == REC_ALLOW); 3163 nblock += !!(crt == REC_BLOCK); 3164 if (++rsrcs == m0srcs) 3165 break; 3166 } 3167 /* 3168 * If we did not append any tree nodes on this 3169 * pass, back out of allocations. 3170 */ 3171 if (rsrcs == 0) { 3172 npbytes -= sizeof(struct igmp_grouprec); 3173 if (m != m0) { 3174 CTR1(KTR_IGMPV3, 3175 "%s: m_free(m)", __func__); 3176 m_freem(m); 3177 } else { 3178 CTR1(KTR_IGMPV3, 3179 "%s: m_adj(m, -ig)", __func__); 3180 m_adj(m, -((int)sizeof( 3181 struct igmp_grouprec))); 3182 } 3183 continue; 3184 } 3185 npbytes += (rsrcs * sizeof(in_addr_t)); 3186 if (crt == REC_ALLOW) 3187 pig->ig_type = IGMP_ALLOW_NEW_SOURCES; 3188 else if (crt == REC_BLOCK) 3189 pig->ig_type = IGMP_BLOCK_OLD_SOURCES; 3190 pig->ig_numsrc = htons(rsrcs); 3191 /* 3192 * Count the new group record, and enqueue this 3193 * packet if it wasn't already queued. 
3194 */ 3195 m->m_pkthdr.PH_vt.vt_nrecs++; 3196 if (m != m0) 3197 _IF_ENQUEUE(ifq, m); 3198 nbytes += npbytes; 3199 } while (nims != NULL); 3200 drt |= crt; 3201 crt = (~crt & REC_FULL); 3202 } 3203 3204 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__, 3205 nallow, nblock); 3206 3207 return (nbytes); 3208} 3209 3210static int 3211igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq) 3212{ 3213 struct ifqueue *gq; 3214 struct mbuf *m; /* pending state-change */ 3215 struct mbuf *m0; /* copy of pending state-change */ 3216 struct mbuf *mt; /* last state-change in packet */ 3217 int docopy, domerge; 3218 u_int recslen; 3219 3220 docopy = 0; 3221 domerge = 0; 3222 recslen = 0; 3223 3224 IN_MULTI_LOCK_ASSERT(); 3225 IGMP_LOCK_ASSERT(); 3226 3227 /* 3228 * If there are further pending retransmissions, make a writable 3229 * copy of each queued state-change message before merging. 3230 */ 3231 if (inm->inm_scrv > 0) 3232 docopy = 1; 3233 3234 gq = &inm->inm_scq; 3235#ifdef KTR 3236 if (gq->ifq_head == NULL) { 3237 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty", 3238 __func__, inm); 3239 } 3240#endif 3241 3242 m = gq->ifq_head; 3243 while (m != NULL) { 3244 /* 3245 * Only merge the report into the current packet if 3246 * there is sufficient space to do so; an IGMPv3 report 3247 * packet may only contain 65,535 group records. 3248 * Always use a simple mbuf chain concatentation to do this, 3249 * as large state changes for single groups may have 3250 * allocated clusters. 3251 */ 3252 domerge = 0; 3253 mt = ifscq->ifq_tail; 3254 if (mt != NULL) { 3255 recslen = m_length(m, NULL); 3256 3257 if ((mt->m_pkthdr.PH_vt.vt_nrecs + 3258 m->m_pkthdr.PH_vt.vt_nrecs <= 3259 IGMP_V3_REPORT_MAXRECS) && 3260 (mt->m_pkthdr.len + recslen <= 3261 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE))) 3262 domerge = 1; 3263 } 3264 3265 if (!domerge && _IF_QFULL(gq)) { 3266 CTR2(KTR_IGMPV3, 3267 "%s: outbound queue full, skipping whole packet %p", 3268 __func__, m); 3269 mt = m->m_nextpkt; 3270 if (!docopy) 3271 m_freem(m); 3272 m = mt; 3273 continue; 3274 } 3275 3276 if (!docopy) { 3277 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m); 3278 _IF_DEQUEUE(gq, m0); 3279 m = m0->m_nextpkt; 3280 } else { 3281 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m); 3282 m0 = m_dup(m, M_NOWAIT); 3283 if (m0 == NULL) 3284 return (ENOMEM); 3285 m0->m_nextpkt = NULL; 3286 m = m->m_nextpkt; 3287 } 3288 3289 if (!domerge) { 3290 CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p)", 3291 __func__, m0, ifscq); 3292 _IF_ENQUEUE(ifscq, m0); 3293 } else { 3294 struct mbuf *mtl; /* last mbuf of packet mt */ 3295 3296 CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p)", 3297 __func__, m0, mt); 3298 3299 mtl = m_last(mt); 3300 m0->m_flags &= ~M_PKTHDR; 3301 mt->m_pkthdr.len += recslen; 3302 mt->m_pkthdr.PH_vt.vt_nrecs += 3303 m0->m_pkthdr.PH_vt.vt_nrecs; 3304 3305 mtl->m_next = m0; 3306 } 3307 } 3308 3309 return (0); 3310} 3311 3312/* 3313 * Respond to a pending IGMPv3 General Query. 
3314 */ 3315static void 3316igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi) 3317{ 3318 struct ifmultiaddr *ifma; 3319 struct ifnet *ifp; 3320 struct in_multi *inm; 3321 int retval, loop; 3322 3323 IN_MULTI_LOCK_ASSERT(); 3324 IGMP_LOCK_ASSERT(); 3325 3326 KASSERT(igi->igi_version == IGMP_VERSION_3, 3327 ("%s: called when version %d", __func__, igi->igi_version)); 3328 3329 ifp = igi->igi_ifp; 3330 3331 IF_ADDR_RLOCK(ifp); 3332 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { 3333 if (ifma->ifma_addr->sa_family != AF_INET || 3334 ifma->ifma_protospec == NULL) 3335 continue; 3336 3337 inm = (struct in_multi *)ifma->ifma_protospec; 3338 KASSERT(ifp == inm->inm_ifp, 3339 ("%s: inconsistent ifp", __func__)); 3340 3341 switch (inm->inm_state) { 3342 case IGMP_NOT_MEMBER: 3343 case IGMP_SILENT_MEMBER: 3344 break; 3345 case IGMP_REPORTING_MEMBER: 3346 case IGMP_IDLE_MEMBER: 3347 case IGMP_LAZY_MEMBER: 3348 case IGMP_SLEEPING_MEMBER: 3349 case IGMP_AWAKENING_MEMBER: 3350 inm->inm_state = IGMP_REPORTING_MEMBER; 3351 retval = igmp_v3_enqueue_group_record(&igi->igi_gq, 3352 inm, 0, 0, 0); 3353 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", 3354 __func__, retval); 3355 break; 3356 case IGMP_G_QUERY_PENDING_MEMBER: 3357 case IGMP_SG_QUERY_PENDING_MEMBER: 3358 case IGMP_LEAVING_MEMBER: 3359 break; 3360 } 3361 } 3362 IF_ADDR_RUNLOCK(ifp); 3363 3364 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0; 3365 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop); 3366 3367 /* 3368 * Slew transmission of bursts over 500ms intervals. 3369 */ 3370 if (igi->igi_gq.ifq_head != NULL) { 3371 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY( 3372 IGMP_RESPONSE_BURST_INTERVAL); 3373 V_interface_timers_running = 1; 3374 } 3375} 3376 3377/* 3378 * Transmit the next pending IGMP message in the output queue. 3379 * 3380 * We get called from netisr_processqueue(). A mutex private to igmpoq 3381 * will be acquired and released around this routine. 3382 * 3383 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis. 3384 * MRT: Nothing needs to be done, as IGMP traffic is always local to 3385 * a link and uses a link-scope multicast address. 3386 */ 3387static void 3388igmp_intr(struct mbuf *m) 3389{ 3390 struct ip_moptions imo; 3391 struct ifnet *ifp; 3392 struct mbuf *ipopts, *m0; 3393 int error; 3394 uint32_t ifindex; 3395 3396 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m); 3397 3398 /* 3399 * Set VNET image pointer from enqueued mbuf chain 3400 * before doing anything else. Whilst we use interface 3401 * indexes to guard against interface detach, they are 3402 * unique to each VIMAGE and must be retrieved. 3403 */ 3404 CURVNET_SET((struct vnet *)(m->m_pkthdr.header)); 3405 ifindex = igmp_restore_context(m); 3406 3407 /* 3408 * Check if the ifnet still exists. This limits the scope of 3409 * any race in the absence of a global ifp lock for low cost 3410 * (an array lookup). 3411 */ 3412 ifp = ifnet_byindex(ifindex); 3413 if (ifp == NULL) { 3414 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.", 3415 __func__, m, ifindex); 3416 m_freem(m); 3417 IPSTAT_INC(ips_noroute); 3418 goto out; 3419 } 3420 3421 ipopts = V_igmp_sendra ? m_raopt : NULL; 3422 3423 imo.imo_multicast_ttl = 1; 3424 imo.imo_multicast_vif = -1; 3425 imo.imo_multicast_loop = (V_ip_mrouter != NULL); 3426 3427 /* 3428 * If the user requested that IGMP traffic be explicitly 3429 * redirected to the loopback interface (e.g. 
they are running a 3430 * MANET interface and the routing protocol needs to see the 3431 * updates), handle this now. 3432 */ 3433 if (m->m_flags & M_IGMP_LOOP) 3434 imo.imo_multicast_ifp = V_loif; 3435 else 3436 imo.imo_multicast_ifp = ifp; 3437 3438 if (m->m_flags & M_IGMPV2) { 3439 m0 = m; 3440 } else { 3441 m0 = igmp_v3_encap_report(ifp, m); 3442 if (m0 == NULL) { 3443 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m); 3444 m_freem(m); 3445 IPSTAT_INC(ips_odropped); 3446 goto out; 3447 } 3448 } 3449 3450 igmp_scrub_context(m0); 3451 m->m_flags &= ~(M_PROTOFLAGS); 3452 m0->m_pkthdr.rcvif = V_loif; 3453#ifdef MAC 3454 mac_netinet_igmp_send(ifp, m0); 3455#endif 3456 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL); 3457 if (error) { 3458 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error); 3459 goto out; 3460 } 3461 3462 IGMPSTAT_INC(igps_snd_reports); 3463 3464out: 3465 /* 3466 * We must restore the existing vnet pointer before 3467 * continuing as we are run from netisr context. 3468 */ 3469 CURVNET_RESTORE(); 3470} 3471 3472/* 3473 * Encapsulate an IGMPv3 report. 3474 * 3475 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf 3476 * chain has already had its IP/IGMPv3 header prepended. In this case 3477 * the function will not attempt to prepend; the lengths and checksums 3478 * will however be re-computed. 3479 * 3480 * Returns a pointer to the new mbuf chain head, or NULL if the 3481 * allocation failed. 3482 */ 3483static struct mbuf * 3484igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m) 3485{ 3486 struct igmp_report *igmp; 3487 struct ip *ip; 3488 int hdrlen, igmpreclen; 3489 3490 KASSERT((m->m_flags & M_PKTHDR), 3491 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m)); 3492 3493 igmpreclen = m_length(m, NULL); 3494 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report); 3495 3496 if (m->m_flags & M_IGMPV3_HDR) { 3497 igmpreclen -= hdrlen; 3498 } else { 3499 M_PREPEND(m, hdrlen, M_DONTWAIT); 3500 if (m == NULL) 3501 return (NULL); 3502 m->m_flags |= M_IGMPV3_HDR; 3503 } 3504 3505 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen); 3506 3507 m->m_data += sizeof(struct ip); 3508 m->m_len -= sizeof(struct ip); 3509 3510 igmp = mtod(m, struct igmp_report *); 3511 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT; 3512 igmp->ir_rsv1 = 0; 3513 igmp->ir_rsv2 = 0; 3514 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs); 3515 igmp->ir_cksum = 0; 3516 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen); 3517 m->m_pkthdr.PH_vt.vt_nrecs = 0; 3518 3519 m->m_data -= sizeof(struct ip); 3520 m->m_len += sizeof(struct ip); 3521 3522 ip = mtod(m, struct ip *); 3523 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL; 3524 ip->ip_len = hdrlen + igmpreclen; 3525 ip->ip_off = IP_DF; 3526 ip->ip_p = IPPROTO_IGMP; 3527 ip->ip_sum = 0; 3528 3529 ip->ip_src.s_addr = INADDR_ANY; 3530 3531 if (m->m_flags & M_IGMP_LOOP) { 3532 struct in_ifaddr *ia; 3533 3534 IFP_TO_IA(ifp, ia); 3535 if (ia != NULL) { 3536 ip->ip_src = ia->ia_addr.sin_addr; 3537 ifa_free(&ia->ia_ifa); 3538 } 3539 } 3540 3541 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP); 3542 3543 return (m); 3544} 3545 3546#ifdef KTR 3547static char * 3548igmp_rec_type_to_str(const int type) 3549{ 3550 3551 switch (type) { 3552 case IGMP_CHANGE_TO_EXCLUDE_MODE: 3553 return "TO_EX"; 3554 break; 3555 case IGMP_CHANGE_TO_INCLUDE_MODE: 3556 return "TO_IN"; 3557 break; 3558 case IGMP_MODE_IS_EXCLUDE: 3559 return "MODE_EX"; 3560 break; 3561 case IGMP_MODE_IS_INCLUDE: 3562 return "MODE_IN"; 3563 break; 3564 
case IGMP_ALLOW_NEW_SOURCES: 3565 return "ALLOW_NEW"; 3566 break; 3567 case IGMP_BLOCK_OLD_SOURCES: 3568 return "BLOCK_OLD"; 3569 break; 3570 default: 3571 break; 3572 } 3573 return "unknown"; 3574} 3575#endif 3576 3577static void 3578igmp_init(void *unused __unused) 3579{ 3580 3581 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3582 3583 IGMP_LOCK_INIT(); 3584 3585 m_raopt = igmp_ra_alloc(); 3586 3587 netisr_register(&igmp_nh); 3588} 3589SYSINIT(igmp_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_init, NULL); 3590 3591static void 3592igmp_uninit(void *unused __unused) 3593{ 3594 3595 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3596 3597 netisr_unregister(&igmp_nh); 3598 3599 m_free(m_raopt); 3600 m_raopt = NULL; 3601 3602 IGMP_LOCK_DESTROY(); 3603} 3604SYSUNINIT(igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_uninit, NULL); 3605 3606static void 3607vnet_igmp_init(const void *unused __unused) 3608{ 3609 3610 CTR1(KTR_IGMPV3, "%s: initializing", __func__); 3611 3612 LIST_INIT(&V_igi_head); 3613} 3614VNET_SYSINIT(vnet_igmp_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_igmp_init, 3615 NULL); 3616 3617static void 3618vnet_igmp_uninit(const void *unused __unused) 3619{ 3620 3621 CTR1(KTR_IGMPV3, "%s: tearing down", __func__); 3622 3623 KASSERT(LIST_EMPTY(&V_igi_head), 3624 ("%s: igi list not empty; ifnets not detached?", __func__)); 3625} 3626VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, 3627 vnet_igmp_uninit, NULL); 3628 3629static int 3630igmp_modevent(module_t mod, int type, void *unused __unused) 3631{ 3632 3633 switch (type) { 3634 case MOD_LOAD: 3635 case MOD_UNLOAD: 3636 break; 3637 default: 3638 return (EOPNOTSUPP); 3639 } 3640 return (0); 3641} 3642 3643static moduledata_t igmp_mod = { 3644 "igmp", 3645 igmp_modevent, 3646 0 3647}; 3648DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY); 3649
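
/*
 * An illustrative, user-space sketch (deliberately under #if 0, as it
 * is not part of the kernel build): the RFC 1071 Internet checksum
 * which in_cksum() computes over the eight-byte IGMPv1/v2 messages
 * built by igmp_v1v2_queue_report() above.  The message type and the
 * example group 224.0.0.251 are made-up values for the illustration.
 */
#if 0
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One's-complement sum over len bytes, folded to 16 bits (RFC 1071). */
static uint16_t
rfc1071_cksum(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t sum = 0;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len == 1)
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return ((uint16_t)~sum);
}

int
main(void)
{
	/* Type 0x16 (v2 membership report), code 0, checksum 0, group. */
	uint8_t msg[8] = { 0x16, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	uint32_t group = htonl(0xe00000fbU);	/* 224.0.0.251 */

	memcpy(&msg[4], &group, sizeof(group));
	printf("IGMP checksum: 0x%04x\n", rfc1071_cksum(msg, sizeof(msg)));
	return (0);
}
#endif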