1/*-
2 * Copyright (c) 2007-2009 Bruce Simpson.
3 * Copyright (c) 1988 Stephen Deering.
4 * Copyright (c) 1992, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Stephen Deering of Stanford University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 4. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * @(#)igmp.c 8.1 (Berkeley) 7/19/93
35 */
36
37/*
38 * Internet Group Management Protocol (IGMP) routines.
39 * [RFC1112, RFC2236, RFC3376]
40 *
41 * Written by Steve Deering, Stanford, May 1988.
42 * Modified by Rosen Sharma, Stanford, Aug 1994.
43 * Modified by Bill Fenner, Xerox PARC, Feb 1995.
44 * Modified to fully comply to IGMPv2 by Bill Fenner, Oct 1995.
45 * Significantly rewritten for IGMPv3, VIMAGE, and SMP by Bruce Simpson.
46 *
47 * MULTICAST Revision: 3.5.1.4
48 */
49
50#include <sys/cdefs.h>
51__FBSDID("$FreeBSD: head/sys/netinet/igmp.c 269699 2014-08-08 01:57:15Z kevlo $");
52
53#include <sys/param.h>
54#include <sys/systm.h>
55#include <sys/module.h>
56#include <sys/malloc.h>
57#include <sys/mbuf.h>
58#include <sys/socket.h>
59#include <sys/protosw.h>
60#include <sys/kernel.h>
61#include <sys/sysctl.h>
62#include <sys/ktr.h>
63#include <sys/condvar.h>
64
65#include <net/if.h>
66#include <net/if_var.h>
67#include <net/netisr.h>
68#include <net/vnet.h>
69
70#include <netinet/in.h>
71#include <netinet/in_var.h>
72#include <netinet/in_systm.h>
73#include <netinet/ip.h>
74#include <netinet/ip_var.h>
75#include <netinet/ip_options.h>
76#include <netinet/igmp.h>
77#include <netinet/igmp_var.h>
78
79#include <machine/in_cksum.h>
80
81#include <security/mac/mac_framework.h>
82
83#ifndef KTR_IGMPV3
84#define KTR_IGMPV3 KTR_INET
85#endif
86
87static struct igmp_ifinfo *
88 igi_alloc_locked(struct ifnet *);
89static void igi_delete_locked(const struct ifnet *);
90static void igmp_dispatch_queue(struct ifqueue *, int, const int);
91static void igmp_fasttimo_vnet(void);
92static void igmp_final_leave(struct in_multi *, struct igmp_ifinfo *);
93static int igmp_handle_state_change(struct in_multi *,
94 struct igmp_ifinfo *);
95static int igmp_initial_join(struct in_multi *, struct igmp_ifinfo *);
96static int igmp_input_v1_query(struct ifnet *, const struct ip *,
97 const struct igmp *);
98static int igmp_input_v2_query(struct ifnet *, const struct ip *,
99 const struct igmp *);
100static int igmp_input_v3_query(struct ifnet *, const struct ip *,
101 /*const*/ struct igmpv3 *);
102static int igmp_input_v3_group_query(struct in_multi *,
103 struct igmp_ifinfo *, int, /*const*/ struct igmpv3 *);
104static int igmp_input_v1_report(struct ifnet *, /*const*/ struct ip *,
105 /*const*/ struct igmp *);
106static int igmp_input_v2_report(struct ifnet *, /*const*/ struct ip *,
107 /*const*/ struct igmp *);
108static void igmp_intr(struct mbuf *);
109static int igmp_isgroupreported(const struct in_addr);
110static struct mbuf *
111 igmp_ra_alloc(void);
112#ifdef KTR
113static char * igmp_rec_type_to_str(const int);
114#endif
115static void igmp_set_version(struct igmp_ifinfo *, const int);
116static void igmp_slowtimo_vnet(void);
117static int igmp_v1v2_queue_report(struct in_multi *, const int);
118static void igmp_v1v2_process_group_timer(struct in_multi *, const int);
119static void igmp_v1v2_process_querier_timers(struct igmp_ifinfo *);
120static void igmp_v2_update_group(struct in_multi *, const int);
121static void igmp_v3_cancel_link_timers(struct igmp_ifinfo *);
122static void igmp_v3_dispatch_general_query(struct igmp_ifinfo *);
123static struct mbuf *
124 igmp_v3_encap_report(struct ifnet *, struct mbuf *);
125static int igmp_v3_enqueue_group_record(struct ifqueue *,
126 struct in_multi *, const int, const int, const int);
127static int igmp_v3_enqueue_filter_change(struct ifqueue *,
128 struct in_multi *);
129static void igmp_v3_process_group_timers(struct igmp_ifinfo *,
130 struct ifqueue *, struct ifqueue *, struct in_multi *,
131 const int);
132static int igmp_v3_merge_state_changes(struct in_multi *,
133 struct ifqueue *);
134static void igmp_v3_suppress_group_record(struct in_multi *);
135static int sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS);
136static int sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS);
137static int sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS);
138
139static const struct netisr_handler igmp_nh = {
140 .nh_name = "igmp",
141 .nh_handler = igmp_intr,
142 .nh_proto = NETISR_IGMP,
143 .nh_policy = NETISR_POLICY_SOURCE,
144};
145
146/*
147 * System-wide globals.
148 *
149 * Unlocked access to these is OK, except for the global IGMP output
150 * queue. The IGMP subsystem lock ends up being system-wide for the moment,
151 * because all VIMAGEs have to share a global output queue, as netisrs
152 * themselves are not virtualized.
153 *
154 * Locking:
155 * * The permitted lock order is: IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK.
156 * Any may be taken independently; if any are held at the same
157 * time, the above lock order must be followed.
158 * * All output is delegated to the netisr.
159 * Now that Giant has been eliminated, the netisr may be inlined.
160 * * IN_MULTI_LOCK covers in_multi.
161 * * IGMP_LOCK covers igmp_ifinfo and any global variables in this file,
162 * including the output queue.
163 * * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
164 * per-link state iterators.
165 * * igmp_ifinfo is valid as long as PF_INET is attached to the interface,
166 * therefore it is not refcounted.
167 * We allow unlocked reads of igmp_ifinfo when accessed via in_multi.
168 *
169 * Reference counting
170 * * IGMP acquires its own reference every time an in_multi is passed to
171 * it and the group is being joined for the first time.
172 * * IGMP releases its reference(s) on in_multi in a deferred way,
173 * because the operations which process the release run as part of
174 * a loop whose control variables are directly affected by the release
175 * (that, and not recursing on the IF_ADDR_LOCK).
176 *
177 * VIMAGE: Each in_multi corresponds to an ifp, and each ifp corresponds
178 * to a vnet in ifp->if_vnet.
179 *
180 * SMPng: XXX We may potentially race operations on ifma_protospec.
181 * The problem is that we currently lack a clean way of taking the
182 * IF_ADDR_LOCK() between the ifnet and in layers w/o recursing,
183 * as anything which modifies ifma needs to be covered by that lock.
184 * So check for ifma_protospec being NULL before proceeding.
185 */
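/*
 * Illustrative sketch of the permitted lock order described above, as it
 * is exercised by the query-processing paths below (see e.g.
 * igmp_input_v1_query()):
 *
 *	IN_MULTI_LOCK();
 *	IGMP_LOCK();
 *	IF_ADDR_RLOCK(ifp);
 *	... iterate ifp->if_multiaddrs ...
 *	IF_ADDR_RUNLOCK(ifp);
 *	IGMP_UNLOCK();
 *	IN_MULTI_UNLOCK();
 */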
186struct mtx igmp_mtx;
187
188struct mbuf *m_raopt; /* Router Alert option */
189static MALLOC_DEFINE(M_IGMP, "igmp", "igmp state");
190
191/*
192 * VIMAGE-wide globals.
193 *
 194 * The IGMPv3 timers themselves need to run per-image; however,
 195 * protosw timers run globally (see tcp).
196 * An ifnet can only be in one vimage at a time, and the loopback
197 * ifnet, loif, is itself virtualized.
198 * It would otherwise be possible to seriously hose IGMP state,
199 * and create inconsistencies in upstream multicast routing, if you have
200 * multiple VIMAGEs running on the same link joining different multicast
201 * groups, UNLESS the "primary IP address" is different. This is because
202 * IGMP for IPv4 does not force link-local addresses to be used for each
203 * node, unlike MLD for IPv6.
204 * Obviously the IGMPv3 per-interface state has per-vimage granularity
205 * also as a result.
206 *
207 * FUTURE: Stop using IFP_TO_IA/INADDR_ANY, and use source address selection
208 * policy to control the address used by IGMP on the link.
209 */
210static VNET_DEFINE(int, interface_timers_running); /* IGMPv3 general
211 * query response */
212static VNET_DEFINE(int, state_change_timers_running); /* IGMPv3 state-change
213 * retransmit */
214static VNET_DEFINE(int, current_state_timers_running); /* IGMPv1/v2 host
215 * report; IGMPv3 g/sg
216 * query response */
217
218#define V_interface_timers_running VNET(interface_timers_running)
219#define V_state_change_timers_running VNET(state_change_timers_running)
220#define V_current_state_timers_running VNET(current_state_timers_running)
221
222static VNET_DEFINE(LIST_HEAD(, igmp_ifinfo), igi_head);
223static VNET_DEFINE(struct igmpstat, igmpstat) = {
224 .igps_version = IGPS_VERSION_3,
225 .igps_len = sizeof(struct igmpstat),
226};
227static VNET_DEFINE(struct timeval, igmp_gsrdelay) = {10, 0};
228
229#define V_igi_head VNET(igi_head)
230#define V_igmpstat VNET(igmpstat)
231#define V_igmp_gsrdelay VNET(igmp_gsrdelay)
232
233static VNET_DEFINE(int, igmp_recvifkludge) = 1;
234static VNET_DEFINE(int, igmp_sendra) = 1;
235static VNET_DEFINE(int, igmp_sendlocal) = 1;
236static VNET_DEFINE(int, igmp_v1enable) = 1;
237static VNET_DEFINE(int, igmp_v2enable) = 1;
238static VNET_DEFINE(int, igmp_legacysupp);
239static VNET_DEFINE(int, igmp_default_version) = IGMP_VERSION_3;
240
241#define V_igmp_recvifkludge VNET(igmp_recvifkludge)
242#define V_igmp_sendra VNET(igmp_sendra)
243#define V_igmp_sendlocal VNET(igmp_sendlocal)
244#define V_igmp_v1enable VNET(igmp_v1enable)
245#define V_igmp_v2enable VNET(igmp_v2enable)
246#define V_igmp_legacysupp VNET(igmp_legacysupp)
247#define V_igmp_default_version VNET(igmp_default_version)
248
249/*
250 * Virtualized sysctls.
251 */
252SYSCTL_VNET_STRUCT(_net_inet_igmp, IGMPCTL_STATS, stats, CTLFLAG_RW,
253 &VNET_NAME(igmpstat), igmpstat, "");
254SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, recvifkludge, CTLFLAG_RW,
255 &VNET_NAME(igmp_recvifkludge), 0,
256 "Rewrite IGMPv1/v2 reports from 0.0.0.0 to contain subnet address");
257SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendra, CTLFLAG_RW,
258 &VNET_NAME(igmp_sendra), 0,
259 "Send IP Router Alert option in IGMPv2/v3 messages");
260SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, sendlocal, CTLFLAG_RW,
261 &VNET_NAME(igmp_sendlocal), 0,
262 "Send IGMP membership reports for 224.0.0.0/24 groups");
263SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v1enable, CTLFLAG_RW,
264 &VNET_NAME(igmp_v1enable), 0,
265 "Enable backwards compatibility with IGMPv1");
266SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, v2enable, CTLFLAG_RW,
267 &VNET_NAME(igmp_v2enable), 0,
268 "Enable backwards compatibility with IGMPv2");
269SYSCTL_VNET_INT(_net_inet_igmp, OID_AUTO, legacysupp, CTLFLAG_RW,
270 &VNET_NAME(igmp_legacysupp), 0,
271 "Allow v1/v2 reports to suppress v3 group responses");
272SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, default_version,
273 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
274 &VNET_NAME(igmp_default_version), 0, sysctl_igmp_default_version, "I",
275 "Default version of IGMP to run on each interface");
276SYSCTL_VNET_PROC(_net_inet_igmp, OID_AUTO, gsrdelay,
277 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
278 &VNET_NAME(igmp_gsrdelay.tv_sec), 0, sysctl_igmp_gsr, "I",
279 "Rate limit for IGMPv3 Group-and-Source queries in seconds");
280
281/*
282 * Non-virtualized sysctls.
283 */
284static SYSCTL_NODE(_net_inet_igmp, OID_AUTO, ifinfo,
285 CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_igmp_ifinfo,
286 "Per-interface IGMPv3 state");
287
288static __inline void
289igmp_save_context(struct mbuf *m, struct ifnet *ifp)
290{
291
292#ifdef VIMAGE
293 m->m_pkthdr.PH_loc.ptr = ifp->if_vnet;
294#endif /* VIMAGE */
295 m->m_pkthdr.flowid = ifp->if_index;
296}
297
298static __inline void
299igmp_scrub_context(struct mbuf *m)
300{
301
302 m->m_pkthdr.PH_loc.ptr = NULL;
303 m->m_pkthdr.flowid = 0;
304}
305
306#ifdef KTR
307static __inline char *
308inet_ntoa_haddr(in_addr_t haddr)
309{
310 struct in_addr ia;
311
312 ia.s_addr = htonl(haddr);
313 return (inet_ntoa(ia));
314}
315#endif
316
317/*
318 * Restore context from a queued IGMP output chain.
319 * Return saved ifindex.
320 *
321 * VIMAGE: The assertion is there to make sure that we
322 * actually called CURVNET_SET() with what's in the mbuf chain.
323 */
324static __inline uint32_t
325igmp_restore_context(struct mbuf *m)
326{
327
328#ifdef notyet
329#if defined(VIMAGE) && defined(INVARIANTS)
330 KASSERT(curvnet == (m->m_pkthdr.PH_loc.ptr),
331 ("%s: called when curvnet was not restored", __func__));
332#endif
333#endif
334 return (m->m_pkthdr.flowid);
335}
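/*
 * Illustrative round trip for the context helpers above (a sketch, not a
 * new code path): the transmit ifnet is carried across the netisr
 * boundary in the mbuf packet header:
 *
 *	igmp_save_context(m, ifp);		(before netisr_dispatch())
 *	...
 *	ifindex = igmp_restore_context(m);	(in the netisr handler)
 *	ifp = ifnet_byindex(ifindex);		(recover the ifnet)
 */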
336
337/*
338 * Retrieve or set default IGMP version.
339 *
340 * VIMAGE: Assume curvnet set by caller.
341 * SMPng: NOTE: Serialized by IGMP lock.
342 */
343static int
344sysctl_igmp_default_version(SYSCTL_HANDLER_ARGS)
345{
346 int error;
347 int new;
348
349 error = sysctl_wire_old_buffer(req, sizeof(int));
350 if (error)
351 return (error);
352
353 IGMP_LOCK();
354
355 new = V_igmp_default_version;
356
357 error = sysctl_handle_int(oidp, &new, 0, req);
358 if (error || !req->newptr)
359 goto out_locked;
360
361 if (new < IGMP_VERSION_1 || new > IGMP_VERSION_3) {
362 error = EINVAL;
363 goto out_locked;
364 }
365
366 CTR2(KTR_IGMPV3, "change igmp_default_version from %d to %d",
367 V_igmp_default_version, new);
368
369 V_igmp_default_version = new;
370
371out_locked:
372 IGMP_UNLOCK();
373 return (error);
374}
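/*
 * Example usage from userland (illustrative):
 *
 *	# sysctl net.inet.igmp.default_version=2
 *
 * Interfaces attached afterwards start in IGMPv2 mode (see
 * igi_alloc_locked()); values outside IGMP_VERSION_1..IGMP_VERSION_3
 * are rejected with EINVAL by the handler above.
 */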
375
376/*
377 * Retrieve or set threshold between group-source queries in seconds.
378 *
379 * VIMAGE: Assume curvnet set by caller.
380 * SMPng: NOTE: Serialized by IGMP lock.
381 */
382static int
383sysctl_igmp_gsr(SYSCTL_HANDLER_ARGS)
384{
385 int error;
386 int i;
387
388 error = sysctl_wire_old_buffer(req, sizeof(int));
389 if (error)
390 return (error);
391
392 IGMP_LOCK();
393
394 i = V_igmp_gsrdelay.tv_sec;
395
396 error = sysctl_handle_int(oidp, &i, 0, req);
397 if (error || !req->newptr)
398 goto out_locked;
399
400 if (i < -1 || i >= 60) {
401 error = EINVAL;
402 goto out_locked;
403 }
404
405 CTR2(KTR_IGMPV3, "change igmp_gsrdelay from %d to %d",
406 V_igmp_gsrdelay.tv_sec, i);
407 V_igmp_gsrdelay.tv_sec = i;
408
409out_locked:
410 IGMP_UNLOCK();
411 return (error);
412}
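/*
 * Example usage from userland (illustrative):
 *
 *	# sysctl net.inet.igmp.gsrdelay=10
 *
 * This sets the minimum interval, in seconds, enforced by ratecheck(9)
 * between responses to IGMPv3 Group-and-Source queries for any single
 * group; see igmp_input_v3_query().
 */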
413
414/*
415 * Expose struct igmp_ifinfo to userland, keyed by ifindex.
416 * For use by ifmcstat(8).
417 *
418 * SMPng: NOTE: Does an unlocked ifindex space read.
419 * VIMAGE: Assume curvnet set by caller. The node handler itself
420 * is not directly virtualized.
421 */
422static int
423sysctl_igmp_ifinfo(SYSCTL_HANDLER_ARGS)
424{
425 int *name;
426 int error;
427 u_int namelen;
428 struct ifnet *ifp;
429 struct igmp_ifinfo *igi;
430
431 name = (int *)arg1;
432 namelen = arg2;
433
434 if (req->newptr != NULL)
435 return (EPERM);
436
437 if (namelen != 1)
438 return (EINVAL);
439
440 error = sysctl_wire_old_buffer(req, sizeof(struct igmp_ifinfo));
441 if (error)
442 return (error);
443
444 IN_MULTI_LOCK();
445 IGMP_LOCK();
446
447 if (name[0] <= 0 || name[0] > V_if_index) {
448 error = ENOENT;
449 goto out_locked;
450 }
451
452 error = ENOENT;
453
454 ifp = ifnet_byindex(name[0]);
455 if (ifp == NULL)
456 goto out_locked;
457
458 LIST_FOREACH(igi, &V_igi_head, igi_link) {
459 if (ifp == igi->igi_ifp) {
460 error = SYSCTL_OUT(req, igi,
461 sizeof(struct igmp_ifinfo));
462 break;
463 }
464 }
465
466out_locked:
467 IGMP_UNLOCK();
468 IN_MULTI_UNLOCK();
469 return (error);
470}
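/*
 * Illustrative userland consumer, a sketch of how ifmcstat(8) can fetch
 * this node (the interface name and the omitted error handling are
 * placeholders):
 *
 *	int mib[5];
 *	size_t miblen = 4, len = sizeof(struct igmp_ifinfo);
 *	struct igmp_ifinfo igi;
 *
 *	sysctlnametomib("net.inet.igmp.ifinfo", mib, &miblen);
 *	mib[miblen] = if_nametoindex("em0");
 *	sysctl(mib, miblen + 1, &igi, &len, NULL, 0);
 */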
471
472/*
473 * Dispatch an entire queue of pending packet chains
474 * using the netisr.
475 * VIMAGE: Assumes the vnet pointer has been set.
476 */
477static void
478igmp_dispatch_queue(struct ifqueue *ifq, int limit, const int loop)
479{
480 struct mbuf *m;
481
482 for (;;) {
483 _IF_DEQUEUE(ifq, m);
484 if (m == NULL)
485 break;
 486 CTR3(KTR_IGMPV3, "%s: dispatch %p from %p", __func__, m, ifq);
487 if (loop)
488 m->m_flags |= M_IGMP_LOOP;
489 netisr_dispatch(NETISR_IGMP, m);
490 if (--limit == 0)
491 break;
492 }
493}
494
495/*
496 * Filter outgoing IGMP report state by group.
497 *
498 * Reports are ALWAYS suppressed for ALL-HOSTS (224.0.0.1).
499 * If the net.inet.igmp.sendlocal sysctl is 0, then IGMP reports are
500 * disabled for all groups in the 224.0.0.0/24 link-local scope. However,
501 * this may break certain IGMP snooping switches which rely on the old
502 * report behaviour.
503 *
504 * Return zero if the given group is one for which IGMP reports
505 * should be suppressed, or non-zero if reports should be issued.
506 */
507static __inline int
508igmp_isgroupreported(const struct in_addr addr)
509{
510
511 if (in_allhosts(addr) ||
512 ((!V_igmp_sendlocal && IN_LOCAL_GROUP(ntohl(addr.s_addr)))))
513 return (0);
514
515 return (1);
516}
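/*
 * Examples (illustrative): reports are never sent for 224.0.0.1
 * (all-hosts); with net.inet.igmp.sendlocal=0, a link-local group such
 * as 224.0.0.251 in 224.0.0.0/24 is likewise left unreported.
 */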
517
518/*
519 * Construct a Router Alert option to use in outgoing packets.
520 */
521static struct mbuf *
522igmp_ra_alloc(void)
523{
524 struct mbuf *m;
525 struct ipoption *p;
526
527 m = m_get(M_WAITOK, MT_DATA);
528 p = mtod(m, struct ipoption *);
529 p->ipopt_dst.s_addr = INADDR_ANY;
530 p->ipopt_list[0] = IPOPT_RA; /* Router Alert Option */
531 p->ipopt_list[1] = 0x04; /* 4 bytes long */
532 p->ipopt_list[2] = IPOPT_EOL; /* End of IP option list */
533 p->ipopt_list[3] = 0x00; /* pad byte */
534 m->m_len = sizeof(p->ipopt_dst) + p->ipopt_list[1];
535
536 return (m);
537}
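/*
 * The mbuf built above encodes the RFC 2113 Router Alert option on the
 * wire as the four octets 0x94 0x04 0x00 0x00: IPOPT_RA (option 20 with
 * the copied bit set), a length of 4, and a two-octet value of zero
 * ("router shall examine packet"). IPOPT_EOL and the trailing pad byte
 * happen to supply the two zero value octets.
 */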
538
539/*
540 * Attach IGMP when PF_INET is attached to an interface.
541 */
542struct igmp_ifinfo *
543igmp_domifattach(struct ifnet *ifp)
544{
545 struct igmp_ifinfo *igi;
546
547 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
548 __func__, ifp, ifp->if_xname);
549
550 IGMP_LOCK();
551
552 igi = igi_alloc_locked(ifp);
553 if (!(ifp->if_flags & IFF_MULTICAST))
554 igi->igi_flags |= IGIF_SILENT;
555
556 IGMP_UNLOCK();
557
558 return (igi);
559}
560
561/*
562 * VIMAGE: assume curvnet set by caller.
563 */
564static struct igmp_ifinfo *
565igi_alloc_locked(/*const*/ struct ifnet *ifp)
566{
567 struct igmp_ifinfo *igi;
568
569 IGMP_LOCK_ASSERT();
570
571 igi = malloc(sizeof(struct igmp_ifinfo), M_IGMP, M_NOWAIT|M_ZERO);
572 if (igi == NULL)
573 goto out;
574
575 igi->igi_ifp = ifp;
576 igi->igi_version = V_igmp_default_version;
577 igi->igi_flags = 0;
578 igi->igi_rv = IGMP_RV_INIT;
579 igi->igi_qi = IGMP_QI_INIT;
580 igi->igi_qri = IGMP_QRI_INIT;
581 igi->igi_uri = IGMP_URI_INIT;
582
583 SLIST_INIT(&igi->igi_relinmhead);
584
585 /*
586 * Responses to general queries are subject to bounds.
587 */
588 IFQ_SET_MAXLEN(&igi->igi_gq, IGMP_MAX_RESPONSE_PACKETS);
589
590 LIST_INSERT_HEAD(&V_igi_head, igi, igi_link);
591
592 CTR2(KTR_IGMPV3, "allocate igmp_ifinfo for ifp %p(%s)",
593 ifp, ifp->if_xname);
594
595out:
596 return (igi);
597}
598
599/*
600 * Hook for ifdetach.
601 *
602 * NOTE: Some finalization tasks need to run before the protocol domain
603 * is detached, but also before the link layer does its cleanup.
604 *
605 * SMPNG: igmp_ifdetach() needs to take IF_ADDR_LOCK().
606 * XXX This is also bitten by unlocked ifma_protospec access.
607 */
608void
609igmp_ifdetach(struct ifnet *ifp)
610{
611 struct igmp_ifinfo *igi;
612 struct ifmultiaddr *ifma;
613 struct in_multi *inm, *tinm;
614
615 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)", __func__, ifp,
616 ifp->if_xname);
617
618 IGMP_LOCK();
619
620 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
621 if (igi->igi_version == IGMP_VERSION_3) {
622 IF_ADDR_RLOCK(ifp);
623 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
624 if (ifma->ifma_addr->sa_family != AF_INET ||
625 ifma->ifma_protospec == NULL)
626 continue;
627#if 0
628 KASSERT(ifma->ifma_protospec != NULL,
629 ("%s: ifma_protospec is NULL", __func__));
630#endif
631 inm = (struct in_multi *)ifma->ifma_protospec;
632 if (inm->inm_state == IGMP_LEAVING_MEMBER) {
633 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
634 inm, inm_nrele);
635 }
636 inm_clear_recorded(inm);
637 }
638 IF_ADDR_RUNLOCK(ifp);
639 /*
640 * Free the in_multi reference(s) for this IGMP lifecycle.
641 */
642 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele,
643 tinm) {
644 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
645 inm_release_locked(inm);
646 }
647 }
648
649 IGMP_UNLOCK();
650}
651
652/*
653 * Hook for domifdetach.
654 */
655void
656igmp_domifdetach(struct ifnet *ifp)
657{
658 struct igmp_ifinfo *igi;
659
660 CTR3(KTR_IGMPV3, "%s: called for ifp %p(%s)",
661 __func__, ifp, ifp->if_xname);
662
663 IGMP_LOCK();
664
665 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
666 igi_delete_locked(ifp);
667
668 IGMP_UNLOCK();
669}
670
671static void
672igi_delete_locked(const struct ifnet *ifp)
673{
674 struct igmp_ifinfo *igi, *tigi;
675
676 CTR3(KTR_IGMPV3, "%s: freeing igmp_ifinfo for ifp %p(%s)",
677 __func__, ifp, ifp->if_xname);
678
679 IGMP_LOCK_ASSERT();
680
681 LIST_FOREACH_SAFE(igi, &V_igi_head, igi_link, tigi) {
682 if (igi->igi_ifp == ifp) {
683 /*
684 * Free deferred General Query responses.
685 */
686 _IF_DRAIN(&igi->igi_gq);
687
688 LIST_REMOVE(igi, igi_link);
689
690 KASSERT(SLIST_EMPTY(&igi->igi_relinmhead),
691 ("%s: there are dangling in_multi references",
692 __func__));
693
694 free(igi, M_IGMP);
695 return;
696 }
697 }
698
699#ifdef INVARIANTS
700 panic("%s: igmp_ifinfo not found for ifp %p\n", __func__, ifp);
701#endif
702}
703
704/*
705 * Process a received IGMPv1 query.
706 * Return non-zero if the message should be dropped.
707 *
708 * VIMAGE: The curvnet pointer is derived from the input ifp.
709 */
710static int
711igmp_input_v1_query(struct ifnet *ifp, const struct ip *ip,
712 const struct igmp *igmp)
713{
714 struct ifmultiaddr *ifma;
715 struct igmp_ifinfo *igi;
716 struct in_multi *inm;
717
718 /*
 719 * IGMPv1 Host Membership Queries SHOULD always be addressed to
720 * 224.0.0.1. They are always treated as General Queries.
721 * igmp_group is always ignored. Do not drop it as a userland
722 * daemon may wish to see it.
723 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
724 */
725 if (!in_allhosts(ip->ip_dst) || !in_nullhost(igmp->igmp_group)) {
726 IGMPSTAT_INC(igps_rcv_badqueries);
727 return (0);
728 }
729 IGMPSTAT_INC(igps_rcv_gen_queries);
730
731 IN_MULTI_LOCK();
732 IGMP_LOCK();
733
734 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
735 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
736
737 if (igi->igi_flags & IGIF_LOOPBACK) {
738 CTR2(KTR_IGMPV3, "ignore v1 query on IGIF_LOOPBACK ifp %p(%s)",
739 ifp, ifp->if_xname);
740 goto out_locked;
741 }
742
743 /*
744 * Switch to IGMPv1 host compatibility mode.
745 */
746 igmp_set_version(igi, IGMP_VERSION_1);
747
748 CTR2(KTR_IGMPV3, "process v1 query on ifp %p(%s)", ifp, ifp->if_xname);
749
750 /*
751 * Start the timers in all of our group records
752 * for the interface on which the query arrived,
753 * except those which are already running.
754 */
755 IF_ADDR_RLOCK(ifp);
756 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
757 if (ifma->ifma_addr->sa_family != AF_INET ||
758 ifma->ifma_protospec == NULL)
759 continue;
760 inm = (struct in_multi *)ifma->ifma_protospec;
761 if (inm->inm_timer != 0)
762 continue;
763 switch (inm->inm_state) {
764 case IGMP_NOT_MEMBER:
765 case IGMP_SILENT_MEMBER:
766 break;
767 case IGMP_G_QUERY_PENDING_MEMBER:
768 case IGMP_SG_QUERY_PENDING_MEMBER:
769 case IGMP_REPORTING_MEMBER:
770 case IGMP_IDLE_MEMBER:
771 case IGMP_LAZY_MEMBER:
772 case IGMP_SLEEPING_MEMBER:
773 case IGMP_AWAKENING_MEMBER:
774 inm->inm_state = IGMP_REPORTING_MEMBER;
775 inm->inm_timer = IGMP_RANDOM_DELAY(
776 IGMP_V1V2_MAX_RI * PR_FASTHZ);
777 V_current_state_timers_running = 1;
778 break;
779 case IGMP_LEAVING_MEMBER:
780 break;
781 }
782 }
783 IF_ADDR_RUNLOCK(ifp);
784
785out_locked:
786 IGMP_UNLOCK();
787 IN_MULTI_UNLOCK();
788
789 return (0);
790}
791
792/*
793 * Process a received IGMPv2 general or group-specific query.
794 */
795static int
796igmp_input_v2_query(struct ifnet *ifp, const struct ip *ip,
797 const struct igmp *igmp)
798{
799 struct ifmultiaddr *ifma;
800 struct igmp_ifinfo *igi;
801 struct in_multi *inm;
802 int is_general_query;
803 uint16_t timer;
804
805 is_general_query = 0;
806
807 /*
808 * Validate address fields upfront.
809 * XXX SMPng: unlocked increments in igmpstat assumed atomic.
810 */
811 if (in_nullhost(igmp->igmp_group)) {
812 /*
813 * IGMPv2 General Query.
814 * If this was not sent to the all-hosts group, ignore it.
815 */
816 if (!in_allhosts(ip->ip_dst))
817 return (0);
818 IGMPSTAT_INC(igps_rcv_gen_queries);
819 is_general_query = 1;
820 } else {
821 /* IGMPv2 Group-Specific Query. */
822 IGMPSTAT_INC(igps_rcv_group_queries);
823 }
824
825 IN_MULTI_LOCK();
826 IGMP_LOCK();
827
828 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
829 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
830
831 if (igi->igi_flags & IGIF_LOOPBACK) {
832 CTR2(KTR_IGMPV3, "ignore v2 query on IGIF_LOOPBACK ifp %p(%s)",
833 ifp, ifp->if_xname);
834 goto out_locked;
835 }
836
837 /*
838 * Ignore v2 query if in v1 Compatibility Mode.
839 */
840 if (igi->igi_version == IGMP_VERSION_1)
841 goto out_locked;
842
843 igmp_set_version(igi, IGMP_VERSION_2);
844
845 timer = igmp->igmp_code * PR_FASTHZ / IGMP_TIMER_SCALE;
846 if (timer == 0)
847 timer = 1;
848
849 if (is_general_query) {
850 /*
851 * For each reporting group joined on this
852 * interface, kick the report timer.
853 */
854 CTR2(KTR_IGMPV3, "process v2 general query on ifp %p(%s)",
855 ifp, ifp->if_xname);
856 IF_ADDR_RLOCK(ifp);
857 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
858 if (ifma->ifma_addr->sa_family != AF_INET ||
859 ifma->ifma_protospec == NULL)
860 continue;
861 inm = (struct in_multi *)ifma->ifma_protospec;
862 igmp_v2_update_group(inm, timer);
863 }
864 IF_ADDR_RUNLOCK(ifp);
865 } else {
866 /*
867 * Group-specific IGMPv2 query, we need only
868 * look up the single group to process it.
869 */
870 inm = inm_lookup(ifp, igmp->igmp_group);
871 if (inm != NULL) {
872 CTR3(KTR_IGMPV3, "process v2 query %s on ifp %p(%s)",
873 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
874 igmp_v2_update_group(inm, timer);
875 }
876 }
877
878out_locked:
879 IGMP_UNLOCK();
880 IN_MULTI_UNLOCK();
881
882 return (0);
883}
884
885/*
886 * Update the report timer on a group in response to an IGMPv2 query.
887 *
888 * If we are becoming the reporting member for this group, start the timer.
889 * If we already are the reporting member for this group, and timer is
890 * below the threshold, reset it.
891 *
892 * We may be updating the group for the first time since we switched
893 * to IGMPv3. If we are, then we must clear any recorded source lists,
894 * and transition to REPORTING state; the group timer is overloaded
895 * for group and group-source query responses.
896 *
897 * Unlike IGMPv3, the delay per group should be jittered
898 * to avoid bursts of IGMPv2 reports.
899 */
900static void
901igmp_v2_update_group(struct in_multi *inm, const int timer)
902{
903
904 CTR4(KTR_IGMPV3, "%s: %s/%s timer=%d", __func__,
905 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname, timer);
906
907 IN_MULTI_LOCK_ASSERT();
908
909 switch (inm->inm_state) {
910 case IGMP_NOT_MEMBER:
911 case IGMP_SILENT_MEMBER:
912 break;
913 case IGMP_REPORTING_MEMBER:
914 if (inm->inm_timer != 0 &&
915 inm->inm_timer <= timer) {
916 CTR1(KTR_IGMPV3, "%s: REPORTING and timer running, "
917 "skipping.", __func__);
918 break;
919 }
920 /* FALLTHROUGH */
921 case IGMP_SG_QUERY_PENDING_MEMBER:
922 case IGMP_G_QUERY_PENDING_MEMBER:
923 case IGMP_IDLE_MEMBER:
924 case IGMP_LAZY_MEMBER:
925 case IGMP_AWAKENING_MEMBER:
926 CTR1(KTR_IGMPV3, "%s: ->REPORTING", __func__);
927 inm->inm_state = IGMP_REPORTING_MEMBER;
928 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
929 V_current_state_timers_running = 1;
930 break;
931 case IGMP_SLEEPING_MEMBER:
932 CTR1(KTR_IGMPV3, "%s: ->AWAKENING", __func__);
933 inm->inm_state = IGMP_AWAKENING_MEMBER;
934 break;
935 case IGMP_LEAVING_MEMBER:
936 break;
937 }
938}
939
940/*
941 * Process a received IGMPv3 general, group-specific or
942 * group-and-source-specific query.
943 * Assumes m has already been pulled up to the full IGMP message length.
944 * Return 0 if successful, otherwise an appropriate error code is returned.
945 */
946static int
947igmp_input_v3_query(struct ifnet *ifp, const struct ip *ip,
948 /*const*/ struct igmpv3 *igmpv3)
949{
950 struct igmp_ifinfo *igi;
951 struct in_multi *inm;
952 int is_general_query;
953 uint32_t maxresp, nsrc, qqi;
954 uint16_t timer;
955 uint8_t qrv;
956
957 is_general_query = 0;
958
959 CTR2(KTR_IGMPV3, "process v3 query on ifp %p(%s)", ifp, ifp->if_xname);
960
961 maxresp = igmpv3->igmp_code; /* in 1/10ths of a second */
962 if (maxresp >= 128) {
963 maxresp = IGMP_MANT(igmpv3->igmp_code) <<
964 (IGMP_EXP(igmpv3->igmp_code) + 3);
965 }
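	/*
	 * Informative note: codes >= 128 use the floating-point encoding
	 * of RFC 3376, Section 4.1.1 (a 3-bit exponent and a 4-bit
	 * mantissa, unpacked by IGMP_EXP()/IGMP_MANT()). The same decode
	 * is applied to igmp_qqi below.
	 */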
966
967 /*
968 * Robustness must never be less than 2 for on-wire IGMPv3.
969 * FUTURE: Check if ifp has IGIF_LOOPBACK set, as we will make
970 * an exception for interfaces whose IGMPv3 state changes
971 * are redirected to loopback (e.g. MANET).
972 */
973 qrv = IGMP_QRV(igmpv3->igmp_misc);
974 if (qrv < 2) {
975 CTR3(KTR_IGMPV3, "%s: clamping qrv %d to %d", __func__,
976 qrv, IGMP_RV_INIT);
977 qrv = IGMP_RV_INIT;
978 }
979
980 qqi = igmpv3->igmp_qqi;
981 if (qqi >= 128) {
982 qqi = IGMP_MANT(igmpv3->igmp_qqi) <<
983 (IGMP_EXP(igmpv3->igmp_qqi) + 3);
984 }
985
986 timer = maxresp * PR_FASTHZ / IGMP_TIMER_SCALE;
987 if (timer == 0)
988 timer = 1;
989
990 nsrc = ntohs(igmpv3->igmp_numsrc);
991
992 /*
993 * Validate address fields and versions upfront before
994 * accepting v3 query.
995 * XXX SMPng: Unlocked access to igmpstat counters here.
996 */
997 if (in_nullhost(igmpv3->igmp_group)) {
998 /*
999 * IGMPv3 General Query.
1000 *
1001 * General Queries SHOULD be directed to 224.0.0.1.
1002 * A general query with a source list has undefined
1003 * behaviour; discard it.
1004 */
1005 IGMPSTAT_INC(igps_rcv_gen_queries);
1006 if (!in_allhosts(ip->ip_dst) || nsrc > 0) {
1007 IGMPSTAT_INC(igps_rcv_badqueries);
1008 return (0);
1009 }
1010 is_general_query = 1;
1011 } else {
1012 /* Group or group-source specific query. */
1013 if (nsrc == 0)
1014 IGMPSTAT_INC(igps_rcv_group_queries);
1015 else
1016 IGMPSTAT_INC(igps_rcv_gsr_queries);
1017 }
1018
1019 IN_MULTI_LOCK();
1020 IGMP_LOCK();
1021
1022 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
1023 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
1024
1025 if (igi->igi_flags & IGIF_LOOPBACK) {
1026 CTR2(KTR_IGMPV3, "ignore v3 query on IGIF_LOOPBACK ifp %p(%s)",
1027 ifp, ifp->if_xname);
1028 goto out_locked;
1029 }
1030
1031 /*
1032 * Discard the v3 query if we're in Compatibility Mode.
 1033 * The RFC is not clearly worded on whether hosts must stay in
 1034 * compatibility mode until the Old Version Querier Present
 1035 * timer expires.
1036 */
1037 if (igi->igi_version != IGMP_VERSION_3) {
1038 CTR3(KTR_IGMPV3, "ignore v3 query in v%d mode on ifp %p(%s)",
1039 igi->igi_version, ifp, ifp->if_xname);
1040 goto out_locked;
1041 }
1042
1043 igmp_set_version(igi, IGMP_VERSION_3);
1044 igi->igi_rv = qrv;
1045 igi->igi_qi = qqi;
1046 igi->igi_qri = maxresp;
1047
1048 CTR4(KTR_IGMPV3, "%s: qrv %d qi %d qri %d", __func__, qrv, qqi,
1049 maxresp);
1050
1051 if (is_general_query) {
1052 /*
1053 * Schedule a current-state report on this ifp for
1054 * all groups, possibly containing source lists.
1055 * If there is a pending General Query response
1056 * scheduled earlier than the selected delay, do
1057 * not schedule any other reports.
1058 * Otherwise, reset the interface timer.
1059 */
1060 CTR2(KTR_IGMPV3, "process v3 general query on ifp %p(%s)",
1061 ifp, ifp->if_xname);
1062 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer) {
1063 igi->igi_v3_timer = IGMP_RANDOM_DELAY(timer);
1064 V_interface_timers_running = 1;
1065 }
1066 } else {
1067 /*
1068 * Group-source-specific queries are throttled on
1069 * a per-group basis to defeat denial-of-service attempts.
1070 * Queries for groups we are not a member of on this
1071 * link are simply ignored.
1072 */
1073 inm = inm_lookup(ifp, igmpv3->igmp_group);
1074 if (inm == NULL)
1075 goto out_locked;
1076 if (nsrc > 0) {
1077 if (!ratecheck(&inm->inm_lastgsrtv,
1078 &V_igmp_gsrdelay)) {
1079 CTR1(KTR_IGMPV3, "%s: GS query throttled.",
1080 __func__);
1081 IGMPSTAT_INC(igps_drop_gsr_queries);
1082 goto out_locked;
1083 }
1084 }
1085 CTR3(KTR_IGMPV3, "process v3 %s query on ifp %p(%s)",
1086 inet_ntoa(igmpv3->igmp_group), ifp, ifp->if_xname);
1087 /*
1088 * If there is a pending General Query response
1089 * scheduled sooner than the selected delay, no
1090 * further report need be scheduled.
1091 * Otherwise, prepare to respond to the
1092 * group-specific or group-and-source query.
1093 */
1094 if (igi->igi_v3_timer == 0 || igi->igi_v3_timer >= timer)
1095 igmp_input_v3_group_query(inm, igi, timer, igmpv3);
1096 }
1097
1098out_locked:
1099 IGMP_UNLOCK();
1100 IN_MULTI_UNLOCK();
1101
1102 return (0);
1103}
1104
1105/*
 1106 * Process a received IGMPv3 group-specific or group-and-source-specific
 1107 * query.
 1108 * Return <0 if any error occurred. Currently this is ignored.
1109 */
1110static int
1111igmp_input_v3_group_query(struct in_multi *inm, struct igmp_ifinfo *igi,
1112 int timer, /*const*/ struct igmpv3 *igmpv3)
1113{
1114 int retval;
1115 uint16_t nsrc;
1116
1117 IN_MULTI_LOCK_ASSERT();
1118 IGMP_LOCK_ASSERT();
1119
1120 retval = 0;
1121
1122 switch (inm->inm_state) {
1123 case IGMP_NOT_MEMBER:
1124 case IGMP_SILENT_MEMBER:
1125 case IGMP_SLEEPING_MEMBER:
1126 case IGMP_LAZY_MEMBER:
1127 case IGMP_AWAKENING_MEMBER:
1128 case IGMP_IDLE_MEMBER:
1129 case IGMP_LEAVING_MEMBER:
 1130 return (retval);
1132 case IGMP_REPORTING_MEMBER:
1133 case IGMP_G_QUERY_PENDING_MEMBER:
1134 case IGMP_SG_QUERY_PENDING_MEMBER:
1135 break;
1136 }
1137
1138 nsrc = ntohs(igmpv3->igmp_numsrc);
1139
1140 /*
1141 * Deal with group-specific queries upfront.
1142 * If any group query is already pending, purge any recorded
1143 * source-list state if it exists, and schedule a query response
1144 * for this group-specific query.
1145 */
1146 if (nsrc == 0) {
1147 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
1148 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER) {
1149 inm_clear_recorded(inm);
1150 timer = min(inm->inm_timer, timer);
1151 }
1152 inm->inm_state = IGMP_G_QUERY_PENDING_MEMBER;
1153 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1154 V_current_state_timers_running = 1;
1155 return (retval);
1156 }
1157
1158 /*
1159 * Deal with the case where a group-and-source-specific query has
1160 * been received but a group-specific query is already pending.
1161 */
1162 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER) {
1163 timer = min(inm->inm_timer, timer);
1164 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1165 V_current_state_timers_running = 1;
1166 return (retval);
1167 }
1168
1169 /*
1170 * Finally, deal with the case where a group-and-source-specific
1171 * query has been received, where a response to a previous g-s-r
1172 * query exists, or none exists.
1173 * In this case, we need to parse the source-list which the Querier
1174 * has provided us with and check if we have any source list filter
 1175 * entries at T1 for these sources. If we do not, there is no need
 1176 * to schedule a report and the query may be dropped.
1177 * If we do, we must record them and schedule a current-state
1178 * report for those sources.
1179 * FIXME: Handling source lists larger than 1 mbuf requires that
1180 * we pass the mbuf chain pointer down to this function, and use
1181 * m_getptr() to walk the chain.
1182 */
1183 if (inm->inm_nsrc > 0) {
1184 const struct in_addr *ap;
1185 int i, nrecorded;
1186
1187 ap = (const struct in_addr *)(igmpv3 + 1);
1188 nrecorded = 0;
1189 for (i = 0; i < nsrc; i++, ap++) {
1190 retval = inm_record_source(inm, ap->s_addr);
1191 if (retval < 0)
1192 break;
1193 nrecorded += retval;
1194 }
1195 if (nrecorded > 0) {
1196 CTR1(KTR_IGMPV3,
1197 "%s: schedule response to SG query", __func__);
1198 inm->inm_state = IGMP_SG_QUERY_PENDING_MEMBER;
1199 inm->inm_timer = IGMP_RANDOM_DELAY(timer);
1200 V_current_state_timers_running = 1;
1201 }
1202 }
1203
1204 return (retval);
1205}
1206
1207/*
1208 * Process a received IGMPv1 host membership report.
1209 *
1210 * NOTE: 0.0.0.0 workaround breaks const correctness.
1211 */
1212static int
1213igmp_input_v1_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1214 /*const*/ struct igmp *igmp)
1215{
1216 struct in_ifaddr *ia;
1217 struct in_multi *inm;
1218
1219 IGMPSTAT_INC(igps_rcv_reports);
1220
1221 if (ifp->if_flags & IFF_LOOPBACK)
1222 return (0);
1223
1224 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1225 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1226 IGMPSTAT_INC(igps_rcv_badreports);
1227 return (EINVAL);
1228 }
1229
1230 /*
1231 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1232 * Booting clients may use the source address 0.0.0.0. Some
1233 * IGMP daemons may not know how to use IP_RECVIF to determine
1234 * the interface upon which this message was received.
1235 * Replace 0.0.0.0 with the subnet address if told to do so.
1236 */
1237 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1238 IFP_TO_IA(ifp, ia);
1239 if (ia != NULL) {
1240 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1241 ifa_free(&ia->ia_ifa);
1242 }
1243 }
1244
1245 CTR3(KTR_IGMPV3, "process v1 report %s on ifp %p(%s)",
1246 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1247
1248 /*
1249 * IGMPv1 report suppression.
1250 * If we are a member of this group, and our membership should be
1251 * reported, stop our group timer and transition to the 'lazy' state.
1252 */
1253 IN_MULTI_LOCK();
1254 inm = inm_lookup(ifp, igmp->igmp_group);
1255 if (inm != NULL) {
1256 struct igmp_ifinfo *igi;
1257
1258 igi = inm->inm_igi;
1259 if (igi == NULL) {
1260 KASSERT(igi != NULL,
1261 ("%s: no igi for ifp %p", __func__, ifp));
1262 goto out_locked;
1263 }
1264
1265 IGMPSTAT_INC(igps_rcv_ourreports);
1266
1267 /*
1268 * If we are in IGMPv3 host mode, do not allow the
1269 * other host's IGMPv1 report to suppress our reports
1270 * unless explicitly configured to do so.
1271 */
1272 if (igi->igi_version == IGMP_VERSION_3) {
1273 if (V_igmp_legacysupp)
1274 igmp_v3_suppress_group_record(inm);
1275 goto out_locked;
1276 }
1277
1278 inm->inm_timer = 0;
1279
1280 switch (inm->inm_state) {
1281 case IGMP_NOT_MEMBER:
1282 case IGMP_SILENT_MEMBER:
1283 break;
1284 case IGMP_IDLE_MEMBER:
1285 case IGMP_LAZY_MEMBER:
1286 case IGMP_AWAKENING_MEMBER:
1287 CTR3(KTR_IGMPV3,
1288 "report suppressed for %s on ifp %p(%s)",
1289 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
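			/* FALLTHROUGH */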
1290 case IGMP_SLEEPING_MEMBER:
1291 inm->inm_state = IGMP_SLEEPING_MEMBER;
1292 break;
1293 case IGMP_REPORTING_MEMBER:
1294 CTR3(KTR_IGMPV3,
1295 "report suppressed for %s on ifp %p(%s)",
1296 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1297 if (igi->igi_version == IGMP_VERSION_1)
1298 inm->inm_state = IGMP_LAZY_MEMBER;
1299 else if (igi->igi_version == IGMP_VERSION_2)
1300 inm->inm_state = IGMP_SLEEPING_MEMBER;
1301 break;
1302 case IGMP_G_QUERY_PENDING_MEMBER:
1303 case IGMP_SG_QUERY_PENDING_MEMBER:
1304 case IGMP_LEAVING_MEMBER:
1305 break;
1306 }
1307 }
1308
1309out_locked:
1310 IN_MULTI_UNLOCK();
1311
1312 return (0);
1313}
1314
1315/*
1316 * Process a received IGMPv2 host membership report.
1317 *
1318 * NOTE: 0.0.0.0 workaround breaks const correctness.
1319 */
1320static int
1321igmp_input_v2_report(struct ifnet *ifp, /*const*/ struct ip *ip,
1322 /*const*/ struct igmp *igmp)
1323{
1324 struct in_ifaddr *ia;
1325 struct in_multi *inm;
1326
1327 /*
1328 * Make sure we don't hear our own membership report. Fast
1329 * leave requires knowing that we are the only member of a
1330 * group.
1331 */
1332 IFP_TO_IA(ifp, ia);
1333 if (ia != NULL && in_hosteq(ip->ip_src, IA_SIN(ia)->sin_addr)) {
1334 ifa_free(&ia->ia_ifa);
1335 return (0);
1336 }
1337
1338 IGMPSTAT_INC(igps_rcv_reports);
1339
1340 if (ifp->if_flags & IFF_LOOPBACK) {
1341 if (ia != NULL)
1342 ifa_free(&ia->ia_ifa);
1343 return (0);
1344 }
1345
1346 if (!IN_MULTICAST(ntohl(igmp->igmp_group.s_addr)) ||
1347 !in_hosteq(igmp->igmp_group, ip->ip_dst)) {
1348 if (ia != NULL)
1349 ifa_free(&ia->ia_ifa);
1350 IGMPSTAT_INC(igps_rcv_badreports);
1351 return (EINVAL);
1352 }
1353
1354 /*
1355 * RFC 3376, Section 4.2.13, 9.2, 9.3:
1356 * Booting clients may use the source address 0.0.0.0. Some
1357 * IGMP daemons may not know how to use IP_RECVIF to determine
1358 * the interface upon which this message was received.
1359 * Replace 0.0.0.0 with the subnet address if told to do so.
1360 */
1361 if (V_igmp_recvifkludge && in_nullhost(ip->ip_src)) {
1362 if (ia != NULL)
1363 ip->ip_src.s_addr = htonl(ia->ia_subnet);
1364 }
1365 if (ia != NULL)
1366 ifa_free(&ia->ia_ifa);
1367
1368 CTR3(KTR_IGMPV3, "process v2 report %s on ifp %p(%s)",
1369 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
1370
1371 /*
1372 * IGMPv2 report suppression.
1373 * If we are a member of this group, and our membership should be
1374 * reported, and our group timer is pending or about to be reset,
1375 * stop our group timer by transitioning to the 'lazy' state.
1376 */
1377 IN_MULTI_LOCK();
1378 inm = inm_lookup(ifp, igmp->igmp_group);
1379 if (inm != NULL) {
1380 struct igmp_ifinfo *igi;
1381
1382 igi = inm->inm_igi;
1383 KASSERT(igi != NULL, ("%s: no igi for ifp %p", __func__, ifp));
1384
1385 IGMPSTAT_INC(igps_rcv_ourreports);
1386
1387 /*
1388 * If we are in IGMPv3 host mode, do not allow the
1389 * other host's IGMPv1 report to suppress our reports
1390 * unless explicitly configured to do so.
1391 */
1392 if (igi->igi_version == IGMP_VERSION_3) {
1393 if (V_igmp_legacysupp)
1394 igmp_v3_suppress_group_record(inm);
1395 goto out_locked;
1396 }
1397
1398 inm->inm_timer = 0;
1399
1400 switch (inm->inm_state) {
1401 case IGMP_NOT_MEMBER:
1402 case IGMP_SILENT_MEMBER:
1403 case IGMP_SLEEPING_MEMBER:
1404 break;
1405 case IGMP_REPORTING_MEMBER:
1406 case IGMP_IDLE_MEMBER:
1407 case IGMP_AWAKENING_MEMBER:
1408 CTR3(KTR_IGMPV3,
1409 "report suppressed for %s on ifp %p(%s)",
1410 inet_ntoa(igmp->igmp_group), ifp, ifp->if_xname);
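			/* FALLTHROUGH */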
1411 case IGMP_LAZY_MEMBER:
1412 inm->inm_state = IGMP_LAZY_MEMBER;
1413 break;
1414 case IGMP_G_QUERY_PENDING_MEMBER:
1415 case IGMP_SG_QUERY_PENDING_MEMBER:
1416 case IGMP_LEAVING_MEMBER:
1417 break;
1418 }
1419 }
1420
1421out_locked:
1422 IN_MULTI_UNLOCK();
1423
1424 return (0);
1425}
1426
1427int
1428igmp_input(struct mbuf **mp, int *offp, int proto)
1429{
1430 int iphlen;
1431 struct ifnet *ifp;
1432 struct igmp *igmp;
1433 struct ip *ip;
1434 struct mbuf *m;
1435 int igmplen;
1436 int minlen;
1437 int queryver;
1438
 1439 CTR3(KTR_IGMPV3, "%s: called w/mbuf (%p,%d)", __func__, *mp, *offp);
1440
1441 m = *mp;
1442 ifp = m->m_pkthdr.rcvif;
1443 *mp = NULL;
1444
1445 IGMPSTAT_INC(igps_rcv_total);
1446
1447 ip = mtod(m, struct ip *);
1448 iphlen = *offp;
1449 igmplen = ntohs(ip->ip_len) - iphlen;
1450
1451 /*
1452 * Validate lengths.
1453 */
1454 if (igmplen < IGMP_MINLEN) {
1455 IGMPSTAT_INC(igps_rcv_tooshort);
1456 m_freem(m);
1457 return (IPPROTO_DONE);
1458 }
1459
1460 /*
1461 * Always pullup to the minimum size for v1/v2 or v3
1462 * to amortize calls to m_pullup().
1463 */
1464 minlen = iphlen;
1465 if (igmplen >= IGMP_V3_QUERY_MINLEN)
1466 minlen += IGMP_V3_QUERY_MINLEN;
1467 else
1468 minlen += IGMP_MINLEN;
 1469 if ((m->m_flags & M_EXT || m->m_len < minlen) &&
 1470 (m = m_pullup(m, minlen)) == NULL) {
1471 IGMPSTAT_INC(igps_rcv_tooshort);
1472 return (IPPROTO_DONE);
1473 }
1474 ip = mtod(m, struct ip *);
1475
1476 /*
1477 * Validate checksum.
1478 */
1479 m->m_data += iphlen;
1480 m->m_len -= iphlen;
1481 igmp = mtod(m, struct igmp *);
1482 if (in_cksum(m, igmplen)) {
1483 IGMPSTAT_INC(igps_rcv_badsum);
1484 m_freem(m);
1485 return (IPPROTO_DONE);
1486 }
1487 m->m_data -= iphlen;
1488 m->m_len += iphlen;
1489
1490 /*
1491 * IGMP control traffic is link-scope, and must have a TTL of 1.
1492 * DVMRP traffic (e.g. mrinfo, mtrace) is an exception;
1493 * probe packets may come from beyond the LAN.
1494 */
1495 if (igmp->igmp_type != IGMP_DVMRP && ip->ip_ttl != 1) {
1496 IGMPSTAT_INC(igps_rcv_badttl);
1497 m_freem(m);
1498 return (IPPROTO_DONE);
1499 }
1500
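	/*
	 * Informative summary of the demultiplexing below, per RFC 3376,
	 * Section 7.1:
	 *
	 *	igmplen == IGMP_MINLEN, code == 0	IGMPv1 query
	 *	igmplen == IGMP_MINLEN, code != 0	IGMPv2 query
	 *	igmplen >= IGMP_V3_QUERY_MINLEN		IGMPv3 query
	 */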
1501 switch (igmp->igmp_type) {
1502 case IGMP_HOST_MEMBERSHIP_QUERY:
1503 if (igmplen == IGMP_MINLEN) {
1504 if (igmp->igmp_code == 0)
1505 queryver = IGMP_VERSION_1;
1506 else
1507 queryver = IGMP_VERSION_2;
1508 } else if (igmplen >= IGMP_V3_QUERY_MINLEN) {
1509 queryver = IGMP_VERSION_3;
1510 } else {
1511 IGMPSTAT_INC(igps_rcv_tooshort);
1512 m_freem(m);
1513 return (IPPROTO_DONE);
1514 }
1515
1516 switch (queryver) {
1517 case IGMP_VERSION_1:
1518 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1519 if (!V_igmp_v1enable)
1520 break;
1521 if (igmp_input_v1_query(ifp, ip, igmp) != 0) {
1522 m_freem(m);
1523 return (IPPROTO_DONE);
1524 }
1525 break;
1526
1527 case IGMP_VERSION_2:
1528 IGMPSTAT_INC(igps_rcv_v1v2_queries);
1529 if (!V_igmp_v2enable)
1530 break;
1531 if (igmp_input_v2_query(ifp, ip, igmp) != 0) {
1532 m_freem(m);
1533 return (IPPROTO_DONE);
1534 }
1535 break;
1536
1537 case IGMP_VERSION_3: {
1538 struct igmpv3 *igmpv3;
1539 uint16_t igmpv3len;
1540 uint16_t srclen;
1541 int nsrc;
1542
1543 IGMPSTAT_INC(igps_rcv_v3_queries);
1544 igmpv3 = (struct igmpv3 *)igmp;
1545 /*
1546 * Validate length based on source count.
1547 */
 1548 nsrc = ntohs(igmpv3->igmp_numsrc);
 /*
  * Bound the source count before computing srclen:
  * srclen is only 16 bits wide, so computing it first
  * could silently overflow and defeat the check.
  */
 1550 if (nsrc * sizeof(in_addr_t) >
     UINT16_MAX - iphlen - IGMP_V3_QUERY_MINLEN) {
 1551 IGMPSTAT_INC(igps_rcv_tooshort);
 m_freem(m); /* not reached via m_pullup(); free explicitly */
 1552 return (IPPROTO_DONE);
 1553 }
 srclen = sizeof(struct in_addr) * nsrc;
1554 /*
1555 * m_pullup() may modify m, so pullup in
1556 * this scope.
1557 */
1558 igmpv3len = iphlen + IGMP_V3_QUERY_MINLEN +
1559 srclen;
1560 if ((m->m_flags & M_EXT ||
1561 m->m_len < igmpv3len) &&
1562 (m = m_pullup(m, igmpv3len)) == NULL) {
1563 IGMPSTAT_INC(igps_rcv_tooshort);
1564 return (IPPROTO_DONE);
1565 }
1566 igmpv3 = (struct igmpv3 *)(mtod(m, uint8_t *)
1567 + iphlen);
1568 if (igmp_input_v3_query(ifp, ip, igmpv3) != 0) {
1569 m_freem(m);
1570 return (IPPROTO_DONE);
1571 }
1572 }
1573 break;
1574 }
1575 break;
1576
1577 case IGMP_v1_HOST_MEMBERSHIP_REPORT:
1578 if (!V_igmp_v1enable)
1579 break;
1580 if (igmp_input_v1_report(ifp, ip, igmp) != 0) {
1581 m_freem(m);
1582 return (IPPROTO_DONE);
1583 }
1584 break;
1585
1586 case IGMP_v2_HOST_MEMBERSHIP_REPORT:
1587 if (!V_igmp_v2enable)
1588 break;
1589 if (!ip_checkrouteralert(m))
1590 IGMPSTAT_INC(igps_rcv_nora);
1591 if (igmp_input_v2_report(ifp, ip, igmp) != 0) {
1592 m_freem(m);
1593 return (IPPROTO_DONE);
1594 }
1595 break;
1596
1597 case IGMP_v3_HOST_MEMBERSHIP_REPORT:
1598 /*
1599 * Hosts do not need to process IGMPv3 membership reports,
1600 * as report suppression is no longer required.
1601 */
1602 if (!ip_checkrouteralert(m))
1603 IGMPSTAT_INC(igps_rcv_nora);
1604 break;
1605
1606 default:
1607 break;
1608 }
1609
1610 /*
1611 * Pass all valid IGMP packets up to any process(es) listening on a
1612 * raw IGMP socket.
1613 */
1614 *mp = m;
1615 return (rip_input(mp, offp, proto));
1616}
1617
1618
1619/*
1620 * Fast timeout handler (global).
1621 * VIMAGE: Timeout handlers are expected to service all vimages.
1622 */
1623void
1624igmp_fasttimo(void)
1625{
1626 VNET_ITERATOR_DECL(vnet_iter);
1627
1628 VNET_LIST_RLOCK_NOSLEEP();
1629 VNET_FOREACH(vnet_iter) {
1630 CURVNET_SET(vnet_iter);
1631 igmp_fasttimo_vnet();
1632 CURVNET_RESTORE();
1633 }
1634 VNET_LIST_RUNLOCK_NOSLEEP();
1635}
1636
1637/*
1638 * Fast timeout handler (per-vnet).
1639 * Sends are shuffled off to a netisr to deal with Giant.
1640 *
1641 * VIMAGE: Assume caller has set up our curvnet.
1642 */
1643static void
1644igmp_fasttimo_vnet(void)
1645{
1646 struct ifqueue scq; /* State-change packets */
1647 struct ifqueue qrq; /* Query response packets */
1648 struct ifnet *ifp;
1649 struct igmp_ifinfo *igi;
1650 struct ifmultiaddr *ifma;
1651 struct in_multi *inm;
1652 int loop, uri_fasthz;
1653
1654 loop = 0;
1655 uri_fasthz = 0;
1656
1657 /*
1658 * Quick check to see if any work needs to be done, in order to
1659 * minimize the overhead of fasttimo processing.
1660 * SMPng: XXX Unlocked reads.
1661 */
1662 if (!V_current_state_timers_running &&
1663 !V_interface_timers_running &&
1664 !V_state_change_timers_running)
1665 return;
1666
1667 IN_MULTI_LOCK();
1668 IGMP_LOCK();
1669
1670 /*
1671 * IGMPv3 General Query response timer processing.
1672 */
1673 if (V_interface_timers_running) {
1674 CTR1(KTR_IGMPV3, "%s: interface timers running", __func__);
1675
1676 V_interface_timers_running = 0;
1677 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1678 if (igi->igi_v3_timer == 0) {
1679 /* Do nothing. */
1680 } else if (--igi->igi_v3_timer == 0) {
1681 igmp_v3_dispatch_general_query(igi);
1682 } else {
1683 V_interface_timers_running = 1;
1684 }
1685 }
1686 }
1687
1688 if (!V_current_state_timers_running &&
1689 !V_state_change_timers_running)
1690 goto out_locked;
1691
1692 V_current_state_timers_running = 0;
1693 V_state_change_timers_running = 0;
1694
1695 CTR1(KTR_IGMPV3, "%s: state change timers running", __func__);
1696
1697 /*
1698 * IGMPv1/v2/v3 host report and state-change timer processing.
1699 * Note: Processing a v3 group timer may remove a node.
1700 */
1701 LIST_FOREACH(igi, &V_igi_head, igi_link) {
1702 ifp = igi->igi_ifp;
1703
1704 if (igi->igi_version == IGMP_VERSION_3) {
1705 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
1706 uri_fasthz = IGMP_RANDOM_DELAY(igi->igi_uri *
1707 PR_FASTHZ);
1708
1709 memset(&qrq, 0, sizeof(struct ifqueue));
1710 IFQ_SET_MAXLEN(&qrq, IGMP_MAX_G_GS_PACKETS);
1711
1712 memset(&scq, 0, sizeof(struct ifqueue));
1713 IFQ_SET_MAXLEN(&scq, IGMP_MAX_STATE_CHANGE_PACKETS);
1714 }
1715
1716 IF_ADDR_RLOCK(ifp);
1717 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
1718 if (ifma->ifma_addr->sa_family != AF_INET ||
1719 ifma->ifma_protospec == NULL)
1720 continue;
1721 inm = (struct in_multi *)ifma->ifma_protospec;
1722 switch (igi->igi_version) {
1723 case IGMP_VERSION_1:
1724 case IGMP_VERSION_2:
1725 igmp_v1v2_process_group_timer(inm,
1726 igi->igi_version);
1727 break;
1728 case IGMP_VERSION_3:
1729 igmp_v3_process_group_timers(igi, &qrq,
1730 &scq, inm, uri_fasthz);
1731 break;
1732 }
1733 }
1734 IF_ADDR_RUNLOCK(ifp);
1735
1736 if (igi->igi_version == IGMP_VERSION_3) {
1737 struct in_multi *tinm;
1738
1739 igmp_dispatch_queue(&qrq, 0, loop);
1740 igmp_dispatch_queue(&scq, 0, loop);
1741
1742 /*
1743 * Free the in_multi reference(s) for this
1744 * IGMP lifecycle.
1745 */
1746 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead,
1747 inm_nrele, tinm) {
1748 SLIST_REMOVE_HEAD(&igi->igi_relinmhead,
1749 inm_nrele);
1750 inm_release_locked(inm);
1751 }
1752 }
1753 }
1754
1755out_locked:
1756 IGMP_UNLOCK();
1757 IN_MULTI_UNLOCK();
1758}
1759
1760/*
1761 * Update host report group timer for IGMPv1/v2.
1762 * Will update the global pending timer flags.
1763 */
1764static void
1765igmp_v1v2_process_group_timer(struct in_multi *inm, const int version)
1766{
1767 int report_timer_expired;
1768
1769 IN_MULTI_LOCK_ASSERT();
1770 IGMP_LOCK_ASSERT();
1771
1772 if (inm->inm_timer == 0) {
1773 report_timer_expired = 0;
1774 } else if (--inm->inm_timer == 0) {
1775 report_timer_expired = 1;
1776 } else {
1777 V_current_state_timers_running = 1;
1778 return;
1779 }
1780
1781 switch (inm->inm_state) {
1782 case IGMP_NOT_MEMBER:
1783 case IGMP_SILENT_MEMBER:
1784 case IGMP_IDLE_MEMBER:
1785 case IGMP_LAZY_MEMBER:
1786 case IGMP_SLEEPING_MEMBER:
1787 case IGMP_AWAKENING_MEMBER:
1788 break;
1789 case IGMP_REPORTING_MEMBER:
1790 if (report_timer_expired) {
1791 inm->inm_state = IGMP_IDLE_MEMBER;
1792 (void)igmp_v1v2_queue_report(inm,
1793 (version == IGMP_VERSION_2) ?
1794 IGMP_v2_HOST_MEMBERSHIP_REPORT :
1795 IGMP_v1_HOST_MEMBERSHIP_REPORT);
1796 }
1797 break;
1798 case IGMP_G_QUERY_PENDING_MEMBER:
1799 case IGMP_SG_QUERY_PENDING_MEMBER:
1800 case IGMP_LEAVING_MEMBER:
1801 break;
1802 }
1803}
1804
1805/*
1806 * Update a group's timers for IGMPv3.
1807 * Will update the global pending timer flags.
1808 * Note: Unlocked read from igi.
1809 */
1810static void
1811igmp_v3_process_group_timers(struct igmp_ifinfo *igi,
1812 struct ifqueue *qrq, struct ifqueue *scq,
1813 struct in_multi *inm, const int uri_fasthz)
1814{
1815 int query_response_timer_expired;
1816 int state_change_retransmit_timer_expired;
1817
1818 IN_MULTI_LOCK_ASSERT();
1819 IGMP_LOCK_ASSERT();
1820
1821 query_response_timer_expired = 0;
1822 state_change_retransmit_timer_expired = 0;
1823
1824 /*
1825 * During a transition from v1/v2 compatibility mode back to v3,
1826 * a group record in REPORTING state may still have its group
1827 * timer active. This is a no-op in this function; it is easier
1828 * to deal with it here than to complicate the slow-timeout path.
1829 */
1830 if (inm->inm_timer == 0) {
1831 query_response_timer_expired = 0;
1832 } else if (--inm->inm_timer == 0) {
1833 query_response_timer_expired = 1;
1834 } else {
1835 V_current_state_timers_running = 1;
1836 }
1837
1838 if (inm->inm_sctimer == 0) {
1839 state_change_retransmit_timer_expired = 0;
1840 } else if (--inm->inm_sctimer == 0) {
1841 state_change_retransmit_timer_expired = 1;
1842 } else {
1843 V_state_change_timers_running = 1;
1844 }
1845
1846 /* We are in fasttimo, so be quick about it. */
1847 if (!state_change_retransmit_timer_expired &&
1848 !query_response_timer_expired)
1849 return;
1850
1851 switch (inm->inm_state) {
1852 case IGMP_NOT_MEMBER:
1853 case IGMP_SILENT_MEMBER:
1854 case IGMP_SLEEPING_MEMBER:
1855 case IGMP_LAZY_MEMBER:
1856 case IGMP_AWAKENING_MEMBER:
1857 case IGMP_IDLE_MEMBER:
1858 break;
1859 case IGMP_G_QUERY_PENDING_MEMBER:
1860 case IGMP_SG_QUERY_PENDING_MEMBER:
1861 /*
1862 * Respond to a previously pending Group-Specific
1863 * or Group-and-Source-Specific query by enqueueing
1864 * the appropriate Current-State report for
1865 * immediate transmission.
1866 */
1867 if (query_response_timer_expired) {
1868 int retval;
1869
1870 retval = igmp_v3_enqueue_group_record(qrq, inm, 0, 1,
1871 (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER));
1872 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
1873 __func__, retval);
1874 inm->inm_state = IGMP_REPORTING_MEMBER;
1875 /* XXX Clear recorded sources for next time. */
1876 inm_clear_recorded(inm);
1877 }
1878 /* FALLTHROUGH */
1879 case IGMP_REPORTING_MEMBER:
1880 case IGMP_LEAVING_MEMBER:
1881 if (state_change_retransmit_timer_expired) {
1882 /*
1883 * State-change retransmission timer fired.
1884 * If there are any further pending retransmissions,
1885 * set the global pending state-change flag, and
1886 * reset the timer.
1887 */
1888 if (--inm->inm_scrv > 0) {
1889 inm->inm_sctimer = uri_fasthz;
1890 V_state_change_timers_running = 1;
1891 }
1892 /*
1893 * Retransmit the previously computed state-change
1894 * report. If there are no further pending
1895 * retransmissions, the mbuf queue will be consumed.
1896 * Update T0 state to T1 as we have now sent
1897 * a state-change.
1898 */
1899 (void)igmp_v3_merge_state_changes(inm, scq);
1900
1901 inm_commit(inm);
1902 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
1903 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
1904
1905 /*
1906 * If we are leaving the group for good, make sure
1907 * we release IGMP's reference to it.
1908 * This release must be deferred using a SLIST,
1909 * as we are called from a loop which traverses
1910 * the in_ifmultiaddr TAILQ.
1911 */
1912 if (inm->inm_state == IGMP_LEAVING_MEMBER &&
1913 inm->inm_scrv == 0) {
1914 inm->inm_state = IGMP_NOT_MEMBER;
1915 SLIST_INSERT_HEAD(&igi->igi_relinmhead,
1916 inm, inm_nrele);
1917 }
1918 }
1919 break;
1920 }
1921}
1922
1923
1924/*
1925 * Suppress a group's pending response to a group or source/group query.
1926 *
1927 * Do NOT suppress state changes. This leads to IGMPv3 inconsistency.
1928 * Do NOT update ST1/ST0 as this operation merely suppresses
1929 * the currently pending group record.
1930 * Do NOT suppress the response to a general query. It is possible but
1931 * it would require adding another state or flag.
1932 */
1933static void
1934igmp_v3_suppress_group_record(struct in_multi *inm)
1935{
1936
1937 IN_MULTI_LOCK_ASSERT();
1938
1939 KASSERT(inm->inm_igi->igi_version == IGMP_VERSION_3,
1940 ("%s: not IGMPv3 mode on link", __func__));
1941
1942	if (inm->inm_state != IGMP_G_QUERY_PENDING_MEMBER &&
1943	    inm->inm_state != IGMP_SG_QUERY_PENDING_MEMBER)
1944 return;
1945
1946 if (inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
1947 inm_clear_recorded(inm);
1948
1949 inm->inm_timer = 0;
1950 inm->inm_state = IGMP_REPORTING_MEMBER;
1951}
1952
1953/*
1954 * Switch to a different IGMP version on the given interface,
1955 * as per Section 7.2.1.
1956 */
1957static void
1958igmp_set_version(struct igmp_ifinfo *igi, const int version)
1959{
1960 int old_version_timer;
1961
1962 IGMP_LOCK_ASSERT();
1963
1964 CTR4(KTR_IGMPV3, "%s: switching to v%d on ifp %p(%s)", __func__,
1965 version, igi->igi_ifp, igi->igi_ifp->if_xname);
1966
1967 if (version == IGMP_VERSION_1 || version == IGMP_VERSION_2) {
1968 /*
1969 * Compute the "Older Version Querier Present" timer as per
1970 * Section 8.12.
1971 */
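		/*
		 * With the RFC 3376 defaults (Robustness Variable 2,
		 * Query Interval 125s, Query Response Interval 10s),
		 * this works out to 260 seconds before reverting to v3.
		 */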
1972 old_version_timer = igi->igi_rv * igi->igi_qi + igi->igi_qri;
1973 old_version_timer *= PR_SLOWHZ;
1974
1975 if (version == IGMP_VERSION_1) {
1976 igi->igi_v1_timer = old_version_timer;
1977 igi->igi_v2_timer = 0;
1978 } else if (version == IGMP_VERSION_2) {
1979 igi->igi_v1_timer = 0;
1980 igi->igi_v2_timer = old_version_timer;
1981 }
1982 }
1983
1984 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
1985 if (igi->igi_version != IGMP_VERSION_2) {
1986 igi->igi_version = IGMP_VERSION_2;
1987 igmp_v3_cancel_link_timers(igi);
1988 }
1989 } else if (igi->igi_v1_timer > 0) {
1990 if (igi->igi_version != IGMP_VERSION_1) {
1991 igi->igi_version = IGMP_VERSION_1;
1992 igmp_v3_cancel_link_timers(igi);
1993 }
1994 }
1995}
1996
1997/*
1998 * Cancel pending IGMPv3 timers for the given link and all groups
1999 * joined on it; state-change, general-query, and group-query timers.
2000 *
2001 * Only ever called on a transition from v3 to Compatibility mode. Kill
2002 * the timers stone dead (this may be expensive for large N groups);
2003 * they will be restarted by Compatibility Mode query processing if it
2004 * deems that they must be.
2005 */
2006static void
2007igmp_v3_cancel_link_timers(struct igmp_ifinfo *igi)
2008{
2009 struct ifmultiaddr *ifma;
2010 struct ifnet *ifp;
2011 struct in_multi *inm, *tinm;
2012
2013 CTR3(KTR_IGMPV3, "%s: cancel v3 timers on ifp %p(%s)", __func__,
2014 igi->igi_ifp, igi->igi_ifp->if_xname);
2015
2016 IN_MULTI_LOCK_ASSERT();
2017 IGMP_LOCK_ASSERT();
2018
2019 /*
2020 * Stop the v3 General Query Response on this link stone dead.
2021 * If fasttimo is woken up due to V_interface_timers_running,
2022 * the flag will be cleared if there are no pending link timers.
2023 */
2024 igi->igi_v3_timer = 0;
2025
2026 /*
2027 * Now clear the current-state and state-change report timers
2028 * for all memberships scoped to this link.
2029 */
2030 ifp = igi->igi_ifp;
2031 IF_ADDR_RLOCK(ifp);
2032 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2033 if (ifma->ifma_addr->sa_family != AF_INET ||
2034 ifma->ifma_protospec == NULL)
2035 continue;
2036 inm = (struct in_multi *)ifma->ifma_protospec;
2037 switch (inm->inm_state) {
2038 case IGMP_NOT_MEMBER:
2039 case IGMP_SILENT_MEMBER:
2040 case IGMP_IDLE_MEMBER:
2041 case IGMP_LAZY_MEMBER:
2042 case IGMP_SLEEPING_MEMBER:
2043 case IGMP_AWAKENING_MEMBER:
2044 /*
2045 * These states are either not relevant in v3 mode,
2046 * or are unreported. Do nothing.
2047 */
2048 break;
2049 case IGMP_LEAVING_MEMBER:
2050 /*
2051 * If we are leaving the group and switching to
2052 * compatibility mode, we need to release the final
2053 * reference held for issuing the INCLUDE {}, and
2054 * transition to REPORTING to ensure the host leave
2055 * message is sent upstream to the old querier --
2056 * transition to NOT would lose the leave and race.
2057 */
2058 SLIST_INSERT_HEAD(&igi->igi_relinmhead, inm, inm_nrele);
2059 /* FALLTHROUGH */
2060 case IGMP_G_QUERY_PENDING_MEMBER:
2061 case IGMP_SG_QUERY_PENDING_MEMBER:
2062 inm_clear_recorded(inm);
2063 /* FALLTHROUGH */
2064 case IGMP_REPORTING_MEMBER:
2065 inm->inm_state = IGMP_REPORTING_MEMBER;
2066 break;
2067 }
2068 /*
2069 * Always clear state-change and group report timers.
2070 * Free any pending IGMPv3 state-change records.
2071 */
2072 inm->inm_sctimer = 0;
2073 inm->inm_timer = 0;
2074 _IF_DRAIN(&inm->inm_scq);
2075 }
2076 IF_ADDR_RUNLOCK(ifp);
2077 SLIST_FOREACH_SAFE(inm, &igi->igi_relinmhead, inm_nrele, tinm) {
2078 SLIST_REMOVE_HEAD(&igi->igi_relinmhead, inm_nrele);
2079 inm_release_locked(inm);
2080 }
2081}
2082
2083/*
2084 * Update the Older Version Querier Present timers for a link.
2085 * See Section 7.2.1 of RFC 3376.
2086 */
2087static void
2088igmp_v1v2_process_querier_timers(struct igmp_ifinfo *igi)
2089{
2090
2091 IGMP_LOCK_ASSERT();
2092
2093 if (igi->igi_v1_timer == 0 && igi->igi_v2_timer == 0) {
2094 /*
2095 * IGMPv1 and IGMPv2 Querier Present timers expired.
2096 *
2097 * Revert to IGMPv3.
2098 */
2099 if (igi->igi_version != IGMP_VERSION_3) {
2100 CTR5(KTR_IGMPV3,
2101 "%s: transition from v%d -> v%d on %p(%s)",
2102 __func__, igi->igi_version, IGMP_VERSION_3,
2103 igi->igi_ifp, igi->igi_ifp->if_xname);
2104 igi->igi_version = IGMP_VERSION_3;
2105 }
2106 } else if (igi->igi_v1_timer == 0 && igi->igi_v2_timer > 0) {
2107 /*
2108 * IGMPv1 Querier Present timer expired,
2109 * IGMPv2 Querier Present timer running.
2110 * If IGMPv2 was disabled since last timeout,
2111 * revert to IGMPv3.
2112 * If IGMPv2 is enabled, revert to IGMPv2.
2113 */
2114 if (!V_igmp_v2enable) {
2115 CTR5(KTR_IGMPV3,
2116 "%s: transition from v%d -> v%d on %p(%s)",
2117 __func__, igi->igi_version, IGMP_VERSION_3,
2118 igi->igi_ifp, igi->igi_ifp->if_xname);
2119 igi->igi_v2_timer = 0;
2120 igi->igi_version = IGMP_VERSION_3;
2121 } else {
2122 --igi->igi_v2_timer;
2123 if (igi->igi_version != IGMP_VERSION_2) {
2124 CTR5(KTR_IGMPV3,
2125 "%s: transition from v%d -> v%d on %p(%s)",
2126 __func__, igi->igi_version, IGMP_VERSION_2,
2127 igi->igi_ifp, igi->igi_ifp->if_xname);
2128 igi->igi_version = IGMP_VERSION_2;
2129 igmp_v3_cancel_link_timers(igi);
2130 }
2131 }
2132 } else if (igi->igi_v1_timer > 0) {
2133 /*
2134 * IGMPv1 Querier Present timer running.
2135 * Stop IGMPv2 timer if running.
2136 *
2137 * If IGMPv1 was disabled since last timeout,
2138 * revert to IGMPv3.
2139	 * If IGMPv1 is enabled, let its timer continue to run down.
2140 */
2141 if (!V_igmp_v1enable) {
2142 CTR5(KTR_IGMPV3,
2143 "%s: transition from v%d -> v%d on %p(%s)",
2144 __func__, igi->igi_version, IGMP_VERSION_3,
2145 igi->igi_ifp, igi->igi_ifp->if_xname);
2146 igi->igi_v1_timer = 0;
2147 igi->igi_version = IGMP_VERSION_3;
2148 } else {
2149 --igi->igi_v1_timer;
2150 }
2151 if (igi->igi_v2_timer > 0) {
2152 CTR3(KTR_IGMPV3,
2153 "%s: cancel v2 timer on %p(%s)",
2154 __func__, igi->igi_ifp, igi->igi_ifp->if_xname);
2155 igi->igi_v2_timer = 0;
2156 }
2157 }
2158}
2159
2160/*
2161 * Global slowtimo handler.
2162 * VIMAGE: Timeout handlers are expected to service all vimages.
2163 */
2164void
2165igmp_slowtimo(void)
2166{
2167 VNET_ITERATOR_DECL(vnet_iter);
2168
2169 VNET_LIST_RLOCK_NOSLEEP();
2170 VNET_FOREACH(vnet_iter) {
2171 CURVNET_SET(vnet_iter);
2172 igmp_slowtimo_vnet();
2173 CURVNET_RESTORE();
2174 }
2175 VNET_LIST_RUNLOCK_NOSLEEP();
2176}
2177
2178/*
2179 * Per-vnet slowtimo handler.
2180 */
2181static void
2182igmp_slowtimo_vnet(void)
2183{
2184 struct igmp_ifinfo *igi;
2185
2186 IGMP_LOCK();
2187
2188 LIST_FOREACH(igi, &V_igi_head, igi_link) {
2189 igmp_v1v2_process_querier_timers(igi);
2190 }
2191
2192 IGMP_UNLOCK();
2193}
2194
2195/*
2196 * Dispatch an IGMPv1/v2 host report or leave message.
2197 * These are always small enough to fit inside a single mbuf.
2198 */
2199static int
2200igmp_v1v2_queue_report(struct in_multi *inm, const int type)
2201{
2202 struct ifnet *ifp;
2203 struct igmp *igmp;
2204 struct ip *ip;
2205 struct mbuf *m;
2206
2207 IN_MULTI_LOCK_ASSERT();
2208 IGMP_LOCK_ASSERT();
2209
2210 ifp = inm->inm_ifp;
2211
2212 m = m_gethdr(M_NOWAIT, MT_DATA);
2213 if (m == NULL)
2214 return (ENOMEM);
2215 MH_ALIGN(m, sizeof(struct ip) + sizeof(struct igmp));
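	/*
	 * MH_ALIGN reserves aligned room for both headers at the end
	 * of the mbuf; the IGMP message is built first, then m_data
	 * is rewound below so the IP header can be filled in directly
	 * in front of it.
	 */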
2216
2217 m->m_pkthdr.len = sizeof(struct ip) + sizeof(struct igmp);
2218
2219 m->m_data += sizeof(struct ip);
2220 m->m_len = sizeof(struct igmp);
2221
2222 igmp = mtod(m, struct igmp *);
2223 igmp->igmp_type = type;
2224 igmp->igmp_code = 0;
2225 igmp->igmp_group = inm->inm_addr;
2226 igmp->igmp_cksum = 0;
2227 igmp->igmp_cksum = in_cksum(m, sizeof(struct igmp));
2228
2229 m->m_data -= sizeof(struct ip);
2230 m->m_len += sizeof(struct ip);
2231
2232 ip = mtod(m, struct ip *);
2233 ip->ip_tos = 0;
2234 ip->ip_len = htons(sizeof(struct ip) + sizeof(struct igmp));
2235 ip->ip_off = 0;
2236 ip->ip_p = IPPROTO_IGMP;
2237 ip->ip_src.s_addr = INADDR_ANY;
2238
2239 if (type == IGMP_HOST_LEAVE_MESSAGE)
2240 ip->ip_dst.s_addr = htonl(INADDR_ALLRTRS_GROUP);
2241 else
2242 ip->ip_dst = inm->inm_addr;
2243
2244 igmp_save_context(m, ifp);
2245
2246 m->m_flags |= M_IGMPV2;
2247 if (inm->inm_igi->igi_flags & IGIF_LOOPBACK)
2248 m->m_flags |= M_IGMP_LOOP;
2249
2250 CTR2(KTR_IGMPV3, "%s: netisr_dispatch(NETISR_IGMP, %p)", __func__, m);
2251 netisr_dispatch(NETISR_IGMP, m);
2252
2253 return (0);
2254}
2255
2256/*
2257 * Process a state change from the upper layer for the given IPv4 group.
2258 *
2259 * Each socket holds a reference on the in_multi in its own ip_moptions.
2260 * The socket layer will have made the necessary updates to the group
2261 * state; it is now up to IGMP to issue a state change report if there
2262 * has been any change between T0 (when the last state-change was issued)
2263 * and T1 (now).
2264 *
2265 * We use the IGMPv3 state machine at group level. The IGMP module
2266 * however makes the decision as to which IGMP protocol version to speak.
2267 * A state change *from* INCLUDE {} always means an initial join.
2268 * A state change *to* INCLUDE {} always means a final leave.
2269 *
2270 * FUTURE: If IGIF_V3LITE is enabled for this interface, then we can
2271 * save ourselves a bunch of work; any exclusive mode groups need not
2272 * compute source filter lists.
2273 *
2274 * VIMAGE: curvnet should have been set by caller, as this routine
2275 * is called from the socket option handlers.
2276 */
2277int
2278igmp_change_state(struct in_multi *inm)
2279{
2280 struct igmp_ifinfo *igi;
2281 struct ifnet *ifp;
2282 int error;
2283
2284 IN_MULTI_LOCK_ASSERT();
2285
2286 error = 0;
2287
2288 /*
2289 * Try to detect if the upper layer just asked us to change state
2290 * for an interface which has now gone away.
2291 */
2292 KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__));
2293 ifp = inm->inm_ifma->ifma_ifp;
2294 /*
2295 * Sanity check that netinet's notion of ifp is the
2296 * same as net's.
2297 */
2298 KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__));
2299
2300 IGMP_LOCK();
2301
2302 igi = ((struct in_ifinfo *)ifp->if_afdata[AF_INET])->ii_igmp;
2303 KASSERT(igi != NULL, ("%s: no igmp_ifinfo for ifp %p", __func__, ifp));
2304
2305 /*
2306 * If we detect a state transition to or from MCAST_UNDEFINED
2307 * for this group, then we are starting or finishing an IGMP
2308 * life cycle for this group.
2309 */
2310 if (inm->inm_st[1].iss_fmode != inm->inm_st[0].iss_fmode) {
2311 CTR3(KTR_IGMPV3, "%s: inm transition %d -> %d", __func__,
2312 inm->inm_st[0].iss_fmode, inm->inm_st[1].iss_fmode);
2313 if (inm->inm_st[0].iss_fmode == MCAST_UNDEFINED) {
2314 CTR1(KTR_IGMPV3, "%s: initial join", __func__);
2315 error = igmp_initial_join(inm, igi);
2316 goto out_locked;
2317 } else if (inm->inm_st[1].iss_fmode == MCAST_UNDEFINED) {
2318 CTR1(KTR_IGMPV3, "%s: final leave", __func__);
2319 igmp_final_leave(inm, igi);
2320 goto out_locked;
2321 }
2322 } else {
2323 CTR1(KTR_IGMPV3, "%s: filter set change", __func__);
2324 }
2325
2326 error = igmp_handle_state_change(inm, igi);
2327
2328out_locked:
2329 IGMP_UNLOCK();
2330 return (error);
2331}
2332
2333/*
2334 * Perform the initial join for an IGMP group.
2335 *
2336 * When joining a group:
2337 * If the group should have its IGMP traffic suppressed, do nothing.
2338 * IGMPv1 starts sending IGMPv1 host membership reports.
2339 * IGMPv2 starts sending IGMPv2 host membership reports.
2340 * IGMPv3 will schedule an IGMPv3 state-change report containing the
2341 * initial state of the membership.
2342 */
2343static int
2344igmp_initial_join(struct in_multi *inm, struct igmp_ifinfo *igi)
2345{
2346 struct ifnet *ifp;
2347 struct ifqueue *ifq;
2348 int error, retval, syncstates;
2349
2350 CTR4(KTR_IGMPV3, "%s: initial join %s on ifp %p(%s)",
2351 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2352 inm->inm_ifp->if_xname);
2353
2354 error = 0;
2355 syncstates = 1;
2356
2357 ifp = inm->inm_ifp;
2358
2359 IN_MULTI_LOCK_ASSERT();
2360 IGMP_LOCK_ASSERT();
2361
2362 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2363
2364 /*
2365 * Groups joined on loopback or marked as 'not reported',
2366 * e.g. 224.0.0.1, enter the IGMP_SILENT_MEMBER state and
2367 * are never reported in any IGMP protocol exchanges.
2368 * All other groups enter the appropriate IGMP state machine
2369 * for the version in use on this link.
2370 * A link marked as IGIF_SILENT causes IGMP to be completely
2371 * disabled for the link.
2372 */
2373 if ((ifp->if_flags & IFF_LOOPBACK) ||
2374 (igi->igi_flags & IGIF_SILENT) ||
2375 !igmp_isgroupreported(inm->inm_addr)) {
2376 CTR1(KTR_IGMPV3,
2377"%s: not kicking state machine for silent group", __func__);
2378 inm->inm_state = IGMP_SILENT_MEMBER;
2379 inm->inm_timer = 0;
2380 } else {
2381 /*
2382 * Deal with overlapping in_multi lifecycle.
2383 * If this group was LEAVING, then make sure
2384 * we drop the reference we picked up to keep the
2385 * group around for the final INCLUDE {} enqueue.
2386 */
2387 if (igi->igi_version == IGMP_VERSION_3 &&
2388 inm->inm_state == IGMP_LEAVING_MEMBER)
2389 inm_release_locked(inm);
2390
2391 inm->inm_state = IGMP_REPORTING_MEMBER;
2392
2393 switch (igi->igi_version) {
2394 case IGMP_VERSION_1:
2395 case IGMP_VERSION_2:
2396 inm->inm_state = IGMP_IDLE_MEMBER;
2397 error = igmp_v1v2_queue_report(inm,
2398 (igi->igi_version == IGMP_VERSION_2) ?
2399 IGMP_v2_HOST_MEMBERSHIP_REPORT :
2400 IGMP_v1_HOST_MEMBERSHIP_REPORT);
2401 if (error == 0) {
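				/*
				 * RFC 1112/2236: an unsolicited report
				 * should be repeated after a short random
				 * delay; arm the group timer so fasttimo
				 * sends the second copy.
				 */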
2402 inm->inm_timer = IGMP_RANDOM_DELAY(
2403 IGMP_V1V2_MAX_RI * PR_FASTHZ);
2404 V_current_state_timers_running = 1;
2405 }
2406 break;
2407
2408 case IGMP_VERSION_3:
2409 /*
2410 * Defer update of T0 to T1, until the first copy
2411 * of the state change has been transmitted.
2412 */
2413 syncstates = 0;
2414
2415 /*
2416 * Immediately enqueue a State-Change Report for
2417 * this interface, freeing any previous reports.
2418 * Don't kick the timers if there is nothing to do,
2419 * or if an error occurred.
2420 */
2421 ifq = &inm->inm_scq;
2422 _IF_DRAIN(ifq);
2423 retval = igmp_v3_enqueue_group_record(ifq, inm, 1,
2424 0, 0);
2425 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
2426 __func__, retval);
2427 if (retval <= 0) {
2428				error = -retval;
2429 break;
2430 }
2431
2432 /*
2433 * Schedule transmission of pending state-change
2434 * report up to RV times for this link. The timer
2435 * will fire at the next igmp_fasttimo (~200ms),
2436 * giving us an opportunity to merge the reports.
2437 */
2438 if (igi->igi_flags & IGIF_LOOPBACK) {
2439 inm->inm_scrv = 1;
2440 } else {
2441 KASSERT(igi->igi_rv > 1,
2442 ("%s: invalid robustness %d", __func__,
2443 igi->igi_rv));
2444 inm->inm_scrv = igi->igi_rv;
2445 }
2446 inm->inm_sctimer = 1;
2447 V_state_change_timers_running = 1;
2448
2449 error = 0;
2450 break;
2451 }
2452 }
2453
2454 /*
2455 * Only update the T0 state if state change is atomic,
2456 * i.e. we don't need to wait for a timer to fire before we
2457 * can consider the state change to have been communicated.
2458 */
2459 if (syncstates) {
2460 inm_commit(inm);
2461 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2462 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2463 }
2464
2465 return (error);
2466}
2467
2468/*
2469 * Issue an intermediate state change during the IGMP life-cycle.
2470 */
2471static int
2472igmp_handle_state_change(struct in_multi *inm, struct igmp_ifinfo *igi)
2473{
2474 struct ifnet *ifp;
2475 int retval;
2476
2477 CTR4(KTR_IGMPV3, "%s: state change for %s on ifp %p(%s)",
2478 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2479 inm->inm_ifp->if_xname);
2480
2481 ifp = inm->inm_ifp;
2482
2483 IN_MULTI_LOCK_ASSERT();
2484 IGMP_LOCK_ASSERT();
2485
2486 KASSERT(igi && igi->igi_ifp == ifp, ("%s: inconsistent ifp", __func__));
2487
2488 if ((ifp->if_flags & IFF_LOOPBACK) ||
2489 (igi->igi_flags & IGIF_SILENT) ||
2490 !igmp_isgroupreported(inm->inm_addr) ||
2491 (igi->igi_version != IGMP_VERSION_3)) {
2492 if (!igmp_isgroupreported(inm->inm_addr)) {
2493 CTR1(KTR_IGMPV3,
2494"%s: not kicking state machine for silent group", __func__);
2495 }
2496 CTR1(KTR_IGMPV3, "%s: nothing to do", __func__);
2497 inm_commit(inm);
2498 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2499 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2500 return (0);
2501 }
2502
2503 _IF_DRAIN(&inm->inm_scq);
2504
2505 retval = igmp_v3_enqueue_group_record(&inm->inm_scq, inm, 1, 0, 0);
2506 CTR2(KTR_IGMPV3, "%s: enqueue record = %d", __func__, retval);
2507 if (retval <= 0)
2508 return (-retval);
2509
2510 /*
2511 * If record(s) were enqueued, start the state-change
2512 * report timer for this group.
2513 */
2514 inm->inm_scrv = ((igi->igi_flags & IGIF_LOOPBACK) ? 1 : igi->igi_rv);
2515 inm->inm_sctimer = 1;
2516 V_state_change_timers_running = 1;
2517
2518 return (0);
2519}
2520
2521/*
2522 * Perform the final leave for an IGMP group.
2523 *
2524 * When leaving a group:
2525 * IGMPv1 does nothing.
2526 * IGMPv2 sends a host leave message, if and only if we are the reporter.
2527 * IGMPv3 enqueues a state-change report containing a transition
2528 * to INCLUDE {} for immediate transmission.
2529 */
2530static void
2531igmp_final_leave(struct in_multi *inm, struct igmp_ifinfo *igi)
2532{
2533 int syncstates;
2534
2535 syncstates = 1;
2536
2537 CTR4(KTR_IGMPV3, "%s: final leave %s on ifp %p(%s)",
2538 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp,
2539 inm->inm_ifp->if_xname);
2540
2541 IN_MULTI_LOCK_ASSERT();
2542 IGMP_LOCK_ASSERT();
2543
2544 switch (inm->inm_state) {
2545 case IGMP_NOT_MEMBER:
2546 case IGMP_SILENT_MEMBER:
2547 case IGMP_LEAVING_MEMBER:
2548 /* Already leaving or left; do nothing. */
2549 CTR1(KTR_IGMPV3,
2550"%s: not kicking state machine for silent group", __func__);
2551 break;
2552 case IGMP_REPORTING_MEMBER:
2553 case IGMP_IDLE_MEMBER:
2554 case IGMP_G_QUERY_PENDING_MEMBER:
2555 case IGMP_SG_QUERY_PENDING_MEMBER:
2556 if (igi->igi_version == IGMP_VERSION_2) {
2557#ifdef INVARIANTS
2558 if (inm->inm_state == IGMP_G_QUERY_PENDING_MEMBER ||
2559 inm->inm_state == IGMP_SG_QUERY_PENDING_MEMBER)
2560 panic("%s: IGMPv3 state reached, not IGMPv3 mode",
2561 __func__);
2562#endif
2563 igmp_v1v2_queue_report(inm, IGMP_HOST_LEAVE_MESSAGE);
2564 inm->inm_state = IGMP_NOT_MEMBER;
2565 } else if (igi->igi_version == IGMP_VERSION_3) {
2566 /*
2567 * Stop group timer and all pending reports.
2568 * Immediately enqueue a state-change report
2569 * TO_IN {} to be sent on the next fast timeout,
2570 * giving us an opportunity to merge reports.
2571 */
2572 _IF_DRAIN(&inm->inm_scq);
2573 inm->inm_timer = 0;
2574 if (igi->igi_flags & IGIF_LOOPBACK) {
2575 inm->inm_scrv = 1;
2576 } else {
2577 inm->inm_scrv = igi->igi_rv;
2578 }
2579 CTR4(KTR_IGMPV3, "%s: Leaving %s/%s with %d "
2580 "pending retransmissions.", __func__,
2581 inet_ntoa(inm->inm_addr),
2582 inm->inm_ifp->if_xname, inm->inm_scrv);
2583 if (inm->inm_scrv == 0) {
2584 inm->inm_state = IGMP_NOT_MEMBER;
2585 inm->inm_sctimer = 0;
2586 } else {
2587 int retval;
2588
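				/*
				 * Hold an extra reference so the group
				 * survives until the pending TO_IN {}
				 * report has been retransmitted; it is
				 * released via igi_relinmhead from the
				 * fast timeout path.
				 */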
2589 inm_acquire_locked(inm);
2590
2591 retval = igmp_v3_enqueue_group_record(
2592 &inm->inm_scq, inm, 1, 0, 0);
2593 KASSERT(retval != 0,
2594 ("%s: enqueue record = %d", __func__,
2595 retval));
2596
2597 inm->inm_state = IGMP_LEAVING_MEMBER;
2598 inm->inm_sctimer = 1;
2599 V_state_change_timers_running = 1;
2600 syncstates = 0;
2601 }
2602 break;
2603 }
2604 break;
2605 case IGMP_LAZY_MEMBER:
2606 case IGMP_SLEEPING_MEMBER:
2607 case IGMP_AWAKENING_MEMBER:
2608 /* Our reports are suppressed; do nothing. */
2609 break;
2610 }
2611
2612 if (syncstates) {
2613 inm_commit(inm);
2614 CTR3(KTR_IGMPV3, "%s: T1 -> T0 for %s/%s", __func__,
2615 inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2616 inm->inm_st[1].iss_fmode = MCAST_UNDEFINED;
2617 CTR3(KTR_IGMPV3, "%s: T1 now MCAST_UNDEFINED for %s/%s",
2618 __func__, inet_ntoa(inm->inm_addr), inm->inm_ifp->if_xname);
2619 }
2620}
2621
2622/*
2623 * Enqueue an IGMPv3 group record to the given output queue.
2624 *
2625 * XXX This function could do with having the allocation code
2626 * split out, and the multiple-tree-walks coalesced into a single
2627 * routine as has been done in igmp_v3_enqueue_filter_change().
2628 *
2629 * If is_state_change is zero, a current-state record is appended.
2630 * If is_state_change is non-zero, a state-change report is appended.
2631 *
2632 * If is_group_query is non-zero, an mbuf packet chain is allocated.
2633 * If is_group_query is zero, and there is a packet with enough free
2634 * space at the tail of the queue, the group record will be appended
2635 * to that packet.
2636 * Otherwise a new mbuf packet chain is allocated.
2637 *
2638 * If is_source_query is non-zero, each source is checked to see if
2639 * it was recorded for a Group-Source query, and will be omitted if
2640 * it is not both in-mode and recorded.
2641 *
2642 * The function will attempt to allocate leading space in the packet
2643 * for the IP/IGMP header to be prepended without fragmenting the chain.
2644 *
2645 * If successful the size of all data appended to the queue is returned,
2646 * otherwise an error code less than zero is returned, or zero if
2647 * no record(s) were appended.
2648 */
2649static int
2650igmp_v3_enqueue_group_record(struct ifqueue *ifq, struct in_multi *inm,
2651 const int is_state_change, const int is_group_query,
2652 const int is_source_query)
2653{
2654 struct igmp_grouprec ig;
2655 struct igmp_grouprec *pig;
2656 struct ifnet *ifp;
2657 struct ip_msource *ims, *nims;
2658 struct mbuf *m0, *m, *md;
2659 int error, is_filter_list_change;
2660 int minrec0len, m0srcs, msrcs, nbytes, off;
2661 int record_has_sources;
2662 int now;
2663 int type;
2664 in_addr_t naddr;
2665 uint8_t mode;
2666
2667 IN_MULTI_LOCK_ASSERT();
2668
2669 error = 0;
2670 ifp = inm->inm_ifp;
2671 is_filter_list_change = 0;
2672 m = NULL;
2673 m0 = NULL;
2674 m0srcs = 0;
2675 msrcs = 0;
2676 nbytes = 0;
2677 nims = NULL;
2678 record_has_sources = 1;
2679 pig = NULL;
2680 type = IGMP_DO_NOTHING;
2681 mode = inm->inm_st[1].iss_fmode;
2682
2683 /*
2684 * If we did not transition out of ASM mode during t0->t1,
2685 * and there are no source nodes to process, we can skip
2686 * the generation of source records.
2687 */
2688 if (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0 &&
2689 inm->inm_nsrc == 0)
2690 record_has_sources = 0;
2691
2692 if (is_state_change) {
2693 /*
2694 * Queue a state change record.
2695 * If the mode did not change, and there are non-ASM
2696 * listeners or source filters present,
2697 * we potentially need to issue two records for the group.
2698 * If we are transitioning to MCAST_UNDEFINED, we need
2699 * not send any sources.
2700 * If there are ASM listeners, and there was no filter
2701 * mode transition of any kind, do nothing.
2702 */
2703 if (mode != inm->inm_st[0].iss_fmode) {
2704 if (mode == MCAST_EXCLUDE) {
2705 CTR1(KTR_IGMPV3, "%s: change to EXCLUDE",
2706 __func__);
2707 type = IGMP_CHANGE_TO_EXCLUDE_MODE;
2708 } else {
2709 CTR1(KTR_IGMPV3, "%s: change to INCLUDE",
2710 __func__);
2711 type = IGMP_CHANGE_TO_INCLUDE_MODE;
2712 if (mode == MCAST_UNDEFINED)
2713 record_has_sources = 0;
2714 }
2715 } else {
2716 if (record_has_sources) {
2717 is_filter_list_change = 1;
2718 } else {
2719 type = IGMP_DO_NOTHING;
2720 }
2721 }
2722 } else {
2723 /*
2724 * Queue a current state record.
2725 */
2726 if (mode == MCAST_EXCLUDE) {
2727 type = IGMP_MODE_IS_EXCLUDE;
2728 } else if (mode == MCAST_INCLUDE) {
2729 type = IGMP_MODE_IS_INCLUDE;
2730 KASSERT(inm->inm_st[1].iss_asm == 0,
2731 ("%s: inm %p is INCLUDE but ASM count is %d",
2732 __func__, inm, inm->inm_st[1].iss_asm));
2733 }
2734 }
2735
2736 /*
2737 * Generate the filter list changes using a separate function.
2738 */
2739 if (is_filter_list_change)
2740 return (igmp_v3_enqueue_filter_change(ifq, inm));
2741
2742 if (type == IGMP_DO_NOTHING) {
2743 CTR3(KTR_IGMPV3, "%s: nothing to do for %s/%s",
2744 __func__, inet_ntoa(inm->inm_addr),
2745 inm->inm_ifp->if_xname);
2746 return (0);
2747 }
2748
2749 /*
2750 * If any sources are present, we must be able to fit at least
2751 * one in the trailing space of the tail packet's mbuf,
2752 * ideally more.
2753 */
2754 minrec0len = sizeof(struct igmp_grouprec);
2755 if (record_has_sources)
2756 minrec0len += sizeof(in_addr_t);
2757
2758 CTR4(KTR_IGMPV3, "%s: queueing %s for %s/%s", __func__,
2759 igmp_rec_type_to_str(type), inet_ntoa(inm->inm_addr),
2760 inm->inm_ifp->if_xname);
2761
2762 /*
2763 * Check if we have a packet in the tail of the queue for this
2764 * group into which the first group record for this group will fit.
2765 * Otherwise allocate a new packet.
2766 * Always allocate leading space for IP+RA_OPT+IGMP+REPORT.
2767 * Note: Group records for G/GSR query responses MUST be sent
2768 * in their own packet.
2769 */
2770 m0 = ifq->ifq_tail;
2771 if (!is_group_query &&
2772 m0 != NULL &&
2773 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= IGMP_V3_REPORT_MAXRECS) &&
2774 (m0->m_pkthdr.len + minrec0len) <
2775 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
2776 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2777 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
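		/*
		 * m0srcs: how many more 32-bit source addresses will
		 * fit in the tail packet's remaining space.
		 */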
2778 m = m0;
2779 CTR1(KTR_IGMPV3, "%s: use existing packet", __func__);
2780 } else {
2781 if (_IF_QFULL(ifq)) {
2782 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2783 return (-ENOMEM);
2784 }
2785 m = NULL;
2786 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2787 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2788 if (!is_state_change && !is_group_query) {
2789 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2790 if (m)
2791 m->m_data += IGMP_LEADINGSPACE;
2792 }
2793 if (m == NULL) {
2794 m = m_gethdr(M_NOWAIT, MT_DATA);
2795 if (m)
2796 MH_ALIGN(m, IGMP_LEADINGSPACE);
2797 }
2798 if (m == NULL)
2799 return (-ENOMEM);
2800
2801 igmp_save_context(m, ifp);
2802
2803 CTR1(KTR_IGMPV3, "%s: allocated first packet", __func__);
2804 }
2805
2806 /*
2807 * Append group record.
2808 * If we have sources, we don't know how many yet.
2809 */
2810 ig.ig_type = type;
2811 ig.ig_datalen = 0;
2812 ig.ig_numsrc = 0;
2813 ig.ig_group = inm->inm_addr;
2814 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2815 if (m != m0)
2816 m_freem(m);
2817 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2818 return (-ENOMEM);
2819 }
2820 nbytes += sizeof(struct igmp_grouprec);
2821
2822 /*
2823 * Append as many sources as will fit in the first packet.
2824 * If we are appending to a new packet, the chain allocation
2825 * may potentially use clusters; use m_getptr() in this case.
2826 * If we are appending to an existing packet, we need to obtain
2827 * a pointer to the group record after m_append(), in case a new
2828 * mbuf was allocated.
2829 * Only append sources which are in-mode at t1. If we are
2830 * transitioning to MCAST_UNDEFINED state on the group, do not
2831 * include source entries.
2832 * Only report recorded sources in our filter set when responding
2833 * to a group-source query.
2834 */
2835 if (record_has_sources) {
2836 if (m == m0) {
2837 md = m_last(m);
2838 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2839 md->m_len - nbytes);
2840 } else {
2841 md = m_getptr(m, 0, &off);
2842 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) +
2843 off);
2844 }
2845 msrcs = 0;
2846 RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, nims) {
2847 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2848 inet_ntoa_haddr(ims->ims_haddr));
2849 now = ims_get_mode(inm, ims, 1);
2850 CTR2(KTR_IGMPV3, "%s: node is %d", __func__, now);
2851 if ((now != mode) ||
2852 (now == mode && mode == MCAST_UNDEFINED)) {
2853 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2854 continue;
2855 }
2856 if (is_source_query && ims->ims_stp == 0) {
2857 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2858 __func__);
2859 continue;
2860 }
2861 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2862 naddr = htonl(ims->ims_haddr);
2863 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2864 if (m != m0)
2865 m_freem(m);
2866 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2867 __func__);
2868 return (-ENOMEM);
2869 }
2870 nbytes += sizeof(in_addr_t);
2871 ++msrcs;
2872 if (msrcs == m0srcs)
2873 break;
2874 }
2875 CTR2(KTR_IGMPV3, "%s: msrcs is %d this packet", __func__,
2876 msrcs);
2877 pig->ig_numsrc = htons(msrcs);
2878 nbytes += (msrcs * sizeof(in_addr_t));
2879 }
2880
2881 if (is_source_query && msrcs == 0) {
2882 CTR1(KTR_IGMPV3, "%s: no recorded sources to report", __func__);
2883 if (m != m0)
2884 m_freem(m);
2885 return (0);
2886 }
2887
2888 /*
2889 * We are good to go with first packet.
2890 */
2891 if (m != m0) {
2892 CTR1(KTR_IGMPV3, "%s: enqueueing first packet", __func__);
2893 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2894 _IF_ENQUEUE(ifq, m);
2895 } else
2896 m->m_pkthdr.PH_vt.vt_nrecs++;
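	/*
	 * vt_nrecs counts the group records in each queued packet;
	 * igmp_v3_encap_report() later copies it into the report's
	 * ir_numgrps field.
	 */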
2897
2898 /*
2899 * No further work needed if no source list in packet(s).
2900 */
2901 if (!record_has_sources)
2902 return (nbytes);
2903
2904 /*
2905 * Whilst sources remain to be announced, we need to allocate
2906 * a new packet and fill out as many sources as will fit.
2907 * Always try for a cluster first.
2908 */
2909 while (nims != NULL) {
2910 if (_IF_QFULL(ifq)) {
2911 CTR1(KTR_IGMPV3, "%s: outbound queue full", __func__);
2912 return (-ENOMEM);
2913 }
2914 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
2915 if (m)
2916 m->m_data += IGMP_LEADINGSPACE;
2917 if (m == NULL) {
2918 m = m_gethdr(M_NOWAIT, MT_DATA);
2919 if (m)
2920 MH_ALIGN(m, IGMP_LEADINGSPACE);
2921 }
2922 if (m == NULL)
2923 return (-ENOMEM);
2924 igmp_save_context(m, ifp);
2925 md = m_getptr(m, 0, &off);
2926 pig = (struct igmp_grouprec *)(mtod(md, uint8_t *) + off);
2927 CTR1(KTR_IGMPV3, "%s: allocated next packet", __func__);
2928
2929 if (!m_append(m, sizeof(struct igmp_grouprec), (void *)&ig)) {
2930 if (m != m0)
2931 m_freem(m);
2932 CTR1(KTR_IGMPV3, "%s: m_append() failed.", __func__);
2933 return (-ENOMEM);
2934 }
2935 m->m_pkthdr.PH_vt.vt_nrecs = 1;
2936 nbytes += sizeof(struct igmp_grouprec);
2937
2938 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
2939 sizeof(struct igmp_grouprec)) / sizeof(in_addr_t);
2940
2941 msrcs = 0;
2942 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
2943 CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
2944 inet_ntoa_haddr(ims->ims_haddr));
2945 now = ims_get_mode(inm, ims, 1);
2946 if ((now != mode) ||
2947 (now == mode && mode == MCAST_UNDEFINED)) {
2948 CTR1(KTR_IGMPV3, "%s: skip node", __func__);
2949 continue;
2950 }
2951 if (is_source_query && ims->ims_stp == 0) {
2952 CTR1(KTR_IGMPV3, "%s: skip unrecorded node",
2953 __func__);
2954 continue;
2955 }
2956 CTR1(KTR_IGMPV3, "%s: append node", __func__);
2957 naddr = htonl(ims->ims_haddr);
2958 if (!m_append(m, sizeof(in_addr_t), (void *)&naddr)) {
2959 if (m != m0)
2960 m_freem(m);
2961 CTR1(KTR_IGMPV3, "%s: m_append() failed.",
2962 __func__);
2963 return (-ENOMEM);
2964 }
2965 ++msrcs;
2966 if (msrcs == m0srcs)
2967 break;
2968 }
2969 pig->ig_numsrc = htons(msrcs);
2970 nbytes += (msrcs * sizeof(in_addr_t));
2971
2972 CTR1(KTR_IGMPV3, "%s: enqueueing next packet", __func__);
2973 _IF_ENQUEUE(ifq, m);
2974 }
2975
2976 return (nbytes);
2977}
2978
2979/*
2980 * Type used to mark record pass completion.
2981 * We exploit the fact that the current filter mode on each
2982 * ip_msource node can be cast directly to this type.
2983 */
2984typedef enum {
2985 REC_NONE = 0x00, /* MCAST_UNDEFINED */
2986 REC_ALLOW = 0x01, /* MCAST_INCLUDE */
2987 REC_BLOCK = 0x02, /* MCAST_EXCLUDE */
2988 REC_FULL = REC_ALLOW | REC_BLOCK
2989} rectype_t;
2990
2991/*
2992 * Enqueue an IGMPv3 filter list change to the given output queue.
2993 *
2994 * Source list filter state is held in an RB-tree. When the filter list
2995 * for a group is changed without changing its mode, we need to compute
2996 * the deltas between T0 and T1 for each source in the filter set,
2997 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
2998 *
2999 * As we may potentially queue two record types, and the entire R-B tree
3000 * needs to be walked at once, we break this out into its own function
3001 * so we can generate a tightly packed queue of packets.
3002 *
3003 * XXX This could be written to only use one tree walk, although that makes
3004 * serializing into the mbuf chains a bit harder. For now we do two walks
3005 * which makes things easier on us, and it may or may not be harder on
3006 * the L2 cache.
3007 *
3008 * If successful the size of all data appended to the queue is returned,
3009 * otherwise an error code less than zero is returned, or zero if
3010 * no record(s) were appended.
3011 */
3012static int
3013igmp_v3_enqueue_filter_change(struct ifqueue *ifq, struct in_multi *inm)
3014{
3015 static const int MINRECLEN =
3016 sizeof(struct igmp_grouprec) + sizeof(in_addr_t);
3017 struct ifnet *ifp;
3018 struct igmp_grouprec ig;
3019 struct igmp_grouprec *pig;
3020 struct ip_msource *ims, *nims;
3021 struct mbuf *m, *m0, *md;
3022 in_addr_t naddr;
3023 int m0srcs, nbytes, npbytes, off, rsrcs, schanged;
3024 int nallow, nblock;
3025 uint8_t mode, now, then;
3026 rectype_t crt, drt, nrt;
3027
3028 IN_MULTI_LOCK_ASSERT();
3029
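	/*
	 * If the group has no source filter entries, or ASM listeners
	 * were present at both t0 and t1, there are no per-source
	 * deltas to report.
	 */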
3030 if (inm->inm_nsrc == 0 ||
3031 (inm->inm_st[0].iss_asm > 0 && inm->inm_st[1].iss_asm > 0))
3032 return (0);
3033
3034 ifp = inm->inm_ifp; /* interface */
3035 mode = inm->inm_st[1].iss_fmode; /* filter mode at t1 */
3036 crt = REC_NONE; /* current group record type */
3037 drt = REC_NONE; /* mask of completed group record types */
3038 nrt = REC_NONE; /* record type for current node */
3039	m0srcs = 0;	/* # sources which will fit in current mbuf chain */
3040 nbytes = 0; /* # of bytes appended to group's state-change queue */
3041 npbytes = 0; /* # of bytes appended this packet */
3042 rsrcs = 0; /* # sources encoded in current record */
3043 schanged = 0; /* # nodes encoded in overall filter change */
3044 nallow = 0; /* # of source entries in ALLOW_NEW */
3045 nblock = 0; /* # of source entries in BLOCK_OLD */
3046 nims = NULL; /* next tree node pointer */
3047
3048 /*
3049 * For each possible filter record mode.
3050 * The first kind of source we encounter tells us which
3051 * is the first kind of record we start appending.
3052 * If a node transitioned to UNDEFINED at t1, its mode is treated
3053 * as the inverse of the group's filter mode.
3054 */
3055 while (drt != REC_FULL) {
3056 do {
3057 m0 = ifq->ifq_tail;
3058 if (m0 != NULL &&
3059 (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
3060 IGMP_V3_REPORT_MAXRECS) &&
3061 (m0->m_pkthdr.len + MINRECLEN) <
3062 (ifp->if_mtu - IGMP_LEADINGSPACE)) {
3063 m = m0;
3064 m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
3065 sizeof(struct igmp_grouprec)) /
3066 sizeof(in_addr_t);
3067 CTR1(KTR_IGMPV3,
3068 "%s: use previous packet", __func__);
3069 } else {
3070 m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
3071 if (m)
3072 m->m_data += IGMP_LEADINGSPACE;
3073 if (m == NULL) {
3074 m = m_gethdr(M_NOWAIT, MT_DATA);
3075 if (m)
3076 MH_ALIGN(m, IGMP_LEADINGSPACE);
3077 }
3078 if (m == NULL) {
3079 CTR1(KTR_IGMPV3,
3080 "%s: m_get*() failed", __func__);
3081 return (-ENOMEM);
3082 }
3083 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3084 igmp_save_context(m, ifp);
3085 m0srcs = (ifp->if_mtu - IGMP_LEADINGSPACE -
3086 sizeof(struct igmp_grouprec)) /
3087 sizeof(in_addr_t);
3088 npbytes = 0;
3089 CTR1(KTR_IGMPV3,
3090 "%s: allocated new packet", __func__);
3091 }
3092 /*
3093 * Append the IGMP group record header to the
3094 * current packet's data area.
3095 * Recalculate pointer to free space for next
3096 * group record, in case m_append() allocated
3097 * a new mbuf or cluster.
3098 */
3099 memset(&ig, 0, sizeof(ig));
3100 ig.ig_group = inm->inm_addr;
3101 if (!m_append(m, sizeof(ig), (void *)&ig)) {
3102 if (m != m0)
3103 m_freem(m);
3104 CTR1(KTR_IGMPV3,
3105 "%s: m_append() failed", __func__);
3106 return (-ENOMEM);
3107 }
3108 npbytes += sizeof(struct igmp_grouprec);
3109 if (m != m0) {
3110				/* new packet; offset in chain */
3111 md = m_getptr(m, npbytes -
3112 sizeof(struct igmp_grouprec), &off);
3113 pig = (struct igmp_grouprec *)(mtod(md,
3114 uint8_t *) + off);
3115 } else {
3116 /* current packet; offset from last append */
3117 md = m_last(m);
3118 pig = (struct igmp_grouprec *)(mtod(md,
3119 uint8_t *) + md->m_len -
3120 sizeof(struct igmp_grouprec));
3121 }
3122 /*
3123 * Begin walking the tree for this record type
3124 * pass, or continue from where we left off
3125 * previously if we had to allocate a new packet.
3126 * Only report deltas in-mode at t1.
3127 * We need not report included sources as allowed
3128 * if we are in inclusive mode on the group,
3129 * however the converse is not true.
3130 */
3131 rsrcs = 0;
3132 if (nims == NULL)
3133 nims = RB_MIN(ip_msource_tree, &inm->inm_srcs);
3134 RB_FOREACH_FROM(ims, ip_msource_tree, nims) {
3135 CTR2(KTR_IGMPV3, "%s: visit node %s",
3136 __func__, inet_ntoa_haddr(ims->ims_haddr));
3137 now = ims_get_mode(inm, ims, 1);
3138 then = ims_get_mode(inm, ims, 0);
3139 CTR3(KTR_IGMPV3, "%s: mode: t0 %d, t1 %d",
3140 __func__, then, now);
3141 if (now == then) {
3142 CTR1(KTR_IGMPV3,
3143 "%s: skip unchanged", __func__);
3144 continue;
3145 }
3146 if (mode == MCAST_EXCLUDE &&
3147 now == MCAST_INCLUDE) {
3148 CTR1(KTR_IGMPV3,
3149 "%s: skip IN src on EX group",
3150 __func__);
3151 continue;
3152 }
3153 nrt = (rectype_t)now;
3154 if (nrt == REC_NONE)
3155 nrt = (rectype_t)(~mode & REC_FULL);
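				/*
				 * E.g. a source dropped from an EXCLUDE
				 * filter is newly permitted, so it goes
				 * into an ALLOW_NEW record; one dropped
				 * from an INCLUDE filter goes into
				 * BLOCK_OLD.
				 */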
3156 if (schanged++ == 0) {
3157 crt = nrt;
3158 } else if (crt != nrt)
3159 continue;
3160 naddr = htonl(ims->ims_haddr);
3161 if (!m_append(m, sizeof(in_addr_t),
3162 (void *)&naddr)) {
3163 if (m != m0)
3164 m_freem(m);
3165 CTR1(KTR_IGMPV3,
3166 "%s: m_append() failed", __func__);
3167 return (-ENOMEM);
3168 }
3169 nallow += !!(crt == REC_ALLOW);
3170 nblock += !!(crt == REC_BLOCK);
3171 if (++rsrcs == m0srcs)
3172 break;
3173 }
3174 /*
3175 * If we did not append any tree nodes on this
3176 * pass, back out of allocations.
3177 */
3178 if (rsrcs == 0) {
3179 npbytes -= sizeof(struct igmp_grouprec);
3180 if (m != m0) {
3181 CTR1(KTR_IGMPV3,
3182 "%s: m_free(m)", __func__);
3183 m_freem(m);
3184 } else {
3185 CTR1(KTR_IGMPV3,
3186 "%s: m_adj(m, -ig)", __func__);
3187 m_adj(m, -((int)sizeof(
3188 struct igmp_grouprec)));
3189 }
3190 continue;
3191 }
3192 npbytes += (rsrcs * sizeof(in_addr_t));
3193 if (crt == REC_ALLOW)
3194 pig->ig_type = IGMP_ALLOW_NEW_SOURCES;
3195 else if (crt == REC_BLOCK)
3196 pig->ig_type = IGMP_BLOCK_OLD_SOURCES;
3197 pig->ig_numsrc = htons(rsrcs);
3198 /*
3199 * Count the new group record, and enqueue this
3200 * packet if it wasn't already queued.
3201 */
3202 m->m_pkthdr.PH_vt.vt_nrecs++;
3203 if (m != m0)
3204 _IF_ENQUEUE(ifq, m);
3205 nbytes += npbytes;
3206 } while (nims != NULL);
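		/*
		 * The pass for one record type is complete; note it
		 * in drt, then walk the tree again for the
		 * complementary record type.
		 */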
3207 drt |= crt;
3208 crt = (~crt & REC_FULL);
3209 }
3210
3211 CTR3(KTR_IGMPV3, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
3212 nallow, nblock);
3213
3214 return (nbytes);
3215}
3216
3217static int
3218igmp_v3_merge_state_changes(struct in_multi *inm, struct ifqueue *ifscq)
3219{
3220 struct ifqueue *gq;
3221 struct mbuf *m; /* pending state-change */
3222 struct mbuf *m0; /* copy of pending state-change */
3223 struct mbuf *mt; /* last state-change in packet */
3224 int docopy, domerge;
3225 u_int recslen;
3226
3227 docopy = 0;
3228 domerge = 0;
3229 recslen = 0;
3230
3231 IN_MULTI_LOCK_ASSERT();
3232 IGMP_LOCK_ASSERT();
3233
3234 /*
3235 * If there are further pending retransmissions, make a writable
3236 * copy of each queued state-change message before merging.
3237 */
3238 if (inm->inm_scrv > 0)
3239 docopy = 1;
3240
3241 gq = &inm->inm_scq;
3242#ifdef KTR
3243 if (gq->ifq_head == NULL) {
3244 CTR2(KTR_IGMPV3, "%s: WARNING: queue for inm %p is empty",
3245 __func__, inm);
3246 }
3247#endif
3248
3249 m = gq->ifq_head;
3250 while (m != NULL) {
3251 /*
3252 * Only merge the report into the current packet if
3253 * there is sufficient space to do so; an IGMPv3 report
3254 * packet may only contain 65,535 group records.
3255	 * Always use a simple mbuf chain concatenation to do this,
3256 * as large state changes for single groups may have
3257 * allocated clusters.
3258 */
3259 domerge = 0;
3260 mt = ifscq->ifq_tail;
3261 if (mt != NULL) {
3262 recslen = m_length(m, NULL);
3263
3264 if ((mt->m_pkthdr.PH_vt.vt_nrecs +
3265 m->m_pkthdr.PH_vt.vt_nrecs <=
3266 IGMP_V3_REPORT_MAXRECS) &&
3267 (mt->m_pkthdr.len + recslen <=
3268 (inm->inm_ifp->if_mtu - IGMP_LEADINGSPACE)))
3269 domerge = 1;
3270 }
3271
3272 if (!domerge && _IF_QFULL(gq)) {
3273 CTR2(KTR_IGMPV3,
3274 "%s: outbound queue full, skipping whole packet %p",
3275 __func__, m);
3276 mt = m->m_nextpkt;
3277 if (!docopy)
3278 m_freem(m);
3279 m = mt;
3280 continue;
3281 }
3282
3283 if (!docopy) {
3284 CTR2(KTR_IGMPV3, "%s: dequeueing %p", __func__, m);
3285 _IF_DEQUEUE(gq, m0);
3286 m = m0->m_nextpkt;
3287 } else {
3288 CTR2(KTR_IGMPV3, "%s: copying %p", __func__, m);
3289 m0 = m_dup(m, M_NOWAIT);
3290 if (m0 == NULL)
3291 return (ENOMEM);
3292 m0->m_nextpkt = NULL;
3293 m = m->m_nextpkt;
3294 }
3295
3296 if (!domerge) {
3297			CTR3(KTR_IGMPV3, "%s: queueing %p to ifscq %p",
3298 __func__, m0, ifscq);
3299 _IF_ENQUEUE(ifscq, m0);
3300 } else {
3301 struct mbuf *mtl; /* last mbuf of packet mt */
3302
3303			CTR3(KTR_IGMPV3, "%s: merging %p with ifscq tail %p",
3304 __func__, m0, mt);
3305
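			/*
			 * Strip the pkthdr from the chain being merged
			 * and account for its records and length in the
			 * packet at the tail of ifscq before linking it.
			 */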
3306 mtl = m_last(mt);
3307 m0->m_flags &= ~M_PKTHDR;
3308 mt->m_pkthdr.len += recslen;
3309 mt->m_pkthdr.PH_vt.vt_nrecs +=
3310 m0->m_pkthdr.PH_vt.vt_nrecs;
3311
3312 mtl->m_next = m0;
3313 }
3314 }
3315
3316 return (0);
3317}
3318
3319/*
3320 * Respond to a pending IGMPv3 General Query.
3321 */
3322static void
3323igmp_v3_dispatch_general_query(struct igmp_ifinfo *igi)
3324{
3325 struct ifmultiaddr *ifma;
3326 struct ifnet *ifp;
3327 struct in_multi *inm;
3328 int retval, loop;
3329
3330 IN_MULTI_LOCK_ASSERT();
3331 IGMP_LOCK_ASSERT();
3332
3333 KASSERT(igi->igi_version == IGMP_VERSION_3,
3334 ("%s: called when version %d", __func__, igi->igi_version));
3335
3336 ifp = igi->igi_ifp;
3337
3338 IF_ADDR_RLOCK(ifp);
3339 TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
3340 if (ifma->ifma_addr->sa_family != AF_INET ||
3341 ifma->ifma_protospec == NULL)
3342 continue;
3343
3344 inm = (struct in_multi *)ifma->ifma_protospec;
3345 KASSERT(ifp == inm->inm_ifp,
3346 ("%s: inconsistent ifp", __func__));
3347
3348 switch (inm->inm_state) {
3349 case IGMP_NOT_MEMBER:
3350 case IGMP_SILENT_MEMBER:
3351 break;
3352 case IGMP_REPORTING_MEMBER:
3353 case IGMP_IDLE_MEMBER:
3354 case IGMP_LAZY_MEMBER:
3355 case IGMP_SLEEPING_MEMBER:
3356 case IGMP_AWAKENING_MEMBER:
3357 inm->inm_state = IGMP_REPORTING_MEMBER;
3358 retval = igmp_v3_enqueue_group_record(&igi->igi_gq,
3359 inm, 0, 0, 0);
3360 CTR2(KTR_IGMPV3, "%s: enqueue record = %d",
3361 __func__, retval);
3362 break;
3363 case IGMP_G_QUERY_PENDING_MEMBER:
3364 case IGMP_SG_QUERY_PENDING_MEMBER:
3365 case IGMP_LEAVING_MEMBER:
3366 break;
3367 }
3368 }
3369 IF_ADDR_RUNLOCK(ifp);
3370
3371 loop = (igi->igi_flags & IGIF_LOOPBACK) ? 1 : 0;
3372 igmp_dispatch_queue(&igi->igi_gq, IGMP_MAX_RESPONSE_BURST, loop);
3373
3374 /*
3375 * Slew transmission of bursts over 500ms intervals.
3376 */
3377 if (igi->igi_gq.ifq_head != NULL) {
3378 igi->igi_v3_timer = 1 + IGMP_RANDOM_DELAY(
3379 IGMP_RESPONSE_BURST_INTERVAL);
3380 V_interface_timers_running = 1;
3381 }
3382}
3383
3384/*
3385 * Transmit the next pending IGMP message in the output queue.
3386 *
3387 * We get called from netisr_processqueue(). A mutex private to igmpoq
3388 * will be acquired and released around this routine.
3389 *
3390 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
3391 * MRT: Nothing needs to be done, as IGMP traffic is always local to
3392 * a link and uses a link-scope multicast address.
3393 */
3394static void
3395igmp_intr(struct mbuf *m)
3396{
3397 struct ip_moptions imo;
3398 struct ifnet *ifp;
3399 struct mbuf *ipopts, *m0;
3400 int error;
3401 uint32_t ifindex;
3402
3403 CTR2(KTR_IGMPV3, "%s: transmit %p", __func__, m);
3404
3405 /*
3406 * Set VNET image pointer from enqueued mbuf chain
3407 * before doing anything else. Whilst we use interface
3408 * indexes to guard against interface detach, they are
3409 * unique to each VIMAGE and must be retrieved.
3410 */
3411 CURVNET_SET((struct vnet *)(m->m_pkthdr.PH_loc.ptr));
3412 ifindex = igmp_restore_context(m);
3413
3414 /*
3415 * Check if the ifnet still exists. This limits the scope of
3416 * any race in the absence of a global ifp lock for low cost
3417 * (an array lookup).
3418 */
3419 ifp = ifnet_byindex(ifindex);
3420 if (ifp == NULL) {
3421 CTR3(KTR_IGMPV3, "%s: dropped %p as ifindex %u went away.",
3422 __func__, m, ifindex);
3423 m_freem(m);
3424 IPSTAT_INC(ips_noroute);
3425 goto out;
3426 }
3427
3428 ipopts = V_igmp_sendra ? m_raopt : NULL;
3429
3430 imo.imo_multicast_ttl = 1;
3431 imo.imo_multicast_vif = -1;
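	/*
	 * Loop back our own output only while a multicast router is
	 * active on this host, so the local routing code also sees
	 * the membership traffic.
	 */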
3432 imo.imo_multicast_loop = (V_ip_mrouter != NULL);
3433
3434 /*
3435 * If the user requested that IGMP traffic be explicitly
3436 * redirected to the loopback interface (e.g. they are running a
3437 * MANET interface and the routing protocol needs to see the
3438 * updates), handle this now.
3439 */
3440 if (m->m_flags & M_IGMP_LOOP)
3441 imo.imo_multicast_ifp = V_loif;
3442 else
3443 imo.imo_multicast_ifp = ifp;
3444
3445 if (m->m_flags & M_IGMPV2) {
3446 m0 = m;
3447 } else {
3448 m0 = igmp_v3_encap_report(ifp, m);
3449 if (m0 == NULL) {
3450 CTR2(KTR_IGMPV3, "%s: dropped %p", __func__, m);
3451 m_freem(m);
3452 IPSTAT_INC(ips_odropped);
3453 goto out;
3454 }
3455 }
3456
3457 igmp_scrub_context(m0);
3458 m_clrprotoflags(m);
3459 m0->m_pkthdr.rcvif = V_loif;
3460#ifdef MAC
3461 mac_netinet_igmp_send(ifp, m0);
3462#endif
3463 error = ip_output(m0, ipopts, NULL, 0, &imo, NULL);
3464 if (error) {
3465 CTR3(KTR_IGMPV3, "%s: ip_output(%p) = %d", __func__, m0, error);
3466 goto out;
3467 }
3468
3469 IGMPSTAT_INC(igps_snd_reports);
3470
3471out:
3472 /*
3473 * We must restore the existing vnet pointer before
3474 * continuing as we are run from netisr context.
3475 */
3476 CURVNET_RESTORE();
3477}
3478
3479/*
3480 * Encapsulate an IGMPv3 report.
3481 *
3482 * The internal mbuf flag M_IGMPV3_HDR is used to indicate that the mbuf
3483 * chain has already had its IP/IGMPv3 header prepended. In this case
3484 * the function will not attempt to prepend; the lengths and checksums
3485 * will however be re-computed.
3486 *
3487 * Returns a pointer to the new mbuf chain head, or NULL if the
3488 * allocation failed.
3489 */
3490static struct mbuf *
3491igmp_v3_encap_report(struct ifnet *ifp, struct mbuf *m)
3492{
3493 struct igmp_report *igmp;
3494 struct ip *ip;
3495 int hdrlen, igmpreclen;
3496
3497 KASSERT((m->m_flags & M_PKTHDR),
3498 ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));
3499
3500 igmpreclen = m_length(m, NULL);
3501 hdrlen = sizeof(struct ip) + sizeof(struct igmp_report);
3502
3503 if (m->m_flags & M_IGMPV3_HDR) {
3504 igmpreclen -= hdrlen;
3505 } else {
3506 M_PREPEND(m, hdrlen, M_NOWAIT);
3507 if (m == NULL)
3508 return (NULL);
3509 m->m_flags |= M_IGMPV3_HDR;
3510 }
3511
3512 CTR2(KTR_IGMPV3, "%s: igmpreclen is %d", __func__, igmpreclen);
3513
3514 m->m_data += sizeof(struct ip);
3515 m->m_len -= sizeof(struct ip);
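	/*
	 * m_data now points at the IGMPv3 report header; the checksum
	 * below covers the report header plus all group records, but
	 * not the IP header in front of it.
	 */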
3516
3517 igmp = mtod(m, struct igmp_report *);
3518 igmp->ir_type = IGMP_v3_HOST_MEMBERSHIP_REPORT;
3519 igmp->ir_rsv1 = 0;
3520 igmp->ir_rsv2 = 0;
3521 igmp->ir_numgrps = htons(m->m_pkthdr.PH_vt.vt_nrecs);
3522 igmp->ir_cksum = 0;
3523 igmp->ir_cksum = in_cksum(m, sizeof(struct igmp_report) + igmpreclen);
3524 m->m_pkthdr.PH_vt.vt_nrecs = 0;
3525
3526 m->m_data -= sizeof(struct ip);
3527 m->m_len += sizeof(struct ip);
3528
3529 ip = mtod(m, struct ip *);
3530 ip->ip_tos = IPTOS_PREC_INTERNETCONTROL;
3531 ip->ip_len = htons(hdrlen + igmpreclen);
3532 ip->ip_off = htons(IP_DF);
3533 ip->ip_p = IPPROTO_IGMP;
3534 ip->ip_sum = 0;
3535
3536 ip->ip_src.s_addr = INADDR_ANY;
3537
3538 if (m->m_flags & M_IGMP_LOOP) {
3539 struct in_ifaddr *ia;
3540
3541 IFP_TO_IA(ifp, ia);
3542 if (ia != NULL) {
3543 ip->ip_src = ia->ia_addr.sin_addr;
3544 ifa_free(&ia->ia_ifa);
3545 }
3546 }
3547
3548 ip->ip_dst.s_addr = htonl(INADDR_ALLRPTS_GROUP);
3549
3550 return (m);
3551}
3552
3553#ifdef KTR
3554static char *
3555igmp_rec_type_to_str(const int type)
3556{
3557
3558 switch (type) {
3559 case IGMP_CHANGE_TO_EXCLUDE_MODE:
3560 return "TO_EX";
3561 break;
3562 case IGMP_CHANGE_TO_INCLUDE_MODE:
3563 return "TO_IN";
3564 break;
3565 case IGMP_MODE_IS_EXCLUDE:
3566 return "MODE_EX";
3567 break;
3568 case IGMP_MODE_IS_INCLUDE:
3569 return "MODE_IN";
3570 break;
3571 case IGMP_ALLOW_NEW_SOURCES:
3572 return "ALLOW_NEW";
3573 break;
3574 case IGMP_BLOCK_OLD_SOURCES:
3575 return "BLOCK_OLD";
3576 break;
3577 default:
3578 break;
3579 }
3580 return "unknown";
3581}
3582#endif
3583
3584static void
3585igmp_init(void *unused __unused)
3586{
3587
3588 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3589
3590 IGMP_LOCK_INIT();
3591
3592 m_raopt = igmp_ra_alloc();
3593
3594 netisr_register(&igmp_nh);
3595}
3596SYSINIT(igmp_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_init, NULL);
3597
3598static void
3599igmp_uninit(void *unused __unused)
3600{
3601
3602 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3603
3604 netisr_unregister(&igmp_nh);
3605
3606 m_free(m_raopt);
3607 m_raopt = NULL;
3608
3609 IGMP_LOCK_DESTROY();
3610}
3611SYSUNINIT(igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, igmp_uninit, NULL);
3612
3613static void
3614vnet_igmp_init(const void *unused __unused)
3615{
3616
3617 CTR1(KTR_IGMPV3, "%s: initializing", __func__);
3618
3619 LIST_INIT(&V_igi_head);
3620}
3621VNET_SYSINIT(vnet_igmp_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_igmp_init,
3622 NULL);
3623
3624static void
3625vnet_igmp_uninit(const void *unused __unused)
3626{
3627
3628 CTR1(KTR_IGMPV3, "%s: tearing down", __func__);
3629
3630 KASSERT(LIST_EMPTY(&V_igi_head),
3631 ("%s: igi list not empty; ifnets not detached?", __func__));
3632}
3633VNET_SYSUNINIT(vnet_igmp_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY,
3634 vnet_igmp_uninit, NULL);
3635
3636static int
3637igmp_modevent(module_t mod, int type, void *unused __unused)
3638{
3639
3640 switch (type) {
3641 case MOD_LOAD:
3642 case MOD_UNLOAD:
3643 break;
3644 default:
3645 return (EOPNOTSUPP);
3646 }
3647 return (0);
3648}
3649
3650static moduledata_t igmp_mod = {
3651 "igmp",
3652 igmp_modevent,
3653 0
3654};
3655DECLARE_MODULE(igmp, igmp_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);