/*-
 * Copyright (c) 2009 Bruce Simpson.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote
 *    products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$KAME: mld6.c,v 1.27 2001/04/04 05:17:30 itojun Exp $
 */

/*-
 * Copyright (c) 1988 Stephen Deering.
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Stephen Deering of Stanford University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)igmp.c	8.1 (Berkeley) 7/19/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/protosw.h>
#include <sys/sysctl.h>
#include <sys/kernel.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/ktr.h>

#include <net/if.h>
#include <net/route.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet6/in6_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/mld6.h>
#include <netinet6/mld6_var.h>

#include <security/mac/mac_framework.h>

#ifndef KTR_MLD
#define KTR_MLD KTR_INET6
#endif

static struct mld_ifinfo *
		mli_alloc_locked(struct ifnet *);
static void	mli_delete_locked(const struct ifnet *);
static void	mld_dispatch_packet(struct mbuf *);
static void	mld_dispatch_queue(struct ifqueue *, int);
static void	mld_final_leave(struct in6_multi *, struct mld_ifinfo *);
static void	mld_fasttimo_vnet(void);
static int	mld_handle_state_change(struct in6_multi *,
		    struct mld_ifinfo *);
static int	mld_initial_join(struct in6_multi *, struct mld_ifinfo *,
		    const int);
#ifdef KTR
static char *	mld_rec_type_to_str(const int);
#endif
static void	mld_set_version(struct mld_ifinfo *, const int);
static void	mld_slowtimo_vnet(void);
static int	mld_v1_input_query(struct ifnet *, const struct ip6_hdr *,
		    /*const*/ struct mld_hdr *);
static int	mld_v1_input_report(struct ifnet *, const struct ip6_hdr *,
		    /*const*/ struct mld_hdr *);
static void	mld_v1_process_group_timer(struct mld_ifinfo *,
		    struct in6_multi *);
static void	mld_v1_process_querier_timers(struct mld_ifinfo *);
static int	mld_v1_transmit_report(struct in6_multi *, const int);
static void	mld_v1_update_group(struct in6_multi *, const int);
static void	mld_v2_cancel_link_timers(struct mld_ifinfo *);
static void	mld_v2_dispatch_general_query(struct mld_ifinfo *);
static struct mbuf *
		mld_v2_encap_report(struct ifnet *, struct mbuf *);
static int	mld_v2_enqueue_filter_change(struct ifqueue *,
		    struct in6_multi *);
static int	mld_v2_enqueue_group_record(struct ifqueue *,
		    struct in6_multi *, const int, const int, const int,
		    const int);
static int	mld_v2_input_query(struct ifnet *, const struct ip6_hdr *,
		    struct mbuf *, const int, const int);
static int	mld_v2_merge_state_changes(struct in6_multi *,
		    struct ifqueue *);
static void	mld_v2_process_group_timers(struct mld_ifinfo *,
		    struct ifqueue *, struct ifqueue *,
		    struct in6_multi *, const int);
static int	mld_v2_process_group_query(struct in6_multi *,
		    struct mld_ifinfo *mli, int, struct mbuf *, const int);
static int	sysctl_mld_gsr(SYSCTL_HANDLER_ARGS);
static int	sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS);

/*
 * Normative references: RFC 2710, RFC 3590, RFC 3810.
 *
 * Locking:
 *  * The MLD subsystem lock ends up being system-wide for the moment,
 *    but could be per-VIMAGE later on.
 *  * The permitted lock order is: IN6_MULTI_LOCK, MLD_LOCK, IF_ADDR_LOCK.
 *    Any may be taken independently; if any are held at the same
 *    time, the above lock order must be followed.
 *  * IN6_MULTI_LOCK covers in6_multi.
 *  * MLD_LOCK covers per-link state and any global variables in this file.
 *  * IF_ADDR_LOCK covers if_multiaddrs, which is used for a variety of
 *    per-link state iterators.
 *
 *  XXX LOR PREVENTION
 *  A special case for IPv6 is the in6_setscope() routine. ip6_output()
 *  will not accept an ifp; it wants an embedded scope ID, unlike
 *  ip_output(), which happily takes the ifp given to it. The embedded
 *  scope ID is only used by MLD to select the outgoing interface.
 *
 *  During interface attach and detach, MLD will take MLD_LOCK *after*
 *  the IF_AFDATA_LOCK.
 *  As in6_setscope() takes IF_AFDATA_LOCK then SCOPE_LOCK, we can't call
 *  it with MLD_LOCK held without triggering an LOR. A netisr with indirect
 *  dispatch could work around this, but we'd rather not do that, as it
 *  can introduce other races.
 *
 *  As such, we exploit the fact that the scope ID is just the interface
 *  index, and embed it in the IPv6 destination address accordingly.
 *  This is potentially NOT VALID for MLDv1 reports, as they
 *  are always sent to the multicast group itself; as MLDv2
 *  reports are always sent to ff02::16, this is not an issue
 *  when MLDv2 is in use.
 *
 *  This does not however eliminate the LOR when ip6_output() itself
 *  calls in6_setscope() internally whilst MLD_LOCK is held. This will
 *  trigger a LOR warning in WITNESS when the ifnet is detached.
 *
 *  The right answer is probably to make IF_AFDATA_LOCK an rwlock, given
 *  how it's used across the network stack. Here we're simply exploiting
 *  the fact that MLD runs at a similar layer in the stack to scope6.c.
 *
 * VIMAGE:
 *  * Each in6_multi corresponds to an ifp, and each ifp corresponds
 *    to a vnet in ifp->if_vnet.
 */
static struct mtx		 mld_mtx;
static MALLOC_DEFINE(M_MLD, "mld", "mld state");

#define	MLD_EMBEDSCOPE(pin6, zoneid)					\
	if (IN6_IS_SCOPE_LINKLOCAL(pin6) ||				\
	    IN6_IS_ADDR_MC_INTFACELOCAL(pin6))				\
		(pin6)->s6_addr16[1] = htons((zoneid) & 0xFFFF)

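/*
 * Example of the KAME convention the macro implements (a sketch with
 * illustrative values): embedding zone ID 4 in the link-scope group
 * ff02::1 stores htons(4) in s6_addr16[1], yielding ff02:4::1.
 * in6_clearscope() performs the inverse mapping before such an
 * address is put on the wire or exposed to userland.
 *
 *	struct in6_addr dst = in6addr_linklocal_allnodes;	// ff02::1
 *	MLD_EMBEDSCOPE(&dst, 4);				// ff02:4::1
 */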
/*
 * VIMAGE-wide globals.
 */
static VNET_DEFINE(struct timeval, mld_gsrdelay) = {10, 0};
static VNET_DEFINE(LIST_HEAD(, mld_ifinfo), mli_head);
static VNET_DEFINE(int, interface_timers_running6);
static VNET_DEFINE(int, state_change_timers_running6);
static VNET_DEFINE(int, current_state_timers_running6);

#define	V_mld_gsrdelay			VNET(mld_gsrdelay)
#define	V_mli_head			VNET(mli_head)
#define	V_interface_timers_running6	VNET(interface_timers_running6)
#define	V_state_change_timers_running6	VNET(state_change_timers_running6)
#define	V_current_state_timers_running6	VNET(current_state_timers_running6)

SYSCTL_DECL(_net_inet6);	/* Note: Not in any common header. */

SYSCTL_NODE(_net_inet6, OID_AUTO, mld, CTLFLAG_RW, 0,
    "IPv6 Multicast Listener Discovery");

/*
 * Virtualized sysctls.
 */
SYSCTL_VNET_PROC(_net_inet6_mld, OID_AUTO, gsrdelay,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    &VNET_NAME(mld_gsrdelay.tv_sec), 0, sysctl_mld_gsr, "I",
    "Rate limit for MLDv2 Group-and-Source queries in seconds");

/*
 * Non-virtualized sysctls.
 */
static SYSCTL_NODE(_net_inet6_mld, OID_AUTO, ifinfo,
    CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_mld_ifinfo,
    "Per-interface MLDv2 state");

static int	mld_v1enable = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, v1enable, CTLFLAG_RW,
    &mld_v1enable, 0, "Enable fallback to MLDv1");
TUNABLE_INT("net.inet6.mld.v1enable", &mld_v1enable);

static int	mld_use_allow = 1;
SYSCTL_INT(_net_inet6_mld, OID_AUTO, use_allow, CTLFLAG_RW,
    &mld_use_allow, 0, "Use ALLOW/BLOCK for RFC 4604 SSM joins/leaves");
TUNABLE_INT("net.inet6.mld.use_allow", &mld_use_allow);

/*
 * Packed Router Alert option structure declaration.
 */
struct mld_raopt {
	struct ip6_hbh		hbh;
	struct ip6_opt		pad;
	struct ip6_opt_router	ra;
} __packed;

/*
 * Router Alert hop-by-hop option header.
 */
static struct mld_raopt mld_ra = {
	.hbh = { 0, 0 },
	.pad = { .ip6o_type = IP6OPT_PADN, 0 },
	.ra = {
	    .ip6or_type = IP6OPT_ROUTER_ALERT,
	    .ip6or_len = IP6OPT_RTALERT_LEN - 2,
	    .ip6or_value[0] = ((IP6OPT_RTALERT_MLD >> 8) & 0xFF),
	    .ip6or_value[1] = (IP6OPT_RTALERT_MLD & 0xFF)
	}
};
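
/*
 * Wire layout of the initializer above, assuming the three structures
 * pack to the minimum hop-by-hop unit of 8 octets (ip6h_len counts
 * 8-octet units beyond the first, hence 0 here):
 *
 *	2 octets  ip6_hbh		(next header, length 0)
 *	2 octets  PadN option		(type, zero option length)
 *	4 octets  Router Alert option	(type, length 2, 16-bit value)
 *
 * The value octets carry IP6OPT_RTALERT_MLD in network byte order.
 */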
static struct ip6_pktopts mld_po;

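/*
 * Save enough context to dispatch a queued packet later: the vnet
 * (under VIMAGE) and the transmit ifindex are stashed in the mbuf
 * packet header, and recovered by mld_restore_context() below.
 */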
static __inline void
mld_save_context(struct mbuf *m, struct ifnet *ifp)
{

#ifdef VIMAGE
	m->m_pkthdr.header = ifp->if_vnet;
#endif /* VIMAGE */
	m->m_pkthdr.flowid = ifp->if_index;
}

static __inline void
mld_scrub_context(struct mbuf *m)
{

	m->m_pkthdr.header = NULL;
	m->m_pkthdr.flowid = 0;
}

/*
 * Restore context from a queued output chain.
 * Return saved ifindex.
 *
 * VIMAGE: The assertion is there to make sure that we
 * actually called CURVNET_SET() with what's in the mbuf chain.
 */
static __inline uint32_t
mld_restore_context(struct mbuf *m)
{

#if defined(VIMAGE) && defined(INVARIANTS)
	KASSERT(curvnet == m->m_pkthdr.header,
	    ("%s: called when curvnet was not restored", __func__));
#endif
	return (m->m_pkthdr.flowid);
}

/*
 * Retrieve or set threshold between group-source queries in seconds.
 *
 * VIMAGE: Assume curvnet set by caller.
 * SMPng: NOTE: Serialized by MLD lock.
 */
static int
sysctl_mld_gsr(SYSCTL_HANDLER_ARGS)
{
	int error;
	int i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error)
		return (error);

	MLD_LOCK();

	i = V_mld_gsrdelay.tv_sec;

	error = sysctl_handle_int(oidp, &i, 0, req);
	if (error || !req->newptr)
		goto out_locked;

	if (i < -1 || i >= 60) {
		error = EINVAL;
		goto out_locked;
	}

	CTR2(KTR_MLD, "change mld_gsrdelay from %d to %d",
	     V_mld_gsrdelay.tv_sec, i);
	V_mld_gsrdelay.tv_sec = i;

out_locked:
	MLD_UNLOCK();
	return (error);
}

/*
 * Expose struct mld_ifinfo to userland, keyed by ifindex.
 * For use by ifmcstat(8).
 *
 * SMPng: NOTE: Does an unlocked ifindex space read.
 * VIMAGE: Assume curvnet set by caller. The node handler itself
 * is not directly virtualized.
 */
static int
sysctl_mld_ifinfo(SYSCTL_HANDLER_ARGS)
{
	int			*name;
	int			 error;
	u_int			 namelen;
	struct ifnet		*ifp;
	struct mld_ifinfo	*mli;

	name = (int *)arg1;
	namelen = arg2;

	if (req->newptr != NULL)
		return (EPERM);

	if (namelen != 1)
		return (EINVAL);

	error = sysctl_wire_old_buffer(req, sizeof(struct mld_ifinfo));
	if (error)
		return (error);

	IN6_MULTI_LOCK();
	MLD_LOCK();

	if (name[0] <= 0 || name[0] > V_if_index) {
		error = ENOENT;
		goto out_locked;
	}

	error = ENOENT;

	ifp = ifnet_byindex(name[0]);
	if (ifp == NULL)
		goto out_locked;

	LIST_FOREACH(mli, &V_mli_head, mli_link) {
		if (ifp == mli->mli_ifp) {
			error = SYSCTL_OUT(req, mli,
			    sizeof(struct mld_ifinfo));
			break;
		}
	}

out_locked:
	MLD_UNLOCK();
	IN6_MULTI_UNLOCK();
	return (error);
}

/*
 * Dispatch an entire queue of pending packet chains.
 * VIMAGE: Assumes the vnet pointer has been set.
 */
static void
mld_dispatch_queue(struct ifqueue *ifq, int limit)
{
	struct mbuf *m;

	for (;;) {
		_IF_DEQUEUE(ifq, m);
		if (m == NULL)
			break;
		CTR3(KTR_MLD, "%s: dispatch %p from %p", __func__, ifq, m);
		mld_dispatch_packet(m);
		if (--limit == 0)
			break;
	}
}

/*
 * Filter outgoing MLD report state by group.
 *
 * Reports are ALWAYS suppressed for the all-nodes group (ff02::1)
 * and node-local addresses. However, kernel and socket consumers
 * always embed the KAME scope ID in the address provided, so strip it
 * when performing comparison.
 * Note: This is not the same as the *multicast* scope.
 *
 * Return zero if the given group is one for which MLD reports
 * should be suppressed, or non-zero if reports should be issued.
 */
static __inline int
mld_is_addr_reported(const struct in6_addr *addr)
{

	KASSERT(IN6_IS_ADDR_MULTICAST(addr), ("%s: not multicast", __func__));

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_NODELOCAL)
		return (0);

	if (IPV6_ADDR_MC_SCOPE(addr) == IPV6_ADDR_SCOPE_LINKLOCAL) {
		struct in6_addr tmp = *addr;
		in6_clearscope(&tmp);
		if (IN6_ARE_ADDR_EQUAL(&tmp, &in6addr_linklocal_allnodes))
			return (0);
	}

	return (1);
}
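
/*
 * For example, per the checks above: reports for ff01::1 (node-local
 * scope) and for the link-scope all-nodes group ff02::1 are
 * suppressed, while a solicited-node group such as ff02::1:ff00:1
 * is reported.
 */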

/*
 * Attach MLD when PF_INET6 is attached to an interface.
 *
 * SMPng: Normally called with IF_AFDATA_LOCK held.
 */
struct mld_ifinfo *
mld_domifattach(struct ifnet *ifp)
{
	struct mld_ifinfo *mli;

	CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	MLD_LOCK();

	mli = mli_alloc_locked(ifp);
	if (!(ifp->if_flags & IFF_MULTICAST))
		mli->mli_flags |= MLIF_SILENT;
	if (mld_use_allow)
		mli->mli_flags |= MLIF_USEALLOW;

	MLD_UNLOCK();

	return (mli);
}

/*
 * VIMAGE: assume curvnet set by caller.
 */
static struct mld_ifinfo *
mli_alloc_locked(/*const*/ struct ifnet *ifp)
{
	struct mld_ifinfo *mli;

	MLD_LOCK_ASSERT();

	mli = malloc(sizeof(struct mld_ifinfo), M_MLD, M_NOWAIT|M_ZERO);
	if (mli == NULL)
		goto out;

	mli->mli_ifp = ifp;
	mli->mli_version = MLD_VERSION_2;
	mli->mli_flags = 0;
	mli->mli_rv = MLD_RV_INIT;
	mli->mli_qi = MLD_QI_INIT;
	mli->mli_qri = MLD_QRI_INIT;
	mli->mli_uri = MLD_URI_INIT;

	SLIST_INIT(&mli->mli_relinmhead);

	/*
	 * Responses to general queries are subject to bounds.
	 */
	IFQ_SET_MAXLEN(&mli->mli_gq, MLD_MAX_RESPONSE_PACKETS);

	LIST_INSERT_HEAD(&V_mli_head, mli, mli_link);

	CTR2(KTR_MLD, "allocate mld_ifinfo for ifp %p(%s)",
	     ifp, ifp->if_xname);

out:
	return (mli);
}

/*
 * Hook for ifdetach.
 *
 * NOTE: Some finalization tasks need to run before the protocol domain
 * is detached, but also before the link layer does its cleanup.
 * Run before link-layer cleanup; cleanup groups, but do not free MLD state.
 *
 * SMPng: Caller must hold IN6_MULTI_LOCK().
 * Must take IF_ADDR_LOCK() to cover if_multiaddrs iterator.
 * XXX This routine is also bitten by unlocked ifma_protospec access.
 */
void
mld_ifdetach(struct ifnet *ifp)
{
	struct mld_ifinfo	*mli;
	struct ifmultiaddr	*ifma;
	struct in6_multi	*inm, *tinm;

	CTR3(KTR_MLD, "%s: called for ifp %p(%s)", __func__, ifp,
	    ifp->if_xname);

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK();

	mli = MLD_IFINFO(ifp);
	if (mli->mli_version == MLD_VERSION_2) {
		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_INET6 ||
			    ifma->ifma_protospec == NULL)
				continue;
			inm = (struct in6_multi *)ifma->ifma_protospec;
			if (inm->in6m_state == MLD_LEAVING_MEMBER) {
				SLIST_INSERT_HEAD(&mli->mli_relinmhead,
				    inm, in6m_nrele);
			}
			in6m_clear_recorded(inm);
		}
		IF_ADDR_RUNLOCK(ifp);
		SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele,
		    tinm) {
			SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
			in6m_release_locked(inm);
		}
	}

	MLD_UNLOCK();
}

/*
 * Hook for domifdetach.
 * Runs after link-layer cleanup; free MLD state.
 *
 * SMPng: Normally called with IF_AFDATA_LOCK held.
 */
void
mld_domifdetach(struct ifnet *ifp)
{

	CTR3(KTR_MLD, "%s: called for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	MLD_LOCK();
	mli_delete_locked(ifp);
	MLD_UNLOCK();
}

static void
mli_delete_locked(const struct ifnet *ifp)
{
	struct mld_ifinfo *mli, *tmli;

	CTR3(KTR_MLD, "%s: freeing mld_ifinfo for ifp %p(%s)",
	    __func__, ifp, ifp->if_xname);

	MLD_LOCK_ASSERT();

	LIST_FOREACH_SAFE(mli, &V_mli_head, mli_link, tmli) {
		if (mli->mli_ifp == ifp) {
			/*
			 * Free deferred General Query responses.
			 */
			_IF_DRAIN(&mli->mli_gq);

			LIST_REMOVE(mli, mli_link);

			KASSERT(SLIST_EMPTY(&mli->mli_relinmhead),
			    ("%s: there are dangling in_multi references",
			    __func__));

			free(mli, M_MLD);
			return;
		}
	}
#ifdef INVARIANTS
	panic("%s: mld_ifinfo not found for ifp %p\n", __func__, ifp);
#endif
}

/*
 * Process a received MLDv1 general or address-specific query.
 * Assumes that the query header has been pulled up to sizeof(mld_hdr).
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
	struct ifmultiaddr	*ifma;
	struct mld_ifinfo	*mli;
	struct in6_multi	*inm;
	int			 is_general_query;
	uint16_t		 timer;
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	is_general_query = 0;

	if (!mld_v1enable) {
		CTR3(KTR_MLD, "ignore v1 query %s on ifp %p(%s)",
		    ip6_sprintf(ip6tbuf, &mld->mld_addr),
		    ifp, ifp->if_xname);
		return (0);
	}

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		CTR3(KTR_MLD, "ignore v1 query src %s on ifp %p(%s)",
		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
		    ifp, ifp->if_xname);
		return (0);
	}

	/*
	 * Do address field validation upfront before we accept
	 * the query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * MLDv1 General Query.
		 * If this was not sent to the all-nodes group, ignore it.
		 */
		struct in6_addr		 dst;

		dst = ip6->ip6_dst;
		in6_clearscope(&dst);
		if (!IN6_ARE_ADDR_EQUAL(&dst, &in6addr_linklocal_allnodes))
			return (EINVAL);
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks.
		 */
		in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	IN6_MULTI_LOCK();
	MLD_LOCK();

	/*
	 * Switch to MLDv1 host compatibility mode.
	 */
	mli = MLD_IFINFO(ifp);
	KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));
	mld_set_version(mli, MLD_VERSION_1);

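	/*
	 * Scale the max response delay from milliseconds on the wire to
	 * fast-timeout ticks: assuming PR_FASTHZ ticks per second and an
	 * MLD_TIMER_SCALE of 1000, a 10000 ms delay becomes 10 seconds'
	 * worth of ticks.  Zero is rounded up to one tick so a response
	 * is always scheduled.
	 */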
	timer = (ntohs(mld->mld_maxdelay) * PR_FASTHZ) / MLD_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	IF_ADDR_RLOCK(ifp);
	if (is_general_query) {
		/*
		 * For each reporting group joined on this
		 * interface, kick the report timer.
		 */
		CTR2(KTR_MLD, "process v1 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_INET6 ||
			    ifma->ifma_protospec == NULL)
				continue;
			inm = (struct in6_multi *)ifma->ifma_protospec;
			mld_v1_update_group(inm, timer);
		}
	} else {
		/*
		 * MLDv1 Group-Specific Query.
		 * If this is a group-specific MLDv1 query, we need only
		 * look up the single group to process it.
		 */
		inm = in6m_lookup_locked(ifp, &mld->mld_addr);
		if (inm != NULL) {
			CTR3(KTR_MLD, "process v1 query %s on ifp %p(%s)",
			    ip6_sprintf(ip6tbuf, &mld->mld_addr),
			    ifp, ifp->if_xname);
			mld_v1_update_group(inm, timer);
		}
		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
	}

	IF_ADDR_RUNLOCK(ifp);
	MLD_UNLOCK();
	IN6_MULTI_UNLOCK();

	return (0);
}

/*
 * Update the report timer on a group in response to an MLDv1 query.
 *
 * If we are becoming the reporting member for this group, start the timer.
 * If we already are the reporting member for this group, and timer is
 * below the threshold, reset it.
 *
 * We may be updating the group for the first time since we switched
 * to MLDv2. If we are, then we must clear any recorded source lists,
 * and transition to REPORTING state; the group timer is overloaded
 * for group and group-source query responses.
 *
 * Unlike MLDv2, the delay per group should be jittered
 * to avoid bursts of MLDv1 reports.
 */
static void
mld_v1_update_group(struct in6_multi *inm, const int timer)
{
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	CTR4(KTR_MLD, "%s: %s/%s timer=%d", __func__,
	    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    inm->in6m_ifp->if_xname, timer);

	IN6_MULTI_LOCK_ASSERT();

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (inm->in6m_timer != 0 &&
		    inm->in6m_timer <= timer) {
			CTR1(KTR_MLD, "%s: REPORTING and timer running, "
			    "skipping.", __func__);
			break;
		}
		/* FALLTHROUGH */
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
		CTR1(KTR_MLD, "%s: ->REPORTING", __func__);
		inm->in6m_state = MLD_REPORTING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		V_current_state_timers_running6 = 1;
		break;
	case MLD_SLEEPING_MEMBER:
		CTR1(KTR_MLD, "%s: ->AWAKENING", __func__);
		inm->in6m_state = MLD_AWAKENING_MEMBER;
		break;
	case MLD_LEAVING_MEMBER:
		break;
	}
}

/*
 * Process a received MLDv2 general, group-specific or
 * group-and-source-specific query.
 *
 * Assumes that the query header has been pulled up to sizeof(mldv2_query).
 *
 * Return 0 if successful, otherwise an appropriate error code is returned.
 */
static int
mld_v2_input_query(struct ifnet *ifp, const struct ip6_hdr *ip6,
    struct mbuf *m, const int off, const int icmp6len)
{
	struct mld_ifinfo	*mli;
	struct mldv2_query	*mld;
	struct in6_multi	*inm;
	uint32_t		 maxdelay, nsrc, qqi;
	int			 is_general_query;
	uint16_t		 timer;
	uint8_t			 qrv;
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	is_general_query = 0;

	/*
	 * RFC3810 Section 6.2: MLD queries must originate from
	 * a router's link-local address.
	 */
	if (!IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
		CTR3(KTR_MLD, "ignore v2 query src %s on ifp %p(%s)",
		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
		    ifp, ifp->if_xname);
		return (0);
	}

	CTR2(KTR_MLD, "input v2 query on ifp %p(%s)", ifp, ifp->if_xname);

	mld = (struct mldv2_query *)(mtod(m, uint8_t *) + off);

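	/*
	 * Decode the Maximum Response Code (RFC 3810, Section 5.1.3).
	 * Codes >= 32768 are floating point: mantissa in the low 12
	 * bits, exponent in bits 12-14, with an implicit 0x1000 bit.
	 * For example, an on-wire code of 0x8400 decodes to
	 * (0x400 | 0x1000) << (0 + 3) = 40960 ms.
	 */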
	maxdelay = ntohs(mld->mld_maxdelay);	/* in milliseconds */
	if (maxdelay >= 32768) {
		maxdelay = (MLD_MRC_MANT(maxdelay) | 0x1000) <<
			   (MLD_MRC_EXP(maxdelay) + 3);
	}
	timer = (maxdelay * PR_FASTHZ) / MLD_TIMER_SCALE;
	if (timer == 0)
		timer = 1;

	qrv = MLD_QRV(mld->mld_misc);
	if (qrv < 2) {
		CTR3(KTR_MLD, "%s: clamping qrv %d to %d", __func__,
		    qrv, MLD_RV_INIT);
		qrv = MLD_RV_INIT;
	}

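	/*
	 * Decode the Querier's Query Interval Code similarly (RFC 3810,
	 * Section 5.1.9): codes >= 128 are floating point, mantissa in
	 * the low 4 bits and exponent in bits 4-6, so e.g. 0x8a decodes
	 * to 0xa << (0 + 3) = 80 seconds with the macros below.  (The
	 * RFC form additionally ORs an implicit 0x10 into the mantissa.)
	 */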
	qqi = mld->mld_qqi;
	if (qqi >= 128) {
		qqi = MLD_QQIC_MANT(mld->mld_qqi) <<
		     (MLD_QQIC_EXP(mld->mld_qqi) + 3);
	}

	nsrc = ntohs(mld->mld_numsrc);
	if (nsrc > MLD_MAX_GS_SOURCES)
		return (EMSGSIZE);
	if (icmp6len < sizeof(struct mldv2_query) +
	    (nsrc * sizeof(struct in6_addr)))
		return (EMSGSIZE);

	/*
	 * Do further input validation upfront to avoid resetting timers
	 * should we need to discard this query.
	 */
	if (IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr)) {
		/*
		 * A general query with a source list has undefined
		 * behaviour; discard it.
		 */
		if (nsrc > 0)
			return (EINVAL);
		is_general_query = 1;
	} else {
		/*
		 * Embed scope ID of receiving interface in MLD query for
		 * lookup whilst we don't hold other locks (due to KAME
		 * locking lameness). We own this mbuf chain just now.
		 */
		in6_setscope(&mld->mld_addr, ifp, NULL);
	}

	IN6_MULTI_LOCK();
	MLD_LOCK();

	mli = MLD_IFINFO(ifp);
	KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));

	/*
	 * Discard the v2 query if we're in Compatibility Mode.
	 * The RFC is pretty clear that hosts need to stay in MLDv1 mode
	 * until the Old Version Querier Present timer expires.
	 */
	if (mli->mli_version != MLD_VERSION_2)
		goto out_locked;

	mld_set_version(mli, MLD_VERSION_2);
	mli->mli_rv = qrv;
	mli->mli_qi = qqi;
	mli->mli_qri = maxdelay;

	CTR4(KTR_MLD, "%s: qrv %d qi %d maxdelay %d", __func__, qrv, qqi,
	    maxdelay);

	if (is_general_query) {
		/*
		 * MLDv2 General Query.
		 *
		 * Schedule a current-state report on this ifp for
		 * all groups, possibly containing source lists.
		 *
		 * If there is a pending General Query response
		 * scheduled earlier than the selected delay, do
		 * not schedule any other reports.
		 * Otherwise, reset the interface timer.
		 */
		CTR2(KTR_MLD, "process v2 general query on ifp %p(%s)",
		    ifp, ifp->if_xname);
		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer) {
			mli->mli_v2_timer = MLD_RANDOM_DELAY(timer);
			V_interface_timers_running6 = 1;
		}
	} else {
		/*
		 * MLDv2 Group-specific or Group-and-source-specific Query.
		 *
		 * Group-source-specific queries are throttled on
		 * a per-group basis to defeat denial-of-service attempts.
		 * Queries for groups we are not a member of on this
		 * link are simply ignored.
		 */
		IF_ADDR_RLOCK(ifp);
		inm = in6m_lookup_locked(ifp, &mld->mld_addr);
		if (inm == NULL) {
			IF_ADDR_RUNLOCK(ifp);
			goto out_locked;
		}
		if (nsrc > 0) {
			if (!ratecheck(&inm->in6m_lastgsrtv,
			    &V_mld_gsrdelay)) {
				CTR1(KTR_MLD, "%s: GS query throttled.",
				    __func__);
				IF_ADDR_RUNLOCK(ifp);
				goto out_locked;
			}
		}
		CTR2(KTR_MLD, "process v2 group query on ifp %p(%s)",
		     ifp, ifp->if_xname);
		/*
		 * If there is a pending General Query response
		 * scheduled sooner than the selected delay, no
		 * further report need be scheduled.
		 * Otherwise, prepare to respond to the
		 * group-specific or group-and-source query.
		 */
		if (mli->mli_v2_timer == 0 || mli->mli_v2_timer >= timer)
			mld_v2_process_group_query(inm, mli, timer, m, off);

		/* XXX Clear embedded scope ID as userland won't expect it. */
		in6_clearscope(&mld->mld_addr);
		IF_ADDR_RUNLOCK(ifp);
	}

out_locked:
	MLD_UNLOCK();
	IN6_MULTI_UNLOCK();

	return (0);
}

/*
 * Process a received MLDv2 group-specific or group-and-source-specific
 * query.
 * Return <0 if any error occurred. Currently this is ignored.
 */
static int
mld_v2_process_group_query(struct in6_multi *inm, struct mld_ifinfo *mli,
    int timer, struct mbuf *m0, const int off)
{
	struct mldv2_query	*mld;
	int			 retval;
	uint16_t		 nsrc;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	retval = 0;
	mld = (struct mldv2_query *)(mtod(m0, uint8_t *) + off);

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LEAVING_MEMBER:
		return (retval);
		break;
	case MLD_REPORTING_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		break;
	}

	nsrc = ntohs(mld->mld_numsrc);

	/*
	 * Deal with group-specific queries upfront.
	 * If any group query is already pending, purge any recorded
	 * source-list state if it exists, and schedule a query response
	 * for this group-specific query.
	 */
	if (nsrc == 0) {
		if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
		    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER) {
			in6m_clear_recorded(inm);
			timer = min(inm->in6m_timer, timer);
		}
		inm->in6m_state = MLD_G_QUERY_PENDING_MEMBER;
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		V_current_state_timers_running6 = 1;
		return (retval);
	}

	/*
	 * Deal with the case where a group-and-source-specific query has
	 * been received but a group-specific query is already pending.
	 */
	if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER) {
		timer = min(inm->in6m_timer, timer);
		inm->in6m_timer = MLD_RANDOM_DELAY(timer);
		V_current_state_timers_running6 = 1;
		return (retval);
	}

	/*
	 * Finally, deal with the case where a group-and-source-specific
	 * query has been received, whether or not a response to a
	 * previous g-s-r query exists.
	 * In this case, we need to parse the source-list which the Querier
	 * has provided us with and check if we have any source list filter
	 * entries at T1 for these sources. If we do not, there is no need
	 * to schedule a report and the query may be dropped.
	 * If we do, we must record them and schedule a current-state
	 * report for those sources.
	 */
	if (inm->in6m_nsrc > 0) {
		struct mbuf		*m;
		uint8_t			*sp;
		int			 i, nrecorded;
		int			 soff;

		m = m0;
		soff = off + sizeof(struct mldv2_query);
		nrecorded = 0;
		for (i = 0; i < nsrc; i++) {
			sp = mtod(m, uint8_t *) + soff;
			retval = in6m_record_source(inm,
			    (const struct in6_addr *)sp);
			if (retval < 0)
				break;
			nrecorded += retval;
			soff += sizeof(struct in6_addr);
			if (soff >= m->m_len) {
				soff = soff - m->m_len;
				m = m->m_next;
				if (m == NULL)
					break;
			}
		}
		if (nrecorded > 0) {
			CTR1(KTR_MLD,
			    "%s: schedule response to SG query", __func__);
			inm->in6m_state = MLD_SG_QUERY_PENDING_MEMBER;
			inm->in6m_timer = MLD_RANDOM_DELAY(timer);
			V_current_state_timers_running6 = 1;
		}
	}

	return (retval);
}

/*
 * Process a received MLDv1 host membership report.
 * Assumes mld points to mld_hdr in pulled up mbuf chain.
 *
 * NOTE: Can't be fully const correct as we temporarily embed scope ID in
 * mld_addr. This is OK as we own the mbuf chain.
 */
static int
mld_v1_input_report(struct ifnet *ifp, const struct ip6_hdr *ip6,
    /*const*/ struct mld_hdr *mld)
{
	struct in6_addr		 src, dst;
	struct in6_ifaddr	*ia;
	struct in6_multi	*inm;
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	if (!mld_v1enable) {
		CTR3(KTR_MLD, "ignore v1 report %s on ifp %p(%s)",
		    ip6_sprintf(ip6tbuf, &mld->mld_addr),
		    ifp, ifp->if_xname);
		return (0);
	}

	if (ifp->if_flags & IFF_LOOPBACK)
		return (0);

	/*
	 * MLDv1 reports must originate from a host's link-local address,
	 * or the unspecified address (when booting).
	 */
	src = ip6->ip6_src;
	in6_clearscope(&src);
	if (!IN6_IS_SCOPE_LINKLOCAL(&src) && !IN6_IS_ADDR_UNSPECIFIED(&src)) {
		CTR3(KTR_MLD, "ignore v1 report src %s on ifp %p(%s)",
		    ip6_sprintf(ip6tbuf, &ip6->ip6_src),
		    ifp, ifp->if_xname);
		return (EINVAL);
	}

	/*
	 * RFC2710 Section 4: MLDv1 reports must pertain to a multicast
	 * group, and must be directed to the group itself.
	 */
	dst = ip6->ip6_dst;
	in6_clearscope(&dst);
	if (!IN6_IS_ADDR_MULTICAST(&mld->mld_addr) ||
	    !IN6_ARE_ADDR_EQUAL(&mld->mld_addr, &dst)) {
		CTR3(KTR_MLD, "ignore v1 report dst %s on ifp %p(%s)",
		    ip6_sprintf(ip6tbuf, &ip6->ip6_dst),
		    ifp, ifp->if_xname);
		return (EINVAL);
	}

	/*
	 * Make sure we don't hear our own membership report, as fast
	 * leave requires knowing that we are the only member of a
	 * group. Assume we used the link-local address if available,
	 * otherwise look for ::.
	 *
	 * XXX Note that scope ID comparison is needed for the address
	 * returned by in6ifa_ifpforlinklocal(), but SHOULD NOT be
	 * performed for the on-wire address.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	if ((ia && IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, IA6_IN6(ia))) ||
	    (ia == NULL && IN6_IS_ADDR_UNSPECIFIED(&src))) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return (0);
	}
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);

	CTR3(KTR_MLD, "process v1 report %s on ifp %p(%s)",
	    ip6_sprintf(ip6tbuf, &mld->mld_addr), ifp, ifp->if_xname);

	/*
	 * Embed scope ID of receiving interface in MLD query for lookup
	 * whilst we don't hold other locks (due to KAME locking lameness).
	 */
	if (!IN6_IS_ADDR_UNSPECIFIED(&mld->mld_addr))
		in6_setscope(&mld->mld_addr, ifp, NULL);

	IN6_MULTI_LOCK();
	MLD_LOCK();
	IF_ADDR_RLOCK(ifp);

	/*
	 * MLDv1 report suppression.
	 * If we are a member of this group, and our membership should be
	 * reported, and our group timer is pending or about to be reset,
	 * stop our group timer by transitioning to the 'lazy' state.
	 */
	inm = in6m_lookup_locked(ifp, &mld->mld_addr);
	if (inm != NULL) {
		struct mld_ifinfo *mli;

		mli = inm->in6m_mli;
		KASSERT(mli != NULL,
		    ("%s: no mli for ifp %p", __func__, ifp));

		/*
		 * If we are in MLDv2 host mode, do not allow the
		 * other host's MLDv1 report to suppress our reports.
		 */
		if (mli->mli_version == MLD_VERSION_2)
			goto out_locked;

		inm->in6m_timer = 0;

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_SLEEPING_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_AWAKENING_MEMBER:
			CTR3(KTR_MLD,
			    "report suppressed for %s on ifp %p(%s)",
			    ip6_sprintf(ip6tbuf, &mld->mld_addr),
			    ifp, ifp->if_xname);
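			/* FALLTHROUGH */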
1210191672Sbms		case MLD_LAZY_MEMBER:
1211191672Sbms			inm->in6m_state = MLD_LAZY_MEMBER;
1212191672Sbms			break;
1213191672Sbms		case MLD_G_QUERY_PENDING_MEMBER:
1214191672Sbms		case MLD_SG_QUERY_PENDING_MEMBER:
1215191672Sbms		case MLD_LEAVING_MEMBER:
1216191672Sbms			break;
1217191341Srwatson		}
1218191672Sbms	}
121953541Sshin
1220191672Sbmsout_locked:
1221233200Sjhb	IF_ADDR_RUNLOCK(ifp);
1222192547Sbms	MLD_UNLOCK();
1223191672Sbms	IN6_MULTI_UNLOCK();
1224120941Sume
1225192923Sbms	/* XXX Clear embedded scope ID as userland won't expect it. */
1226192923Sbms	in6_clearscope(&mld->mld_addr);
1227192923Sbms
1228191672Sbms	return (0);
1229191672Sbms}
1230120941Sume
1231191672Sbms/*
1232191672Sbms * MLD input path.
1233191672Sbms *
1234191672Sbms * Assume query messages which fit in a single ICMPv6 message header
1235191672Sbms * have been pulled up.
1236191672Sbms * Assume that userland will want to see the message, even if it
1237191672Sbms * otherwise fails kernel input validation; do not free it.
1238191672Sbms * Pullup may however free the mbuf chain m if it fails.
1239191672Sbms *
1240191672Sbms * Return IPPROTO_DONE if we freed m. Otherwise, return 0.
1241191672Sbms */
1242191672Sbmsint
1243191672Sbmsmld_input(struct mbuf *m, int off, int icmp6len)
1244191672Sbms{
1245191672Sbms	struct ifnet	*ifp;
1246191672Sbms	struct ip6_hdr	*ip6;
1247191672Sbms	struct mld_hdr	*mld;
1248191672Sbms	int		 mldlen;
1249191672Sbms
1250191672Sbms	CTR3(KTR_MLD, "%s: called w/mbuf (%p,%d)", __func__, m, off);
1251191672Sbms
1252191672Sbms	ifp = m->m_pkthdr.rcvif;
1253191672Sbms
1254191672Sbms	ip6 = mtod(m, struct ip6_hdr *);
1255191672Sbms
1256191672Sbms	/* Pullup to appropriate size. */
1257191672Sbms	mld = (struct mld_hdr *)(mtod(m, uint8_t *) + off);
1258191672Sbms	if (mld->mld_type == MLD_LISTENER_QUERY &&
1259191672Sbms	    icmp6len >= sizeof(struct mldv2_query)) {
1260191672Sbms		mldlen = sizeof(struct mldv2_query);
1261191672Sbms	} else {
1262191672Sbms		mldlen = sizeof(struct mld_hdr);
1263191672Sbms	}
1264191672Sbms	IP6_EXTHDR_GET(mld, struct mld_hdr *, m, off, mldlen);
1265191672Sbms	if (mld == NULL) {
1266191672Sbms		ICMP6STAT_INC(icp6s_badlen);
1267191672Sbms		return (IPPROTO_DONE);
1268191672Sbms	}
1269191672Sbms
1270192923Sbms	/*
1271192923Sbms	 * Userland needs to see all of this traffic for implementing
1272192923Sbms	 * the endpoint discovery portion of multicast routing.
1273192923Sbms	 */
1274191672Sbms	switch (mld->mld_type) {
1275191672Sbms	case MLD_LISTENER_QUERY:
1276191672Sbms		icmp6_ifstat_inc(ifp, ifs6_in_mldquery);
1277191672Sbms		if (icmp6len == sizeof(struct mld_hdr)) {
1278191672Sbms			if (mld_v1_input_query(ifp, ip6, mld) != 0)
1279191672Sbms				return (0);
1280191672Sbms		} else if (icmp6len >= sizeof(struct mldv2_query)) {
1281191672Sbms			if (mld_v2_input_query(ifp, ip6, m, off,
1282191672Sbms			    icmp6len) != 0)
1283191672Sbms				return (0);
1284191672Sbms		}
1285191672Sbms		break;
1286191672Sbms	case MLD_LISTENER_REPORT:
1287191672Sbms		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1288191672Sbms		if (mld_v1_input_report(ifp, ip6, mld) != 0)
1289192923Sbms			return (0);
1290191672Sbms		break;
1291191672Sbms	case MLDV2_LISTENER_REPORT:
1292191672Sbms		icmp6_ifstat_inc(ifp, ifs6_in_mldreport);
1293191672Sbms		break;
1294191672Sbms	case MLD_LISTENER_DONE:
1295191672Sbms		icmp6_ifstat_inc(ifp, ifs6_in_mlddone);
1296191672Sbms		break;
1297191672Sbms	default:
1298191672Sbms		break;
1299191672Sbms	}
1300191672Sbms
1301191672Sbms	return (0);
1302191672Sbms}
1303191672Sbms
1304191672Sbms/*
1305191672Sbms * Fast timeout handler (global).
1306191672Sbms * VIMAGE: Timeout handlers are expected to service all vimages.
1307191672Sbms */
1308191672Sbmsvoid
1309191672Sbmsmld_fasttimo(void)
1310191672Sbms{
1311191672Sbms	VNET_ITERATOR_DECL(vnet_iter);
1312191672Sbms
1313195760Srwatson	VNET_LIST_RLOCK_NOSLEEP();
1314191672Sbms	VNET_FOREACH(vnet_iter) {
1315191672Sbms		CURVNET_SET(vnet_iter);
1316191672Sbms		mld_fasttimo_vnet();
1317191672Sbms		CURVNET_RESTORE();
1318191672Sbms	}
1319195760Srwatson	VNET_LIST_RUNLOCK_NOSLEEP();
1320191672Sbms}
1321191672Sbms
1322191672Sbms/*
1323191672Sbms * Fast timeout handler (per-vnet).
1324191672Sbms *
1325191672Sbms * VIMAGE: Assume caller has set up our curvnet.
1326191672Sbms */
1327191672Sbmsstatic void
1328191672Sbmsmld_fasttimo_vnet(void)
1329191672Sbms{
1330191672Sbms	struct ifqueue		 scq;	/* State-change packets */
1331191672Sbms	struct ifqueue		 qrq;	/* Query response packets */
1332191672Sbms	struct ifnet		*ifp;
1333191672Sbms	struct mld_ifinfo	*mli;
1334230076Sjhb	struct ifmultiaddr	*ifma;
1335230076Sjhb	struct in6_multi	*inm, *tinm;
1336191672Sbms	int			 uri_fasthz;
1337191672Sbms
1338191672Sbms	uri_fasthz = 0;
1339191672Sbms
1340191672Sbms	/*
1341191672Sbms	 * Quick check to see if any work needs to be done, in order to
1342191672Sbms	 * minimize the overhead of fasttimo processing.
1343191672Sbms	 * SMPng: XXX Unlocked reads.
1344191672Sbms	 */
1345191672Sbms	if (!V_current_state_timers_running6 &&
1346191672Sbms	    !V_interface_timers_running6 &&
1347191672Sbms	    !V_state_change_timers_running6)
1348191672Sbms		return;
1349191672Sbms
1350191672Sbms	IN6_MULTI_LOCK();
1351191672Sbms	MLD_LOCK();
1352191672Sbms
1353191672Sbms	/*
1354191672Sbms	 * MLDv2 General Query response timer processing.
1355191672Sbms	 */
1356191672Sbms	if (V_interface_timers_running6) {
1357191672Sbms		CTR1(KTR_MLD, "%s: interface timers running", __func__);
1358191672Sbms
1359191672Sbms		V_interface_timers_running6 = 0;
1360191672Sbms		LIST_FOREACH(mli, &V_mli_head, mli_link) {
1361191672Sbms			if (mli->mli_v2_timer == 0) {
1362191672Sbms				/* Do nothing. */
1363191672Sbms			} else if (--mli->mli_v2_timer == 0) {
1364191672Sbms				mld_v2_dispatch_general_query(mli);
1365191672Sbms			} else {
1366191672Sbms				V_interface_timers_running6 = 1;
1367191672Sbms			}
1368191672Sbms		}
1369191672Sbms	}
1370191672Sbms
1371191672Sbms	if (!V_current_state_timers_running6 &&
1372191672Sbms	    !V_state_change_timers_running6)
1373191672Sbms		goto out_locked;
1374191672Sbms
1375191672Sbms	V_current_state_timers_running6 = 0;
1376191672Sbms	V_state_change_timers_running6 = 0;
1377191672Sbms
1378191672Sbms	CTR1(KTR_MLD, "%s: state change timers running", __func__);
1379191672Sbms
1380191672Sbms	/*
1381191672Sbms	 * MLD host report and state-change timer processing.
1382191672Sbms	 * Note: Processing a v2 group timer may remove a node.
1383191672Sbms	 */
1384191672Sbms	LIST_FOREACH(mli, &V_mli_head, mli_link) {
1385191672Sbms		ifp = mli->mli_ifp;
1386191672Sbms
1387191672Sbms		if (mli->mli_version == MLD_VERSION_2) {
1388191672Sbms			uri_fasthz = MLD_RANDOM_DELAY(mli->mli_uri *
1389191672Sbms			    PR_FASTHZ);

			memset(&qrq, 0, sizeof(struct ifqueue));
			IFQ_SET_MAXLEN(&qrq, MLD_MAX_G_GS_PACKETS);

			memset(&scq, 0, sizeof(struct ifqueue));
			IFQ_SET_MAXLEN(&scq, MLD_MAX_STATE_CHANGE_PACKETS);
		}

		IF_ADDR_RLOCK(ifp);
		TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
			if (ifma->ifma_addr->sa_family != AF_INET6 ||
			    ifma->ifma_protospec == NULL)
				continue;
			inm = (struct in6_multi *)ifma->ifma_protospec;
			switch (mli->mli_version) {
			case MLD_VERSION_1:
				mld_v1_process_group_timer(mli, inm);
				break;
			case MLD_VERSION_2:
				mld_v2_process_group_timers(mli, &qrq,
				    &scq, inm, uri_fasthz);
				break;
			}
		}
		IF_ADDR_RUNLOCK(ifp);

		switch (mli->mli_version) {
		case MLD_VERSION_1:
			/*
			 * Transmit reports for this lifecycle.  This
			 * is done while not holding IF_ADDR_LOCK
			 * since this can call
			 * in6ifa_ifpforlinklocal() which locks
			 * IF_ADDR_LOCK internally as well as
			 * ip6_output() to transmit a packet.
			 */
			SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
			    in6m_nrele, tinm) {
				SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
				    in6m_nrele);
				(void)mld_v1_transmit_report(inm,
				    MLD_LISTENER_REPORT);
			}
			break;
		case MLD_VERSION_2:
			mld_dispatch_queue(&qrq, 0);
			mld_dispatch_queue(&scq, 0);

			/*
			 * Free the in6_multi reference(s) for
			 * this lifecycle.
			 */
			SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead,
			    in6m_nrele, tinm) {
				SLIST_REMOVE_HEAD(&mli->mli_relinmhead,
				    in6m_nrele);
				in6m_release_locked(inm);
			}
			break;
		}
	}

out_locked:
	MLD_UNLOCK();
	IN6_MULTI_UNLOCK();
}

/*
 * Update host report group timer.
 * Will update the global pending timer flags.
 */
static void
mld_v1_process_group_timer(struct mld_ifinfo *mli, struct in6_multi *inm)
{
	int report_timer_expired;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	if (inm->in6m_timer == 0) {
		report_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		report_timer_expired = 1;
	} else {
		V_current_state_timers_running6 = 1;
		return;
	}

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		break;
	case MLD_REPORTING_MEMBER:
		if (report_timer_expired) {
			inm->in6m_state = MLD_IDLE_MEMBER;
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
		}
		break;
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
	case MLD_LEAVING_MEMBER:
		break;
	}
}

/*
 * Update a group's timers for MLDv2.
 * Will update the global pending timer flags.
 * Note: Unlocked read from mli.
 */
static void
mld_v2_process_group_timers(struct mld_ifinfo *mli,
    struct ifqueue *qrq, struct ifqueue *scq,
    struct in6_multi *inm, const int uri_fasthz)
{
	int query_response_timer_expired;
	int state_change_retransmit_timer_expired;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	query_response_timer_expired = 0;
	state_change_retransmit_timer_expired = 0;

	/*
	 * During a transition from compatibility mode back to MLDv2,
	 * a group record in REPORTING state may still have its group
	 * timer active. This is a no-op in this function; it is easier
	 * to deal with it here than to complicate the slow-timeout path.
	 */
	if (inm->in6m_timer == 0) {
		query_response_timer_expired = 0;
	} else if (--inm->in6m_timer == 0) {
		query_response_timer_expired = 1;
	} else {
		V_current_state_timers_running6 = 1;
	}

	if (inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 0;
	} else if (--inm->in6m_sctimer == 0) {
		state_change_retransmit_timer_expired = 1;
	} else {
		V_state_change_timers_running6 = 1;
	}

	/* We are in fasttimo, so be quick about it. */
	if (!state_change_retransmit_timer_expired &&
	    !query_response_timer_expired)
		return;

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_LAZY_MEMBER:
	case MLD_AWAKENING_MEMBER:
	case MLD_IDLE_MEMBER:
		break;
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		/*
		 * Respond to a previously pending Group-Specific
		 * or Group-and-Source-Specific query by enqueueing
		 * the appropriate Current-State report for
		 * immediate transmission.
		 */
		if (query_response_timer_expired) {
			int retval;

			retval = mld_v2_enqueue_group_record(qrq, inm, 0, 1,
			    (inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER),
			    0);
			CTR2(KTR_MLD, "%s: enqueue record = %d",
			    __func__, retval);
			inm->in6m_state = MLD_REPORTING_MEMBER;
			in6m_clear_recorded(inm);
		}
		/* FALLTHROUGH */
	case MLD_REPORTING_MEMBER:
	case MLD_LEAVING_MEMBER:
		if (state_change_retransmit_timer_expired) {
			/*
			 * State-change retransmission timer fired.
			 * If there are any further pending retransmissions,
			 * set the global pending state-change flag, and
			 * reset the timer.
			 */
			if (--inm->in6m_scrv > 0) {
				inm->in6m_sctimer = uri_fasthz;
				V_state_change_timers_running6 = 1;
			}
			/*
			 * Retransmit the previously computed state-change
			 * report. If there are no further pending
			 * retransmissions, the mbuf queue will be consumed.
			 * Update T0 state to T1 as we have now sent
			 * a state-change.
			 */
			(void)mld_v2_merge_state_changes(inm, scq);

			in6m_commit(inm);
			CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
			    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
			    inm->in6m_ifp->if_xname);

			/*
			 * If we are leaving the group for good, make sure
			 * we release MLD's reference to it.
			 * This release must be deferred using a SLIST,
			 * as we are called from a loop which traverses
			 * the ifnet's if_multiaddrs TAILQ.
			 */
			if (inm->in6m_state == MLD_LEAVING_MEMBER &&
			    inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				SLIST_INSERT_HEAD(&mli->mli_relinmhead,
				    inm, in6m_nrele);
			}
		}
		break;
	}
}

/*
 * Switch to a different MLD version on the given interface,
 * as per Section 9.12 of RFC 3810.
 */
static void
mld_set_version(struct mld_ifinfo *mli, const int version)
{
	int old_version_timer;

	MLD_LOCK_ASSERT();

	CTR4(KTR_MLD, "%s: switching to v%d on ifp %p(%s)", __func__,
	    version, mli->mli_ifp, mli->mli_ifp->if_xname);

	if (version == MLD_VERSION_1) {
		/*
		 * Compute the "Older Version Querier Present" timer as per
		 * Section 9.12.
		 */
		old_version_timer = (mli->mli_rv * mli->mli_qi) + mli->mli_qri;
		old_version_timer *= PR_SLOWHZ;
		mli->mli_v1_timer = old_version_timer;
	}
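
	/*
	 * Worked example (a sketch, using the RFC 3810 defaults): with
	 * a Robustness Variable of 2, a Query Interval of 125 seconds
	 * and a Query Response Interval of 10 seconds, the timer is
	 * (2 * 125) + 10 = 260 seconds, i.e. 260 * PR_SLOWHZ slow
	 * ticks.
	 */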

	if (mli->mli_v1_timer > 0 && mli->mli_version != MLD_VERSION_1) {
		mli->mli_version = MLD_VERSION_1;
		mld_v2_cancel_link_timers(mli);
	}
}

/*
 * Cancel pending MLDv2 timers for the given link and all groups
 * joined on it; state-change, general-query, and group-query timers.
 */
static void
mld_v2_cancel_link_timers(struct mld_ifinfo *mli)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp;
	struct in6_multi	*inm, *tinm;

	CTR3(KTR_MLD, "%s: cancel v2 timers on ifp %p(%s)", __func__,
	    mli->mli_ifp, mli->mli_ifp->if_xname);

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	/*
	 * Fast-track this potentially expensive operation
	 * by checking all the global 'timer pending' flags.
	 */
	if (!V_interface_timers_running6 &&
	    !V_state_change_timers_running6 &&
	    !V_current_state_timers_running6)
		return;

	mli->mli_v2_timer = 0;

	ifp = mli->mli_ifp;

	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET6)
			continue;
		inm = (struct in6_multi *)ifma->ifma_protospec;
		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			break;
		case MLD_LEAVING_MEMBER:
			/*
			 * If we are leaving the group and switching
			 * version, we need to release the final
			 * reference held for issuing the INCLUDE {}.
			 */
			SLIST_INSERT_HEAD(&mli->mli_relinmhead, inm,
			    in6m_nrele);
			/* FALLTHROUGH */
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
			in6m_clear_recorded(inm);
			/* FALLTHROUGH */
		case MLD_REPORTING_MEMBER:
			inm->in6m_sctimer = 0;
			inm->in6m_timer = 0;
			inm->in6m_state = MLD_REPORTING_MEMBER;
			/*
			 * Free any pending MLDv2 state-change records.
			 */
			_IF_DRAIN(&inm->in6m_scq);
			break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);
	SLIST_FOREACH_SAFE(inm, &mli->mli_relinmhead, in6m_nrele, tinm) {
		SLIST_REMOVE_HEAD(&mli->mli_relinmhead, in6m_nrele);
		in6m_release_locked(inm);
	}
}

/*
 * Global slowtimo handler.
 * VIMAGE: Timeout handlers are expected to service all vimages.
 */
void
mld_slowtimo(void)
{
	VNET_ITERATOR_DECL(vnet_iter);

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		mld_slowtimo_vnet();
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}

/*
 * Per-vnet slowtimo handler.
 */
static void
mld_slowtimo_vnet(void)
{
	struct mld_ifinfo *mli;

	MLD_LOCK();

	LIST_FOREACH(mli, &V_mli_head, mli_link) {
		mld_v1_process_querier_timers(mli);
	}

	MLD_UNLOCK();
}

/*
 * Update the Older Version Querier Present timers for a link.
 * See Section 9.12 of RFC 3810.
 */
static void
mld_v1_process_querier_timers(struct mld_ifinfo *mli)
{

	MLD_LOCK_ASSERT();

	if (mli->mli_version != MLD_VERSION_2 && --mli->mli_v1_timer == 0) {
		/*
		 * MLDv1 Querier Present timer expired; revert to MLDv2.
		 */
		CTR5(KTR_MLD,
		    "%s: transition from v%d -> v%d on %p(%s)",
		    __func__, mli->mli_version, MLD_VERSION_2,
		    mli->mli_ifp, mli->mli_ifp->if_xname);
		mli->mli_version = MLD_VERSION_2;
	}
}

/*
 * Transmit an MLDv1 report immediately.
 */
static int
mld_v1_transmit_report(struct in6_multi *in6m, const int type)
{
	struct ifnet		*ifp;
	struct in6_ifaddr	*ia;
	struct ip6_hdr		*ip6;
	struct mbuf		*mh, *md;
	struct mld_hdr		*mld;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	ifp = in6m->in6m_ifp;
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	/* ia may be NULL if link-local address is tentative. */

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return (ENOMEM);
	}
	MGET(md, M_DONTWAIT, MT_DATA);
	if (md == NULL) {
		m_free(mh);
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		return (ENOMEM);
	}
	mh->m_next = md;

	/*
	 * FUTURE: Consider increasing alignment by ETHER_HDR_LEN, so
	 * that ether_output() does not need to allocate another mbuf
	 * for the header in the most common case.
	 */
	MH_ALIGN(mh, sizeof(struct ip6_hdr));
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) + sizeof(struct mld_hdr);
	mh->m_len = sizeof(struct ip6_hdr);

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	ip6->ip6_dst = in6m->in6m_addr;

	md->m_len = sizeof(struct mld_hdr);
	mld = mtod(md, struct mld_hdr *);
	mld->mld_type = type;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_maxdelay = 0;
	mld->mld_reserved = 0;
	mld->mld_addr = in6m->in6m_addr;
	in6_clearscope(&mld->mld_addr);
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mld_hdr));

	mld_save_context(mh, ifp);
	mh->m_flags |= M_MLDV1;

	mld_dispatch_packet(mh);

	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	return (0);
}

/*
 * Process a state change from the upper layer for the given IPv6 group.
 *
 * Each socket holds a reference on the in6_multi in its own ip6_moptions.
 * The socket layer will have made the necessary updates to the group
 * state; it is now up to MLD to issue a state change report if there
 * has been any change between T0 (when the last state-change was issued)
 * and T1 (now).
 *
 * We use the MLDv2 state machine at group level. The MLD module
 * however makes the decision as to which MLD protocol version to speak.
 * A state change *from* INCLUDE {} always means an initial join.
 * A state change *to* INCLUDE {} always means a final leave.
 *
 * If delay is non-zero, and the state change is an initial multicast
 * join, the state change report will be delayed by 'delay' ticks
 * in units of PR_FASTHZ if MLDv1 is active on the link; otherwise
 * the initial MLDv2 state change report will be delayed by whichever
 * is sooner, a pending state-change timer or delay itself.
 *
 * VIMAGE: curvnet should have been set by caller, as this routine
 * is called from the socket option handlers.
 */
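/*
 * Illustrative lifecycle (a sketch, derived from the checks below):
 * the first join on a group takes iss_fmode from MCAST_UNDEFINED to
 * MCAST_INCLUDE or MCAST_EXCLUDE, so mld_initial_join() runs; dropping
 * the last membership takes it back to MCAST_UNDEFINED, so
 * mld_final_leave() runs; any other filter set change is handled by
 * mld_handle_state_change().
 */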
int
mld_change_state(struct in6_multi *inm, const int delay)
{
	struct mld_ifinfo *mli;
	struct ifnet *ifp;
	int error;

	IN6_MULTI_LOCK_ASSERT();

	error = 0;

	/*
	 * Try to detect if the upper layer just asked us to change state
	 * for an interface which has now gone away.
	 */
	KASSERT(inm->in6m_ifma != NULL, ("%s: no ifma", __func__));
	ifp = inm->in6m_ifma->ifma_ifp;
	if (ifp != NULL) {
		/*
		 * Sanity check that netinet6's notion of ifp is the
		 * same as net's.
		 */
		KASSERT(inm->in6m_ifp == ifp, ("%s: bad ifp", __func__));
	}

	MLD_LOCK();

	mli = MLD_IFINFO(ifp);
	KASSERT(mli != NULL, ("%s: no mld_ifinfo for ifp %p", __func__, ifp));

	/*
	 * If we detect a state transition to or from MCAST_UNDEFINED
	 * for this group, then we are starting or finishing an MLD
	 * life cycle for this group.
	 */
	if (inm->in6m_st[1].iss_fmode != inm->in6m_st[0].iss_fmode) {
		CTR3(KTR_MLD, "%s: inm transition %d -> %d", __func__,
		    inm->in6m_st[0].iss_fmode, inm->in6m_st[1].iss_fmode);
		if (inm->in6m_st[0].iss_fmode == MCAST_UNDEFINED) {
			CTR1(KTR_MLD, "%s: initial join", __func__);
			error = mld_initial_join(inm, mli, delay);
			goto out_locked;
		} else if (inm->in6m_st[1].iss_fmode == MCAST_UNDEFINED) {
			CTR1(KTR_MLD, "%s: final leave", __func__);
			mld_final_leave(inm, mli);
			goto out_locked;
		}
	} else {
		CTR1(KTR_MLD, "%s: filter set change", __func__);
	}

	error = mld_handle_state_change(inm, mli);

out_locked:
	MLD_UNLOCK();
	return (error);
}

/*
 * Perform the initial join for an MLD group.
 *
 * When joining a group:
 *  If the group should have its MLD traffic suppressed, do nothing.
 *  MLDv1 starts sending MLDv1 host membership reports.
 *  MLDv2 will schedule an MLDv2 state-change report containing the
 *  initial state of the membership.
 *
 * If the delay argument is non-zero, then we must delay sending the
 * initial state change for delay ticks (in units of PR_FASTHZ).
 */
static int
mld_initial_join(struct in6_multi *inm, struct mld_ifinfo *mli,
    const int delay)
{
	struct ifnet		*ifp;
	struct ifqueue		*ifq;
	int			 error, retval, syncstates;
	int			 odelay;
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	CTR4(KTR_MLD, "%s: initial join %s on ifp %p(%s)",
	    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    inm->in6m_ifp, inm->in6m_ifp->if_xname);

	error = 0;
	syncstates = 1;

	ifp = inm->in6m_ifp;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	KASSERT(mli && mli->mli_ifp == ifp, ("%s: inconsistent ifp", __func__));

	/*
	 * Groups joined on loopback or marked as 'not reported'
	 * enter the MLD_SILENT_MEMBER state and
	 * are never reported in any protocol exchanges.
	 * All other groups enter the appropriate state machine
	 * for the version in use on this link.
	 * A link marked as MLIF_SILENT causes MLD to be completely
	 * disabled for the link.
	 */
	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    !mld_is_addr_reported(&inm->in6m_addr)) {
		CTR1(KTR_MLD,
"%s: not kicking state machine for silent group", __func__);
		inm->in6m_state = MLD_SILENT_MEMBER;
		inm->in6m_timer = 0;
	} else {
		/*
		 * Deal with overlapping in6_multi lifecycle.
		 * If this group was LEAVING, then make sure
		 * we drop the reference we picked up to keep the
		 * group around for the final INCLUDE {} enqueue.
		 */
		if (mli->mli_version == MLD_VERSION_2 &&
		    inm->in6m_state == MLD_LEAVING_MEMBER)
			in6m_release_locked(inm);

		inm->in6m_state = MLD_REPORTING_MEMBER;

		switch (mli->mli_version) {
		case MLD_VERSION_1:
			/*
			 * If a delay was provided, only use it if
			 * it is greater than the delay normally
			 * used for an MLDv1 state change report,
			 * and delay sending the initial MLDv1 report
			 * by not transitioning to the IDLE state.
			 */
			odelay = MLD_RANDOM_DELAY(MLD_V1_MAX_RI * PR_FASTHZ);
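			/*
			 * Hedged sketch of the numbers involved:
			 * assuming MLD_V1_MAX_RI is the MLDv1 maximum
			 * response interval of 10 seconds and
			 * PR_FASTHZ is 5, odelay is a random delay of
			 * at most 50 fast ticks (10 seconds).
			 */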
			if (delay) {
				inm->in6m_timer = max(delay, odelay);
				V_current_state_timers_running6 = 1;
			} else {
				inm->in6m_state = MLD_IDLE_MEMBER;
				error = mld_v1_transmit_report(inm,
				     MLD_LISTENER_REPORT);
				if (error == 0) {
					inm->in6m_timer = odelay;
					V_current_state_timers_running6 = 1;
				}
			}
			break;

		case MLD_VERSION_2:
			/*
			 * Defer update of T0 to T1, until the first copy
			 * of the state change has been transmitted.
			 */
			syncstates = 0;

			/*
			 * Immediately enqueue a State-Change Report for
			 * this interface, freeing any previous reports.
			 * Don't kick the timers if there is nothing to do,
			 * or if an error occurred.
			 */
			ifq = &inm->in6m_scq;
			_IF_DRAIN(ifq);
			retval = mld_v2_enqueue_group_record(ifq, inm, 1,
			    0, 0, (mli->mli_flags & MLIF_USEALLOW));
			CTR2(KTR_MLD, "%s: enqueue record = %d",
			    __func__, retval);
			if (retval <= 0) {
				error = retval * -1;
				break;
			}

			/*
			 * Schedule transmission of pending state-change
			 * report up to RV times for this link. The timer
			 * will fire at the next mld_fasttimo (~200ms),
			 * giving us an opportunity to merge the reports.
			 *
			 * If a delay was provided to this function, only
			 * use this delay if sooner than the existing one.
			 */
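			/*
			 * Sketch with the default robustness of 2:
			 * the report goes out at the next fasttimo
			 * and is retransmitted once more after a
			 * jittered unsolicited report interval
			 * (uri_fasthz, computed in
			 * mld_fasttimo_vnet()).
			 */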
			KASSERT(mli->mli_rv > 1,
			   ("%s: invalid robustness %d", __func__,
			    mli->mli_rv));
			inm->in6m_scrv = mli->mli_rv;
			if (delay) {
				if (inm->in6m_sctimer > 1) {
					inm->in6m_sctimer =
					    min(inm->in6m_sctimer, delay);
				} else
					inm->in6m_sctimer = delay;
			} else
				inm->in6m_sctimer = 1;
			V_state_change_timers_running6 = 1;

			error = 0;
			break;
		}
	}

	/*
	 * Only update the T0 state if state change is atomic,
	 * i.e. we don't need to wait for a timer to fire before we
	 * can consider the state change to have been communicated.
	 */
	if (syncstates) {
		in6m_commit(inm);
		CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
		    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
		    inm->in6m_ifp->if_xname);
	}

	return (error);
}

/*
 * Issue an intermediate state change during the life-cycle.
 */
static int
mld_handle_state_change(struct in6_multi *inm, struct mld_ifinfo *mli)
{
	struct ifnet		*ifp;
	int			 retval;
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	CTR4(KTR_MLD, "%s: state change for %s on ifp %p(%s)",
	    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    inm->in6m_ifp, inm->in6m_ifp->if_xname);

	ifp = inm->in6m_ifp;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	KASSERT(mli && mli->mli_ifp == ifp,
	    ("%s: inconsistent ifp", __func__));

	if ((ifp->if_flags & IFF_LOOPBACK) ||
	    (mli->mli_flags & MLIF_SILENT) ||
	    !mld_is_addr_reported(&inm->in6m_addr) ||
	    (mli->mli_version != MLD_VERSION_2)) {
		if (!mld_is_addr_reported(&inm->in6m_addr)) {
			CTR1(KTR_MLD,
"%s: not kicking state machine for silent group", __func__);
		}
		CTR1(KTR_MLD, "%s: nothing to do", __func__);
		in6m_commit(inm);
		CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
		    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
		    inm->in6m_ifp->if_xname);
		return (0);
	}

	_IF_DRAIN(&inm->in6m_scq);

	retval = mld_v2_enqueue_group_record(&inm->in6m_scq, inm, 1, 0, 0,
	    (mli->mli_flags & MLIF_USEALLOW));
	CTR2(KTR_MLD, "%s: enqueue record = %d", __func__, retval);
	if (retval <= 0)
		return (-retval);

	/*
	 * If record(s) were enqueued, start the state-change
	 * report timer for this group.
	 */
	inm->in6m_scrv = mli->mli_rv;
	inm->in6m_sctimer = 1;
	V_state_change_timers_running6 = 1;

	return (0);
}

/*
 * Perform the final leave for a multicast address.
 *
 * When leaving a group:
 *  MLDv1 sends a DONE message, if and only if we are the reporter.
 *  MLDv2 enqueues a state-change report containing a transition
 *  to INCLUDE {} for immediate transmission.
 */
static void
mld_final_leave(struct in6_multi *inm, struct mld_ifinfo *mli)
{
	int syncstates;
#ifdef KTR
	char ip6tbuf[INET6_ADDRSTRLEN];
#endif

	syncstates = 1;

	CTR4(KTR_MLD, "%s: final leave %s on ifp %p(%s)",
	    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    inm->in6m_ifp, inm->in6m_ifp->if_xname);

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	switch (inm->in6m_state) {
	case MLD_NOT_MEMBER:
	case MLD_SILENT_MEMBER:
	case MLD_LEAVING_MEMBER:
		/* Already leaving or left; do nothing. */
		CTR1(KTR_MLD,
"%s: not kicking state machine for silent group", __func__);
		break;
	case MLD_REPORTING_MEMBER:
	case MLD_IDLE_MEMBER:
	case MLD_G_QUERY_PENDING_MEMBER:
	case MLD_SG_QUERY_PENDING_MEMBER:
		if (mli->mli_version == MLD_VERSION_1) {
#ifdef INVARIANTS
			if (inm->in6m_state == MLD_G_QUERY_PENDING_MEMBER ||
			    inm->in6m_state == MLD_SG_QUERY_PENDING_MEMBER)
				panic("%s: MLDv2 state reached, not MLDv2 mode",
				     __func__);
#endif
			mld_v1_transmit_report(inm, MLD_LISTENER_DONE);
			inm->in6m_state = MLD_NOT_MEMBER;
			V_current_state_timers_running6 = 1;
		} else if (mli->mli_version == MLD_VERSION_2) {
			/*
			 * Stop group timer and all pending reports.
			 * Immediately enqueue a state-change report
			 * TO_IN {} to be sent on the next fast timeout,
			 * giving us an opportunity to merge reports.
			 */
			_IF_DRAIN(&inm->in6m_scq);
			inm->in6m_timer = 0;
			inm->in6m_scrv = mli->mli_rv;
			CTR4(KTR_MLD, "%s: Leaving %s/%s with %d "
			    "pending retransmissions.", __func__,
			    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
			    inm->in6m_ifp->if_xname, inm->in6m_scrv);
			if (inm->in6m_scrv == 0) {
				inm->in6m_state = MLD_NOT_MEMBER;
				inm->in6m_sctimer = 0;
			} else {
				int retval;

				in6m_acquire_locked(inm);

				retval = mld_v2_enqueue_group_record(
				    &inm->in6m_scq, inm, 1, 0, 0,
				    (mli->mli_flags & MLIF_USEALLOW));
				KASSERT(retval != 0,
				    ("%s: enqueue record = %d", __func__,
				     retval));

				inm->in6m_state = MLD_LEAVING_MEMBER;
				inm->in6m_sctimer = 1;
				V_state_change_timers_running6 = 1;
				syncstates = 0;
			}
			break;
		}
		break;
	case MLD_LAZY_MEMBER:
	case MLD_SLEEPING_MEMBER:
	case MLD_AWAKENING_MEMBER:
		/* Our reports are suppressed; do nothing. */
		break;
	}

	if (syncstates) {
		in6m_commit(inm);
		CTR3(KTR_MLD, "%s: T1 -> T0 for %s/%s", __func__,
		    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
		    inm->in6m_ifp->if_xname);
		inm->in6m_st[1].iss_fmode = MCAST_UNDEFINED;
		CTR3(KTR_MLD, "%s: T1 now MCAST_UNDEFINED for %p/%s",
		    __func__, &inm->in6m_addr, inm->in6m_ifp->if_xname);
	}
}

/*
 * Enqueue an MLDv2 group record to the given output queue.
 *
 * If is_state_change is zero, a current-state record is appended.
 * If is_state_change is non-zero, a state-change report is appended.
 *
 * If is_group_query is non-zero, an mbuf packet chain is allocated.
 * If is_group_query is zero, and if there is a packet with free space
 * at the tail of the queue, it will be appended to, provided there
 * is enough free space.
 * Otherwise a new mbuf packet chain is allocated.
 *
 * If is_source_query is non-zero, each source is checked to see if
 * it was recorded for a Group-Source query, and will be omitted if
 * it is not both in-mode and recorded.
 *
 * If use_block_allow is non-zero, state change reports for initial join
 * and final leave, on an inclusive mode group with a source list, will be
 * rewritten to use the ALLOW_NEW and BLOCK_OLD record types, respectively.
 *
 * The function will attempt to allocate leading space in the packet
 * for the IPv6+ICMP headers to be prepended without fragmenting the chain.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
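/*
 * Example invocations, as made elsewhere in this file: responding to a
 * pending group or group-and-source query passes (is_state_change = 0,
 * is_group_query = 1); an initial join or final leave enqueues with
 * (is_state_change = 1, is_group_query = 0) and use_block_allow taken
 * from the link's MLIF_USEALLOW flag.
 */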
static int
mld_v2_enqueue_group_record(struct ifqueue *ifq, struct in6_multi *inm,
    const int is_state_change, const int is_group_query,
    const int is_source_query, const int use_block_allow)
{
	struct mldv2_record	 mr;
	struct mldv2_record	*pmr;
	struct ifnet		*ifp;
	struct ip6_msource	*ims, *nims;
	struct mbuf		*m0, *m, *md;
	int			 error, is_filter_list_change;
	int			 minrec0len, m0srcs, msrcs, nbytes, off;
	int			 record_has_sources;
	int			 now;
	int			 type;
	uint8_t			 mode;
#ifdef KTR
	char			 ip6tbuf[INET6_ADDRSTRLEN];
#endif

	IN6_MULTI_LOCK_ASSERT();

	error = 0;
	ifp = inm->in6m_ifp;
	is_filter_list_change = 0;
	m = NULL;
	m0 = NULL;
	m0srcs = 0;
	msrcs = 0;
	nbytes = 0;
	nims = NULL;
	record_has_sources = 1;
	pmr = NULL;
	type = MLD_DO_NOTHING;
	mode = inm->in6m_st[1].iss_fmode;

	/*
	 * If we did not transition out of ASM mode during t0->t1,
	 * and there are no source nodes to process, we can skip
	 * the generation of source records.
	 */
	if (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0 &&
	    inm->in6m_nsrc == 0)
		record_has_sources = 0;

	if (is_state_change) {
		/*
		 * Queue a state change record.
		 * If the mode did not change, and there are non-ASM
		 * listeners or source filters present,
		 * we potentially need to issue two records for the group.
		 * If there are ASM listeners, and there was no filter
		 * mode transition of any kind, do nothing.
		 *
		 * If we are transitioning to MCAST_UNDEFINED, we need
		 * not send any sources. A transition to/from this state is
		 * considered inclusive with some special treatment.
		 *
		 * If we are rewriting initial joins/leaves to use
		 * ALLOW/BLOCK, and the group's membership is inclusive,
		 * we need to send sources in all cases.
		 */
		if (mode != inm->in6m_st[0].iss_fmode) {
			if (mode == MCAST_EXCLUDE) {
				CTR1(KTR_MLD, "%s: change to EXCLUDE",
				    __func__);
				type = MLD_CHANGE_TO_EXCLUDE_MODE;
			} else {
				CTR1(KTR_MLD, "%s: change to INCLUDE",
				    __func__);
				if (use_block_allow) {
					/*
					 * XXX
					 * Here we're interested in state
					 * edges either direction between
					 * MCAST_UNDEFINED and MCAST_INCLUDE.
					 * Perhaps we should just check
					 * the group state, rather than
					 * the filter mode.
					 */
					if (mode == MCAST_UNDEFINED) {
						type = MLD_BLOCK_OLD_SOURCES;
					} else {
						type = MLD_ALLOW_NEW_SOURCES;
					}
				} else {
					type = MLD_CHANGE_TO_INCLUDE_MODE;
					if (mode == MCAST_UNDEFINED)
						record_has_sources = 0;
				}
			}
		} else {
			if (record_has_sources) {
				is_filter_list_change = 1;
			} else {
				type = MLD_DO_NOTHING;
			}
		}
	} else {
		/*
		 * Queue a current state record.
		 */
		if (mode == MCAST_EXCLUDE) {
			type = MLD_MODE_IS_EXCLUDE;
		} else if (mode == MCAST_INCLUDE) {
			type = MLD_MODE_IS_INCLUDE;
			KASSERT(inm->in6m_st[1].iss_asm == 0,
			    ("%s: inm %p is INCLUDE but ASM count is %d",
			     __func__, inm, inm->in6m_st[1].iss_asm));
		}
	}

	/*
	 * Generate the filter list changes using a separate function.
	 */
	if (is_filter_list_change)
		return (mld_v2_enqueue_filter_change(ifq, inm));

	if (type == MLD_DO_NOTHING) {
		CTR3(KTR_MLD, "%s: nothing to do for %s/%s",
		    __func__, ip6_sprintf(ip6tbuf, &inm->in6m_addr),
		    inm->in6m_ifp->if_xname);
		return (0);
	}

	/*
	 * If any sources are present, we must be able to fit at least
	 * one in the trailing space of the tail packet's mbuf,
	 * ideally more.
	 */
	minrec0len = sizeof(struct mldv2_record);
	if (record_has_sources)
		minrec0len += sizeof(struct in6_addr);

	CTR4(KTR_MLD, "%s: queueing %s for %s/%s", __func__,
	    mld_rec_type_to_str(type),
	    ip6_sprintf(ip6tbuf, &inm->in6m_addr),
	    inm->in6m_ifp->if_xname);

	/*
	 * Check if we have a packet in the tail of the queue for this
	 * group into which the first group record for this group will fit.
	 * Otherwise allocate a new packet.
	 * Always allocate leading space for IP6+RA+ICMPV6+REPORT.
	 * Note: Group records for G/GSR query responses MUST be sent
	 * in their own packet.
	 */
	m0 = ifq->ifq_tail;
	if (!is_group_query &&
	    m0 != NULL &&
	    (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <= MLD_V2_REPORT_MAXRECS) &&
	    (m0->m_pkthdr.len + minrec0len) <
	     (ifp->if_mtu - MLD_MTUSPACE)) {
		m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
			    sizeof(struct mldv2_record)) /
			    sizeof(struct in6_addr);
		m = m0;
		CTR1(KTR_MLD, "%s: use existing packet", __func__);
	} else {
		if (_IF_QFULL(ifq)) {
			CTR1(KTR_MLD, "%s: outbound queue full", __func__);
			return (-ENOMEM);
		}
		m = NULL;
		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);
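		/*
		 * Worked example (a sketch): on a link with a 1500-byte
		 * MTU, after reserving MLD_MTUSPACE for the IPv6 header,
		 * hop-by-hop Router Alert option and report header, plus
		 * one 20-byte group record header, on the order of
		 * (1500 - MLD_MTUSPACE - 20) / 16, i.e. roughly 89,
		 * 16-byte source addresses fit in the first packet.
		 */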
		if (!is_state_change && !is_group_query)
			m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (-ENOMEM);

		mld_save_context(m, ifp);

		CTR1(KTR_MLD, "%s: allocated first packet", __func__);
	}

	/*
	 * Append group record.
	 * If we have sources, we don't know how many yet.
	 */
	mr.mr_type = type;
	mr.mr_datalen = 0;
	mr.mr_numsrc = 0;
	mr.mr_addr = inm->in6m_addr;
	in6_clearscope(&mr.mr_addr);
	if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
		if (m != m0)
			m_freem(m);
		CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
		return (-ENOMEM);
	}
	nbytes += sizeof(struct mldv2_record);

	/*
	 * Append as many sources as will fit in the first packet.
	 * If we are appending to a new packet, the chain allocation
	 * may potentially use clusters; use m_getptr() in this case.
	 * If we are appending to an existing packet, we need to obtain
	 * a pointer to the group record after m_append(), in case a new
	 * mbuf was allocated.
	 *
	 * Only append sources which are in-mode at t1. If we are
	 * transitioning to MCAST_UNDEFINED state on the group, and
	 * use_block_allow is zero, do not include source entries.
	 * Otherwise, we need to include this source in the report.
	 *
	 * Only report recorded sources in our filter set when responding
	 * to a group-source query.
	 */
	if (record_has_sources) {
		if (m == m0) {
			md = m_last(m);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    md->m_len - nbytes);
		} else {
			md = m_getptr(m, 0, &off);
			pmr = (struct mldv2_record *)(mtod(md, uint8_t *) +
			    off);
		}
		msrcs = 0;
		RB_FOREACH_SAFE(ims, ip6_msource_tree, &inm->in6m_srcs,
		    nims) {
			CTR2(KTR_MLD, "%s: visit node %s", __func__,
			    ip6_sprintf(ip6tbuf, &ims->im6s_addr));
			now = im6s_get_mode(inm, ims, 1);
			CTR2(KTR_MLD, "%s: node is %d", __func__, now);
			if ((now != mode) ||
			    (now == mode &&
			     (!use_block_allow && mode == MCAST_UNDEFINED))) {
				CTR1(KTR_MLD, "%s: skip node", __func__);
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				CTR1(KTR_MLD, "%s: skip unrecorded node",
				    __func__);
				continue;
			}
			CTR1(KTR_MLD, "%s: append node", __func__);
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0)
					m_freem(m);
				CTR1(KTR_MLD, "%s: m_append() failed.",
				    __func__);
				return (-ENOMEM);
			}
			nbytes += sizeof(struct in6_addr);
			++msrcs;
			if (msrcs == m0srcs)
				break;
		}
		CTR2(KTR_MLD, "%s: msrcs is %d this packet", __func__,
		    msrcs);
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));
	}

	if (is_source_query && msrcs == 0) {
		CTR1(KTR_MLD, "%s: no recorded sources to report", __func__);
		if (m != m0)
			m_freem(m);
		return (0);
	}

	/*
	 * We are good to go with the first packet.
	 */
	if (m != m0) {
		CTR1(KTR_MLD, "%s: enqueueing first packet", __func__);
		m->m_pkthdr.PH_vt.vt_nrecs = 1;
		_IF_ENQUEUE(ifq, m);
	} else
		m->m_pkthdr.PH_vt.vt_nrecs++;

	/*
	 * No further work needed if no source list in packet(s).
	 */
	if (!record_has_sources)
		return (nbytes);

	/*
	 * Whilst sources remain to be announced, we need to allocate
	 * a new packet and fill out as many sources as will fit.
	 * Always try for a cluster first.
	 */
	while (nims != NULL) {
		if (_IF_QFULL(ifq)) {
			CTR1(KTR_MLD, "%s: outbound queue full", __func__);
			return (-ENOMEM);
		}
		m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			m = m_gethdr(M_DONTWAIT, MT_DATA);
		if (m == NULL)
			return (-ENOMEM);
		mld_save_context(m, ifp);
		md = m_getptr(m, 0, &off);
		pmr = (struct mldv2_record *)(mtod(md, uint8_t *) + off);
		CTR1(KTR_MLD, "%s: allocated next packet", __func__);

		if (!m_append(m, sizeof(struct mldv2_record), (void *)&mr)) {
			if (m != m0)
				m_freem(m);
			CTR1(KTR_MLD, "%s: m_append() failed.", __func__);
			return (-ENOMEM);
		}
		m->m_pkthdr.PH_vt.vt_nrecs = 1;
		nbytes += sizeof(struct mldv2_record);

		m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
		    sizeof(struct mldv2_record)) / sizeof(struct in6_addr);

		msrcs = 0;
		RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
			CTR2(KTR_MLD, "%s: visit node %s",
			    __func__, ip6_sprintf(ip6tbuf, &ims->im6s_addr));
			now = im6s_get_mode(inm, ims, 1);
			if ((now != mode) ||
			    (now == mode &&
			     (!use_block_allow && mode == MCAST_UNDEFINED))) {
				CTR1(KTR_MLD, "%s: skip node", __func__);
				continue;
			}
			if (is_source_query && ims->im6s_stp == 0) {
				CTR1(KTR_MLD, "%s: skip unrecorded node",
				    __func__);
				continue;
			}
			CTR1(KTR_MLD, "%s: append node", __func__);
			if (!m_append(m, sizeof(struct in6_addr),
			    (void *)&ims->im6s_addr)) {
				if (m != m0)
					m_freem(m);
				CTR1(KTR_MLD, "%s: m_append() failed.",
				    __func__);
				return (-ENOMEM);
			}
			++msrcs;
			if (msrcs == m0srcs)
				break;
		}
		pmr->mr_numsrc = htons(msrcs);
		nbytes += (msrcs * sizeof(struct in6_addr));

		CTR1(KTR_MLD, "%s: enqueueing next packet", __func__);
		_IF_ENQUEUE(ifq, m);
	}

	return (nbytes);
}

/*
 * Type used to mark record pass completion.
 * We exploit the fact that we can cast to this easily from the
 * current filter modes on each ip6_msource node.
 */
typedef enum {
	REC_NONE = 0x00,	/* MCAST_UNDEFINED */
	REC_ALLOW = 0x01,	/* MCAST_INCLUDE */
	REC_BLOCK = 0x02,	/* MCAST_EXCLUDE */
	REC_FULL = REC_ALLOW | REC_BLOCK
} rectype_t;
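
/*
 * The cast relies on the numeric identities MCAST_INCLUDE == REC_ALLOW
 * (1) and MCAST_EXCLUDE == REC_BLOCK (2): a node in-mode INCLUDE at t1
 * yields an ALLOW_NEW delta, and a node in-mode EXCLUDE a BLOCK_OLD
 * delta.
 */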

/*
 * Enqueue an MLDv2 filter list change to the given output queue.
 *
 * Source list filter state is held in an RB-tree. When the filter list
 * for a group is changed without changing its mode, we need to compute
 * the deltas between T0 and T1 for each source in the filter set,
 * and enqueue the appropriate ALLOW_NEW/BLOCK_OLD records.
 *
 * As we may potentially queue two record types, and the entire RB-tree
 * needs to be walked at once, we break this out into its own function
 * so we can generate a tightly packed queue of packets.
 *
 * XXX This could be written to only use one tree walk, although that makes
 * serializing into the mbuf chains a bit harder. For now we do two walks
 * which makes things easier on us, and it may or may not be harder on
 * the L2 cache.
 *
 * If successful the size of all data appended to the queue is returned,
 * otherwise an error code less than zero is returned, or zero if
 * no record(s) were appended.
 */
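/*
 * Delta example (a sketch): for a group in EXCLUDE mode, a source
 * removed from the exclude list between T0 and T1 goes to
 * MCAST_UNDEFINED at t1 and so is reported in an ALLOW_NEW record
 * (the inverse of the group's filter mode); a source newly added to
 * the exclude list is reported in BLOCK_OLD.
 */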
2669191672Sbmsstatic int
2670191672Sbmsmld_v2_enqueue_filter_change(struct ifqueue *ifq, struct in6_multi *inm)
2671191672Sbms{
2672191672Sbms	static const int MINRECLEN =
2673191672Sbms	    sizeof(struct mldv2_record) + sizeof(struct in6_addr);
2674191672Sbms	struct ifnet		*ifp;
2675191672Sbms	struct mldv2_record	 mr;
2676191672Sbms	struct mldv2_record	*pmr;
2677191672Sbms	struct ip6_msource	*ims, *nims;
2678191672Sbms	struct mbuf		*m, *m0, *md;
2679191672Sbms	int			 m0srcs, nbytes, npbytes, off, rsrcs, schanged;
2680191672Sbms	int			 nallow, nblock;
2681191672Sbms	uint8_t			 mode, now, then;
2682191672Sbms	rectype_t		 crt, drt, nrt;
2683191672Sbms#ifdef KTR
2684191672Sbms	char			 ip6tbuf[INET6_ADDRSTRLEN];
2685191672Sbms#endif
2686191672Sbms
2687191672Sbms	IN6_MULTI_LOCK_ASSERT();
2688191672Sbms
2689191672Sbms	if (inm->in6m_nsrc == 0 ||
2690191672Sbms	    (inm->in6m_st[0].iss_asm > 0 && inm->in6m_st[1].iss_asm > 0))
2691191672Sbms		return (0);
2692191672Sbms
2693191672Sbms	ifp = inm->in6m_ifp;			/* interface */
2694191672Sbms	mode = inm->in6m_st[1].iss_fmode;	/* filter mode at t1 */
2695191672Sbms	crt = REC_NONE;	/* current group record type */
2696191672Sbms	drt = REC_NONE;	/* mask of completed group record types */
2697191672Sbms	nrt = REC_NONE;	/* record type for current node */
2698191672Sbms	m0srcs = 0;	/* # source which will fit in current mbuf chain */
2699191672Sbms	npbytes = 0;	/* # of bytes appended this packet */
2700191672Sbms	nbytes = 0;	/* # of bytes appended to group's state-change queue */
2701191672Sbms	rsrcs = 0;	/* # sources encoded in current record */
2702191672Sbms	schanged = 0;	/* # nodes encoded in overall filter change */
2703191672Sbms	nallow = 0;	/* # of source entries in ALLOW_NEW */
2704191672Sbms	nblock = 0;	/* # of source entries in BLOCK_OLD */
2705191672Sbms	nims = NULL;	/* next tree node pointer */
2706191672Sbms
2707191672Sbms	/*
2708191672Sbms	 * For each possible filter record mode.
2709191672Sbms	 * The first kind of source we encounter tells us which
2710191672Sbms	 * is the first kind of record we start appending.
2711191672Sbms	 * If a node transitioned to UNDEFINED at t1, its mode is treated
2712191672Sbms	 * as the inverse of the group's filter mode.
2713191672Sbms	 */
2714191672Sbms	while (drt != REC_FULL) {
2715191672Sbms		do {
2716191672Sbms			m0 = ifq->ifq_tail;
2717191672Sbms			if (m0 != NULL &&
2718191672Sbms			    (m0->m_pkthdr.PH_vt.vt_nrecs + 1 <=
2719191672Sbms			     MLD_V2_REPORT_MAXRECS) &&
2720191672Sbms			    (m0->m_pkthdr.len + MINRECLEN) <
2721191672Sbms			     (ifp->if_mtu - MLD_MTUSPACE)) {
2722191672Sbms				m = m0;
2723191672Sbms				m0srcs = (ifp->if_mtu - m0->m_pkthdr.len -
2724191672Sbms					    sizeof(struct mldv2_record)) /
2725191672Sbms					    sizeof(struct in6_addr);
2726191672Sbms				CTR1(KTR_MLD,
2727191672Sbms				    "%s: use previous packet", __func__);
2728191672Sbms			} else {
2729191672Sbms				m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
2730191672Sbms				if (m == NULL)
2731191672Sbms					m = m_gethdr(M_DONTWAIT, MT_DATA);
2732191672Sbms				if (m == NULL) {
2733191672Sbms					CTR1(KTR_MLD,
2734191672Sbms					    "%s: m_get*() failed", __func__);
2735191672Sbms					return (-ENOMEM);
2736191672Sbms				}
2737191672Sbms				m->m_pkthdr.PH_vt.vt_nrecs = 0;
2738191672Sbms				mld_save_context(m, ifp);
2739191672Sbms				m0srcs = (ifp->if_mtu - MLD_MTUSPACE -
2740191672Sbms				    sizeof(struct mldv2_record)) /
2741191672Sbms				    sizeof(struct in6_addr);
2742191672Sbms				npbytes = 0;
2743191672Sbms				CTR1(KTR_MLD,
2744191672Sbms				    "%s: allocated new packet", __func__);
2745191672Sbms			}
			/*
			 * Append the MLD group record header to the
			 * current packet's data area.
			 * Recalculate pointer to free space for next
			 * group record, in case m_append() allocated
			 * a new mbuf or cluster.
			 */
			memset(&mr, 0, sizeof(mr));
			mr.mr_addr = inm->in6m_addr;
			in6_clearscope(&mr.mr_addr);
			if (!m_append(m, sizeof(mr), (void *)&mr)) {
				if (m != m0)
					m_freem(m);
				CTR1(KTR_MLD,
				    "%s: m_append() failed", __func__);
				return (-ENOMEM);
			}
			npbytes += sizeof(struct mldv2_record);
			if (m != m0) {
				/* new packet; offset in chain */
				md = m_getptr(m, npbytes -
				    sizeof(struct mldv2_record), &off);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + off);
			} else {
				/* current packet; offset from last append */
				md = m_last(m);
				pmr = (struct mldv2_record *)(mtod(md,
				    uint8_t *) + md->m_len -
				    sizeof(struct mldv2_record));
			}
			/*
			 * Begin walking the tree for this record type
			 * pass, or continue from where we left off
			 * previously if we had to allocate a new packet.
			 * Only report deltas in-mode at t1.
			 * We need not report included sources as allowed
			 * if we are in inclusive mode on the group,
			 * however the converse is not true.
			 */
			rsrcs = 0;
			if (nims == NULL) {
				nims = RB_MIN(ip6_msource_tree,
				    &inm->in6m_srcs);
			}
			RB_FOREACH_FROM(ims, ip6_msource_tree, nims) {
				CTR2(KTR_MLD, "%s: visit node %s", __func__,
				    ip6_sprintf(ip6tbuf, &ims->im6s_addr));
				now = im6s_get_mode(inm, ims, 1);
				then = im6s_get_mode(inm, ims, 0);
				CTR3(KTR_MLD, "%s: mode: t0 %d, t1 %d",
				    __func__, then, now);
				if (now == then) {
					CTR1(KTR_MLD,
					    "%s: skip unchanged", __func__);
					continue;
				}
				if (mode == MCAST_EXCLUDE &&
				    now == MCAST_INCLUDE) {
					CTR1(KTR_MLD,
					    "%s: skip IN src on EX group",
					    __func__);
					continue;
				}
				nrt = (rectype_t)now;
				if (nrt == REC_NONE)
					nrt = (rectype_t)(~mode & REC_FULL);
				if (schanged++ == 0) {
					crt = nrt;
				} else if (crt != nrt)
					continue;
				if (!m_append(m, sizeof(struct in6_addr),
				    (void *)&ims->im6s_addr)) {
					if (m != m0)
						m_freem(m);
					CTR1(KTR_MLD,
					    "%s: m_append() failed", __func__);
					return (-ENOMEM);
				}
				nallow += !!(crt == REC_ALLOW);
				nblock += !!(crt == REC_BLOCK);
				if (++rsrcs == m0srcs)
					break;
			}
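			/*
			 * If the walk above stopped because the packet
			 * filled (rsrcs == m0srcs), nims still names the
			 * next unvisited node, so the enclosing loop will
			 * allocate a fresh packet and resume from there.
			 */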
			/*
			 * If we did not append any tree nodes on this
			 * pass, back out of allocations.
			 */
			if (rsrcs == 0) {
				npbytes -= sizeof(struct mldv2_record);
				if (m != m0) {
					CTR1(KTR_MLD,
					    "%s: m_free(m)", __func__);
					m_freem(m);
				} else {
					CTR1(KTR_MLD,
					    "%s: m_adj(m, -mr)", __func__);
					m_adj(m, -((int)sizeof(
					    struct mldv2_record)));
				}
				continue;
			}
			npbytes += (rsrcs * sizeof(struct in6_addr));
			if (crt == REC_ALLOW)
				pmr->mr_type = MLD_ALLOW_NEW_SOURCES;
			else if (crt == REC_BLOCK)
				pmr->mr_type = MLD_BLOCK_OLD_SOURCES;
			pmr->mr_numsrc = htons(rsrcs);
			/*
			 * Count the new group record, and enqueue this
			 * packet if it wasn't already queued.
			 */
			m->m_pkthdr.PH_vt.vt_nrecs++;
			if (m != m0)
				_IF_ENQUEUE(ifq, m);
			nbytes += npbytes;
		} while (nims != NULL);
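		/*
		 * Mark this pass's record type as done and flip to the
		 * other one: assuming the one-bit REC_ALLOW/REC_BLOCK
		 * encoding used by rectype_t, (~crt & REC_FULL) yields
		 * exactly the record type not yet emitted.
		 */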
		drt |= crt;
		crt = (~crt & REC_FULL);
	}

	CTR3(KTR_MLD, "%s: queued %d ALLOW_NEW, %d BLOCK_OLD", __func__,
	    nallow, nblock);

	return (nbytes);
}

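/*
 * Merge a group's pending state-change messages into the per-interface
 * state-change queue.  A sketch of the contract, as inferred from the
 * body below: while retransmissions are still owed (in6m_scrv > 0),
 * queued messages are duplicated rather than consumed, so the originals
 * remain available for the next retransmission pass.
 * Returns 0 on success, or ENOMEM if a duplicate cannot be allocated.
 */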
static int
mld_v2_merge_state_changes(struct in6_multi *inm, struct ifqueue *ifscq)
{
	struct ifqueue	*gq;
	struct mbuf	*m;		/* pending state-change */
	struct mbuf	*m0;		/* copy of pending state-change */
	struct mbuf	*mt;		/* last state-change in packet */
	int		 docopy, domerge;
	u_int		 recslen;

	docopy = 0;
	domerge = 0;
	recslen = 0;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	/*
	 * If there are further pending retransmissions, make a writable
	 * copy of each queued state-change message before merging.
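	 * Example: with in6m_scrv == 2, this state change is still owed
	 * two more transmissions, so the originals must stay on the
	 * group's queue and only duplicates may be merged below.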
	 */
	if (inm->in6m_scrv > 0)
		docopy = 1;

	gq = &inm->in6m_scq;
#ifdef KTR
	if (gq->ifq_head == NULL) {
		CTR2(KTR_MLD, "%s: WARNING: queue for inm %p is empty",
		    __func__, inm);
	}
#endif

	m = gq->ifq_head;
	while (m != NULL) {
		/*
		 * Only merge the report into the current packet if
		 * there is sufficient space to do so; an MLDv2 report
		 * packet may only contain 65,535 group records.
		 * Always use a simple mbuf chain concatenation to do this,
		 * as large state changes for single groups may have
		 * allocated clusters.
		 */
		domerge = 0;
		mt = ifscq->ifq_tail;
		if (mt != NULL) {
			recslen = m_length(m, NULL);

			if ((mt->m_pkthdr.PH_vt.vt_nrecs +
			    m->m_pkthdr.PH_vt.vt_nrecs <=
			    MLD_V2_REPORT_MAXRECS) &&
			    (mt->m_pkthdr.len + recslen <=
			    (inm->in6m_ifp->if_mtu - MLD_MTUSPACE)))
				domerge = 1;
		}

		if (!domerge && _IF_QFULL(ifscq)) {
			CTR2(KTR_MLD,
			    "%s: outbound queue full, skipping whole packet %p",
			    __func__, m);
			mt = m->m_nextpkt;
			if (!docopy)
				m_freem(m);
			m = mt;
			continue;
		}

		if (!docopy) {
			CTR2(KTR_MLD, "%s: dequeueing %p", __func__, m);
			_IF_DEQUEUE(gq, m0);
			m = m0->m_nextpkt;
		} else {
			CTR2(KTR_MLD, "%s: copying %p", __func__, m);
			m0 = m_dup(m, M_NOWAIT);
			if (m0 == NULL)
				return (ENOMEM);
			m0->m_nextpkt = NULL;
			m = m->m_nextpkt;
		}

		if (!domerge) {
			CTR3(KTR_MLD, "%s: queueing %p to ifscq %p",
			    __func__, m0, ifscq);
			_IF_ENQUEUE(ifscq, m0);
		} else {
			struct mbuf *mtl;	/* last mbuf of packet mt */

			CTR3(KTR_MLD, "%s: merging %p with ifscq tail %p",
			    __func__, m0, mt);

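			/*
			 * Strip the packet header from the appended chain
			 * so the merged packet is described by mt's header
			 * alone, then grow mt's length and record count
			 * before linking the chains.
			 */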
			mtl = m_last(mt);
			m0->m_flags &= ~M_PKTHDR;
			mt->m_pkthdr.len += recslen;
			mt->m_pkthdr.PH_vt.vt_nrecs +=
			    m0->m_pkthdr.PH_vt.vt_nrecs;

			mtl->m_next = m0;
		}
	}

	return (0);
}

/*
 * Respond to a pending MLDv2 General Query.
 */
static void
mld_v2_dispatch_general_query(struct mld_ifinfo *mli)
{
	struct ifmultiaddr	*ifma;
	struct ifnet		*ifp;
	struct in6_multi	*inm;
	int			 retval;

	IN6_MULTI_LOCK_ASSERT();
	MLD_LOCK_ASSERT();

	KASSERT(mli->mli_version == MLD_VERSION_2,
	    ("%s: called when version %d", __func__, mli->mli_version));

	ifp = mli->mli_ifp;

	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET6 ||
		    ifma->ifma_protospec == NULL)
			continue;

		inm = (struct in6_multi *)ifma->ifma_protospec;
		KASSERT(ifp == inm->in6m_ifp,
		    ("%s: inconsistent ifp", __func__));

		switch (inm->in6m_state) {
		case MLD_NOT_MEMBER:
		case MLD_SILENT_MEMBER:
			break;
		case MLD_REPORTING_MEMBER:
		case MLD_IDLE_MEMBER:
		case MLD_LAZY_MEMBER:
		case MLD_SLEEPING_MEMBER:
		case MLD_AWAKENING_MEMBER:
			inm->in6m_state = MLD_REPORTING_MEMBER;
			retval = mld_v2_enqueue_group_record(&mli->mli_gq,
			    inm, 0, 0, 0, 0);
			CTR2(KTR_MLD, "%s: enqueue record = %d",
			    __func__, retval);
			break;
		case MLD_G_QUERY_PENDING_MEMBER:
		case MLD_SG_QUERY_PENDING_MEMBER:
		case MLD_LEAVING_MEMBER:
			break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);

	mld_dispatch_queue(&mli->mli_gq, MLD_MAX_RESPONSE_BURST);

	/*
	 * Slew transmission of bursts over 500ms intervals.
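	 * The timer value is in MLD fast-timeout ticks; assuming the
	 * usual 200ms fast tick, MLD_RANDOM_DELAY() lands the next burst
	 * somewhere in roughly the following half second.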
	 */
	if (mli->mli_gq.ifq_head != NULL) {
		mli->mli_v2_timer = 1 + MLD_RANDOM_DELAY(
		    MLD_RESPONSE_BURST_INTERVAL);
		V_interface_timers_running6 = 1;
	}
}

/*
 * Transmit the next pending message in the output queue.
 *
 * VIMAGE: Needs to store/restore vnet pointer on a per-mbuf-chain basis.
 * MRT: Nothing needs to be done, as MLD traffic is always local to
 * a link and uses a link-scope multicast address.
 */
static void
mld_dispatch_packet(struct mbuf *m)
{
	struct ip6_moptions	 im6o;
	struct ifnet		*ifp;
	struct ifnet		*oifp;
	struct mbuf		*m0;
	struct mbuf		*md;
	struct ip6_hdr		*ip6;
	struct mld_hdr		*mld;
	int			 error;
	int			 off;
	int			 type;
	uint32_t		 ifindex;

	CTR2(KTR_MLD, "%s: transmit %p", __func__, m);

	/*
	 * Set VNET image pointer from enqueued mbuf chain
	 * before doing anything else. Whilst we use interface
	 * indexes to guard against interface detach, they are
	 * unique to each VIMAGE and must be retrieved.
	 */
	ifindex = mld_restore_context(m);

	/*
	 * Check if the ifnet still exists. This limits the scope of
	 * any race in the absence of a global ifp lock for low cost
	 * (an array lookup).
	 */
	ifp = ifnet_byindex(ifindex);
	if (ifp == NULL) {
		CTR3(KTR_MLD, "%s: dropped %p as ifindex %u went away.",
		    __func__, m, ifindex);
		m_freem(m);
		IP6STAT_INC(ip6s_noroute);
		goto out;
	}

	im6o.im6o_multicast_hlim = 1;
	im6o.im6o_multicast_loop = (V_ip6_mrouter != NULL);
	im6o.im6o_multicast_ifp = ifp;

	if (m->m_flags & M_MLDV1) {
		m0 = m;
	} else {
		m0 = mld_v2_encap_report(ifp, m);
		if (m0 == NULL) {
			CTR2(KTR_MLD, "%s: dropped %p", __func__, m);
			IP6STAT_INC(ip6s_odropped);
			goto out;
		}
	}

	mld_scrub_context(m0);
	m->m_flags &= ~(M_PROTOFLAGS);
	m0->m_pkthdr.rcvif = V_loif;

	ip6 = mtod(m0, struct ip6_hdr *);
#if 0
	(void)in6_setscope(&ip6->ip6_dst, ifp, NULL);	/* XXX LOR */
#else
	/*
	 * XXX XXX Break some KPI rules to prevent an LOR which would
	 * occur if we called in6_setscope() at transmission.
	 * See comments at top of file.
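	 * MLD_EMBEDSCOPE() is assumed, from its use here, to write the
	 * interface index into the link-local destination address
	 * KAME-style, standing in for the in6_setscope() call above.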
	 */
	MLD_EMBEDSCOPE(&ip6->ip6_dst, ifp->if_index);
#endif

	/*
	 * Retrieve the ICMPv6 type before handoff to ip6_output(),
	 * so we can bump the stats.
	 */
	md = m_getptr(m0, sizeof(struct ip6_hdr), &off);
	mld = (struct mld_hdr *)(mtod(md, uint8_t *) + off);
	type = mld->mld_type;

	error = ip6_output(m0, &mld_po, NULL, IPV6_UNSPECSRC, &im6o,
	    &oifp, NULL);
	if (error) {
		CTR3(KTR_MLD, "%s: ip6_output(%p) = %d", __func__, m0, error);
		goto out;
	}
	ICMP6STAT_INC(icp6s_outhist[type]);
	if (oifp != NULL) {
		icmp6_ifstat_inc(oifp, ifs6_out_msg);
		switch (type) {
		case MLD_LISTENER_REPORT:
		case MLDV2_LISTENER_REPORT:
			icmp6_ifstat_inc(oifp, ifs6_out_mldreport);
			break;
		case MLD_LISTENER_DONE:
			icmp6_ifstat_inc(oifp, ifs6_out_mlddone);
			break;
		}
	}
out:
	return;
}

/*
 * Encapsulate an MLDv2 report.
 *
 * KAME IPv6 requires that hop-by-hop options be passed separately,
 * and that the IPv6 header be prepended in a separate mbuf.
 *
 * Returns a pointer to the new mbuf chain head, or NULL if the
 * allocation failed.
 */
static struct mbuf *
mld_v2_encap_report(struct ifnet *ifp, struct mbuf *m)
{
	struct mbuf		*mh;
	struct mldv2_report	*mld;
	struct ip6_hdr		*ip6;
	struct in6_ifaddr	*ia;
	int			 mldreclen;

	KASSERT(ifp != NULL, ("%s: null ifp", __func__));
	KASSERT((m->m_flags & M_PKTHDR),
	    ("%s: mbuf chain %p is !M_PKTHDR", __func__, m));

	/*
	 * RFC3590: OK to send as :: or tentative during DAD.
	 */
	ia = in6ifa_ifpforlinklocal(ifp, IN6_IFF_NOTREADY|IN6_IFF_ANYCAST);
	if (ia == NULL)
		CTR1(KTR_MLD, "%s: warning: ia is NULL", __func__);

	MGETHDR(mh, M_DONTWAIT, MT_HEADER);
	if (mh == NULL) {
		if (ia != NULL)
			ifa_free(&ia->ia_ifa);
		m_freem(m);
		return (NULL);
	}
	MH_ALIGN(mh, sizeof(struct ip6_hdr) + sizeof(struct mldv2_report));

	mldreclen = m_length(m, NULL);
	CTR2(KTR_MLD, "%s: mldreclen is %d", __func__, mldreclen);

	mh->m_len = sizeof(struct ip6_hdr) + sizeof(struct mldv2_report);
	mh->m_pkthdr.len = sizeof(struct ip6_hdr) +
	    sizeof(struct mldv2_report) + mldreclen;
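	/*
	 * Resulting layout, for illustration: mh carries the 40-byte
	 * IPv6 header and the 8-byte MLDv2 report header; the original
	 * chain m, holding mldreclen bytes of group records, is linked
	 * after it below, which is why the packet header length above
	 * counts all three pieces.
	 */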

	ip6 = mtod(mh, struct ip6_hdr *);
	ip6->ip6_flow = 0;
	ip6->ip6_vfc &= ~IPV6_VERSION_MASK;
	ip6->ip6_vfc |= IPV6_VERSION;
	ip6->ip6_nxt = IPPROTO_ICMPV6;
	ip6->ip6_src = ia ? ia->ia_addr.sin6_addr : in6addr_any;
	if (ia != NULL)
		ifa_free(&ia->ia_ifa);
	ip6->ip6_dst = in6addr_linklocal_allv2routers;
	/* scope ID will be set in netisr */

	mld = (struct mldv2_report *)(ip6 + 1);
	mld->mld_type = MLDV2_LISTENER_REPORT;
	mld->mld_code = 0;
	mld->mld_cksum = 0;
	mld->mld_v2_reserved = 0;
	mld->mld_v2_numrecs = htons(m->m_pkthdr.PH_vt.vt_nrecs);
	m->m_pkthdr.PH_vt.vt_nrecs = 0;

	mh->m_next = m;
	mld->mld_cksum = in6_cksum(mh, IPPROTO_ICMPV6,
	    sizeof(struct ip6_hdr), sizeof(struct mldv2_report) + mldreclen);
	return (mh);
}

#ifdef KTR
static const char *
mld_rec_type_to_str(const int type)
{

	switch (type) {
	case MLD_CHANGE_TO_EXCLUDE_MODE:
		return "TO_EX";
	case MLD_CHANGE_TO_INCLUDE_MODE:
		return "TO_IN";
	case MLD_MODE_IS_EXCLUDE:
		return "MODE_EX";
	case MLD_MODE_IS_INCLUDE:
		return "MODE_IN";
	case MLD_ALLOW_NEW_SOURCES:
		return "ALLOW_NEW";
	case MLD_BLOCK_OLD_SOURCES:
		return "BLOCK_OLD";
	default:
		break;
	}
	return "unknown";
}
#endif

static void
mld_init(void *unused __unused)
{

	CTR1(KTR_MLD, "%s: initializing", __func__);
	MLD_LOCK_INIT();

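	/*
	 * Preset the packet options shared by all MLD output: hop limit 1
	 * and a hop-by-hop Router Alert option, as MLD requires, plus
	 * don't-fragment.  mld_ra is taken to be the preinitialized
	 * Router Alert option block declared earlier in this file.
	 */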
	ip6_initpktopts(&mld_po);
	mld_po.ip6po_hlim = 1;
	mld_po.ip6po_hbh = &mld_ra.hbh;
	mld_po.ip6po_prefer_tempaddr = IP6PO_TEMPADDR_NOTPREFER;
	mld_po.ip6po_flags = IP6PO_DONTFRAG;
}
SYSINIT(mld_init, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, mld_init, NULL);

static void
mld_uninit(void *unused __unused)
{

	CTR1(KTR_MLD, "%s: tearing down", __func__);
	MLD_LOCK_DESTROY();
}
SYSUNINIT(mld_uninit, SI_SUB_PSEUDO, SI_ORDER_MIDDLE, mld_uninit, NULL);

static void
vnet_mld_init(const void *unused __unused)
{

	CTR1(KTR_MLD, "%s: initializing", __func__);

	LIST_INIT(&V_mli_head);
}
VNET_SYSINIT(vnet_mld_init, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mld_init,
    NULL);

static void
vnet_mld_uninit(const void *unused __unused)
{

	CTR1(KTR_MLD, "%s: tearing down", __func__);

	KASSERT(LIST_EMPTY(&V_mli_head),
	    ("%s: mli list not empty; ifnets not detached?", __func__));
}
VNET_SYSUNINIT(vnet_mld_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, vnet_mld_uninit,
    NULL);

static int
mld_modevent(module_t mod, int type, void *unused __unused)
{

	switch (type) {
	case MOD_LOAD:
	case MOD_UNLOAD:
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t mld_mod = {
	"mld",
	mld_modevent,
	0
};
DECLARE_MODULE(mld, mld_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);