in_mcast.c revision 248085
1170613Sbms/*- 2189592Sbms * Copyright (c) 2007-2009 Bruce Simpson. 3170613Sbms * Copyright (c) 2005 Robert N. M. Watson. 4170613Sbms * All rights reserved. 5170613Sbms * 6170613Sbms * Redistribution and use in source and binary forms, with or without 7170613Sbms * modification, are permitted provided that the following conditions 8170613Sbms * are met: 9170613Sbms * 1. Redistributions of source code must retain the above copyright 10170613Sbms * notice, this list of conditions and the following disclaimer. 11170613Sbms * 2. Redistributions in binary form must reproduce the above copyright 12170613Sbms * notice, this list of conditions and the following disclaimer in the 13170613Sbms * documentation and/or other materials provided with the distribution. 14170613Sbms * 3. The name of the author may not be used to endorse or promote 15170613Sbms * products derived from this software without specific prior written 16170613Sbms * permission. 17170613Sbms * 18170613Sbms * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19170613Sbms * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20170613Sbms * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21170613Sbms * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22170613Sbms * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23170613Sbms * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24170613Sbms * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25170613Sbms * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26170613Sbms * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27170613Sbms * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28170613Sbms * SUCH DAMAGE. 29170613Sbms */ 30170613Sbms 31170613Sbms/* 32170613Sbms * IPv4 multicast socket, group, and socket option processing module. 
33170613Sbms */ 34170613Sbms 35170613Sbms#include <sys/cdefs.h> 36170613Sbms__FBSDID("$FreeBSD: stable/9/sys/netinet/in_mcast.c 248085 2013-03-09 02:36:32Z marius $"); 37170613Sbms 38170613Sbms#include <sys/param.h> 39170613Sbms#include <sys/systm.h> 40170613Sbms#include <sys/kernel.h> 41170613Sbms#include <sys/malloc.h> 42170613Sbms#include <sys/mbuf.h> 43171746Scsjp#include <sys/protosw.h> 44170613Sbms#include <sys/socket.h> 45170613Sbms#include <sys/socketvar.h> 46189592Sbms#include <sys/protosw.h> 47170613Sbms#include <sys/sysctl.h> 48189592Sbms#include <sys/ktr.h> 49189592Sbms#include <sys/tree.h> 50170613Sbms 51170613Sbms#include <net/if.h> 52170613Sbms#include <net/if_dl.h> 53170613Sbms#include <net/route.h> 54185571Sbz#include <net/vnet.h> 55170613Sbms 56170613Sbms#include <netinet/in.h> 57170613Sbms#include <netinet/in_systm.h> 58170613Sbms#include <netinet/in_pcb.h> 59170613Sbms#include <netinet/in_var.h> 60170613Sbms#include <netinet/ip_var.h> 61170613Sbms#include <netinet/igmp_var.h> 62170613Sbms 63189592Sbms#ifndef KTR_IGMPV3 64191659Sbms#define KTR_IGMPV3 KTR_INET 65189592Sbms#endif 66189592Sbms 67170613Sbms#ifndef __SOCKUNION_DECLARED 68170613Sbmsunion sockunion { 69170613Sbms struct sockaddr_storage ss; 70170613Sbms struct sockaddr sa; 71170613Sbms struct sockaddr_dl sdl; 72170613Sbms struct sockaddr_in sin; 73170613Sbms}; 74170613Sbmstypedef union sockunion sockunion_t; 75170613Sbms#define __SOCKUNION_DECLARED 76170613Sbms#endif /* __SOCKUNION_DECLARED */ 77170613Sbms 78189592Sbmsstatic MALLOC_DEFINE(M_INMFILTER, "in_mfilter", 79189592Sbms "IPv4 multicast PCB-layer source filter"); 80170613Sbmsstatic MALLOC_DEFINE(M_IPMADDR, "in_multi", "IPv4 multicast group"); 81170613Sbmsstatic MALLOC_DEFINE(M_IPMOPTS, "ip_moptions", "IPv4 multicast options"); 82189592Sbmsstatic MALLOC_DEFINE(M_IPMSOURCE, "ip_msource", 83189592Sbms "IPv4 multicast IGMP-layer source filter"); 84170613Sbms 85170613Sbms/* 86189592Sbms * Locking: 87189592Sbms * - Lock order is: 
Giant, INP_WLOCK, IN_MULTI_LOCK, IGMP_LOCK, IF_ADDR_LOCK. 88189592Sbms * - The IF_ADDR_LOCK is implicitly taken by inm_lookup() earlier, however 89189592Sbms * it can be taken by code in net/if.c also. 90189592Sbms * - ip_moptions and in_mfilter are covered by the INP_WLOCK. 91189592Sbms * 92189592Sbms * struct in_multi is covered by IN_MULTI_LOCK. There isn't strictly 93189592Sbms * any need for in_multi itself to be virtualized -- it is bound to an ifp 94189592Sbms * anyway no matter what happens. 95170613Sbms */ 96170613Sbmsstruct mtx in_multi_mtx; 97189592SbmsMTX_SYSINIT(in_multi_mtx, &in_multi_mtx, "in_multi_mtx", MTX_DEF); 98170613Sbms 99170613Sbms/* 100170613Sbms * Functions with non-static linkage defined in this file should be 101170613Sbms * declared in in_var.h: 102189592Sbms * imo_multi_filter() 103170613Sbms * in_addmulti() 104170613Sbms * in_delmulti() 105189592Sbms * in_joingroup() 106189592Sbms * in_joingroup_locked() 107189592Sbms * in_leavegroup() 108189592Sbms * in_leavegroup_locked() 109170613Sbms * and ip_var.h: 110170613Sbms * inp_freemoptions() 111170613Sbms * inp_getmoptions() 112170613Sbms * inp_setmoptions() 113189592Sbms * 114189592Sbms * XXX: Both carp and pf need to use the legacy (*,G) KPIs in_addmulti() 115189592Sbms * and in_delmulti(). 
116170613Sbms */ 117189592Sbmsstatic void imf_commit(struct in_mfilter *); 118189592Sbmsstatic int imf_get_source(struct in_mfilter *imf, 119189592Sbms const struct sockaddr_in *psin, 120189592Sbms struct in_msource **); 121189592Sbmsstatic struct in_msource * 122189592Sbms imf_graft(struct in_mfilter *, const uint8_t, 123189592Sbms const struct sockaddr_in *); 124189592Sbmsstatic void imf_leave(struct in_mfilter *); 125189592Sbmsstatic int imf_prune(struct in_mfilter *, const struct sockaddr_in *); 126189592Sbmsstatic void imf_purge(struct in_mfilter *); 127189592Sbmsstatic void imf_rollback(struct in_mfilter *); 128189592Sbmsstatic void imf_reap(struct in_mfilter *); 129170613Sbmsstatic int imo_grow(struct ip_moptions *); 130189592Sbmsstatic size_t imo_match_group(const struct ip_moptions *, 131189592Sbms const struct ifnet *, const struct sockaddr *); 132189592Sbmsstatic struct in_msource * 133189592Sbms imo_match_source(const struct ip_moptions *, const size_t, 134189592Sbms const struct sockaddr *); 135189592Sbmsstatic void ims_merge(struct ip_msource *ims, 136189592Sbms const struct in_msource *lims, const int rollback); 137189592Sbmsstatic int in_getmulti(struct ifnet *, const struct in_addr *, 138189592Sbms struct in_multi **); 139189592Sbmsstatic int inm_get_source(struct in_multi *inm, const in_addr_t haddr, 140189592Sbms const int noalloc, struct ip_msource **pims); 141189592Sbmsstatic int inm_is_ifp_detached(const struct in_multi *); 142189592Sbmsstatic int inm_merge(struct in_multi *, /*const*/ struct in_mfilter *); 143189592Sbmsstatic void inm_purge(struct in_multi *); 144189592Sbmsstatic void inm_reap(struct in_multi *); 145170613Sbmsstatic struct ip_moptions * 146170613Sbms inp_findmoptions(struct inpcb *); 147170613Sbmsstatic int inp_get_source_filters(struct inpcb *, struct sockopt *); 148170613Sbmsstatic int inp_join_group(struct inpcb *, struct sockopt *); 149170613Sbmsstatic int inp_leave_group(struct inpcb *, struct sockopt *); 
150189592Sbmsstatic struct ifnet * 151189592Sbms inp_lookup_mcast_ifp(const struct inpcb *, 152189592Sbms const struct sockaddr_in *, const struct in_addr); 153189592Sbmsstatic int inp_block_unblock_source(struct inpcb *, struct sockopt *); 154170613Sbmsstatic int inp_set_multicast_if(struct inpcb *, struct sockopt *); 155170613Sbmsstatic int inp_set_source_filters(struct inpcb *, struct sockopt *); 156189592Sbmsstatic int sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS); 157170613Sbms 158248085Smariusstatic SYSCTL_NODE(_net_inet_ip, OID_AUTO, mcast, CTLFLAG_RW, 0, 159248085Smarius "IPv4 multicast"); 160189357Sbms 161189592Sbmsstatic u_long in_mcast_maxgrpsrc = IP_MAX_GROUP_SRC_FILTER; 162189592SbmsSYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxgrpsrc, 163189592Sbms CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxgrpsrc, 0, 164189592Sbms "Max source filters per group"); 165189592SbmsTUNABLE_ULONG("net.inet.ip.mcast.maxgrpsrc", &in_mcast_maxgrpsrc); 166189592Sbms 167189592Sbmsstatic u_long in_mcast_maxsocksrc = IP_MAX_SOCK_SRC_FILTER; 168189592SbmsSYSCTL_ULONG(_net_inet_ip_mcast, OID_AUTO, maxsocksrc, 169189592Sbms CTLFLAG_RW | CTLFLAG_TUN, &in_mcast_maxsocksrc, 0, 170189592Sbms "Max source filters per socket"); 171189592SbmsTUNABLE_ULONG("net.inet.ip.mcast.maxsocksrc", &in_mcast_maxsocksrc); 172189592Sbms 173189357Sbmsint in_mcast_loop = IP_DEFAULT_MULTICAST_LOOP; 174189357SbmsSYSCTL_INT(_net_inet_ip_mcast, OID_AUTO, loop, CTLFLAG_RW | CTLFLAG_TUN, 175189357Sbms &in_mcast_loop, 0, "Loopback multicast datagrams by default"); 176189357SbmsTUNABLE_INT("net.inet.ip.mcast.loop", &in_mcast_loop); 177189357Sbms 178248085Smariusstatic SYSCTL_NODE(_net_inet_ip_mcast, OID_AUTO, filters, 179189592Sbms CTLFLAG_RD | CTLFLAG_MPSAFE, sysctl_ip_mcast_filters, 180189592Sbms "Per-interface stack-wide source filters"); 181189592Sbms 182170613Sbms/* 183189592Sbms * Inline function which wraps assertions for a valid ifp. 
184189592Sbms * The ifnet layer will set the ifma's ifp pointer to NULL if the ifp 185189592Sbms * is detached. 186189592Sbms */ 187189592Sbmsstatic int __inline 188189592Sbmsinm_is_ifp_detached(const struct in_multi *inm) 189189592Sbms{ 190189592Sbms struct ifnet *ifp; 191189592Sbms 192189592Sbms KASSERT(inm->inm_ifma != NULL, ("%s: no ifma", __func__)); 193189592Sbms ifp = inm->inm_ifma->ifma_ifp; 194189592Sbms if (ifp != NULL) { 195189592Sbms /* 196189592Sbms * Sanity check that netinet's notion of ifp is the 197189592Sbms * same as net's. 198189592Sbms */ 199189592Sbms KASSERT(inm->inm_ifp == ifp, ("%s: bad ifp", __func__)); 200189592Sbms } 201189592Sbms 202189592Sbms return (ifp == NULL); 203189592Sbms} 204189592Sbms 205189592Sbms/* 206189592Sbms * Initialize an in_mfilter structure to a known state at t0, t1 207189592Sbms * with an empty source filter list. 208189592Sbms */ 209189592Sbmsstatic __inline void 210189592Sbmsimf_init(struct in_mfilter *imf, const int st0, const int st1) 211189592Sbms{ 212189592Sbms memset(imf, 0, sizeof(struct in_mfilter)); 213189592Sbms RB_INIT(&imf->imf_sources); 214189592Sbms imf->imf_st[0] = st0; 215189592Sbms imf->imf_st[1] = st1; 216189592Sbms} 217189592Sbms 218189592Sbms/* 219170613Sbms * Resize the ip_moptions vector to the next power-of-two minus 1. 220170613Sbms * May be called with locks held; do not sleep. 
221170613Sbms */ 222170613Sbmsstatic int 223170613Sbmsimo_grow(struct ip_moptions *imo) 224170613Sbms{ 225170613Sbms struct in_multi **nmships; 226170613Sbms struct in_multi **omships; 227170613Sbms struct in_mfilter *nmfilters; 228170613Sbms struct in_mfilter *omfilters; 229170613Sbms size_t idx; 230170613Sbms size_t newmax; 231170613Sbms size_t oldmax; 232170613Sbms 233170613Sbms nmships = NULL; 234170613Sbms nmfilters = NULL; 235170613Sbms omships = imo->imo_membership; 236170613Sbms omfilters = imo->imo_mfilters; 237170613Sbms oldmax = imo->imo_max_memberships; 238170613Sbms newmax = ((oldmax + 1) * 2) - 1; 239170613Sbms 240170613Sbms if (newmax <= IP_MAX_MEMBERSHIPS) { 241170613Sbms nmships = (struct in_multi **)realloc(omships, 242170613Sbms sizeof(struct in_multi *) * newmax, M_IPMOPTS, M_NOWAIT); 243170613Sbms nmfilters = (struct in_mfilter *)realloc(omfilters, 244189592Sbms sizeof(struct in_mfilter) * newmax, M_INMFILTER, M_NOWAIT); 245170613Sbms if (nmships != NULL && nmfilters != NULL) { 246170613Sbms /* Initialize newly allocated source filter heads. */ 247170613Sbms for (idx = oldmax; idx < newmax; idx++) { 248189592Sbms imf_init(&nmfilters[idx], MCAST_UNDEFINED, 249189592Sbms MCAST_EXCLUDE); 250170613Sbms } 251170613Sbms imo->imo_max_memberships = newmax; 252170613Sbms imo->imo_membership = nmships; 253170613Sbms imo->imo_mfilters = nmfilters; 254170613Sbms } 255170613Sbms } 256170613Sbms 257170613Sbms if (nmships == NULL || nmfilters == NULL) { 258170613Sbms if (nmships != NULL) 259170613Sbms free(nmships, M_IPMOPTS); 260170613Sbms if (nmfilters != NULL) 261189592Sbms free(nmfilters, M_INMFILTER); 262170613Sbms return (ETOOMANYREFS); 263170613Sbms } 264170613Sbms 265170613Sbms return (0); 266170613Sbms} 267170613Sbms 268170613Sbms/* 269170613Sbms * Find an IPv4 multicast group entry for this ip_moptions instance 270170613Sbms * which matches the specified group, and optionally an interface. 
271170613Sbms * Return its index into the array, or -1 if not found. 272170613Sbms */ 273189592Sbmsstatic size_t 274189592Sbmsimo_match_group(const struct ip_moptions *imo, const struct ifnet *ifp, 275189592Sbms const struct sockaddr *group) 276170613Sbms{ 277189592Sbms const struct sockaddr_in *gsin; 278170613Sbms struct in_multi **pinm; 279170613Sbms int idx; 280170613Sbms int nmships; 281170613Sbms 282189592Sbms gsin = (const struct sockaddr_in *)group; 283170613Sbms 284170613Sbms /* The imo_membership array may be lazy allocated. */ 285170613Sbms if (imo->imo_membership == NULL || imo->imo_num_memberships == 0) 286170613Sbms return (-1); 287170613Sbms 288170613Sbms nmships = imo->imo_num_memberships; 289170613Sbms pinm = &imo->imo_membership[0]; 290170613Sbms for (idx = 0; idx < nmships; idx++, pinm++) { 291170613Sbms if (*pinm == NULL) 292170613Sbms continue; 293170613Sbms if ((ifp == NULL || ((*pinm)->inm_ifp == ifp)) && 294189592Sbms in_hosteq((*pinm)->inm_addr, gsin->sin_addr)) { 295170613Sbms break; 296170613Sbms } 297170613Sbms } 298170613Sbms if (idx >= nmships) 299170613Sbms idx = -1; 300170613Sbms 301170613Sbms return (idx); 302170613Sbms} 303170613Sbms 304170613Sbms/* 305189592Sbms * Find an IPv4 multicast source entry for this imo which matches 306170613Sbms * the given group index for this socket, and source address. 307189592Sbms * 308189592Sbms * NOTE: This does not check if the entry is in-mode, merely if 309189592Sbms * it exists, which may not be the desired behaviour. 
310170613Sbms */ 311189592Sbmsstatic struct in_msource * 312189592Sbmsimo_match_source(const struct ip_moptions *imo, const size_t gidx, 313189592Sbms const struct sockaddr *src) 314170613Sbms{ 315189592Sbms struct ip_msource find; 316170613Sbms struct in_mfilter *imf; 317189592Sbms struct ip_msource *ims; 318189592Sbms const sockunion_t *psa; 319170613Sbms 320170613Sbms KASSERT(src->sa_family == AF_INET, ("%s: !AF_INET", __func__)); 321170613Sbms KASSERT(gidx != -1 && gidx < imo->imo_num_memberships, 322170613Sbms ("%s: invalid index %d\n", __func__, (int)gidx)); 323170613Sbms 324170613Sbms /* The imo_mfilters array may be lazy allocated. */ 325170613Sbms if (imo->imo_mfilters == NULL) 326170613Sbms return (NULL); 327170613Sbms imf = &imo->imo_mfilters[gidx]; 328170613Sbms 329189592Sbms /* Source trees are keyed in host byte order. */ 330189592Sbms psa = (const sockunion_t *)src; 331189592Sbms find.ims_haddr = ntohl(psa->sin.sin_addr.s_addr); 332189592Sbms ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); 333189592Sbms 334189592Sbms return ((struct in_msource *)ims); 335170613Sbms} 336170613Sbms 337170613Sbms/* 338189592Sbms * Perform filtering for multicast datagrams on a socket by group and source. 339189592Sbms * 340189592Sbms * Returns 0 if a datagram should be allowed through, or various error codes 341189592Sbms * if the socket was not a member of the group, or the source was muted, etc. 
342170613Sbms */ 343189592Sbmsint 344189592Sbmsimo_multi_filter(const struct ip_moptions *imo, const struct ifnet *ifp, 345189592Sbms const struct sockaddr *group, const struct sockaddr *src) 346170613Sbms{ 347189592Sbms size_t gidx; 348189592Sbms struct in_msource *ims; 349189592Sbms int mode; 350189592Sbms 351189592Sbms KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 352189592Sbms 353189592Sbms gidx = imo_match_group(imo, ifp, group); 354189592Sbms if (gidx == -1) 355189592Sbms return (MCAST_NOTGMEMBER); 356189592Sbms 357189592Sbms /* 358189592Sbms * Check if the source was included in an (S,G) join. 359189592Sbms * Allow reception on exclusive memberships by default, 360189592Sbms * reject reception on inclusive memberships by default. 361189592Sbms * Exclude source only if an in-mode exclude filter exists. 362189592Sbms * Include source only if an in-mode include filter exists. 363189592Sbms * NOTE: We are comparing group state here at IGMP t1 (now) 364189592Sbms * with socket-layer t0 (since last downcall). 365189592Sbms */ 366189592Sbms mode = imo->imo_mfilters[gidx].imf_st[1]; 367189592Sbms ims = imo_match_source(imo, gidx, src); 368189592Sbms 369189592Sbms if ((ims == NULL && mode == MCAST_INCLUDE) || 370189592Sbms (ims != NULL && ims->imsl_st[0] != mode)) 371189592Sbms return (MCAST_NOTSMEMBER); 372189592Sbms 373189592Sbms return (MCAST_PASS); 374189592Sbms} 375189592Sbms 376189592Sbms/* 377189592Sbms * Find and return a reference to an in_multi record for (ifp, group), 378189592Sbms * and bump its reference count. 379189592Sbms * If one does not exist, try to allocate it, and update link-layer multicast 380189592Sbms * filters on ifp to listen for group. 381189592Sbms * Assumes the IN_MULTI lock is held across the call. 382189592Sbms * Return 0 if successful, otherwise return an appropriate error code. 
383189592Sbms */ 384189592Sbmsstatic int 385189592Sbmsin_getmulti(struct ifnet *ifp, const struct in_addr *group, 386189592Sbms struct in_multi **pinm) 387189592Sbms{ 388189592Sbms struct sockaddr_in gsin; 389189592Sbms struct ifmultiaddr *ifma; 390189592Sbms struct in_ifinfo *ii; 391189592Sbms struct in_multi *inm; 392189592Sbms int error; 393170613Sbms 394189592Sbms IN_MULTI_LOCK_ASSERT(); 395170613Sbms 396189592Sbms ii = (struct in_ifinfo *)ifp->if_afdata[AF_INET]; 397170613Sbms 398189592Sbms inm = inm_lookup(ifp, *group); 399170613Sbms if (inm != NULL) { 400170613Sbms /* 401170613Sbms * If we already joined this group, just bump the 402170613Sbms * refcount and return it. 403170613Sbms */ 404170613Sbms KASSERT(inm->inm_refcount >= 1, 405170613Sbms ("%s: bad refcount %d", __func__, inm->inm_refcount)); 406170613Sbms ++inm->inm_refcount; 407189592Sbms *pinm = inm; 408189592Sbms return (0); 409189592Sbms } 410170613Sbms 411189592Sbms memset(&gsin, 0, sizeof(gsin)); 412189592Sbms gsin.sin_family = AF_INET; 413189592Sbms gsin.sin_len = sizeof(struct sockaddr_in); 414189592Sbms gsin.sin_addr = *group; 415170613Sbms 416189592Sbms /* 417189592Sbms * Check if a link-layer group is already associated 418189592Sbms * with this network-layer group on the given ifnet. 419189592Sbms */ 420189592Sbms error = if_addmulti(ifp, (struct sockaddr *)&gsin, &ifma); 421189592Sbms if (error != 0) 422189592Sbms return (error); 423189592Sbms 424189931Sbms /* XXX ifma_protospec must be covered by IF_ADDR_LOCK */ 425233200Sjhb IF_ADDR_WLOCK(ifp); 426189931Sbms 427189592Sbms /* 428189592Sbms * If something other than netinet is occupying the link-layer 429189592Sbms * group, print a meaningful error message and back out of 430189592Sbms * the allocation. 431189592Sbms * Otherwise, bump the refcount on the existing network-layer 432189592Sbms * group association and return it. 
433189592Sbms */ 434189592Sbms if (ifma->ifma_protospec != NULL) { 435189592Sbms inm = (struct in_multi *)ifma->ifma_protospec; 436170613Sbms#ifdef INVARIANTS 437189592Sbms KASSERT(ifma->ifma_addr != NULL, ("%s: no ifma_addr", 438189592Sbms __func__)); 439189592Sbms KASSERT(ifma->ifma_addr->sa_family == AF_INET, 440189592Sbms ("%s: ifma not AF_INET", __func__)); 441189592Sbms KASSERT(inm != NULL, ("%s: no ifma_protospec", __func__)); 442189592Sbms if (inm->inm_ifma != ifma || inm->inm_ifp != ifp || 443189592Sbms !in_hosteq(inm->inm_addr, *group)) 444189592Sbms panic("%s: ifma %p is inconsistent with %p (%s)", 445189592Sbms __func__, ifma, inm, inet_ntoa(*group)); 446170613Sbms#endif 447189592Sbms ++inm->inm_refcount; 448189592Sbms *pinm = inm; 449233200Sjhb IF_ADDR_WUNLOCK(ifp); 450189592Sbms return (0); 451189592Sbms } 452189592Sbms 453233200Sjhb IF_ADDR_WLOCK_ASSERT(ifp); 454189931Sbms 455189592Sbms /* 456189592Sbms * A new in_multi record is needed; allocate and initialize it. 457189592Sbms * We DO NOT perform an IGMP join as the in_ layer may need to 458189592Sbms * push an initial source list down to IGMP to support SSM. 459189592Sbms * 460189592Sbms * The initial source filter state is INCLUDE, {} as per the RFC. 461189592Sbms */ 462189592Sbms inm = malloc(sizeof(*inm), M_IPMADDR, M_NOWAIT | M_ZERO); 463189592Sbms if (inm == NULL) { 464189592Sbms if_delmulti_ifma(ifma); 465233200Sjhb IF_ADDR_WUNLOCK(ifp); 466189592Sbms return (ENOMEM); 467189592Sbms } 468189592Sbms inm->inm_addr = *group; 469189592Sbms inm->inm_ifp = ifp; 470189592Sbms inm->inm_igi = ii->ii_igmp; 471189592Sbms inm->inm_ifma = ifma; 472189592Sbms inm->inm_refcount = 1; 473189592Sbms inm->inm_state = IGMP_NOT_MEMBER; 474189592Sbms 475189592Sbms /* 476189592Sbms * Pending state-changes per group are subject to a bounds check. 
477189592Sbms */ 478189592Sbms IFQ_SET_MAXLEN(&inm->inm_scq, IGMP_MAX_STATE_CHANGES); 479189592Sbms 480189592Sbms inm->inm_st[0].iss_fmode = MCAST_UNDEFINED; 481189592Sbms inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 482189592Sbms RB_INIT(&inm->inm_srcs); 483189592Sbms 484189592Sbms ifma->ifma_protospec = inm; 485189592Sbms 486189592Sbms *pinm = inm; 487189592Sbms 488233200Sjhb IF_ADDR_WUNLOCK(ifp); 489189592Sbms return (0); 490189592Sbms} 491189592Sbms 492189592Sbms/* 493189592Sbms * Drop a reference to an in_multi record. 494189592Sbms * 495189592Sbms * If the refcount drops to 0, free the in_multi record and 496189592Sbms * delete the underlying link-layer membership. 497189592Sbms */ 498189592Sbmsvoid 499189592Sbmsinm_release_locked(struct in_multi *inm) 500189592Sbms{ 501189592Sbms struct ifmultiaddr *ifma; 502189592Sbms 503189592Sbms IN_MULTI_LOCK_ASSERT(); 504189592Sbms 505189592Sbms CTR2(KTR_IGMPV3, "%s: refcount is %d", __func__, inm->inm_refcount); 506189592Sbms 507189592Sbms if (--inm->inm_refcount > 0) { 508189592Sbms CTR2(KTR_IGMPV3, "%s: refcount is now %d", __func__, 509189592Sbms inm->inm_refcount); 510189592Sbms return; 511189592Sbms } 512189592Sbms 513189592Sbms CTR2(KTR_IGMPV3, "%s: freeing inm %p", __func__, inm); 514189592Sbms 515189592Sbms ifma = inm->inm_ifma; 516189592Sbms 517189931Sbms /* XXX this access is not covered by IF_ADDR_LOCK */ 518189592Sbms CTR2(KTR_IGMPV3, "%s: purging ifma %p", __func__, ifma); 519189592Sbms KASSERT(ifma->ifma_protospec == inm, 520189592Sbms ("%s: ifma_protospec != inm", __func__)); 521189592Sbms ifma->ifma_protospec = NULL; 522189592Sbms 523189592Sbms inm_purge(inm); 524189592Sbms 525189592Sbms free(inm, M_IPMADDR); 526189592Sbms 527189592Sbms if_delmulti_ifma(ifma); 528189592Sbms} 529189592Sbms 530189592Sbms/* 531189592Sbms * Clear recorded source entries for a group. 532189592Sbms * Used by the IGMP code. Caller must hold the IN_MULTI lock. 533189592Sbms * FIXME: Should reap. 
534189592Sbms */ 535189592Sbmsvoid 536189592Sbmsinm_clear_recorded(struct in_multi *inm) 537189592Sbms{ 538189592Sbms struct ip_msource *ims; 539189592Sbms 540189592Sbms IN_MULTI_LOCK_ASSERT(); 541189592Sbms 542189592Sbms RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { 543189592Sbms if (ims->ims_stp) { 544189592Sbms ims->ims_stp = 0; 545189592Sbms --inm->inm_st[1].iss_rec; 546170613Sbms } 547189592Sbms } 548189592Sbms KASSERT(inm->inm_st[1].iss_rec == 0, 549189592Sbms ("%s: iss_rec %d not 0", __func__, inm->inm_st[1].iss_rec)); 550189592Sbms} 551170613Sbms 552189592Sbms/* 553189592Sbms * Record a source as pending for a Source-Group IGMPv3 query. 554189592Sbms * This lives here as it modifies the shared tree. 555189592Sbms * 556189592Sbms * inm is the group descriptor. 557189592Sbms * naddr is the address of the source to record in network-byte order. 558189592Sbms * 559189592Sbms * If the net.inet.igmp.sgalloc sysctl is non-zero, we will 560189592Sbms * lazy-allocate a source node in response to an SG query. 561189592Sbms * Otherwise, no allocation is performed. This saves some memory 562189592Sbms * with the trade-off that the source will not be reported to the 563189592Sbms * router if joined in the window between the query response and 564189592Sbms * the group actually being joined on the local host. 565189592Sbms * 566189592Sbms * VIMAGE: XXX: Currently the igmp_sgalloc feature has been removed. 567189592Sbms * This turns off the allocation of a recorded source entry if 568189592Sbms * the group has not been joined. 569189592Sbms * 570189592Sbms * Return 0 if the source didn't exist or was already marked as recorded. 571189592Sbms * Return 1 if the source was marked as recorded by this function. 572189592Sbms * Return <0 if any error occured (negated errno code). 
573189592Sbms */ 574189592Sbmsint 575189592Sbmsinm_record_source(struct in_multi *inm, const in_addr_t naddr) 576189592Sbms{ 577189592Sbms struct ip_msource find; 578189592Sbms struct ip_msource *ims, *nims; 579189592Sbms 580189592Sbms IN_MULTI_LOCK_ASSERT(); 581189592Sbms 582189592Sbms find.ims_haddr = ntohl(naddr); 583189592Sbms ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); 584189592Sbms if (ims && ims->ims_stp) 585189592Sbms return (0); 586189592Sbms if (ims == NULL) { 587189592Sbms if (inm->inm_nsrc == in_mcast_maxgrpsrc) 588189592Sbms return (-ENOSPC); 589189592Sbms nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE, 590189592Sbms M_NOWAIT | M_ZERO); 591189592Sbms if (nims == NULL) 592189592Sbms return (-ENOMEM); 593189592Sbms nims->ims_haddr = find.ims_haddr; 594189592Sbms RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); 595189592Sbms ++inm->inm_nsrc; 596189592Sbms ims = nims; 597189592Sbms } 598189592Sbms 599189592Sbms /* 600189592Sbms * Mark the source as recorded and update the recorded 601189592Sbms * source count. 602189592Sbms */ 603189592Sbms ++ims->ims_stp; 604189592Sbms ++inm->inm_st[1].iss_rec; 605189592Sbms 606189592Sbms return (1); 607189592Sbms} 608189592Sbms 609189592Sbms/* 610189592Sbms * Return a pointer to an in_msource owned by an in_mfilter, 611189592Sbms * given its source address. 612189592Sbms * Lazy-allocate if needed. If this is a new entry its filter state is 613189592Sbms * undefined at t0. 614189592Sbms * 615189592Sbms * imf is the filter set being modified. 616189592Sbms * haddr is the source address in *host* byte-order. 617189592Sbms * 618189592Sbms * SMPng: May be called with locks held; malloc must not block. 
619189592Sbms */ 620189592Sbmsstatic int 621189592Sbmsimf_get_source(struct in_mfilter *imf, const struct sockaddr_in *psin, 622189592Sbms struct in_msource **plims) 623189592Sbms{ 624189592Sbms struct ip_msource find; 625189592Sbms struct ip_msource *ims, *nims; 626189592Sbms struct in_msource *lims; 627189592Sbms int error; 628189592Sbms 629189592Sbms error = 0; 630189592Sbms ims = NULL; 631189592Sbms lims = NULL; 632189592Sbms 633189592Sbms /* key is host byte order */ 634189592Sbms find.ims_haddr = ntohl(psin->sin_addr.s_addr); 635189592Sbms ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); 636189592Sbms lims = (struct in_msource *)ims; 637189592Sbms if (lims == NULL) { 638189592Sbms if (imf->imf_nsrc == in_mcast_maxsocksrc) 639189592Sbms return (ENOSPC); 640189592Sbms nims = malloc(sizeof(struct in_msource), M_INMFILTER, 641189592Sbms M_NOWAIT | M_ZERO); 642189592Sbms if (nims == NULL) 643189592Sbms return (ENOMEM); 644189592Sbms lims = (struct in_msource *)nims; 645189592Sbms lims->ims_haddr = find.ims_haddr; 646189592Sbms lims->imsl_st[0] = MCAST_UNDEFINED; 647189592Sbms RB_INSERT(ip_msource_tree, &imf->imf_sources, nims); 648189592Sbms ++imf->imf_nsrc; 649189592Sbms } 650189592Sbms 651189592Sbms *plims = lims; 652189592Sbms 653189592Sbms return (error); 654189592Sbms} 655189592Sbms 656189592Sbms/* 657189592Sbms * Graft a source entry into an existing socket-layer filter set, 658189592Sbms * maintaining any required invariants and checking allocations. 659189592Sbms * 660189592Sbms * The source is marked as being in the new filter mode at t1. 661189592Sbms * 662189592Sbms * Return the pointer to the new node, otherwise return NULL. 
663189592Sbms */ 664189592Sbmsstatic struct in_msource * 665189592Sbmsimf_graft(struct in_mfilter *imf, const uint8_t st1, 666189592Sbms const struct sockaddr_in *psin) 667189592Sbms{ 668189592Sbms struct ip_msource *nims; 669189592Sbms struct in_msource *lims; 670189592Sbms 671189592Sbms nims = malloc(sizeof(struct in_msource), M_INMFILTER, 672189592Sbms M_NOWAIT | M_ZERO); 673189592Sbms if (nims == NULL) 674189592Sbms return (NULL); 675189592Sbms lims = (struct in_msource *)nims; 676189592Sbms lims->ims_haddr = ntohl(psin->sin_addr.s_addr); 677189592Sbms lims->imsl_st[0] = MCAST_UNDEFINED; 678189592Sbms lims->imsl_st[1] = st1; 679189592Sbms RB_INSERT(ip_msource_tree, &imf->imf_sources, nims); 680189592Sbms ++imf->imf_nsrc; 681189592Sbms 682189592Sbms return (lims); 683189592Sbms} 684189592Sbms 685189592Sbms/* 686189592Sbms * Prune a source entry from an existing socket-layer filter set, 687189592Sbms * maintaining any required invariants and checking allocations. 688189592Sbms * 689189592Sbms * The source is marked as being left at t1, it is not freed. 690189592Sbms * 691189592Sbms * Return 0 if no error occurred, otherwise return an errno value. 692189592Sbms */ 693189592Sbmsstatic int 694189592Sbmsimf_prune(struct in_mfilter *imf, const struct sockaddr_in *psin) 695189592Sbms{ 696189592Sbms struct ip_msource find; 697189592Sbms struct ip_msource *ims; 698189592Sbms struct in_msource *lims; 699189592Sbms 700189592Sbms /* key is host byte order */ 701189592Sbms find.ims_haddr = ntohl(psin->sin_addr.s_addr); 702189592Sbms ims = RB_FIND(ip_msource_tree, &imf->imf_sources, &find); 703189592Sbms if (ims == NULL) 704189592Sbms return (ENOENT); 705189592Sbms lims = (struct in_msource *)ims; 706189592Sbms lims->imsl_st[1] = MCAST_UNDEFINED; 707189592Sbms return (0); 708189592Sbms} 709189592Sbms 710189592Sbms/* 711189592Sbms * Revert socket-layer filter set deltas at t1 to t0 state. 
712189592Sbms */ 713189592Sbmsstatic void 714189592Sbmsimf_rollback(struct in_mfilter *imf) 715189592Sbms{ 716189592Sbms struct ip_msource *ims, *tims; 717189592Sbms struct in_msource *lims; 718189592Sbms 719189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { 720189592Sbms lims = (struct in_msource *)ims; 721189592Sbms if (lims->imsl_st[0] == lims->imsl_st[1]) { 722189592Sbms /* no change at t1 */ 723189592Sbms continue; 724189592Sbms } else if (lims->imsl_st[0] != MCAST_UNDEFINED) { 725189592Sbms /* revert change to existing source at t1 */ 726189592Sbms lims->imsl_st[1] = lims->imsl_st[0]; 727189592Sbms } else { 728189592Sbms /* revert source added t1 */ 729189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 730189592Sbms RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); 731189592Sbms free(ims, M_INMFILTER); 732189592Sbms imf->imf_nsrc--; 733189592Sbms } 734189592Sbms } 735189592Sbms imf->imf_st[1] = imf->imf_st[0]; 736189592Sbms} 737189592Sbms 738189592Sbms/* 739189592Sbms * Mark socket-layer filter set as INCLUDE {} at t1. 740189592Sbms */ 741189592Sbmsstatic void 742189592Sbmsimf_leave(struct in_mfilter *imf) 743189592Sbms{ 744189592Sbms struct ip_msource *ims; 745189592Sbms struct in_msource *lims; 746189592Sbms 747189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 748189592Sbms lims = (struct in_msource *)ims; 749189592Sbms lims->imsl_st[1] = MCAST_UNDEFINED; 750189592Sbms } 751189592Sbms imf->imf_st[1] = MCAST_INCLUDE; 752189592Sbms} 753189592Sbms 754189592Sbms/* 755189592Sbms * Mark socket-layer filter set deltas as committed. 
756189592Sbms */ 757189592Sbmsstatic void 758189592Sbmsimf_commit(struct in_mfilter *imf) 759189592Sbms{ 760189592Sbms struct ip_msource *ims; 761189592Sbms struct in_msource *lims; 762189592Sbms 763189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 764189592Sbms lims = (struct in_msource *)ims; 765189592Sbms lims->imsl_st[0] = lims->imsl_st[1]; 766189592Sbms } 767189592Sbms imf->imf_st[0] = imf->imf_st[1]; 768189592Sbms} 769189592Sbms 770189592Sbms/* 771189592Sbms * Reap unreferenced sources from socket-layer filter set. 772189592Sbms */ 773189592Sbmsstatic void 774189592Sbmsimf_reap(struct in_mfilter *imf) 775189592Sbms{ 776189592Sbms struct ip_msource *ims, *tims; 777189592Sbms struct in_msource *lims; 778189592Sbms 779189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { 780189592Sbms lims = (struct in_msource *)ims; 781189592Sbms if ((lims->imsl_st[0] == MCAST_UNDEFINED) && 782189592Sbms (lims->imsl_st[1] == MCAST_UNDEFINED)) { 783189592Sbms CTR2(KTR_IGMPV3, "%s: free lims %p", __func__, ims); 784189592Sbms RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); 785189592Sbms free(ims, M_INMFILTER); 786189592Sbms imf->imf_nsrc--; 787189592Sbms } 788189592Sbms } 789189592Sbms} 790189592Sbms 791189592Sbms/* 792189592Sbms * Purge socket-layer filter set. 
793189592Sbms */ 794189592Sbmsstatic void 795189592Sbmsimf_purge(struct in_mfilter *imf) 796189592Sbms{ 797189592Sbms struct ip_msource *ims, *tims; 798189592Sbms 799189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &imf->imf_sources, tims) { 800189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 801189592Sbms RB_REMOVE(ip_msource_tree, &imf->imf_sources, ims); 802189592Sbms free(ims, M_INMFILTER); 803189592Sbms imf->imf_nsrc--; 804189592Sbms } 805189592Sbms imf->imf_st[0] = imf->imf_st[1] = MCAST_UNDEFINED; 806189592Sbms KASSERT(RB_EMPTY(&imf->imf_sources), 807189592Sbms ("%s: imf_sources not empty", __func__)); 808189592Sbms} 809189592Sbms 810189592Sbms/* 811189592Sbms * Look up a source filter entry for a multicast group. 812189592Sbms * 813189592Sbms * inm is the group descriptor to work with. 814189592Sbms * haddr is the host-byte-order IPv4 address to look up. 815189592Sbms * noalloc may be non-zero to suppress allocation of sources. 816189592Sbms * *pims will be set to the address of the retrieved or allocated source. 817189592Sbms * 818189592Sbms * SMPng: NOTE: may be called with locks held. 819189592Sbms * Return 0 if successful, otherwise return a non-zero error code. 
820189592Sbms */ 821189592Sbmsstatic int 822189592Sbmsinm_get_source(struct in_multi *inm, const in_addr_t haddr, 823189592Sbms const int noalloc, struct ip_msource **pims) 824189592Sbms{ 825189592Sbms struct ip_msource find; 826189592Sbms struct ip_msource *ims, *nims; 827189592Sbms#ifdef KTR 828189592Sbms struct in_addr ia; 829189592Sbms#endif 830189592Sbms 831189592Sbms find.ims_haddr = haddr; 832189592Sbms ims = RB_FIND(ip_msource_tree, &inm->inm_srcs, &find); 833189592Sbms if (ims == NULL && !noalloc) { 834189592Sbms if (inm->inm_nsrc == in_mcast_maxgrpsrc) 835189592Sbms return (ENOSPC); 836189592Sbms nims = malloc(sizeof(struct ip_msource), M_IPMSOURCE, 837189592Sbms M_NOWAIT | M_ZERO); 838189592Sbms if (nims == NULL) 839189592Sbms return (ENOMEM); 840189592Sbms nims->ims_haddr = haddr; 841189592Sbms RB_INSERT(ip_msource_tree, &inm->inm_srcs, nims); 842189592Sbms ++inm->inm_nsrc; 843189592Sbms ims = nims; 844189592Sbms#ifdef KTR 845189592Sbms ia.s_addr = htonl(haddr); 846189592Sbms CTR3(KTR_IGMPV3, "%s: allocated %s as %p", __func__, 847189592Sbms inet_ntoa(ia), ims); 848189592Sbms#endif 849189592Sbms } 850189592Sbms 851189592Sbms *pims = ims; 852189592Sbms return (0); 853189592Sbms} 854189592Sbms 855189592Sbms/* 856189592Sbms * Merge socket-layer source into IGMP-layer source. 857189592Sbms * If rollback is non-zero, perform the inverse of the merge. 858189592Sbms */ 859189592Sbmsstatic void 860189592Sbmsims_merge(struct ip_msource *ims, const struct in_msource *lims, 861189592Sbms const int rollback) 862189592Sbms{ 863189592Sbms int n = rollback ? 
-1 : 1; 864189592Sbms#ifdef KTR 865189592Sbms struct in_addr ia; 866189592Sbms 867189592Sbms ia.s_addr = htonl(ims->ims_haddr); 868189592Sbms#endif 869189592Sbms 870189592Sbms if (lims->imsl_st[0] == MCAST_EXCLUDE) { 871189592Sbms CTR3(KTR_IGMPV3, "%s: t1 ex -= %d on %s", 872189592Sbms __func__, n, inet_ntoa(ia)); 873189592Sbms ims->ims_st[1].ex -= n; 874189592Sbms } else if (lims->imsl_st[0] == MCAST_INCLUDE) { 875189592Sbms CTR3(KTR_IGMPV3, "%s: t1 in -= %d on %s", 876189592Sbms __func__, n, inet_ntoa(ia)); 877189592Sbms ims->ims_st[1].in -= n; 878189592Sbms } 879189592Sbms 880189592Sbms if (lims->imsl_st[1] == MCAST_EXCLUDE) { 881189592Sbms CTR3(KTR_IGMPV3, "%s: t1 ex += %d on %s", 882189592Sbms __func__, n, inet_ntoa(ia)); 883189592Sbms ims->ims_st[1].ex += n; 884189592Sbms } else if (lims->imsl_st[1] == MCAST_INCLUDE) { 885189592Sbms CTR3(KTR_IGMPV3, "%s: t1 in += %d on %s", 886189592Sbms __func__, n, inet_ntoa(ia)); 887189592Sbms ims->ims_st[1].in += n; 888189592Sbms } 889189592Sbms} 890189592Sbms 891189592Sbms/* 892189592Sbms * Atomically update the global in_multi state, when a membership's 893189592Sbms * filter list is being updated in any way. 894189592Sbms * 895189592Sbms * imf is the per-inpcb-membership group filter pointer. 896189592Sbms * A fake imf may be passed for in-kernel consumers. 897189592Sbms * 898189592Sbms * XXX This is a candidate for a set-symmetric-difference style loop 899189592Sbms * which would eliminate the repeated lookup from root of ims nodes, 900189592Sbms * as they share the same key space. 901189592Sbms * 902189592Sbms * If any error occurred this function will back out of refcounts 903189592Sbms * and return a non-zero value. 
904189592Sbms */ 905189592Sbmsstatic int 906189592Sbmsinm_merge(struct in_multi *inm, /*const*/ struct in_mfilter *imf) 907189592Sbms{ 908189592Sbms struct ip_msource *ims, *nims; 909189592Sbms struct in_msource *lims; 910189592Sbms int schanged, error; 911189592Sbms int nsrc0, nsrc1; 912189592Sbms 913189592Sbms schanged = 0; 914189592Sbms error = 0; 915189592Sbms nsrc1 = nsrc0 = 0; 916189592Sbms 917189592Sbms /* 918189592Sbms * Update the source filters first, as this may fail. 919189592Sbms * Maintain count of in-mode filters at t0, t1. These are 920189592Sbms * used to work out if we transition into ASM mode or not. 921189592Sbms * Maintain a count of source filters whose state was 922189592Sbms * actually modified by this operation. 923189592Sbms */ 924189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 925189592Sbms lims = (struct in_msource *)ims; 926189592Sbms if (lims->imsl_st[0] == imf->imf_st[0]) nsrc0++; 927189592Sbms if (lims->imsl_st[1] == imf->imf_st[1]) nsrc1++; 928189592Sbms if (lims->imsl_st[0] == lims->imsl_st[1]) continue; 929189592Sbms error = inm_get_source(inm, lims->ims_haddr, 0, &nims); 930189592Sbms ++schanged; 931189592Sbms if (error) 932170613Sbms break; 933189592Sbms ims_merge(nims, lims, 0); 934189592Sbms } 935189592Sbms if (error) { 936189592Sbms struct ip_msource *bims; 937189592Sbms 938189592Sbms RB_FOREACH_REVERSE_FROM(ims, ip_msource_tree, nims) { 939189592Sbms lims = (struct in_msource *)ims; 940189592Sbms if (lims->imsl_st[0] == lims->imsl_st[1]) 941189592Sbms continue; 942189592Sbms (void)inm_get_source(inm, lims->ims_haddr, 1, &bims); 943189592Sbms if (bims == NULL) 944189592Sbms continue; 945189592Sbms ims_merge(bims, lims, 1); 946170613Sbms } 947189592Sbms goto out_reap; 948189592Sbms } 949170613Sbms 950189592Sbms CTR3(KTR_IGMPV3, "%s: imf filters in-mode: %d at t0, %d at t1", 951189592Sbms __func__, nsrc0, nsrc1); 952170613Sbms 953189592Sbms /* Handle transition between INCLUDE {n} and INCLUDE {} on socket. 
*/ 954189592Sbms if (imf->imf_st[0] == imf->imf_st[1] && 955189592Sbms imf->imf_st[1] == MCAST_INCLUDE) { 956189592Sbms if (nsrc1 == 0) { 957189592Sbms CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__); 958189592Sbms --inm->inm_st[1].iss_in; 959189592Sbms } 960189592Sbms } 961170613Sbms 962189592Sbms /* Handle filter mode transition on socket. */ 963189592Sbms if (imf->imf_st[0] != imf->imf_st[1]) { 964189592Sbms CTR3(KTR_IGMPV3, "%s: imf transition %d to %d", 965189592Sbms __func__, imf->imf_st[0], imf->imf_st[1]); 966189592Sbms 967189592Sbms if (imf->imf_st[0] == MCAST_EXCLUDE) { 968189592Sbms CTR1(KTR_IGMPV3, "%s: --ex on inm at t1", __func__); 969189592Sbms --inm->inm_st[1].iss_ex; 970189592Sbms } else if (imf->imf_st[0] == MCAST_INCLUDE) { 971189592Sbms CTR1(KTR_IGMPV3, "%s: --in on inm at t1", __func__); 972189592Sbms --inm->inm_st[1].iss_in; 973189592Sbms } 974189592Sbms 975189592Sbms if (imf->imf_st[1] == MCAST_EXCLUDE) { 976189592Sbms CTR1(KTR_IGMPV3, "%s: ex++ on inm at t1", __func__); 977189592Sbms inm->inm_st[1].iss_ex++; 978189592Sbms } else if (imf->imf_st[1] == MCAST_INCLUDE && nsrc1 > 0) { 979189592Sbms CTR1(KTR_IGMPV3, "%s: in++ on inm at t1", __func__); 980189592Sbms inm->inm_st[1].iss_in++; 981189592Sbms } 982189592Sbms } 983189592Sbms 984189592Sbms /* 985189592Sbms * Track inm filter state in terms of listener counts. 986189592Sbms * If there are any exclusive listeners, stack-wide 987189592Sbms * membership is exclusive. 988189592Sbms * Otherwise, if only inclusive listeners, stack-wide is inclusive. 989189592Sbms * If no listeners remain, state is undefined at t1, 990189592Sbms * and the IGMP lifecycle for this group should finish. 
991189592Sbms */ 992189592Sbms if (inm->inm_st[1].iss_ex > 0) { 993189592Sbms CTR1(KTR_IGMPV3, "%s: transition to EX", __func__); 994189592Sbms inm->inm_st[1].iss_fmode = MCAST_EXCLUDE; 995189592Sbms } else if (inm->inm_st[1].iss_in > 0) { 996189592Sbms CTR1(KTR_IGMPV3, "%s: transition to IN", __func__); 997189592Sbms inm->inm_st[1].iss_fmode = MCAST_INCLUDE; 998189592Sbms } else { 999189592Sbms CTR1(KTR_IGMPV3, "%s: transition to UNDEF", __func__); 1000189592Sbms inm->inm_st[1].iss_fmode = MCAST_UNDEFINED; 1001189592Sbms } 1002189592Sbms 1003189592Sbms /* Decrement ASM listener count on transition out of ASM mode. */ 1004189592Sbms if (imf->imf_st[0] == MCAST_EXCLUDE && nsrc0 == 0) { 1005189592Sbms if ((imf->imf_st[1] != MCAST_EXCLUDE) || 1006189592Sbms (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 > 0)) 1007189592Sbms CTR1(KTR_IGMPV3, "%s: --asm on inm at t1", __func__); 1008189592Sbms --inm->inm_st[1].iss_asm; 1009189592Sbms } 1010189592Sbms 1011189592Sbms /* Increment ASM listener count on transition to ASM mode. */ 1012189592Sbms if (imf->imf_st[1] == MCAST_EXCLUDE && nsrc1 == 0) { 1013189592Sbms CTR1(KTR_IGMPV3, "%s: asm++ on inm at t1", __func__); 1014189592Sbms inm->inm_st[1].iss_asm++; 1015189592Sbms } 1016189592Sbms 1017189592Sbms CTR3(KTR_IGMPV3, "%s: merged imf %p to inm %p", __func__, imf, inm); 1018189592Sbms inm_print(inm); 1019189592Sbms 1020189592Sbmsout_reap: 1021189592Sbms if (schanged > 0) { 1022189592Sbms CTR1(KTR_IGMPV3, "%s: sources changed; reaping", __func__); 1023189592Sbms inm_reap(inm); 1024189592Sbms } 1025189592Sbms return (error); 1026189592Sbms} 1027189592Sbms 1028189592Sbms/* 1029189592Sbms * Mark an in_multi's filter set deltas as committed. 1030189592Sbms * Called by IGMP after a state change has been enqueued. 
1031189592Sbms */ 1032189592Sbmsvoid 1033189592Sbmsinm_commit(struct in_multi *inm) 1034189592Sbms{ 1035189592Sbms struct ip_msource *ims; 1036189592Sbms 1037189592Sbms CTR2(KTR_IGMPV3, "%s: commit inm %p", __func__, inm); 1038189592Sbms CTR1(KTR_IGMPV3, "%s: pre commit:", __func__); 1039189592Sbms inm_print(inm); 1040189592Sbms 1041189592Sbms RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) { 1042189592Sbms ims->ims_st[0] = ims->ims_st[1]; 1043189592Sbms } 1044189592Sbms inm->inm_st[0] = inm->inm_st[1]; 1045189592Sbms} 1046189592Sbms 1047189592Sbms/* 1048189592Sbms * Reap unreferenced nodes from an in_multi's filter set. 1049189592Sbms */ 1050189592Sbmsstatic void 1051189592Sbmsinm_reap(struct in_multi *inm) 1052189592Sbms{ 1053189592Sbms struct ip_msource *ims, *tims; 1054189592Sbms 1055189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { 1056189592Sbms if (ims->ims_st[0].ex > 0 || ims->ims_st[0].in > 0 || 1057189592Sbms ims->ims_st[1].ex > 0 || ims->ims_st[1].in > 0 || 1058189592Sbms ims->ims_stp != 0) 1059189592Sbms continue; 1060189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 1061189592Sbms RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); 1062189592Sbms free(ims, M_IPMSOURCE); 1063189592Sbms inm->inm_nsrc--; 1064189592Sbms } 1065189592Sbms} 1066189592Sbms 1067189592Sbms/* 1068189592Sbms * Purge all source nodes from an in_multi's filter set. 1069189592Sbms */ 1070189592Sbmsstatic void 1071189592Sbmsinm_purge(struct in_multi *inm) 1072189592Sbms{ 1073189592Sbms struct ip_msource *ims, *tims; 1074189592Sbms 1075189592Sbms RB_FOREACH_SAFE(ims, ip_msource_tree, &inm->inm_srcs, tims) { 1076189592Sbms CTR2(KTR_IGMPV3, "%s: free ims %p", __func__, ims); 1077189592Sbms RB_REMOVE(ip_msource_tree, &inm->inm_srcs, ims); 1078189592Sbms free(ims, M_IPMSOURCE); 1079189592Sbms inm->inm_nsrc--; 1080189592Sbms } 1081189592Sbms} 1082189592Sbms 1083189592Sbms/* 1084189592Sbms * Join a multicast group; unlocked entry point. 
1085189592Sbms * 1086189592Sbms * SMPng: XXX: in_joingroup() is called from in_control() when Giant 1087189592Sbms * is not held. Fortunately, ifp is unlikely to have been detached 1088189592Sbms * at this point, so we assume it's OK to recurse. 1089189592Sbms */ 1090189592Sbmsint 1091189592Sbmsin_joingroup(struct ifnet *ifp, const struct in_addr *gina, 1092189592Sbms /*const*/ struct in_mfilter *imf, struct in_multi **pinm) 1093189592Sbms{ 1094189592Sbms int error; 1095189592Sbms 1096189592Sbms IN_MULTI_LOCK(); 1097189592Sbms error = in_joingroup_locked(ifp, gina, imf, pinm); 1098170613Sbms IN_MULTI_UNLOCK(); 1099170613Sbms 1100189592Sbms return (error); 1101170613Sbms} 1102170613Sbms 1103170613Sbms/* 1104189592Sbms * Join a multicast group; real entry point. 1105170613Sbms * 1106189592Sbms * Only preserves atomicity at inm level. 1107189592Sbms * NOTE: imf argument cannot be const due to sys/tree.h limitations. 1108170613Sbms * 1109189592Sbms * If the IGMP downcall fails, the group is not joined, and an error 1110189592Sbms * code is returned. 1111170613Sbms */ 1112189592Sbmsint 1113189592Sbmsin_joingroup_locked(struct ifnet *ifp, const struct in_addr *gina, 1114189592Sbms /*const*/ struct in_mfilter *imf, struct in_multi **pinm) 1115170613Sbms{ 1116189592Sbms struct in_mfilter timf; 1117189592Sbms struct in_multi *inm; 1118189592Sbms int error; 1119170613Sbms 1120189592Sbms IN_MULTI_LOCK_ASSERT(); 1121170613Sbms 1122189592Sbms CTR4(KTR_IGMPV3, "%s: join %s on %p(%s))", __func__, 1123189592Sbms inet_ntoa(*gina), ifp, ifp->if_xname); 1124189592Sbms 1125189592Sbms error = 0; 1126189592Sbms inm = NULL; 1127189592Sbms 1128189592Sbms /* 1129189592Sbms * If no imf was specified (i.e. kernel consumer), 1130189592Sbms * fake one up and assume it is an ASM join. 
1131189592Sbms */ 1132189592Sbms if (imf == NULL) { 1133189592Sbms imf_init(&timf, MCAST_UNDEFINED, MCAST_EXCLUDE); 1134189592Sbms imf = &timf; 1135170613Sbms } 1136170613Sbms 1137189592Sbms error = in_getmulti(ifp, gina, &inm); 1138189592Sbms if (error) { 1139189592Sbms CTR1(KTR_IGMPV3, "%s: in_getmulti() failure", __func__); 1140189592Sbms return (error); 1141189592Sbms } 1142189592Sbms 1143189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 1144189592Sbms error = inm_merge(inm, imf); 1145189592Sbms if (error) { 1146189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); 1147189592Sbms goto out_inm_release; 1148189592Sbms } 1149189592Sbms 1150189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 1151189592Sbms error = igmp_change_state(inm); 1152189592Sbms if (error) { 1153189592Sbms CTR1(KTR_IGMPV3, "%s: failed to update source", __func__); 1154189592Sbms goto out_inm_release; 1155189592Sbms } 1156189592Sbms 1157189592Sbmsout_inm_release: 1158189592Sbms if (error) { 1159189592Sbms CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm); 1160189592Sbms inm_release_locked(inm); 1161189592Sbms } else { 1162189592Sbms *pinm = inm; 1163189592Sbms } 1164189592Sbms 1165189592Sbms return (error); 1166189592Sbms} 1167189592Sbms 1168189592Sbms/* 1169189592Sbms * Leave a multicast group; unlocked entry point. 1170189592Sbms */ 1171189592Sbmsint 1172189592Sbmsin_leavegroup(struct in_multi *inm, /*const*/ struct in_mfilter *imf) 1173189592Sbms{ 1174189592Sbms struct ifnet *ifp; 1175189851Srwatson int error; 1176189592Sbms 1177189592Sbms ifp = inm->inm_ifp; 1178189592Sbms 1179170613Sbms IN_MULTI_LOCK(); 1180189592Sbms error = in_leavegroup_locked(inm, imf); 1181170613Sbms IN_MULTI_UNLOCK(); 1182170613Sbms 1183189592Sbms return (error); 1184170613Sbms} 1185170613Sbms 1186170613Sbms/* 1187189592Sbms * Leave a multicast group; real entry point. 1188189592Sbms * All source filters will be expunged. 
1189170613Sbms * 1190189592Sbms * Only preserves atomicity at inm level. 1191189592Sbms * 1192189592Sbms * Holding the write lock for the INP which contains imf 1193189592Sbms * is highly advisable. We can't assert for it as imf does not 1194189592Sbms * contain a back-pointer to the owning inp. 1195189592Sbms * 1196189592Sbms * Note: This is not the same as inm_release(*) as this function also 1197189592Sbms * makes a state change downcall into IGMP. 1198170613Sbms */ 1199189592Sbmsint 1200189592Sbmsin_leavegroup_locked(struct in_multi *inm, /*const*/ struct in_mfilter *imf) 1201170613Sbms{ 1202189592Sbms struct in_mfilter timf; 1203189592Sbms int error; 1204170613Sbms 1205189592Sbms error = 0; 1206189592Sbms 1207170613Sbms IN_MULTI_LOCK_ASSERT(); 1208170613Sbms 1209189592Sbms CTR5(KTR_IGMPV3, "%s: leave inm %p, %s/%s, imf %p", __func__, 1210189592Sbms inm, inet_ntoa(inm->inm_addr), 1211189592Sbms (inm_is_ifp_detached(inm) ? "null" : inm->inm_ifp->if_xname), 1212189592Sbms imf); 1213170613Sbms 1214189592Sbms /* 1215189592Sbms * If no imf was specified (i.e. kernel consumer), 1216189592Sbms * fake one up and assume it is an ASM join. 1217189592Sbms */ 1218189592Sbms if (imf == NULL) { 1219189592Sbms imf_init(&timf, MCAST_EXCLUDE, MCAST_UNDEFINED); 1220189592Sbms imf = &timf; 1221189592Sbms } 1222170613Sbms 1223189592Sbms /* 1224189592Sbms * Begin state merge transaction at IGMP layer. 1225189592Sbms * 1226189592Sbms * As this particular invocation should not cause any memory 1227189592Sbms * to be allocated, and there is no opportunity to roll back 1228189592Sbms * the transaction, it MUST NOT fail. 
1229189592Sbms */ 1230189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 1231189592Sbms error = inm_merge(inm, imf); 1232189592Sbms KASSERT(error == 0, ("%s: failed to merge inm state", __func__)); 1233170613Sbms 1234189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 1235189592Sbms error = igmp_change_state(inm); 1236189592Sbms if (error) 1237189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); 1238189592Sbms 1239189592Sbms CTR2(KTR_IGMPV3, "%s: dropping ref on %p", __func__, inm); 1240189592Sbms inm_release_locked(inm); 1241189592Sbms 1242189592Sbms return (error); 1243170613Sbms} 1244170613Sbms 1245189592Sbms/*#ifndef BURN_BRIDGES*/ 1246170613Sbms/* 1247189592Sbms * Join an IPv4 multicast group in (*,G) exclusive mode. 1248189592Sbms * The group must be a 224.0.0.0/24 link-scope group. 1249189592Sbms * This KPI is for legacy kernel consumers only. 1250170613Sbms */ 1251189592Sbmsstruct in_multi * 1252189592Sbmsin_addmulti(struct in_addr *ap, struct ifnet *ifp) 1253189592Sbms{ 1254189592Sbms struct in_multi *pinm; 1255189592Sbms int error; 1256189592Sbms 1257189592Sbms KASSERT(IN_LOCAL_GROUP(ntohl(ap->s_addr)), 1258189592Sbms ("%s: %s not in 224.0.0.0/24", __func__, inet_ntoa(*ap))); 1259189592Sbms 1260189592Sbms error = in_joingroup(ifp, ap, NULL, &pinm); 1261189592Sbms if (error != 0) 1262189592Sbms pinm = NULL; 1263189592Sbms 1264189592Sbms return (pinm); 1265189592Sbms} 1266189592Sbms 1267189592Sbms/* 1268189592Sbms * Leave an IPv4 multicast group, assumed to be in exclusive (*,G) mode. 1269189592Sbms * This KPI is for legacy kernel consumers only. 1270189592Sbms */ 1271189592Sbmsvoid 1272189592Sbmsin_delmulti(struct in_multi *inm) 1273189592Sbms{ 1274189592Sbms 1275189592Sbms (void)in_leavegroup(inm, NULL); 1276189592Sbms} 1277189592Sbms/*#endif*/ 1278189592Sbms 1279189592Sbms/* 1280189592Sbms * Block or unblock an ASM multicast source on an inpcb. 
1281189592Sbms * This implements the delta-based API described in RFC 3678. 1282189592Sbms * 1283189592Sbms * The delta-based API applies only to exclusive-mode memberships. 1284189592Sbms * An IGMP downcall will be performed. 1285189592Sbms * 1286189592Sbms * SMPng: NOTE: Must take Giant as a join may create a new ifma. 1287189592Sbms * 1288189592Sbms * Return 0 if successful, otherwise return an appropriate error code. 1289189592Sbms */ 1290170613Sbmsstatic int 1291189592Sbmsinp_block_unblock_source(struct inpcb *inp, struct sockopt *sopt) 1292170613Sbms{ 1293170613Sbms struct group_source_req gsr; 1294170613Sbms sockunion_t *gsa, *ssa; 1295170613Sbms struct ifnet *ifp; 1296170613Sbms struct in_mfilter *imf; 1297170613Sbms struct ip_moptions *imo; 1298170613Sbms struct in_msource *ims; 1299189592Sbms struct in_multi *inm; 1300170613Sbms size_t idx; 1301189592Sbms uint16_t fmode; 1302189592Sbms int error, doblock; 1303170613Sbms 1304170613Sbms ifp = NULL; 1305170613Sbms error = 0; 1306189592Sbms doblock = 0; 1307170613Sbms 1308170613Sbms memset(&gsr, 0, sizeof(struct group_source_req)); 1309170613Sbms gsa = (sockunion_t *)&gsr.gsr_group; 1310170613Sbms ssa = (sockunion_t *)&gsr.gsr_source; 1311170613Sbms 1312170613Sbms switch (sopt->sopt_name) { 1313170613Sbms case IP_BLOCK_SOURCE: 1314170613Sbms case IP_UNBLOCK_SOURCE: { 1315170613Sbms struct ip_mreq_source mreqs; 1316170613Sbms 1317170613Sbms error = sooptcopyin(sopt, &mreqs, 1318170613Sbms sizeof(struct ip_mreq_source), 1319170613Sbms sizeof(struct ip_mreq_source)); 1320170613Sbms if (error) 1321170613Sbms return (error); 1322170613Sbms 1323170613Sbms gsa->sin.sin_family = AF_INET; 1324170613Sbms gsa->sin.sin_len = sizeof(struct sockaddr_in); 1325170613Sbms gsa->sin.sin_addr = mreqs.imr_multiaddr; 1326170613Sbms 1327170613Sbms ssa->sin.sin_family = AF_INET; 1328170613Sbms ssa->sin.sin_len = sizeof(struct sockaddr_in); 1329170613Sbms ssa->sin.sin_addr = mreqs.imr_sourceaddr; 1330170613Sbms 1331189592Sbms if 
(!in_nullhost(mreqs.imr_interface)) 1332170613Sbms INADDR_TO_IFP(mreqs.imr_interface, ifp); 1333170613Sbms 1334170613Sbms if (sopt->sopt_name == IP_BLOCK_SOURCE) 1335189592Sbms doblock = 1; 1336170613Sbms 1337189592Sbms CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", 1338189592Sbms __func__, inet_ntoa(mreqs.imr_interface), ifp); 1339170613Sbms break; 1340170613Sbms } 1341170613Sbms 1342170613Sbms case MCAST_BLOCK_SOURCE: 1343170613Sbms case MCAST_UNBLOCK_SOURCE: 1344170613Sbms error = sooptcopyin(sopt, &gsr, 1345170613Sbms sizeof(struct group_source_req), 1346170613Sbms sizeof(struct group_source_req)); 1347170613Sbms if (error) 1348170613Sbms return (error); 1349170613Sbms 1350170613Sbms if (gsa->sin.sin_family != AF_INET || 1351170613Sbms gsa->sin.sin_len != sizeof(struct sockaddr_in)) 1352170613Sbms return (EINVAL); 1353170613Sbms 1354170613Sbms if (ssa->sin.sin_family != AF_INET || 1355170613Sbms ssa->sin.sin_len != sizeof(struct sockaddr_in)) 1356170613Sbms return (EINVAL); 1357170613Sbms 1358181803Sbz if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1359170613Sbms return (EADDRNOTAVAIL); 1360170613Sbms 1361170613Sbms ifp = ifnet_byindex(gsr.gsr_interface); 1362170613Sbms 1363170613Sbms if (sopt->sopt_name == MCAST_BLOCK_SOURCE) 1364189592Sbms doblock = 1; 1365170613Sbms break; 1366170613Sbms 1367170613Sbms default: 1368189592Sbms CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", 1369189592Sbms __func__, sopt->sopt_name); 1370170613Sbms return (EOPNOTSUPP); 1371170613Sbms break; 1372170613Sbms } 1373170613Sbms 1374170613Sbms if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) 1375170613Sbms return (EINVAL); 1376170613Sbms 1377170613Sbms /* 1378170613Sbms * Check if we are actually a member of this group. 
1379170613Sbms */ 1380170613Sbms imo = inp_findmoptions(inp); 1381170613Sbms idx = imo_match_group(imo, ifp, &gsa->sa); 1382170613Sbms if (idx == -1 || imo->imo_mfilters == NULL) { 1383170613Sbms error = EADDRNOTAVAIL; 1384189592Sbms goto out_inp_locked; 1385170613Sbms } 1386170613Sbms 1387170613Sbms KASSERT(imo->imo_mfilters != NULL, 1388170613Sbms ("%s: imo_mfilters not allocated", __func__)); 1389170613Sbms imf = &imo->imo_mfilters[idx]; 1390189592Sbms inm = imo->imo_membership[idx]; 1391170613Sbms 1392170613Sbms /* 1393189592Sbms * Attempting to use the delta-based API on an 1394189592Sbms * non exclusive-mode membership is an error. 1395170613Sbms */ 1396189592Sbms fmode = imf->imf_st[0]; 1397189592Sbms if (fmode != MCAST_EXCLUDE) { 1398189592Sbms error = EINVAL; 1399189592Sbms goto out_inp_locked; 1400170613Sbms } 1401189592Sbms 1402189592Sbms /* 1403189592Sbms * Deal with error cases up-front: 1404189592Sbms * Asked to block, but already blocked; or 1405189592Sbms * Asked to unblock, but nothing to unblock. 1406189592Sbms * If adding a new block entry, allocate it. 1407189592Sbms */ 1408170613Sbms ims = imo_match_source(imo, idx, &ssa->sa); 1409189592Sbms if ((ims != NULL && doblock) || (ims == NULL && !doblock)) { 1410189592Sbms CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__, 1411189592Sbms inet_ntoa(ssa->sin.sin_addr), doblock ? "" : "not "); 1412189592Sbms error = EADDRNOTAVAIL; 1413189592Sbms goto out_inp_locked; 1414189592Sbms } 1415189592Sbms 1416189592Sbms INP_WLOCK_ASSERT(inp); 1417189592Sbms 1418189592Sbms /* 1419189592Sbms * Begin state merge transaction at socket layer. 
1420189592Sbms */ 1421189592Sbms if (doblock) { 1422189592Sbms CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block"); 1423189592Sbms ims = imf_graft(imf, fmode, &ssa->sin); 1424189592Sbms if (ims == NULL) 1425189592Sbms error = ENOMEM; 1426170613Sbms } else { 1427189592Sbms CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow"); 1428189592Sbms error = imf_prune(imf, &ssa->sin); 1429170613Sbms } 1430170613Sbms 1431189592Sbms if (error) { 1432189592Sbms CTR1(KTR_IGMPV3, "%s: merge imf state failed", __func__); 1433189592Sbms goto out_imf_rollback; 1434189592Sbms } 1435189592Sbms 1436189592Sbms /* 1437189592Sbms * Begin state merge transaction at IGMP layer. 1438189592Sbms */ 1439189592Sbms IN_MULTI_LOCK(); 1440189592Sbms 1441189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 1442189592Sbms error = inm_merge(inm, imf); 1443189592Sbms if (error) { 1444189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); 1445189592Sbms goto out_imf_rollback; 1446189592Sbms } 1447189592Sbms 1448189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 1449189592Sbms error = igmp_change_state(inm); 1450189592Sbms if (error) 1451189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); 1452189592Sbms 1453189592Sbms IN_MULTI_UNLOCK(); 1454189592Sbms 1455189592Sbmsout_imf_rollback: 1456189592Sbms if (error) 1457189592Sbms imf_rollback(imf); 1458189592Sbms else 1459189592Sbms imf_commit(imf); 1460189592Sbms 1461189592Sbms imf_reap(imf); 1462189592Sbms 1463189592Sbmsout_inp_locked: 1464178285Srwatson INP_WUNLOCK(inp); 1465170613Sbms return (error); 1466170613Sbms} 1467170613Sbms 1468170613Sbms/* 1469170613Sbms * Given an inpcb, return its multicast options structure pointer. Accepts 1470170613Sbms * an unlocked inpcb pointer, but will return it locked. May sleep. 1471189592Sbms * 1472189592Sbms * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held. 1473189592Sbms * SMPng: NOTE: Returns with the INP write lock held. 
1474170613Sbms */ 1475170613Sbmsstatic struct ip_moptions * 1476170613Sbmsinp_findmoptions(struct inpcb *inp) 1477170613Sbms{ 1478170613Sbms struct ip_moptions *imo; 1479170613Sbms struct in_multi **immp; 1480170613Sbms struct in_mfilter *imfp; 1481170613Sbms size_t idx; 1482170613Sbms 1483178285Srwatson INP_WLOCK(inp); 1484170613Sbms if (inp->inp_moptions != NULL) 1485170613Sbms return (inp->inp_moptions); 1486170613Sbms 1487178285Srwatson INP_WUNLOCK(inp); 1488170613Sbms 1489189592Sbms imo = malloc(sizeof(*imo), M_IPMOPTS, M_WAITOK); 1490189592Sbms immp = malloc(sizeof(*immp) * IP_MIN_MEMBERSHIPS, M_IPMOPTS, 1491189592Sbms M_WAITOK | M_ZERO); 1492189592Sbms imfp = malloc(sizeof(struct in_mfilter) * IP_MIN_MEMBERSHIPS, 1493189592Sbms M_INMFILTER, M_WAITOK); 1494170613Sbms 1495170613Sbms imo->imo_multicast_ifp = NULL; 1496170613Sbms imo->imo_multicast_addr.s_addr = INADDR_ANY; 1497170613Sbms imo->imo_multicast_vif = -1; 1498170613Sbms imo->imo_multicast_ttl = IP_DEFAULT_MULTICAST_TTL; 1499189357Sbms imo->imo_multicast_loop = in_mcast_loop; 1500170613Sbms imo->imo_num_memberships = 0; 1501170613Sbms imo->imo_max_memberships = IP_MIN_MEMBERSHIPS; 1502170613Sbms imo->imo_membership = immp; 1503170613Sbms 1504170613Sbms /* Initialize per-group source filters. */ 1505189592Sbms for (idx = 0; idx < IP_MIN_MEMBERSHIPS; idx++) 1506189592Sbms imf_init(&imfp[idx], MCAST_UNDEFINED, MCAST_EXCLUDE); 1507170613Sbms imo->imo_mfilters = imfp; 1508170613Sbms 1509178285Srwatson INP_WLOCK(inp); 1510170613Sbms if (inp->inp_moptions != NULL) { 1511189592Sbms free(imfp, M_INMFILTER); 1512170613Sbms free(immp, M_IPMOPTS); 1513170613Sbms free(imo, M_IPMOPTS); 1514170613Sbms return (inp->inp_moptions); 1515170613Sbms } 1516170613Sbms inp->inp_moptions = imo; 1517170613Sbms return (imo); 1518170613Sbms} 1519170613Sbms 1520170613Sbms/* 1521170613Sbms * Discard the IP multicast options (and source filters). 1522189592Sbms * 1523189592Sbms * SMPng: NOTE: assumes INP write lock is held. 
1524170613Sbms */ 1525170613Sbmsvoid 1526170613Sbmsinp_freemoptions(struct ip_moptions *imo) 1527170613Sbms{ 1528170613Sbms struct in_mfilter *imf; 1529170613Sbms size_t idx, nmships; 1530170613Sbms 1531170613Sbms KASSERT(imo != NULL, ("%s: ip_moptions is NULL", __func__)); 1532170613Sbms 1533170613Sbms nmships = imo->imo_num_memberships; 1534170613Sbms for (idx = 0; idx < nmships; ++idx) { 1535189592Sbms imf = imo->imo_mfilters ? &imo->imo_mfilters[idx] : NULL; 1536189592Sbms if (imf) 1537189592Sbms imf_leave(imf); 1538189592Sbms (void)in_leavegroup(imo->imo_membership[idx], imf); 1539189592Sbms if (imf) 1540189592Sbms imf_purge(imf); 1541170613Sbms } 1542170613Sbms 1543189592Sbms if (imo->imo_mfilters) 1544189592Sbms free(imo->imo_mfilters, M_INMFILTER); 1545170613Sbms free(imo->imo_membership, M_IPMOPTS); 1546170613Sbms free(imo, M_IPMOPTS); 1547170613Sbms} 1548170613Sbms 1549170613Sbms/* 1550170613Sbms * Atomically get source filters on a socket for an IPv4 multicast group. 1551170613Sbms * Called with INP lock held; returns with lock released. 
1552170613Sbms */ 1553170613Sbmsstatic int 1554170613Sbmsinp_get_source_filters(struct inpcb *inp, struct sockopt *sopt) 1555170613Sbms{ 1556170613Sbms struct __msfilterreq msfr; 1557170613Sbms sockunion_t *gsa; 1558170613Sbms struct ifnet *ifp; 1559170613Sbms struct ip_moptions *imo; 1560170613Sbms struct in_mfilter *imf; 1561189592Sbms struct ip_msource *ims; 1562189592Sbms struct in_msource *lims; 1563189592Sbms struct sockaddr_in *psin; 1564170613Sbms struct sockaddr_storage *ptss; 1565170613Sbms struct sockaddr_storage *tss; 1566170613Sbms int error; 1567189592Sbms size_t idx, nsrcs, ncsrcs; 1568170613Sbms 1569178285Srwatson INP_WLOCK_ASSERT(inp); 1570170613Sbms 1571170613Sbms imo = inp->inp_moptions; 1572170613Sbms KASSERT(imo != NULL, ("%s: null ip_moptions", __func__)); 1573170613Sbms 1574178285Srwatson INP_WUNLOCK(inp); 1575170613Sbms 1576170613Sbms error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq), 1577170613Sbms sizeof(struct __msfilterreq)); 1578170613Sbms if (error) 1579170613Sbms return (error); 1580170613Sbms 1581181803Sbz if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex) 1582170613Sbms return (EINVAL); 1583170613Sbms 1584170613Sbms ifp = ifnet_byindex(msfr.msfr_ifindex); 1585170613Sbms if (ifp == NULL) 1586170613Sbms return (EINVAL); 1587170613Sbms 1588178285Srwatson INP_WLOCK(inp); 1589170613Sbms 1590170613Sbms /* 1591170613Sbms * Lookup group on the socket. 1592170613Sbms */ 1593170613Sbms gsa = (sockunion_t *)&msfr.msfr_group; 1594170613Sbms idx = imo_match_group(imo, ifp, &gsa->sa); 1595170613Sbms if (idx == -1 || imo->imo_mfilters == NULL) { 1596178285Srwatson INP_WUNLOCK(inp); 1597170613Sbms return (EADDRNOTAVAIL); 1598170613Sbms } 1599170613Sbms imf = &imo->imo_mfilters[idx]; 1600170613Sbms 1601170613Sbms /* 1602189592Sbms * Ignore memberships which are in limbo. 
1603189592Sbms */ 1604189592Sbms if (imf->imf_st[1] == MCAST_UNDEFINED) { 1605189592Sbms INP_WUNLOCK(inp); 1606189592Sbms return (EAGAIN); 1607189592Sbms } 1608189592Sbms msfr.msfr_fmode = imf->imf_st[1]; 1609189592Sbms 1610189592Sbms /* 1611170613Sbms * If the user specified a buffer, copy out the source filter 1612170613Sbms * entries to userland gracefully. 1613189592Sbms * We only copy out the number of entries which userland 1614189592Sbms * has asked for, but we always tell userland how big the 1615189592Sbms * buffer really needs to be. 1616170613Sbms */ 1617170613Sbms tss = NULL; 1618170613Sbms if (msfr.msfr_srcs != NULL && msfr.msfr_nsrcs > 0) { 1619184214Sdes tss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs, 1620189592Sbms M_TEMP, M_NOWAIT | M_ZERO); 1621170613Sbms if (tss == NULL) { 1622189592Sbms INP_WUNLOCK(inp); 1623189592Sbms return (ENOBUFS); 1624170613Sbms } 1625170613Sbms } 1626170613Sbms 1627189592Sbms /* 1628189592Sbms * Count number of sources in-mode at t0. 1629189592Sbms * If buffer space exists and remains, copy out source entries. 
1630189592Sbms */ 1631189592Sbms nsrcs = msfr.msfr_nsrcs; 1632189592Sbms ncsrcs = 0; 1633189592Sbms ptss = tss; 1634189592Sbms RB_FOREACH(ims, ip_msource_tree, &imf->imf_sources) { 1635189592Sbms lims = (struct in_msource *)ims; 1636189592Sbms if (lims->imsl_st[0] == MCAST_UNDEFINED || 1637189592Sbms lims->imsl_st[0] != imf->imf_st[0]) 1638189592Sbms continue; 1639189592Sbms ++ncsrcs; 1640191659Sbms if (tss != NULL && nsrcs > 0) { 1641191659Sbms psin = (struct sockaddr_in *)ptss; 1642189592Sbms psin->sin_family = AF_INET; 1643189592Sbms psin->sin_len = sizeof(struct sockaddr_in); 1644189592Sbms psin->sin_addr.s_addr = htonl(lims->ims_haddr); 1645191659Sbms psin->sin_port = 0; 1646191659Sbms ++ptss; 1647191659Sbms --nsrcs; 1648189592Sbms } 1649189592Sbms } 1650189592Sbms 1651178285Srwatson INP_WUNLOCK(inp); 1652170613Sbms 1653170613Sbms if (tss != NULL) { 1654170613Sbms error = copyout(tss, msfr.msfr_srcs, 1655170613Sbms sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs); 1656184205Sdes free(tss, M_TEMP); 1657189592Sbms if (error) 1658189592Sbms return (error); 1659170613Sbms } 1660170613Sbms 1661189592Sbms msfr.msfr_nsrcs = ncsrcs; 1662170613Sbms error = sooptcopyout(sopt, &msfr, sizeof(struct __msfilterreq)); 1663170613Sbms 1664170613Sbms return (error); 1665170613Sbms} 1666170613Sbms 1667170613Sbms/* 1668170613Sbms * Return the IP multicast options in response to user getsockopt(). 1669170613Sbms */ 1670170613Sbmsint 1671170613Sbmsinp_getmoptions(struct inpcb *inp, struct sockopt *sopt) 1672170613Sbms{ 1673170613Sbms struct ip_mreqn mreqn; 1674170613Sbms struct ip_moptions *imo; 1675170613Sbms struct ifnet *ifp; 1676170613Sbms struct in_ifaddr *ia; 1677170613Sbms int error, optval; 1678170613Sbms u_char coptval; 1679170613Sbms 1680178285Srwatson INP_WLOCK(inp); 1681170613Sbms imo = inp->inp_moptions; 1682171746Scsjp /* 1683171746Scsjp * If socket is neither of type SOCK_RAW or SOCK_DGRAM, 1684171746Scsjp * or is a divert socket, reject it. 
1685171746Scsjp */ 1686171746Scsjp if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT || 1687171746Scsjp (inp->inp_socket->so_proto->pr_type != SOCK_RAW && 1688171746Scsjp inp->inp_socket->so_proto->pr_type != SOCK_DGRAM)) { 1689178285Srwatson INP_WUNLOCK(inp); 1690171746Scsjp return (EOPNOTSUPP); 1691171746Scsjp } 1692170613Sbms 1693170613Sbms error = 0; 1694170613Sbms switch (sopt->sopt_name) { 1695170613Sbms case IP_MULTICAST_VIF: 1696170613Sbms if (imo != NULL) 1697170613Sbms optval = imo->imo_multicast_vif; 1698170613Sbms else 1699170613Sbms optval = -1; 1700178285Srwatson INP_WUNLOCK(inp); 1701170613Sbms error = sooptcopyout(sopt, &optval, sizeof(int)); 1702170613Sbms break; 1703170613Sbms 1704170613Sbms case IP_MULTICAST_IF: 1705170613Sbms memset(&mreqn, 0, sizeof(struct ip_mreqn)); 1706170613Sbms if (imo != NULL) { 1707170613Sbms ifp = imo->imo_multicast_ifp; 1708189592Sbms if (!in_nullhost(imo->imo_multicast_addr)) { 1709170613Sbms mreqn.imr_address = imo->imo_multicast_addr; 1710170613Sbms } else if (ifp != NULL) { 1711170613Sbms mreqn.imr_ifindex = ifp->if_index; 1712170613Sbms IFP_TO_IA(ifp, ia); 1713170613Sbms if (ia != NULL) { 1714170613Sbms mreqn.imr_address = 1715170613Sbms IA_SIN(ia)->sin_addr; 1716194760Srwatson ifa_free(&ia->ia_ifa); 1717170613Sbms } 1718170613Sbms } 1719170613Sbms } 1720178285Srwatson INP_WUNLOCK(inp); 1721170613Sbms if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) { 1722170613Sbms error = sooptcopyout(sopt, &mreqn, 1723170613Sbms sizeof(struct ip_mreqn)); 1724170613Sbms } else { 1725170613Sbms error = sooptcopyout(sopt, &mreqn.imr_address, 1726170613Sbms sizeof(struct in_addr)); 1727170613Sbms } 1728170613Sbms break; 1729170613Sbms 1730170613Sbms case IP_MULTICAST_TTL: 1731170613Sbms if (imo == 0) 1732170613Sbms optval = coptval = IP_DEFAULT_MULTICAST_TTL; 1733170613Sbms else 1734170613Sbms optval = coptval = imo->imo_multicast_ttl; 1735178285Srwatson INP_WUNLOCK(inp); 1736170613Sbms if (sopt->sopt_valsize == 
sizeof(u_char)) 1737170613Sbms error = sooptcopyout(sopt, &coptval, sizeof(u_char)); 1738170613Sbms else 1739170613Sbms error = sooptcopyout(sopt, &optval, sizeof(int)); 1740170613Sbms break; 1741170613Sbms 1742170613Sbms case IP_MULTICAST_LOOP: 1743170613Sbms if (imo == 0) 1744170613Sbms optval = coptval = IP_DEFAULT_MULTICAST_LOOP; 1745170613Sbms else 1746170613Sbms optval = coptval = imo->imo_multicast_loop; 1747178285Srwatson INP_WUNLOCK(inp); 1748170613Sbms if (sopt->sopt_valsize == sizeof(u_char)) 1749170613Sbms error = sooptcopyout(sopt, &coptval, sizeof(u_char)); 1750170613Sbms else 1751170613Sbms error = sooptcopyout(sopt, &optval, sizeof(int)); 1752170613Sbms break; 1753170613Sbms 1754170613Sbms case IP_MSFILTER: 1755170613Sbms if (imo == NULL) { 1756170613Sbms error = EADDRNOTAVAIL; 1757178285Srwatson INP_WUNLOCK(inp); 1758170613Sbms } else { 1759170613Sbms error = inp_get_source_filters(inp, sopt); 1760170613Sbms } 1761170613Sbms break; 1762170613Sbms 1763170613Sbms default: 1764178285Srwatson INP_WUNLOCK(inp); 1765170613Sbms error = ENOPROTOOPT; 1766170613Sbms break; 1767170613Sbms } 1768170613Sbms 1769170613Sbms INP_UNLOCK_ASSERT(inp); 1770170613Sbms 1771170613Sbms return (error); 1772170613Sbms} 1773170613Sbms 1774170613Sbms/* 1775189592Sbms * Look up the ifnet to use for a multicast group membership, 1776189592Sbms * given the IPv4 address of an interface, and the IPv4 group address. 1777189592Sbms * 1778189592Sbms * This routine exists to support legacy multicast applications 1779189592Sbms * which do not understand that multicast memberships are scoped to 1780189592Sbms * specific physical links in the networking stack, or which need 1781189592Sbms * to join link-scope groups before IPv4 addresses are configured. 1782189592Sbms * 1783189592Sbms * If inp is non-NULL, use this socket's current FIB number for any 1784189592Sbms * required FIB lookup. 
1785189592Sbms * If ina is INADDR_ANY, look up the group address in the unicast FIB, 1786189592Sbms * and use its ifp; usually, this points to the default next-hop. 1787189592Sbms * 1788189592Sbms * If the FIB lookup fails, attempt to use the first non-loopback 1789189592Sbms * interface with multicast capability in the system as a 1790189592Sbms * last resort. The legacy IPv4 ASM API requires that we do 1791189592Sbms * this in order to allow groups to be joined when the routing 1792189592Sbms * table has not yet been populated during boot. 1793189592Sbms * 1794189592Sbms * Returns NULL if no ifp could be found. 1795189592Sbms * 1796189592Sbms * SMPng: TODO: Acquire the appropriate locks for INADDR_TO_IFP. 1797189592Sbms * FUTURE: Implement IPv4 source-address selection. 1798189592Sbms */ 1799189592Sbmsstatic struct ifnet * 1800189592Sbmsinp_lookup_mcast_ifp(const struct inpcb *inp, 1801189592Sbms const struct sockaddr_in *gsin, const struct in_addr ina) 1802189592Sbms{ 1803189592Sbms struct ifnet *ifp; 1804189592Sbms 1805189592Sbms KASSERT(gsin->sin_family == AF_INET, ("%s: not AF_INET", __func__)); 1806189592Sbms KASSERT(IN_MULTICAST(ntohl(gsin->sin_addr.s_addr)), 1807189592Sbms ("%s: not multicast", __func__)); 1808189592Sbms 1809189592Sbms ifp = NULL; 1810189592Sbms if (!in_nullhost(ina)) { 1811189592Sbms INADDR_TO_IFP(ina, ifp); 1812189592Sbms } else { 1813189592Sbms struct route ro; 1814189592Sbms 1815189592Sbms ro.ro_rt = NULL; 1816189592Sbms memcpy(&ro.ro_dst, gsin, sizeof(struct sockaddr_in)); 1817189592Sbms in_rtalloc_ign(&ro, 0, inp ? 
inp->inp_inc.inc_fibnum : 0); 1818189592Sbms if (ro.ro_rt != NULL) { 1819189592Sbms ifp = ro.ro_rt->rt_ifp; 1820189592Sbms KASSERT(ifp != NULL, ("%s: null ifp", __func__)); 1821189592Sbms RTFREE(ro.ro_rt); 1822189592Sbms } else { 1823189592Sbms struct in_ifaddr *ia; 1824189592Sbms struct ifnet *mifp; 1825189592Sbms 1826189592Sbms mifp = NULL; 1827194951Srwatson IN_IFADDR_RLOCK(); 1828189592Sbms TAILQ_FOREACH(ia, &V_in_ifaddrhead, ia_link) { 1829189592Sbms mifp = ia->ia_ifp; 1830189592Sbms if (!(mifp->if_flags & IFF_LOOPBACK) && 1831189592Sbms (mifp->if_flags & IFF_MULTICAST)) { 1832189592Sbms ifp = mifp; 1833189592Sbms break; 1834189592Sbms } 1835189592Sbms } 1836194951Srwatson IN_IFADDR_RUNLOCK(); 1837189592Sbms } 1838189592Sbms } 1839189592Sbms 1840189592Sbms return (ifp); 1841189592Sbms} 1842189592Sbms 1843189592Sbms/* 1844170613Sbms * Join an IPv4 multicast group, possibly with a source. 1845170613Sbms */ 1846170613Sbmsstatic int 1847170613Sbmsinp_join_group(struct inpcb *inp, struct sockopt *sopt) 1848170613Sbms{ 1849170613Sbms struct group_source_req gsr; 1850170613Sbms sockunion_t *gsa, *ssa; 1851170613Sbms struct ifnet *ifp; 1852170613Sbms struct in_mfilter *imf; 1853170613Sbms struct ip_moptions *imo; 1854170613Sbms struct in_multi *inm; 1855189592Sbms struct in_msource *lims; 1856170613Sbms size_t idx; 1857189592Sbms int error, is_new; 1858170613Sbms 1859170613Sbms ifp = NULL; 1860189592Sbms imf = NULL; 1861197136Sbms lims = NULL; 1862170613Sbms error = 0; 1863189592Sbms is_new = 0; 1864170613Sbms 1865170613Sbms memset(&gsr, 0, sizeof(struct group_source_req)); 1866170613Sbms gsa = (sockunion_t *)&gsr.gsr_group; 1867170613Sbms gsa->ss.ss_family = AF_UNSPEC; 1868170613Sbms ssa = (sockunion_t *)&gsr.gsr_source; 1869170613Sbms ssa->ss.ss_family = AF_UNSPEC; 1870170613Sbms 1871170613Sbms switch (sopt->sopt_name) { 1872170613Sbms case IP_ADD_MEMBERSHIP: 1873170613Sbms case IP_ADD_SOURCE_MEMBERSHIP: { 1874170613Sbms struct ip_mreq_source mreqs; 1875170613Sbms 
1876170613Sbms if (sopt->sopt_name == IP_ADD_MEMBERSHIP) { 1877170613Sbms error = sooptcopyin(sopt, &mreqs, 1878170613Sbms sizeof(struct ip_mreq), 1879170613Sbms sizeof(struct ip_mreq)); 1880170613Sbms /* 1881170613Sbms * Do argument switcharoo from ip_mreq into 1882170613Sbms * ip_mreq_source to avoid using two instances. 1883170613Sbms */ 1884170613Sbms mreqs.imr_interface = mreqs.imr_sourceaddr; 1885170613Sbms mreqs.imr_sourceaddr.s_addr = INADDR_ANY; 1886170613Sbms } else if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) { 1887170613Sbms error = sooptcopyin(sopt, &mreqs, 1888170613Sbms sizeof(struct ip_mreq_source), 1889170613Sbms sizeof(struct ip_mreq_source)); 1890170613Sbms } 1891170613Sbms if (error) 1892170613Sbms return (error); 1893170613Sbms 1894170613Sbms gsa->sin.sin_family = AF_INET; 1895170613Sbms gsa->sin.sin_len = sizeof(struct sockaddr_in); 1896170613Sbms gsa->sin.sin_addr = mreqs.imr_multiaddr; 1897170613Sbms 1898170613Sbms if (sopt->sopt_name == IP_ADD_SOURCE_MEMBERSHIP) { 1899170613Sbms ssa->sin.sin_family = AF_INET; 1900170613Sbms ssa->sin.sin_len = sizeof(struct sockaddr_in); 1901170613Sbms ssa->sin.sin_addr = mreqs.imr_sourceaddr; 1902170613Sbms } 1903170613Sbms 1904196932Ssyrinx if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) 1905196932Ssyrinx return (EINVAL); 1906196932Ssyrinx 1907189592Sbms ifp = inp_lookup_mcast_ifp(inp, &gsa->sin, 1908189592Sbms mreqs.imr_interface); 1909189592Sbms CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", 1910189592Sbms __func__, inet_ntoa(mreqs.imr_interface), ifp); 1911170613Sbms break; 1912170613Sbms } 1913170613Sbms 1914170613Sbms case MCAST_JOIN_GROUP: 1915170613Sbms case MCAST_JOIN_SOURCE_GROUP: 1916170613Sbms if (sopt->sopt_name == MCAST_JOIN_GROUP) { 1917170613Sbms error = sooptcopyin(sopt, &gsr, 1918170613Sbms sizeof(struct group_req), 1919170613Sbms sizeof(struct group_req)); 1920170613Sbms } else if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1921170613Sbms error = sooptcopyin(sopt, &gsr, 
1922170613Sbms sizeof(struct group_source_req), 1923170613Sbms sizeof(struct group_source_req)); 1924170613Sbms } 1925170613Sbms if (error) 1926170613Sbms return (error); 1927170613Sbms 1928170613Sbms if (gsa->sin.sin_family != AF_INET || 1929170613Sbms gsa->sin.sin_len != sizeof(struct sockaddr_in)) 1930170613Sbms return (EINVAL); 1931170613Sbms 1932170613Sbms /* 1933170613Sbms * Overwrite the port field if present, as the sockaddr 1934170613Sbms * being copied in may be matched with a binary comparison. 1935170613Sbms */ 1936170613Sbms gsa->sin.sin_port = 0; 1937170613Sbms if (sopt->sopt_name == MCAST_JOIN_SOURCE_GROUP) { 1938170613Sbms if (ssa->sin.sin_family != AF_INET || 1939170613Sbms ssa->sin.sin_len != sizeof(struct sockaddr_in)) 1940170613Sbms return (EINVAL); 1941170613Sbms ssa->sin.sin_port = 0; 1942170613Sbms } 1943170613Sbms 1944196932Ssyrinx if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) 1945196932Ssyrinx return (EINVAL); 1946196932Ssyrinx 1947181803Sbz if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 1948170613Sbms return (EADDRNOTAVAIL); 1949170613Sbms ifp = ifnet_byindex(gsr.gsr_interface); 1950170613Sbms break; 1951170613Sbms 1952170613Sbms default: 1953189592Sbms CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", 1954189592Sbms __func__, sopt->sopt_name); 1955170613Sbms return (EOPNOTSUPP); 1956170613Sbms break; 1957170613Sbms } 1958170613Sbms 1959170613Sbms if (ifp == NULL || (ifp->if_flags & IFF_MULTICAST) == 0) 1960170613Sbms return (EADDRNOTAVAIL); 1961170613Sbms 1962170613Sbms imo = inp_findmoptions(inp); 1963170613Sbms idx = imo_match_group(imo, ifp, &gsa->sa); 1964189592Sbms if (idx == -1) { 1965189592Sbms is_new = 1; 1966189592Sbms } else { 1967189592Sbms inm = imo->imo_membership[idx]; 1968189592Sbms imf = &imo->imo_mfilters[idx]; 1969197132Sbms if (ssa->ss.ss_family != AF_UNSPEC) { 1970197132Sbms /* 1971199525Sbms * MCAST_JOIN_SOURCE_GROUP on an exclusive membership 1972197132Sbms * is an error. 
On an existing inclusive membership, 1973197132Sbms * it just adds the source to the filter list. 1974197132Sbms */ 1975197132Sbms if (imf->imf_st[1] != MCAST_INCLUDE) { 1976197132Sbms error = EINVAL; 1977197132Sbms goto out_inp_locked; 1978197132Sbms } 1979197136Sbms /* 1980197136Sbms * Throw out duplicates. 1981197136Sbms * 1982197136Sbms * XXX FIXME: This makes a naive assumption that 1983197136Sbms * even if entries exist for *ssa in this imf, 1984197136Sbms * they will be rejected as dupes, even if they 1985197136Sbms * are not valid in the current mode (in-mode). 1986197136Sbms * 1987197136Sbms * in_msource is transactioned just as for anything 1988197136Sbms * else in SSM -- but note naive use of inm_graft() 1989197136Sbms * below for allocating new filter entries. 1990197136Sbms * 1991197136Sbms * This is only an issue if someone mixes the 1992197136Sbms * full-state SSM API with the delta-based API, 1993197136Sbms * which is discouraged in the relevant RFCs. 1994197136Sbms */ 1995197132Sbms lims = imo_match_source(imo, idx, &ssa->sa); 1996197136Sbms if (lims != NULL /*&& 1997197136Sbms lims->imsl_st[1] == MCAST_INCLUDE*/) { 1998197132Sbms error = EADDRNOTAVAIL; 1999197132Sbms goto out_inp_locked; 2000197132Sbms } 2001197132Sbms } else { 2002197132Sbms /* 2003206452Sbms * MCAST_JOIN_GROUP on an existing exclusive 2004206452Sbms * membership is an error; return EADDRINUSE 2005206452Sbms * to preserve 4.4BSD API idempotence, and 2006206452Sbms * avoid tedious detour to code below. 2007206452Sbms * NOTE: This is bending RFC 3678 a bit. 2008206452Sbms * 2009197135Sbms * On an existing inclusive membership, this is also 2010197135Sbms * an error; if you want to change filter mode, 2011197135Sbms * you must use the userland API setsourcefilter(). 2012197135Sbms * XXX We don't reject this for imf in UNDEFINED 2013197135Sbms * state at t1, because allocation of a filter 2014197135Sbms * is atomic with allocation of a membership. 
2015197132Sbms */ 2016197135Sbms error = EINVAL; 2017206452Sbms if (imf->imf_st[1] == MCAST_EXCLUDE) 2018206452Sbms error = EADDRINUSE; 2019197135Sbms goto out_inp_locked; 2020189592Sbms } 2021170613Sbms } 2022170613Sbms 2023170613Sbms /* 2024189592Sbms * Begin state merge transaction at socket layer. 2025170613Sbms */ 2026189592Sbms INP_WLOCK_ASSERT(inp); 2027189592Sbms 2028189592Sbms if (is_new) { 2029189592Sbms if (imo->imo_num_memberships == imo->imo_max_memberships) { 2030189592Sbms error = imo_grow(imo); 2031189592Sbms if (error) 2032189592Sbms goto out_inp_locked; 2033189592Sbms } 2034189592Sbms /* 2035189592Sbms * Allocate the new slot upfront so we can deal with 2036189592Sbms * grafting the new source filter in same code path 2037189592Sbms * as for join-source on existing membership. 2038189592Sbms */ 2039189592Sbms idx = imo->imo_num_memberships; 2040189592Sbms imo->imo_membership[idx] = NULL; 2041189592Sbms imo->imo_num_memberships++; 2042189592Sbms KASSERT(imo->imo_mfilters != NULL, 2043189592Sbms ("%s: imf_mfilters vector was not allocated", __func__)); 2044189592Sbms imf = &imo->imo_mfilters[idx]; 2045189592Sbms KASSERT(RB_EMPTY(&imf->imf_sources), 2046189592Sbms ("%s: imf_sources not empty", __func__)); 2047170613Sbms } 2048170613Sbms 2049170613Sbms /* 2050189592Sbms * Graft new source into filter list for this inpcb's 2051189592Sbms * membership of the group. The in_multi may not have 2052197132Sbms * been allocated yet if this is a new membership, however, 2053197132Sbms * the in_mfilter slot will be allocated and must be initialized. 2054197135Sbms * 2055197135Sbms * Note: Grafting of exclusive mode filters doesn't happen 2056197135Sbms * in this path. 2057197136Sbms * XXX: Should check for non-NULL lims (node exists but may 2058197136Sbms * not be in-mode) for interop with full-state API. 
2059170613Sbms */ 2060189592Sbms if (ssa->ss.ss_family != AF_UNSPEC) { 2061189592Sbms /* Membership starts in IN mode */ 2062189592Sbms if (is_new) { 2063189592Sbms CTR1(KTR_IGMPV3, "%s: new join w/source", __func__); 2064189592Sbms imf_init(imf, MCAST_UNDEFINED, MCAST_INCLUDE); 2065189592Sbms } else { 2066189592Sbms CTR2(KTR_IGMPV3, "%s: %s source", __func__, "allow"); 2067189592Sbms } 2068189592Sbms lims = imf_graft(imf, MCAST_INCLUDE, &ssa->sin); 2069189592Sbms if (lims == NULL) { 2070189592Sbms CTR1(KTR_IGMPV3, "%s: merge imf state failed", 2071189592Sbms __func__); 2072189592Sbms error = ENOMEM; 2073189592Sbms goto out_imo_free; 2074189592Sbms } 2075197132Sbms } else { 2076197132Sbms /* No address specified; Membership starts in EX mode */ 2077197132Sbms if (is_new) { 2078197132Sbms CTR1(KTR_IGMPV3, "%s: new join w/o source", __func__); 2079197132Sbms imf_init(imf, MCAST_UNDEFINED, MCAST_EXCLUDE); 2080197132Sbms } 2081170613Sbms } 2082170613Sbms 2083170613Sbms /* 2084189592Sbms * Begin state merge transaction at IGMP layer. 
2085170613Sbms */ 2086189592Sbms IN_MULTI_LOCK(); 2087170613Sbms 2088189592Sbms if (is_new) { 2089189592Sbms error = in_joingroup_locked(ifp, &gsa->sin.sin_addr, imf, 2090189592Sbms &inm); 2091189592Sbms if (error) 2092189592Sbms goto out_imo_free; 2093189592Sbms imo->imo_membership[idx] = inm; 2094189592Sbms } else { 2095189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 2096189592Sbms error = inm_merge(inm, imf); 2097170613Sbms if (error) { 2098189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", 2099189592Sbms __func__); 2100189592Sbms goto out_imf_rollback; 2101170613Sbms } 2102189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 2103189592Sbms error = igmp_change_state(inm); 2104189592Sbms if (error) { 2105189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", 2106189592Sbms __func__); 2107189592Sbms goto out_imf_rollback; 2108189592Sbms } 2109170613Sbms } 2110170613Sbms 2111189592Sbms IN_MULTI_UNLOCK(); 2112189592Sbms 2113189592Sbmsout_imf_rollback: 2114189592Sbms INP_WLOCK_ASSERT(inp); 2115189592Sbms if (error) { 2116189592Sbms imf_rollback(imf); 2117189592Sbms if (is_new) 2118189592Sbms imf_purge(imf); 2119189592Sbms else 2120189592Sbms imf_reap(imf); 2121189592Sbms } else { 2122189592Sbms imf_commit(imf); 2123189592Sbms } 2124189592Sbms 2125189592Sbmsout_imo_free: 2126189592Sbms if (error && is_new) { 2127189592Sbms imo->imo_membership[idx] = NULL; 2128189592Sbms --imo->imo_num_memberships; 2129189592Sbms } 2130189592Sbms 2131189592Sbmsout_inp_locked: 2132178285Srwatson INP_WUNLOCK(inp); 2133170613Sbms return (error); 2134170613Sbms} 2135170613Sbms 2136170613Sbms/* 2137170613Sbms * Leave an IPv4 multicast group on an inpcb, possibly with a source. 
2138170613Sbms */ 2139170613Sbmsstatic int 2140170613Sbmsinp_leave_group(struct inpcb *inp, struct sockopt *sopt) 2141170613Sbms{ 2142170613Sbms struct group_source_req gsr; 2143170613Sbms struct ip_mreq_source mreqs; 2144170613Sbms sockunion_t *gsa, *ssa; 2145170613Sbms struct ifnet *ifp; 2146170613Sbms struct in_mfilter *imf; 2147170613Sbms struct ip_moptions *imo; 2148189592Sbms struct in_msource *ims; 2149170613Sbms struct in_multi *inm; 2150170613Sbms size_t idx; 2151189592Sbms int error, is_final; 2152170613Sbms 2153170613Sbms ifp = NULL; 2154170613Sbms error = 0; 2155189592Sbms is_final = 1; 2156170613Sbms 2157170613Sbms memset(&gsr, 0, sizeof(struct group_source_req)); 2158170613Sbms gsa = (sockunion_t *)&gsr.gsr_group; 2159170613Sbms gsa->ss.ss_family = AF_UNSPEC; 2160170613Sbms ssa = (sockunion_t *)&gsr.gsr_source; 2161170613Sbms ssa->ss.ss_family = AF_UNSPEC; 2162170613Sbms 2163170613Sbms switch (sopt->sopt_name) { 2164170613Sbms case IP_DROP_MEMBERSHIP: 2165170613Sbms case IP_DROP_SOURCE_MEMBERSHIP: 2166170613Sbms if (sopt->sopt_name == IP_DROP_MEMBERSHIP) { 2167170613Sbms error = sooptcopyin(sopt, &mreqs, 2168170613Sbms sizeof(struct ip_mreq), 2169170613Sbms sizeof(struct ip_mreq)); 2170170613Sbms /* 2171170613Sbms * Swap interface and sourceaddr arguments, 2172170613Sbms * as ip_mreq and ip_mreq_source are laid 2173170613Sbms * out differently. 
2174170613Sbms */ 2175170613Sbms mreqs.imr_interface = mreqs.imr_sourceaddr; 2176170613Sbms mreqs.imr_sourceaddr.s_addr = INADDR_ANY; 2177170613Sbms } else if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) { 2178170613Sbms error = sooptcopyin(sopt, &mreqs, 2179170613Sbms sizeof(struct ip_mreq_source), 2180170613Sbms sizeof(struct ip_mreq_source)); 2181170613Sbms } 2182170613Sbms if (error) 2183170613Sbms return (error); 2184170613Sbms 2185170613Sbms gsa->sin.sin_family = AF_INET; 2186170613Sbms gsa->sin.sin_len = sizeof(struct sockaddr_in); 2187170613Sbms gsa->sin.sin_addr = mreqs.imr_multiaddr; 2188170613Sbms 2189170613Sbms if (sopt->sopt_name == IP_DROP_SOURCE_MEMBERSHIP) { 2190170613Sbms ssa->sin.sin_family = AF_INET; 2191170613Sbms ssa->sin.sin_len = sizeof(struct sockaddr_in); 2192170613Sbms ssa->sin.sin_addr = mreqs.imr_sourceaddr; 2193170613Sbms } 2194170613Sbms 2195206452Sbms /* 2196206452Sbms * Attempt to look up hinted ifp from interface address. 2197206452Sbms * Fallthrough with null ifp iff lookup fails, to 2198206452Sbms * preserve 4.4BSD mcast API idempotence. 2199206452Sbms * XXX NOTE WELL: The RFC 3678 API is preferred because 2200206452Sbms * using an IPv4 address as a key is racy. 
2201206452Sbms */ 2202206452Sbms if (!in_nullhost(mreqs.imr_interface)) 2203170613Sbms INADDR_TO_IFP(mreqs.imr_interface, ifp); 2204170613Sbms 2205189592Sbms CTR3(KTR_IGMPV3, "%s: imr_interface = %s, ifp = %p", 2206189592Sbms __func__, inet_ntoa(mreqs.imr_interface), ifp); 2207189592Sbms 2208170613Sbms break; 2209170613Sbms 2210170613Sbms case MCAST_LEAVE_GROUP: 2211170613Sbms case MCAST_LEAVE_SOURCE_GROUP: 2212170613Sbms if (sopt->sopt_name == MCAST_LEAVE_GROUP) { 2213170613Sbms error = sooptcopyin(sopt, &gsr, 2214170613Sbms sizeof(struct group_req), 2215170613Sbms sizeof(struct group_req)); 2216170613Sbms } else if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2217170613Sbms error = sooptcopyin(sopt, &gsr, 2218170613Sbms sizeof(struct group_source_req), 2219170613Sbms sizeof(struct group_source_req)); 2220170613Sbms } 2221170613Sbms if (error) 2222170613Sbms return (error); 2223170613Sbms 2224170613Sbms if (gsa->sin.sin_family != AF_INET || 2225170613Sbms gsa->sin.sin_len != sizeof(struct sockaddr_in)) 2226170613Sbms return (EINVAL); 2227170613Sbms 2228170613Sbms if (sopt->sopt_name == MCAST_LEAVE_SOURCE_GROUP) { 2229170613Sbms if (ssa->sin.sin_family != AF_INET || 2230170613Sbms ssa->sin.sin_len != sizeof(struct sockaddr_in)) 2231170613Sbms return (EINVAL); 2232170613Sbms } 2233170613Sbms 2234181803Sbz if (gsr.gsr_interface == 0 || V_if_index < gsr.gsr_interface) 2235170613Sbms return (EADDRNOTAVAIL); 2236170613Sbms 2237170613Sbms ifp = ifnet_byindex(gsr.gsr_interface); 2238206452Sbms 2239206452Sbms if (ifp == NULL) 2240206452Sbms return (EADDRNOTAVAIL); 2241170613Sbms break; 2242170613Sbms 2243170613Sbms default: 2244189592Sbms CTR2(KTR_IGMPV3, "%s: unknown sopt_name %d", 2245189592Sbms __func__, sopt->sopt_name); 2246170613Sbms return (EOPNOTSUPP); 2247170613Sbms break; 2248170613Sbms } 2249170613Sbms 2250170613Sbms if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr))) 2251170613Sbms return (EINVAL); 2252170613Sbms 2253170613Sbms /* 2254170613Sbms * Find the 
membership in the membership array. 2255170613Sbms */ 2256170613Sbms imo = inp_findmoptions(inp); 2257170613Sbms idx = imo_match_group(imo, ifp, &gsa->sa); 2258170613Sbms if (idx == -1) { 2259170613Sbms error = EADDRNOTAVAIL; 2260189592Sbms goto out_inp_locked; 2261170613Sbms } 2262189592Sbms inm = imo->imo_membership[idx]; 2263170613Sbms imf = &imo->imo_mfilters[idx]; 2264170613Sbms 2265189592Sbms if (ssa->ss.ss_family != AF_UNSPEC) 2266189592Sbms is_final = 0; 2267189592Sbms 2268170613Sbms /* 2269189592Sbms * Begin state merge transaction at socket layer. 2270189592Sbms */ 2271189592Sbms INP_WLOCK_ASSERT(inp); 2272189592Sbms 2273189592Sbms /* 2274170613Sbms * If we were instructed only to leave a given source, do so. 2275189592Sbms * MCAST_LEAVE_SOURCE_GROUP is only valid for inclusive memberships. 2276170613Sbms */ 2277189592Sbms if (is_final) { 2278189592Sbms imf_leave(imf); 2279189592Sbms } else { 2280189592Sbms if (imf->imf_st[0] == MCAST_EXCLUDE) { 2281189592Sbms error = EADDRNOTAVAIL; 2282189592Sbms goto out_inp_locked; 2283170613Sbms } 2284189592Sbms ims = imo_match_source(imo, idx, &ssa->sa); 2285189592Sbms if (ims == NULL) { 2286189592Sbms CTR3(KTR_IGMPV3, "%s: source %s %spresent", __func__, 2287189592Sbms inet_ntoa(ssa->sin.sin_addr), "not "); 2288189592Sbms error = EADDRNOTAVAIL; 2289189592Sbms goto out_inp_locked; 2290189592Sbms } 2291189592Sbms CTR2(KTR_IGMPV3, "%s: %s source", __func__, "block"); 2292189592Sbms error = imf_prune(imf, &ssa->sin); 2293189592Sbms if (error) { 2294189592Sbms CTR1(KTR_IGMPV3, "%s: merge imf state failed", 2295189592Sbms __func__); 2296189592Sbms goto out_inp_locked; 2297189592Sbms } 2298170613Sbms } 2299170613Sbms 2300170613Sbms /* 2301189592Sbms * Begin state merge transaction at IGMP layer. 2302170613Sbms */ 2303189592Sbms IN_MULTI_LOCK(); 2304170613Sbms 2305189592Sbms if (is_final) { 2306189592Sbms /* 2307189592Sbms * Give up the multicast address record to which 2308189592Sbms * the membership points. 
2309189592Sbms */ 2310189592Sbms (void)in_leavegroup_locked(inm, imf); 2311189592Sbms } else { 2312189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 2313189592Sbms error = inm_merge(inm, imf); 2314189592Sbms if (error) { 2315189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", 2316189592Sbms __func__); 2317189592Sbms goto out_imf_rollback; 2318170613Sbms } 2319189592Sbms 2320189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 2321189592Sbms error = igmp_change_state(inm); 2322189592Sbms if (error) { 2323189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", 2324189592Sbms __func__); 2325189592Sbms } 2326170613Sbms } 2327170613Sbms 2328189592Sbms IN_MULTI_UNLOCK(); 2329170613Sbms 2330189592Sbmsout_imf_rollback: 2331189592Sbms if (error) 2332189592Sbms imf_rollback(imf); 2333189592Sbms else 2334189592Sbms imf_commit(imf); 2335189592Sbms 2336189592Sbms imf_reap(imf); 2337189592Sbms 2338189592Sbms if (is_final) { 2339197130Sbms /* Remove the gap in the membership and filter array. */ 2340197130Sbms for (++idx; idx < imo->imo_num_memberships; ++idx) { 2341189592Sbms imo->imo_membership[idx-1] = imo->imo_membership[idx]; 2342197130Sbms imo->imo_mfilters[idx-1] = imo->imo_mfilters[idx]; 2343197130Sbms } 2344189592Sbms imo->imo_num_memberships--; 2345189592Sbms } 2346189592Sbms 2347189592Sbmsout_inp_locked: 2348178285Srwatson INP_WUNLOCK(inp); 2349170613Sbms return (error); 2350170613Sbms} 2351170613Sbms 2352170613Sbms/* 2353170613Sbms * Select the interface for transmitting IPv4 multicast datagrams. 2354170613Sbms * 2355170613Sbms * Either an instance of struct in_addr or an instance of struct ip_mreqn 2356170613Sbms * may be passed to this socket option. An address of INADDR_ANY or an 2357170613Sbms * interface index of 0 is used to remove a previous selection. 2358170613Sbms * When no interface is selected, one is chosen for every send. 
 */
static int
inp_set_multicast_if(struct inpcb *inp, struct sockopt *sopt)
{
	struct in_addr addr;
	struct ip_mreqn mreqn;
	struct ifnet *ifp;
	struct ip_moptions *imo;
	int error;

	/* Distinguish the two accepted argument forms by option size. */
	if (sopt->sopt_valsize == sizeof(struct ip_mreqn)) {
		/*
		 * An interface index was specified using the
		 * Linux-derived ip_mreqn structure.
		 */
		error = sooptcopyin(sopt, &mreqn, sizeof(struct ip_mreqn),
		    sizeof(struct ip_mreqn));
		if (error)
			return (error);

		if (mreqn.imr_ifindex < 0 || V_if_index < mreqn.imr_ifindex)
			return (EINVAL);

		if (mreqn.imr_ifindex == 0) {
			/* Index 0 clears any previous selection. */
			ifp = NULL;
		} else {
			ifp = ifnet_byindex(mreqn.imr_ifindex);
			if (ifp == NULL)
				return (EADDRNOTAVAIL);
		}
	} else {
		/*
		 * An interface was specified by IPv4 address.
		 * This is the traditional BSD usage.
		 */
		error = sooptcopyin(sopt, &addr, sizeof(struct in_addr),
		    sizeof(struct in_addr));
		if (error)
			return (error);
		if (in_nullhost(addr)) {
			/* INADDR_ANY clears any previous selection. */
			ifp = NULL;
		} else {
			INADDR_TO_IFP(addr, ifp);
			if (ifp == NULL)
				return (EADDRNOTAVAIL);
		}
		CTR3(KTR_IGMPV3, "%s: ifp = %p, addr = %s", __func__, ifp,
		    inet_ntoa(addr));
	}

	/* Reject interfaces which do not support multicast. */
	if (ifp != NULL && (ifp->if_flags & IFF_MULTICAST) == 0)
		return (EOPNOTSUPP);

	/* inp_findmoptions() returns with the INP write lock held. */
	imo = inp_findmoptions(inp);
	imo->imo_multicast_ifp = ifp;
	imo->imo_multicast_addr.s_addr = INADDR_ANY;
	INP_WUNLOCK(inp);

	return (0);
}

/*
 * Atomically set source filters on a socket for an IPv4 multicast group.
 *
 * SMPng: NOTE: Potentially calls malloc(M_WAITOK) with Giant held.
 */
static int
inp_set_source_filters(struct inpcb *inp, struct sockopt *sopt)
{
	struct __msfilterreq msfr;
	sockunion_t *gsa;
	struct ifnet *ifp;
	struct in_mfilter *imf;
	struct ip_moptions *imo;
	struct in_multi *inm;
	size_t idx;
	int error;

	error = sooptcopyin(sopt, &msfr, sizeof(struct __msfilterreq),
	    sizeof(struct __msfilterreq));
	if (error)
		return (error);

	/* Bound user-supplied source count before it sizes an allocation. */
	if (msfr.msfr_nsrcs > in_mcast_maxsocksrc)
		return (ENOBUFS);

	if ((msfr.msfr_fmode != MCAST_EXCLUDE &&
	     msfr.msfr_fmode != MCAST_INCLUDE))
		return (EINVAL);

	if (msfr.msfr_group.ss_family != AF_INET ||
	    msfr.msfr_group.ss_len != sizeof(struct sockaddr_in))
		return (EINVAL);

	gsa = (sockunion_t *)&msfr.msfr_group;
	if (!IN_MULTICAST(ntohl(gsa->sin.sin_addr.s_addr)))
		return (EINVAL);

	gsa->sin.sin_port = 0;	/* ignore port */

	if (msfr.msfr_ifindex == 0 || V_if_index < msfr.msfr_ifindex)
		return (EADDRNOTAVAIL);

	ifp = ifnet_byindex(msfr.msfr_ifindex);
	if (ifp == NULL)
		return (EADDRNOTAVAIL);

	/*
	 * Take the INP write lock.
	 * Check if this socket is a member of this group.
	 */
	imo = inp_findmoptions(inp);
	idx = imo_match_group(imo, ifp, &gsa->sa);
	if (idx == -1 || imo->imo_mfilters == NULL) {
		error = EADDRNOTAVAIL;
		goto out_inp_locked;
	}
	inm = imo->imo_membership[idx];
	imf = &imo->imo_mfilters[idx];

	/*
	 * Begin state merge transaction at socket layer.
	 */
	INP_WLOCK_ASSERT(inp);

	/* Stage the new filter mode at t1; committed or rolled back later. */
	imf->imf_st[1] = msfr.msfr_fmode;

	/*
	 * Apply any new source filters, if present.
	 * Make a copy of the user-space source vector so
	 * that we may copy them with a single copyin. This
	 * allows us to deal with page faults up-front.
	 */
	if (msfr.msfr_nsrcs > 0) {
		struct in_msource *lims;
		struct sockaddr_in *psin;
		struct sockaddr_storage *kss, *pkss;
		int i;

		/* Drop the INP lock: M_WAITOK malloc and copyin may sleep. */
		INP_WUNLOCK(inp);

		CTR2(KTR_IGMPV3, "%s: loading %lu source list entries",
		    __func__, (unsigned long)msfr.msfr_nsrcs);
		kss = malloc(sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs,
		    M_TEMP, M_WAITOK);
		error = copyin(msfr.msfr_srcs, kss,
		    sizeof(struct sockaddr_storage) * msfr.msfr_nsrcs);
		if (error) {
			free(kss, M_TEMP);
			return (error);
		}

		INP_WLOCK(inp);

		/*
		 * Mark all source filters as UNDEFINED at t1.
		 * Restore new group filter mode, as imf_leave()
		 * will set it to INCLUDE.
		 */
		imf_leave(imf);
		imf->imf_st[1] = msfr.msfr_fmode;

		/*
		 * Update socket layer filters at t1, lazy-allocating
		 * new entries. This saves a bunch of memory at the
		 * cost of one RB_FIND() per source entry; duplicate
		 * entries in the msfr_nsrcs vector are ignored.
		 * If we encounter an error, rollback transaction.
		 *
		 * XXX This too could be replaced with a set-symmetric
		 * difference like loop to avoid walking from root
		 * every time, as the key space is common.
2532189592Sbms */ 2533189592Sbms for (i = 0, pkss = kss; i < msfr.msfr_nsrcs; i++, pkss++) { 2534189592Sbms psin = (struct sockaddr_in *)pkss; 2535189592Sbms if (psin->sin_family != AF_INET) { 2536170613Sbms error = EAFNOSUPPORT; 2537170613Sbms break; 2538170613Sbms } 2539189592Sbms if (psin->sin_len != sizeof(struct sockaddr_in)) { 2540189592Sbms error = EINVAL; 2541189592Sbms break; 2542189592Sbms } 2543189592Sbms error = imf_get_source(imf, psin, &lims); 2544170613Sbms if (error) 2545170613Sbms break; 2546189592Sbms lims->imsl_st[1] = imf->imf_st[1]; 2547170613Sbms } 2548189592Sbms free(kss, M_TEMP); 2549189592Sbms } 2550170613Sbms 2551189592Sbms if (error) 2552189592Sbms goto out_imf_rollback; 2553170613Sbms 2554189592Sbms INP_WLOCK_ASSERT(inp); 2555189592Sbms IN_MULTI_LOCK(); 2556170613Sbms 2557170613Sbms /* 2558189592Sbms * Begin state merge transaction at IGMP layer. 2559170613Sbms */ 2560189592Sbms CTR1(KTR_IGMPV3, "%s: merge inm state", __func__); 2561189592Sbms error = inm_merge(inm, imf); 2562189592Sbms if (error) { 2563189592Sbms CTR1(KTR_IGMPV3, "%s: failed to merge inm state", __func__); 2564189592Sbms goto out_imf_rollback; 2565189592Sbms } 2566170613Sbms 2567189592Sbms CTR1(KTR_IGMPV3, "%s: doing igmp downcall", __func__); 2568189592Sbms error = igmp_change_state(inm); 2569189592Sbms if (error) 2570189592Sbms CTR1(KTR_IGMPV3, "%s: failed igmp downcall", __func__); 2571189592Sbms 2572189592Sbms IN_MULTI_UNLOCK(); 2573189592Sbms 2574189592Sbmsout_imf_rollback: 2575189592Sbms if (error) 2576189592Sbms imf_rollback(imf); 2577189592Sbms else 2578189592Sbms imf_commit(imf); 2579189592Sbms 2580189592Sbms imf_reap(imf); 2581189592Sbms 2582189592Sbmsout_inp_locked: 2583178285Srwatson INP_WUNLOCK(inp); 2584170613Sbms return (error); 2585170613Sbms} 2586170613Sbms 2587170613Sbms/* 2588170613Sbms * Set the IP multicast options in response to user setsockopt(). 
 *
 * Many of the socket options handled in this function duplicate the
 * functionality of socket options in the regular unicast API. However,
 * it is not possible to merge the duplicate code, because the idempotence
 * of the IPv4 multicast part of the BSD Sockets API must be preserved;
 * the effects of these options must be treated as separate and distinct.
 *
 * SMPng: XXX: Unlocked read of inp_socket believed OK.
 * FUTURE: The IP_MULTICAST_VIF option may be eliminated if MROUTING
 * is refactored to no longer use vifs.
 *
 * Locking protocol: inp_findmoptions() returns with the INP write lock
 * held, so every case which calls it must INP_WUNLOCK() before breaking
 * out of the switch; INP_UNLOCK_ASSERT() at the end enforces this.
 */
int
inp_setmoptions(struct inpcb *inp, struct sockopt *sopt)
{
	struct ip_moptions *imo;
	int error;

	error = 0;

	/*
	 * If socket is neither of type SOCK_RAW or SOCK_DGRAM,
	 * or is a divert socket, reject it.
	 */
	if (inp->inp_socket->so_proto->pr_protocol == IPPROTO_DIVERT ||
	    (inp->inp_socket->so_proto->pr_type != SOCK_RAW &&
	     inp->inp_socket->so_proto->pr_type != SOCK_DGRAM))
		return (EOPNOTSUPP);

	switch (sopt->sopt_name) {
	case IP_MULTICAST_VIF: {
		int vifi;
		/*
		 * Select a multicast VIF for transmission.
		 * Only useful if multicast forwarding is active.
		 * legal_vif_num is a function pointer filled in by the
		 * MROUTING code; NULL means forwarding is not loaded.
		 */
		if (legal_vif_num == NULL) {
			error = EOPNOTSUPP;
			break;
		}
		error = sooptcopyin(sopt, &vifi, sizeof(int), sizeof(int));
		if (error)
			break;
		/* -1 deselects the VIF; anything else must be valid. */
		if (!legal_vif_num(vifi) && (vifi != -1)) {
			error = EINVAL;
			break;
		}
		imo = inp_findmoptions(inp);
		imo->imo_multicast_vif = vifi;
		INP_WUNLOCK(inp);
		break;
	}

	case IP_MULTICAST_IF:
		error = inp_set_multicast_if(inp, sopt);
		break;

	case IP_MULTICAST_TTL: {
		u_char ttl;

		/*
		 * Set the IP time-to-live for outgoing multicast packets.
		 * The original multicast API required a char argument,
		 * which is inconsistent with the rest of the socket API.
		 * We allow either a char or an int.
		 */
		if (sopt->sopt_valsize == sizeof(u_char)) {
			error = sooptcopyin(sopt, &ttl, sizeof(u_char),
			    sizeof(u_char));
			if (error)
				break;
		} else {
			u_int ittl;

			error = sooptcopyin(sopt, &ittl, sizeof(u_int),
			    sizeof(u_int));
			if (error)
				break;
			if (ittl > 255) {
				error = EINVAL;
				break;
			}
			ttl = (u_char)ittl;
		}
		imo = inp_findmoptions(inp);
		imo->imo_multicast_ttl = ttl;
		INP_WUNLOCK(inp);
		break;
	}

	case IP_MULTICAST_LOOP: {
		u_char loop;

		/*
		 * Set the loopback flag for outgoing multicast packets.
		 * Must be zero or one. The original multicast API required a
		 * char argument, which is inconsistent with the rest
		 * of the socket API. We allow either a char or an int.
		 */
		if (sopt->sopt_valsize == sizeof(u_char)) {
			error = sooptcopyin(sopt, &loop, sizeof(u_char),
			    sizeof(u_char));
			if (error)
				break;
		} else {
			u_int iloop;

			error = sooptcopyin(sopt, &iloop, sizeof(u_int),
			    sizeof(u_int));
			if (error)
				break;
			loop = (u_char)iloop;
		}
		imo = inp_findmoptions(inp);
		/* Normalize any non-zero value to 1. */
		imo->imo_multicast_loop = !!loop;
		INP_WUNLOCK(inp);
		break;
	}

	case IP_ADD_MEMBERSHIP:
	case IP_ADD_SOURCE_MEMBERSHIP:
	case MCAST_JOIN_GROUP:
	case MCAST_JOIN_SOURCE_GROUP:
		error = inp_join_group(inp, sopt);
		break;

	case IP_DROP_MEMBERSHIP:
	case IP_DROP_SOURCE_MEMBERSHIP:
	case MCAST_LEAVE_GROUP:
	case MCAST_LEAVE_SOURCE_GROUP:
		error = inp_leave_group(inp, sopt);
		break;

	case IP_BLOCK_SOURCE:
	case IP_UNBLOCK_SOURCE:
	case MCAST_BLOCK_SOURCE:
	case MCAST_UNBLOCK_SOURCE:
		error = inp_block_unblock_source(inp, sopt);
		break;

	case IP_MSFILTER:
		error = inp_set_source_filters(inp, sopt);
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	/* Every path above must have released the INP lock. */
	INP_UNLOCK_ASSERT(inp);

	return (error);
}

/*
 * Expose IGMP's multicast filter mode and source list(s) to userland,
 * keyed by (ifindex, group).
 * The filter mode is written out as a uint32_t, followed by
 * 0..n of struct in_addr.
 * For use by ifmcstat(8).
 * SMPng: NOTE: unlocked read of ifindex space.
 */
static int
sysctl_ip_mcast_filters(SYSCTL_HANDLER_ARGS)
{
	struct in_addr src, group;
	struct ifnet *ifp;
	struct ifmultiaddr *ifma;
	struct in_multi *inm;
	struct ip_msource *ims;
	int *name;
	int retval;
	u_int namelen;
	uint32_t fmode, ifindex;

	/* Sysctl OID arc: name[0] = ifindex, name[1] = group address. */
	name = (int *)arg1;
	namelen = arg2;

	/* Read-only node: writes are rejected outright. */
	if (req->newptr != NULL)
		return (EPERM);

	if (namelen != 2)
		return (EINVAL);

	ifindex = name[0];
	/*
	 * NOTE(review): ifindex is unsigned, so "<= 0" only ever catches
	 * zero; a negative name[0] wraps to a large value and is caught
	 * by the upper-bound check instead.
	 */
	if (ifindex <= 0 || ifindex > V_if_index) {
		CTR2(KTR_IGMPV3, "%s: ifindex %u out of range",
		    __func__, ifindex);
		return (ENOENT);
	}

	group.s_addr = name[1];
	if (!IN_MULTICAST(ntohl(group.s_addr))) {
		CTR2(KTR_IGMPV3, "%s: group %s is not multicast",
		    __func__, inet_ntoa(group));
		return (EINVAL);
	}

	ifp = ifnet_byindex(ifindex);
	if (ifp == NULL) {
		CTR2(KTR_IGMPV3, "%s: no ifp for ifindex %u",
		    __func__, ifindex);
		return (ENOENT);
	}

	/*
	 * Wire the maximum possible reply up front so SYSCTL_OUT()
	 * cannot fault while the locks below are held.
	 */
	retval = sysctl_wire_old_buffer(req,
	    sizeof(uint32_t) + (in_mcast_maxgrpsrc * sizeof(struct in_addr)));
	if (retval)
		return (retval);

	IN_MULTI_LOCK();

	/* Lock order here: IN_MULTI_LOCK before the if_addr lock. */
	IF_ADDR_RLOCK(ifp);
	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_INET ||
		    ifma->ifma_protospec == NULL)
			continue;
		inm = (struct in_multi *)ifma->ifma_protospec;
		if (!in_hosteq(inm->inm_addr, group))
			continue;
		/* Report committed (t1) state, not pending changes. */
		fmode = inm->inm_st[1].iss_fmode;
		retval = SYSCTL_OUT(req, &fmode, sizeof(uint32_t));
		if (retval != 0)
			break;
		RB_FOREACH(ims, ip_msource_tree, &inm->inm_srcs) {
#ifdef KTR
			struct in_addr ina;
			ina.s_addr = htonl(ims->ims_haddr);
			CTR2(KTR_IGMPV3, "%s: visit node %s", __func__,
			    inet_ntoa(ina));
#endif
			/*
			 * Only copy-out sources which are in-mode.
			 */
			if (fmode != ims_get_mode(inm, ims, 1)) {
				CTR1(KTR_IGMPV3, "%s: skip non-in-mode",
				    __func__);
				continue;
			}
			/* Sources are stored in host order; emit network order. */
			src.s_addr = htonl(ims->ims_haddr);
			retval = SYSCTL_OUT(req, &src, sizeof(struct in_addr));
			if (retval != 0)
				break;
		}
	}
	IF_ADDR_RUNLOCK(ifp);

	IN_MULTI_UNLOCK();

	return (retval);
}

#ifdef KTR

/* Indexed by MCAST_UNDEFINED / MCAST_INCLUDE / MCAST_EXCLUDE. */
static const char *inm_modestrs[] = { "un", "in", "ex" };

/*
 * Map a multicast filter mode to a short string for KTR traces.
 * Out-of-range values yield "??" rather than indexing past the table.
 */
static const char *
inm_mode_str(const int mode)
{

	if (mode >= MCAST_UNDEFINED && mode <= MCAST_EXCLUDE)
		return (inm_modestrs[mode]);
	return ("??");
}

/* Indexed by the IGMP_* member-state constants; order must match. */
static const char *inm_statestrs[] = {
	"not-member",
	"silent",
	"idle",
	"lazy",
	"sleeping",
	"awakening",
	"query-pending",
	"sg-query-pending",
	"leaving"
};

/*
 * Map an IGMP per-group state to a human-readable string for traces.
 * Out-of-range values yield "??".
 */
static const char *
inm_state_str(const int state)
{

	if (state >= IGMP_NOT_MEMBER && state <= IGMP_LEAVING_MEMBER)
		return (inm_statestrs[state]);
	return ("??");
}

/*
 * Dump an in_multi structure to the console.
 */
void
inm_print(const struct in_multi *inm)
{
	int t;

	/* Skip the work entirely unless IGMPv3 tracing is enabled. */
	if ((ktr_mask & KTR_IGMPV3) == 0)
		return;

	printf("%s: --- begin inm %p ---\n", __func__, inm);
	printf("addr %s ifp %p(%s) ifma %p\n",
	    inet_ntoa(inm->inm_addr),
	    inm->inm_ifp,
	    inm->inm_ifp->if_xname,
	    inm->inm_ifma);
	printf("timer %u state %s refcount %u scq.len %u\n",
	    inm->inm_timer,
	    inm_state_str(inm->inm_state),
	    inm->inm_refcount,
	    inm->inm_scq.ifq_len);
	printf("igi %p nsrc %lu sctimer %u scrv %u\n",
	    inm->inm_igi,
	    inm->inm_nsrc,
	    inm->inm_sctimer,
	    inm->inm_scrv);
	/* Both snapshots: t0 = committed state, t1 = pending state. */
	for (t = 0; t < 2; t++) {
		printf("t%d: fmode %s asm %u ex %u in %u rec %u\n", t,
		    inm_mode_str(inm->inm_st[t].iss_fmode),
		    inm->inm_st[t].iss_asm,
		    inm->inm_st[t].iss_ex,
		    inm->inm_st[t].iss_in,
		    inm->inm_st[t].iss_rec);
	}
	printf("%s: --- end inm %p ---\n", __func__, inm);
}

#else /* !KTR */

/* Empty stub so callers need not be conditional on KTR. */
void
inm_print(const struct in_multi *inm)
{

}

#endif /* KTR */

RB_GENERATE(ip_msource_tree, ip_msource, ims_link, ip_msource_cmp);