/* if_bridge.c, revision 1.100 */
1/* $NetBSD: if_bridge.c,v 1.100 2015/07/23 10:52:34 ozaki-r Exp $ */ 2 3/* 4 * Copyright 2001 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Jason R. Thorpe for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38/* 39 * Copyright (c) 1999, 2000 Jason L. 
Wright (jason@thought.net) 40 * All rights reserved. 41 * 42 * Redistribution and use in source and binary forms, with or without 43 * modification, are permitted provided that the following conditions 44 * are met: 45 * 1. Redistributions of source code must retain the above copyright 46 * notice, this list of conditions and the following disclaimer. 47 * 2. Redistributions in binary form must reproduce the above copyright 48 * notice, this list of conditions and the following disclaimer in the 49 * documentation and/or other materials provided with the distribution. 50 * 3. All advertising materials mentioning features or use of this software 51 * must display the following acknowledgement: 52 * This product includes software developed by Jason L. Wright 53 * 4. The name of the author may not be used to endorse or promote products 54 * derived from this software without specific prior written permission. 55 * 56 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 57 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 58 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 59 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, 60 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 61 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 62 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 64 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN 65 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 66 * POSSIBILITY OF SUCH DAMAGE. 67 * 68 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp 69 */ 70 71/* 72 * Network interface bridge support. 73 * 74 * TODO: 75 * 76 * - Currently only supports Ethernet-like interfaces (Ethernet, 77 * 802.11, VLANs on Ethernet, etc.) 
Figure out a nice way 78 * to bridge other types of interfaces (FDDI-FDDI, and maybe 79 * consider heterogenous bridges). 80 */ 81 82#include <sys/cdefs.h> 83__KERNEL_RCSID(0, "$NetBSD: if_bridge.c,v 1.100 2015/07/23 10:52:34 ozaki-r Exp $"); 84 85#ifdef _KERNEL_OPT 86#include "opt_bridge_ipf.h" 87#include "opt_inet.h" 88#endif /* _KERNEL_OPT */ 89 90#include <sys/param.h> 91#include <sys/kernel.h> 92#include <sys/mbuf.h> 93#include <sys/queue.h> 94#include <sys/socket.h> 95#include <sys/socketvar.h> /* for softnet_lock */ 96#include <sys/sockio.h> 97#include <sys/systm.h> 98#include <sys/proc.h> 99#include <sys/pool.h> 100#include <sys/kauth.h> 101#include <sys/cpu.h> 102#include <sys/cprng.h> 103#include <sys/mutex.h> 104#include <sys/kmem.h> 105 106#include <net/bpf.h> 107#include <net/if.h> 108#include <net/if_dl.h> 109#include <net/if_types.h> 110#include <net/if_llc.h> 111#include <net/pktqueue.h> 112 113#include <net/if_ether.h> 114#include <net/if_bridgevar.h> 115 116#if defined(BRIDGE_IPF) 117/* Used for bridge_ip[6]_checkbasic */ 118#include <netinet/in.h> 119#include <netinet/in_systm.h> 120#include <netinet/ip.h> 121#include <netinet/ip_var.h> 122#include <netinet/ip_private.h> /* XXX */ 123 124#include <netinet/ip6.h> 125#include <netinet6/in6_var.h> 126#include <netinet6/ip6_var.h> 127#include <netinet6/ip6_private.h> /* XXX */ 128#endif /* BRIDGE_IPF */ 129 130/* 131 * Size of the route hash table. Must be a power of two. 
132 */ 133#ifndef BRIDGE_RTHASH_SIZE 134#define BRIDGE_RTHASH_SIZE 1024 135#endif 136 137#define BRIDGE_RTHASH_MASK (BRIDGE_RTHASH_SIZE - 1) 138 139#include "carp.h" 140#if NCARP > 0 141#include <netinet/in.h> 142#include <netinet/in_var.h> 143#include <netinet/ip_carp.h> 144#endif 145 146__CTASSERT(sizeof(struct ifbifconf) == sizeof(struct ifbaconf)); 147__CTASSERT(offsetof(struct ifbifconf, ifbic_len) == offsetof(struct ifbaconf, ifbac_len)); 148__CTASSERT(offsetof(struct ifbifconf, ifbic_buf) == offsetof(struct ifbaconf, ifbac_buf)); 149 150/* 151 * Maximum number of addresses to cache. 152 */ 153#ifndef BRIDGE_RTABLE_MAX 154#define BRIDGE_RTABLE_MAX 100 155#endif 156 157/* 158 * Spanning tree defaults. 159 */ 160#define BSTP_DEFAULT_MAX_AGE (20 * 256) 161#define BSTP_DEFAULT_HELLO_TIME (2 * 256) 162#define BSTP_DEFAULT_FORWARD_DELAY (15 * 256) 163#define BSTP_DEFAULT_HOLD_TIME (1 * 256) 164#define BSTP_DEFAULT_BRIDGE_PRIORITY 0x8000 165#define BSTP_DEFAULT_PORT_PRIORITY 0x80 166#define BSTP_DEFAULT_PATH_COST 55 167 168/* 169 * Timeout (in seconds) for entries learned dynamically. 170 */ 171#ifndef BRIDGE_RTABLE_TIMEOUT 172#define BRIDGE_RTABLE_TIMEOUT (20 * 60) /* same as ARP */ 173#endif 174 175/* 176 * Number of seconds between walks of the route list. 
177 */ 178#ifndef BRIDGE_RTABLE_PRUNE_PERIOD 179#define BRIDGE_RTABLE_PRUNE_PERIOD (5 * 60) 180#endif 181 182#define BRIDGE_RT_INTR_LOCK(_sc) mutex_enter((_sc)->sc_rtlist_intr_lock) 183#define BRIDGE_RT_INTR_UNLOCK(_sc) mutex_exit((_sc)->sc_rtlist_intr_lock) 184#define BRIDGE_RT_INTR_LOCKED(_sc) mutex_owned((_sc)->sc_rtlist_intr_lock) 185 186#define BRIDGE_RT_LOCK(_sc) if ((_sc)->sc_rtlist_lock) \ 187 mutex_enter((_sc)->sc_rtlist_lock) 188#define BRIDGE_RT_UNLOCK(_sc) if ((_sc)->sc_rtlist_lock) \ 189 mutex_exit((_sc)->sc_rtlist_lock) 190#define BRIDGE_RT_LOCKED(_sc) (!(_sc)->sc_rtlist_lock || \ 191 mutex_owned((_sc)->sc_rtlist_lock)) 192 193#define BRIDGE_RT_PSZ_PERFORM(_sc) \ 194 if ((_sc)->sc_rtlist_psz != NULL) \ 195 pserialize_perform((_sc)->sc_rtlist_psz); 196 197#ifdef BRIDGE_MPSAFE 198#define BRIDGE_RT_RENTER(__s) do { \ 199 if (!cpu_intr_p()) \ 200 __s = pserialize_read_enter(); \ 201 else \ 202 __s = splhigh(); \ 203 } while (0) 204#define BRIDGE_RT_REXIT(__s) do { \ 205 if (!cpu_intr_p()) \ 206 pserialize_read_exit(__s); \ 207 else \ 208 splx(__s); \ 209 } while (0) 210#else /* BRIDGE_MPSAFE */ 211#define BRIDGE_RT_RENTER(__s) do { __s = 0; } while (0) 212#define BRIDGE_RT_REXIT(__s) do { (void)__s; } while (0) 213#endif /* BRIDGE_MPSAFE */ 214 215int bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD; 216 217static struct pool bridge_rtnode_pool; 218static struct work bridge_rtage_wk; 219 220void bridgeattach(int); 221 222static int bridge_clone_create(struct if_clone *, int); 223static int bridge_clone_destroy(struct ifnet *); 224 225static int bridge_ioctl(struct ifnet *, u_long, void *); 226static int bridge_init(struct ifnet *); 227static void bridge_stop(struct ifnet *, int); 228static void bridge_start(struct ifnet *); 229 230static void bridge_input(struct ifnet *, struct mbuf *); 231static void bridge_forward(void *); 232 233static void bridge_timer(void *); 234 235static void bridge_broadcast(struct bridge_softc *, struct ifnet *, 236 
struct mbuf *); 237 238static int bridge_rtupdate(struct bridge_softc *, const uint8_t *, 239 struct ifnet *, int, uint8_t); 240static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *); 241static void bridge_rttrim(struct bridge_softc *); 242static void bridge_rtage(struct bridge_softc *); 243static void bridge_rtage_work(struct work *, void *); 244static void bridge_rtflush(struct bridge_softc *, int); 245static int bridge_rtdaddr(struct bridge_softc *, const uint8_t *); 246static void bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp); 247 248static void bridge_rtable_init(struct bridge_softc *); 249static void bridge_rtable_fini(struct bridge_softc *); 250 251static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *, 252 const uint8_t *); 253static int bridge_rtnode_insert(struct bridge_softc *, 254 struct bridge_rtnode *); 255static void bridge_rtnode_remove(struct bridge_softc *, 256 struct bridge_rtnode *); 257static void bridge_rtnode_destroy(struct bridge_rtnode *); 258 259static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *, 260 const char *name); 261static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *, 262 struct ifnet *ifp); 263static void bridge_release_member(struct bridge_softc *, struct bridge_iflist *); 264static void bridge_delete_member(struct bridge_softc *, 265 struct bridge_iflist *); 266static struct bridge_iflist *bridge_try_hold_bif(struct bridge_iflist *); 267 268static int bridge_ioctl_add(struct bridge_softc *, void *); 269static int bridge_ioctl_del(struct bridge_softc *, void *); 270static int bridge_ioctl_gifflags(struct bridge_softc *, void *); 271static int bridge_ioctl_sifflags(struct bridge_softc *, void *); 272static int bridge_ioctl_scache(struct bridge_softc *, void *); 273static int bridge_ioctl_gcache(struct bridge_softc *, void *); 274static int bridge_ioctl_gifs(struct bridge_softc *, void *); 275static int bridge_ioctl_rts(struct bridge_softc 
*, void *);
static int	bridge_ioctl_saddr(struct bridge_softc *, void *);
static int	bridge_ioctl_sto(struct bridge_softc *, void *);
static int	bridge_ioctl_gto(struct bridge_softc *, void *);
static int	bridge_ioctl_daddr(struct bridge_softc *, void *);
static int	bridge_ioctl_flush(struct bridge_softc *, void *);
static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
static int	bridge_ioctl_spri(struct bridge_softc *, void *);
static int	bridge_ioctl_ght(struct bridge_softc *, void *);
static int	bridge_ioctl_sht(struct bridge_softc *, void *);
static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
static int	bridge_ioctl_gma(struct bridge_softc *, void *);
static int	bridge_ioctl_sma(struct bridge_softc *, void *);
static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
#if defined(BRIDGE_IPF)
static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
static int	bridge_ipf(void *, struct mbuf **, struct ifnet *, int);
static int	bridge_ip_checkbasic(struct mbuf **mp);
# ifdef INET6
static int	bridge_ip6_checkbasic(struct mbuf **mp);
# endif /* INET6 */
#endif /* BRIDGE_IPF */

static void bridge_sysctl_fwdq_setup(struct sysctllog **clog,
    struct bridge_softc *sc);

/*
 * One entry in the SIOC[GS]DRVSPEC dispatch table below: the handler,
 * the expected size of its copied-in/out argument, and BC_F_* flags
 * describing how arguments move between userland and the kernel.
 */
struct bridge_control {
	int	(*bc_func)(struct bridge_softc *, void *);
	int	bc_argsize;
	int	bc_flags;
};

#define	BC_F_COPYIN		0x01	/* copy arguments in */
#define	BC_F_COPYOUT		0x02	/* copy arguments out */
#define	BC_F_SUSER		0x04	/* do super-user check */
#define BC_F_XLATEIN		0x08	/* xlate arguments in */
#define BC_F_XLATEOUT		0x10	/* xlate arguments out */

/*
 * Dispatch table for bridge ioctls, indexed directly by ifd->ifd_cmd.
 * Sparse (designated) initialization: unlisted commands have a NULL
 * bc_func, which bridge_ioctl() rejects with EINVAL.
 */
static const struct bridge_control bridge_control_table[] = {
[BRDGADD] = {bridge_ioctl_add, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
[BRDGDEL] = {bridge_ioctl_del, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},

[BRDGGIFFLGS] = {bridge_ioctl_gifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_COPYOUT},
[BRDGSIFFLGS] = {bridge_ioctl_sifflags, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},

[BRDGSCACHE] = {bridge_ioctl_scache, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
[BRDGGCACHE] = {bridge_ioctl_gcache, sizeof(struct ifbrparam), BC_F_COPYOUT},

[OBRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_COPYIN|BC_F_COPYOUT},
[OBRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_COPYIN|BC_F_COPYOUT},

[BRDGSADDR] = {bridge_ioctl_saddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},

[BRDGSTO] = {bridge_ioctl_sto, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
[BRDGGTO] = {bridge_ioctl_gto, sizeof(struct ifbrparam), BC_F_COPYOUT},

[BRDGDADDR] = {bridge_ioctl_daddr, sizeof(struct ifbareq), BC_F_COPYIN|BC_F_SUSER},

[BRDGFLUSH] = {bridge_ioctl_flush, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},

[BRDGGPRI] = {bridge_ioctl_gpri, sizeof(struct ifbrparam), BC_F_COPYOUT},
[BRDGSPRI] = {bridge_ioctl_spri, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},

[BRDGGHT] = {bridge_ioctl_ght, sizeof(struct ifbrparam), BC_F_COPYOUT},
[BRDGSHT] = {bridge_ioctl_sht, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},

[BRDGGFD] = {bridge_ioctl_gfd, sizeof(struct ifbrparam), BC_F_COPYOUT},
[BRDGSFD] = {bridge_ioctl_sfd, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},

[BRDGGMA] = {bridge_ioctl_gma, sizeof(struct ifbrparam), BC_F_COPYOUT},
[BRDGSMA] = {bridge_ioctl_sma, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},

[BRDGSIFPRIO] = {bridge_ioctl_sifprio, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},

[BRDGSIFCOST] = {bridge_ioctl_sifcost, sizeof(struct ifbreq), BC_F_COPYIN|BC_F_SUSER},
#if defined(BRIDGE_IPF)
[BRDGGFILT] = {bridge_ioctl_gfilt, sizeof(struct ifbrparam), BC_F_COPYOUT},
[BRDGSFILT] = {bridge_ioctl_sfilt, sizeof(struct ifbrparam), BC_F_COPYIN|BC_F_SUSER},
#endif /* BRIDGE_IPF */
[BRDGGIFS] = {bridge_ioctl_gifs, sizeof(struct ifbifconf), BC_F_XLATEIN|BC_F_XLATEOUT},
[BRDGRTS] = {bridge_ioctl_rts, sizeof(struct ifbaconf), BC_F_XLATEIN|BC_F_XLATEOUT},
};

static const int bridge_control_table_size = __arraycount(bridge_control_table);

/* Global list of all bridge instances, protected by bridge_list_lock. */
static LIST_HEAD(, bridge_softc) bridge_list;
static kmutex_t bridge_list_lock;

static struct if_clone bridge_cloner =
    IF_CLONE_INITIALIZER("bridge", bridge_clone_create, bridge_clone_destroy);

/*
 * bridgeattach:
 *
 *	Pseudo-device attach routine.  Sets up the rtnode pool, the
 *	global bridge list and registers the "bridge" interface cloner.
 *	The 'n' argument (number of pseudo-devices) is unused.
 */
void
bridgeattach(int n)
{

	pool_init(&bridge_rtnode_pool, sizeof(struct bridge_rtnode),
	    0, 0, 0, "brtpl", NULL, IPL_NET);

	LIST_INIT(&bridge_list);
	mutex_init(&bridge_list_lock, MUTEX_DEFAULT, IPL_NET);
	if_clone_attach(&bridge_cloner);
}

/*
 * bridge_clone_create:
 *
 *	Create a new bridge instance: allocate the softc, set STP and
 *	route-table defaults, create the forwarding pktqueue and the
 *	rtage workqueue, then attach the ifnet.
 */
static int
bridge_clone_create(struct if_clone *ifc, int unit)
{
	struct bridge_softc *sc;
	struct ifnet *ifp;
	int error, flags;

	sc = kmem_zalloc(sizeof(*sc), KM_SLEEP);
	ifp = &sc->sc_if;

	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
	sc->sc_bridge_max_age = BSTP_DEFAULT_MAX_AGE;
	sc->sc_bridge_hello_time = BSTP_DEFAULT_HELLO_TIME;
	sc->sc_bridge_forward_delay = BSTP_DEFAULT_FORWARD_DELAY;
	sc->sc_bridge_priority = BSTP_DEFAULT_BRIDGE_PRIORITY;
	sc->sc_hold_time = BSTP_DEFAULT_HOLD_TIME;
	sc->sc_filter_flags = 0;

	/* Initialize our routing table. */
	bridge_rtable_init(sc);

#ifdef BRIDGE_MPSAFE
	flags = WQ_MPSAFE;
#else
	flags = 0;
#endif
	/*
	 * NOTE(review): failure here panics instead of unwinding the
	 * partially-built softc; acceptable only because workqueue
	 * creation failure at clone time is considered fatal.
	 */
	error = workqueue_create(&sc->sc_rtage_wq, "bridge_rtage",
	    bridge_rtage_work, sc, PRI_SOFTNET, IPL_SOFTNET, flags);
	if (error)
		panic("%s: workqueue_create %d\n", __func__, error);

	callout_init(&sc->sc_brcallout, 0);
	callout_init(&sc->sc_bstpcallout, 0);

	LIST_INIT(&sc->sc_iflist);
#ifdef BRIDGE_MPSAFE
	sc->sc_iflist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
	sc->sc_iflist_psz = pserialize_create();
	sc->sc_iflist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
#else
	/* Non-MPSAFE build: iflist locking macros degrade to no-ops. */
	sc->sc_iflist_intr_lock = NULL;
	sc->sc_iflist_psz = NULL;
	sc->sc_iflist_lock = NULL;
#endif
	cv_init(&sc->sc_iflist_cv, "if_bridge_cv");

	if_initname(ifp, ifc->ifc_name, unit);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_ioctl = bridge_ioctl;
	ifp->if_output = bridge_output;
	ifp->if_start = bridge_start;
	ifp->if_stop = bridge_stop;
	ifp->if_init = bridge_init;
	ifp->if_type = IFT_BRIDGE;
	ifp->if_addrlen = 0;
	ifp->if_dlt = DLT_EN10MB;
	ifp->if_hdrlen = ETHER_HDR_LEN;

	sc->sc_fwd_pktq = pktq_create(IFQ_MAXLEN, bridge_forward, sc);
	KASSERT(sc->sc_fwd_pktq != NULL);

	bridge_sysctl_fwdq_setup(&ifp->if_sysctl_log, sc);

	if_attach(ifp);

	if_alloc_sadl(ifp);

	mutex_enter(&bridge_list_lock);
	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
	mutex_exit(&bridge_list_lock);

	return (0);
}

/*
 * bridge_clone_destroy:
 *
 *	Destroy a bridge instance.
 */
static int
bridge_clone_destroy(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct bridge_iflist *bif;
	int s;

	/* Must be called during IFF_RUNNING, i.e., before bridge_stop */
	pktq_barrier(sc->sc_fwd_pktq);

	s = splnet();

	bridge_stop(ifp, 1);

	/* Detach every member interface before tearing the bridge down. */
	BRIDGE_LOCK(sc);
	while ((bif = LIST_FIRST(&sc->sc_iflist)) != NULL)
		bridge_delete_member(sc, bif);
	BRIDGE_UNLOCK(sc);

	mutex_enter(&bridge_list_lock);
	LIST_REMOVE(sc, sc_list);
	mutex_exit(&bridge_list_lock);

	splx(s);

	if_detach(ifp);

	/* Should be called after if_detach for safe */
	pktq_flush(sc->sc_fwd_pktq);
	pktq_destroy(sc->sc_fwd_pktq);

	/* Tear down the routing table. */
	bridge_rtable_fini(sc);

	cv_destroy(&sc->sc_iflist_cv);
	if (sc->sc_iflist_intr_lock)
		mutex_obj_free(sc->sc_iflist_intr_lock);

	if (sc->sc_iflist_psz)
		pserialize_destroy(sc->sc_iflist_psz);
	if (sc->sc_iflist_lock)
		mutex_obj_free(sc->sc_iflist_lock);

	workqueue_destroy(sc->sc_rtage_wq);

	kmem_free(sc, sizeof(*sc));

	return (0);
}

/* sysctl handler: read/write the forwarding pktqueue's maximum length. */
static int
bridge_sysctl_fwdq_maxlen(SYSCTLFN_ARGS)
{
	struct sysctlnode node = *rnode;
	const struct bridge_softc *sc = node.sysctl_data;
	return sysctl_pktq_maxlen(SYSCTLFN_CALL(rnode), sc->sc_fwd_pktq);
}

/*
 * Generate a read-only sysctl handler named bridge_sysctl_fwdq_<cn>
 * reporting pktqueue counter 'c' of the forwarding queue.
 */
#define SYSCTL_BRIDGE_PKTQ(cn, c)					\
	static int							\
	bridge_sysctl_fwdq_##cn(SYSCTLFN_ARGS)				\
	{								\
		struct sysctlnode node = *rnode;			\
		const struct bridge_softc *sc = node.sysctl_data;	\
		return sysctl_pktq_count(SYSCTLFN_CALL(rnode),		\
		    sc->sc_fwd_pktq, c);				\
	}

SYSCTL_BRIDGE_PKTQ(items, PKTQ_NITEMS)
SYSCTL_BRIDGE_PKTQ(drops, PKTQ_DROPS)

/*
 * Attach net.interfaces.<ifname>.fwdq.{len,maxlen,drops} sysctl nodes
 * for this bridge's forwarding queue.  Failures are reported but
 * otherwise non-fatal.
 */
static void
bridge_sysctl_fwdq_setup(struct sysctllog **clog, struct bridge_softc *sc)
{
	const struct sysctlnode *cnode, *rnode;
	sysctlfn len_func = NULL, maxlen_func = NULL, drops_func = NULL;
	const char *ifname = sc->sc_if.if_xname;

	len_func = bridge_sysctl_fwdq_items;
	maxlen_func = bridge_sysctl_fwdq_maxlen;
	drops_func = bridge_sysctl_fwdq_drops;

	if (sysctl_createv(clog, 0, NULL, &rnode,
			   CTLFLAG_PERMANENT,
			   CTLTYPE_NODE, "interfaces",
			   SYSCTL_DESCR("Per-interface controls"),
			   NULL, 0, NULL, 0,
			   CTL_NET, CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &rnode,
			   CTLFLAG_PERMANENT,
			   CTLTYPE_NODE, ifname,
			   SYSCTL_DESCR("Interface controls"),
			   NULL, 0, NULL, 0,
			   CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &rnode,
			   CTLFLAG_PERMANENT,
			   CTLTYPE_NODE, "fwdq",
			   SYSCTL_DESCR("Protocol input queue controls"),
			   NULL, 0, NULL, 0,
			   CTL_CREATE, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
			   CTLFLAG_PERMANENT,
			   CTLTYPE_INT, "len",
			   SYSCTL_DESCR("Current forwarding queue length"),
			   len_func, 0, (void *)sc, 0,
			   CTL_CREATE, IFQCTL_LEN, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
			   CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
			   CTLTYPE_INT, "maxlen",
			   SYSCTL_DESCR("Maximum allowed forwarding queue length"),
			   maxlen_func, 0, (void *)sc, 0,
			   CTL_CREATE, IFQCTL_MAXLEN, CTL_EOL) != 0)
		goto bad;

	if (sysctl_createv(clog, 0, &rnode, &cnode,
			   CTLFLAG_PERMANENT,
			   CTLTYPE_INT, "drops",
			   SYSCTL_DESCR("Packets dropped due to full forwarding queue"),
			   drops_func, 0, (void *)sc, 0,
			   CTL_CREATE, IFQCTL_DROPS, CTL_EOL) != 0)
		goto bad;

	return;
bad:
	aprint_error("%s: could not attach sysctl nodes\n", ifname);
	return;
}

/*
 * bridge_ioctl:
 *
 *	Handle a control request from the operator.
 */
static int
bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct bridge_softc *sc = ifp->if_softc;
	struct lwp *l = curlwp;	/* XXX */
	/* Scratch area big enough for any bridge ioctl argument type. */
	union {
		struct ifbreq ifbreq;
		struct ifbifconf ifbifconf;
		struct ifbareq ifbareq;
		struct ifbaconf ifbaconf;
		struct ifbrparam ifbrparam;
	} args;
	struct ifdrv *ifd = (struct ifdrv *) data;
	const struct bridge_control *bc = NULL; /* XXXGCC */
	int s, error = 0;

	/* Authorize command before calling splnet(). */
	switch (cmd) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		/* Reject out-of-range or unpopulated table slots. */
		if (ifd->ifd_cmd >= bridge_control_table_size
		    || (bc = &bridge_control_table[ifd->ifd_cmd]) == NULL) {
			error = EINVAL;
			return error;
		}

		/* We only care about BC_F_SUSER at this point. */
		if ((bc->bc_flags & BC_F_SUSER) == 0)
			break;

		error = kauth_authorize_network(l->l_cred,
		    KAUTH_NETWORK_INTERFACE_BRIDGE,
		    cmd == SIOCGDRVSPEC ?
		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_GETPRIV :
		     KAUTH_REQ_NETWORK_INTERFACE_BRIDGE_SETPRIV,
		    ifd, NULL, NULL);
		if (error)
			return (error);

		break;
	}

	s = splnet();

	switch (cmd) {
	case SIOCGDRVSPEC:
	case SIOCSDRVSPEC:
		KASSERT(bc != NULL);
		/* GET must produce output; SET must not. */
		if (cmd == SIOCGDRVSPEC &&
		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) == 0) {
			error = EINVAL;
			break;
		}
		else if (cmd == SIOCSDRVSPEC &&
		    (bc->bc_flags & (BC_F_COPYOUT|BC_F_XLATEOUT)) != 0) {
			error = EINVAL;
			break;
		}

		/* BC_F_SUSER is checked above, before splnet(). */

		/* Non-XLATE commands carry a fixed-size argument. */
		if ((bc->bc_flags & (BC_F_XLATEIN|BC_F_XLATEOUT)) == 0
		    && (ifd->ifd_len != bc->bc_argsize
			|| ifd->ifd_len > sizeof(args))) {
			error = EINVAL;
			break;
		}

		memset(&args, 0, sizeof(args));
		if (bc->bc_flags & BC_F_COPYIN) {
			error = copyin(ifd->ifd_data, &args, ifd->ifd_len);
			if (error)
				break;
		} else if (bc->bc_flags & BC_F_XLATEIN) {
			/* XLATE: pass the user buffer through ifbifconf. */
			args.ifbifconf.ifbic_len = ifd->ifd_len;
			args.ifbifconf.ifbic_buf = ifd->ifd_data;
		}

		error = (*bc->bc_func)(sc, &args);
		if (error)
			break;

		if (bc->bc_flags & BC_F_COPYOUT) {
			error = copyout(&args, ifd->ifd_data, ifd->ifd_len);
		} else if (bc->bc_flags & BC_F_XLATEOUT) {
			ifd->ifd_len = args.ifbifconf.ifbic_len;
			ifd->ifd_data = args.ifbifconf.ifbic_buf;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			(*ifp->if_stop)(ifp, 1);
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			error = (*ifp->if_init)(ifp);
			break;
		default:
			break;
		}
		break;

	case SIOCSIFMTU:
		if ((error = ifioctl_common(ifp, cmd, data)) == ENETRESET)
			error = 0;
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}

	splx(s);

	return (error);
}

/*
 * bridge_lookup_member:
 *
 *	Lookup a bridge member interface.
 */
static struct bridge_iflist *
bridge_lookup_member(struct bridge_softc *sc, const char *name)
{
	struct bridge_iflist *bif;
	struct ifnet *ifp;
	int s;

	BRIDGE_PSZ_RENTER(s);

	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifp = bif->bif_ifp;
		if (strcmp(ifp->if_xname, name) == 0)
			break;
	}
	/* Take a reference under the read section; NULL if being deleted. */
	bif = bridge_try_hold_bif(bif);

	BRIDGE_PSZ_REXIT(s);

	return bif;
}

/*
 * bridge_lookup_member_if:
 *
 *	Lookup a bridge member interface by ifnet*.
 */
static struct bridge_iflist *
bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
{
	struct bridge_iflist *bif;
	int s;

	BRIDGE_PSZ_RENTER(s);

	bif = member_ifp->if_bridgeif;
	bif = bridge_try_hold_bif(bif);

	BRIDGE_PSZ_REXIT(s);

	return bif;
}

/*
 * Try to take a reference on a member.  Fails (returns NULL) when the
 * member is already marked bif_waiting, i.e. mid-deletion in
 * bridge_delete_member.  In non-MPSAFE builds this is a no-op.
 */
static struct bridge_iflist *
bridge_try_hold_bif(struct bridge_iflist *bif)
{
#ifdef BRIDGE_MPSAFE
	if (bif != NULL) {
		if (bif->bif_waiting)
			bif = NULL;
		else
			atomic_inc_32(&bif->bif_refs);
	}
#endif
	return bif;
}

/*
 * bridge_release_member:
 *
 *	Release the specified member interface.
 */
static void
bridge_release_member(struct bridge_softc *sc, struct bridge_iflist *bif)
{
#ifdef BRIDGE_MPSAFE
	uint32_t refs;

	refs = atomic_dec_uint_nv(&bif->bif_refs);
	/* Last reference gone while a deleter waits: wake it up. */
	if (__predict_false(refs == 0 && bif->bif_waiting)) {
		BRIDGE_INTR_LOCK(sc);
		cv_broadcast(&sc->sc_iflist_cv);
		BRIDGE_INTR_UNLOCK(sc);
	}
#else
	(void)sc;
	(void)bif;
#endif
}

/*
 * bridge_delete_member:
 *
 *	Delete the specified member interface.
 */
static void
bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif)
{
	struct ifnet *ifs = bif->bif_ifp;

	/* Caller holds the iflist lock; we temporarily drop it below. */
	KASSERT(BRIDGE_LOCKED(sc));

	ifs->if_input = ether_input;
	ifs->if_bridge = NULL;
	ifs->if_bridgeif = NULL;

	LIST_REMOVE(bif, bif_next);

	/* Wait for all pserialize readers of the iflist to drain. */
	BRIDGE_PSZ_PERFORM(sc);

	BRIDGE_UNLOCK(sc);

#ifdef BRIDGE_MPSAFE
	/* Block new references and wait for existing holders to drop. */
	BRIDGE_INTR_LOCK(sc);
	bif->bif_waiting = true;
	membar_sync();
	while (bif->bif_refs > 0) {
		aprint_debug("%s: cv_wait on iflist\n", __func__);
		cv_wait(&sc->sc_iflist_cv, sc->sc_iflist_intr_lock);
	}
	bif->bif_waiting = false;
	BRIDGE_INTR_UNLOCK(sc);
#endif

	kmem_free(bif, sizeof(*bif));

	/* Reacquire for the caller, preserving its locking contract. */
	BRIDGE_LOCK(sc);
}

/*
 * BRDGADD handler: attach an interface as a new bridge member.
 */
static int
bridge_ioctl_add(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif = NULL;
	struct ifnet *ifs;
	int error = 0;

	ifs = ifunit(req->ifbr_ifsname);
	if (ifs == NULL)
		return (ENOENT);

	if (sc->sc_if.if_mtu != ifs->if_mtu)
		return (EINVAL);

	if (ifs->if_bridge == sc)
		return (EEXIST);

	if (ifs->if_bridge != NULL)
		return (EBUSY);

	if (ifs->if_input != ether_input)
		return EINVAL;

	/* FIXME: doesn't work with non-IFF_SIMPLEX interfaces */
	if ((ifs->if_flags & IFF_SIMPLEX) == 0)
		return EINVAL;

	bif = kmem_alloc(sizeof(*bif), KM_SLEEP);

	switch (ifs->if_type) {
	case IFT_ETHER:
		/*
		 * Place the interface into promiscuous mode.
		 */
		error = ifpromisc(ifs, 1);
		if (error)
			goto out;
		break;
	default:
		/* Only Ethernet-like interfaces are supported. */
		error = EINVAL;
		goto out;
	}

	bif->bif_ifp = ifs;
	bif->bif_flags = IFBIF_LEARNING | IFBIF_DISCOVER;
	bif->bif_priority = BSTP_DEFAULT_PORT_PRIORITY;
	bif->bif_path_cost = BSTP_DEFAULT_PATH_COST;
	bif->bif_refs = 0;
	bif->bif_waiting = false;

	BRIDGE_LOCK(sc);

	/* Publish the member; input now flows through bridge_input. */
	ifs->if_bridge = sc;
	ifs->if_bridgeif = bif;
	LIST_INSERT_HEAD(&sc->sc_iflist, bif, bif_next);
	ifs->if_input = bridge_input;

	BRIDGE_UNLOCK(sc);

	if (sc->sc_if.if_flags & IFF_RUNNING)
		bstp_initialization(sc);
	else
		bstp_stop(sc);

 out:
	if (error) {
		if (bif != NULL)
			kmem_free(bif, sizeof(*bif));
	}
	return (error);
}

/*
 * BRDGDEL handler: detach a member interface from the bridge.
 */
static int
bridge_ioctl_del(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	const char *name = req->ifbr_ifsname;
	struct bridge_iflist *bif;
	struct ifnet *ifs;

	BRIDGE_LOCK(sc);

	/*
	 * Don't use bridge_lookup_member. We want to get a member
	 * with bif_refs == 0.
	 */
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		ifs = bif->bif_ifp;
		if (strcmp(ifs->if_xname, name) == 0)
			break;
	}

	if (bif == NULL) {
		BRIDGE_UNLOCK(sc);
		return ENOENT;
	}

	bridge_delete_member(sc, bif);

	BRIDGE_UNLOCK(sc);

	switch (ifs->if_type) {
	case IFT_ETHER:
		/*
		 * Take the interface out of promiscuous mode.
		 * Don't call it with holding a spin lock.
		 */
		(void) ifpromisc(ifs, 0);
		break;
	default:
#ifdef DIAGNOSTIC
		/* Only IFT_ETHER members can have been added. */
		panic("bridge_delete_member: impossible");
#endif
		break;
	}

	/* Drop all cached routes learned via the departing interface. */
	bridge_rtdelete(sc, ifs);

	if (sc->sc_if.if_flags & IFF_RUNNING)
		bstp_initialization(sc);

	return 0;
}

/*
 * BRDGGIFFLGS handler: report a member's flags, STP state, priority,
 * path cost and (truncated, low 8 bits) port number.
 */
static int
bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	req->ifbr_ifsflags = bif->bif_flags;
	req->ifbr_state = bif->bif_state;
	req->ifbr_priority = bif->bif_priority;
	req->ifbr_path_cost = bif->bif_path_cost;
	req->ifbr_portno = bif->bif_ifp->if_index & 0xff;

	bridge_release_member(sc, bif);

	return (0);
}

/*
 * BRDGSIFFLGS handler: set a member's flags; IFBIF_STP is only valid
 * on interface types that can participate in spanning tree.
 */
static int
bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	if (req->ifbr_ifsflags & IFBIF_STP) {
		switch (bif->bif_ifp->if_type) {
		case IFT_ETHER:
			/* These can do spanning tree. */
			break;

		default:
			/* Nothing else can.
 */
			bridge_release_member(sc, bif);
			return (EINVAL);
		}
	}

	bif->bif_flags = req->ifbr_ifsflags;

	bridge_release_member(sc, bif);

	if (sc->sc_if.if_flags & IFF_RUNNING)
		bstp_initialization(sc);

	return (0);
}

/*
 * BRDGSCACHE handler: set the address-cache size limit and trim the
 * route table down to it.
 */
static int
bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	sc->sc_brtmax = param->ifbrp_csize;
	bridge_rttrim(sc);

	return (0);
}

/* BRDGGCACHE handler: report the address-cache size limit. */
static int
bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_csize = sc->sc_brtmax;

	return (0);
}

/*
 * BRDGGIFS/OBRDGGIFS handler: copy out one ifbreq per member.  Counts
 * members unlocked first, then re-checks under the lock and retries
 * with a bigger buffer if the list grew in between.
 */
static int
bridge_ioctl_gifs(struct bridge_softc *sc, void *arg)
{
	struct ifbifconf *bifc = arg;
	struct bridge_iflist *bif;
	struct ifbreq *breqs;
	int i, count, error = 0;

retry:
	BRIDGE_LOCK(sc);
	count = 0;
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
		count++;
	BRIDGE_UNLOCK(sc);

	if (count == 0) {
		bifc->ifbic_len = 0;
		return 0;
	}

	if (bifc->ifbic_len == 0 || bifc->ifbic_len < (sizeof(*breqs) * count)) {
		/* Tell that a larger buffer is needed */
		bifc->ifbic_len = sizeof(*breqs) * count;
		return 0;
	}

	breqs = kmem_alloc(sizeof(*breqs) * count, KM_SLEEP);

	BRIDGE_LOCK(sc);

	i = 0;
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next)
		i++;
	if (i > count) {
		/*
		 * The number of members has been increased.
		 * We need more memory!
1101 */ 1102 BRIDGE_UNLOCK(sc); 1103 kmem_free(breqs, sizeof(*breqs) * count); 1104 goto retry; 1105 } 1106 1107 i = 0; 1108 LIST_FOREACH(bif, &sc->sc_iflist, bif_next) { 1109 struct ifbreq *breq = &breqs[i++]; 1110 memset(breq, 0, sizeof(*breq)); 1111 1112 strlcpy(breq->ifbr_ifsname, bif->bif_ifp->if_xname, 1113 sizeof(breq->ifbr_ifsname)); 1114 breq->ifbr_ifsflags = bif->bif_flags; 1115 breq->ifbr_state = bif->bif_state; 1116 breq->ifbr_priority = bif->bif_priority; 1117 breq->ifbr_path_cost = bif->bif_path_cost; 1118 breq->ifbr_portno = bif->bif_ifp->if_index & 0xff; 1119 } 1120 1121 /* Don't call copyout with holding the mutex */ 1122 BRIDGE_UNLOCK(sc); 1123 1124 for (i = 0; i < count; i++) { 1125 error = copyout(&breqs[i], bifc->ifbic_req + i, sizeof(*breqs)); 1126 if (error) 1127 break; 1128 } 1129 bifc->ifbic_len = sizeof(*breqs) * i; 1130 1131 kmem_free(breqs, sizeof(*breqs) * count); 1132 1133 return error; 1134} 1135 1136static int 1137bridge_ioctl_rts(struct bridge_softc *sc, void *arg) 1138{ 1139 struct ifbaconf *bac = arg; 1140 struct bridge_rtnode *brt; 1141 struct ifbareq bareq; 1142 int count = 0, error = 0, len; 1143 1144 if (bac->ifbac_len == 0) 1145 return (0); 1146 1147 BRIDGE_RT_INTR_LOCK(sc); 1148 1149 len = bac->ifbac_len; 1150 LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) { 1151 if (len < sizeof(bareq)) 1152 goto out; 1153 memset(&bareq, 0, sizeof(bareq)); 1154 strlcpy(bareq.ifba_ifsname, brt->brt_ifp->if_xname, 1155 sizeof(bareq.ifba_ifsname)); 1156 memcpy(bareq.ifba_dst, brt->brt_addr, sizeof(brt->brt_addr)); 1157 if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) { 1158 bareq.ifba_expire = brt->brt_expire - time_uptime; 1159 } else 1160 bareq.ifba_expire = 0; 1161 bareq.ifba_flags = brt->brt_flags; 1162 1163 error = copyout(&bareq, bac->ifbac_req + count, sizeof(bareq)); 1164 if (error) 1165 goto out; 1166 count++; 1167 len -= sizeof(bareq); 1168 } 1169 out: 1170 BRIDGE_RT_INTR_UNLOCK(sc); 1171 1172 bac->ifbac_len = sizeof(bareq) * 
count;
	return (error);
}

/*
 * bridge_ioctl_saddr:
 *
 *	Add or update a forwarding-table entry for a member interface
 *	(setflags = 1, so the caller-supplied flags are installed).
 */
static int
bridge_ioctl_saddr(struct bridge_softc *sc, void *arg)
{
	struct ifbareq *req = arg;
	struct bridge_iflist *bif;
	int error;

	bif = bridge_lookup_member(sc, req->ifba_ifsname);
	if (bif == NULL)
		return (ENOENT);

	error = bridge_rtupdate(sc, req->ifba_dst, bif->bif_ifp, 1,
	    req->ifba_flags);

	bridge_release_member(sc, bif);

	return (error);
}

/*
 * bridge_ioctl_sto:
 *
 *	Set the timeout (seconds) for dynamic forwarding entries.
 */
static int
bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	sc->sc_brttimeout = param->ifbrp_ctime;

	return (0);
}

/*
 * bridge_ioctl_gto:
 *
 *	Report the timeout for dynamic forwarding entries.
 */
static int
bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_ctime = sc->sc_brttimeout;

	return (0);
}

/*
 * bridge_ioctl_daddr:
 *
 *	Delete one forwarding-table entry by destination address.
 */
static int
bridge_ioctl_daddr(struct bridge_softc *sc, void *arg)
{
	struct ifbareq *req = arg;

	return (bridge_rtdaddr(sc, req->ifba_dst));
}

/*
 * bridge_ioctl_flush:
 *
 *	Flush forwarding-table entries; ifbr_ifsflags selects full
 *	or dynamic-only flush.
 */
static int
bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;

	bridge_rtflush(sc, req->ifbr_ifsflags);

	return (0);
}

/*
 * bridge_ioctl_gpri:
 *
 *	Report the bridge's spanning-tree priority.
 */
static int
bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_prio = sc->sc_bridge_priority;

	return (0);
}

/*
 * bridge_ioctl_spri:
 *
 *	Set the bridge's spanning-tree priority and recompute the
 *	spanning tree if the bridge is running.
 */
static int
bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	sc->sc_bridge_priority = param->ifbrp_prio;

	if (sc->sc_if.if_flags & IFF_RUNNING)
		bstp_initialization(sc);

	return (0);
}

/*
 * bridge_ioctl_ght:
 *
 *	Report the STP hello time.  The value is stored internally
 *	left-shifted by 8; shift back for the caller.
 */
static int
bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;

	param->ifbrp_hellotime = sc->sc_bridge_hello_time >> 8;

	return (0);
}

/*
 * bridge_ioctl_sht:
 *
 *	Set the STP hello time (stored internally shifted left by 8);
 *	zero is rejected.
 */
static int
bridge_ioctl_sht(struct bridge_softc *sc,
void *arg) 1268{ 1269 struct ifbrparam *param = arg; 1270 1271 if (param->ifbrp_hellotime == 0) 1272 return (EINVAL); 1273 sc->sc_bridge_hello_time = param->ifbrp_hellotime << 8; 1274 1275 if (sc->sc_if.if_flags & IFF_RUNNING) 1276 bstp_initialization(sc); 1277 1278 return (0); 1279} 1280 1281static int 1282bridge_ioctl_gfd(struct bridge_softc *sc, void *arg) 1283{ 1284 struct ifbrparam *param = arg; 1285 1286 param->ifbrp_fwddelay = sc->sc_bridge_forward_delay >> 8; 1287 1288 return (0); 1289} 1290 1291static int 1292bridge_ioctl_sfd(struct bridge_softc *sc, void *arg) 1293{ 1294 struct ifbrparam *param = arg; 1295 1296 if (param->ifbrp_fwddelay == 0) 1297 return (EINVAL); 1298 sc->sc_bridge_forward_delay = param->ifbrp_fwddelay << 8; 1299 1300 if (sc->sc_if.if_flags & IFF_RUNNING) 1301 bstp_initialization(sc); 1302 1303 return (0); 1304} 1305 1306static int 1307bridge_ioctl_gma(struct bridge_softc *sc, void *arg) 1308{ 1309 struct ifbrparam *param = arg; 1310 1311 param->ifbrp_maxage = sc->sc_bridge_max_age >> 8; 1312 1313 return (0); 1314} 1315 1316static int 1317bridge_ioctl_sma(struct bridge_softc *sc, void *arg) 1318{ 1319 struct ifbrparam *param = arg; 1320 1321 if (param->ifbrp_maxage == 0) 1322 return (EINVAL); 1323 sc->sc_bridge_max_age = param->ifbrp_maxage << 8; 1324 1325 if (sc->sc_if.if_flags & IFF_RUNNING) 1326 bstp_initialization(sc); 1327 1328 return (0); 1329} 1330 1331static int 1332bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg) 1333{ 1334 struct ifbreq *req = arg; 1335 struct bridge_iflist *bif; 1336 1337 bif = bridge_lookup_member(sc, req->ifbr_ifsname); 1338 if (bif == NULL) 1339 return (ENOENT); 1340 1341 bif->bif_priority = req->ifbr_priority; 1342 1343 if (sc->sc_if.if_flags & IFF_RUNNING) 1344 bstp_initialization(sc); 1345 1346 bridge_release_member(sc, bif); 1347 1348 return (0); 1349} 1350 1351#if defined(BRIDGE_IPF) 1352static int 1353bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg) 1354{ 1355 struct ifbrparam *param = 
arg;

	param->ifbrp_filter = sc->sc_filter_flags;

	return (0);
}

/*
 * bridge_ioctl_sfilt:
 *
 *	Set the bridge's packet-filter flags, attaching or detaching
 *	the bridge_ipf pfil hook as IFBF_FILT_USEIPF toggles.
 */
static int
bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
{
	struct ifbrparam *param = arg;
	uint32_t nflags, oflags;

	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
		return (EINVAL);

	nflags = param->ifbrp_filter;
	oflags = sc->sc_filter_flags;

	/* Hook IPF filtering in when it is being newly enabled ... */
	if ((nflags & IFBF_FILT_USEIPF) && !(oflags & IFBF_FILT_USEIPF)) {
		pfil_add_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
		    sc->sc_if.if_pfil);
	}
	/* ... and unhook it when it is being disabled. */
	if (!(nflags & IFBF_FILT_USEIPF) && (oflags & IFBF_FILT_USEIPF)) {
		pfil_remove_hook((void *)bridge_ipf, NULL, PFIL_IN|PFIL_OUT,
		    sc->sc_if.if_pfil);
	}

	sc->sc_filter_flags = nflags;

	return (0);
}
#endif /* BRIDGE_IPF */

/*
 * bridge_ioctl_sifcost:
 *
 *	Set one member's spanning-tree path cost.
 */
static int
bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
{
	struct ifbreq *req = arg;
	struct bridge_iflist *bif;

	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
	if (bif == NULL)
		return (ENOENT);

	bif->bif_path_cost = req->ifbr_path_cost;

	if (sc->sc_if.if_flags & IFF_RUNNING)
		bstp_initialization(sc);

	bridge_release_member(sc, bif);

	return (0);
}

/*
 * bridge_ifdetach:
 *
 *	Detach an interface from a bridge.  Called when a member
 *	interface is detaching.
 */
void
bridge_ifdetach(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct ifbreq breq;

	/* ioctl_lock should prevent this from happening */
	KASSERT(sc != NULL);

	/* Reuse the ioctl deletion path to remove ourselves. */
	memset(&breq, 0, sizeof(breq));
	strlcpy(breq.ifbr_ifsname, ifp->if_xname, sizeof(breq.ifbr_ifsname));

	(void) bridge_ioctl_del(sc, &breq);
}

/*
 * bridge_init:
 *
 *	Initialize a bridge interface.
 */
static int
bridge_init(struct ifnet *ifp)
{
	struct bridge_softc *sc = ifp->if_softc;

	if (ifp->if_flags & IFF_RUNNING)
		return (0);

	/* Start the periodic forwarding-table aging timer. */
	callout_reset(&sc->sc_brcallout, bridge_rtable_prune_period * hz,
	    bridge_timer, sc);

	ifp->if_flags |= IFF_RUNNING;
	bstp_initialization(sc);
	return (0);
}

/*
 * bridge_stop:
 *
 *	Stop the bridge interface.
 */
static void
bridge_stop(struct ifnet *ifp, int disable)
{
	struct bridge_softc *sc = ifp->if_softc;

	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return;

	callout_stop(&sc->sc_brcallout);
	bstp_stop(sc);

	/* Static entries survive a stop; only dynamic ones are flushed. */
	bridge_rtflush(sc, IFBF_FLUSHDYN);

	ifp->if_flags &= ~IFF_RUNNING;
}

/*
 * bridge_enqueue:
 *
 *	Enqueue a packet on a bridge member interface.  Runs the PFIL
 *	output hooks when 'runfilt' is set, then hands the mbuf to the
 *	member's send queue and kicks its if_start.  Consumes the mbuf.
 */
void
bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m,
    int runfilt)
{
	ALTQ_DECL(struct altq_pktattr pktattr;)
	int len, error;
	short mflags;

	/*
	 * Clear any in-bound checksum flags for this packet.
	 */
	m->m_pkthdr.csum_flags = 0;

	if (runfilt) {
		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
		    dst_ifp, PFIL_OUT) != 0) {
			if (m != NULL)
				m_freem(m);
			return;
		}
		if (m == NULL)
			return;
	}

#ifdef ALTQ
	/*
	 * If ALTQ is enabled on the member interface, do
	 * classification; the queueing discipline might
	 * not require classification, but might require
	 * the address family/header pointer in the pktattr.
	 */
	if (ALTQ_IS_ENABLED(&dst_ifp->if_snd)) {
		/* XXX IFT_ETHER */
		altq_etherclassify(&dst_ifp->if_snd, m, &pktattr);
	}
#endif /* ALTQ */

	/* Snapshot length/flags; the mbuf may be gone after IFQ_ENQUEUE. */
	len = m->m_pkthdr.len;
	m->m_flags |= M_PROTO1;
	mflags = m->m_flags;

	IFQ_ENQUEUE(&dst_ifp->if_snd, m, &pktattr, error);

	if (error) {
		/* mbuf is already freed */
		sc->sc_if.if_oerrors++;
		return;
	}

	sc->sc_if.if_opackets++;
	sc->sc_if.if_obytes += len;

	dst_ifp->if_obytes += len;

	if (mflags & M_MCAST) {
		sc->sc_if.if_omcasts++;
		dst_ifp->if_omcasts++;
	}

	if ((dst_ifp->if_flags & IFF_OACTIVE) == 0)
		(*dst_ifp->if_start)(dst_ifp);
}

/*
 * bridge_output:
 *
 *	Send output from a bridge member interface.  This
 *	performs the bridging function for locally originated
 *	packets.
 *
 *	The mbuf has the Ethernet header already attached.  We must
 *	enqueue or free the mbuf before returning.
 */
int
bridge_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *sa,
    struct rtentry *rt)
{
	struct ether_header *eh;
	struct ifnet *dst_if;
	struct bridge_softc *sc;
#ifndef BRIDGE_MPSAFE
	int s;
#endif

	/* Make sure the whole Ethernet header is contiguous. */
	if (m->m_len < ETHER_HDR_LEN) {
		m = m_pullup(m, ETHER_HDR_LEN);
		if (m == NULL)
			return (0);
	}

	eh = mtod(m, struct ether_header *);
	sc = ifp->if_bridge;

#ifndef BRIDGE_MPSAFE
	s = splnet();
#endif

	/*
	 * If bridge is down, but the original output interface is up,
	 * go ahead and send out that interface.  Otherwise, the packet
	 * is dropped below.
	 */
	if (__predict_false(sc == NULL) ||
	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
		dst_if = ifp;
		goto sendunicast;
	}

	/*
	 * If the packet is a multicast, or we don't know a better way to
	 * get there, send to all interfaces.
	 */
	if (ETHER_IS_MULTICAST(eh->ether_dhost))
		dst_if = NULL;
	else
		dst_if = bridge_rtlookup(sc, eh->ether_dhost);
	if (dst_if == NULL) {
		struct bridge_iflist *bif;
		struct mbuf *mc;
		int used = 0;
		int ss;

		BRIDGE_PSZ_RENTER(ss);
		LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
			/* Pin the member so it can't go away mid-send. */
			bif = bridge_try_hold_bif(bif);
			if (bif == NULL)
				continue;
			BRIDGE_PSZ_REXIT(ss);

			dst_if = bif->bif_ifp;
			if ((dst_if->if_flags & IFF_RUNNING) == 0)
				goto next;

			/*
			 * If this is not the original output interface,
			 * and the interface is participating in spanning
			 * tree, make sure the port is in a state that
			 * allows forwarding.
			 */
			if (dst_if != ifp &&
			    (bif->bif_flags & IFBIF_STP) != 0) {
				switch (bif->bif_state) {
				case BSTP_IFSTATE_BLOCKING:
				case BSTP_IFSTATE_LISTENING:
				case BSTP_IFSTATE_DISABLED:
					goto next;
				}
			}

			/* Use the original mbuf for the last member. */
			if (LIST_NEXT(bif, bif_next) == NULL) {
				used = 1;
				mc = m;
			} else {
				mc = m_copym(m, 0, M_COPYALL, M_NOWAIT);
				if (mc == NULL) {
					sc->sc_if.if_oerrors++;
					goto next;
				}
			}

			bridge_enqueue(sc, dst_if, mc, 0);
next:
			bridge_release_member(sc, bif);
			BRIDGE_PSZ_RENTER(ss);
		}
		BRIDGE_PSZ_REXIT(ss);

		if (used == 0)
			m_freem(m);
#ifndef BRIDGE_MPSAFE
		splx(s);
#endif
		return (0);
	}

 sendunicast:
	/*
	 * XXX Spanning tree consideration here?
	 */

	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
		m_freem(m);
#ifndef BRIDGE_MPSAFE
		splx(s);
#endif
		return (0);
	}

	bridge_enqueue(sc, dst_if, m, 0);

#ifndef BRIDGE_MPSAFE
	splx(s);
#endif
	return (0);
}

/*
 * bridge_start:
 *
 *	Start output on a bridge.
 *
 *	NOTE: This routine should never be called in this implementation.
 */
static void
bridge_start(struct ifnet *ifp)
{

	printf("%s: bridge_start() called\n", ifp->if_xname);
}

/*
 * bridge_forward:
 *
 *	The forwarding function of the bridge.  Drains the software
 *	forwarding queue (sc_fwd_pktq) and relays each frame to the
 *	appropriate member(s), learning source addresses on the way.
 */
static void
bridge_forward(void *v)
{
	struct bridge_softc *sc = v;
	struct mbuf *m;
	struct bridge_iflist *bif;
	struct ifnet *src_if, *dst_if;
	struct ether_header *eh;
#ifndef BRIDGE_MPSAFE
	int s;

	KERNEL_LOCK(1, NULL);
	mutex_enter(softnet_lock);
#endif

	if ((sc->sc_if.if_flags & IFF_RUNNING) == 0) {
#ifndef BRIDGE_MPSAFE
		mutex_exit(softnet_lock);
		KERNEL_UNLOCK_ONE(NULL);
#endif
		return;
	}

#ifndef BRIDGE_MPSAFE
	s = splnet();
#endif
	while ((m = pktq_dequeue(sc->sc_fwd_pktq)) != NULL) {
		src_if = m->m_pkthdr.rcvif;

		sc->sc_if.if_ipackets++;
		sc->sc_if.if_ibytes += m->m_pkthdr.len;

		/*
		 * Look up the bridge_iflist.
		 */
		bif = bridge_lookup_member_if(sc, src_if);
		if (bif == NULL) {
			/* Interface is not a bridge member (anymore?) */
			m_freem(m);
			continue;
		}

		/* Ports not in a forwarding-capable STP state drop input. */
		if (bif->bif_flags & IFBIF_STP) {
			switch (bif->bif_state) {
			case BSTP_IFSTATE_BLOCKING:
			case BSTP_IFSTATE_LISTENING:
			case BSTP_IFSTATE_DISABLED:
				m_freem(m);
				bridge_release_member(sc, bif);
				continue;
			}
		}

		eh = mtod(m, struct ether_header *);

		/*
		 * If the interface is learning, and the source
		 * address is valid (not multicast, not all-zero),
		 * record the address.
		 */
		if ((bif->bif_flags & IFBIF_LEARNING) != 0 &&
		    ETHER_IS_MULTICAST(eh->ether_shost) == 0 &&
		    (eh->ether_shost[0] == 0 &&
		     eh->ether_shost[1] == 0 &&
		     eh->ether_shost[2] == 0 &&
		     eh->ether_shost[3] == 0 &&
		     eh->ether_shost[4] == 0 &&
		     eh->ether_shost[5] == 0) == 0) {
			(void) bridge_rtupdate(sc, eh->ether_shost,
			    src_if, 0, IFBAF_DYNAMIC);
		}

		/* LEARNING state: learn (above) but do not forward. */
		if ((bif->bif_flags & IFBIF_STP) != 0 &&
		    bif->bif_state == BSTP_IFSTATE_LEARNING) {
			m_freem(m);
			bridge_release_member(sc, bif);
			continue;
		}

		bridge_release_member(sc, bif);

		/*
		 * At this point, the port either doesn't participate
		 * in spanning tree or it is in the forwarding state.
		 */

		/*
		 * If the packet is unicast, destined for someone on
		 * "this" side of the bridge, drop it.
		 */
		if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
			dst_if = bridge_rtlookup(sc, eh->ether_dhost);
			if (src_if == dst_if) {
				m_freem(m);
				continue;
			}
		} else {
			/* ...forward it to all interfaces. */
			sc->sc_if.if_imcasts++;
			dst_if = NULL;
		}

		if (pfil_run_hooks(sc->sc_if.if_pfil, &m,
		    m->m_pkthdr.rcvif, PFIL_IN) != 0) {
			if (m != NULL)
				m_freem(m);
			continue;
		}
		if (m == NULL)
			continue;

		if (dst_if == NULL) {
			/* Unknown unicast or multicast: flood. */
			bridge_broadcast(sc, src_if, m);
			continue;
		}

		/*
		 * At this point, we're dealing with a unicast frame
		 * going to a different interface.
		 */
		if ((dst_if->if_flags & IFF_RUNNING) == 0) {
			m_freem(m);
			continue;
		}

		bif = bridge_lookup_member_if(sc, dst_if);
		if (bif == NULL) {
			/* Not a member of the bridge (anymore?) */
			m_freem(m);
			continue;
		}

		if (bif->bif_flags & IFBIF_STP) {
			switch (bif->bif_state) {
			case BSTP_IFSTATE_DISABLED:
			case BSTP_IFSTATE_BLOCKING:
				m_freem(m);
				bridge_release_member(sc, bif);
				continue;
			}
		}

		bridge_release_member(sc, bif);

		bridge_enqueue(sc, dst_if, m, 1);
	}
#ifndef BRIDGE_MPSAFE
	splx(s);
	mutex_exit(softnet_lock);
	KERNEL_UNLOCK_ONE(NULL);
#endif
}

/*
 * bstp_state_before_learning:
 *
 *	Return true when the port's STP state forbids learning and
 *	forwarding (BLOCKING, LISTENING or DISABLED).
 */
static bool
bstp_state_before_learning(struct bridge_iflist *bif)
{
	if (bif->bif_flags & IFBIF_STP) {
		switch (bif->bif_state) {
		case BSTP_IFSTATE_BLOCKING:
		case BSTP_IFSTATE_LISTENING:
		case BSTP_IFSTATE_DISABLED:
			return true;
		}
	}
	return false;
}

/*
 * bridge_ourether:
 *
 *	Return true when the frame's source (src != 0) or destination
 *	(src == 0) MAC address belongs to the member interface (or to
 *	one of its carp interfaces, when carp is configured in).
 */
static bool
bridge_ourether(struct bridge_iflist *bif, struct ether_header *eh, int src)
{
	uint8_t *ether = src ? eh->ether_shost : eh->ether_dhost;

	if (memcmp(CLLADDR(bif->bif_ifp->if_sadl), ether, ETHER_ADDR_LEN) == 0
#if NCARP > 0
	    || (bif->bif_ifp->if_carp &&
	        carp_ourether(bif->bif_ifp->if_carp, eh, IFT_ETHER, src) != NULL)
#endif /* NCARP > 0 */
	    )
		return true;

	return false;
}

/*
 * bridge_input:
 *
 *	Receive input from a member interface.  Queue the packet for
 *	bridging if it is not for us.
 */
static void
bridge_input(struct ifnet *ifp, struct mbuf *m)
{
	struct bridge_softc *sc = ifp->if_bridge;
	struct bridge_iflist *bif;
	struct ether_header *eh;

	/* Bridge down or gone: deliver to the interface's own stack. */
	if (__predict_false(sc == NULL) ||
	    (sc->sc_if.if_flags & IFF_RUNNING) == 0) {
		ether_input(ifp, m);
		return;
	}

	bif = bridge_lookup_member_if(sc, ifp);
	if (bif == NULL) {
		ether_input(ifp, m);
		return;
	}

	eh = mtod(m, struct ether_header *);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		if (memcmp(etherbroadcastaddr,
		    eh->ether_dhost, ETHER_ADDR_LEN) == 0)
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
	}

	/*
	 * A 'fast' path for packets addressed to interfaces that are
	 * part of this bridge.
	 */
	if (!(m->m_flags & (M_BCAST|M_MCAST)) &&
	    !bstp_state_before_learning(bif)) {
		struct bridge_iflist *_bif;
		struct ifnet *_ifp = NULL;
		int s;

		BRIDGE_PSZ_RENTER(s);
		LIST_FOREACH(_bif, &sc->sc_iflist, bif_next) {
			/* It is destined for us. */
			if (bridge_ourether(_bif, eh, 0)) {
				_bif = bridge_try_hold_bif(_bif);
				BRIDGE_PSZ_REXIT(s);
				if (_bif == NULL)
					goto out;
				if (_bif->bif_flags & IFBIF_LEARNING)
					(void) bridge_rtupdate(sc,
					    eh->ether_shost, ifp, 0, IFBAF_DYNAMIC);
				/* Rewrite rcvif so the frame is delivered
				 * to the member that owns the address. */
				_ifp = m->m_pkthdr.rcvif = _bif->bif_ifp;
				bridge_release_member(sc, _bif);
				goto out;
			}

			/* We just received a packet that we sent out. */
			if (bridge_ourether(_bif, eh, 1))
				break;
		}
		BRIDGE_PSZ_REXIT(s);
out:

		/* _bif != NULL means either local delivery (_ifp set)
		 * or an echo of our own transmission (drop). */
		if (_bif != NULL) {
			bridge_release_member(sc, bif);
			if (_ifp != NULL) {
				m->m_flags &= ~M_PROMISC;
				ether_input(_ifp, m);
			} else
				m_freem(m);
			return;
		}
	}

	/* Tap off 802.1D packets; they do not get forwarded. */
	if (bif->bif_flags & IFBIF_STP &&
	    memcmp(eh->ether_dhost, bstp_etheraddr, ETHER_ADDR_LEN) == 0) {
		bstp_input(sc, bif, m);
		bridge_release_member(sc, bif);
		return;
	}

	/*
	 * A normal switch would discard the packet here, but that's not what
	 * we've done historically. This also prevents some obnoxious behaviour.
	 */
	if (bstp_state_before_learning(bif)) {
		bridge_release_member(sc, bif);
		ether_input(ifp, m);
		return;
	}

	bridge_release_member(sc, bif);

	/* Queue the packet for bridge forwarding. */
	if (__predict_false(!pktq_enqueue(sc->sc_fwd_pktq, m, 0)))
		m_freem(m);
}

/*
 * bridge_broadcast:
 *
 *	Send a frame to all interfaces that are members of
 *	the bridge, except for the one on which the packet
 *	arrived.  Consumes the original mbuf.
 */
static void
bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
    struct mbuf *m)
{
	struct bridge_iflist *bif;
	struct mbuf *mc;
	struct ifnet *dst_if;
	bool bmcast;
	int s;

	bmcast = m->m_flags & (M_BCAST|M_MCAST);

	BRIDGE_PSZ_RENTER(s);
	LIST_FOREACH(bif, &sc->sc_iflist, bif_next) {
		bif = bridge_try_hold_bif(bif);
		if (bif == NULL)
			continue;
		BRIDGE_PSZ_REXIT(s);

		dst_if = bif->bif_ifp;

		if (bif->bif_flags & IFBIF_STP) {
			switch (bif->bif_state) {
			case BSTP_IFSTATE_BLOCKING:
			case BSTP_IFSTATE_DISABLED:
				goto next;
			}
		}

		/* Skip non-discovering ports for unknown unicasts. */
		if ((bif->bif_flags & IFBIF_DISCOVER) == 0 && !bmcast)
			goto next;

		if ((dst_if->if_flags & IFF_RUNNING) == 0)
			goto next;

		if (dst_if != src_if) {
			mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
			if (mc == NULL) {
				sc->sc_if.if_oerrors++;
				goto next;
			}
			bridge_enqueue(sc, dst_if, mc, 1);
		}

		/* Broadcast/multicast is also delivered locally on
		 * each member. */
		if (bmcast) {
			mc = m_copym(m, 0, M_COPYALL, M_DONTWAIT);
			if (mc == NULL) {
				sc->sc_if.if_oerrors++;
				goto next;
			}

			mc->m_pkthdr.rcvif = dst_if;
			mc->m_flags &= ~M_PROMISC;
			ether_input(dst_if, mc);
		}
next:
		bridge_release_member(sc, bif);
		BRIDGE_PSZ_RENTER(s);
	}
	BRIDGE_PSZ_REXIT(s);

	m_freem(m);
}

/*
 * bridge_rtalloc:
 *
 *	Allocate and insert a new forwarding-table node for 'dst'.
 *	Fails with ENOSPC when the cache is full, ENOMEM when the
 *	pool is exhausted.
 */
static int
bridge_rtalloc(struct bridge_softc *sc, const uint8_t *dst,
    struct bridge_rtnode **brtp)
{
	struct bridge_rtnode *brt;
	int error;

	if (sc->sc_brtcnt >= sc->sc_brtmax)
		return ENOSPC;

	/*
	 * Allocate a new bridge forwarding node, and
	 * initialize the expiration time and Ethernet
	 * address.
	 */
	brt = pool_get(&bridge_rtnode_pool, PR_NOWAIT);
	if (brt == NULL)
		return ENOMEM;

	memset(brt, 0, sizeof(*brt));
	brt->brt_expire = time_uptime + sc->sc_brttimeout;
	brt->brt_flags = IFBAF_DYNAMIC;
	memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);

	BRIDGE_RT_INTR_LOCK(sc);
	error = bridge_rtnode_insert(sc, brt);
	BRIDGE_RT_INTR_UNLOCK(sc);

	if (error != 0) {
		pool_put(&bridge_rtnode_pool, brt);
		return error;
	}

	*brtp = brt;
	return 0;
}

/*
 * bridge_rtupdate:
 *
 *	Add a bridge routing entry.
 */
static int
bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst,
    struct ifnet *dst_if, int setflags, uint8_t flags)
{
	struct bridge_rtnode *brt;
	int s;

again:
	/*
	 * A route for this destination might already exist.  If so,
	 * update it, otherwise create a new one.
	 */
	BRIDGE_RT_RENTER(s);
	brt = bridge_rtnode_lookup(sc, dst);

	if (brt != NULL) {
		brt->brt_ifp = dst_if;
		if (setflags) {
			brt->brt_flags = flags;
			/* Static entries never expire. */
			if (flags & IFBAF_STATIC)
				brt->brt_expire = 0;
			else
				brt->brt_expire = time_uptime + sc->sc_brttimeout;
		} else {
			if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
				brt->brt_expire = time_uptime + sc->sc_brttimeout;
		}
	}
	BRIDGE_RT_REXIT(s);

	if (brt == NULL) {
		int r;

		/* No entry yet: allocate one, then retry the update. */
		r = bridge_rtalloc(sc, dst, &brt);
		if (r != 0)
			return r;
		goto again;
	}

	return 0;
}

/*
 * bridge_rtlookup:
 *
 *	Lookup the destination interface for an address.
 */
static struct ifnet *
bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr)
{
	struct bridge_rtnode *brt;
	struct ifnet *ifs = NULL;
	int s;

	BRIDGE_RT_RENTER(s);
	brt = bridge_rtnode_lookup(sc, addr);
	if (brt != NULL)
		ifs = brt->brt_ifp;
	BRIDGE_RT_REXIT(s);

	return ifs;
}

/* Callback type for bridge_rtlist_iterate_remove; return true to
 * remove the node, set *need_break to stop the iteration early. */
typedef bool (*bridge_iterate_cb_t)
    (struct bridge_softc *, struct bridge_rtnode *, bool *, void *);

/*
 * bridge_rtlist_iterate_remove:
 *
 *	It iterates on sc->sc_rtlist and removes rtnodes of it which func
 *	callback judges to remove.  Removals of rtnodes are done in a manner
 *	of pserialize.  To this end, all kmem_* operations are placed out of
 *	mutexes.
 */
static void
bridge_rtlist_iterate_remove(struct bridge_softc *sc, bridge_iterate_cb_t func, void *arg)
{
	struct bridge_rtnode *brt, *nbrt;
	struct bridge_rtnode **brt_list;
	int i, count;

retry:
	count = sc->sc_brtcnt;
	if (count == 0)
		return;
	brt_list = kmem_alloc(sizeof(struct bridge_rtnode *) * count, KM_SLEEP);

	BRIDGE_RT_LOCK(sc);
	BRIDGE_RT_INTR_LOCK(sc);
	if (__predict_false(sc->sc_brtcnt > count)) {
		/* The rtnodes increased, we need more memory */
		BRIDGE_RT_INTR_UNLOCK(sc);
		BRIDGE_RT_UNLOCK(sc);
		kmem_free(brt_list, sizeof(*brt_list) * count);
		goto retry;
	}

	/* Unlink the selected nodes, but defer destruction until after
	 * the pserialize grace period below. */
	i = 0;
	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
		bool need_break = false;
		if (func(sc, brt, &need_break, arg)) {
			bridge_rtnode_remove(sc, brt);
			brt_list[i++] = brt;
		}
		if (need_break)
			break;
	}
	BRIDGE_RT_INTR_UNLOCK(sc);

	if (i > 0)
		BRIDGE_RT_PSZ_PERFORM(sc);
	BRIDGE_RT_UNLOCK(sc);

	/* Safe to free now: no reader can still see these nodes. */
	while (--i >= 0)
		bridge_rtnode_destroy(brt_list[i]);

	kmem_free(brt_list, sizeof(*brt_list) * count);
}

/* Remove dynamic entries until the cache fits under sc_brtmax. */
static bool
bridge_rttrim0_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
    bool *need_break, void *arg)
{
	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
		/* Take into account of the subsequent removal */
		if ((sc->sc_brtcnt - 1) <= sc->sc_brtmax)
			*need_break = true;
		return true;
	} else
		return false;
}

static void
bridge_rttrim0(struct bridge_softc *sc)
{
	bridge_rtlist_iterate_remove(sc, bridge_rttrim0_cb, NULL);
}

/*
 * bridge_rttrim:
 *
 *	Trim the routing table so that we have a number
 *	of routing entries less than or equal to the
 *	maximum number.
 */
static void
bridge_rttrim(struct bridge_softc *sc)
{

	/* Make sure we actually need to do this. */
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	/* Force an aging cycle; this might trim enough addresses. */
	bridge_rtage(sc);
	if (sc->sc_brtcnt <= sc->sc_brtmax)
		return;

	bridge_rttrim0(sc);

	return;
}

/*
 * bridge_timer:
 *
 *	Aging timer for the bridge.  Runs in callout (softint) context,
 *	so the actual aging is punted to a workqueue where sleeping
 *	is allowed.
 */
static void
bridge_timer(void *arg)
{
	struct bridge_softc *sc = arg;

	workqueue_enqueue(sc->sc_rtage_wq, &bridge_rtage_wk, NULL);
}

/* Workqueue body: age the table and re-arm the timer while running. */
static void
bridge_rtage_work(struct work *wk, void *arg)
{
	struct bridge_softc *sc = arg;

	KASSERT(wk == &bridge_rtage_wk);

	bridge_rtage(sc);

	if (sc->sc_if.if_flags & IFF_RUNNING)
		callout_reset(&sc->sc_brcallout,
		    bridge_rtable_prune_period * hz, bridge_timer, sc);
}

/* Select dynamic entries whose lifetime has expired. */
static bool
bridge_rtage_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
    bool *need_break, void *arg)
{
	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
	    time_uptime >= brt->brt_expire)
		return true;
	else
		return false;
}

/*
 * bridge_rtage:
 *
 *	Perform an aging cycle.
 */
static void
bridge_rtage(struct bridge_softc *sc)
{
	bridge_rtlist_iterate_remove(sc, bridge_rtage_cb, NULL);
}


/* Select all entries (full flush) or just the dynamic ones. */
static bool
bridge_rtflush_cb(struct bridge_softc *sc, struct bridge_rtnode *brt,
    bool *need_break, void *arg)
{
	int full = *(int*)arg;

	if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
		return true;
	else
		return false;
}

/*
 * bridge_rtflush:
 *
 *	Remove all dynamic addresses from the bridge.
2321 */ 2322static void 2323bridge_rtflush(struct bridge_softc *sc, int full) 2324{ 2325 bridge_rtlist_iterate_remove(sc, bridge_rtflush_cb, &full); 2326} 2327 2328/* 2329 * bridge_rtdaddr: 2330 * 2331 * Remove an address from the table. 2332 */ 2333static int 2334bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr) 2335{ 2336 struct bridge_rtnode *brt; 2337 2338 BRIDGE_RT_LOCK(sc); 2339 BRIDGE_RT_INTR_LOCK(sc); 2340 if ((brt = bridge_rtnode_lookup(sc, addr)) == NULL) { 2341 BRIDGE_RT_INTR_UNLOCK(sc); 2342 BRIDGE_RT_UNLOCK(sc); 2343 return ENOENT; 2344 } 2345 bridge_rtnode_remove(sc, brt); 2346 BRIDGE_RT_INTR_UNLOCK(sc); 2347 BRIDGE_RT_PSZ_PERFORM(sc); 2348 BRIDGE_RT_UNLOCK(sc); 2349 2350 bridge_rtnode_destroy(brt); 2351 2352 return 0; 2353} 2354 2355/* 2356 * bridge_rtdelete: 2357 * 2358 * Delete routes to a speicifc member interface. 2359 */ 2360static void 2361bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp) 2362{ 2363 struct bridge_rtnode *brt, *nbrt; 2364 2365 BRIDGE_RT_LOCK(sc); 2366 BRIDGE_RT_INTR_LOCK(sc); 2367 LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) { 2368 if (brt->brt_ifp == ifp) 2369 break; 2370 } 2371 if (brt == NULL) { 2372 BRIDGE_RT_INTR_UNLOCK(sc); 2373 BRIDGE_RT_UNLOCK(sc); 2374 return; 2375 } 2376 bridge_rtnode_remove(sc, brt); 2377 BRIDGE_RT_INTR_UNLOCK(sc); 2378 BRIDGE_RT_PSZ_PERFORM(sc); 2379 BRIDGE_RT_UNLOCK(sc); 2380 2381 bridge_rtnode_destroy(brt); 2382} 2383 2384/* 2385 * bridge_rtable_init: 2386 * 2387 * Initialize the route table for this bridge. 
 */
static void
bridge_rtable_init(struct bridge_softc *sc)
{
	int i;

	sc->sc_rthash = kmem_alloc(sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
	    KM_SLEEP);

	for (i = 0; i < BRIDGE_RTHASH_SIZE; i++)
		LIST_INIT(&sc->sc_rthash[i]);

	/* Random key makes the hash distribution unpredictable. */
	sc->sc_rthash_key = cprng_fast32();

	LIST_INIT(&sc->sc_rtlist);

	sc->sc_rtlist_intr_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NET);
#ifdef BRIDGE_MPSAFE
	sc->sc_rtlist_psz = pserialize_create();
	sc->sc_rtlist_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SOFTNET);
#else
	/* Non-MPSAFE build: pserialize and the outer lock are unused. */
	sc->sc_rtlist_psz = NULL;
	sc->sc_rtlist_lock = NULL;
#endif
}

/*
 * bridge_rtable_fini:
 *
 *	Deconstruct the route table for this bridge.
 */
static void
bridge_rtable_fini(struct bridge_softc *sc)
{

	kmem_free(sc->sc_rthash, sizeof(*sc->sc_rthash) * BRIDGE_RTHASH_SIZE);
	if (sc->sc_rtlist_intr_lock)
		mutex_obj_free(sc->sc_rtlist_intr_lock);
	if (sc->sc_rtlist_lock)
		mutex_obj_free(sc->sc_rtlist_lock);
	if (sc->sc_rtlist_psz)
		pserialize_destroy(sc->sc_rtlist_psz);
}

/*
 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
 */
#define	mix(a, b, c)						\
do {								\
	a -= b; a -= c; a ^= (c >> 13);				\
	b -= c; b -= a; b ^= (a << 8);				\
	c -= a; c -= b; c ^= (b >> 13);				\
	a -= b; a -= c; a ^= (c >> 12);				\
	b -= c; b -= a; b ^= (a << 16);				\
	c -= a; c -= b; c ^= (b >> 5);				\
	a -= b; a -= c; a ^= (c >> 3);				\
	b -= c; b -= a; b ^= (a << 10);				\
	c -= a; c -= b; c ^= (b >> 15);				\
} while (/*CONSTCOND*/0)

/*
 * bridge_rthash:
 *
 *	Hash a 6-byte Ethernet address into a bucket index, keyed by
 *	the per-bridge random sc_rthash_key.
 */
static inline uint32_t
bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
{
	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;

	b += addr[5] << 8;
	b += addr[4];
	a += addr[3] << 24;
	a += addr[2] << 16;
	a += addr[1] << 8;
	a += addr[0];

	mix(a, b, c);

	return (c & BRIDGE_RTHASH_MASK);
}

#undef mix

/*
 * bridge_rtnode_lookup:
 *
 *	Look up a bridge route node for the specified destination.
 */
static struct bridge_rtnode *
bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr)
{
	struct bridge_rtnode *brt;
	uint32_t hash;
	int dir;

	hash = bridge_rthash(sc, addr);
	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
		dir = memcmp(addr, brt->brt_addr, ETHER_ADDR_LEN);
		if (dir == 0)
			return (brt);
		/* Chains are kept sorted by bridge_rtnode_insert, so
		 * walking past the slot means the address is absent. */
		if (dir > 0)
			return (NULL);
	}

	return (NULL);
}

/*
 * bridge_rtnode_insert:
 *
 *	Insert the specified bridge node into the route table.  We
 *	assume the entry is not already in the table.
2497 */ 2498static int 2499bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt) 2500{ 2501 struct bridge_rtnode *lbrt; 2502 uint32_t hash; 2503 int dir; 2504 2505 KASSERT(BRIDGE_RT_INTR_LOCKED(sc)); 2506 2507 hash = bridge_rthash(sc, brt->brt_addr); 2508 2509 lbrt = LIST_FIRST(&sc->sc_rthash[hash]); 2510 if (lbrt == NULL) { 2511 LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash); 2512 goto out; 2513 } 2514 2515 do { 2516 dir = memcmp(brt->brt_addr, lbrt->brt_addr, ETHER_ADDR_LEN); 2517 if (dir == 0) 2518 return (EEXIST); 2519 if (dir > 0) { 2520 LIST_INSERT_BEFORE(lbrt, brt, brt_hash); 2521 goto out; 2522 } 2523 if (LIST_NEXT(lbrt, brt_hash) == NULL) { 2524 LIST_INSERT_AFTER(lbrt, brt, brt_hash); 2525 goto out; 2526 } 2527 lbrt = LIST_NEXT(lbrt, brt_hash); 2528 } while (lbrt != NULL); 2529 2530#ifdef DIAGNOSTIC 2531 panic("bridge_rtnode_insert: impossible"); 2532#endif 2533 2534 out: 2535 LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list); 2536 sc->sc_brtcnt++; 2537 2538 return (0); 2539} 2540 2541/* 2542 * bridge_rtnode_remove: 2543 * 2544 * Remove a bridge rtnode from the rthash and the rtlist of a bridge. 2545 */ 2546static void 2547bridge_rtnode_remove(struct bridge_softc *sc, struct bridge_rtnode *brt) 2548{ 2549 2550 KASSERT(BRIDGE_RT_INTR_LOCKED(sc)); 2551 2552 LIST_REMOVE(brt, brt_hash); 2553 LIST_REMOVE(brt, brt_list); 2554 sc->sc_brtcnt--; 2555} 2556 2557/* 2558 * bridge_rtnode_destroy: 2559 * 2560 * Destroy a bridge rtnode. 2561 */ 2562static void 2563bridge_rtnode_destroy(struct bridge_rtnode *brt) 2564{ 2565 2566 pool_put(&bridge_rtnode_pool, brt); 2567} 2568 2569#if defined(BRIDGE_IPF) 2570extern pfil_head_t *inet_pfil_hook; /* XXX */ 2571extern pfil_head_t *inet6_pfil_hook; /* XXX */ 2572 2573/* 2574 * Send bridge packets through IPF if they are one of the types IPF can deal 2575 * with, or if they are ARP or REVARP. (IPF will pass ARP and REVARP without 2576 * question.) 
 */
/*
 * bridge_ipf:
 *
 *	pfil hook for bridged traffic.  Returns 0 to pass the packet
 *	(possibly with *mp replaced) and nonzero to drop it; on drop the
 *	mbuf chain is freed and *mp is set to NULL.
 */
static int
bridge_ipf(void *arg, struct mbuf **mp, struct ifnet *ifp, int dir)
{
	int snap, error;
	struct ether_header *eh1, eh2;
	struct llc llc1;
	uint16_t ether_type;

	snap = 0;
	error = -1;	/* Default error if not error == 0 */
	eh1 = mtod(*mp, struct ether_header *);
	ether_type = ntohs(eh1->ether_type);

	/*
	 * Check for SNAP/LLC.  A type field below ETHERMTU is an 802.3
	 * length, so the payload may begin with an 8-byte LLC/SNAP header
	 * carrying the real Ethernet type.
	 */
	if (ether_type < ETHERMTU) {
		struct llc *llc2 = (struct llc *)(eh1 + 1);

		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
		    llc2->llc_dsap == LLC_SNAP_LSAP &&
		    llc2->llc_ssap == LLC_SNAP_LSAP &&
		    llc2->llc_control == LLC_UI) {
			/*
			 * Convert the SNAP type to host order (htons and
			 * ntohs perform the same byte swap).
			 */
			ether_type = htons(llc2->llc_un.type_snap.ether_type);
			snap = 1;
		}
	}

	/*
	 * If we're trying to filter bridge traffic, don't look at anything
	 * other than IP and ARP traffic.  If the filter doesn't understand
	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
	 * but of course we don't have an AppleTalk filter to begin with.
	 * (Note that since IPF doesn't understand ARP it will pass *ALL*
	 * ARP traffic.)
	 */
	switch (ether_type) {
	case ETHERTYPE_ARP:
	case ETHERTYPE_REVARP:
		return 0; /* Automatically pass */
	case ETHERTYPE_IP:
# ifdef INET6
	case ETHERTYPE_IPV6:
# endif /* INET6 */
		break;
	default:
		goto bad;
	}

	/* Strip off the Ethernet header and keep a copy. */
	m_copydata(*mp, 0, ETHER_HDR_LEN, (void *) &eh2);
	m_adj(*mp, ETHER_HDR_LEN);

	/* Strip off snap header, if present, keeping a copy in llc1. */
	if (snap) {
		m_copydata(*mp, 0, sizeof(struct llc), (void *) &llc1);
		m_adj(*mp, sizeof(struct llc));
	}

	/*
	 * Check basic packet sanity and run IPF through pfil.
	 */
	KASSERT(!cpu_intr_p());
	switch (ether_type)
	{
	case ETHERTYPE_IP :
		/* Inbound packets get header sanity checks first. */
		error = (dir == PFIL_IN) ? bridge_ip_checkbasic(mp) : 0;
		if (error == 0)
			error = pfil_run_hooks(inet_pfil_hook, mp, ifp, dir);
		break;
# ifdef INET6
	case ETHERTYPE_IPV6 :
		error = (dir == PFIL_IN) ? bridge_ip6_checkbasic(mp) : 0;
		if (error == 0)
			error = pfil_run_hooks(inet6_pfil_hook, mp, ifp, dir);
		break;
# endif
	default :
		error = 0;
		break;
	}

	/* The hooks may have consumed the packet entirely. */
	if (*mp == NULL)
		return error;
	if (error != 0)
		goto bad;

	/* From here on, failures come from M_PREPEND freeing the chain. */
	error = -1;

	/*
	 * Finally, put everything back the way it was and return
	 */
	if (snap) {
		M_PREPEND(*mp, sizeof(struct llc), M_DONTWAIT);
		if (*mp == NULL)
			return error;
		bcopy(&llc1, mtod(*mp, void *), sizeof(struct llc));
	}

	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
	if (*mp == NULL)
		return error;
	bcopy(&eh2, mtod(*mp, void *), ETHER_HDR_LEN);

	return 0;

 bad:
	m_freem(*mp);
	*mp = NULL;
	return error;
}

/*
 * Perform basic checks on header size since
 * IPF assumes ip_input has already processed
 * it for it.  Cut-and-pasted from ip_input.c.
 * Given how simple the IPv6 version is,
 * does the IPv4 version really need to be
 * this complicated?
 *
 * XXX Should we update ipstat here, or not?
 * XXX Right now we update ipstat but not
 * XXX csum_counter.
 */
/*
 * bridge_ip_checkbasic:
 *
 *	Validate the IPv4 header of *mp: alignment, minimum length,
 *	version, header length, checksum, and total-length consistency.
 *	Returns 0 on success, -1 on failure.  *mp is always updated,
 *	since the chain may have been replaced by m_copyup/m_pullup;
 *	on an allocation failure it may be set to NULL.
 */
static int
bridge_ip_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip *ip;
	int len, hlen;

	if (*mp == NULL)
		return -1;

	/*
	 * If the header is misaligned for struct ip access, copy it up
	 * into a fresh, aligned mbuf (leaving room for link headers);
	 * otherwise just make sure the base header is contiguous.
	 */
	if (IP_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
		if ((m = m_copyup(m, sizeof(struct ip),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			ip_statinc(IP_STAT_TOOSMALL);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
			ip_statinc(IP_STAT_TOOSMALL);
			goto bad;
		}
	}
	ip = mtod(m, struct ip *);
	if (ip == NULL) goto bad;

	if (ip->ip_v != IPVERSION) {
		ip_statinc(IP_STAT_BADVERS);
		goto bad;
	}
	/* Header length is expressed in 32-bit words. */
	hlen = ip->ip_hl << 2;
	if (hlen < sizeof(struct ip)) {	/* minimum header length */
		ip_statinc(IP_STAT_BADHLEN);
		goto bad;
	}
	/* Pull up IP options, if any, so the whole header is contiguous. */
	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == 0) {
			ip_statinc(IP_STAT_BADHLEN);
			goto bad;
		}
		ip = mtod(m, struct ip *);
		if (ip == NULL) goto bad;
	}

	/*
	 * Honor hardware checksum offload where the interface claims it:
	 * a good hardware verdict skips the software checksum, a bad one
	 * drops the packet, and otherwise we compute it ourselves.
	 */
	switch (m->m_pkthdr.csum_flags &
		((m->m_pkthdr.rcvif->if_csum_flags_rx & M_CSUM_IPv4) |
		 M_CSUM_IPv4_BAD)) {
	case M_CSUM_IPv4|M_CSUM_IPv4_BAD:
		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_bad); */
		goto bad;

	case M_CSUM_IPv4:
		/* Checksum was okay. */
		/* INET_CSUM_COUNTER_INCR(&ip_hwcsum_ok); */
		break;

	default:
		/* Must compute it ourselves. */
		/* INET_CSUM_COUNTER_INCR(&ip_swcsum); */
		if (in_cksum(m, hlen) != 0)
			goto bad;
		break;
	}

	/* Retrieve the packet length. */
	len = ntohs(ip->ip_len);

	/*
	 * Check for additional length bogosity
	 */
	if (len < hlen) {
		ip_statinc(IP_STAT_BADLEN);
		goto bad;
	}

	/*
	 * Check that the amount of data in the buffers
	 * is as at least much as the IP header would have us expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < len) {
		ip_statinc(IP_STAT_TOOSHORT);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return 0;

 bad:
	/* m may be NULL here if m_copyup/m_pullup failed. */
	*mp = m;
	return -1;
}

# ifdef INET6
/*
 * Same as above, but for IPv6.
 * Cut-and-pasted from ip6_input.c.
 * XXX Should we update ip6stat, or not?
 */
static int
bridge_ip6_checkbasic(struct mbuf **mp)
{
	struct mbuf *m = *mp;
	struct ip6_hdr *ip6;

	/*
	 * If the IPv6 header is not aligned, slurp it up into a new
	 * mbuf with space for link headers, in the event we forward
	 * it.  Otherwise, if it is aligned, make sure the entire base
	 * IPv6 header is in the first mbuf of the chain.
	 */
	if (IP6_HDR_ALIGNED_P(mtod(m, void *)) == 0) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_copyup(m, sizeof(struct ip6_hdr),
		    (max_linkhdr + 3) & ~3)) == NULL) {
			/* XXXJRT new stat, please */
			ip6_statinc(IP6_STAT_TOOSMALL);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	} else if (__predict_false(m->m_len < sizeof(struct ip6_hdr))) {
		struct ifnet *inifp = m->m_pkthdr.rcvif;
		if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == NULL) {
			ip6_statinc(IP6_STAT_TOOSMALL);
			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
			goto bad;
		}
	}

	ip6 = mtod(m, struct ip6_hdr *);

	/* Only the version field is validated; no length checks here. */
	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
		ip6_statinc(IP6_STAT_BADVERS);
		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
		goto bad;
	}

	/* Checks out, proceed */
	*mp = m;
	return 0;

 bad:
	/* m may be NULL here if m_copyup/m_pullup failed. */
	*mp = m;
	return -1;
}
# endif /* INET6 */
#endif /* BRIDGE_IPF */