1/*
2 * Copyright (c) 2004-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29/*	$NetBSD: if_bridge.c,v 1.31 2005/06/01 19:45:34 jdc Exp $	*/
30/*
31 * Copyright 2001 Wasabi Systems, Inc.
32 * All rights reserved.
33 *
34 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 *    must display the following acknowledgement:
46 *	This product includes software developed for the NetBSD Project by
47 *	Wasabi Systems, Inc.
48 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
49 *    or promote products derived from this software without specific prior
50 *    written permission.
51 *
52 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
53 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
54 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
55 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
56 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
57 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
58 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
59 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
60 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
61 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
62 * POSSIBILITY OF SUCH DAMAGE.
63 */
64
65/*
66 * Copyright (c) 1999, 2000 Jason L. Wright (jason@thought.net)
67 * All rights reserved.
68 *
69 * Redistribution and use in source and binary forms, with or without
70 * modification, are permitted provided that the following conditions
71 * are met:
72 * 1. Redistributions of source code must retain the above copyright
73 *    notice, this list of conditions and the following disclaimer.
74 * 2. Redistributions in binary form must reproduce the above copyright
75 *    notice, this list of conditions and the following disclaimer in the
76 *    documentation and/or other materials provided with the distribution.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
79 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
80 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
81 * DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
82 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
83 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
84 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
86 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
87 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
88 * POSSIBILITY OF SUCH DAMAGE.
89 *
90 * OpenBSD: if_bridge.c,v 1.60 2001/06/15 03:38:33 itojun Exp
91 */
92
93/*
94 * Network interface bridge support.
95 *
96 * TODO:
97 *
98 *	- Currently only supports Ethernet-like interfaces (Ethernet,
99 *	  802.11, VLANs on Ethernet, etc.)  Figure out a nice way
100 *	  to bridge other types of interfaces (FDDI-FDDI, and maybe
 *	  consider heterogeneous bridges).
102 *
103 *	- GIF isn't handled due to the lack of IPPROTO_ETHERIP support.
104 */
105
106#include <sys/cdefs.h>
107
108#define	BRIDGE_DEBUG 1
109
110#include <sys/param.h>
111#include <sys/mbuf.h>
112#include <sys/malloc.h>
113#include <sys/protosw.h>
114#include <sys/systm.h>
115#include <sys/time.h>
116#include <sys/socket.h> /* for net/if.h */
117#include <sys/sockio.h>
118#include <sys/kernel.h>
119#include <sys/random.h>
120#include <sys/syslog.h>
121#include <sys/sysctl.h>
122#include <sys/proc.h>
123#include <sys/lock.h>
124#include <sys/mcache.h>
125
126#include <sys/kauth.h>
127
128#include <kern/thread_call.h>
129
130#include <libkern/libkern.h>
131
132#include <kern/zalloc.h>
133
134#if NBPFILTER > 0
135#include <net/bpf.h>
136#endif
137#include <net/if.h>
138#include <net/if_dl.h>
139#include <net/if_types.h>
140#include <net/if_var.h>
141#include <net/if_media.h>
142
143#include <netinet/in.h> /* for struct arpcom */
144#include <netinet/in_systm.h>
145#include <netinet/in_var.h>
146#define	_IP_VHL
147#include <netinet/ip.h>
148#include <netinet/ip_var.h>
149#if INET6
150#include <netinet/ip6.h>
151#include <netinet6/ip6_var.h>
152#endif
153#ifdef DEV_CARP
154#include <netinet/ip_carp.h>
155#endif
156#include <netinet/if_ether.h> /* for struct arpcom */
157#include <net/bridgestp.h>
158#include <net/if_bridgevar.h>
159#include <net/if_llc.h>
160#if NVLAN > 0
161#include <net/if_vlan_var.h>
162#endif /* NVLAN > 0 */
163
164#include <net/if_ether.h>
165#include <net/dlil.h>
166#include <net/kpi_interfacefilter.h>
167
168#include <net/route.h>
169#ifdef PFIL_HOOKS
170#include <netinet/ip_fw2.h>
171#include <netinet/ip_dummynet.h>
172#endif /* PFIL_HOOKS */
173#include <dev/random/randomdev.h>
174
175#include <netinet/bootp.h>
176#include <netinet/dhcp.h>
177
178#if BRIDGE_DEBUG
179#define	BR_DBGF_LIFECYCLE	0x0001
180#define	BR_DBGF_INPUT		0x0002
181#define	BR_DBGF_OUTPPUT		0x0004
182#define	BR_DBGF_RT_TABLE	0x0008
183#define	BR_DBGF_DELAYED_CALL	0x0010
184#define	BR_DBGF_IOCTL		0x0020
185#define	BR_DBGF_MBUF		0x0040
186#define	BR_DBGF_MCAST		0x0080
187#define	BR_DBGF_HOSTFILTER	0x0100
188#endif /* BRIDGE_DEBUG */
189
190#define	_BRIDGE_LOCK(_sc)		lck_mtx_lock(&(_sc)->sc_mtx)
191#define	_BRIDGE_UNLOCK(_sc)		lck_mtx_unlock(&(_sc)->sc_mtx)
192#define	BRIDGE_LOCK_ASSERT_HELD(_sc)		\
193	lck_mtx_assert(&(_sc)->sc_mtx, LCK_MTX_ASSERT_OWNED)
194#define	BRIDGE_LOCK_ASSERT_NOTHELD(_sc)		\
195	lck_mtx_assert(&(_sc)->sc_mtx, LCK_MTX_ASSERT_NOTOWNED)
196
197#if BRIDGE_DEBUG
198
199#define	BR_LCKDBG_MAX			4
200
201#define	BRIDGE_LOCK(_sc)		bridge_lock(_sc)
202#define	BRIDGE_UNLOCK(_sc)		bridge_unlock(_sc)
203#define	BRIDGE_LOCK2REF(_sc, _err)	_err = bridge_lock2ref(_sc)
204#define	BRIDGE_UNREF(_sc)		bridge_unref(_sc)
205#define	BRIDGE_XLOCK(_sc)		bridge_xlock(_sc)
206#define	BRIDGE_XDROP(_sc)		bridge_xdrop(_sc)
207
208#else /* !BRIDGE_DEBUG */
209
210#define	BRIDGE_LOCK(_sc)		_BRIDGE_LOCK(_sc)
211#define	BRIDGE_UNLOCK(_sc)		_BRIDGE_UNLOCK(_sc)
212#define	BRIDGE_LOCK2REF(_sc, _err)	do {				\
213	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
214	if ((_sc)->sc_iflist_xcnt > 0)					\
215		(_err) = EBUSY;						\
216	else								\
217		(_sc)->sc_iflist_ref++;					\
218	_BRIDGE_UNLOCK(_sc);						\
219} while (0)
220#define	BRIDGE_UNREF(_sc)		do {				\
221	_BRIDGE_LOCK(_sc);						\
222	(_sc)->sc_iflist_ref--;						\
223	if (((_sc)->sc_iflist_xcnt > 0) && ((_sc)->sc_iflist_ref == 0))	{ \
224		_BRIDGE_UNLOCK(_sc);					\
225		wakeup(&(_sc)->sc_cv);					\
226	} else								\
227		_BRIDGE_UNLOCK(_sc);					\
228} while (0)
229#define	BRIDGE_XLOCK(_sc)		do {				\
230	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
231	(_sc)->sc_iflist_xcnt++;					\
232	while ((_sc)->sc_iflist_ref > 0)				\
233		msleep(&(_sc)->sc_cv, &(_sc)->sc_mtx, PZERO,		\
234		    "BRIDGE_XLOCK", NULL);				\
235} while (0)
236#define	BRIDGE_XDROP(_sc)		do {				\
237	BRIDGE_LOCK_ASSERT_HELD(_sc);					\
238	(_sc)->sc_iflist_xcnt--;					\
239} while (0)
240
241#endif /* BRIDGE_DEBUG */
242
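/*
 * Illustrative sketch (not part of the build) of how the reference and
 * exclusion macros above are intended to be paired; "walk the list" is
 * a placeholder for any per-member work:
 *
 *	BRIDGE_LOCK(sc);
 *	BRIDGE_LOCK2REF(sc, error);	// drops sc_mtx, takes a list ref
 *	if (error == 0) {
 *		// walk the list: sc_iflist may be read without sc_mtx
 *		BRIDGE_UNREF(sc);	// may wake a waiting BRIDGE_XLOCK()
 *	}
 *
 * A writer that needs exclusive access calls BRIDGE_XLOCK(sc) with the
 * mutex held, which sleeps until sc_iflist_ref drains to zero, and
 * later releases the exclusion with BRIDGE_XDROP(sc).
 */
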
#if NBPFILTER > 0
#define	BRIDGE_BPF_MTAP_INPUT(sc, m)	do {				\
	if ((sc)->sc_bpf_input)						\
		bridge_bpf_input((sc)->sc_ifp, m);			\
} while (0)
#else /* NBPFILTER */
#define	BRIDGE_BPF_MTAP_INPUT(sc, m)	do { } while (0)
#endif /* NBPFILTER */
250
251/*
252 * Initial size of the route hash table.  Must be a power of two.
253 */
254#ifndef BRIDGE_RTHASH_SIZE
255#define	BRIDGE_RTHASH_SIZE		16
256#endif
257
258/*
259 * Maximum size of the routing hash table
260 */
261#define	BRIDGE_RTHASH_SIZE_MAX		2048
262
263#define	BRIDGE_RTHASH_MASK(sc)		((sc)->sc_rthash_size - 1)
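
/*
 * Illustrative note: because the table size is kept a power of two,
 * reducing a hash value to a bucket index is a single mask operation;
 * with the initial size of 16, for example, a hash of 0x1234abcd maps
 * to bucket (0x1234abcd & 0xf) == 0xd.  The lookup and insert code
 * later in this file is expected to apply BRIDGE_RTHASH_MASK(sc) in
 * exactly this way.
 */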
264
265/*
266 * Maximum number of addresses to cache.
267 */
268#ifndef BRIDGE_RTABLE_MAX
269#define	BRIDGE_RTABLE_MAX		100
270#endif
271
272
273/*
274 * Timeout (in seconds) for entries learned dynamically.
275 */
276#ifndef BRIDGE_RTABLE_TIMEOUT
277#define	BRIDGE_RTABLE_TIMEOUT		(20 * 60)	/* same as ARP */
278#endif
279
280/*
281 * Number of seconds between walks of the route list.
282 */
283#ifndef BRIDGE_RTABLE_PRUNE_PERIOD
284#define	BRIDGE_RTABLE_PRUNE_PERIOD	(5 * 60)
285#endif
286
287/*
288 * List of capabilities to possibly mask on the member interface.
289 */
290#define	BRIDGE_IFCAPS_MASK		(IFCAP_TOE|IFCAP_TSO|IFCAP_TXCSUM)
291/*
292 * List of capabilities to disable on the member interface.
293 */
294#define	BRIDGE_IFCAPS_STRIP		IFCAP_LRO
295
296/*
297 * Bridge interface list entry.
298 */
299struct bridge_iflist {
300	TAILQ_ENTRY(bridge_iflist) bif_next;
301	struct ifnet		*bif_ifp;	/* member if */
302	struct bstp_port	bif_stp;	/* STP state */
303	uint32_t		bif_ifflags;	/* member if flags */
304	int			bif_savedcaps;	/* saved capabilities */
305	uint32_t		bif_addrmax;	/* max # of addresses */
306	uint32_t		bif_addrcnt;	/* cur. # of addresses */
307	uint32_t		bif_addrexceeded; /* # of address violations */
308
309	interface_filter_t	bif_iff_ref;
310	struct bridge_softc	*bif_sc;
311	uint32_t		bif_flags;
312
313	struct in_addr		bif_hf_ipsrc;
314	uint8_t			bif_hf_hwsrc[ETHER_ADDR_LEN];
315};
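
/*
 * Sketch (the helper name below is hypothetical): members are kept on
 * the owning softc's sc_iflist tail queue, so per-member work is done
 * by walking that list with the bridge lock held:
 *
 *	struct bridge_iflist *bif;
 *
 *	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next)
 *		handle_member(bif->bif_ifp);
 */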
316
317#define	BIFF_PROMISC		0x01	/* promiscuous mode set */
318#define	BIFF_PROTO_ATTACHED	0x02	/* protocol attached */
319#define	BIFF_FILTER_ATTACHED	0x04	/* interface filter attached */
320#define	BIFF_MEDIA_ACTIVE	0x08	/* interface media active */
321#define	BIFF_HOST_FILTER	0x10	/* host filter enabled */
322#define	BIFF_HF_HWSRC		0x20	/* host filter source MAC is set */
323#define	BIFF_HF_IPSRC		0x40	/* host filter source IP is set */
324
325/*
326 * Bridge route node.
327 */
328struct bridge_rtnode {
329	LIST_ENTRY(bridge_rtnode) brt_hash;	/* hash table linkage */
330	LIST_ENTRY(bridge_rtnode) brt_list;	/* list linkage */
331	struct bridge_iflist	*brt_dst;	/* destination if */
332	unsigned long		brt_expire;	/* expiration time */
333	uint8_t			brt_flags;	/* address flags */
334	uint8_t			brt_addr[ETHER_ADDR_LEN];
335	uint16_t		brt_vlan;	/* vlan id */
336
337};
338#define	brt_ifp			brt_dst->bif_ifp
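
/*
 * Each learned address is linked onto two lists at once: a hash chain
 * (brt_hash) chosen by bridge_rtnode_hash() for fast lookup, and the
 * flat sc_rtlist (brt_list) that the aging and flush paths walk.  The
 * brt_ifp shorthand above resolves to the member interface on which
 * the address was learned.
 */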
339
340/*
341 * Bridge delayed function call context
342 */
343typedef void (*bridge_delayed_func_t)(struct bridge_softc *);
344
345struct bridge_delayed_call {
346	struct bridge_softc	*bdc_sc;
347	bridge_delayed_func_t 	bdc_func; /* Function to call */
348	struct timespec 	bdc_ts;	/* Time to call */
349	u_int32_t		bdc_flags;
350	thread_call_t		bdc_thread_call;
351};
352
353#define	BDCF_OUTSTANDING 	0x01	/* Delayed call has been scheduled */
354#define	BDCF_CANCELLING		0x02	/* May be waiting for call completion */
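
/*
 * Sketch of how a delayed call is typically armed (the concrete call
 * sites live later in this file; the field assignments shown here are
 * illustrative):
 *
 *	sc->sc_aging_timer.bdc_sc = sc;
 *	sc->sc_aging_timer.bdc_func = bridge_aging_timer;
 *	sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
 *	bridge_schedule_delayed_call(&sc->sc_aging_timer);
 */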
355
356/*
357 * Software state for each bridge.
358 */
359
360LIST_HEAD(_bridge_rtnode_list, bridge_rtnode);
361
362struct bridge_softc {
363	struct ifnet		*sc_ifp;	/* make this an interface */
364	LIST_ENTRY(bridge_softc) sc_list;
365	decl_lck_mtx_data(,	sc_mtx);
366	void			*sc_cv;
367	uint32_t		sc_brtmax;	/* max # of addresses */
368	uint32_t		sc_brtcnt;	/* cur. # of addresses */
369	uint32_t		sc_brttimeout;	/* rt timeout in seconds */
370	uint32_t		sc_iflist_ref;	/* refcount for sc_iflist */
	uint32_t		sc_iflist_xcnt;	/* exclusive access count for sc_iflist */
372	TAILQ_HEAD(, bridge_iflist) sc_iflist;	/* member interface list */
373	struct _bridge_rtnode_list *sc_rthash;	/* our forwarding table */
374	struct _bridge_rtnode_list sc_rtlist;	/* list version of above */
375	uint32_t		sc_rthash_key;	/* key for hash */
376	uint32_t		sc_rthash_size;	/* size of the hash table */
377	TAILQ_HEAD(, bridge_iflist) sc_spanlist;	/* span ports list */
378	struct bstp_state	sc_stp;		/* STP state */
379	uint32_t		sc_brtexceeded;	/* # of cache drops */
380	uint32_t		sc_filter_flags; /* ipf and flags */
381	struct ifnet		*sc_ifaddr;	/* member mac copied from */
	u_char			sc_defaddr[ETHER_ADDR_LEN]; /* Default MAC address */
383
384	char			sc_if_xname[IFNAMSIZ];
385	bpf_packet_func		sc_bpf_input;
386	bpf_packet_func		sc_bpf_output;
387	u_int32_t		sc_flags;
388	struct bridge_delayed_call sc_aging_timer;
389	struct bridge_delayed_call sc_resize_call;
390
391#if BRIDGE_DEBUG
392	/*
393	 * Locking and unlocking calling history
394	 */
395	void			*lock_lr[BR_LCKDBG_MAX];
396	int			next_lock_lr;
397	void			*unlock_lr[BR_LCKDBG_MAX];
398	int			next_unlock_lr;
399#endif /* BRIDGE_DEBUG */
400};
401
402#define	SCF_DETACHING 0x01
403#define	SCF_RESIZING 0x02
404#define	SCF_MEDIA_ACTIVE 0x04
405
406struct bridge_hostfilter_stats bridge_hostfilter_stats;
407
408decl_lck_mtx_data(static, bridge_list_mtx);
409
410static int	bridge_rtable_prune_period = BRIDGE_RTABLE_PRUNE_PERIOD;
411
412static zone_t	bridge_rtnode_pool = NULL;
413
414static int	bridge_clone_create(struct if_clone *, uint32_t, void *);
415static int	bridge_clone_destroy(struct ifnet *);
416
417static errno_t	bridge_ioctl(struct ifnet *, u_long, void *);
418#if HAS_IF_CAP
419static void	bridge_mutecaps(struct bridge_softc *);
420static void	bridge_set_ifcap(struct bridge_softc *, struct bridge_iflist *,
421		    int);
422#endif
423static errno_t bridge_set_tso(struct bridge_softc *);
424__private_extern__ void	bridge_ifdetach(struct bridge_iflist *, struct ifnet *);
425static int	bridge_init(struct ifnet *);
426#if HAS_BRIDGE_DUMMYNET
427static void	bridge_dummynet(struct mbuf *, struct ifnet *);
428#endif
429static void	bridge_ifstop(struct ifnet *, int);
430static int	bridge_output(struct ifnet *, struct mbuf *);
431static void	bridge_finalize_cksum(struct ifnet *, struct mbuf *);
432static void	bridge_start(struct ifnet *);
433__private_extern__ errno_t bridge_input(struct ifnet *, struct mbuf *, void *);
434#if BRIDGE_MEMBER_OUT_FILTER
435static errno_t bridge_iff_output(void *, ifnet_t, protocol_family_t,
436	mbuf_t *);
437static int	bridge_member_output(struct ifnet *, struct mbuf *,
438		    struct sockaddr *, struct rtentry *);
439#endif
440static int	bridge_enqueue(struct bridge_softc *, struct ifnet *,
441		    struct mbuf *);
442static void	bridge_rtdelete(struct bridge_softc *, struct ifnet *ifp, int);
443
444static void	bridge_forward(struct bridge_softc *, struct bridge_iflist *,
445		    struct mbuf *);
446
447static void	bridge_aging_timer(struct bridge_softc *sc);
448
449static void	bridge_broadcast(struct bridge_softc *, struct ifnet *,
450		    struct mbuf *, int);
451static void	bridge_span(struct bridge_softc *, struct mbuf *);
452
453static int	bridge_rtupdate(struct bridge_softc *, const uint8_t *,
454		    uint16_t, struct bridge_iflist *, int, uint8_t);
455static struct ifnet *bridge_rtlookup(struct bridge_softc *, const uint8_t *,
456		    uint16_t);
457static void	bridge_rttrim(struct bridge_softc *);
458static void	bridge_rtage(struct bridge_softc *);
459static void	bridge_rtflush(struct bridge_softc *, int);
460static int	bridge_rtdaddr(struct bridge_softc *, const uint8_t *,
461		    uint16_t);
462
463static int	bridge_rtable_init(struct bridge_softc *);
464static void	bridge_rtable_fini(struct bridge_softc *);
465
466static void	bridge_rthash_resize(struct bridge_softc *);
467
468static int	bridge_rtnode_addr_cmp(const uint8_t *, const uint8_t *);
469static struct bridge_rtnode *bridge_rtnode_lookup(struct bridge_softc *,
470		    const uint8_t *, uint16_t);
471static int	bridge_rtnode_hash(struct bridge_softc *,
472		    struct bridge_rtnode *);
473static int	bridge_rtnode_insert(struct bridge_softc *,
474		    struct bridge_rtnode *);
475static void	bridge_rtnode_destroy(struct bridge_softc *,
476		    struct bridge_rtnode *);
477#if BRIDGESTP
478static void	bridge_rtable_expire(struct ifnet *, int);
479static void	bridge_state_change(struct ifnet *, int);
480#endif /* BRIDGESTP */
481
482static struct bridge_iflist *bridge_lookup_member(struct bridge_softc *,
483		    const char *name);
484static struct bridge_iflist *bridge_lookup_member_if(struct bridge_softc *,
485		    struct ifnet *ifp);
486static void	bridge_delete_member(struct bridge_softc *,
487		    struct bridge_iflist *, int);
488static void	bridge_delete_span(struct bridge_softc *,
489		    struct bridge_iflist *);
490
491static int	bridge_ioctl_add(struct bridge_softc *, void *);
492static int	bridge_ioctl_del(struct bridge_softc *, void *);
493static int	bridge_ioctl_gifflags(struct bridge_softc *, void *);
494static int	bridge_ioctl_sifflags(struct bridge_softc *, void *);
495static int	bridge_ioctl_scache(struct bridge_softc *, void *);
496static int	bridge_ioctl_gcache(struct bridge_softc *, void *);
497static int	bridge_ioctl_gifs32(struct bridge_softc *, void *);
498static int	bridge_ioctl_gifs64(struct bridge_softc *, void *);
499static int	bridge_ioctl_rts32(struct bridge_softc *, void *);
500static int	bridge_ioctl_rts64(struct bridge_softc *, void *);
501static int	bridge_ioctl_saddr32(struct bridge_softc *, void *);
502static int	bridge_ioctl_saddr64(struct bridge_softc *, void *);
503static int	bridge_ioctl_sto(struct bridge_softc *, void *);
504static int	bridge_ioctl_gto(struct bridge_softc *, void *);
505static int	bridge_ioctl_daddr32(struct bridge_softc *, void *);
506static int	bridge_ioctl_daddr64(struct bridge_softc *, void *);
507static int	bridge_ioctl_flush(struct bridge_softc *, void *);
508static int	bridge_ioctl_gpri(struct bridge_softc *, void *);
509static int	bridge_ioctl_spri(struct bridge_softc *, void *);
510static int	bridge_ioctl_ght(struct bridge_softc *, void *);
511static int	bridge_ioctl_sht(struct bridge_softc *, void *);
512static int	bridge_ioctl_gfd(struct bridge_softc *, void *);
513static int	bridge_ioctl_sfd(struct bridge_softc *, void *);
514static int	bridge_ioctl_gma(struct bridge_softc *, void *);
515static int	bridge_ioctl_sma(struct bridge_softc *, void *);
516static int	bridge_ioctl_sifprio(struct bridge_softc *, void *);
517static int	bridge_ioctl_sifcost(struct bridge_softc *, void *);
518static int	bridge_ioctl_sifmaxaddr(struct bridge_softc *, void *);
519static int	bridge_ioctl_addspan(struct bridge_softc *, void *);
520static int	bridge_ioctl_delspan(struct bridge_softc *, void *);
521static int	bridge_ioctl_gbparam32(struct bridge_softc *, void *);
522static int	bridge_ioctl_gbparam64(struct bridge_softc *, void *);
523static int	bridge_ioctl_grte(struct bridge_softc *, void *);
524static int	bridge_ioctl_gifsstp32(struct bridge_softc *, void *);
525static int	bridge_ioctl_gifsstp64(struct bridge_softc *, void *);
526static int	bridge_ioctl_sproto(struct bridge_softc *, void *);
527static int	bridge_ioctl_stxhc(struct bridge_softc *, void *);
528static int	bridge_ioctl_purge(struct bridge_softc *sc, void *);
529static int	bridge_ioctl_gfilt(struct bridge_softc *, void *);
530static int	bridge_ioctl_sfilt(struct bridge_softc *, void *);
531static int	bridge_ioctl_ghostfilter(struct bridge_softc *, void *);
532static int	bridge_ioctl_shostfilter(struct bridge_softc *, void *);
533#ifdef PFIL_HOOKS
534static int	bridge_pfil(struct mbuf **, struct ifnet *, struct ifnet *,
535		    int);
536static int	bridge_ip_checkbasic(struct mbuf **);
537#ifdef INET6
538static int	bridge_ip6_checkbasic(struct mbuf **);
539#endif /* INET6 */
540static int	bridge_fragment(struct ifnet *, struct mbuf *,
541		    struct ether_header *, int, struct llc *);
542#endif /* PFIL_HOOKS */
543
544static errno_t bridge_set_bpf_tap(ifnet_t, bpf_tap_mode, bpf_packet_func);
545__private_extern__ errno_t bridge_bpf_input(ifnet_t, struct mbuf *);
546__private_extern__ errno_t bridge_bpf_output(ifnet_t, struct mbuf *);
547
548static void bridge_detach(ifnet_t);
549static void bridge_link_event(struct ifnet *, u_int32_t);
550static void bridge_iflinkevent(struct ifnet *);
551static u_int32_t bridge_updatelinkstatus(struct bridge_softc *);
552static int interface_media_active(struct ifnet *);
553static void bridge_schedule_delayed_call(struct bridge_delayed_call *);
554static void bridge_cancel_delayed_call(struct bridge_delayed_call *);
555static void bridge_cleanup_delayed_call(struct bridge_delayed_call *);
556static int bridge_host_filter(struct bridge_iflist *, struct mbuf *);
557
558#define	m_copypacket(m, how) m_copym(m, 0, M_COPYALL, how)
559
/*
 * The default bridge vlan is 1 (IEEE 802.1Q-2003 Table 9-2).  The mbuf
 * VLAN tag is not examined here, so every frame is treated as untagged
 * (vlan 0).
 */
#define	VLANTAGOF(_m)	0
562
563u_int8_t bstp_etheraddr[ETHER_ADDR_LEN] =
564	{ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
565
566static u_int8_t ethernulladdr[ETHER_ADDR_LEN] =
567	{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
568
569#if BRIDGESTP
570static struct bstp_cb_ops bridge_ops = {
571	.bcb_state = bridge_state_change,
572	.bcb_rtage = bridge_rtable_expire
573};
574#endif /* BRIDGESTP */
575
576SYSCTL_DECL(_net_link);
577SYSCTL_NODE(_net_link, IFT_BRIDGE, bridge, CTLFLAG_RW|CTLFLAG_LOCKED, 0,
578	"Bridge");
579
580static int bridge_inherit_mac = 0;   /* share MAC with first bridge member */
581SYSCTL_INT(_net_link_bridge, OID_AUTO, inherit_mac,
582	CTLFLAG_RW|CTLFLAG_LOCKED,
583	&bridge_inherit_mac, 0,
584	"Inherit MAC address from the first bridge member");
585
586SYSCTL_INT(_net_link_bridge, OID_AUTO, rtable_prune_period,
587	CTLFLAG_RW|CTLFLAG_LOCKED,
588	&bridge_rtable_prune_period, 0,
589	"Interval between pruning of routing table");
590
591static unsigned int bridge_rtable_hash_size_max = BRIDGE_RTHASH_SIZE_MAX;
592SYSCTL_UINT(_net_link_bridge, OID_AUTO, rtable_hash_size_max,
593	CTLFLAG_RW|CTLFLAG_LOCKED,
594	&bridge_rtable_hash_size_max, 0,
595	"Maximum size of the routing hash table");
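
/*
 * These knobs are reachable from user space via sysctl(8); an
 * illustrative session:
 *
 *	sysctl net.link.bridge.inherit_mac=1
 *	sysctl net.link.bridge.rtable_prune_period=300
 */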
596
597#if BRIDGE_DEBUG_DELAYED_CALLBACK
598static int bridge_delayed_callback_delay = 0;
599SYSCTL_INT(_net_link_bridge, OID_AUTO, delayed_callback_delay,
600	CTLFLAG_RW|CTLFLAG_LOCKED,
601	&bridge_delayed_callback_delay, 0,
602	"Delay before calling delayed function");
603#endif
604
605SYSCTL_STRUCT(_net_link_bridge, OID_AUTO,
606	hostfilterstats, CTLFLAG_RD | CTLFLAG_LOCKED,
607	&bridge_hostfilter_stats, bridge_hostfilter_stats, "");
608
609#if defined(PFIL_HOOKS)
610static int pfil_onlyip = 1; /* only pass IP[46] packets when pfil is enabled */
611static int pfil_bridge = 1; /* run pfil hooks on the bridge interface */
612static int pfil_member = 1; /* run pfil hooks on the member interface */
613static int pfil_ipfw = 0;   /* layer2 filter with ipfw */
static int pfil_ipfw_arp = 0;   /* layer2 ARP filtering with ipfw */
615static int pfil_local_phys = 0; /* run pfil hooks on the physical interface */
616				/* for locally destined packets */
617SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_onlyip, CTLFLAG_RW|CTLFLAG_LOCKED,
618	&pfil_onlyip, 0, "Only pass IP packets when pfil is enabled");
619SYSCTL_INT(_net_link_bridge, OID_AUTO, ipfw_arp, CTLFLAG_RW|CTLFLAG_LOCKED,
620	&pfil_ipfw_arp, 0, "Filter ARP packets through IPFW layer2");
621SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_bridge, CTLFLAG_RW|CTLFLAG_LOCKED,
622	&pfil_bridge, 0, "Packet filter on the bridge interface");
623SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_member, CTLFLAG_RW|CTLFLAG_LOCKED,
624	&pfil_member, 0, "Packet filter on the member interface");
625SYSCTL_INT(_net_link_bridge, OID_AUTO, pfil_local_phys,
626	CTLFLAG_RW|CTLFLAG_LOCKED, &pfil_local_phys, 0,
627	"Packet filter on the physical interface for locally destined packets");
628#endif /* PFIL_HOOKS */
629
630#if BRIDGESTP
631static int log_stp   = 0;   /* log STP state changes */
632SYSCTL_INT(_net_link_bridge, OID_AUTO, log_stp, CTLFLAG_RW,
633	&log_stp, 0, "Log STP state changes");
634#endif /* BRIDGESTP */
635
636struct bridge_control {
637	int		(*bc_func)(struct bridge_softc *, void *);
638	unsigned int	bc_argsize;
639	unsigned int	bc_flags;
640};
641
642#define	BC_F_COPYIN		0x01	/* copy arguments in */
643#define	BC_F_COPYOUT		0x02	/* copy arguments out */
644#define	BC_F_SUSER		0x04	/* do super-user check */
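
/*
 * The ifd_cmd field of a SIOC[SG]DRVSPEC request indexes directly into
 * the tables below, so the 32-bit and 64-bit tables must stay in the
 * same order.  The BC_F_* flags tell the dispatcher (see the DRVSPEC
 * macro further down) whether to copy the argument in and/or out and
 * whether to require super-user credentials.
 */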
645
646static const struct bridge_control bridge_control_table32[] = {
647	{ bridge_ioctl_add,		sizeof (struct ifbreq),	/* 0 */
648	    BC_F_COPYIN|BC_F_SUSER },
649	{ bridge_ioctl_del,		sizeof (struct ifbreq),
650	    BC_F_COPYIN|BC_F_SUSER },
651
652	{ bridge_ioctl_gifflags,	sizeof (struct ifbreq),
653	    BC_F_COPYIN|BC_F_COPYOUT },
654	{ bridge_ioctl_sifflags,	sizeof (struct ifbreq),
655	    BC_F_COPYIN|BC_F_SUSER },
656
657	{ bridge_ioctl_scache,		sizeof (struct ifbrparam),
658	    BC_F_COPYIN|BC_F_SUSER },
659	{ bridge_ioctl_gcache,		sizeof (struct ifbrparam),
660	    BC_F_COPYOUT },
661
662	{ bridge_ioctl_gifs32,		sizeof (struct ifbifconf32),
663	    BC_F_COPYIN|BC_F_COPYOUT },
664	{ bridge_ioctl_rts32,		sizeof (struct ifbaconf32),
665	    BC_F_COPYIN|BC_F_COPYOUT },
666
667	{ bridge_ioctl_saddr32,		sizeof (struct ifbareq32),
668	    BC_F_COPYIN|BC_F_SUSER },
669
670	{ bridge_ioctl_sto,		sizeof (struct ifbrparam),
671	    BC_F_COPYIN|BC_F_SUSER },
672	{ bridge_ioctl_gto,		sizeof (struct ifbrparam), /* 10 */
673	    BC_F_COPYOUT },
674
675	{ bridge_ioctl_daddr32,		sizeof (struct ifbareq32),
676	    BC_F_COPYIN|BC_F_SUSER },
677
678	{ bridge_ioctl_flush,		sizeof (struct ifbreq),
679	    BC_F_COPYIN|BC_F_SUSER },
680
681	{ bridge_ioctl_gpri,		sizeof (struct ifbrparam),
682	    BC_F_COPYOUT },
683	{ bridge_ioctl_spri,		sizeof (struct ifbrparam),
684	    BC_F_COPYIN|BC_F_SUSER },
685
686	{ bridge_ioctl_ght,		sizeof (struct ifbrparam),
687	    BC_F_COPYOUT },
688	{ bridge_ioctl_sht,		sizeof (struct ifbrparam),
689	    BC_F_COPYIN|BC_F_SUSER },
690
691	{ bridge_ioctl_gfd,		sizeof (struct ifbrparam),
692	    BC_F_COPYOUT },
693	{ bridge_ioctl_sfd,		sizeof (struct ifbrparam),
694	    BC_F_COPYIN|BC_F_SUSER },
695
696	{ bridge_ioctl_gma,		sizeof (struct ifbrparam),
697	    BC_F_COPYOUT },
698	{ bridge_ioctl_sma,		sizeof (struct ifbrparam), /* 20 */
699	    BC_F_COPYIN|BC_F_SUSER },
700
701	{ bridge_ioctl_sifprio,		sizeof (struct ifbreq),
702	    BC_F_COPYIN|BC_F_SUSER },
703
704	{ bridge_ioctl_sifcost,		sizeof (struct ifbreq),
705	    BC_F_COPYIN|BC_F_SUSER },
706
707	{ bridge_ioctl_gfilt,		sizeof (struct ifbrparam),
708	    BC_F_COPYOUT },
709	{ bridge_ioctl_sfilt,		sizeof (struct ifbrparam),
710	    BC_F_COPYIN|BC_F_SUSER },
711
712	{ bridge_ioctl_purge,		sizeof (struct ifbreq),
713	    BC_F_COPYIN|BC_F_SUSER },
714
715	{ bridge_ioctl_addspan,		sizeof (struct ifbreq),
716		BC_F_COPYIN|BC_F_SUSER },
717	{ bridge_ioctl_delspan,		sizeof (struct ifbreq),
718		BC_F_COPYIN|BC_F_SUSER },
719
720	{ bridge_ioctl_gbparam32,	sizeof (struct ifbropreq32),
721	    BC_F_COPYOUT },
722
723	{ bridge_ioctl_grte,		sizeof (struct ifbrparam),
724	    BC_F_COPYOUT },
725
726	{ bridge_ioctl_gifsstp32,	sizeof (struct ifbpstpconf32), /* 30 */
727	    BC_F_COPYIN|BC_F_COPYOUT },
728
729	{ bridge_ioctl_sproto,		sizeof (struct ifbrparam),
730	    BC_F_COPYIN|BC_F_SUSER },
731
732	{ bridge_ioctl_stxhc,		sizeof (struct ifbrparam),
733	    BC_F_COPYIN|BC_F_SUSER },
734
735	{ bridge_ioctl_sifmaxaddr,	sizeof (struct ifbreq),
736	    BC_F_COPYIN|BC_F_SUSER },
737
738	{ bridge_ioctl_ghostfilter,	sizeof (struct ifbrhostfilter),
739	    BC_F_COPYIN|BC_F_COPYOUT },
740	{ bridge_ioctl_shostfilter,	sizeof (struct ifbrhostfilter),
741	    BC_F_COPYIN|BC_F_SUSER },
742};
743
744static const struct bridge_control bridge_control_table64[] = {
745	{ bridge_ioctl_add,		sizeof (struct ifbreq), /* 0 */
746	    BC_F_COPYIN|BC_F_SUSER },
747	{ bridge_ioctl_del,		sizeof (struct ifbreq),
748	    BC_F_COPYIN|BC_F_SUSER },
749
750	{ bridge_ioctl_gifflags,	sizeof (struct ifbreq),
751	    BC_F_COPYIN|BC_F_COPYOUT },
752	{ bridge_ioctl_sifflags,	sizeof (struct ifbreq),
753	    BC_F_COPYIN|BC_F_SUSER },
754
755	{ bridge_ioctl_scache,		sizeof (struct ifbrparam),
756	    BC_F_COPYIN|BC_F_SUSER },
757	{ bridge_ioctl_gcache,		sizeof (struct ifbrparam),
758	    BC_F_COPYOUT },
759
760	{ bridge_ioctl_gifs64,		sizeof (struct ifbifconf64),
761	    BC_F_COPYIN|BC_F_COPYOUT },
762	{ bridge_ioctl_rts64,		sizeof (struct ifbaconf64),
763	    BC_F_COPYIN|BC_F_COPYOUT },
764
765	{ bridge_ioctl_saddr64,		sizeof (struct ifbareq64),
766	    BC_F_COPYIN|BC_F_SUSER },
767
768	{ bridge_ioctl_sto,		sizeof (struct ifbrparam),
769	    BC_F_COPYIN|BC_F_SUSER },
770	{ bridge_ioctl_gto,		sizeof (struct ifbrparam), /* 10 */
771	    BC_F_COPYOUT },
772
773	{ bridge_ioctl_daddr64,		sizeof (struct ifbareq64),
774	    BC_F_COPYIN|BC_F_SUSER },
775
776	{ bridge_ioctl_flush,		sizeof (struct ifbreq),
777	    BC_F_COPYIN|BC_F_SUSER },
778
779	{ bridge_ioctl_gpri,		sizeof (struct ifbrparam),
780	    BC_F_COPYOUT },
781	{ bridge_ioctl_spri,		sizeof (struct ifbrparam),
782	    BC_F_COPYIN|BC_F_SUSER },
783
784	{ bridge_ioctl_ght,		sizeof (struct ifbrparam),
785	    BC_F_COPYOUT },
786	{ bridge_ioctl_sht,		sizeof (struct ifbrparam),
787	    BC_F_COPYIN|BC_F_SUSER },
788
789	{ bridge_ioctl_gfd,		sizeof (struct ifbrparam),
790	    BC_F_COPYOUT },
791	{ bridge_ioctl_sfd,		sizeof (struct ifbrparam),
792	    BC_F_COPYIN|BC_F_SUSER },
793
794	{ bridge_ioctl_gma,		sizeof (struct ifbrparam),
795	    BC_F_COPYOUT },
796	{ bridge_ioctl_sma,		sizeof (struct ifbrparam), /* 20 */
797	    BC_F_COPYIN|BC_F_SUSER },
798
799	{ bridge_ioctl_sifprio,		sizeof (struct ifbreq),
800	    BC_F_COPYIN|BC_F_SUSER },
801
802	{ bridge_ioctl_sifcost,		sizeof (struct ifbreq),
803	    BC_F_COPYIN|BC_F_SUSER },
804
805	{ bridge_ioctl_gfilt,		sizeof (struct ifbrparam),
806	    BC_F_COPYOUT },
807	{ bridge_ioctl_sfilt,		sizeof (struct ifbrparam),
808	    BC_F_COPYIN|BC_F_SUSER },
809
810	{ bridge_ioctl_purge,	sizeof (struct ifbreq),
811	    BC_F_COPYIN|BC_F_SUSER },
812
813	{ bridge_ioctl_addspan,		sizeof (struct ifbreq),
814	    BC_F_COPYIN|BC_F_SUSER },
815	{ bridge_ioctl_delspan,		sizeof (struct ifbreq),
816	    BC_F_COPYIN|BC_F_SUSER },
817
818	{ bridge_ioctl_gbparam64,	sizeof (struct ifbropreq64),
819	    BC_F_COPYOUT },
820
821	{ bridge_ioctl_grte,		sizeof (struct ifbrparam),
822	    BC_F_COPYOUT },
823
824	{ bridge_ioctl_gifsstp64,	sizeof (struct ifbpstpconf64), /* 30 */
825	    BC_F_COPYIN|BC_F_COPYOUT },
826
827	{ bridge_ioctl_sproto,		sizeof (struct ifbrparam),
828	    BC_F_COPYIN|BC_F_SUSER },
829
830	{ bridge_ioctl_stxhc,		sizeof (struct ifbrparam),
831	    BC_F_COPYIN|BC_F_SUSER },
832
833	{ bridge_ioctl_sifmaxaddr,	sizeof (struct ifbreq),
834	    BC_F_COPYIN|BC_F_SUSER },
835
836	{ bridge_ioctl_ghostfilter,	sizeof (struct ifbrhostfilter),
837	    BC_F_COPYIN|BC_F_COPYOUT },
838	{ bridge_ioctl_shostfilter,	sizeof (struct ifbrhostfilter),
839	    BC_F_COPYIN|BC_F_SUSER },
840};
841
842static const unsigned int bridge_control_table_size =
843	sizeof (bridge_control_table32) / sizeof (bridge_control_table32[0]);
844
845static LIST_HEAD(, bridge_softc) bridge_list =
846	LIST_HEAD_INITIALIZER(bridge_list);
847
848static lck_grp_t *bridge_lock_grp = NULL;
849static lck_attr_t *bridge_lock_attr = NULL;
850
851static if_clone_t bridge_cloner = NULL;
852
853static int if_bridge_txstart = 0;
854SYSCTL_INT(_net_link_bridge, OID_AUTO, txstart, CTLFLAG_RW | CTLFLAG_LOCKED,
855	&if_bridge_txstart, 0, "Bridge interface uses TXSTART model");
856
857#if BRIDGE_DEBUG
858static int if_bridge_debug = 0;
859SYSCTL_INT(_net_link_bridge, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED,
860	&if_bridge_debug, 0, "Bridge debug");
861
862static void printf_ether_header(struct ether_header *);
863static void printf_mbuf_data(mbuf_t, size_t, size_t);
864static void printf_mbuf_pkthdr(mbuf_t, const char *, const char *);
865static void printf_mbuf(mbuf_t, const char *, const char *);
866static void link_print(struct sockaddr_dl *);
867
868static void bridge_lock(struct bridge_softc *);
869static void bridge_unlock(struct bridge_softc *);
870static int bridge_lock2ref(struct bridge_softc *);
871static void bridge_unref(struct bridge_softc *);
872static void bridge_xlock(struct bridge_softc *);
873static void bridge_xdrop(struct bridge_softc *);
874
875static void
876bridge_lock(struct bridge_softc *sc)
877{
878	void *lr_saved = __builtin_return_address(0);
879
880	BRIDGE_LOCK_ASSERT_NOTHELD(sc);
881
882	_BRIDGE_LOCK(sc);
883
884	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr+1) % BR_LCKDBG_MAX;
886}
887
888static void
889bridge_unlock(struct bridge_softc *sc)
890{
891	void *lr_saved = __builtin_return_address(0);
892
893	BRIDGE_LOCK_ASSERT_HELD(sc);
894
895	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
897
898	_BRIDGE_UNLOCK(sc);
899}
900
901static int
902bridge_lock2ref(struct bridge_softc *sc)
903{
904	int error = 0;
905	void *lr_saved = __builtin_return_address(0);
906
907	BRIDGE_LOCK_ASSERT_HELD(sc);
908
909	if (sc->sc_iflist_xcnt > 0)
910		error = EBUSY;
911	else
912		sc->sc_iflist_ref++;
913
914	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
916
917	_BRIDGE_UNLOCK(sc);
918
919	return (error);
920}
921
922static void
923bridge_unref(struct bridge_softc *sc)
924{
925	void *lr_saved = __builtin_return_address(0);
926
927	BRIDGE_LOCK_ASSERT_NOTHELD(sc);
928
929	_BRIDGE_LOCK(sc);
930	sc->lock_lr[sc->next_lock_lr] = lr_saved;
	sc->next_lock_lr = (sc->next_lock_lr+1) % BR_LCKDBG_MAX;
932
933	sc->sc_iflist_ref--;
934
935	sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
	sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
937	if ((sc->sc_iflist_xcnt > 0) && (sc->sc_iflist_ref == 0)) {
938		_BRIDGE_UNLOCK(sc);
939		wakeup(&sc->sc_cv);
940	} else
941		_BRIDGE_UNLOCK(sc);
942}
943
944static void
945bridge_xlock(struct bridge_softc *sc)
946{
947	void *lr_saved = __builtin_return_address(0);
948
949	BRIDGE_LOCK_ASSERT_HELD(sc);
950
951	sc->sc_iflist_xcnt++;
952	while (sc->sc_iflist_ref > 0) {
953		sc->unlock_lr[sc->next_unlock_lr] = lr_saved;
		sc->next_unlock_lr = (sc->next_unlock_lr+1) % BR_LCKDBG_MAX;
955
956		msleep(&sc->sc_cv, &sc->sc_mtx, PZERO, "BRIDGE_XLOCK", NULL);
957
958		sc->lock_lr[sc->next_lock_lr] = lr_saved;
		sc->next_lock_lr = (sc->next_lock_lr+1) % BR_LCKDBG_MAX;
960	}
961}
962
963static void
964bridge_xdrop(struct bridge_softc *sc)
965{
966	BRIDGE_LOCK_ASSERT_HELD(sc);
967
968	sc->sc_iflist_xcnt--;
969}
970
static void
972printf_mbuf_pkthdr(mbuf_t m, const char *prefix, const char *suffix)
973{
974	if (m)
975		printf("%spktlen: %u rcvif: 0x%llx header: 0x%llx "
976		    "nextpkt: 0x%llx%s",
977		    prefix ? prefix : "", (unsigned int)mbuf_pkthdr_len(m),
978		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
979		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_header(m)),
980		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_nextpkt(m)),
981		    suffix ? suffix : "");
982	else
		printf("%s<NULL>%s\n", prefix ? prefix : "",
		    suffix ? suffix : "");
984}
985
static void
987printf_mbuf(mbuf_t m, const char *prefix, const char *suffix)
988{
989	if (m) {
990		printf("%s0x%llx type: %u flags: 0x%x len: %u data: 0x%llx "
991		    "maxlen: %u datastart: 0x%llx next: 0x%llx%s",
992		    prefix ? prefix : "", (uint64_t)VM_KERNEL_ADDRPERM(m),
993		    mbuf_type(m), mbuf_flags(m), (unsigned int)mbuf_len(m),
994		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
995		    (unsigned int)mbuf_maxlen(m),
996		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
997		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_next(m)),
998		    !suffix || (mbuf_flags(m) & MBUF_PKTHDR) ? "" : suffix);
999		if ((mbuf_flags(m) & MBUF_PKTHDR))
1000			printf_mbuf_pkthdr(m, " ", suffix);
1001	} else
		printf("%s<NULL>%s\n", prefix ? prefix : "",
		    suffix ? suffix : "");
1003}
1004
static void
1006printf_mbuf_data(mbuf_t m, size_t offset, size_t len)
1007{
1008	mbuf_t			n;
1009	size_t			i, j;
1010	size_t			pktlen, mlen, maxlen;
1011	unsigned char	*ptr;
1012
1013	pktlen = mbuf_pkthdr_len(m);
1014
1015	if (offset > pktlen)
1016		return;
1017
1018	maxlen = (pktlen - offset > len) ? len : pktlen - offset;
1019	n = m;
1020	mlen = mbuf_len(n);
1021	ptr = mbuf_data(n);
	for (i = 0, j = 0; i < offset + maxlen; i++, j++) {
		if (j >= mlen) {
			n = mbuf_next(n);
			if (n == NULL)
1026				break;
1027			ptr = mbuf_data(n);
1028			mlen = mbuf_len(n);
1029			j = 0;
1030		}
1031		if (i >= offset) {
1032			printf("%02x%s", ptr[j], i % 2 ? " " : "");
1033		}
1034	}
1035}
1036
1037static void
1038printf_ether_header(struct ether_header *eh)
1039{
1040	printf("%02x:%02x:%02x:%02x:%02x:%02x > "
1041	    "%02x:%02x:%02x:%02x:%02x:%02x 0x%04x ",
1042	    eh->ether_shost[0], eh->ether_shost[1], eh->ether_shost[2],
1043	    eh->ether_shost[3], eh->ether_shost[4], eh->ether_shost[5],
1044	    eh->ether_dhost[0], eh->ether_dhost[1], eh->ether_dhost[2],
1045	    eh->ether_dhost[3], eh->ether_dhost[4], eh->ether_dhost[5],
1046	    ntohs(eh->ether_type));
1047}
1048
1049static void
1050link_print(struct sockaddr_dl *dl_p)
1051{
1052	int i;
1053
1054#if 1
1055	printf("sdl len %d index %d family %d type 0x%x nlen %d alen %d"
1056	    " slen %d addr ", dl_p->sdl_len, dl_p->sdl_index,
1057	    dl_p->sdl_family, dl_p->sdl_type, dl_p->sdl_nlen,
1058	    dl_p->sdl_alen, dl_p->sdl_slen);
1059#endif
1060	for (i = 0; i < dl_p->sdl_alen; i++)
1061		printf("%s%x", i ? ":" : "", (CONST_LLADDR(dl_p))[i]);
1062	printf("\n");
1063}
1064
1065#endif /* BRIDGE_DEBUG */
1066
1067/*
1068 * bridgeattach:
1069 *
1070 *	Pseudo-device attach routine.
1071 */
1072__private_extern__ int
1073bridgeattach(int n)
1074{
1075#pragma unused(n)
1076	int error;
1077	lck_grp_attr_t *lck_grp_attr = NULL;
1078	struct ifnet_clone_params ifnet_clone_params;
1079
1080	bridge_rtnode_pool = zinit(sizeof (struct bridge_rtnode),
1081	    1024 * sizeof (struct bridge_rtnode), 0, "bridge_rtnode");
1082	zone_change(bridge_rtnode_pool, Z_CALLERACCT, FALSE);
1083
1084	lck_grp_attr = lck_grp_attr_alloc_init();
1085
1086	bridge_lock_grp = lck_grp_alloc_init("if_bridge", lck_grp_attr);
1087
1088	bridge_lock_attr = lck_attr_alloc_init();
1089
1090#if BRIDGE_DEBUG
1091	lck_attr_setdebug(bridge_lock_attr);
1092#endif
1093
1094	lck_mtx_init(&bridge_list_mtx, bridge_lock_grp, bridge_lock_attr);
1095
1096	/* can free the attributes once we've allocated the group lock */
1097	lck_grp_attr_free(lck_grp_attr);
1098
1099	LIST_INIT(&bridge_list);
1100
1101#if BRIDGESTP
1102	bstp_sys_init();
1103#endif /* BRIDGESTP */
1104
1105	ifnet_clone_params.ifc_name = "bridge";
1106	ifnet_clone_params.ifc_create = bridge_clone_create;
1107	ifnet_clone_params.ifc_destroy = bridge_clone_destroy;
1108
1109	error = ifnet_clone_attach(&ifnet_clone_params, &bridge_cloner);
1110	if (error != 0)
1111		printf("%s: ifnet_clone_attach failed %d\n", __func__, error);
1112
1113	return (error);
1114}
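
/*
 * Once the cloner is attached, bridges are created and populated from
 * user space; an illustrative ifconfig(8) session:
 *
 *	ifconfig bridge0 create
 *	ifconfig bridge0 addm en0 addm en1 up
 *	ifconfig bridge0 destroy
 *
 * The create/destroy requests are routed to bridge_clone_create() and
 * bridge_clone_destroy() below; addm/deletem arrive via bridge_ioctl()
 * as SIOCSDRVSPEC commands.
 */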
1115
1116#if defined(PFIL_HOOKS)
1117/*
1118 * handler for net.link.bridge.pfil_ipfw
1119 */
1120static int
1121sysctl_pfil_ipfw SYSCTL_HANDLER_ARGS
1122{
1123#pragma unused(arg1, arg2)
1124	int enable = pfil_ipfw;
1125	int error;
1126
1127	error = sysctl_handle_int(oidp, &enable, 0, req);
1128	enable = (enable) ? 1 : 0;
1129
1130	if (enable != pfil_ipfw) {
1131		pfil_ipfw = enable;
1132
1133		/*
		 * Disable pfil so that ipfw doesn't run twice; if the user
		 * really wants both, they can re-enable pfil_bridge and/or
		 * pfil_member.  Also allow non-IP packets, as ipfw can filter
		 * by layer2 type.
1138		 */
1139		if (pfil_ipfw) {
1140			pfil_onlyip = 0;
1141			pfil_bridge = 0;
1142			pfil_member = 0;
1143		}
1144	}
1145
1146	return (error);
1147}
1148
1149SYSCTL_PROC(_net_link_bridge, OID_AUTO, ipfw, CTLTYPE_INT|CTLFLAG_RW,
1150	    &pfil_ipfw, 0, &sysctl_pfil_ipfw, "I", "Layer2 filter with IPFW");
1151#endif /* PFIL_HOOKS */
1152
1153/*
1154 * bridge_clone_create:
1155 *
1156 *	Create a new bridge instance.
1157 */
1158static int
1159bridge_clone_create(struct if_clone *ifc, uint32_t unit, void *params)
1160{
1161#pragma unused(params)
1162	struct ifnet *ifp = NULL;
1163	struct bridge_softc *sc, *sc2;
1164	struct ifnet_init_eparams init_params;
1165	errno_t error = 0;
1166	uint32_t sdl_buffer[offsetof(struct sockaddr_dl, sdl_data) +
1167	    IFNAMSIZ + ETHER_ADDR_LEN];
1168	struct sockaddr_dl *sdl = (struct sockaddr_dl *)sdl_buffer;
1169	uint8_t eth_hostid[ETHER_ADDR_LEN];
1170	int fb, retry, has_hostid;
1171
1172	sc = _MALLOC(sizeof (*sc), M_DEVBUF, M_WAITOK | M_ZERO);
1173
1174	lck_mtx_init(&sc->sc_mtx, bridge_lock_grp, bridge_lock_attr);
1175	sc->sc_brtmax = BRIDGE_RTABLE_MAX;
1176	sc->sc_brttimeout = BRIDGE_RTABLE_TIMEOUT;
1177	sc->sc_filter_flags = IFBF_FILT_DEFAULT;
1178#ifndef BRIDGE_IPF
1179	/*
1180	 * For backwards compatibility with previous behaviour...
1181	 * Switch off filtering on the bridge itself if BRIDGE_IPF is
1182	 * not defined.
1183	 */
1184	sc->sc_filter_flags &= ~IFBF_FILT_USEIPF;
1185#endif
1186
1187	/* Initialize our routing table. */
1188	error = bridge_rtable_init(sc);
1189	if (error != 0) {
1190		printf("%s: bridge_rtable_init failed %d\n", __func__, error);
1191		goto done;
1192	}
1193
1194	TAILQ_INIT(&sc->sc_iflist);
1195	TAILQ_INIT(&sc->sc_spanlist);
1196
1197	/* use the interface name as the unique id for ifp recycle */
1198	snprintf(sc->sc_if_xname, sizeof (sc->sc_if_xname), "%s%d",
1199	    ifc->ifc_name, unit);
1200	bzero(&init_params, sizeof (init_params));
1201	init_params.ver			= IFNET_INIT_CURRENT_VERSION;
1202	init_params.len			= sizeof (init_params);
1203	if (if_bridge_txstart) {
1204		init_params.start	= bridge_start;
1205	} else {
1206		init_params.flags	= IFNET_INIT_LEGACY;
1207		init_params.output	= bridge_output;
1208	}
1209	init_params.uniqueid		= sc->sc_if_xname;
1210	init_params.uniqueid_len	= strlen(sc->sc_if_xname);
1211	init_params.sndq_maxlen		= IFQ_MAXLEN;
1212	init_params.name		= ifc->ifc_name;
1213	init_params.unit		= unit;
1214	init_params.family		= IFNET_FAMILY_ETHERNET;
1215	init_params.type		= IFT_BRIDGE;
1216	init_params.demux		= ether_demux;
1217	init_params.add_proto		= ether_add_proto;
1218	init_params.del_proto		= ether_del_proto;
1219	init_params.check_multi		= ether_check_multi;
1220	init_params.framer_extended	= ether_frameout_extended;
1221	init_params.softc		= sc;
1222	init_params.ioctl		= bridge_ioctl;
1223	init_params.set_bpf_tap		= bridge_set_bpf_tap;
1224	init_params.detach		= bridge_detach;
1225	init_params.broadcast_addr	= etherbroadcastaddr;
1226	init_params.broadcast_len	= ETHER_ADDR_LEN;
1227	error = ifnet_allocate_extended(&init_params, &ifp);
1228	if (error != 0) {
		printf("%s: ifnet_allocate_extended failed %d\n", __func__,
		    error);
1230		goto done;
1231	}
1232	sc->sc_ifp = ifp;
1233
1234	error = ifnet_set_mtu(ifp, ETHERMTU);
1235	if (error != 0) {
1236		printf("%s: ifnet_set_mtu failed %d\n", __func__, error);
1237		goto done;
1238	}
1239	error = ifnet_set_addrlen(ifp, ETHER_ADDR_LEN);
1240	if (error != 0) {
1241		printf("%s: ifnet_set_addrlen failed %d\n", __func__, error);
1242		goto done;
1243	}
1244	error = ifnet_set_hdrlen(ifp, ETHER_HDR_LEN);
1245	if (error != 0) {
1246		printf("%s: ifnet_set_hdrlen failed %d\n", __func__, error);
1247		goto done;
1248	}
1249	error = ifnet_set_flags(ifp,
1250	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST,
1251	    0xffff);
1252	if (error != 0) {
1253		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
1254		goto done;
1255	}
1256
1257	/*
1258	 * Generate an ethernet address with a locally administered address.
1259	 *
1260	 * Since we are using random ethernet addresses for the bridge, it is
1261	 * possible that we might have address collisions, so make sure that
1262	 * this hardware address isn't already in use on another bridge.
1263	 * The first try uses the "hostid" and falls back to read_random();
1264	 * for "hostid", we use the MAC address of the first-encountered
1265	 * Ethernet-type interface that is currently configured.
1266	 */
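	/*
	 * Worked example of the first-octet fix-up below: a source octet
	 * of 0xa9 (1010 1001b) becomes 0xaa (1010 1010b) -- the multicast
	 * bit (0x01) is cleared and the locally administered bit (0x02)
	 * is set.
	 */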
1267	fb = 0;
1268	has_hostid = (uuid_get_ethernet(&eth_hostid[0]) == 0);
1269	for (retry = 1; retry != 0; ) {
1270		if (fb || has_hostid == 0) {
1271			read_random(&sc->sc_defaddr, ETHER_ADDR_LEN);
1272			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
1273			sc->sc_defaddr[0] |= 2;  /* set the LAA bit */
1274		} else {
1275			bcopy(&eth_hostid[0], &sc->sc_defaddr,
1276			    ETHER_ADDR_LEN);
1277			sc->sc_defaddr[0] &= ~1; /* clear multicast bit */
1278			sc->sc_defaddr[0] |= 2;  /* set the LAA bit */
1279			sc->sc_defaddr[3] =	/* stir it up a bit */
1280			    ((sc->sc_defaddr[3] & 0x0f) << 4) |
1281			    ((sc->sc_defaddr[3] & 0xf0) >> 4);
1282			/*
1283			 * Mix in the LSB as it's actually pretty significant,
1284			 * see rdar://14076061
1285			 */
1286			sc->sc_defaddr[4] =
1287			    (((sc->sc_defaddr[4] & 0x0f) << 4) |
1288			    ((sc->sc_defaddr[4] & 0xf0) >> 4)) ^
1289			    sc->sc_defaddr[5];
1290			sc->sc_defaddr[5] = ifp->if_unit & 0xff;
1291		}
1292
1293		fb = 1;
1294		retry = 0;
1295		lck_mtx_lock(&bridge_list_mtx);
1296		LIST_FOREACH(sc2, &bridge_list, sc_list) {
1297			if (memcmp(sc->sc_defaddr,
1298			    IF_LLADDR(sc2->sc_ifp), ETHER_ADDR_LEN) == 0)
1299				retry = 1;
1300		}
1301		lck_mtx_unlock(&bridge_list_mtx);
1302	}
1303
1304	memset(sdl, 0, sizeof (sdl_buffer));
1305	sdl->sdl_family = AF_LINK;
1306	sdl->sdl_nlen = strlen(sc->sc_if_xname);
1307	sdl->sdl_alen = ETHER_ADDR_LEN;
1308	sdl->sdl_len = offsetof(struct sockaddr_dl, sdl_data);
1309	memcpy(sdl->sdl_data, sc->sc_if_xname, sdl->sdl_nlen);
1310	memcpy(LLADDR(sdl), sc->sc_defaddr, ETHER_ADDR_LEN);
1311
1312	sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
1313
1314#if BRIDGE_DEBUG
1315	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
1316		link_print(sdl);
1317#endif
1318
1319	error = ifnet_attach(ifp, NULL);
1320	if (error != 0) {
1321		printf("%s: ifnet_attach failed %d\n", __func__, error);
1322		goto done;
1323	}
1324
1325	error = ifnet_set_lladdr_and_type(ifp, sc->sc_defaddr, ETHER_ADDR_LEN,
1326	    IFT_ETHER);
1327	if (error != 0) {
1328		printf("%s: ifnet_set_lladdr_and_type failed %d\n", __func__,
1329		    error);
1330		goto done;
1331	}
1332
1333	ifnet_set_offload(ifp,
1334	    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
1335	    IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_MULTIPAGES);
1336
1337	error = bridge_set_tso(sc);
1338	if (error != 0) {
1339		printf("%s: bridge_set_tso failed %d\n", __func__, error);
1340		goto done;
1341	}
1342
1343#if BRIDGESTP
1344	bstp_attach(&sc->sc_stp, &bridge_ops);
1345#endif /* BRIDGESTP */
1346
1347	lck_mtx_lock(&bridge_list_mtx);
1348	LIST_INSERT_HEAD(&bridge_list, sc, sc_list);
1349	lck_mtx_unlock(&bridge_list_mtx);
1350
1351	/* attach as ethernet */
1352	error = bpf_attach(ifp, DLT_EN10MB, sizeof (struct ether_header),
1353	    NULL, NULL);
1354
1355done:
1356	if (error != 0) {
1357		printf("%s failed error %d\n", __func__, error);
1358		/* Cleanup TBD */
1359	}
1360
1361	return (error);
1362}
1363
1364/*
1365 * bridge_clone_destroy:
1366 *
1367 *	Destroy a bridge instance.
1368 */
1369static int
1370bridge_clone_destroy(struct ifnet *ifp)
1371{
1372	struct bridge_softc *sc = ifp->if_softc;
1373	struct bridge_iflist *bif;
1374	errno_t error;
1375
1376	BRIDGE_LOCK(sc);
1377	if ((sc->sc_flags & SCF_DETACHING)) {
1378		BRIDGE_UNLOCK(sc);
1379		return (0);
1380	}
1381	sc->sc_flags |= SCF_DETACHING;
1382
1383	bridge_ifstop(ifp, 1);
1384
1385	bridge_cancel_delayed_call(&sc->sc_resize_call);
1386
1387	bridge_cleanup_delayed_call(&sc->sc_resize_call);
1388	bridge_cleanup_delayed_call(&sc->sc_aging_timer);
1389
1390	error = ifnet_set_flags(ifp, 0, IFF_UP);
1391	if (error != 0) {
1392		printf("%s: ifnet_set_flags failed %d\n", __func__, error);
1393	}
1394
1395	while ((bif = TAILQ_FIRST(&sc->sc_iflist)) != NULL)
1396		bridge_delete_member(sc, bif, 0);
1397
1398	while ((bif = TAILQ_FIRST(&sc->sc_spanlist)) != NULL) {
1399		bridge_delete_span(sc, bif);
1400	}
1401
1402	BRIDGE_UNLOCK(sc);
1403
1404	error = ifnet_detach(ifp);
1405	if (error != 0) {
1406		panic("bridge_clone_destroy: ifnet_detach(%p) failed %d\n",
1407		    ifp, error);
1408		if ((sc = (struct bridge_softc *)ifnet_softc(ifp)) != NULL) {
1409			BRIDGE_LOCK(sc);
1410			sc->sc_flags &= ~SCF_DETACHING;
1411			BRIDGE_UNLOCK(sc);
1412		}
1413		return (0);
1414	}
1415
1416	return (0);
1417}
1418
1419#define	DRVSPEC do { \
1420	if (ifd->ifd_cmd >= bridge_control_table_size) {		\
1421		error = EINVAL;						\
1422		break;							\
1423	}								\
1424	bc = &bridge_control_table[ifd->ifd_cmd];			\
1425									\
1426	if (cmd == SIOCGDRVSPEC &&					\
1427	    (bc->bc_flags & BC_F_COPYOUT) == 0) {			\
1428		error = EINVAL;						\
1429		break;							\
1430	} else if (cmd == SIOCSDRVSPEC &&				\
1431	    (bc->bc_flags & BC_F_COPYOUT) != 0) {			\
1432		error = EINVAL;						\
1433		break;							\
1434	}								\
1435									\
1436	if (bc->bc_flags & BC_F_SUSER) {				\
1437		error = kauth_authorize_generic(kauth_cred_get(),	\
1438		    KAUTH_GENERIC_ISSUSER);				\
1439		if (error)						\
1440			break;						\
1441	}								\
1442									\
1443	if (ifd->ifd_len != bc->bc_argsize ||				\
1444	    ifd->ifd_len > sizeof (args)) {				\
1445		error = EINVAL;						\
1446		break;							\
1447	}								\
1448									\
1449	bzero(&args, sizeof (args));					\
1450	if (bc->bc_flags & BC_F_COPYIN) {				\
1451		error = copyin(ifd->ifd_data, &args, ifd->ifd_len);	\
1452		if (error)						\
1453			break;						\
1454	}								\
1455									\
1456	BRIDGE_LOCK(sc);						\
1457	error = (*bc->bc_func)(sc, &args);				\
1458	BRIDGE_UNLOCK(sc);						\
1459	if (error)							\
1460		break;							\
1461									\
1462	if (bc->bc_flags & BC_F_COPYOUT)				\
1463		error = copyout(&args, ifd->ifd_data, ifd->ifd_len);	\
1464} while (0)
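
/*
 * Sketch of the user-space side of the DRVSPEC path (hypothetical
 * snippet, error handling omitted): the bridge argument is wrapped in
 * a struct ifdrv whose ifd_cmd selects an entry in the control table.
 *
 *	struct ifbreq req;
 *	struct ifdrv ifd;
 *
 *	memset(&req, 0, sizeof (req));
 *	strlcpy(req.ifbr_ifsname, "en0", sizeof (req.ifbr_ifsname));
 *	memset(&ifd, 0, sizeof (ifd));
 *	strlcpy(ifd.ifd_name, "bridge0", sizeof (ifd.ifd_name));
 *	ifd.ifd_cmd = 0;		// index 0: bridge_ioctl_add
 *	ifd.ifd_len = sizeof (req);
 *	ifd.ifd_data = &req;
 *	ioctl(s, SIOCSDRVSPEC, &ifd);
 */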
1465
1466/*
1467 * bridge_ioctl:
1468 *
1469 *	Handle a control request from the operator.
1470 */
1471static errno_t
1472bridge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
1473{
1474	struct bridge_softc *sc = ifp->if_softc;
1475	struct ifreq *ifr = (struct ifreq *)data;
1476	struct bridge_iflist *bif;
1477	int error = 0;
1478
1479	BRIDGE_LOCK_ASSERT_NOTHELD(sc);
1480
1481#if BRIDGE_DEBUG
1482	if (if_bridge_debug & BR_DBGF_IOCTL)
1483		printf("%s: ifp %s cmd 0x%08lx (%c%c [%lu] %c %lu)\n",
1484		    __func__, ifp->if_xname, cmd, (cmd & IOC_IN) ? 'I' : ' ',
1485		    (cmd & IOC_OUT) ? 'O' : ' ', IOCPARM_LEN(cmd),
1486		    (char)IOCGROUP(cmd), cmd & 0xff);
1487#endif /* BRIDGE_DEBUG */
1488
1489	switch (cmd) {
1490
1491	case SIOCSIFADDR:
1492	case SIOCAIFADDR:
1493		ifnet_set_flags(ifp, IFF_UP, IFF_UP);
1494		break;
1495
1496	case SIOCGIFMEDIA32:
1497	case SIOCGIFMEDIA64: {
1498		struct ifmediareq *ifmr = (struct ifmediareq *)data;
1499		user_addr_t user_addr;
1500
1501		user_addr = (cmd == SIOCGIFMEDIA64) ?
1502		    ((struct ifmediareq64 *)ifmr)->ifmu_ulist :
1503		    CAST_USER_ADDR_T(((struct ifmediareq32 *)ifmr)->ifmu_ulist);
1504
1505		ifmr->ifm_status = IFM_AVALID;
1506		ifmr->ifm_mask = 0;
1507		ifmr->ifm_count = 1;
1508
1509		BRIDGE_LOCK(sc);
1510		if (!(sc->sc_flags & SCF_DETACHING) &&
1511		    (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
1512			ifmr->ifm_status |= IFM_ACTIVE;
1513			ifmr->ifm_active = ifmr->ifm_current =
1514			    IFM_ETHER | IFM_AUTO;
1515		} else {
1516			ifmr->ifm_active = ifmr->ifm_current = IFM_NONE;
1517		}
1518		BRIDGE_UNLOCK(sc);
1519
1520		if (user_addr != USER_ADDR_NULL) {
1521			error = copyout(&ifmr->ifm_current, user_addr,
1522			    sizeof (int));
1523		}
1524		break;
1525	}
1526
1527	case SIOCADDMULTI:
1528	case SIOCDELMULTI:
1529		break;
1530
1531	case SIOCSDRVSPEC32:
1532	case SIOCGDRVSPEC32: {
1533		union {
1534			struct ifbreq ifbreq;
1535			struct ifbifconf32 ifbifconf;
1536			struct ifbareq32 ifbareq;
1537			struct ifbaconf32 ifbaconf;
1538			struct ifbrparam ifbrparam;
1539			struct ifbropreq32 ifbropreq;
1540		} args;
1541		struct ifdrv32 *ifd = (struct ifdrv32 *)data;
1542		const struct bridge_control *bridge_control_table =
1543		    bridge_control_table32, *bc;
1544
1545		DRVSPEC;
1546
1547		break;
1548	}
1549	case SIOCSDRVSPEC64:
1550	case SIOCGDRVSPEC64: {
1551		union {
1552			struct ifbreq ifbreq;
1553			struct ifbifconf64 ifbifconf;
1554			struct ifbareq64 ifbareq;
1555			struct ifbaconf64 ifbaconf;
1556			struct ifbrparam ifbrparam;
1557			struct ifbropreq64 ifbropreq;
1558		} args;
1559		struct ifdrv64 *ifd = (struct ifdrv64 *)data;
1560		const struct bridge_control *bridge_control_table =
1561		    bridge_control_table64, *bc;
1562
1563		DRVSPEC;
1564
1565		break;
1566	}
1567
1568	case SIOCSIFFLAGS:
1569		if (!(ifp->if_flags & IFF_UP) &&
1570		    (ifp->if_flags & IFF_RUNNING)) {
1571			/*
1572			 * If interface is marked down and it is running,
1573			 * then stop and disable it.
1574			 */
1575			BRIDGE_LOCK(sc);
1576			bridge_ifstop(ifp, 1);
1577			BRIDGE_UNLOCK(sc);
1578		} else if ((ifp->if_flags & IFF_UP) &&
1579		    !(ifp->if_flags & IFF_RUNNING)) {
1580			/*
1581			 * If interface is marked up and it is stopped, then
1582			 * start it.
1583			 */
1584			BRIDGE_LOCK(sc);
1585			error = bridge_init(ifp);
1586			BRIDGE_UNLOCK(sc);
1587		}
1588		break;
1589
1590	case SIOCSIFLLADDR:
1591		error = ifnet_set_lladdr(ifp, ifr->ifr_addr.sa_data,
1592		    ifr->ifr_addr.sa_len);
1593		if (error != 0)
1594			printf("%s: SIOCSIFLLADDR error %d\n", ifp->if_xname,
1595			    error);
1596		break;
1597
1598	case SIOCSIFMTU:
1599		if (ifr->ifr_mtu < 576) {
1600			error = EINVAL;
1601			break;
1602		}
1603		BRIDGE_LOCK(sc);
1604		if (TAILQ_EMPTY(&sc->sc_iflist)) {
1605			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1606			BRIDGE_UNLOCK(sc);
1607			break;
1608		}
1609		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1610			if (bif->bif_ifp->if_mtu != (unsigned)ifr->ifr_mtu) {
1611				printf("%s: invalid MTU: %u(%s) != %d\n",
1612				    sc->sc_ifp->if_xname,
1613				    bif->bif_ifp->if_mtu,
1614				    bif->bif_ifp->if_xname, ifr->ifr_mtu);
1615				error = EINVAL;
1616				break;
1617			}
1618		}
1619		if (!error)
1620			sc->sc_ifp->if_mtu = ifr->ifr_mtu;
1621		BRIDGE_UNLOCK(sc);
1622		break;
1623
1624	default:
1625		error = ether_ioctl(ifp, cmd, data);
1626#if BRIDGE_DEBUG
1627		if (error != 0 && error != EOPNOTSUPP)
1628			printf("%s: ifp %s cmd 0x%08lx "
1629			    "(%c%c [%lu] %c %lu) failed error: %d\n",
1630			    __func__, ifp->if_xname, cmd,
1631			    (cmd & IOC_IN) ? 'I' : ' ',
1632			    (cmd & IOC_OUT) ? 'O' : ' ',
1633			    IOCPARM_LEN(cmd), (char)IOCGROUP(cmd),
1634			    cmd & 0xff, error);
1635#endif /* BRIDGE_DEBUG */
1636		break;
1637	}
1638	BRIDGE_LOCK_ASSERT_NOTHELD(sc);
1639
1640	return (error);
1641}
1642
1643#if HAS_IF_CAP
1644/*
1645 * bridge_mutecaps:
1646 *
1647 *	Clear or restore unwanted capabilities on the member interface
1648 */
1649static void
1650bridge_mutecaps(struct bridge_softc *sc)
1651{
1652	struct bridge_iflist *bif;
1653	int enabled, mask;
1654
1655	/* Initial bitmask of capabilities to test */
1656	mask = BRIDGE_IFCAPS_MASK;
1657
1658	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
		/* Every member must support it or it's disabled */
1660		mask &= bif->bif_savedcaps;
1661	}
1662
1663	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1664		enabled = bif->bif_ifp->if_capenable;
1665		enabled &= ~BRIDGE_IFCAPS_STRIP;
1666		/* strip off mask bits and enable them again if allowed */
1667		enabled &= ~BRIDGE_IFCAPS_MASK;
1668		enabled |= mask;
1669
1670		bridge_set_ifcap(sc, bif, enabled);
1671	}
1672
1673}
1674
1675static void
1676bridge_set_ifcap(struct bridge_softc *sc, struct bridge_iflist *bif, int set)
1677{
1678	struct ifnet *ifp = bif->bif_ifp;
1679	struct ifreq ifr;
1680	int error;
1681
1682	bzero(&ifr, sizeof (ifr));
1683	ifr.ifr_reqcap = set;
1684
1685	if (ifp->if_capenable != set) {
1686		IFF_LOCKGIANT(ifp);
1687		error = (*ifp->if_ioctl)(ifp, SIOCSIFCAP, (caddr_t)&ifr);
1688		IFF_UNLOCKGIANT(ifp);
1689		if (error)
1690			printf("%s: %s error setting interface capabilities "
1691			    "on %s\n", __func__, sc->sc_ifp->if_xname,
1692			    ifp->if_xname);
1693	}
1694}
1695#endif /* HAS_IF_CAP */
1696
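/*
 * bridge_set_tso:
 *
 *	Enable TSO on the bridge interface only when every member
 *	supports it, and set the bridge's TSO MTU to the smallest
 *	value advertised by the members (but no smaller than the
 *	bridge MTU).
 */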
1697static errno_t
1698bridge_set_tso(struct bridge_softc *sc)
1699{
1700	struct bridge_iflist *bif;
1701	u_int32_t tso_v4_mtu;
1702	u_int32_t tso_v6_mtu;
1703	ifnet_offload_t offload;
1704	errno_t error = 0;
1705
1706	/* By default, support TSO */
1707	offload = sc->sc_ifp->if_hwassist | IFNET_TSO_IPV4 | IFNET_TSO_IPV6;
1708	tso_v4_mtu = IP_MAXPACKET;
1709	tso_v6_mtu = IP_MAXPACKET;
1710
1711	/* Use the lowest common denominator of the members */
1712	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1713		ifnet_t ifp = bif->bif_ifp;
1714
1715		if (ifp == NULL)
1716			continue;
1717
1718		if (offload & IFNET_TSO_IPV4) {
1719			if (ifp->if_hwassist & IFNET_TSO_IPV4) {
1720				if (tso_v4_mtu > ifp->if_tso_v4_mtu)
1721					tso_v4_mtu = ifp->if_tso_v4_mtu;
1722			} else {
1723				offload &= ~IFNET_TSO_IPV4;
1724				tso_v4_mtu = 0;
1725			}
1726		}
1727		if (offload & IFNET_TSO_IPV6) {
1728			if (ifp->if_hwassist & IFNET_TSO_IPV6) {
1729				if (tso_v6_mtu > ifp->if_tso_v6_mtu)
1730					tso_v6_mtu = ifp->if_tso_v6_mtu;
1731			} else {
1732				offload &= ~IFNET_TSO_IPV6;
1733				tso_v6_mtu = 0;
1734			}
1735		}
1736	}
1737
1738	if (offload != sc->sc_ifp->if_hwassist) {
1739		error = ifnet_set_offload(sc->sc_ifp, offload);
1740		if (error != 0) {
1741#if BRIDGE_DEBUG
1742			if (if_bridge_debug & BR_DBGF_LIFECYCLE)
1743				printf("%s: ifnet_set_offload(%s, 0x%x) "
1744				    "failed %d\n", __func__,
1745				    sc->sc_ifp->if_xname, offload, error);
1746#endif /* BRIDGE_DEBUG */
1747			goto done;
1748		}
1749		/*
1750		 * For ifnet_set_tso_mtu()'s sake, the TSO MTU must be at least
1751		 * as large as the interface MTU.
1752		 */
1753		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV4) {
1754			if (tso_v4_mtu < sc->sc_ifp->if_mtu)
1755				tso_v4_mtu = sc->sc_ifp->if_mtu;
1756			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET,
1757			    tso_v4_mtu);
1758			if (error != 0) {
1759#if BRIDGE_DEBUG
1760				if (if_bridge_debug & BR_DBGF_LIFECYCLE)
1761					printf("%s: ifnet_set_tso_mtu(%s, "
1762					    "AF_INET, %u) failed %d\n",
1763					    __func__, sc->sc_ifp->if_xname,
1764					    tso_v4_mtu, error);
1765#endif /* BRIDGE_DEBUG */
1766				goto done;
1767			}
1768		}
1769		if (sc->sc_ifp->if_hwassist & IFNET_TSO_IPV6) {
1770			if (tso_v6_mtu < sc->sc_ifp->if_mtu)
1771				tso_v6_mtu = sc->sc_ifp->if_mtu;
1772			error = ifnet_set_tso_mtu(sc->sc_ifp, AF_INET6,
1773			    tso_v6_mtu);
1774			if (error != 0) {
1775#if BRIDGE_DEBUG
1776				if (if_bridge_debug & BR_DBGF_LIFECYCLE)
1777					printf("%s: ifnet_set_tso_mtu(%s, "
1778					    "AF_INET6, %u) failed %d\n",
1779					    __func__, sc->sc_ifp->if_xname,
1780					    tso_v6_mtu, error);
1781#endif /* BRIDGE_DEBUG */
1782				goto done;
1783			}
1784		}
1785	}
1786done:
1787	return (error);
1788}
1789
1790/*
1791 * bridge_lookup_member:
1792 *
1793 *	Look up a bridge member interface by name.
1794 */
1795static struct bridge_iflist *
1796bridge_lookup_member(struct bridge_softc *sc, const char *name)
1797{
1798	struct bridge_iflist *bif;
1799	struct ifnet *ifp;
1800
1801	BRIDGE_LOCK_ASSERT_HELD(sc);
1802
1803	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1804		ifp = bif->bif_ifp;
1805		if (strcmp(ifp->if_xname, name) == 0)
1806			return (bif);
1807	}
1808
1809	return (NULL);
1810}
1811
1812/*
1813 * bridge_lookup_member_if:
1814 *
1815 *	Look up a bridge member interface by ifnet*.
1816 */
1817static struct bridge_iflist *
1818bridge_lookup_member_if(struct bridge_softc *sc, struct ifnet *member_ifp)
1819{
1820	struct bridge_iflist *bif;
1821
1822	BRIDGE_LOCK_ASSERT_HELD(sc);
1823
1824	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
1825		if (bif->bif_ifp == member_ifp)
1826			return (bif);
1827	}
1828
1829	return (NULL);
1830}
1831
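/*
 * bridge_iff_input:
 *
 *	Interface filter input callback for a member interface.
 *	Extend the mbuf to cover the frame header, hand the packet to
 *	bridge_input(), and restore the mbuf if the bridge did not
 *	consume it.  Packets marked M_PROTO1 (already bridged) are
 *	passed through untouched.
 */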
1832static errno_t
1833bridge_iff_input(void *cookie, ifnet_t ifp, protocol_family_t protocol,
1834	mbuf_t *data, char **frame_ptr)
1835{
1836#pragma unused(protocol)
1837	errno_t error = 0;
1838	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1839	struct bridge_softc *sc = bif->bif_sc;
1840	int included = 0;
1841	size_t frmlen = 0;
1842	mbuf_t m = *data;
1843
1844	if ((m->m_flags & M_PROTO1))
1845		goto out;
1846
1847	if (*frame_ptr >= (char *)mbuf_datastart(m) &&
1848	    *frame_ptr <= (char *)mbuf_data(m)) {
1849		included = 1;
1850		frmlen = (char *)mbuf_data(m) - *frame_ptr;
1851	}
1852#if BRIDGE_DEBUG
1853	if (if_bridge_debug & BR_DBGF_INPUT) {
1854		printf("%s: %s from %s m 0x%llx data 0x%llx frame 0x%llx %s "
1855		    "frmlen %lu\n", __func__, sc->sc_ifp->if_xname,
1856		    ifp->if_xname, (uint64_t)VM_KERNEL_ADDRPERM(m),
1857		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)),
1858		    (uint64_t)VM_KERNEL_ADDRPERM(*frame_ptr),
1859		    included ? "inside" : "outside", frmlen);
1860
1861		if (if_bridge_debug & BR_DBGF_MBUF) {
1862			printf_mbuf(m, "bridge_iff_input[", "\n");
1863			printf_ether_header((struct ether_header *)
1864			    (void *)*frame_ptr);
1865			printf_mbuf_data(m, 0, 20);
1866			printf("\n");
1867		}
1868	}
1869#endif /* BRIDGE_DEBUG */
1870
1871	/* Move the data pointer back to the start of the frame (link-layer header) */
1872	if (included) {
1873		(void) mbuf_setdata(m, (char *)mbuf_data(m) - frmlen,
1874		    mbuf_len(m) + frmlen);
1875		(void) mbuf_pkthdr_adjustlen(m, frmlen);
1876	} else {
1877		printf("%s: frame_ptr outside mbuf\n", __func__);
1878		goto out;
1879	}
1880
1881	error = bridge_input(ifp, m, *frame_ptr);
1882
1883	/* Adjust packet back to original */
1884	if (error == 0) {
1885		(void) mbuf_setdata(m, (char *)mbuf_data(m) + frmlen,
1886		    mbuf_len(m) - frmlen);
1887		(void) mbuf_pkthdr_adjustlen(m, -frmlen);
1888	}
1889#if BRIDGE_DEBUG
1890	if ((if_bridge_debug & BR_DBGF_INPUT) &&
1891	    (if_bridge_debug & BR_DBGF_MBUF)) {
1892		printf("\n");
1893		printf_mbuf(m, "bridge_iff_input]", "\n");
1894	}
1895#endif /* BRIDGE_DEBUG */
1896
1897out:
1898	BRIDGE_LOCK_ASSERT_NOTHELD(sc);
1899
1900	return (error);
1901}
1902
1903#if BRIDGE_MEMBER_OUT_FILTER
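/*
 * bridge_iff_output:
 *
 *	Interface filter output callback for a member interface.
 *	Hand locally originated packets to bridge_member_output();
 *	packets marked M_PROTO1 (already bridged) are passed through
 *	untouched.
 */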
1904static errno_t
1905bridge_iff_output(void *cookie, ifnet_t ifp, protocol_family_t protocol,
1906	mbuf_t *data)
1907{
1908#pragma unused(protocol)
1909	errno_t error = 0;
1910	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1911	struct bridge_softc *sc = bif->bif_sc;
1912	mbuf_t m = *data;
1913
1914	if ((m->m_flags & M_PROTO1))
1915		goto out;
1916
1917#if BRIDGE_DEBUG
1918	if (if_bridge_debug & BR_DBGF_OUTPPUT) {
1919		printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
1920		    sc->sc_ifp->if_xname, ifp->if_xname,
1921		    (uint64_t)VM_KERNEL_ADDRPERM(m),
1922		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
1923	}
1924#endif /* BRIDGE_DEBUG */
1925
1926	error = bridge_member_output(sc, ifp, m);
1927	if (error != 0) {
1928		printf("%s: bridge_member_output failed error %d\n", __func__,
1929		    error);
1930	}
1931
1932out:
1933	BRIDGE_LOCK_ASSERT_NOTHELD(sc);
1934
1935	return (error);
1936}
1937#endif /* BRIDGE_MEMBER_OUT_FILTER */
1938
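/*
 * bridge_iff_event:
 *
 *	Interface filter event callback.  React to link-layer events
 *	on a member interface: detach the member when the interface
 *	goes away, update the bridge link status on link changes,
 *	restore promiscuous mode when the interface flags change, and
 *	recompute TSO when the interface capabilities change.
 */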
1939static void
1940bridge_iff_event(void *cookie, ifnet_t ifp, protocol_family_t protocol,
1941	const struct kev_msg *event_msg)
1942{
1943#pragma unused(protocol)
1944	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
1945	struct bridge_softc *sc = bif->bif_sc;
1946
1947	if (event_msg->vendor_code == KEV_VENDOR_APPLE &&
1948	    event_msg->kev_class == KEV_NETWORK_CLASS &&
1949	    event_msg->kev_subclass == KEV_DL_SUBCLASS) {
1950#if BRIDGE_DEBUG
1951		if (if_bridge_debug & BR_DBGF_LIFECYCLE)
1952			printf("%s: %s event_code %u - %s\n", __func__,
1953			    ifp->if_xname, event_msg->event_code,
1954			    dlil_kev_dl_code_str(event_msg->event_code));
1955#endif /* BRIDGE_DEBUG */
1956
1957		switch (event_msg->event_code) {
1958			case KEV_DL_IF_DETACHING:
1959			case KEV_DL_IF_DETACHED: {
1960				bridge_ifdetach(bif, ifp);
1961				break;
1962			}
1963			case KEV_DL_LINK_OFF:
1964			case KEV_DL_LINK_ON: {
1965				bridge_iflinkevent(ifp);
1966#if BRIDGESTP
1967				bstp_linkstate(ifp, event_msg->event_code);
1968#endif /* BRIDGESTP */
1969				break;
1970			}
1971			case KEV_DL_SIFFLAGS: {
1972				if ((bif->bif_flags & BIFF_PROMISC) == 0 &&
1973				    (ifp->if_flags & IFF_UP)) {
1974					errno_t error;
1975
1976					error = ifnet_set_promiscuous(ifp, 1);
1977					if (error != 0) {
1978						printf("%s: "
1979						    "ifnet_set_promiscuous (%s)"
1980						    " failed %d\n",
1981						    __func__, ifp->if_xname,
1982						    error);
1983					} else {
1984						bif->bif_flags |= BIFF_PROMISC;
1985					}
1986				}
1987				break;
1988			}
1989			case KEV_DL_IFCAP_CHANGED: {
1990				BRIDGE_LOCK(sc);
1991				bridge_set_tso(sc);
1992				BRIDGE_UNLOCK(sc);
1993				break;
1994			}
1995			default:
1996				break;
1997		}
1998	}
1999}
2000
2001/*
2002 * bridge_iff_detached:
2003 *
2004 *	Interface filter detached callback: detach the interface from
2005 *	the bridge and free the bridge member entry.
2006 */
2007static void
2008bridge_iff_detached(void *cookie, ifnet_t ifp)
2009{
2010	struct bridge_iflist *bif = (struct bridge_iflist *)cookie;
2011
2012#if BRIDGE_DEBUG
2013	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
2014		printf("%s: %s\n", __func__, ifp->if_xname);
2015#endif /* BRIDGE_DEBUG */
2016
2017	bridge_ifdetach(bif, ifp);
2018
2019	_FREE(bif, M_DEVBUF);
2020}
2021
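/*
 * bridge_proto_input:
 *
 *	Input handler for the dummy PF_BRIDGE protocol attached to the
 *	member interfaces.  No traffic is expected on this protocol;
 *	a debug message is printed if a packet ever shows up.
 */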
2022static errno_t
2023bridge_proto_input(ifnet_t ifp, protocol_family_t protocol, mbuf_t packet,
2024	char *header)
2025{
2026#pragma unused(protocol, packet, header)
2027#if BRIDGE_DEBUG
2028	printf("%s: unexpected packet from %s\n", __func__,
2029	    ifp->if_xname);
2030#endif /* BRIDGE_DEBUG */
2031	return (0);
2032}
2033
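/*
 * bridge_attach_protocol:
 *
 *	Attach the dummy PF_BRIDGE protocol to a member interface,
 *	with bridge_proto_input() as its input handler.
 */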
2034static int
2035bridge_attach_protocol(struct ifnet *ifp)
2036{
2037	int	error;
2038	struct ifnet_attach_proto_param	reg;
2039
2040#if BRIDGE_DEBUG
2041	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
2042		printf("%s: %s\n", __func__, ifp->if_xname);
2043#endif /* BRIDGE_DEBUG */
2044
2045	bzero(&reg, sizeof (reg));
2046	reg.input = bridge_proto_input;
2047
2048	error = ifnet_attach_protocol(ifp, PF_BRIDGE, &reg);
2049	if (error)
2050		printf("%s: ifnet_attach_protocol(%s) failed, %d\n",
2051		    __func__, ifp->if_xname, error);
2052
2053	return (error);
2054}
2055
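/*
 * bridge_detach_protocol:
 *
 *	Detach the dummy PF_BRIDGE protocol from a member interface.
 */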
2056static int
2057bridge_detach_protocol(struct ifnet *ifp)
2058{
2059	int	error;
2060
2061#if BRIDGE_DEBUG
2062	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
2063		printf("%s: %s\n", __func__, ifp->if_xname);
2064#endif /* BRIDGE_DEBUG */
2065	error = ifnet_detach_protocol(ifp, PF_BRIDGE);
2066	if (error)
2067		printf("%s: ifnet_detach_protocol(%s) failed, %d\n",
2068		    __func__, ifp->if_xname, error);
2069
2070	return (error);
2071}
2072
2073/*
2074 * bridge_delete_member:
2075 *
2076 *	Delete the specified member interface.
2077 */
2078static void
2079bridge_delete_member(struct bridge_softc *sc, struct bridge_iflist *bif,
2080	int gone)
2081{
2082	struct ifnet *ifs = bif->bif_ifp, *bifp = sc->sc_ifp;
2083	int lladdr_changed = 0, error, filt_attached;
2084	uint8_t eaddr[ETHER_ADDR_LEN];
2085	u_int32_t event_code = 0;
2086
2087	BRIDGE_LOCK_ASSERT_HELD(sc);
2088	VERIFY(ifs != NULL);
2089
2090	if (!gone) {
2091		switch (ifs->if_type) {
2092		case IFT_ETHER:
2093		case IFT_L2VLAN:
2094			/*
2095			 * Take the interface out of promiscuous mode.
2096			 */
2097			if (bif->bif_flags & BIFF_PROMISC)
2098				(void) ifnet_set_promiscuous(ifs, 0);
2099			break;
2100
2101		case IFT_GIF:
2102			/* currently not supported */
2103			/* FALLTHRU */
2104		default:
2105			VERIFY(0);
2106			/* NOTREACHED */
2107		}
2108
2109#if HAS_IF_CAP
2110		/* re-enable any interface capabilities */
2111		bridge_set_ifcap(sc, bif, bif->bif_savedcaps);
2112#endif
2113	}
2114
2115	if (bif->bif_flags & BIFF_PROTO_ATTACHED) {
2116		/* Respect lock ordering with DLIL lock */
2117		BRIDGE_UNLOCK(sc);
2118		(void) bridge_detach_protocol(ifs);
2119		BRIDGE_LOCK(sc);
2120	}
2121#if BRIDGESTP
2122	if (bif->bif_ifflags & IFBIF_STP)
2123		bstp_disable(&bif->bif_stp);
2124#endif /* BRIDGESTP */
2125
2126	BRIDGE_XLOCK(sc);
2127	TAILQ_REMOVE(&sc->sc_iflist, bif, bif_next);
2128	BRIDGE_XDROP(sc);
2129
2130	/*
2131	 * If removing the interface that gave the bridge its mac address, set
2132	 * the mac address of the bridge to the address of the next member, or
2133	 * to its default address if no members are left.
2134	 */
2135	if (bridge_inherit_mac && sc->sc_ifaddr == ifs) {
2136		ifnet_release(sc->sc_ifaddr);
2137		if (TAILQ_EMPTY(&sc->sc_iflist)) {
2138			bcopy(sc->sc_defaddr, eaddr, ETHER_ADDR_LEN);
2139			sc->sc_ifaddr = NULL;
2140		} else {
2141			struct ifnet *fif =
2142			    TAILQ_FIRST(&sc->sc_iflist)->bif_ifp;
2143			bcopy(IF_LLADDR(fif), eaddr, ETHER_ADDR_LEN);
2144			sc->sc_ifaddr = fif;
2145			ifnet_reference(fif);	/* for sc_ifaddr */
2146		}
2147		lladdr_changed = 1;
2148	}
2149
2150#if HAS_IF_CAP
2151	bridge_mutecaps(sc);	/* recalculate now this interface is removed */
2152#endif /* HAS_IF_CAP */
2153
2154	error = bridge_set_tso(sc);
2155	if (error != 0) {
2156		printf("%s: bridge_set_tso failed %d\n", __func__, error);
2157	}
2158
2159	bridge_rtdelete(sc, ifs, IFBF_FLUSHALL);
2160	KASSERT(bif->bif_addrcnt == 0,
2161	    ("%s: %d bridge routes referenced", __func__, bif->bif_addrcnt));
2162
2163	filt_attached = bif->bif_flags & BIFF_FILTER_ATTACHED;
2164
2165	/*
2166	 * Update link status of the bridge based on its remaining members
2167	 */
2168	event_code = bridge_updatelinkstatus(sc);
2169
2170	BRIDGE_UNLOCK(sc);
2171
2172	if (lladdr_changed &&
2173	    (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
2174		printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
2175
2176	if (event_code != 0)
2177		bridge_link_event(bifp, event_code);
2178
2179#if BRIDGESTP
2180	bstp_destroy(&bif->bif_stp);	/* prepare to free */
2181#endif /* BRIDGESTP */
2182
2183	if (filt_attached)
2184		iflt_detach(bif->bif_iff_ref);
2185	else
2186		_FREE(bif, M_DEVBUF);
2187
2188	ifs->if_bridge = NULL;
2189	ifnet_release(ifs);
2190
2191	BRIDGE_LOCK(sc);
2192}
2193
2194/*
2195 * bridge_delete_span:
2196 *
2197 *	Delete the specified span interface.
2198 */
2199static void
2200bridge_delete_span(struct bridge_softc *sc, struct bridge_iflist *bif)
2201{
2202	BRIDGE_LOCK_ASSERT_HELD(sc);
2203
2204	KASSERT(bif->bif_ifp->if_bridge == NULL,
2205	    ("%s: not a span interface", __func__));
2206
2207	ifnet_release(bif->bif_ifp);
2208
2209	TAILQ_REMOVE(&sc->sc_spanlist, bif, bif_next);
2210	_FREE(bif, M_DEVBUF);
2211}
2212
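/*
 * bridge_ioctl_add:
 *
 *	Add the interface named in the request as a member of the
 *	bridge: validate its type and MTU, optionally inherit its MAC
 *	address, place it in promiscuous mode, and attach the bridge
 *	interface filter and the PF_BRIDGE protocol to it.
 */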
2213static int
2214bridge_ioctl_add(struct bridge_softc *sc, void *arg)
2215{
2216	struct ifbreq *req = arg;
2217	struct bridge_iflist *bif = NULL;
2218	struct ifnet *ifs, *bifp = sc->sc_ifp;
2219	int error = 0, lladdr_changed = 0;
2220	uint8_t eaddr[ETHER_ADDR_LEN];
2221	struct iff_filter iff;
2222	u_int32_t event_code = 0;
2223
2224	ifs = ifunit(req->ifbr_ifsname);
2225	if (ifs == NULL)
2226		return (ENOENT);
2227	if (ifs->if_ioctl == NULL)	/* must be supported */
2228		return (EINVAL);
2229
2230	/* If it's in the span list, it can't be a member. */
2231	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2232		if (ifs == bif->bif_ifp)
2233			return (EBUSY);
2234
2235	if (ifs->if_bridge == sc)
2236		return (EEXIST);
2237
2238	if (ifs->if_bridge != NULL)
2239		return (EBUSY);
2240
2241	switch (ifs->if_type) {
2242	case IFT_ETHER:
2243	case IFT_L2VLAN:
2244		/* permitted interface types */
2245		break;
2246	case IFT_GIF:
2247		/* currently not supported */
2248		/* FALLTHRU */
2249	default:
2250		return (EINVAL);
2251	}
2252
2253	bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_NOWAIT | M_ZERO);
2254	if (bif == NULL)
2255		return (ENOMEM);
2256
2257	bif->bif_ifp = ifs;
2258	ifnet_reference(ifs);
2259	bif->bif_ifflags = IFBIF_LEARNING | IFBIF_DISCOVER;
2260#if HAS_IF_CAP
2261	bif->bif_savedcaps = ifs->if_capenable;
2262#endif /* HAS_IF_CAP */
2263	bif->bif_sc = sc;
2264
2265	/* Allow the first Ethernet member to define the MTU */
2266	if (TAILQ_EMPTY(&sc->sc_iflist))
2267		sc->sc_ifp->if_mtu = ifs->if_mtu;
2268	else if (sc->sc_ifp->if_mtu != ifs->if_mtu) {
2269		printf("%s: %s: invalid MTU for %s\n", __func__,
2270		    sc->sc_ifp->if_xname, ifs->if_xname);
2271		ifnet_release(ifs);	/* undo the reference taken above */
2272		_FREE(bif, M_DEVBUF);	/* free the not-yet-inserted entry */
2273		return (EINVAL);
	}
2274
2275	/*
2276	 * Assign the interface's MAC address to the bridge if it's the first
2277	 * member and the MAC address of the bridge has not been changed from
2278	 * the default (randomly) generated one.
2279	 */
2280	if (bridge_inherit_mac && TAILQ_EMPTY(&sc->sc_iflist) &&
2281	    !memcmp(IF_LLADDR(sc->sc_ifp), sc->sc_defaddr, ETHER_ADDR_LEN)) {
2282		bcopy(IF_LLADDR(ifs), eaddr, ETHER_ADDR_LEN);
2283		sc->sc_ifaddr = ifs;
2284		ifnet_reference(ifs);	/* for sc_ifaddr */
2285		lladdr_changed = 1;
2286	}
2287
2288	ifs->if_bridge = sc;
2289#if BRIDGESTP
2290	bstp_create(&sc->sc_stp, &bif->bif_stp, bif->bif_ifp);
2291#endif /* BRIDGESTP */
2292
2293	/*
2294	 * XXX: XLOCK HERE!?!
2295	 */
2296	TAILQ_INSERT_TAIL(&sc->sc_iflist, bif, bif_next);
2297
2298#if HAS_IF_CAP
2299	/* Set interface capabilities to the intersection set of all members */
2300	bridge_mutecaps(sc);
2301#endif /* HAS_IF_CAP */
2302
2303	bridge_set_tso(sc);
2304
2305
2306	/*
2307	 * Place the interface into promiscuous mode.
2308	 */
2309	switch (ifs->if_type) {
2310	case IFT_ETHER:
2311	case IFT_L2VLAN:
2312		error = ifnet_set_promiscuous(ifs, 1);
2313		if (error) {
2314			/* Ignore error when device is not up */
2315			if (error != ENETDOWN)
2316				goto out;
2317			error = 0;
2318		} else {
2319			bif->bif_flags |= BIFF_PROMISC;
2320		}
2321		break;
2322
2323	default:
2324		break;
2325	}
2326
2327	/*
2328	 * The new member may change the link status of the bridge interface
2329	 */
2330	if (interface_media_active(ifs))
2331		bif->bif_flags |= BIFF_MEDIA_ACTIVE;
2332	else
2333		bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
2334
2335	event_code = bridge_updatelinkstatus(sc);
2336
2337	/*
2338	 * Respect lock ordering with DLIL lock for the following operations
2339	 */
2340	BRIDGE_UNLOCK(sc);
2341
2342	/*
2343	 * install an interface filter
2344	 */
2345	memset(&iff, 0, sizeof (struct iff_filter));
2346	iff.iff_cookie = bif;
2347	iff.iff_name = "com.apple.kernel.bsd.net.if_bridge";
2348	iff.iff_input = bridge_iff_input;
2349#if BRIDGE_MEMBER_OUT_FILTER
2350	iff.iff_output = bridge_iff_output;
2351#endif /* BRIDGE_MEMBER_OUT_FILTER */
2352	iff.iff_event = bridge_iff_event;
2353	iff.iff_detached = bridge_iff_detached;
2354	error = dlil_attach_filter(ifs, &iff, &bif->bif_iff_ref, DLIL_IFF_TSO);
2355	if (error != 0) {
2356		printf("%s: dlil_attach_filter failed %d\n", __func__, error);
2357		BRIDGE_LOCK(sc);
2358		goto out;
2359	}
2360	bif->bif_flags |= BIFF_FILTER_ATTACHED;
2361
2362	/*
2363	 * install a dummy "bridge" protocol
2364	 */
2365	if ((error = bridge_attach_protocol(ifs)) != 0) {
2366		printf("%s: bridge_attach_protocol failed %d\n",
2367		    __func__, error);
2368		BRIDGE_LOCK(sc);
2369		goto out;
2370	}
2373	bif->bif_flags |= BIFF_PROTO_ATTACHED;
2374
2375	if (lladdr_changed &&
2376	    (error = ifnet_set_lladdr(bifp, eaddr, ETHER_ADDR_LEN)) != 0)
2377		printf("%s: ifnet_set_lladdr failed %d\n", __func__, error);
2378
2379	if (event_code != 0)
2380		bridge_link_event(bifp, event_code);
2381
2382	BRIDGE_LOCK(sc);
2383
2384out:
2385	if (error && bif != NULL)
2386		bridge_delete_member(sc, bif, 1);
2387
2388	return (error);
2389}
2390
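/*
 * bridge_ioctl_del:
 *
 *	Remove the named member interface from the bridge.
 */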
2391static int
2392bridge_ioctl_del(struct bridge_softc *sc, void *arg)
2393{
2394	struct ifbreq *req = arg;
2395	struct bridge_iflist *bif;
2396
2397	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2398	if (bif == NULL)
2399		return (ENOENT);
2400
2401	bridge_delete_member(sc, bif, 0);
2402
2403	return (0);
2404}
2405
2406static int
2407bridge_ioctl_purge(struct bridge_softc *sc, void *arg)
2408{
2409#pragma unused(sc, arg)
2410	return (0);
2411}
2412
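/*
 * bridge_ioctl_gifflags:
 *
 *	Return the bridge-specific flags, STP port state and address
 *	limits of the named member interface.
 */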
2413static int
2414bridge_ioctl_gifflags(struct bridge_softc *sc, void *arg)
2415{
2416	struct ifbreq *req = arg;
2417	struct bridge_iflist *bif;
2418	struct bstp_port *bp;
2419
2420	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2421	if (bif == NULL)
2422		return (ENOENT);
2423
2424	bp = &bif->bif_stp;
2425	req->ifbr_ifsflags = bif->bif_ifflags;
2426	req->ifbr_state = bp->bp_state;
2427	req->ifbr_priority = bp->bp_priority;
2428	req->ifbr_path_cost = bp->bp_path_cost;
2429	req->ifbr_portno = bif->bif_ifp->if_index & 0xfff;
2430	req->ifbr_proto = bp->bp_protover;
2431	req->ifbr_role = bp->bp_role;
2432	req->ifbr_stpflags = bp->bp_flags;
2433	req->ifbr_addrcnt = bif->bif_addrcnt;
2434	req->ifbr_addrmax = bif->bif_addrmax;
2435	req->ifbr_addrexceeded = bif->bif_addrexceeded;
2436
2437	/* Copy STP state options as flags */
2438	if (bp->bp_operedge)
2439		req->ifbr_ifsflags |= IFBIF_BSTP_EDGE;
2440	if (bp->bp_flags & BSTP_PORT_AUTOEDGE)
2441		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOEDGE;
2442	if (bp->bp_ptp_link)
2443		req->ifbr_ifsflags |= IFBIF_BSTP_PTP;
2444	if (bp->bp_flags & BSTP_PORT_AUTOPTP)
2445		req->ifbr_ifsflags |= IFBIF_BSTP_AUTOPTP;
2446	if (bp->bp_flags & BSTP_PORT_ADMEDGE)
2447		req->ifbr_ifsflags |= IFBIF_BSTP_ADMEDGE;
2448	if (bp->bp_flags & BSTP_PORT_ADMCOST)
2449		req->ifbr_ifsflags |= IFBIF_BSTP_ADMCOST;
2450	return (0);
2451}
2452
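/*
 * bridge_ioctl_sifflags:
 *
 *	Set the bridge-specific flags of the named member interface,
 *	enabling or disabling STP on the port as requested.
 */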
2453static int
2454bridge_ioctl_sifflags(struct bridge_softc *sc, void *arg)
2455{
2456	struct ifbreq *req = arg;
2457	struct bridge_iflist *bif;
2458#if BRIDGESTP
2459	struct bstp_port *bp;
2460	int error;
2461#endif /* BRIDGESTP */
2462
2463	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2464	if (bif == NULL)
2465		return (ENOENT);
2466
2467	if (req->ifbr_ifsflags & IFBIF_SPAN)
2468		/* SPAN is readonly */
2469		return (EINVAL);
2470
2471
2472#if BRIDGESTP
2473	if (req->ifbr_ifsflags & IFBIF_STP) {
2474		if ((bif->bif_ifflags & IFBIF_STP) == 0) {
2475			error = bstp_enable(&bif->bif_stp);
2476			if (error)
2477				return (error);
2478		}
2479	} else {
2480		if ((bif->bif_ifflags & IFBIF_STP) != 0)
2481			bstp_disable(&bif->bif_stp);
2482	}
2483
2484	/* Pass on STP flags */
2485	bp = &bif->bif_stp;
2486	bstp_set_edge(bp, req->ifbr_ifsflags & IFBIF_BSTP_EDGE ? 1 : 0);
2487	bstp_set_autoedge(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOEDGE ? 1 : 0);
2488	bstp_set_ptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_PTP ? 1 : 0);
2489	bstp_set_autoptp(bp, req->ifbr_ifsflags & IFBIF_BSTP_AUTOPTP ? 1 : 0);
2490#else /* !BRIDGESTP */
2491	if (req->ifbr_ifsflags & IFBIF_STP)
2492		return (EOPNOTSUPP);
2493#endif /* !BRIDGESTP */
2494
2495	/* Save the bits relating to the bridge */
2496	bif->bif_ifflags = req->ifbr_ifsflags & IFBIFMASK;
2497
2498
2499	return (0);
2500}
2501
2502static int
2503bridge_ioctl_scache(struct bridge_softc *sc, void *arg)
2504{
2505	struct ifbrparam *param = arg;
2506
2507	sc->sc_brtmax = param->ifbrp_csize;
2508	bridge_rttrim(sc);
2509
2510	return (0);
2511}
2512
2513static int
2514bridge_ioctl_gcache(struct bridge_softc *sc, void *arg)
2515{
2516	struct ifbrparam *param = arg;
2517
2518	param->ifbrp_csize = sc->sc_brtmax;
2519
2520	return (0);
2521}
2522
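/*
 * BRIDGE_IOCTL_GIFS:
 *
 *	Shared body of the 32-bit and 64-bit "get member interfaces"
 *	handlers: copy an ifbreq out to user space for every member
 *	and span interface, or report the required buffer size when
 *	ifbic_len is zero.
 */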
2523#define	BRIDGE_IOCTL_GIFS do { \
2524	struct bridge_iflist *bif;					\
2525	struct ifbreq breq;						\
2526	char *buf, *outbuf;						\
2527	unsigned int count, buflen, len;				\
2528									\
2529	count = 0;							\
2530	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next)			\
2531		count++;						\
2532	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)			\
2533		count++;						\
2534									\
2535	buflen = sizeof (breq) * count;					\
2536	if (bifc->ifbic_len == 0) {					\
2537		bifc->ifbic_len = buflen;				\
2538		return (0);						\
2539	}								\
2540	BRIDGE_UNLOCK(sc);						\
2541	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		\
2542	BRIDGE_LOCK(sc);						\
2543									\
2544	count = 0;							\
2545	buf = outbuf;							\
2546	len = min(bifc->ifbic_len, buflen);				\
2547	bzero(&breq, sizeof (breq));					\
2548	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
2549		if (len < sizeof (breq))				\
2550			break;						\
2551									\
2552		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname),	\
2553		    "%s", bif->bif_ifp->if_xname);			\
2554		/* Fill in the ifbreq structure */			\
2555		error = bridge_ioctl_gifflags(sc, &breq);		\
2556		if (error)						\
2557			break;						\
2558		memcpy(buf, &breq, sizeof (breq));			\
2559		count++;						\
2560		buf += sizeof (breq);					\
2561		len -= sizeof (breq);					\
2562	}								\
2563	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {		\
2564		if (len < sizeof (breq))				\
2565			break;						\
2566									\
2567		snprintf(breq.ifbr_ifsname, sizeof (breq.ifbr_ifsname),	\
2568		    "%s", bif->bif_ifp->if_xname);			\
2569		breq.ifbr_ifsflags = bif->bif_ifflags;			\
2570		breq.ifbr_portno = bif->bif_ifp->if_index & 0xfff;	\
2571		memcpy(buf, &breq, sizeof (breq));			\
2572		count++;						\
2573		buf += sizeof (breq);					\
2574		len -= sizeof (breq);					\
2575	}								\
2576									\
2577	BRIDGE_UNLOCK(sc);						\
2578	bifc->ifbic_len = sizeof (breq) * count;			\
2579	error = copyout(outbuf, bifc->ifbic_req, bifc->ifbic_len);	\
2580	BRIDGE_LOCK(sc);						\
2581	_FREE(outbuf, M_TEMP);						\
2582} while (0)
2583
2584static int
2585bridge_ioctl_gifs64(struct bridge_softc *sc, void *arg)
2586{
2587	struct ifbifconf64 *bifc = arg;
2588	int error = 0;
2589
2590	BRIDGE_IOCTL_GIFS;
2591
2592	return (error);
2593}
2594
2595static int
2596bridge_ioctl_gifs32(struct bridge_softc *sc, void *arg)
2597{
2598	struct ifbifconf32 *bifc = arg;
2599	int error = 0;
2600
2601	BRIDGE_IOCTL_GIFS;
2602
2603	return (error);
2604}
2605
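/*
 * BRIDGE_IOCTL_RTS:
 *
 *	Shared body of the 32-bit and 64-bit "get forwarding table"
 *	handlers: copy an ifbareq out to user space for every entry in
 *	the route list, including the remaining lifetime of dynamic
 *	entries.
 */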
2606#define	BRIDGE_IOCTL_RTS do {						    \
2607	struct bridge_rtnode *brt;					    \
2608	char *buf, *outbuf;						    \
2609	unsigned int count, buflen, len;				    \
2610	unsigned long now;						    \
2611									    \
2612	if (bac->ifbac_len == 0)					    \
2613		return (0);						    \
2614									    \
2615	count = 0;							    \
2616	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list)			    \
2617		count++;						    \
2618	buflen = sizeof (bareq) * count;				    \
2619									    \
2620	BRIDGE_UNLOCK(sc);						    \
2621	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		    \
2622	BRIDGE_LOCK(sc);						    \
2623									    \
2624	count = 0;							    \
2625	buf = outbuf;							    \
2626	len = min(bac->ifbac_len, buflen);				    \
2627	bzero(&bareq, sizeof (bareq));					    \
2628	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {			    \
2629		if (len < sizeof (bareq))				    \
2630			goto out;					    \
2631		snprintf(bareq.ifba_ifsname, sizeof (bareq.ifba_ifsname),   \
2632		    "%s", brt->brt_ifp->if_xname);			    \
2633		memcpy(bareq.ifba_dst, brt->brt_addr, sizeof (brt->brt_addr)); \
2634		bareq.ifba_vlan = brt->brt_vlan;			    \
2635		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {   \
2636			now = (unsigned long) net_uptime();		    \
2637			if (now < brt->brt_expire)			    \
2638				bareq.ifba_expire =			    \
2639				    brt->brt_expire - now;		    \
2640		} else							    \
2641			bareq.ifba_expire = 0;				    \
2642		bareq.ifba_flags = brt->brt_flags;			    \
2643									    \
2644		memcpy(buf, &bareq, sizeof (bareq));			    \
2645		count++;						    \
2646		buf += sizeof (bareq);					    \
2647		len -= sizeof (bareq);					    \
2648	}								    \
2649out:									    \
2650	BRIDGE_UNLOCK(sc);						    \
2651	bac->ifbac_len = sizeof (bareq) * count;			    \
2652	error = copyout(outbuf, bac->ifbac_req, bac->ifbac_len);	    \
2653	BRIDGE_LOCK(sc);						    \
2654	_FREE(outbuf, M_TEMP);						    \
2655	return (error);							    \
2656} while (0)
2657
2658static int
2659bridge_ioctl_rts64(struct bridge_softc *sc, void *arg)
2660{
2661	struct ifbaconf64 *bac = arg;
2662	struct ifbareq64 bareq;
2663	int error = 0;
2664
2665	BRIDGE_IOCTL_RTS;
2666
2667	return (error);
2668}
2669
2670static int
2671bridge_ioctl_rts32(struct bridge_softc *sc, void *arg)
2672{
2673	struct ifbaconf32 *bac = arg;
2674	struct ifbareq32 bareq;
2675	int error = 0;
2676
2677	BRIDGE_IOCTL_RTS;
2678
2679	return (error);
2680}
2681
2682static int
2683bridge_ioctl_saddr32(struct bridge_softc *sc, void *arg)
2684{
2685	struct ifbareq32 *req = arg;
2686	struct bridge_iflist *bif;
2687	int error;
2688
2689	bif = bridge_lookup_member(sc, req->ifba_ifsname);
2690	if (bif == NULL)
2691		return (ENOENT);
2692
2693	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2694	    req->ifba_flags);
2695
2696	return (error);
2697}
2698
2699static int
2700bridge_ioctl_saddr64(struct bridge_softc *sc, void *arg)
2701{
2702	struct ifbareq64 *req = arg;
2703	struct bridge_iflist *bif;
2704	int error;
2705
2706	bif = bridge_lookup_member(sc, req->ifba_ifsname);
2707	if (bif == NULL)
2708		return (ENOENT);
2709
2710	error = bridge_rtupdate(sc, req->ifba_dst, req->ifba_vlan, bif, 1,
2711	    req->ifba_flags);
2712
2713	return (error);
2714}
2715
2716static int
2717bridge_ioctl_sto(struct bridge_softc *sc, void *arg)
2718{
2719	struct ifbrparam *param = arg;
2720
2721	sc->sc_brttimeout = param->ifbrp_ctime;
2722	return (0);
2723}
2724
2725static int
2726bridge_ioctl_gto(struct bridge_softc *sc, void *arg)
2727{
2728	struct ifbrparam *param = arg;
2729
2730	param->ifbrp_ctime = sc->sc_brttimeout;
2731	return (0);
2732}
2733
2734static int
2735bridge_ioctl_daddr32(struct bridge_softc *sc, void *arg)
2736{
2737	struct ifbareq32 *req = arg;
2738
2739	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2740}
2741
2742static int
2743bridge_ioctl_daddr64(struct bridge_softc *sc, void *arg)
2744{
2745	struct ifbareq64 *req = arg;
2746
2747	return (bridge_rtdaddr(sc, req->ifba_dst, req->ifba_vlan));
2748}
2749
2750static int
2751bridge_ioctl_flush(struct bridge_softc *sc, void *arg)
2752{
2753	struct ifbreq *req = arg;
2754
2755	bridge_rtflush(sc, req->ifbr_ifsflags);
2756	return (0);
2757}
2758
2759static int
2760bridge_ioctl_gpri(struct bridge_softc *sc, void *arg)
2761{
2762	struct ifbrparam *param = arg;
2763	struct bstp_state *bs = &sc->sc_stp;
2764
2765	param->ifbrp_prio = bs->bs_bridge_priority;
2766	return (0);
2767}
2768
2769static int
2770bridge_ioctl_spri(struct bridge_softc *sc, void *arg)
2771{
2772#if BRIDGESTP
2773	struct ifbrparam *param = arg;
2774
2775	return (bstp_set_priority(&sc->sc_stp, param->ifbrp_prio));
2776#else /* !BRIDGESTP */
2777#pragma unused(sc, arg)
2778	return (EOPNOTSUPP);
2779#endif /* !BRIDGESTP */
2780}
2781
2782static int
2783bridge_ioctl_ght(struct bridge_softc *sc, void *arg)
2784{
2785	struct ifbrparam *param = arg;
2786	struct bstp_state *bs = &sc->sc_stp;
2787
2788	param->ifbrp_hellotime = bs->bs_bridge_htime >> 8;
2789	return (0);
2790}
2791
2792static int
2793bridge_ioctl_sht(struct bridge_softc *sc, void *arg)
2794{
2795#if BRIDGESTP
2796	struct ifbrparam *param = arg;
2797
2798	return (bstp_set_htime(&sc->sc_stp, param->ifbrp_hellotime));
2799#else /* !BRIDGESTP */
2800#pragma unused(sc, arg)
2801	return (EOPNOTSUPP);
2802#endif /* !BRIDGESTP */
2803}
2804
2805static int
2806bridge_ioctl_gfd(struct bridge_softc *sc, void *arg)
2807{
2808	struct ifbrparam *param = arg;
2809	struct bstp_state *bs = &sc->sc_stp;
2810
2811	param->ifbrp_fwddelay = bs->bs_bridge_fdelay >> 8;
2812	return (0);
2813}
2814
2815static int
2816bridge_ioctl_sfd(struct bridge_softc *sc, void *arg)
2817{
2818#if BRIDGESTP
2819	struct ifbrparam *param = arg;
2820
2821	return (bstp_set_fdelay(&sc->sc_stp, param->ifbrp_fwddelay));
2822#else /* !BRIDGESTP */
2823#pragma unused(sc, arg)
2824	return (EOPNOTSUPP);
2825#endif /* !BRIDGESTP */
2826}
2827
2828static int
2829bridge_ioctl_gma(struct bridge_softc *sc, void *arg)
2830{
2831	struct ifbrparam *param = arg;
2832	struct bstp_state *bs = &sc->sc_stp;
2833
2834	param->ifbrp_maxage = bs->bs_bridge_max_age >> 8;
2835	return (0);
2836}
2837
2838static int
2839bridge_ioctl_sma(struct bridge_softc *sc, void *arg)
2840{
2841#if BRIDGESTP
2842	struct ifbrparam *param = arg;
2843
2844	return (bstp_set_maxage(&sc->sc_stp, param->ifbrp_maxage));
2845#else /* !BRIDGESTP */
2846#pragma unused(sc, arg)
2847	return (EOPNOTSUPP);
2848#endif /* !BRIDGESTP */
2849}
2850
2851static int
2852bridge_ioctl_sifprio(struct bridge_softc *sc, void *arg)
2853{
2854#if BRIDGESTP
2855	struct ifbreq *req = arg;
2856	struct bridge_iflist *bif;
2857
2858	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2859	if (bif == NULL)
2860		return (ENOENT);
2861
2862	return (bstp_set_port_priority(&bif->bif_stp, req->ifbr_priority));
2863#else /* !BRIDGESTP */
2864#pragma unused(sc, arg)
2865	return (EOPNOTSUPP);
2866#endif /* !BRIDGESTP */
2867}
2868
2869static int
2870bridge_ioctl_sifcost(struct bridge_softc *sc, void *arg)
2871{
2872#if BRIDGESTP
2873	struct ifbreq *req = arg;
2874	struct bridge_iflist *bif;
2875
2876	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2877	if (bif == NULL)
2878		return (ENOENT);
2879
2880	return (bstp_set_path_cost(&bif->bif_stp, req->ifbr_path_cost));
2881#else /* !BRIDGESTP */
2882#pragma unused(sc, arg)
2883	return (EOPNOTSUPP);
2884#endif /* !BRIDGESTP */
2885}
2886
2887static int
2888bridge_ioctl_gfilt(struct bridge_softc *sc, void *arg)
2889{
2890	struct ifbrparam *param = arg;
2891
2892	param->ifbrp_filter = sc->sc_filter_flags;
2893
2894	return (0);
2895}
2896
2897static int
2898bridge_ioctl_sfilt(struct bridge_softc *sc, void *arg)
2899{
2900	struct ifbrparam *param = arg;
2901
2902	if (param->ifbrp_filter & ~IFBF_FILT_MASK)
2903		return (EINVAL);
2904
2905#ifndef BRIDGE_IPF
2906	if (param->ifbrp_filter & IFBF_FILT_USEIPF)
2907		return (EINVAL);
2908#endif
2909
2910	sc->sc_filter_flags = param->ifbrp_filter;
2911
2912	return (0);
2913}
2914
2915static int
2916bridge_ioctl_sifmaxaddr(struct bridge_softc *sc, void *arg)
2917{
2918	struct ifbreq *req = arg;
2919	struct bridge_iflist *bif;
2920
2921	bif = bridge_lookup_member(sc, req->ifbr_ifsname);
2922	if (bif == NULL)
2923		return (ENOENT);
2924
2925	bif->bif_addrmax = req->ifbr_addrmax;
2926	return (0);
2927}
2928
2929static int
2930bridge_ioctl_addspan(struct bridge_softc *sc, void *arg)
2931{
2932	struct ifbreq *req = arg;
2933	struct bridge_iflist *bif = NULL;
2934	struct ifnet *ifs;
2935
2936	ifs = ifunit(req->ifbr_ifsname);
2937	if (ifs == NULL)
2938		return (ENOENT);
2939
2940	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2941		if (ifs == bif->bif_ifp)
2942			return (EBUSY);
2943
2944	if (ifs->if_bridge != NULL)
2945		return (EBUSY);
2946
2947	switch (ifs->if_type) {
2948		case IFT_ETHER:
2949		case IFT_L2VLAN:
2950			break;
2951		case IFT_GIF:
2952			/* currently not supported */
2953			/* FALLTHRU */
2954		default:
2955			return (EINVAL);
2956	}
2957
2958	bif = _MALLOC(sizeof (*bif), M_DEVBUF, M_NOWAIT | M_ZERO);
2959	if (bif == NULL)
2960		return (ENOMEM);
2961
2962	bif->bif_ifp = ifs;
2963	bif->bif_ifflags = IFBIF_SPAN;
2964
2965	ifnet_reference(bif->bif_ifp);
2966
2967	TAILQ_INSERT_HEAD(&sc->sc_spanlist, bif, bif_next);
2968
2969	return (0);
2970}
2971
2972static int
2973bridge_ioctl_delspan(struct bridge_softc *sc, void *arg)
2974{
2975	struct ifbreq *req = arg;
2976	struct bridge_iflist *bif;
2977	struct ifnet *ifs;
2978
2979	ifs = ifunit(req->ifbr_ifsname);
2980	if (ifs == NULL)
2981		return (ENOENT);
2982
2983	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
2984		if (ifs == bif->bif_ifp)
2985			break;
2986
2987	if (bif == NULL)
2988		return (ENOENT);
2989
2990	bridge_delete_span(sc, bif);
2991
2992	return (0);
2993}
2994
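/*
 * BRIDGE_IOCTL_GBPARAM:
 *
 *	Shared body of the 32-bit and 64-bit "get bridge STP
 *	parameters" handlers: report the spanning tree timers,
 *	priority, root port and bridge identifiers.
 */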
2995#define	BRIDGE_IOCTL_GBPARAM do {					\
2996	struct bstp_state *bs = &sc->sc_stp;				\
2997	struct bstp_port *root_port;					\
2998									\
2999	req->ifbop_maxage = bs->bs_bridge_max_age >> 8;			\
3000	req->ifbop_hellotime = bs->bs_bridge_htime >> 8;		\
3001	req->ifbop_fwddelay = bs->bs_bridge_fdelay >> 8;		\
3002									\
3003	root_port = bs->bs_root_port;					\
3004	if (root_port == NULL)						\
3005		req->ifbop_root_port = 0;				\
3006	else								\
3007		req->ifbop_root_port = root_port->bp_ifp->if_index;	\
3008									\
3009	req->ifbop_holdcount = bs->bs_txholdcount;			\
3010	req->ifbop_priority = bs->bs_bridge_priority;			\
3011	req->ifbop_protocol = bs->bs_protover;				\
3012	req->ifbop_root_path_cost = bs->bs_root_pv.pv_cost;		\
3013	req->ifbop_bridgeid = bs->bs_bridge_pv.pv_dbridge_id;		\
3014	req->ifbop_designated_root = bs->bs_root_pv.pv_root_id;		\
3015	req->ifbop_designated_bridge = bs->bs_root_pv.pv_dbridge_id;	\
3016	req->ifbop_last_tc_time.tv_sec = bs->bs_last_tc_time.tv_sec;	\
3017	req->ifbop_last_tc_time.tv_usec = bs->bs_last_tc_time.tv_usec;	\
3018} while (0)
3019
3020static int
3021bridge_ioctl_gbparam32(struct bridge_softc *sc, void *arg)
3022{
3023	struct ifbropreq32 *req = arg;
3024
3025	BRIDGE_IOCTL_GBPARAM;
3026
3027	return (0);
3028}
3029
3030static int
3031bridge_ioctl_gbparam64(struct bridge_softc *sc, void *arg)
3032{
3033	struct ifbropreq64 *req = arg;
3034
3035	BRIDGE_IOCTL_GBPARAM;
3036
3037	return (0);
3038}
3039
3040static int
3041bridge_ioctl_grte(struct bridge_softc *sc, void *arg)
3042{
3043	struct ifbrparam *param = arg;
3044
3045	param->ifbrp_cexceeded = sc->sc_brtexceeded;
3046	return (0);
3047}
3048
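/*
 * BRIDGE_IOCTL_GIFSSTP:
 *
 *	Shared body of the 32-bit and 64-bit "get member STP status"
 *	handlers: copy an ifbpstpreq out to user space for every
 *	STP-enabled member, or report the required buffer size when
 *	ifbpstp_len is zero.
 */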
3049#define	BRIDGE_IOCTL_GIFSSTP do {					\
3050	struct bridge_iflist *bif;					\
3051	struct bstp_port *bp;						\
3052	struct ifbpstpreq bpreq;					\
3053	char *buf, *outbuf;						\
3054	unsigned int count, buflen, len;				\
3055									\
3056	count = 0;							\
3057	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
3058		if ((bif->bif_ifflags & IFBIF_STP) != 0)		\
3059			count++;					\
3060	}								\
3061									\
3062	buflen = sizeof (bpreq) * count;				\
3063	if (bifstp->ifbpstp_len == 0) {					\
3064		bifstp->ifbpstp_len = buflen;				\
3065		return (0);						\
3066	}								\
3067									\
3068	BRIDGE_UNLOCK(sc);						\
3069	outbuf = _MALLOC(buflen, M_TEMP, M_WAITOK | M_ZERO);		\
3070	BRIDGE_LOCK(sc);						\
3071									\
3072	count = 0;							\
3073	buf = outbuf;							\
3074	len = min(bifstp->ifbpstp_len, buflen);				\
3075	bzero(&bpreq, sizeof (bpreq));					\
3076	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {			\
3077		if (len < sizeof (bpreq))				\
3078			break;						\
3079									\
3080		if ((bif->bif_ifflags & IFBIF_STP) == 0)		\
3081			continue;					\
3082									\
3083		bp = &bif->bif_stp;					\
3084		bpreq.ifbp_portno = bif->bif_ifp->if_index & 0xfff;	\
3085		bpreq.ifbp_fwd_trans = bp->bp_forward_transitions;	\
3086		bpreq.ifbp_design_cost = bp->bp_desg_pv.pv_cost;	\
3087		bpreq.ifbp_design_port = bp->bp_desg_pv.pv_port_id;	\
3088		bpreq.ifbp_design_bridge = bp->bp_desg_pv.pv_dbridge_id; \
3089		bpreq.ifbp_design_root = bp->bp_desg_pv.pv_root_id;	\
3090									\
3091		memcpy(buf, &bpreq, sizeof (bpreq));			\
3092		count++;						\
3093		buf += sizeof (bpreq);					\
3094		len -= sizeof (bpreq);					\
3095	}								\
3096									\
3097	BRIDGE_UNLOCK(sc);						\
3098	bifstp->ifbpstp_len = sizeof (bpreq) * count;			\
3099	error = copyout(outbuf, bifstp->ifbpstp_req, bifstp->ifbpstp_len); \
3100	BRIDGE_LOCK(sc);						\
3101	_FREE(outbuf, M_TEMP);						\
3102	return (error);							\
3103} while (0)
3104
3105static int
3106bridge_ioctl_gifsstp32(struct bridge_softc *sc, void *arg)
3107{
3108	struct ifbpstpconf32 *bifstp = arg;
3109	int error = 0;
3110
3111	BRIDGE_IOCTL_GIFSSTP;
3112
3113	return (error);
3114}
3115
3116static int
3117bridge_ioctl_gifsstp64(struct bridge_softc *sc, void *arg)
3118{
3119	struct ifbpstpconf64 *bifstp = arg;
3120	int error = 0;
3121
3122	BRIDGE_IOCTL_GIFSSTP;
3123
3124	return (error);
3125}
3126
3127static int
3128bridge_ioctl_sproto(struct bridge_softc *sc, void *arg)
3129{
3130#if BRIDGESTP
3131	struct ifbrparam *param = arg;
3132
3133	return (bstp_set_protocol(&sc->sc_stp, param->ifbrp_proto));
3134#else /* !BRIDGESTP */
3135#pragma unused(sc, arg)
3136	return (EOPNOTSUPP);
3137#endif /* !BRIDGESTP */
3138}
3139
3140static int
3141bridge_ioctl_stxhc(struct bridge_softc *sc, void *arg)
3142{
3143#if BRIDGESTP
3144	struct ifbrparam *param = arg;
3145
3146	return (bstp_set_holdcount(&sc->sc_stp, param->ifbrp_txhc));
3147#else /* !BRIDGESTP */
3148#pragma unused(sc, arg)
3149	return (EOPNOTSUPP);
3150#endif /* !BRIDGESTP */
3151}
3152
3153
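/*
 * bridge_ioctl_ghostfilter:
 *
 *	Return the host filter configuration (allowed source MAC and
 *	IPv4 address) of the named member interface.
 */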
3154static int
3155bridge_ioctl_ghostfilter(struct bridge_softc *sc, void *arg)
3156{
3157	struct ifbrhostfilter *req = arg;
3158	struct bridge_iflist *bif;
3159
3160	bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
3161	if (bif == NULL)
3162		return (ENOENT);
3163
3164	bzero(req, sizeof(struct ifbrhostfilter));
3165	if (bif->bif_flags & BIFF_HOST_FILTER) {
3166		req->ifbrhf_flags |= IFBRHF_ENABLED;
3167		bcopy(bif->bif_hf_hwsrc, req->ifbrhf_hwsrca,
3168		    ETHER_ADDR_LEN);
3169		req->ifbrhf_ipsrc = bif->bif_hf_ipsrc.s_addr;
3170	}
3171	return (0);
3172}
3173
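/*
 * bridge_ioctl_shostfilter:
 *
 *	Enable, update or disable the host filter on the named member
 *	interface, recording the allowed source MAC and IPv4 address.
 */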
3174static int
3175bridge_ioctl_shostfilter(struct bridge_softc *sc, void *arg)
3176{
3177	struct ifbrhostfilter *req = arg;
3178	struct bridge_iflist *bif;
3179
3180	bif = bridge_lookup_member(sc, req->ifbrhf_ifsname);
3181	if (bif == NULL)
3182		return (ENOENT);
3183
3184	if (req->ifbrhf_flags & IFBRHF_ENABLED) {
3185		bif->bif_flags |= BIFF_HOST_FILTER;
3186
3187		if (req->ifbrhf_flags & IFBRHF_HWSRC) {
3188			bcopy(req->ifbrhf_hwsrca, bif->bif_hf_hwsrc,
3189			    ETHER_ADDR_LEN);
3190			if (bcmp(req->ifbrhf_hwsrca, ethernulladdr,
3191			    ETHER_ADDR_LEN) != 0)
3192				bif->bif_flags |= BIFF_HF_HWSRC;
3193			else
3194				bif->bif_flags &= ~BIFF_HF_HWSRC;
3195		}
3196		if (req->ifbrhf_flags & IFBRHF_IPSRC) {
3197			bif->bif_hf_ipsrc.s_addr = req->ifbrhf_ipsrc;
3198			if (bif->bif_hf_ipsrc.s_addr != INADDR_ANY)
3199				bif->bif_flags |= BIFF_HF_IPSRC;
3200			else
3201				bif->bif_flags &= ~BIFF_HF_IPSRC;
3202		}
3203	} else {
3204		bif->bif_flags &= ~(BIFF_HOST_FILTER | BIFF_HF_HWSRC |
3205		    BIFF_HF_IPSRC);
3206		bzero(bif->bif_hf_hwsrc, ETHER_ADDR_LEN);
3207		bif->bif_hf_ipsrc.s_addr = INADDR_ANY;
3208	}
3209
3210	return (0);
3211}
3212
3213
3214/*
3215 * bridge_ifdetach:
3216 *
3217 *	Detach an interface from a bridge.  Called when a member
3218 *	interface is detaching.
3219 */
3220__private_extern__ void
3221bridge_ifdetach(struct bridge_iflist *bif, struct ifnet *ifp)
3222{
3223	struct bridge_softc *sc = ifp->if_bridge;
3224
3225#if BRIDGE_DEBUG
3226	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
3227		printf("%s: %s\n", __func__, ifp->if_xname);
3228#endif /* BRIDGE_DEBUG */
3229
3230	/* Check if the interface is a bridge member */
3231	if (sc != NULL) {
3232		BRIDGE_LOCK(sc);
3233		bif = bridge_lookup_member_if(sc, ifp);
3234		if (bif != NULL)
3235			bridge_delete_member(sc, bif, 1);
3236		BRIDGE_UNLOCK(sc);
3237		return;
3238	}
3239
3240	/* Check if the interface is a span port */
3241	lck_mtx_lock(&bridge_list_mtx);
3242	LIST_FOREACH(sc, &bridge_list, sc_list) {
3243		BRIDGE_LOCK(sc);
3244		TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next)
3245			if (ifp == bif->bif_ifp) {
3246				bridge_delete_span(sc, bif);
3247				break;
3248			}
3249		BRIDGE_UNLOCK(sc);
3250	}
3251	lck_mtx_unlock(&bridge_list_mtx);
3252}
3253
3254/*
3255 * interface_media_active:
3256 *
3257 *	Tells whether an interface's media is active.
3258 */
3259static int
3260interface_media_active(struct ifnet *ifp)
3261{
3262	struct ifmediareq   ifmr;
3263	int status = 0;
3264
3265	bzero(&ifmr, sizeof(ifmr));
3266	if (ifnet_ioctl(ifp, 0, SIOCGIFMEDIA, &ifmr) == 0) {
3267		if ((ifmr.ifm_status & IFM_AVALID) && ifmr.ifm_count > 0)
3268			status = ifmr.ifm_status & IFM_ACTIVE ? 1 : 0;
3269	}
3270
3271	return (status);
3272}
3273
3274/*
3275 * bridge_updatelinkstatus:
3276 *
3277 * 	Update the media active status of the bridge based on the
3278 *	media active status of its members.
3279 *	If it changed, return the corresponding on/off link event.
3280 */
3281static u_int32_t
3282bridge_updatelinkstatus(struct bridge_softc *sc)
3283{
3284	struct bridge_iflist *bif;
3285	int active_member = 0;
3286	u_int32_t event_code = 0;
3287
3288	BRIDGE_LOCK_ASSERT_HELD(sc);
3289
3290	/*
3291	 * Find out if we have an active interface
3292	 */
3293	TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
3294		if (bif->bif_flags & BIFF_MEDIA_ACTIVE) {
3295			active_member = 1;
3296			break;
3297		}
3298	}
3299
3300	if (active_member && !(sc->sc_flags & SCF_MEDIA_ACTIVE)) {
3301		sc->sc_flags |= SCF_MEDIA_ACTIVE;
3302		event_code = KEV_DL_LINK_ON;
3303	} else if (!active_member && (sc->sc_flags & SCF_MEDIA_ACTIVE)) {
3304		sc->sc_flags &= ~SCF_MEDIA_ACTIVE;
3305		event_code = KEV_DL_LINK_OFF;
3306	}
3307
3308	return (event_code);
3309}
3310
3311/*
3312 * bridge_iflinkevent:
3313 */
3314static void
3315bridge_iflinkevent(struct ifnet *ifp)
3316{
3317	struct bridge_softc *sc = ifp->if_bridge;
3318	struct bridge_iflist *bif;
3319	u_int32_t event_code = 0;
3320
3321#if BRIDGE_DEBUG
3322	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
3323		printf("%s: %s\n", __func__, ifp->if_xname);
3324#endif /* BRIDGE_DEBUG */
3325
3326	/* Check if the interface is a bridge member */
3327	if (sc == NULL)
3328		return;
3329
3330	BRIDGE_LOCK(sc);
3331	bif = bridge_lookup_member_if(sc, ifp);
3332	if (bif != NULL) {
3333		if (interface_media_active(ifp))
3334			bif->bif_flags |= BIFF_MEDIA_ACTIVE;
3335		else
3336			bif->bif_flags &= ~BIFF_MEDIA_ACTIVE;
3337
3338		event_code = bridge_updatelinkstatus(sc);
3339	}
3340	BRIDGE_UNLOCK(sc);
3341
3342	if (event_code != 0)
3343		bridge_link_event(sc->sc_ifp, event_code);
3344}
3345
3346/*
3347 * bridge_delayed_callback:
3348 *
3349 *	Makes a delayed call
3350 */
3351static void
3352bridge_delayed_callback(void *param)
3353{
3354	struct bridge_delayed_call *call = (struct bridge_delayed_call *)param;
3355	struct bridge_softc *sc = call->bdc_sc;
3356
3357#if BRIDGE_DEBUG_DELAYED_CALLBACK
3358	if (bridge_delayed_callback_delay > 0) {
3359		struct timespec ts;
3360
3361		ts.tv_sec = bridge_delayed_callback_delay;
3362		ts.tv_nsec = 0;
3363
3364		printf("%s: sleeping for %d seconds\n",
3365		    __func__, bridge_delayed_callback_delay);
3366
3367		msleep(&bridge_delayed_callback_delay, NULL, PZERO,
3368		    __func__, &ts);
3369
3370		printf("%s: awoken\n", __func__);
3371	}
3372#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3373
3374	BRIDGE_LOCK(sc);
3375
3376#if BRIDGE_DEBUG_DELAYED_CALLBACK
3377	if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
3378		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
3379		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
3380		    call->bdc_flags);
3381#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3382
3383	if (call->bdc_flags & BDCF_CANCELLING) {
3384		wakeup(call);
3385	} else {
3386		if ((sc->sc_flags & SCF_DETACHING) == 0)
3387			(*call->bdc_func)(sc);
3388	}
3389	call->bdc_flags &= ~BDCF_OUTSTANDING;
3390	BRIDGE_UNLOCK(sc);
3391}
3392
3393/*
3394 * bridge_schedule_delayed_call:
3395 *
3396 *	Schedule a function to be called on a separate thread
3397 * 	The actual call may be scheduled to run at a given time or ASAP.
3398 */
3399static void
3400bridge_schedule_delayed_call(struct bridge_delayed_call *call)
3401{
3402	uint64_t deadline = 0;
3403	struct bridge_softc *sc = call->bdc_sc;
3404
3405	BRIDGE_LOCK_ASSERT_HELD(sc);
3406
3407	if ((sc->sc_flags & SCF_DETACHING) ||
3408	    (call->bdc_flags & (BDCF_OUTSTANDING | BDCF_CANCELLING)))
3409		return;
3410
3411	if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec) {
3412		nanoseconds_to_absolutetime(
3413		    (uint64_t)call->bdc_ts.tv_sec * NSEC_PER_SEC +
3414		    call->bdc_ts.tv_nsec, &deadline);
3415		clock_absolutetime_interval_to_deadline(deadline, &deadline);
3416	}
3417
3418	call->bdc_flags = BDCF_OUTSTANDING;
3419
3420#if BRIDGE_DEBUG_DELAYED_CALLBACK
3421	if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
3422		printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
3423		    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
3424		    call->bdc_flags);
3425#endif /* BRIDGE_DEBUG_DELAYED_CALLBACK */
3426
3427	if (call->bdc_ts.tv_sec || call->bdc_ts.tv_nsec)
3428		thread_call_func_delayed(
3429			(thread_call_func_t)bridge_delayed_callback,
3430			call, deadline);
3431	else {
3432		if (call->bdc_thread_call == NULL)
3433			call->bdc_thread_call = thread_call_allocate(
3434				(thread_call_func_t)bridge_delayed_callback,
3435				call);
3436		thread_call_enter(call->bdc_thread_call);
3437	}
3438}
3439
3440/*
3441 * bridge_cancel_delayed_call:
3442 *
3443 *	Cancel a queued or running delayed call.
3444 *	If the call is running, do not return until the call is done,
3445 *	to prevent a race with the bridge interface getting destroyed.
3446 */
3447static void
3448bridge_cancel_delayed_call(struct bridge_delayed_call *call)
3449{
3450	boolean_t result;
3451	struct bridge_softc *sc = call->bdc_sc;
3452
3453	/*
3454	 * The call was never scheduled
3455	 */
3456	if (sc == NULL)
3457		return;
3458
3459	BRIDGE_LOCK_ASSERT_HELD(sc);
3460
3461	call->bdc_flags |= BDCF_CANCELLING;
3462
3463	while (call->bdc_flags & BDCF_OUTSTANDING) {
3464#if BRIDGE_DEBUG
3465		if (if_bridge_debug & BR_DBGF_DELAYED_CALL)
3466			printf("%s: %s call 0x%llx flags 0x%x\n", __func__,
3467			    sc->sc_if_xname, (uint64_t)VM_KERNEL_ADDRPERM(call),
3468			    call->bdc_flags);
3469#endif /* BRIDGE_DEBUG */
3470		result = thread_call_func_cancel(
3471		    (thread_call_func_t)bridge_delayed_callback, call, FALSE);
3472
3473		if (result) {
3474			/*
3475			 * We managed to dequeue the delayed call
3476			 */
3477			call->bdc_flags &= ~BDCF_OUTSTANDING;
3478		} else {
3479			/*
3480			 * Wait for the delayed call to finish running
3481			 */
3482			msleep(call, &sc->sc_mtx, PZERO, __func__, NULL);
3483		}
3484	}
3485	call->bdc_flags &= ~BDCF_CANCELLING;
3486}
3487
3488/*
3489 * bridge_cleanup_delayed_call:
3490 *
3491 *	Dispose of the resources allocated for a delayed call.
3492 *	Assumes the delayed call is not queued or running.
3493 */
3494static void
3495bridge_cleanup_delayed_call(struct bridge_delayed_call *call)
3496{
3497	boolean_t result;
3498	struct bridge_softc *sc = call->bdc_sc;
3499
3500	/*
3501	 * The call was never scheduled
3502	 */
3503	if (sc == NULL)
3504		return;
3505
3506	BRIDGE_LOCK_ASSERT_HELD(sc);
3507
3508	VERIFY((call->bdc_flags & BDCF_OUTSTANDING) == 0);
3509	VERIFY((call->bdc_flags & BDCF_CANCELLING) == 0);
3510
3511	if (call->bdc_thread_call != NULL) {
3512		result = thread_call_free(call->bdc_thread_call);
3513		if (result == FALSE)
3514			panic("%s thread_call_free() failed for call %p",
3515				__func__, call);
3516		call->bdc_thread_call = NULL;
3517	}
3518}
3519
3520/*
3521 * bridge_init:
3522 *
3523 *	Initialize a bridge interface.
3524 */
3525static int
3526bridge_init(struct ifnet *ifp)
3527{
3528	struct bridge_softc *sc = (struct bridge_softc *)ifp->if_softc;
3529	errno_t error;
3530
3531	BRIDGE_LOCK_ASSERT_HELD(sc);
3532
3533	if ((ifnet_flags(ifp) & IFF_RUNNING))
3534		return (0);
3535
3536	error = ifnet_set_flags(ifp, IFF_RUNNING, IFF_RUNNING);
3537
3538	/*
3539	 * Calling bridge_aging_timer() is OK as there are no entries to
3540	 * age, so we're just going to arm the timer.
3541	 */
3542	bridge_aging_timer(sc);
3543
3544#if BRIDGESTP
3545	if (error == 0)
3546		bstp_init(&sc->sc_stp);		/* Initialize Spanning Tree */
3547#endif /* BRIDGESTP */
3548
3549	return (error);
3550}
3551
3552/*
3553 * bridge_ifstop:
3554 *
3555 *	Stop the bridge interface.
3556 */
3557static void
3558bridge_ifstop(struct ifnet *ifp, int disable)
3559{
3560#pragma unused(disable)
3561	struct bridge_softc *sc = ifp->if_softc;
3562
3563	BRIDGE_LOCK_ASSERT_HELD(sc);
3564
3565	if ((ifnet_flags(ifp) & IFF_RUNNING) == 0)
3566		return;
3567
3568	bridge_cancel_delayed_call(&sc->sc_aging_timer);
3569
3570#if BRIDGESTP
3571	bstp_stop(&sc->sc_stp);
3572#endif /* BRIDGESTP */
3573
3574	bridge_rtflush(sc, IFBF_FLUSHDYN);
3575
3576	(void) ifnet_set_flags(ifp, 0, IFF_RUNNING);
3577}
3578
3579/*
3580 * bridge_enqueue:
3581 *
3582 *	Enqueue a packet on a bridge member interface.
3583 *
3584 */
3585static int
3586bridge_enqueue(struct bridge_softc *sc, struct ifnet *dst_ifp, struct mbuf *m)
3587{
3588	int len, error = 0;
3589	short mflags;
3590	struct mbuf *m0;
3591
3592	VERIFY(dst_ifp != NULL);
3593
3594	/*
3595	 * We may be sending a fragment, so traverse the mbuf packet chain
3596	 *
3597	 * NOTE: bridge_fragment() is called only when PFIL_HOOKS is enabled.
3598	 */
3599	for (; m; m = m0) {
3600		errno_t _error;
3601		struct flowadv adv = { FADV_SUCCESS };
3602
3603		m0 = m->m_nextpkt;
3604		m->m_nextpkt = NULL;
3605
3606		len = m->m_pkthdr.len;
3607		mflags = m->m_flags;
3608		m->m_flags |= M_PROTO1; /* set to avoid loops */
3609
3610		bridge_finalize_cksum(dst_ifp, m);
3611
3612#if HAS_IF_CAP
3613		/*
3614		 * If the underlying interface cannot do VLAN tag insertion itself
3615		 * then attach a packet tag that holds it.
3616		 */
3617		if ((m->m_flags & M_VLANTAG) &&
3618		    (dst_ifp->if_capenable & IFCAP_VLAN_HWTAGGING) == 0) {
3619			m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
3620			if (m == NULL) {
3621				printf("%s: %s: unable to prepend VLAN "
3622				    "header\n", __func__, dst_ifp->if_xname);
3623				(void) ifnet_stat_increment_out(dst_ifp,
3624				    0, 0, 1);
3625				continue;
3626			}
3627			m->m_flags &= ~M_VLANTAG;
3628		}
3629#endif /* HAS_IF_CAP */
3630
3631		_error = dlil_output(dst_ifp, 0, m, NULL, NULL, 1, &adv);
3632
3633		/* Preserve existing error value */
3634		if (error == 0) {
3635			if (_error != 0)
3636				error = _error;
3637			else if (adv.code == FADV_FLOW_CONTROLLED)
3638				error = EQFULL;
3639			else if (adv.code == FADV_SUSPENDED)
3640				error = EQSUSPENDED;
3641		}
3642
3643		if (_error == 0) {
3644			(void) ifnet_stat_increment_out(sc->sc_ifp, 1, len, 0);
3645		} else {
3646			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
3647		}
3648	}
3649
3650	return (error);
3651}
3652
3653#if HAS_BRIDGE_DUMMYNET
3654/*
3655 * bridge_dummynet:
3656 *
3657 *	Receive a queued packet from dummynet and pass it on to the output
3658 *	interface.
3659 *
3660 *	The mbuf has the Ethernet header already attached.
3661 */
3662static void
3663bridge_dummynet(struct mbuf *m, struct ifnet *ifp)
3664{
3665	struct bridge_softc *sc;
3666
3667	sc = ifp->if_bridge;
3668
3669	/*
3670	 * The packet didn't originate from a member interface. This should only
3671	 * ever happen if a member interface is removed while packets are
3672	 * queued for it.
3673	 */
3674	if (sc == NULL) {
3675		m_freem(m);
3676		return;
3677	}
3678
3679	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
3680		if (bridge_pfil(&m, sc->sc_ifp, ifp, PFIL_OUT) != 0)
3681			return;
3682		if (m == NULL)
3683			return;
3684	}
3685
3686	(void) bridge_enqueue(sc, ifp, m);
3687}
3688#endif /* HAS_BRIDGE_DUMMYNET */
3689
3690#if BRIDGE_MEMBER_OUT_FILTER
3691/*
3692 * bridge_member_output:
3693 *
3694 *	Send output from a bridge member interface.  This
3695 *	performs the bridging function for locally originated
3696 *	packets.
3697 *
3698 *	The mbuf has the Ethernet header already attached.  We must
3699 *	enqueue or free the mbuf before returning.
3700 */
3701static int
3702bridge_member_output(struct ifnet *ifp, struct mbuf *m, struct sockaddr *sa,
3703	struct rtentry *rt)
3704{
3705#pragma unused(sa, rt)
3706	struct ether_header *eh;
3707	struct ifnet *dst_if;
3708	struct bridge_softc *sc;
3709	uint16_t vlan;
3710
3711#if BRIDGE_DEBUG
3712	if (if_bridge_debug & BR_DBGF_OUTPPUT)
3713		printf("%s: ifp %s\n", __func__, ifp->if_xname);
3714#endif /* BRIDGE_DEBUG */
3715
3716	if (m->m_len < ETHER_HDR_LEN) {
3717		m = m_pullup(m, ETHER_HDR_LEN);
3718		if (m == NULL)
3719			return (0);
3720	}
3721
3722	eh = mtod(m, struct ether_header *);
3723	sc = ifp->if_bridge;
3724	vlan = VLANTAGOF(m);
3725
3726	BRIDGE_LOCK(sc);
3727
3728	/*
3729	 * APPLE MODIFICATION
3730	 * If the packet is an 802.1X ethertype, then only send on the
3731	 * original output interface.
3732	 */
3733	if (eh->ether_type == htons(ETHERTYPE_PAE)) {
3734		dst_if = ifp;
3735		goto sendunicast;
3736	}
3737
3738	/*
3739	 * If bridge is down, but the original output interface is up,
3740	 * go ahead and send out that interface.  Otherwise, the packet
3741	 * is dropped below.
3742	 */
3743	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
3744		dst_if = ifp;
3745		goto sendunicast;
3746	}
3747
3748	/*
3749	 * If the packet is a multicast, or we don't know a better way to
3750	 * get there, send to all interfaces.
3751	 */
3752	if (ETHER_IS_MULTICAST(eh->ether_dhost))
3753		dst_if = NULL;
3754	else
3755		dst_if = bridge_rtlookup(sc, eh->ether_dhost, vlan);
3756	if (dst_if == NULL) {
3757		struct bridge_iflist *bif;
3758		struct mbuf *mc;
3759		int error = 0, used = 0;
3760
3761		bridge_span(sc, m);
3762
3763		BRIDGE_LOCK2REF(sc, error);
3764		if (error) {
3765			m_freem(m);
3766			return (0);
3767		}
3768
3769		TAILQ_FOREACH(bif, &sc->sc_iflist, bif_next) {
3770			dst_if = bif->bif_ifp;
3771
3772			if (dst_if->if_type == IFT_GIF)
3773				continue;
3774			if ((dst_if->if_flags & IFF_RUNNING) == 0)
3775				continue;
3776
3777			/*
3778			 * If this is not the original output interface,
3779			 * and the interface is participating in spanning
3780			 * tree, make sure the port is in a state that
3781			 * allows forwarding.
3782			 */
3783			if (dst_if != ifp && (bif->bif_ifflags & IFBIF_STP) &&
3784			    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3785				continue;
3786
			if (TAILQ_NEXT(bif, bif_next) == NULL) {
3788				used = 1;
3789				mc = m;
3790			} else {
3791				mc = m_copypacket(m, M_DONTWAIT);
3792				if (mc == NULL) {
3793					(void) ifnet_stat_increment_out(
3794					    sc->sc_ifp, 0, 0, 1);
3795					continue;
3796				}
3797			}
3798
3799			(void) bridge_enqueue(sc, dst_if, mc);
3800		}
3801		if (used == 0)
3802			m_freem(m);
3803		BRIDGE_UNREF(sc);
3804		return (0);
3805	}
3806
3807sendunicast:
3808	/*
3809	 * XXX Spanning tree consideration here?
3810	 */
3811
3812	bridge_span(sc, m);
3813	if ((dst_if->if_flags & IFF_RUNNING) == 0) {
3814		m_freem(m);
3815		BRIDGE_UNLOCK(sc);
3816		return (0);
3817	}
3818
3819	BRIDGE_UNLOCK(sc);
3820	(void) bridge_enqueue(sc, dst_if, m);
3821	return (0);
3822}
3823#endif /* BRIDGE_MEMBER_OUT_FILTER */
3824
3825/*
3826 * Output callback.
3827 *
3828 * This routine is called externally from above only when if_bridge_txstart
3829 * is disabled; otherwise it is called internally by bridge_start().
3830 */
3831static int
3832bridge_output(struct ifnet *ifp, struct mbuf *m)
3833{
3834	struct bridge_softc *sc = ifnet_softc(ifp);
3835	struct ether_header *eh;
3836	struct ifnet *dst_if;
3837	int error = 0;
3838
3839	eh = mtod(m, struct ether_header *);
3840	dst_if = NULL;
3841
3842	BRIDGE_LOCK(sc);
3843	if (!(m->m_flags & (M_BCAST|M_MCAST)))
3844		dst_if = bridge_rtlookup(sc, eh->ether_dhost, 0);
3845
3846	(void) ifnet_stat_increment_out(ifp, 1, m->m_pkthdr.len, 0);
3847
3848#if NBPFILTER > 0
3849	if (sc->sc_bpf_output)
3850		bridge_bpf_output(ifp, m);
3851#endif
3852
3853	if (dst_if == NULL) {
3854		/* callee will unlock */
3855		bridge_broadcast(sc, ifp, m, 0);
3856	} else {
3857		BRIDGE_UNLOCK(sc);
3858		error = bridge_enqueue(sc, dst_if, m);
3859	}
3860
3861	return (error);
3862}
3863
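/*
 * bridge_finalize_cksum:
 *
 *	Finish in software whatever checksums the destination interface
 *	cannot offload.  When the interface advertises partial checksum
 *	offload (CSUM_PARTIAL), a deferred TCP checksum is converted into
 *	partial-checksum metadata (start and stuff offsets) instead of
 *	being computed here.
 */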
3864static void
3865bridge_finalize_cksum(struct ifnet *ifp, struct mbuf *m)
3866{
3867	struct ether_header *eh = mtod(m, struct ether_header *);
3868	uint32_t sw_csum, hwcap;
3869
3870	if (ifp != NULL)
3871		hwcap = (ifp->if_hwassist | CSUM_DATA_VALID);
3872	else
3873		hwcap = 0;
3874
3875	/* do in software what the hardware cannot */
3876	sw_csum = m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_FLAGS(hwcap);
3877	sw_csum &= IF_HWASSIST_CSUM_MASK;
3878
3879	switch (ntohs(eh->ether_type)) {
3880	case ETHERTYPE_IP:
3881		if ((hwcap & CSUM_PARTIAL) && !(sw_csum & CSUM_DELAY_DATA) &&
3882		    (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)) {
3883			if (m->m_pkthdr.csum_flags & CSUM_TCP) {
3884				uint16_t start =
3885				    sizeof (*eh) + sizeof (struct ip);
3886				uint16_t ulpoff =
3887				    m->m_pkthdr.csum_data & 0xffff;
3888				m->m_pkthdr.csum_flags |=
3889				    (CSUM_DATA_VALID | CSUM_PARTIAL);
3890				m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
3891				m->m_pkthdr.csum_tx_start = start;
3892			} else {
3893				sw_csum |= (CSUM_DELAY_DATA &
3894				    m->m_pkthdr.csum_flags);
3895			}
3896		}
3897		(void) in_finalize_cksum(m, sizeof (*eh), sw_csum);
3898		break;
3899
3900#if INET6
3901	case ETHERTYPE_IPV6:
3902		if ((hwcap & CSUM_PARTIAL) &&
3903		    !(sw_csum & CSUM_DELAY_IPV6_DATA) &&
3904		    (m->m_pkthdr.csum_flags & CSUM_DELAY_IPV6_DATA)) {
3905			if (m->m_pkthdr.csum_flags & CSUM_TCPIPV6) {
3906				uint16_t start =
3907				    sizeof (*eh) + sizeof (struct ip6_hdr);
3908				uint16_t ulpoff =
3909				    m->m_pkthdr.csum_data & 0xffff;
3910				m->m_pkthdr.csum_flags |=
3911				    (CSUM_DATA_VALID | CSUM_PARTIAL);
3912				m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
3913				m->m_pkthdr.csum_tx_start = start;
3914			} else {
3915				sw_csum |= (CSUM_DELAY_IPV6_DATA &
3916				    m->m_pkthdr.csum_flags);
3917			}
3918		}
3919		(void) in6_finalize_cksum(m, sizeof (*eh), -1, -1, sw_csum);
3920		break;
3921#endif /* INET6 */
3922	}
3923}
3924
3925/*
3926 * bridge_start:
3927 *
3928 *	Start output on a bridge.
3929 *
3930 * This routine is invoked by the start worker thread; because we never call
 * it directly, there is no need to deploy any serialization mechanism other
3932 * than what's already used by the worker thread, i.e. this is already single
3933 * threaded.
3934 *
3935 * This routine is called only when if_bridge_txstart is enabled.
3936 */
3937static void
3938bridge_start(struct ifnet *ifp)
3939{
3940	struct mbuf *m;
3941
3942	for (;;) {
3943		if (ifnet_dequeue(ifp, &m) != 0)
3944			break;
3945
3946		(void) bridge_output(ifp, m);
3947	}
3948}
3949
3950/*
3951 * bridge_forward:
3952 *
3953 *	The forwarding function of the bridge.
3954 *
3955 *	NOTE: Releases the lock on return.
3956 */
3957static void
3958bridge_forward(struct bridge_softc *sc, struct bridge_iflist *sbif,
3959	struct mbuf *m)
3960{
3961	struct bridge_iflist *dbif;
3962	struct ifnet *src_if, *dst_if, *ifp;
3963	struct ether_header *eh;
3964	uint16_t vlan;
3965	uint8_t *dst;
3966	int error;
3967
3968	BRIDGE_LOCK_ASSERT_HELD(sc);
3969
3970#if BRIDGE_DEBUG
3971	if (if_bridge_debug & BR_DBGF_OUTPPUT)
3972		printf("%s: %s m 0x%llx\n", __func__, sc->sc_ifp->if_xname,
3973		    (uint64_t)VM_KERNEL_ADDRPERM(m));
3974#endif /* BRIDGE_DEBUG */
3975
3976	src_if = m->m_pkthdr.rcvif;
3977	ifp = sc->sc_ifp;
3978
3979	(void) ifnet_stat_increment_in(ifp, 1, m->m_pkthdr.len, 0);
3980	vlan = VLANTAGOF(m);
3981
3982
3983	if ((sbif->bif_ifflags & IFBIF_STP) &&
3984	    sbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
3985		goto drop;
3986
3987	eh = mtod(m, struct ether_header *);
3988	dst = eh->ether_dhost;
3989
3990	/* If the interface is learning, record the address. */
3991	if (sbif->bif_ifflags & IFBIF_LEARNING) {
3992		error = bridge_rtupdate(sc, eh->ether_shost, vlan,
3993		    sbif, 0, IFBAF_DYNAMIC);
3994		/*
3995		 * If the interface has addresses limits then deny any source
3996		 * that is not in the cache.
3997		 */
3998		if (error && sbif->bif_addrmax)
3999			goto drop;
4000	}
4001
4002	if ((sbif->bif_ifflags & IFBIF_STP) != 0 &&
4003	    sbif->bif_stp.bp_state == BSTP_IFSTATE_LEARNING)
4004		goto drop;
4005
4006	/*
4007	 * At this point, the port either doesn't participate
4008	 * in spanning tree or it is in the forwarding state.
4009	 */
4010
4011	/*
4012	 * If the packet is unicast, destined for someone on
4013	 * "this" side of the bridge, drop it.
4014	 */
4015	if ((m->m_flags & (M_BCAST|M_MCAST)) == 0) {
4016		dst_if = bridge_rtlookup(sc, dst, vlan);
4017		if (src_if == dst_if)
4018			goto drop;
4019	} else {
4020		/*
		 * Check if it's a reserved multicast address; any address
		 * listed in 802.1D section 7.12.6 may not be forwarded by the
		 * bridge.
		 * This is currently 01-80-C2-00-00-00 to 01-80-C2-00-00-0F.
4025		 */
4026		if (dst[0] == 0x01 && dst[1] == 0x80 &&
4027		    dst[2] == 0xc2 && dst[3] == 0x00 &&
4028		    dst[4] == 0x00 && dst[5] <= 0x0f)
4029			goto drop;
4030
4031
4032		/* ...forward it to all interfaces. */
4033		atomic_add_64(&ifp->if_imcasts, 1);
4034		dst_if = NULL;
4035	}
4036
4037	/*
4038	 * If we have a destination interface which is a member of our bridge,
4039	 * OR this is a unicast packet, push it through the bpf(4) machinery.
4040	 * For broadcast or multicast packets, don't bother because it will
4041	 * be reinjected into ether_input. We do this before we pass the packets
4042	 * through the pfil(9) framework, as it is possible that pfil(9) will
4043	 * drop the packet, or possibly modify it, making it difficult to debug
4044	 * firewall issues on the bridge.
4045	 */
4046#if NBPFILTER > 0
4047	if (eh->ether_type == htons(ETHERTYPE_RSN_PREAUTH) ||
4048	    dst_if != NULL || (m->m_flags & (M_BCAST | M_MCAST)) == 0) {
4049		m->m_pkthdr.rcvif = ifp;
4050		if (sc->sc_bpf_input)
4051			bridge_bpf_input(ifp, m);
4052	}
4053#endif /* NBPFILTER */
4054
4055#if defined(PFIL_HOOKS)
4056	/* run the packet filter */
4057	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
4058		BRIDGE_UNLOCK(sc);
4059		if (bridge_pfil(&m, ifp, src_if, PFIL_IN) != 0)
4060			return;
4061		if (m == NULL)
4062			return;
4063		BRIDGE_LOCK(sc);
4064	}
4065#endif /* PFIL_HOOKS */
4066
4067	if (dst_if == NULL) {
4068		bridge_broadcast(sc, src_if, m, 1);
4069		return;
4070	}
4071
4072	/*
4073	 * At this point, we're dealing with a unicast frame
4074	 * going to a different interface.
4075	 */
4076	if ((dst_if->if_flags & IFF_RUNNING) == 0)
4077		goto drop;
4078
4079	dbif = bridge_lookup_member_if(sc, dst_if);
4080	if (dbif == NULL)
4081		/* Not a member of the bridge (anymore?) */
4082		goto drop;
4083
	/* Private segments cannot talk to each other */
4085	if (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE)
4086		goto drop;
4087
4088	if ((dbif->bif_ifflags & IFBIF_STP) &&
4089	    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
4090		goto drop;
4091
4092#if HAS_DHCPRA_MASK
4093	/* APPLE MODIFICATION <rdar:6985737> */
4094	if ((dst_if->if_extflags & IFEXTF_DHCPRA_MASK) != 0) {
4095		m = ip_xdhcpra_output(dst_if, m);
4096		if (!m) {
4097			++sc->sc_sc.sc_ifp.if_xdhcpra;
4098			return;
4099		}
4100	}
4101#endif /* HAS_DHCPRA_MASK */
4102
4103	BRIDGE_UNLOCK(sc);
4104
4105#if defined(PFIL_HOOKS)
4106	if (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6) {
4107		if (bridge_pfil(&m, ifp, dst_if, PFIL_OUT) != 0)
4108			return;
4109		if (m == NULL)
4110			return;
4111	}
4112#endif /* PFIL_HOOKS */
4113
4114	(void) bridge_enqueue(sc, dst_if, m);
4115	return;
4116
4117drop:
4118	BRIDGE_UNLOCK(sc);
4119	m_freem(m);
4120}
4121
4122#if BRIDGE_DEBUG
4123
4124char *ether_ntop(char *, size_t, const u_char *);
4125
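/*
 * ether_ntop:
 *
 *	Format an Ethernet address into the caller-supplied buffer as a
 *	colon-separated hex string (debug builds only).
 */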
4126__private_extern__ char *
4127ether_ntop(char *buf, size_t len, const u_char *ap)
4128{
4129	snprintf(buf, len, "%02x:%02x:%02x:%02x:%02x:%02x",
4130	    ap[0], ap[1], ap[2], ap[3], ap[4], ap[5]);
4131
4132	return (buf);
4133}
4134
4135#endif /* BRIDGE_DEBUG */
4136
4137/*
4138 * bridge_input:
4139 *
4140 *	Filter input from a member interface.  Queue the packet for
4141 *	bridging if it is not for us.
4142 */
4143__private_extern__ errno_t
4144bridge_input(struct ifnet *ifp, struct mbuf *m, void *frame_header)
4145{
4146	struct bridge_softc *sc = ifp->if_bridge;
4147	struct bridge_iflist *bif, *bif2;
4148	struct ifnet *bifp;
4149	struct ether_header *eh;
4150	struct mbuf *mc, *mc2;
4151	uint16_t vlan;
4152	int error;
4153
4154#if BRIDGE_DEBUG
4155	if (if_bridge_debug & BR_DBGF_INPUT)
4156		printf("%s: %s from %s m 0x%llx data 0x%llx\n", __func__,
4157		    sc->sc_ifp->if_xname, ifp->if_xname,
4158		    (uint64_t)VM_KERNEL_ADDRPERM(m),
4159		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_data(m)));
4160#endif /* BRIDGE_DEBUG */
4161
4162	if ((sc->sc_ifp->if_flags & IFF_RUNNING) == 0) {
4163#if BRIDGE_DEBUG
4164		if (if_bridge_debug & BR_DBGF_INPUT)
4165			printf("%s: %s not running passing along\n",
4166			    __func__, sc->sc_ifp->if_xname);
4167#endif /* BRIDGE_DEBUG */
4168		return (0);
4169	}
4170
4171	bifp = sc->sc_ifp;
4172	vlan = VLANTAGOF(m);
4173
4174#ifdef IFF_MONITOR
4175	/*
4176	 * Implement support for bridge monitoring. If this flag has been
4177	 * set on this interface, discard the packet once we push it through
4178	 * the bpf(4) machinery, but before we do, increment the byte and
4179	 * packet counters associated with this interface.
4180	 */
4181	if ((bifp->if_flags & IFF_MONITOR) != 0) {
4182		m->m_pkthdr.rcvif  = bifp;
4183		BRIDGE_BPF_MTAP_INPUT(sc, m);
4184		(void) ifnet_stat_increment_in(bifp, 1, m->m_pkthdr.len, 0);
4185		m_freem(m);
4186		return (EJUSTRETURN);
4187	}
4188#endif /* IFF_MONITOR */
4189
4190	/*
	 * Need to clear the promiscuous flag, otherwise the packet will be
	 * dropped by DLIL after processing filters.
4193	 */
4194	if ((mbuf_flags(m) & MBUF_PROMISC))
4195		mbuf_setflags_mask(m, 0, MBUF_PROMISC);
4196
4197	BRIDGE_LOCK(sc);
4198	bif = bridge_lookup_member_if(sc, ifp);
4199	if (bif == NULL) {
4200		BRIDGE_UNLOCK(sc);
4201#if BRIDGE_DEBUG
4202		if (if_bridge_debug & BR_DBGF_INPUT)
4203			printf("%s: %s bridge_lookup_member_if failed\n",
4204			    __func__, sc->sc_ifp->if_xname);
4205#endif /* BRIDGE_DEBUG */
4206		return (0);
4207	}
4208
4209	if (bif->bif_flags & BIFF_HOST_FILTER) {
4210		error = bridge_host_filter(bif, m);
4211		if (error != 0) {
4212			if (if_bridge_debug & BR_DBGF_INPUT)
4213				printf("%s: %s bridge_host_filter failed\n",
4214				    __func__, bif->bif_ifp->if_xname);
4215			BRIDGE_UNLOCK(sc);
4216			return (EJUSTRETURN);
4217		}
4218	}
4219
4220	eh = mtod(m, struct ether_header *);
4221
4222	bridge_span(sc, m);
4223
4224	if (m->m_flags & (M_BCAST|M_MCAST)) {
4225
4226#if BRIDGE_DEBUG
4227		if (if_bridge_debug & BR_DBGF_MCAST)
4228			if ((m->m_flags & M_MCAST))
4229				printf("%s: multicast: "
4230				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
4231				    __func__,
4232				    eh->ether_dhost[0], eh->ether_dhost[1],
4233				    eh->ether_dhost[2], eh->ether_dhost[3],
4234				    eh->ether_dhost[4], eh->ether_dhost[5]);
4235#endif /* BRIDGE_DEBUG */
4236
4237		/* Tap off 802.1D packets; they do not get forwarded. */
4238		if (memcmp(eh->ether_dhost, bstp_etheraddr,
4239		    ETHER_ADDR_LEN) == 0) {
4240#if BRIDGESTP
4241			m = bstp_input(&bif->bif_stp, ifp, m);
4242#else /* !BRIDGESTP */
4243			m_freem(m);
4244			m = NULL;
4245#endif /* !BRIDGESTP */
4246			if (m == NULL) {
4247				BRIDGE_UNLOCK(sc);
4248				return (EJUSTRETURN);
4249			}
4250		}
4251
4252		if ((bif->bif_ifflags & IFBIF_STP) &&
4253		    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
4254			BRIDGE_UNLOCK(sc);
4255			return (0);
4256		}
4257
4258		/*
4259		 * Make a deep copy of the packet and enqueue the copy
4260		 * for bridge processing; return the original packet for
4261		 * local processing.
4262		 */
4263		mc = m_dup(m, M_DONTWAIT);
4264		if (mc == NULL) {
4265			BRIDGE_UNLOCK(sc);
4266			return (0);
4267		}
4268
4269		/*
4270		 * Perform the bridge forwarding function with the copy.
4271		 *
4272		 * Note that bridge_forward calls BRIDGE_UNLOCK
4273		 */
4274		bridge_forward(sc, bif, mc);
4275
4276		/*
4277		 * Reinject the mbuf as arriving on the bridge so we have a
	 * chance at claiming multicast packets. We cannot loop back
4279		 * here from ether_input as a bridge is never a member of a
4280		 * bridge.
4281		 */
4282		VERIFY(bifp->if_bridge == NULL);
4283		mc2 = m_dup(m, M_DONTWAIT);
4284		if (mc2 != NULL) {
4285			/* Keep the layer3 header aligned */
4286			int i = min(mc2->m_pkthdr.len, max_protohdr);
4287			mc2 = m_copyup(mc2, i, ETHER_ALIGN);
4288		}
4289		if (mc2 != NULL) {
4290			/* mark packet as arriving on the bridge */
4291			mc2->m_pkthdr.rcvif = bifp;
4292			mc2->m_pkthdr.pkt_hdr = mbuf_data(mc2);
4293
4294#if NBPFILTER > 0
4295			if (sc->sc_bpf_input)
4296				bridge_bpf_input(bifp, mc2);
4297#endif /* NBPFILTER */
4298			(void) mbuf_setdata(mc2,
4299			    (char *)mbuf_data(mc2) + ETHER_HDR_LEN,
4300			    mbuf_len(mc2) - ETHER_HDR_LEN);
4301			(void) mbuf_pkthdr_adjustlen(mc2, - ETHER_HDR_LEN);
4302
4303			(void) ifnet_stat_increment_in(bifp, 1,
4304			    mbuf_pkthdr_len(mc2), 0);
4305
4306#if BRIDGE_DEBUG
4307			if (if_bridge_debug & BR_DBGF_MCAST)
4308				printf("%s: %s mcast for us\n", __func__,
4309				    sc->sc_ifp->if_xname);
4310#endif /* BRIDGE_DEBUG */
4311
4312			dlil_input_packet_list(bifp, mc2);
4313		}
4314
4315		/* Return the original packet for local processing. */
4316		return (0);
4317	}
4318
4319	if ((bif->bif_ifflags & IFBIF_STP) &&
4320	    bif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING) {
4321		BRIDGE_UNLOCK(sc);
4322		return (0);
4323	}
4324
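/*
 * Helper macros for the unicast input path below.  CARP_CHECK_WE_ARE_DST()
 * and CARP_CHECK_WE_ARE_SRC() test whether a CARP address on the interface
 * owns the destination or source MAC address; they evaluate to 0 when CARP
 * is not compiled in.  Likewise, PFIL_HOOKED_INET6 evaluates to 0 without
 * INET6, and PFIL_PHYS() is empty without PFIL_HOOKS.
 */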
4325#ifdef DEV_CARP
4326#define	CARP_CHECK_WE_ARE_DST(iface) \
4327	((iface)->if_carp &&\
4328		carp_forus((iface)->if_carp, eh->ether_dhost))
4329#define	CARP_CHECK_WE_ARE_SRC(iface) \
4330	((iface)->if_carp &&\
4331		carp_forus((iface)->if_carp, eh->ether_shost))
4332#else
4333#define	CARP_CHECK_WE_ARE_DST(iface) 0
4334#define	CARP_CHECK_WE_ARE_SRC(iface) 0
4335#endif
4336
4337#ifdef INET6
4338#define	PFIL_HOOKED_INET6 PFIL_HOOKED(&inet6_pfil_hook)
4339#else
4340#define	PFIL_HOOKED_INET6 0
4341#endif
4342
4343#if defined(PFIL_HOOKS)
4344#define	PFIL_PHYS(sc, ifp, m) do {					\
4345	if (pfil_local_phys &&						\
4346	(PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {		\
4347		if (bridge_pfil(&m, NULL, ifp,				\
4348		    PFIL_IN) != 0 || m == NULL) {			\
4349			BRIDGE_UNLOCK(sc);				\
4350			return (NULL);					\
4351		}							\
4352	}								\
4353} while (0)
4354#else /* PFIL_HOOKS */
4355#define	PFIL_PHYS(sc, ifp, m)
4356#endif /* PFIL_HOOKS */
4357
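/*
 * GRAB_OUR_PACKETS(iface):
 *
 *	If the frame is addressed to 'iface', claim it for local input:
 *	optionally filter and learn the source address, point rcvif at the
 *	member and return 0 so the stack processes the packet.  If instead
 *	the frame was sourced by 'iface', we are seeing our own transmission
 *	and drop it with EJUSTRETURN.  Both paths release the bridge lock.
 */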
4358#define	GRAB_OUR_PACKETS(iface)						\
4359	if ((iface)->if_type == IFT_GIF)				\
4360		continue;						\
4361	/* It is destined for us. */					\
4362	if (memcmp(IF_LLADDR((iface)), eh->ether_dhost,		\
4363	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST((iface))) {	\
4364		if ((iface)->if_type == IFT_BRIDGE) {			\
4365			BRIDGE_BPF_MTAP_INPUT(sc, m);			\
4366			/* Filter on the physical interface. */		\
4367			PFIL_PHYS(sc, iface, m);			\
4368		}							\
4369		if (bif->bif_ifflags & IFBIF_LEARNING) {		\
4370			error = bridge_rtupdate(sc, eh->ether_shost,	\
4371			    vlan, bif, 0, IFBAF_DYNAMIC);		\
4372			if (error && bif->bif_addrmax) {		\
4373				BRIDGE_UNLOCK(sc);			\
4374				return (EJUSTRETURN);			\
4375			}						\
4376		}							\
4377		m->m_pkthdr.rcvif = iface;				\
4378		BRIDGE_UNLOCK(sc);					\
4379		return (0);						\
4380	}								\
4381									\
4382	/* We just received a packet that we sent out. */		\
4383	if (memcmp(IF_LLADDR((iface)), eh->ether_shost,		\
4384	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_SRC((iface))) {	\
4385		BRIDGE_UNLOCK(sc);					\
4386		return (EJUSTRETURN);					\
4387	}
4388
4389	/*
4390	 * Unicast.
4391	 */
4392	/*
	 * If the packet is for us, set the packet's source as the
	 * bridge, and return the packet to ether_input for
4395	 * local processing.
4396	 */
4397	if (memcmp(eh->ether_dhost, IF_LLADDR(bifp),
4398	    ETHER_ADDR_LEN) == 0 || CARP_CHECK_WE_ARE_DST(bifp)) {
4399
4400		/* Mark the packet as arriving on the bridge interface */
4401		(void) mbuf_pkthdr_setrcvif(m, bifp);
4402		mbuf_pkthdr_setheader(m, frame_header);
4403
4404		/*
4405		 * If the interface is learning, and the source
4406		 * address is valid and not multicast, record
4407		 * the address.
4408		 */
4409		if (bif->bif_ifflags & IFBIF_LEARNING)
4410			(void) bridge_rtupdate(sc, eh->ether_shost,
4411			    vlan, bif, 0, IFBAF_DYNAMIC);
4412
4413		BRIDGE_BPF_MTAP_INPUT(sc, m);
4414
4415		(void) mbuf_setdata(m, (char *)mbuf_data(m) + ETHER_HDR_LEN,
4416		    mbuf_len(m) - ETHER_HDR_LEN);
4417		(void) mbuf_pkthdr_adjustlen(m, - ETHER_HDR_LEN);
4418
4419		(void) ifnet_stat_increment_in(bifp, 1, mbuf_pkthdr_len(m), 0);
4420
4421		BRIDGE_UNLOCK(sc);
4422
4423#if BRIDGE_DEBUG
4424		if (if_bridge_debug & BR_DBGF_INPUT)
4425			printf("%s: %s packet for bridge\n", __func__,
4426			    sc->sc_ifp->if_xname);
4427#endif /* BRIDGE_DEBUG */
4428
4429		dlil_input_packet_list(bifp, m);
4430
4431		return (EJUSTRETURN);
4432	}
4433
4434	/*
	 * If the packet is destined for the MAC address of the member
	 * interface itself, then we don't need to forward
	 * it -- just pass it back.  Note that it'll likely just be
	 * dropped by the stack, but if something else is bound to
	 * the interface directly (for example, the wireless stats
	 * protocol -- although that actually uses BPF right now),
	 * then it will consume the packet.
	 *
	 * ALSO, note that we do this check AFTER checking for the
	 * bridge's own MAC address, because the bridge may be
	 * using the SAME MAC address as one of its interfaces.
4446	 */
4447	if (memcmp(eh->ether_dhost, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0) {
4448
4449#ifdef VERY_VERY_VERY_DIAGNOSTIC
4450			printf("%s: not forwarding packet bound for member "
4451			    "interface\n", __func__);
4452#endif
4453			BRIDGE_UNLOCK(sc);
4454			return (0);
4455	}
4456
	/* Now check all the bridge members. */
4458	TAILQ_FOREACH(bif2, &sc->sc_iflist, bif_next) {
4459		GRAB_OUR_PACKETS(bif2->bif_ifp)
4460	}
4461
4462#undef CARP_CHECK_WE_ARE_DST
4463#undef CARP_CHECK_WE_ARE_SRC
4464#undef GRAB_OUR_PACKETS
4465
4466	/*
4467	 * Perform the bridge forwarding function.
4468	 *
4469	 * Note that bridge_forward calls BRIDGE_UNLOCK
4470	 */
4471	bridge_forward(sc, bif, m);
4472
4473	return (EJUSTRETURN);
4474}
4475
4476/*
4477 * bridge_broadcast:
4478 *
4479 *	Send a frame to all interfaces that are members of
4480 *	the bridge, except for the one on which the packet
4481 *	arrived.
4482 *
4483 *	NOTE: Releases the lock on return.
4484 */
4485static void
4486bridge_broadcast(struct bridge_softc *sc, struct ifnet *src_if,
4487	struct mbuf *m, int runfilt)
4488{
4489#ifndef PFIL_HOOKS
4490#pragma unused(runfilt)
4491#endif
4492	struct bridge_iflist *dbif, *sbif;
4493	struct mbuf *mc;
4494	struct ifnet *dst_if;
4495	int error = 0, used = 0;
4496
4497	sbif = bridge_lookup_member_if(sc, src_if);
4498
4499	BRIDGE_LOCK2REF(sc, error);
4500	if (error) {
4501		m_freem(m);
4502		return;
4503	}
4504
4505#ifdef PFIL_HOOKS
4506	/* Filter on the bridge interface before broadcasting */
4507	if (runfilt && (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
4508		if (bridge_pfil(&m, sc->sc_ifp, NULL, PFIL_OUT) != 0)
4509			goto out;
4510		if (m == NULL)
4511			goto out;
4512	}
4513#endif /* PFIL_HOOKS */
4514
4515	TAILQ_FOREACH(dbif, &sc->sc_iflist, bif_next) {
4516		dst_if = dbif->bif_ifp;
4517		if (dst_if == src_if)
4518			continue;
4519
		/* Private segments cannot talk to each other */
4521		if (sbif &&
4522		    (sbif->bif_ifflags & dbif->bif_ifflags & IFBIF_PRIVATE))
4523			continue;
4524
4525		if ((dbif->bif_ifflags & IFBIF_STP) &&
4526		    dbif->bif_stp.bp_state == BSTP_IFSTATE_DISCARDING)
4527			continue;
4528
4529		if ((dbif->bif_ifflags & IFBIF_DISCOVER) == 0 &&
4530		    (m->m_flags & (M_BCAST|M_MCAST)) == 0)
4531			continue;
4532
4533		if ((dst_if->if_flags & IFF_RUNNING) == 0)
4534			continue;
4535
4536		if (!(dbif->bif_flags & BIFF_MEDIA_ACTIVE)) {
4537			continue;
4538		}
4539
4540		if (TAILQ_NEXT(dbif, bif_next) == NULL) {
4541			mc = m;
4542			used = 1;
4543		} else {
4544			mc = m_dup(m, M_DONTWAIT);
4545			if (mc == NULL) {
4546				(void) ifnet_stat_increment_out(sc->sc_ifp,
4547				    0, 0, 1);
4548				continue;
4549			}
4550		}
4551
4552#ifdef PFIL_HOOKS
4553		/*
4554		 * Filter on the output interface. Pass a NULL bridge interface
4555		 * pointer so we do not redundantly filter on the bridge for
4556		 * each interface we broadcast on.
4557		 */
4558		if (runfilt &&
4559		    (PFIL_HOOKED(&inet_pfil_hook) || PFIL_HOOKED_INET6)) {
4560			if (used == 0) {
4561				/* Keep the layer3 header aligned */
4562				int i = min(mc->m_pkthdr.len, max_protohdr);
4563				mc = m_copyup(mc, i, ETHER_ALIGN);
4564				if (mc == NULL) {
4565					(void) ifnet_stat_increment_out(
4566					    sc->sc_ifp, 0, 0, 1);
4567					continue;
4568				}
4569			}
4570			if (bridge_pfil(&mc, NULL, dst_if, PFIL_OUT) != 0)
4571				continue;
4572			if (mc == NULL)
4573				continue;
4574		}
4575#endif /* PFIL_HOOKS */
4576
4577		(void) bridge_enqueue(sc, dst_if, mc);
4578	}
4579	if (used == 0)
4580		m_freem(m);
4581
4582#ifdef PFIL_HOOKS
4583out:
4584#endif /* PFIL_HOOKS */
4585
4586	BRIDGE_UNREF(sc);
4587}
4588
4589/*
4590 * bridge_span:
4591 *
 *	Duplicate a packet out one or more interfaces that are in span mode;
 *	the original mbuf is unmodified.
4594 */
4595static void
4596bridge_span(struct bridge_softc *sc, struct mbuf *m)
4597{
4598	struct bridge_iflist *bif;
4599	struct ifnet *dst_if;
4600	struct mbuf *mc;
4601
4602	if (TAILQ_EMPTY(&sc->sc_spanlist))
4603		return;
4604
4605	TAILQ_FOREACH(bif, &sc->sc_spanlist, bif_next) {
4606		dst_if = bif->bif_ifp;
4607
4608		if ((dst_if->if_flags & IFF_RUNNING) == 0)
4609			continue;
4610
4611		mc = m_copypacket(m, M_DONTWAIT);
4612		if (mc == NULL) {
4613			(void) ifnet_stat_increment_out(sc->sc_ifp, 0, 0, 1);
4614			continue;
4615		}
4616
4617		(void) bridge_enqueue(sc, dst_if, mc);
4618	}
4619}
4620
4621
4622/*
4623 * bridge_rtupdate:
4624 *
4625 *	Add a bridge routing entry.
4626 */
4627static int
4628bridge_rtupdate(struct bridge_softc *sc, const uint8_t *dst, uint16_t vlan,
4629	struct bridge_iflist *bif, int setflags, uint8_t flags)
4630{
4631	struct bridge_rtnode *brt;
4632	int error;
4633
4634	BRIDGE_LOCK_ASSERT_HELD(sc);
4635
	/* Check that the source address is valid and not multicast. */
4637	if (ETHER_IS_MULTICAST(dst) ||
4638	    (dst[0] == 0 && dst[1] == 0 && dst[2] == 0 &&
4639	    dst[3] == 0 && dst[4] == 0 && dst[5] == 0) != 0)
4640		return (EINVAL);
4641
4642
4643	/* 802.1p frames map to vlan 1 */
4644	if (vlan == 0)
4645		vlan = 1;
4646
4647	/*
4648	 * A route for this destination might already exist.  If so,
4649	 * update it, otherwise create a new one.
4650	 */
4651	if ((brt = bridge_rtnode_lookup(sc, dst, vlan)) == NULL) {
4652		if (sc->sc_brtcnt >= sc->sc_brtmax) {
4653			sc->sc_brtexceeded++;
4654			return (ENOSPC);
4655		}
4656		/* Check per interface address limits (if enabled) */
4657		if (bif->bif_addrmax && bif->bif_addrcnt >= bif->bif_addrmax) {
4658			bif->bif_addrexceeded++;
4659			return (ENOSPC);
4660		}
4661
4662		/*
4663		 * Allocate a new bridge forwarding node, and
4664		 * initialize the expiration time and Ethernet
4665		 * address.
4666		 */
4667		brt = zalloc_noblock(bridge_rtnode_pool);
4668		if (brt == NULL)
4669			return (ENOMEM);
4670		bzero(brt, sizeof(struct bridge_rtnode));
4671
4672		if (bif->bif_ifflags & IFBIF_STICKY)
4673			brt->brt_flags = IFBAF_STICKY;
4674		else
4675			brt->brt_flags = IFBAF_DYNAMIC;
4676
4677		memcpy(brt->brt_addr, dst, ETHER_ADDR_LEN);
4678		brt->brt_vlan = vlan;
4679
4680
4681		if ((error = bridge_rtnode_insert(sc, brt)) != 0) {
4682			zfree(bridge_rtnode_pool, brt);
4683			return (error);
4684		}
4685		brt->brt_dst = bif;
4686		bif->bif_addrcnt++;
4687#if BRIDGE_DEBUG
4688		if (if_bridge_debug & BR_DBGF_RT_TABLE)
4689			printf("%s: added %02x:%02x:%02x:%02x:%02x:%02x "
4690			    "on %s count %u hashsize %u\n", __func__,
4691			    dst[0], dst[1], dst[2], dst[3], dst[4], dst[5],
4692			    sc->sc_ifp->if_xname, sc->sc_brtcnt,
4693			    sc->sc_rthash_size);
4694#endif
4695	}
4696
4697	if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC &&
4698	    brt->brt_dst != bif) {
4699		brt->brt_dst->bif_addrcnt--;
4700		brt->brt_dst = bif;
4701		brt->brt_dst->bif_addrcnt++;
4702	}
4703
4704	if ((flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4705		unsigned long now;
4706
4707		now = (unsigned long) net_uptime();
4708		brt->brt_expire = now + sc->sc_brttimeout;
4709	}
4710	if (setflags)
4711		brt->brt_flags = flags;
4712
4713
4714	return (0);
4715}
4716
4717/*
4718 * bridge_rtlookup:
4719 *
4720 *	Lookup the destination interface for an address.
4721 */
4722static struct ifnet *
4723bridge_rtlookup(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4724{
4725	struct bridge_rtnode *brt;
4726
4727	BRIDGE_LOCK_ASSERT_HELD(sc);
4728
4729	if ((brt = bridge_rtnode_lookup(sc, addr, vlan)) == NULL)
4730		return (NULL);
4731
4732	return (brt->brt_ifp);
4733}
4734
4735/*
4736 * bridge_rttrim:
4737 *
 *	Trim the routing table so that we have a number
4739 *	of routing entries less than or equal to the
4740 *	maximum number.
4741 */
4742static void
4743bridge_rttrim(struct bridge_softc *sc)
4744{
4745	struct bridge_rtnode *brt, *nbrt;
4746
4747	BRIDGE_LOCK_ASSERT_HELD(sc);
4748
4749	/* Make sure we actually need to do this. */
4750	if (sc->sc_brtcnt <= sc->sc_brtmax)
4751		return;
4752
4753	/* Force an aging cycle; this might trim enough addresses. */
4754	bridge_rtage(sc);
4755	if (sc->sc_brtcnt <= sc->sc_brtmax)
4756		return;
4757
4758	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4759		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4760			bridge_rtnode_destroy(sc, brt);
4761			if (sc->sc_brtcnt <= sc->sc_brtmax)
4762				return;
4763		}
4764	}
4765}
4766
4767/*
4768 * bridge_aging_timer:
4769 *
4770 *	Aging periodic timer for the bridge routing table.
4771 */
4772static void
4773bridge_aging_timer(struct bridge_softc *sc)
4774{
4775	BRIDGE_LOCK_ASSERT_HELD(sc);
4776
4777	bridge_rtage(sc);
4778
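	/* Re-arm the timer while the bridge is running and not detaching */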
4779	if ((sc->sc_ifp->if_flags & IFF_RUNNING) &&
4780	    (sc->sc_flags & SCF_DETACHING) == 0) {
4781		sc->sc_aging_timer.bdc_sc = sc;
4782		sc->sc_aging_timer.bdc_func = bridge_aging_timer;
4783		sc->sc_aging_timer.bdc_ts.tv_sec = bridge_rtable_prune_period;
4784		bridge_schedule_delayed_call(&sc->sc_aging_timer);
4785	}
4786}
4787
4788/*
4789 * bridge_rtage:
4790 *
4791 *	Perform an aging cycle.
4792 */
4793static void
4794bridge_rtage(struct bridge_softc *sc)
4795{
4796	struct bridge_rtnode *brt, *nbrt;
4797	unsigned long now;
4798
4799	BRIDGE_LOCK_ASSERT_HELD(sc);
4800
4801	now = (unsigned long) net_uptime();
4802
4803	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4804		if ((brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC) {
4805			if (now >= brt->brt_expire)
4806				bridge_rtnode_destroy(sc, brt);
4807		}
4808	}
4809}
4810
4811/*
4812 * bridge_rtflush:
4813 *
4814 *	Remove all dynamic addresses from the bridge.
4815 */
4816static void
4817bridge_rtflush(struct bridge_softc *sc, int full)
4818{
4819	struct bridge_rtnode *brt, *nbrt;
4820
4821	BRIDGE_LOCK_ASSERT_HELD(sc);
4822
4823	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4824		if (full || (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
4825			bridge_rtnode_destroy(sc, brt);
4826	}
4827}
4828
4829/*
4830 * bridge_rtdaddr:
4831 *
4832 *	Remove an address from the table.
4833 */
4834static int
4835bridge_rtdaddr(struct bridge_softc *sc, const uint8_t *addr, uint16_t vlan)
4836{
4837	struct bridge_rtnode *brt;
4838	int found = 0;
4839
4840	BRIDGE_LOCK_ASSERT_HELD(sc);
4841
4842	/*
	 * If vlan is zero then we want to delete for all vlans, so the lookup
4844	 * may return more than one.
4845	 */
4846	while ((brt = bridge_rtnode_lookup(sc, addr, vlan)) != NULL) {
4847		bridge_rtnode_destroy(sc, brt);
4848		found = 1;
4849	}
4850
4851	return (found ? 0 : ENOENT);
4852}
4853
4854/*
4855 * bridge_rtdelete:
4856 *
 *	Delete routes to a specific member interface.
4858 */
4859static void
4860bridge_rtdelete(struct bridge_softc *sc, struct ifnet *ifp, int full)
4861{
4862	struct bridge_rtnode *brt, *nbrt;
4863
4864	BRIDGE_LOCK_ASSERT_HELD(sc);
4865
4866	LIST_FOREACH_SAFE(brt, &sc->sc_rtlist, brt_list, nbrt) {
4867		if (brt->brt_ifp == ifp && (full ||
4868		    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC))
4869			bridge_rtnode_destroy(sc, brt);
4870	}
4871}
4872
4873/*
4874 * bridge_rtable_init:
4875 *
4876 *	Initialize the route table for this bridge.
4877 */
4878static int
4879bridge_rtable_init(struct bridge_softc *sc)
4880{
4881	u_int32_t i;
4882
4883	sc->sc_rthash = _MALLOC(sizeof (*sc->sc_rthash) * BRIDGE_RTHASH_SIZE,
4884	    M_DEVBUF, M_WAITOK | M_ZERO);
4885	if (sc->sc_rthash == NULL) {
4886		printf("%s: no memory\n", __func__);
4887		return (ENOMEM);
4888	}
4889	sc->sc_rthash_size = BRIDGE_RTHASH_SIZE;
4890
4891	for (i = 0; i < sc->sc_rthash_size; i++)
4892		LIST_INIT(&sc->sc_rthash[i]);
4893
4894	sc->sc_rthash_key = RandomULong();
4895
4896	LIST_INIT(&sc->sc_rtlist);
4897
4898	return (0);
4899}
4900
4901/*
4902 * bridge_rthash_delayed_resize:
4903 *
4904 *	Resize the routing table hash on a delayed thread call.
4905 */
4906static void
4907bridge_rthash_delayed_resize(struct bridge_softc *sc)
4908{
4909	u_int32_t new_rthash_size;
4910	struct _bridge_rtnode_list *new_rthash = NULL;
4911	struct _bridge_rtnode_list *old_rthash = NULL;
4912	u_int32_t i;
4913	struct bridge_rtnode *brt;
4914	int error = 0;
4915
4916	BRIDGE_LOCK_ASSERT_HELD(sc);
4917
4918	/*
4919	 * Four entries per hash bucket is our ideal load factor
4920	 */
4921	if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
4922		goto out;
4923
4924	/*
	 * Doubling the number of hash buckets may be too simplistic,
	 * especially when facing a spike of new entries.
4927	 */
4928	new_rthash_size = sc->sc_rthash_size * 2;
4929
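	/*
	 * Flag the resize and drop the lock: the allocation below may block,
	 * and SCF_RESIZING keeps bridge_rthash_resize() from scheduling
	 * another resize while we wait.
	 */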
4930	sc->sc_flags |= SCF_RESIZING;
4931	BRIDGE_UNLOCK(sc);
4932
4933	new_rthash = _MALLOC(sizeof (*sc->sc_rthash) * new_rthash_size,
4934	    M_DEVBUF, M_WAITOK | M_ZERO);
4935
4936	BRIDGE_LOCK(sc);
4937	sc->sc_flags &= ~SCF_RESIZING;
4938
4939	if (new_rthash == NULL) {
4940		error = ENOMEM;
4941		goto out;
4942	}
4943	if ((sc->sc_flags & SCF_DETACHING)) {
4944		error = ENODEV;
4945		goto out;
4946	}
4947	/*
4948	 * Fail safe from here on
4949	 */
4950	old_rthash = sc->sc_rthash;
4951	sc->sc_rthash = new_rthash;
4952	sc->sc_rthash_size = new_rthash_size;
4953
4954	/*
4955	 * Get a new key to force entries to be shuffled around to reduce
4956	 * the likelihood they will land in the same buckets
4957	 */
4958	sc->sc_rthash_key = RandomULong();
4959
4960	for (i = 0; i < sc->sc_rthash_size; i++)
4961		LIST_INIT(&sc->sc_rthash[i]);
4962
	LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
4964		LIST_REMOVE(brt, brt_hash);
4965		(void) bridge_rtnode_hash(sc, brt);
4966	}
4967out:
4968	if (error == 0) {
4969#if BRIDGE_DEBUG
4970		if (if_bridge_debug & BR_DBGF_RT_TABLE)
4971			printf("%s: %s new size %u\n", __func__,
4972			    sc->sc_ifp->if_xname, sc->sc_rthash_size);
4973#endif /* BRIDGE_DEBUG */
4974		if (old_rthash)
4975			_FREE(old_rthash, M_DEVBUF);
4976	} else {
4977#if BRIDGE_DEBUG
4978		printf("%s: %s failed %d\n", __func__,
4979		    sc->sc_ifp->if_xname, error);
4980#endif /* BRIDGE_DEBUG */
4981		if (new_rthash != NULL)
4982			_FREE(new_rthash, M_DEVBUF);
4983	}
4984}
4985
4986/*
 * Resize the number of hash buckets based on the load factor.
 * Currently we only ever grow the table.
 * Failing to resize the hash table is not fatal.
4990 */
4991static void
4992bridge_rthash_resize(struct bridge_softc *sc)
4993{
4994	BRIDGE_LOCK_ASSERT_HELD(sc);
4995
4996	if ((sc->sc_flags & SCF_DETACHING) || (sc->sc_flags & SCF_RESIZING))
4997		return;
4998
4999	/*
5000	 * Four entries per hash bucket is our ideal load factor
5001	 */
5002	if (sc->sc_brtcnt < sc->sc_rthash_size * 4)
5003		return;
5004	/*
5005	 * Hard limit on the size of the routing hash table
5006	 */
5007	if (sc->sc_rthash_size >= bridge_rtable_hash_size_max)
5008		return;
5009
5010	sc->sc_resize_call.bdc_sc = sc;
5011	sc->sc_resize_call.bdc_func = bridge_rthash_delayed_resize;
5012	bridge_schedule_delayed_call(&sc->sc_resize_call);
5013}
5014
5015/*
5016 * bridge_rtable_fini:
5017 *
5018 *	Deconstruct the route table for this bridge.
5019 */
5020static void
5021bridge_rtable_fini(struct bridge_softc *sc)
5022{
5023	KASSERT(sc->sc_brtcnt == 0,
5024	    ("%s: %d bridge routes referenced", __func__, sc->sc_brtcnt));
5025	if (sc->sc_rthash) {
5026		_FREE(sc->sc_rthash, M_DEVBUF);
5027		sc->sc_rthash = NULL;
5028	}
5029}
5030
5031/*
5032 * The following hash function is adapted from "Hash Functions" by Bob Jenkins
5033 * ("Algorithm Alley", Dr. Dobbs Journal, September 1997).
5034 */
5035#define	mix(a, b, c)							\
5036do {									\
5037	a -= b; a -= c; a ^= (c >> 13);					\
5038	b -= c; b -= a; b ^= (a << 8);					\
5039	c -= a; c -= b; c ^= (b >> 13);					\
5040	a -= b; a -= c; a ^= (c >> 12);					\
5041	b -= c; b -= a; b ^= (a << 16);					\
5042	c -= a; c -= b; c ^= (b >> 5);					\
5043	a -= b; a -= c; a ^= (c >> 3);					\
5044	b -= c; b -= a; b ^= (a << 10);					\
5045	c -= a; c -= b; c ^= (b >> 15);					\
5046} while (/*CONSTCOND*/0)
5047
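/*
 * bridge_rthash:
 *
 *	Hash an Ethernet address into a bucket index: fold the six address
 *	bytes into two 32-bit words, mix them with the per-bridge random
 *	key, and mask the result to the current table size.
 */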
5048static __inline uint32_t
5049bridge_rthash(struct bridge_softc *sc, const uint8_t *addr)
5050{
5051	uint32_t a = 0x9e3779b9, b = 0x9e3779b9, c = sc->sc_rthash_key;
5052
5053	b += addr[5] << 8;
5054	b += addr[4];
5055	a += addr[3] << 24;
5056	a += addr[2] << 16;
5057	a += addr[1] << 8;
5058	a += addr[0];
5059
5060	mix(a, b, c);
5061
5062	return (c & BRIDGE_RTHASH_MASK(sc));
5063}
5064
5065#undef mix
5066
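/*
 * bridge_rtnode_addr_cmp:
 *
 *	Compare two Ethernet addresses byte by byte, returning zero when
 *	they are equal and a negative or positive value otherwise, in the
 *	style of memcmp(3).  Used to keep the hash chains ordered.
 */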
5067static int
5068bridge_rtnode_addr_cmp(const uint8_t *a, const uint8_t *b)
5069{
5070	int i, d;
5071
5072	for (i = 0, d = 0; i < ETHER_ADDR_LEN && d == 0; i++) {
5073		d = ((int)a[i]) - ((int)b[i]);
5074	}
5075
5076	return (d);
5077}
5078
5079/*
5080 * bridge_rtnode_lookup:
5081 *
 *	Look up a bridge route node for the specified destination.  Compare
 *	the vlan id, or if it is zero just return the first match.
5084 */
5085static struct bridge_rtnode *
5086bridge_rtnode_lookup(struct bridge_softc *sc, const uint8_t *addr,
5087	uint16_t vlan)
5088{
5089	struct bridge_rtnode *brt;
5090	uint32_t hash;
5091	int dir;
5092
5093	BRIDGE_LOCK_ASSERT_HELD(sc);
5094
5095	hash = bridge_rthash(sc, addr);
5096	LIST_FOREACH(brt, &sc->sc_rthash[hash], brt_hash) {
5097		dir = bridge_rtnode_addr_cmp(addr, brt->brt_addr);
5098		if (dir == 0 && (brt->brt_vlan == vlan || vlan == 0))
5099			return (brt);
5100		if (dir > 0)
5101			return (NULL);
5102	}
5103
5104	return (NULL);
5105}
5106
5107/*
5108 * bridge_rtnode_hash:
5109 *
5110 *	Insert the specified bridge node into the route hash table.
 *	This is used when adding a new node or to rehash when resizing
 *	the hash table.  Hash chains are kept sorted by address so that
 *	lookups and inserts can terminate early.
5113 */
5114static int
5115bridge_rtnode_hash(struct bridge_softc *sc, struct bridge_rtnode *brt)
5116{
5117	struct bridge_rtnode *lbrt;
5118	uint32_t hash;
5119	int dir;
5120
5121	BRIDGE_LOCK_ASSERT_HELD(sc);
5122
5123	hash = bridge_rthash(sc, brt->brt_addr);
5124
5125	lbrt = LIST_FIRST(&sc->sc_rthash[hash]);
5126	if (lbrt == NULL) {
5127		LIST_INSERT_HEAD(&sc->sc_rthash[hash], brt, brt_hash);
5128		goto out;
5129	}
5130
5131	do {
5132		dir = bridge_rtnode_addr_cmp(brt->brt_addr, lbrt->brt_addr);
5133		if (dir == 0 && brt->brt_vlan == lbrt->brt_vlan) {
5134#if BRIDGE_DEBUG
5135			if (if_bridge_debug & BR_DBGF_RT_TABLE)
5136				printf("%s: %s EEXIST "
5137				    "%02x:%02x:%02x:%02x:%02x:%02x\n",
5138				    __func__, sc->sc_ifp->if_xname,
5139				    brt->brt_addr[0], brt->brt_addr[1],
5140				    brt->brt_addr[2], brt->brt_addr[3],
5141				    brt->brt_addr[4], brt->brt_addr[5]);
5142#endif
5143			return (EEXIST);
5144		}
5145		if (dir > 0) {
5146			LIST_INSERT_BEFORE(lbrt, brt, brt_hash);
5147			goto out;
5148		}
5149		if (LIST_NEXT(lbrt, brt_hash) == NULL) {
5150			LIST_INSERT_AFTER(lbrt, brt, brt_hash);
5151			goto out;
5152		}
5153		lbrt = LIST_NEXT(lbrt, brt_hash);
5154	} while (lbrt != NULL);
5155
5156#if BRIDGE_DEBUG
5157	if (if_bridge_debug & BR_DBGF_RT_TABLE)
5158		printf("%s: %s impossible %02x:%02x:%02x:%02x:%02x:%02x\n",
5159		    __func__, sc->sc_ifp->if_xname,
5160		    brt->brt_addr[0], brt->brt_addr[1], brt->brt_addr[2],
5161		    brt->brt_addr[3], brt->brt_addr[4], brt->brt_addr[5]);
5162#endif
5163
5164out:
5165	return (0);
5166}
5167
5168/*
5169 * bridge_rtnode_insert:
5170 *
5171 *	Insert the specified bridge node into the route table.  We
5172 *	assume the entry is not already in the table.
5173 */
5174static int
5175bridge_rtnode_insert(struct bridge_softc *sc, struct bridge_rtnode *brt)
5176{
5177	int error;
5178
5179	error = bridge_rtnode_hash(sc, brt);
5180	if (error != 0)
5181		return (error);
5182
5183	LIST_INSERT_HEAD(&sc->sc_rtlist, brt, brt_list);
5184	sc->sc_brtcnt++;
5185
5186	bridge_rthash_resize(sc);
5187
5188	return (0);
5189}
5190
5191/*
5192 * bridge_rtnode_destroy:
5193 *
5194 *	Destroy a bridge rtnode.
5195 */
5196static void
5197bridge_rtnode_destroy(struct bridge_softc *sc, struct bridge_rtnode *brt)
5198{
5199	BRIDGE_LOCK_ASSERT_HELD(sc);
5200
5201	LIST_REMOVE(brt, brt_hash);
5202
5203	LIST_REMOVE(brt, brt_list);
5204	sc->sc_brtcnt--;
5205	brt->brt_dst->bif_addrcnt--;
5206	zfree(bridge_rtnode_pool, brt);
5207}
5208
5209#if BRIDGESTP
5210/*
5211 * bridge_rtable_expire:
5212 *
5213 *	Set the expiry time for all routes on an interface.
5214 */
5215static void
5216bridge_rtable_expire(struct ifnet *ifp, int age)
5217{
5218	struct bridge_softc *sc = ifp->if_bridge;
5219	struct bridge_rtnode *brt;
5220
5221	BRIDGE_LOCK(sc);
5222
5223	/*
5224	 * If the age is zero then flush, otherwise set all the expiry times to
5225	 * age for the interface
5226	 */
5227	if (age == 0) {
5228		bridge_rtdelete(sc, ifp, IFBF_FLUSHDYN);
5229	} else {
5230		unsigned long now;
5231
5232		now = (unsigned long) net_uptime();
5233
5234		LIST_FOREACH(brt, &sc->sc_rtlist, brt_list) {
5235			/* Cap the expiry time to 'age' */
5236			if (brt->brt_ifp == ifp &&
5237			    brt->brt_expire > now + age &&
5238			    (brt->brt_flags & IFBAF_TYPEMASK) == IFBAF_DYNAMIC)
5239				brt->brt_expire = now + age;
5240		}
5241	}
5242	BRIDGE_UNLOCK(sc);
5243}
5244
5245/*
5246 * bridge_state_change:
5247 *
5248 *	Callback from the bridgestp code when a port changes states.
5249 */
5250static void
5251bridge_state_change(struct ifnet *ifp, int state)
5252{
5253	struct bridge_softc *sc = ifp->if_bridge;
5254	static const char *stpstates[] = {
5255		"disabled",
5256		"listening",
5257		"learning",
5258		"forwarding",
5259		"blocking",
5260		"discarding"
5261	};
5262
5263	if (log_stp)
5264		log(LOG_NOTICE, "%s: state changed to %s on %s\n",
5265		    sc->sc_ifp->if_xname,
5266		    stpstates[state], ifp->if_xname);
5267}
5268#endif /* BRIDGESTP */
5269
5270#ifdef PFIL_HOOKS
5271/*
5272 * Send bridge packets through pfil if they are one of the types pfil can deal
5273 * with, or if they are ARP or REVARP.  (pfil will pass ARP and REVARP without
5274 * question.) If *bifp or *ifp are NULL then packet filtering is skipped for
5275 * that interface.
5276 */
5277static int
5278bridge_pfil(struct mbuf **mp, struct ifnet *bifp, struct ifnet *ifp, int dir)
5279{
5280	int snap, error, i, hlen;
5281	struct ether_header *eh1, eh2;
5282	struct ip_fw_args args;
5283	struct ip *ip;
5284	struct llc llc1;
5285	u_int16_t ether_type;
5286
5287	snap = 0;
	error = -1;	/* Default error unless explicitly set to 0 below */
5289
5290#if 0
5291	/* we may return with the IP fields swapped, ensure its not shared */
5292	KASSERT(M_WRITABLE(*mp), ("%s: modifying a shared mbuf", __func__));
5293#endif
5294
5295	if (pfil_bridge == 0 && pfil_member == 0 && pfil_ipfw == 0)
5296		return (0); /* filtering is disabled */
5297
5298	i = min((*mp)->m_pkthdr.len, max_protohdr);
5299	if ((*mp)->m_len < i) {
5300		*mp = m_pullup(*mp, i);
5301		if (*mp == NULL) {
5302			printf("%s: m_pullup failed\n", __func__);
5303			return (-1);
5304		}
5305	}
5306
5307	eh1 = mtod(*mp, struct ether_header *);
5308	ether_type = ntohs(eh1->ether_type);
5309
5310	/*
5311	 * Check for SNAP/LLC.
5312	 */
5313	if (ether_type < ETHERMTU) {
5314		struct llc *llc2 = (struct llc *)(eh1 + 1);
5315
5316		if ((*mp)->m_len >= ETHER_HDR_LEN + 8 &&
5317		    llc2->llc_dsap == LLC_SNAP_LSAP &&
5318		    llc2->llc_ssap == LLC_SNAP_LSAP &&
5319		    llc2->llc_control == LLC_UI) {
5320			ether_type = htons(llc2->llc_un.type_snap.ether_type);
5321			snap = 1;
5322		}
5323	}
5324
5325	/*
5326	 * If we're trying to filter bridge traffic, don't look at anything
5327	 * other than IP and ARP traffic.  If the filter doesn't understand
5328	 * IPv6, don't allow IPv6 through the bridge either.  This is lame
5329	 * since if we really wanted, say, an AppleTalk filter, we are hosed,
5330	 * but of course we don't have an AppleTalk filter to begin with.
5331	 * (Note that since pfil doesn't understand ARP it will pass *ALL*
5332	 * ARP traffic.)
5333	 */
5334	switch (ether_type) {
5335		case ETHERTYPE_ARP:
5336		case ETHERTYPE_REVARP:
5337			if (pfil_ipfw_arp == 0)
5338				return (0); /* Automatically pass */
5339			break;
5340
5341		case ETHERTYPE_IP:
5342#if INET6
5343		case ETHERTYPE_IPV6:
5344#endif /* INET6 */
5345			break;
5346		default:
5347			/*
5348			 * Check to see if the user wants to pass non-ip
5349			 * packets, these will not be checked by pfil(9) and
5350			 * passed unconditionally so the default is to drop.
5351			 */
5352			if (pfil_onlyip)
5353				goto bad;
5354	}
5355
5356	/* Strip off the Ethernet header and keep a copy. */
5357	m_copydata(*mp, 0, ETHER_HDR_LEN, (caddr_t)&eh2);
5358	m_adj(*mp, ETHER_HDR_LEN);
5359
5360	/* Strip off snap header, if present */
5361	if (snap) {
5362		m_copydata(*mp, 0, sizeof (struct llc), (caddr_t)&llc1);
5363		m_adj(*mp, sizeof (struct llc));
5364	}
5365
5366	/*
5367	 * Check the IP header for alignment and errors
5368	 */
5369	if (dir == PFIL_IN) {
5370		switch (ether_type) {
5371			case ETHERTYPE_IP:
5372				error = bridge_ip_checkbasic(mp);
5373				break;
5374#if INET6
5375			case ETHERTYPE_IPV6:
5376				error = bridge_ip6_checkbasic(mp);
5377				break;
5378#endif /* INET6 */
5379			default:
5380				error = 0;
5381		}
5382		if (error)
5383			goto bad;
5384	}
5385
5386	if (IPFW_LOADED && pfil_ipfw != 0 && dir == PFIL_OUT && ifp != NULL) {
5387		error = -1;
5388		args.rule = ip_dn_claim_rule(*mp);
5389		if (args.rule != NULL && fw_one_pass)
5390			goto ipfwpass; /* packet already partially processed */
5391
5392		args.m = *mp;
5393		args.oif = ifp;
5394		args.next_hop = NULL;
5395		args.eh = &eh2;
5396		args.inp = NULL;	/* used by ipfw uid/gid/jail rules */
5397		i = ip_fw_chk_ptr(&args);
5398		*mp = args.m;
5399
5400		if (*mp == NULL)
5401			return (error);
5402
5403		if (DUMMYNET_LOADED && (i == IP_FW_DUMMYNET)) {
5404
5405			/* put the Ethernet header back on */
5406			M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
5407			if (*mp == NULL)
5408				return (error);
5409			bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
5410
5411			/*
5412			 * Pass the pkt to dummynet, which consumes it. The
5413			 * packet will return to us via bridge_dummynet().
5414			 */
5415			args.oif = ifp;
5416			ip_dn_io_ptr(mp, DN_TO_IFB_FWD, &args, DN_CLIENT_IPFW);
5417			return (error);
5418		}
5419
5420		if (i != IP_FW_PASS) /* drop */
5421			goto bad;
5422	}
5423
5424ipfwpass:
5425	error = 0;
5426
5427	/*
5428	 * Run the packet through pfil
5429	 */
5430	switch (ether_type) {
5431	case ETHERTYPE_IP:
5432		/*
		 * Before calling the firewall, swap fields the same as
		 * IP does.  Here we assume the header is contiguous.
5435		 */
5436		ip = mtod(*mp, struct ip *);
5437
5438		ip->ip_len = ntohs(ip->ip_len);
5439		ip->ip_off = ntohs(ip->ip_off);
5440
5441		/*
5442		 * Run pfil on the member interface and the bridge, both can
5443		 * be skipped by clearing pfil_member or pfil_bridge.
5444		 *
5445		 * Keep the order:
5446		 *   in_if -> bridge_if -> out_if
5447		 */
5448		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
5449			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
5450			    dir, NULL);
5451
5452		if (*mp == NULL || error != 0) /* filter may consume */
5453			break;
5454
5455		if (pfil_member && ifp != NULL)
5456			error = pfil_run_hooks(&inet_pfil_hook, mp, ifp,
5457			    dir, NULL);
5458
5459		if (*mp == NULL || error != 0) /* filter may consume */
5460			break;
5461
5462		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
5463			error = pfil_run_hooks(&inet_pfil_hook, mp, bifp,
5464			    dir, NULL);
5465
5466		if (*mp == NULL || error != 0) /* filter may consume */
5467			break;
5468
5469		/* check if we need to fragment the packet */
5470		if (pfil_member && ifp != NULL && dir == PFIL_OUT) {
5471			i = (*mp)->m_pkthdr.len;
5472			if (i > ifp->if_mtu) {
5473				error = bridge_fragment(ifp, *mp, &eh2, snap,
5474				    &llc1);
5475				return (error);
5476			}
5477		}
5478
5479		/* Recalculate the ip checksum and restore byte ordering */
5480		ip = mtod(*mp, struct ip *);
5481		hlen = ip->ip_hl << 2;
5482		if (hlen < sizeof (struct ip))
5483			goto bad;
5484		if (hlen > (*mp)->m_len) {
5485			if ((*mp = m_pullup(*mp, hlen)) == 0)
5486				goto bad;
5487			ip = mtod(*mp, struct ip *);
5488			if (ip == NULL)
5489				goto bad;
5490		}
5491		ip->ip_len = htons(ip->ip_len);
5492		ip->ip_off = htons(ip->ip_off);
5493		ip->ip_sum = 0;
5494		if (hlen == sizeof (struct ip))
5495			ip->ip_sum = in_cksum_hdr(ip);
5496		else
5497			ip->ip_sum = in_cksum(*mp, hlen);
5498
5499		break;
5500#if INET6
5501	case ETHERTYPE_IPV6:
5502		if (pfil_bridge && dir == PFIL_OUT && bifp != NULL)
5503			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
5504			    dir, NULL);
5505
5506		if (*mp == NULL || error != 0) /* filter may consume */
5507			break;
5508
5509		if (pfil_member && ifp != NULL)
5510			error = pfil_run_hooks(&inet6_pfil_hook, mp, ifp,
5511			    dir, NULL);
5512
5513		if (*mp == NULL || error != 0) /* filter may consume */
5514			break;
5515
5516		if (pfil_bridge && dir == PFIL_IN && bifp != NULL)
5517			error = pfil_run_hooks(&inet6_pfil_hook, mp, bifp,
5518			    dir, NULL);
5519		break;
5520#endif
5521	default:
5522		error = 0;
5523		break;
5524	}
5525
5526	if (*mp == NULL)
5527		return (error);
5528	if (error != 0)
5529		goto bad;
5530
5531	error = -1;
5532
5533	/*
5534	 * Finally, put everything back the way it was and return
5535	 */
5536	if (snap) {
5537		M_PREPEND(*mp, sizeof (struct llc), M_DONTWAIT);
5538		if (*mp == NULL)
5539			return (error);
5540		bcopy(&llc1, mtod(*mp, caddr_t), sizeof (struct llc));
5541	}
5542
5543	M_PREPEND(*mp, ETHER_HDR_LEN, M_DONTWAIT);
5544	if (*mp == NULL)
5545		return (error);
5546	bcopy(&eh2, mtod(*mp, caddr_t), ETHER_HDR_LEN);
5547
5548	return (0);
5549
5550bad:
5551	m_freem(*mp);
5552	*mp = NULL;
5553	return (error);
5554}
5555
5556/*
5557 * Perform basic checks on header size since
5558 * pfil assumes ip_input has already processed
5559 * it for it.  Cut-and-pasted from ip_input.c.
5560 * Given how simple the IPv6 version is,
5561 * does the IPv4 version really need to be
5562 * this complicated?
5563 *
5564 * XXX Should we update ipstat here, or not?
5565 * XXX Right now we update ipstat but not
5566 * XXX csum_counter.
5567 */
5568static int
5569bridge_ip_checkbasic(struct mbuf **mp)
5570{
5571	struct mbuf *m = *mp;
5572	struct ip *ip;
5573	int len, hlen;
5574	u_short sum;
5575
5576	if (*mp == NULL)
5577		return (-1);
5578
5579	if (IP_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
5580		/* max_linkhdr is already rounded up to nearest 4-byte */
5581		if ((m = m_copyup(m, sizeof (struct ip),
5582		    max_linkhdr)) == NULL) {
5583			/* XXXJRT new stat, please */
5584			ipstat.ips_toosmall++;
5585			goto bad;
5586		}
5587	} else if (__predict_false(m->m_len < sizeof (struct ip))) {
5588		if ((m = m_pullup(m, sizeof (struct ip))) == NULL) {
5589			ipstat.ips_toosmall++;
5590			goto bad;
5591		}
5592	}
5593	ip = mtod(m, struct ip *);
5594	if (ip == NULL) goto bad;
5595
5596	if (ip->ip_v != IPVERSION) {
5597		ipstat.ips_badvers++;
5598		goto bad;
5599	}
5600	hlen = ip->ip_hl << 2;
5601	if (hlen < sizeof (struct ip)) { /* minimum header length */
5602		ipstat.ips_badhlen++;
5603		goto bad;
5604	}
5605	if (hlen > m->m_len) {
5606		if ((m = m_pullup(m, hlen)) == 0) {
5607			ipstat.ips_badhlen++;
5608			goto bad;
5609		}
5610		ip = mtod(m, struct ip *);
5611		if (ip == NULL) goto bad;
5612	}
5613
5614	if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
5615		sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
5616	} else {
5617		if (hlen == sizeof (struct ip)) {
5618			sum = in_cksum_hdr(ip);
5619		} else {
5620			sum = in_cksum(m, hlen);
5621		}
5622	}
5623	if (sum) {
5624		ipstat.ips_badsum++;
5625		goto bad;
5626	}
5627
5628	/* Retrieve the packet length. */
5629	len = ntohs(ip->ip_len);
5630
5631	/*
5632	 * Check for additional length bogosity
5633	 */
5634	if (len < hlen) {
5635		ipstat.ips_badlen++;
5636		goto bad;
5637	}
5638
5639	/*
5640	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
5642	 * Drop packet if shorter than we expect.
5643	 */
5644	if (m->m_pkthdr.len < len) {
5645		ipstat.ips_tooshort++;
5646		goto bad;
5647	}
5648
5649	/* Checks out, proceed */
5650	*mp = m;
5651	return (0);
5652
5653bad:
5654	*mp = m;
5655	return (-1);
5656}
5657
5658#if INET6
5659/*
5660 * Same as above, but for IPv6.
5661 * Cut-and-pasted from ip6_input.c.
5662 * XXX Should we update ip6stat, or not?
5663 */
5664static int
5665bridge_ip6_checkbasic(struct mbuf **mp)
5666{
5667	struct mbuf *m = *mp;
5668	struct ip6_hdr *ip6;
5669
5670	/*
5671	 * If the IPv6 header is not aligned, slurp it up into a new
5672	 * mbuf with space for link headers, in the event we forward
5673	 * it.  Otherwise, if it is aligned, make sure the entire base
5674	 * IPv6 header is in the first mbuf of the chain.
5675	 */
5676	if (IP6_HDR_ALIGNED_P(mtod(m, caddr_t)) == 0) {
5677		struct ifnet *inifp = m->m_pkthdr.rcvif;
5678		/* max_linkhdr is already rounded up to nearest 4-byte */
5679		if ((m = m_copyup(m, sizeof (struct ip6_hdr),
5680		    max_linkhdr)) == NULL) {
5681			/* XXXJRT new stat, please */
5682			ip6stat.ip6s_toosmall++;
5683			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
5684			goto bad;
5685		}
5686	} else if (__predict_false(m->m_len < sizeof (struct ip6_hdr))) {
5687		struct ifnet *inifp = m->m_pkthdr.rcvif;
5688		if ((m = m_pullup(m, sizeof (struct ip6_hdr))) == NULL) {
5689			ip6stat.ip6s_toosmall++;
5690			in6_ifstat_inc(inifp, ifs6_in_hdrerr);
5691			goto bad;
5692		}
5693	}
5694
5695	ip6 = mtod(m, struct ip6_hdr *);
5696
5697	if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
5698		ip6stat.ip6s_badvers++;
5699		in6_ifstat_inc(m->m_pkthdr.rcvif, ifs6_in_hdrerr);
5700		goto bad;
5701	}
5702
5703	/* Checks out, proceed */
5704	*mp = m;
5705	return (0);
5706
5707bad:
5708	*mp = m;
5709	return (-1);
5710}
5711#endif /* INET6 */
5712
5713/*
5714 * bridge_fragment:
5715 *
5716 *	Return a fragmented mbuf chain.
5717 */
5718static int
5719bridge_fragment(struct ifnet *ifp, struct mbuf *m, struct ether_header *eh,
5720	int snap, struct llc *llc)
5721{
5722	struct mbuf *m0;
5723	struct ip *ip;
5724	int error = -1;
5725
5726	if (m->m_len < sizeof (struct ip) &&
5727	    (m = m_pullup(m, sizeof (struct ip))) == NULL)
5728		goto out;
5729	ip = mtod(m, struct ip *);
5730
5731	error = ip_fragment(ip, &m, ifp->if_mtu, ifp->if_hwassist,
5732	    CSUM_DELAY_IP);
5733	if (error)
5734		goto out;
5735
5736	/* walk the chain and re-add the Ethernet header */
5737	for (m0 = m; m0; m0 = m0->m_nextpkt) {
5738		if (error == 0) {
5739			if (snap) {
5740				M_PREPEND(m0, sizeof (struct llc), M_DONTWAIT);
5741				if (m0 == NULL) {
5742					error = ENOBUFS;
5743					continue;
5744				}
5745				bcopy(llc, mtod(m0, caddr_t),
5746				    sizeof (struct llc));
5747			}
5748			M_PREPEND(m0, ETHER_HDR_LEN, M_DONTWAIT);
5749			if (m0 == NULL) {
5750				error = ENOBUFS;
5751				continue;
5752			}
5753			bcopy(eh, mtod(m0, caddr_t), ETHER_HDR_LEN);
5754		} else {
5755			m_freem(m);
5756		}
5757	}
5758
5759	if (error == 0)
5760		ipstat.ips_fragmented++;
5761
5762	return (error);
5763
5764out:
5765	if (m != NULL)
5766		m_freem(m);
5767	return (error);
5768}
5769#endif /* PFIL_HOOKS */
5770
5771/*
5772 * bridge_set_bpf_tap:
5773 *
 *	Set up the BPF callbacks.
5775 */
5776static errno_t
5777bridge_set_bpf_tap(ifnet_t ifp, bpf_tap_mode mode, bpf_packet_func bpf_callback)
5778{
5779	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5780
5781	/* TBD locking */
5782	if (sc == NULL || (sc->sc_flags & SCF_DETACHING)) {
5783		return (ENODEV);
5784	}
5785
5786	switch (mode) {
5787		case BPF_TAP_DISABLE:
5788			sc->sc_bpf_input = sc->sc_bpf_output = NULL;
5789			break;
5790
5791		case BPF_TAP_INPUT:
5792			sc->sc_bpf_input = bpf_callback;
5793			break;
5794
5795		case BPF_TAP_OUTPUT:
5796			sc->sc_bpf_output = bpf_callback;
5797			break;
5798
5799		case BPF_TAP_INPUT_OUTPUT:
5800			sc->sc_bpf_input = sc->sc_bpf_output = bpf_callback;
5801			break;
5802
5803		default:
5804			break;
5805	}
5806
5807	return (0);
5808}
5809
5810/*
5811 * bridge_detach:
5812 *
5813 *	Callback when interface has been detached.
5814 */
5815static void
5816bridge_detach(ifnet_t ifp)
5817{
5818	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5819
5820#if BRIDGESTP
5821	bstp_detach(&sc->sc_stp);
5822#endif /* BRIDGESTP */
5823
5824	/* Tear down the routing table. */
5825	bridge_rtable_fini(sc);
5826
5827	lck_mtx_lock(&bridge_list_mtx);
5828	LIST_REMOVE(sc, sc_list);
5829	lck_mtx_unlock(&bridge_list_mtx);
5830
5831	ifnet_release(ifp);
5832
5833	lck_mtx_destroy(&sc->sc_mtx, bridge_lock_grp);
5834
5835	_FREE(sc, M_DEVBUF);
5836}
5837
5838/*
5839 * bridge_bpf_input:
5840 *
5841 *	Invoke the input BPF callback if enabled
5842 */
5843__private_extern__ errno_t
5844bridge_bpf_input(ifnet_t ifp, struct mbuf *m)
5845{
5846	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5847
5848	if (sc->sc_bpf_input) {
5849		if (mbuf_pkthdr_rcvif(m) != ifp) {
5850			printf("%s: rcvif: 0x%llx != ifp 0x%llx\n", __func__,
5851			    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_pkthdr_rcvif(m)),
5852			    (uint64_t)VM_KERNEL_ADDRPERM(ifp));
5853		}
5854		(*sc->sc_bpf_input)(ifp, m);
5855	}
5856	return (0);
5857}
5858
5859/*
5860 * bridge_bpf_output:
5861 *
5862 *	Invoke the output BPF callback if enabled
5863 */
5864__private_extern__ errno_t
5865bridge_bpf_output(ifnet_t ifp, struct mbuf *m)
5866{
5867	struct bridge_softc *sc = (struct bridge_softc *)ifnet_softc(ifp);
5868
5869	if (sc->sc_bpf_output) {
5870		(*sc->sc_bpf_output)(ifp, m);
5871	}
5872	return (0);
5873}
5874
5875/*
5876 * bridge_link_event:
5877 *
5878 *	Report a data link event on an interface
5879 */
5880static void
5881bridge_link_event(struct ifnet *ifp, u_int32_t event_code)
5882{
5883	struct {
5884		struct kern_event_msg	header;
5885		u_int32_t		unit;
5886		char			if_name[IFNAMSIZ];
5887	} event;
5888
5889#if BRIDGE_DEBUG
5890	if (if_bridge_debug & BR_DBGF_LIFECYCLE)
5891		printf("%s: %s event_code %u - %s\n", __func__, ifp->if_xname,
5892		    event_code, dlil_kev_dl_code_str(event_code));
5893#endif /* BRIDGE_DEBUG */
5894
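	/*
	 * Build a KEV_DL_SUBCLASS kernel event that carries the interface
	 * family, unit and name, and post it via ifnet_event().
	 */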
5895	bzero(&event, sizeof (event));
5896	event.header.total_size		= sizeof (event);
5897	event.header.vendor_code	= KEV_VENDOR_APPLE;
5898	event.header.kev_class		= KEV_NETWORK_CLASS;
5899	event.header.kev_subclass	= KEV_DL_SUBCLASS;
5900	event.header.event_code		= event_code;
5901	event.header.event_data[0]	= ifnet_family(ifp);
5902	event.unit			= (u_int32_t)ifnet_unit(ifp);
5903	strlcpy(event.if_name, ifnet_name(ifp), IFNAMSIZ);
5904	ifnet_event(ifp, &event.header);
5905}
5906
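/*
 * BRIDGE_HF_DROP() bumps the named host-filter statistic, logs the drop
 * reason when BR_DBGF_HOSTFILTER debugging is enabled, and marks the packet
 * invalid by setting error to EINVAL.  For instance (a rough sketch of the
 * expansion, with debugging enabled):
 *
 *	BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
 *
 * becomes approximately:
 *
 *	bridge_hostfilter_stats.brhf_dhcp_bad_op++;
 *	printf("%s.%d brhf_dhcp_bad_op\n", __func__, __LINE__);
 *	error = EINVAL;
 */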
5907#define	BRIDGE_HF_DROP(reason, func, line) { \
5908	bridge_hostfilter_stats.reason++; \
5909	if (if_bridge_debug & BR_DBGF_HOSTFILTER) \
5910		printf("%s.%d " #reason "\n", func, line); \
5911	error = EINVAL; \
5912}
5913
5914/*
5915 * Make sure this is a DHCP or BOOTP request that matches the host filter
5916 */
5917static int
5918bridge_dhcp_filter(struct bridge_iflist *bif, struct mbuf *m, size_t offset)
5919{
5920	int error = EINVAL;
5921	struct dhcp dhcp;
5922
5923	/*
5924	 * Note: We use the dhcp structure because the bootp structure
5925	 * definition is larger and some vendors do not pad the request
5926	 */
5927	error = mbuf_copydata(m, offset, sizeof(struct dhcp), &dhcp);
5928	if (error != 0) {
5929		BRIDGE_HF_DROP(brhf_dhcp_too_small, __func__, __LINE__);
5930		goto done;
5931	}
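	/* Only BOOTREQUEST (client-to-server) packets are expected */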
5932	if (dhcp.dp_op != BOOTREQUEST) {
5933		BRIDGE_HF_DROP(brhf_dhcp_bad_op, __func__, __LINE__);
5934		goto done;
5935	}
5936	/*
5937	 * The hardware address must be an exact match
5938	 */
5939	if (dhcp.dp_htype != ARPHRD_ETHER) {
5940		BRIDGE_HF_DROP(brhf_dhcp_bad_htype, __func__, __LINE__);
5941		goto done;
5942	}
5943	if (dhcp.dp_hlen != ETHER_ADDR_LEN) {
5944		BRIDGE_HF_DROP(brhf_dhcp_bad_hlen, __func__, __LINE__);
5945		goto done;
5946	}
5947	if (bcmp(dhcp.dp_chaddr, bif->bif_hf_hwsrc,
5948	    ETHER_ADDR_LEN) != 0) {
5949		BRIDGE_HF_DROP(brhf_dhcp_bad_chaddr, __func__, __LINE__);
5950		goto done;
5951	}
5952	/*
5953	 * The client address must match the host address or be unspecified
5954	 */
5955	if (dhcp.dp_ciaddr.s_addr != bif->bif_hf_ipsrc.s_addr &&
5956	    dhcp.dp_ciaddr.s_addr != INADDR_ANY) {
5957		BRIDGE_HF_DROP(brhf_dhcp_bad_ciaddr, __func__, __LINE__);
5958		goto done;
5959	}
5960	error = 0;
5961done:
5962	return (error);
5963}
5964
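/*
 * bridge_host_filter:
 *
 *	Enforce the host filter on a member interface: only ARP and IPv4
 *	packets whose source addresses match the hardware and IP addresses
 *	configured on the interface are allowed, with allowances for
 *	DHCP/BOOTP requests and ARP probes.
 */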
5965static int
5966bridge_host_filter(struct bridge_iflist *bif, struct mbuf *m)
5967{
5968	int error = EINVAL;
5969	struct ether_header *eh;
5970	static struct in_addr inaddr_any = { .s_addr = INADDR_ANY };
5971
5972	/*
5973	 * Check that the packet is large enough for an Ethernet header
5974	 */
5975	if (mbuf_pkthdr_len(m) < sizeof(struct ether_header)) {
5976		BRIDGE_HF_DROP(brhf_ether_too_small, __func__, __LINE__);
5977		goto done;
5978	}
5979	if (mbuf_len(m) < sizeof(struct ether_header) &&
5980	    mbuf_pullup(&m, sizeof(struct ether_header)) != 0) {
5981		BRIDGE_HF_DROP(brhf_ether_pullup_failed, __func__, __LINE__);
5982		goto done;
5983	}
5984	eh = mtod(m, struct ether_header *);
5985
5986	/*
5987	 * Restrict the source hardware address
5988	 */
5989	if ((bif->bif_flags & BIFF_HF_HWSRC) == 0 ||
5990	    bcmp(eh->ether_shost, bif->bif_hf_hwsrc,
5991	    ETHER_ADDR_LEN) != 0) {
5992		BRIDGE_HF_DROP(brhf_bad_ether_srchw_addr, __func__, __LINE__);
5993		goto done;
5994	}
5995
5996	/*
5997	 * Restrict Ethernet protocols to ARP and IP
5998	 */
5999	if (eh->ether_type == htons(ETHERTYPE_ARP)) {
6000		struct ether_arp *ea;
6001		size_t minlen = sizeof(struct ether_header) +
6002			sizeof(struct ether_arp);
6003
6004		/*
6005		 * Make the Ethernet and ARP headers contiguous
6006		 */
6007		if (mbuf_pkthdr_len(m) < minlen) {
6008			BRIDGE_HF_DROP(brhf_arp_too_small, __func__, __LINE__);
6009			goto done;
6010		}
6011		if (mbuf_len(m) < minlen && mbuf_pullup(&m, minlen) != 0) {
6012			BRIDGE_HF_DROP(brhf_arp_pullup_failed,
6013				__func__, __LINE__);
6014			goto done;
6015		}
6016		/*
6017		 * Verify this is an Ethernet/IPv4 ARP packet
6018		 */
6019		eh = mtod(m, struct ether_header *);
6020		ea = (struct ether_arp *)(eh + 1);
6021		if (ea->arp_hrd != htons(ARPHRD_ETHER)) {
6022			BRIDGE_HF_DROP(brhf_arp_bad_hw_type,
6023				__func__, __LINE__);
6024			goto done;
6025		}
6026		if (ea->arp_pro != htons(ETHERTYPE_IP)) {
6027			BRIDGE_HF_DROP(brhf_arp_bad_pro_type,
6028				__func__, __LINE__);
6029			goto done;
6030		}
6031		/*
6032		 * Verify the address lengths are correct
6033		 */
6034		if (ea->arp_hln != ETHER_ADDR_LEN) {
6035			BRIDGE_HF_DROP(brhf_arp_bad_hw_len, __func__, __LINE__);
6036			goto done;
6037		}
6038		if (ea->arp_pln != sizeof(struct in_addr)) {
6039			BRIDGE_HF_DROP(brhf_arp_bad_pro_len,
6040				__func__, __LINE__);
6041			goto done;
6042		}
6043
6044		/*
6045		 * Allow only ARP requests and ARP replies
6046		 */
6047		if (ea->arp_op != htons(ARPOP_REQUEST) &&
6048		    ea->arp_op != htons(ARPOP_REPLY)) {
6049			BRIDGE_HF_DROP(brhf_arp_bad_op, __func__, __LINE__);
6050			goto done;
6051		}
6052		/*
6053		 * Verify source hardware address matches
6054		 */
6055		if (bcmp(ea->arp_sha, bif->bif_hf_hwsrc,
6056		    ETHER_ADDR_LEN) != 0) {
6057			BRIDGE_HF_DROP(brhf_arp_bad_sha, __func__, __LINE__);
6058			goto done;
6059		}
6060		/*
6061		 * Verify source protocol address:
6062		 * May be INADDR_ANY (all zeros) for an ARP probe
6063		 */
6064		if (bcmp(ea->arp_spa, &bif->bif_hf_ipsrc.s_addr,
6065			sizeof(struct in_addr)) != 0 &&
6066		    bcmp(ea->arp_spa, &inaddr_any,
6067			sizeof(struct in_addr)) != 0) {
6068			BRIDGE_HF_DROP(brhf_arp_bad_spa, __func__, __LINE__);
6069			goto done;
6070		}
6071		/*
6072		 * The ARP packet passes the host filter
6073		 */
6074		bridge_hostfilter_stats.brhf_arp_ok += 1;
6075		error = 0;
6076	} else if (eh->ether_type == htons(ETHERTYPE_IP)) {
6077		size_t minlen = sizeof(struct ether_header) + sizeof(struct ip);
6078		struct ip iphdr;
6079		size_t offset;
6080
6081		/*
6082		 * Make the Ethernet and IP headers contiguous
6083		 */
6084		if (mbuf_pkthdr_len(m) < minlen) {
6085			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
6086			goto done;
6087		}
6088		offset = sizeof(struct ether_header);
6089		error = mbuf_copydata(m, offset, sizeof(struct ip), &iphdr);
6090		if (error != 0) {
6091			BRIDGE_HF_DROP(brhf_ip_too_small, __func__, __LINE__);
6092			goto done;
6093		}
6094		/*
6095		 * Verify the source IP address
6096		 */
6097		if (iphdr.ip_p == IPPROTO_UDP) {
6098			struct udphdr udp;
6099
6100			minlen += sizeof(struct udphdr);
6101			if (mbuf_pkthdr_len(m) < minlen) {
6102				BRIDGE_HF_DROP(brhf_ip_too_small,
6103					__func__, __LINE__);
6104				goto done;
6105			}
6106
6107			/*
6108			 * Allow all zero addresses for DHCP requests
6109			 */
6110			if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr &&
6111			    iphdr.ip_src.s_addr != INADDR_ANY) {
6112				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
6113					__func__, __LINE__);
6114				goto done;
6115			}
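			/*
			 * The UDP header follows the IP header whose length,
			 * IP_VHL_HL(), is in 32-bit words (hence the shift).
			 */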
6116			offset = sizeof(struct ether_header) +
6117			    (IP_VHL_HL(iphdr.ip_vhl) << 2);
6118			error = mbuf_copydata(m, offset,
6119			    sizeof(struct udphdr), &udp);
6120			if (error != 0) {
6121				BRIDGE_HF_DROP(brhf_ip_too_small,
6122					__func__, __LINE__);
6123				goto done;
6124			}
6125			/*
6126			 * Either it's a BOOTP/DHCP packet that we allow, or
6127			 * it's a UDP packet with the host IP as its source address
6128			 */
6129			if (udp.uh_sport == htons(IPPORT_BOOTPC) &&
6130			    udp.uh_dport == htons(IPPORT_BOOTPS)) {
6131				minlen += sizeof(struct dhcp);
6132				if (mbuf_pkthdr_len(m) < minlen) {
6133					BRIDGE_HF_DROP(brhf_ip_too_small,
6134						__func__, __LINE__);
6135					goto done;
6136				}
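				/* The DHCP payload follows the UDP header */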
6137				offset += sizeof(struct udphdr);
6138				error = bridge_dhcp_filter(bif, m, offset);
6139				if (error != 0)
6140					goto done;
6141			} else if (iphdr.ip_src.s_addr == INADDR_ANY) {
6142				BRIDGE_HF_DROP(brhf_ip_bad_srcaddr,
6143					__func__, __LINE__);
6144				goto done;
6145			}
6146		} else if (iphdr.ip_src.s_addr != bif->bif_hf_ipsrc.s_addr ||
6147		    bif->bif_hf_ipsrc.s_addr == INADDR_ANY) {
6148
6149			BRIDGE_HF_DROP(brhf_ip_bad_srcaddr, __func__, __LINE__);
6150			goto done;
6151		}
6152		/*
6153		 * Allow only a limited set of common IP protocols
6154		 */
6155		if (iphdr.ip_p != IPPROTO_TCP &&
6156		    iphdr.ip_p != IPPROTO_UDP &&
6157		    iphdr.ip_p != IPPROTO_ICMP &&
6158		    iphdr.ip_p != IPPROTO_ESP &&
6159		    iphdr.ip_p != IPPROTO_AH &&
6160		    iphdr.ip_p != IPPROTO_GRE) {
6161			BRIDGE_HF_DROP(brhf_ip_bad_proto, __func__, __LINE__);
6162			goto done;
6163		}
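		/* The IPv4 packet passes the host filter */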
6164		bridge_hostfilter_stats.brhf_ip_ok += 1;
6165		error = 0;
6166	} else {
6167		BRIDGE_HF_DROP(brhf_bad_ether_type, __func__, __LINE__);
6168		goto done;
6169	}
6170done:
6171	if (error != 0) {
6172		if (if_bridge_debug & BR_DBGF_HOSTFILTER) {
6173			if (m) {
6174				printf_mbuf_data(m, 0,
6175				    sizeof(struct ether_header) +
6176				    sizeof(struct ip));
6177			}
6178			printf("\n");
6179		}
6180
6181		if (m != NULL)
6182			m_freem(m);
6183	}
6184	return (error);
6185}
6186