1/*-
2 * Copyright (c) 1980, 1986, 1993
3 *	The Regents of the University of California.  All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in the
12 *    documentation and/or other materials provided with the distribution.
13 * 4. Neither the name of the University nor the names of its contributors
14 *    may be used to endorse or promote products derived from this software
15 *    without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 *
29 *	@(#)if.c	8.5 (Berkeley) 1/9/95
30 * $FreeBSD: head/sys/net/if.c 186275 2008-12-18 09:59:24Z kmacy $
31 */
32
33#include "opt_compat.h"
34#include "opt_inet6.h"
35#include "opt_inet.h"
36#include "opt_mac.h"
37#include "opt_carp.h"
38
39#include <sys/param.h>
40#include <sys/types.h>
41#include <sys/conf.h>
42#include <sys/malloc.h>
43#include <sys/sbuf.h>
44#include <sys/bus.h>
45#include <sys/mbuf.h>
46#include <sys/systm.h>
47#include <sys/priv.h>
48#include <sys/proc.h>
49#include <sys/socket.h>
50#include <sys/socketvar.h>
51#include <sys/protosw.h>
52#include <sys/kernel.h>
53#include <sys/lock.h>
54#include <sys/rwlock.h>
55#include <sys/sockio.h>
56#include <sys/syslog.h>
57#include <sys/sysctl.h>
58#include <sys/taskqueue.h>
59#include <sys/domain.h>
60#include <sys/jail.h>
61#include <sys/vimage.h>
62#include <machine/stdarg.h>
63#include <vm/uma.h>
64
65#include <net/if.h>
66#include <net/if_arp.h>
67#include <net/if_clone.h>
68#include <net/if_dl.h>
69#include <net/if_types.h>
70#include <net/if_var.h>
71#include <net/radix.h>
72#include <net/route.h>
73#include <net/vnet.h>
74
75#if defined(INET) || defined(INET6)
76/*XXX*/
77#include <netinet/in.h>
78#include <netinet/in_var.h>
79#ifdef INET6
80#include <netinet6/in6_var.h>
81#include <netinet6/in6_ifattach.h>
82#endif
83#endif
84#ifdef INET
85#include <netinet/if_ether.h>
86#include <netinet/vinet.h>
87#endif
88#ifdef DEV_CARP
89#include <netinet/ip_carp.h>
90#endif
91
92#include <security/mac/mac_framework.h>
93
94#ifndef VIMAGE
95#ifndef VIMAGE_GLOBALS
96struct vnet_net vnet_net_0;
97#endif
98#endif
99
100SYSCTL_NODE(_net, PF_LINK, link, CTLFLAG_RW, 0, "Link layers");
101SYSCTL_NODE(_net_link, 0, generic, CTLFLAG_RW, 0, "Generic link-management");
102
103/* Log link state change events */
104static int log_link_state_change = 1;
105
106SYSCTL_INT(_net_link, OID_AUTO, log_link_state_change, CTLFLAG_RW,
107	&log_link_state_change, 0,
108	"log interface link state change events");
109
110void	(*bstp_linkstate_p)(struct ifnet *ifp, int state);
111void	(*ng_ether_link_state_p)(struct ifnet *ifp, int state);
112void	(*lagg_linkstate_p)(struct ifnet *ifp, int state);
113
114struct mbuf *(*tbr_dequeue_ptr)(struct ifaltq *, int) = NULL;
115
116/*
117 * XXX: Style; these should be sorted alphabetically, and unprototyped
118 * static functions should be prototyped. Currently they are sorted by
119 * declaration order.
120 */
121static void	if_attachdomain(void *);
122static void	if_attachdomain1(struct ifnet *);
123static int	ifconf(u_long, caddr_t);
124static void	if_freemulti(struct ifmultiaddr *);
125static void	if_grow(void);
126static void	if_init(void *);
127static void	if_qflush(struct ifnet *);
128static void	if_route(struct ifnet *, int flag, int fam);
129static int	if_setflag(struct ifnet *, int, int, int *, int);
130static void	if_slowtimo(void *);
131static int	if_transmit(struct ifnet *ifp, struct mbuf *m);
132static void	if_unroute(struct ifnet *, int flag, int fam);
133static void	link_rtrequest(int, struct rtentry *, struct rt_addrinfo *);
134static int	if_rtdel(struct radix_node *, void *);
135static int	ifhwioctl(u_long, struct ifnet *, caddr_t, struct thread *);
136static int	if_delmulti_locked(struct ifnet *, struct ifmultiaddr *, int);
137static void	if_start_deferred(void *context, int pending);
138static void	do_link_state_change(void *, int);
139static int	if_getgroup(struct ifgroupreq *, struct ifnet *);
140static int	if_getgroupmembers(struct ifgroupreq *);
141
142#ifdef INET6
/*
 * XXX: declared here to avoid including many inet6-related files;
 * this should probably be generalized.
 */
147extern void	nd6_setmtu(struct ifnet *);
148#endif
149
150#ifdef VIMAGE_GLOBALS
151struct	ifnethead ifnet;	/* depend on static init XXX */
152struct	ifgrouphead ifg_head;
153int	if_index;
154static	int if_indexlim;
155/* Table of ifnet/cdev by index.  Locked with ifnet_lock. */
156static struct ifindex_entry *ifindex_table;
157static struct	knlist ifklist;
158#endif
159
160int	ifqmaxlen = IFQ_MAXLEN;
161struct rwlock ifnet_lock;
162static	if_com_alloc_t *if_com_alloc[256];
163static	if_com_free_t *if_com_free[256];
164
165static void	filt_netdetach(struct knote *kn);
166static int	filt_netdev(struct knote *kn, long hint);
167
168static struct filterops netdev_filtops =
169    { 1, NULL, filt_netdetach, filt_netdev };
170
171#ifndef VIMAGE_GLOBALS
172static struct vnet_symmap vnet_net_symmap[] = {
173	VNET_SYMMAP(net, ifnet),
174	VNET_SYMMAP(net, rt_tables),
175	VNET_SYMMAP(net, rtstat),
176	VNET_SYMMAP(net, rttrash),
177	VNET_SYMMAP_END
178};
179
180VNET_MOD_DECLARE(NET, net, vnet_net_iattach, vnet_net_idetach,
181    NONE, vnet_net_symmap)
182#endif
183
184/*
185 * System initialization
186 */
187SYSINIT(interfaces, SI_SUB_INIT_IF, SI_ORDER_FIRST, if_init, NULL);
188SYSINIT(interface_check, SI_SUB_PROTO_IF, SI_ORDER_FIRST, if_slowtimo, NULL);
189
190MALLOC_DEFINE(M_IFNET, "ifnet", "interface internals");
191MALLOC_DEFINE(M_IFADDR, "ifaddr", "interface address");
192MALLOC_DEFINE(M_IFMADDR, "ether_multi", "link-level multicast address");
193
194static struct ifnet *
195ifnet_byindex_locked(u_short idx)
196{
197	INIT_VNET_NET(curvnet);
198	struct ifnet *ifp;
199
200	ifp = V_ifindex_table[idx].ife_ifnet;
201	return (ifp);
202}
203
204struct ifnet *
205ifnet_byindex(u_short idx)
206{
207	struct ifnet *ifp;
208
209	IFNET_RLOCK();
210	ifp = ifnet_byindex_locked(idx);
211	IFNET_RUNLOCK();
212	return (ifp);
213}
214
215static void
216ifnet_setbyindex(u_short idx, struct ifnet *ifp)
217{
218	INIT_VNET_NET(curvnet);
219
220	IFNET_WLOCK_ASSERT();
221
222	V_ifindex_table[idx].ife_ifnet = ifp;
223}
224
225struct ifaddr *
226ifaddr_byindex(u_short idx)
227{
228	struct ifaddr *ifa;
229
230	IFNET_RLOCK();
231	ifa = ifnet_byindex_locked(idx)->if_addr;
232	IFNET_RUNLOCK();
233	return (ifa);
234}
235
236struct cdev *
237ifdev_byindex(u_short idx)
238{
239	INIT_VNET_NET(curvnet);
240	struct cdev *cdev;
241
242	IFNET_RLOCK();
243	cdev = V_ifindex_table[idx].ife_dev;
244	IFNET_RUNLOCK();
245	return (cdev);
246}
247
248static void
249ifdev_setbyindex(u_short idx, struct cdev *cdev)
250{
251	INIT_VNET_NET(curvnet);
252
253	IFNET_WLOCK();
254	V_ifindex_table[idx].ife_dev = cdev;
255	IFNET_WUNLOCK();
256}
257
258static d_open_t		netopen;
259static d_close_t	netclose;
260static d_ioctl_t	netioctl;
261static d_kqfilter_t	netkqfilter;
262
263static struct cdevsw net_cdevsw = {
264	.d_version =	D_VERSION,
265	.d_flags =	D_NEEDGIANT,
266	.d_open =	netopen,
267	.d_close =	netclose,
268	.d_ioctl =	netioctl,
269	.d_name =	"net",
270	.d_kqfilter =	netkqfilter,
271};
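
/*
 * The "net" cdevsw above backs /dev/network (unit 0, created in
 * if_init()) and one node per interface whose unit number is the
 * interface's if_index (created in if_attach()).  For ioctls, unit 0
 * only answers SIOCGIFCONF-style requests; the per-interface nodes
 * accept interface ioctls (IOCGROUP 'i') and hand them to ifhwioctl().
 */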
272
273static int
274netopen(struct cdev *dev, int flag, int mode, struct thread *td)
275{
276	return (0);
277}
278
279static int
280netclose(struct cdev *dev, int flags, int fmt, struct thread *td)
281{
282	return (0);
283}
284
285static int
286netioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
287{
288	struct ifnet *ifp;
289	int error, idx;
290
291	/* only support interface specific ioctls */
292	if (IOCGROUP(cmd) != 'i')
293		return (EOPNOTSUPP);
294	idx = dev2unit(dev);
295	if (idx == 0) {
296		/*
297		 * special network device, not interface.
298		 */
299		if (cmd == SIOCGIFCONF)
300			return (ifconf(cmd, data));	/* XXX remove cmd */
301#ifdef __amd64__
302		if (cmd == SIOCGIFCONF32)
303			return (ifconf(cmd, data));	/* XXX remove cmd */
304#endif
305		return (EOPNOTSUPP);
306	}
307
308	ifp = ifnet_byindex(idx);
309	if (ifp == NULL)
310		return (ENXIO);
311
312	error = ifhwioctl(cmd, ifp, data, td);
313	if (error == ENOIOCTL)
314		error = EOPNOTSUPP;
315	return (error);
316}
317
318static int
319netkqfilter(struct cdev *dev, struct knote *kn)
320{
321	INIT_VNET_NET(curvnet);
322	struct knlist *klist;
323	struct ifnet *ifp;
324	int idx;
325
326	switch (kn->kn_filter) {
327	case EVFILT_NETDEV:
328		kn->kn_fop = &netdev_filtops;
329		break;
330	default:
331		return (EINVAL);
332	}
333
334	idx = dev2unit(dev);
335	if (idx == 0) {
336		klist = &V_ifklist;
337	} else {
338		ifp = ifnet_byindex(idx);
339		if (ifp == NULL)
340			return (1);
341		klist = &ifp->if_klist;
342	}
343
344	kn->kn_hook = (caddr_t)klist;
345
346	knlist_add(klist, kn, 0);
347
348	return (0);
349}
350
351static void
352filt_netdetach(struct knote *kn)
353{
354	struct knlist *klist = (struct knlist *)kn->kn_hook;
355
356	knlist_remove(klist, kn, 0);
357}
358
359static int
360filt_netdev(struct knote *kn, long hint)
361{
362	struct knlist *klist = (struct knlist *)kn->kn_hook;
363
364	/*
365	 * Currently NOTE_EXIT is abused to indicate device detach.
366	 */
367	if (hint == NOTE_EXIT) {
368		kn->kn_data = NOTE_LINKINV;
369		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
370		knlist_remove_inevent(klist, kn);
371		return (1);
372	}
373	if (hint != 0)
374		kn->kn_data = hint;			/* current status */
375	if (kn->kn_sfflags & hint)
376		kn->kn_fflags |= hint;
377	return (kn->kn_fflags != 0);
378}
379
380/*
381 * Network interface utility routines.
382 *
383 * Routines with ifa_ifwith* names take sockaddr *'s as
384 * parameters.
385 */
386
387/* ARGSUSED*/
388static void
389if_init(void *dummy __unused)
390{
391	INIT_VNET_NET(curvnet);
392
393#ifndef VIMAGE_GLOBALS
394	vnet_mod_register(&vnet_net_modinfo);
395#endif
396
397	V_if_index = 0;
398	V_ifindex_table = NULL;
399	V_if_indexlim = 8;
400
401	IFNET_LOCK_INIT();
402	TAILQ_INIT(&V_ifnet);
403	TAILQ_INIT(&V_ifg_head);
404	knlist_init(&V_ifklist, NULL, NULL, NULL, NULL);
405	if_grow();				/* create initial table */
406	ifdev_setbyindex(0, make_dev(&net_cdevsw, 0, UID_ROOT, GID_WHEEL,
407	    0600, "network"));
408	if_clone_init();
409}
410
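/*
 * Grow the ifnet/cdev index table.  The limit starts at 8 (set in
 * if_init()) and doubles on every call; since n is computed from the
 * already doubled limit, the old entries occupy exactly n/2 bytes of
 * the new allocation, which is what gets copied below.
 */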
411static void
412if_grow(void)
413{
414	INIT_VNET_NET(curvnet);
415	u_int n;
416	struct ifindex_entry *e;
417
418	V_if_indexlim <<= 1;
419	n = V_if_indexlim * sizeof(*e);
420	e = malloc(n, M_IFNET, M_WAITOK | M_ZERO);
421	if (V_ifindex_table != NULL) {
422		memcpy((caddr_t)e, (caddr_t)V_ifindex_table, n/2);
423		free((caddr_t)V_ifindex_table, M_IFNET);
424	}
425	V_ifindex_table = e;
426}
427
428/*
429 * Allocate a struct ifnet and an index for an interface.  A layer 2
430 * common structure will also be allocated if an allocation routine is
431 * registered for the passed type.
432 */
433struct ifnet*
434if_alloc(u_char type)
435{
436	INIT_VNET_NET(curvnet);
437	struct ifnet *ifp;
438
439	ifp = malloc(sizeof(struct ifnet), M_IFNET, M_WAITOK|M_ZERO);
440
441	/*
442	 * Try to find an empty slot below if_index.  If we fail, take
443	 * the next slot.
444	 *
445	 * XXX: should be locked!
446	 */
447	for (ifp->if_index = 1; ifp->if_index <= V_if_index; ifp->if_index++) {
448		if (ifnet_byindex(ifp->if_index) == NULL)
449			break;
450	}
451	/* Catch if_index overflow. */
452	if (ifp->if_index < 1) {
453		free(ifp, M_IFNET);
454		return (NULL);
455	}
456	if (ifp->if_index > V_if_index)
457		V_if_index = ifp->if_index;
458	if (V_if_index >= V_if_indexlim)
459		if_grow();
460
461	ifp->if_type = type;
462
463	if (if_com_alloc[type] != NULL) {
464		ifp->if_l2com = if_com_alloc[type](type, ifp);
465		if (ifp->if_l2com == NULL) {
466			free(ifp, M_IFNET);
467			return (NULL);
468		}
469	}
470	IFNET_WLOCK();
471	ifnet_setbyindex(ifp->if_index, ifp);
472	IFNET_WUNLOCK();
473	IF_ADDR_LOCK_INIT(ifp);
474
475	return (ifp);
476}
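
/*
 * Illustrative sketch (not part of the KPI contract) of how an Ethernet
 * driver typically pairs with if_alloc(); "sc", "unit", "lladdr" and the
 * foo_*() handlers are placeholders and are not defined here:
 *
 *	ifp = if_alloc(IFT_ETHER);
 *	if (ifp == NULL)
 *		return (ENOSPC);
 *	ifp->if_softc = sc;
 *	if_initname(ifp, "foo", unit);
 *	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
 *	ifp->if_ioctl = foo_ioctl;
 *	ifp->if_init = foo_init;
 *	ifp->if_start = foo_start;
 *	ether_ifattach(ifp, lladdr);
 *
 * ether_ifattach() calls if_attach() internally; an interface that
 * never reaches if_attach() must release its slot with if_free() (or
 * if_free_type() if if_type was changed in the meantime).
 */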
477
478/*
479 * Free the struct ifnet, the associated index, and the layer 2 common
480 * structure if needed.  All the work is done in if_free_type().
481 *
482 * Do not add code to this function!  Add it to if_free_type().
483 */
484void
485if_free(struct ifnet *ifp)
486{
487
488	if_free_type(ifp, ifp->if_type);
489}
490
/*
 * Do the actual work of freeing a struct ifnet, associated index, and
 * layer 2 common structure.  This version should only be called by
 * interfaces that switch their type after calling if_alloc().
 */
496void
497if_free_type(struct ifnet *ifp, u_char type)
498{
499	INIT_VNET_NET(curvnet); /* ifp->if_vnet can be NULL here ! */
500
501	if (ifp != ifnet_byindex(ifp->if_index)) {
502		if_printf(ifp, "%s: value was not if_alloced, skipping\n",
503		    __func__);
504		return;
505	}
506
507	IFNET_WLOCK();
508	ifnet_setbyindex(ifp->if_index, NULL);
509
510	/* XXX: should be locked with if_findindex() */
511	while (V_if_index > 0 && ifnet_byindex_locked(V_if_index) == NULL)
512		V_if_index--;
513	IFNET_WUNLOCK();
514
515	if (if_com_free[type] != NULL)
516		if_com_free[type](ifp->if_l2com, type);
517
518	IF_ADDR_LOCK_DESTROY(ifp);
519	free(ifp, M_IFNET);
}
521
522void
523ifq_attach(struct ifaltq *ifq, struct ifnet *ifp)
524{
525
526	mtx_init(&ifq->ifq_mtx, ifp->if_xname, "if send queue", MTX_DEF);
527
528	if (ifq->ifq_maxlen == 0)
529		ifq->ifq_maxlen = ifqmaxlen;
530
531	ifq->altq_type = 0;
532	ifq->altq_disc = NULL;
533	ifq->altq_flags &= ALTQF_CANTCHANGE;
534	ifq->altq_tbr  = NULL;
535	ifq->altq_ifp  = ifp;
536}
537
538void
539ifq_detach(struct ifaltq *ifq)
540{
541	mtx_destroy(&ifq->ifq_mtx);
542}
543
/*
 * Perform generic interface initialization tasks and attach the interface
 * to the list of "active" interfaces.
 *
 * XXX:
 *  - The decision to return void and thus require this function to
 *    succeed is questionable.
 *  - We do more initialization here than is probably a good idea.
 *    Some of this should probably move to if_alloc().
 *  - We should probably do more sanity checking.  For instance we don't
 *    do anything to ensure if_xname is unique or non-empty.
 */
556void
557if_attach(struct ifnet *ifp)
558{
559	INIT_VNET_NET(curvnet);
560	unsigned socksize, ifasize;
561	int namelen, masklen;
562	struct sockaddr_dl *sdl;
563	struct ifaddr *ifa;
564
565	if (ifp->if_index == 0 || ifp != ifnet_byindex(ifp->if_index))
566		panic ("%s: BUG: if_attach called without if_alloc'd input()\n",
567		    ifp->if_xname);
568
569	TASK_INIT(&ifp->if_starttask, 0, if_start_deferred, ifp);
570	TASK_INIT(&ifp->if_linktask, 0, do_link_state_change, ifp);
571	IF_AFDATA_LOCK_INIT(ifp);
572	ifp->if_afdata_initialized = 0;
573
574	TAILQ_INIT(&ifp->if_addrhead);
575	TAILQ_INIT(&ifp->if_prefixhead);
576	TAILQ_INIT(&ifp->if_multiaddrs);
577	TAILQ_INIT(&ifp->if_groups);
578
579	if_addgroup(ifp, IFG_ALL);
580
581	knlist_init(&ifp->if_klist, NULL, NULL, NULL, NULL);
582	getmicrotime(&ifp->if_lastchange);
583	ifp->if_data.ifi_epoch = time_uptime;
584	ifp->if_data.ifi_datalen = sizeof(struct if_data);
585	ifp->if_transmit = if_transmit;
586	ifp->if_qflush = if_qflush;
587#ifdef MAC
588	mac_ifnet_init(ifp);
589	mac_ifnet_create(ifp);
590#endif
591
592	ifdev_setbyindex(ifp->if_index, make_dev(&net_cdevsw,
593	    ifp->if_index, UID_ROOT, GID_WHEEL, 0600, "%s/%s",
594	    net_cdevsw.d_name, ifp->if_xname));
595	make_dev_alias(ifdev_byindex(ifp->if_index), "%s%d",
596	    net_cdevsw.d_name, ifp->if_index);
597
598	ifq_attach(&ifp->if_snd, ifp);
599
	/*
	 * Create a link-level name for this device.
	 */
	namelen = strlen(ifp->if_xname);
	/*
	 * Always save enough space for any possible name so we can do
	 * a rename in place later.
	 */
608	masklen = offsetof(struct sockaddr_dl, sdl_data[0]) + IFNAMSIZ;
609	socksize = masklen + ifp->if_addrlen;
610	if (socksize < sizeof(*sdl))
611		socksize = sizeof(*sdl);
612	socksize = roundup2(socksize, sizeof(long));
613	ifasize = sizeof(*ifa) + 2 * socksize;
614	ifa = malloc(ifasize, M_IFADDR, M_WAITOK | M_ZERO);
615	IFA_LOCK_INIT(ifa);
616	sdl = (struct sockaddr_dl *)(ifa + 1);
617	sdl->sdl_len = socksize;
618	sdl->sdl_family = AF_LINK;
619	bcopy(ifp->if_xname, sdl->sdl_data, namelen);
620	sdl->sdl_nlen = namelen;
621	sdl->sdl_index = ifp->if_index;
622	sdl->sdl_type = ifp->if_type;
623	ifp->if_addr = ifa;
624	ifa->ifa_ifp = ifp;
625	ifa->ifa_rtrequest = link_rtrequest;
626	ifa->ifa_addr = (struct sockaddr *)sdl;
627	sdl = (struct sockaddr_dl *)(socksize + (caddr_t)sdl);
628	ifa->ifa_netmask = (struct sockaddr *)sdl;
629	sdl->sdl_len = masklen;
630	while (namelen != 0)
631		sdl->sdl_data[--namelen] = 0xff;
632	ifa->ifa_refcnt = 1;
633	TAILQ_INSERT_HEAD(&ifp->if_addrhead, ifa, ifa_link);
634	ifp->if_broadcastaddr = NULL; /* reliably crash if used uninitialized */
635
636
637	IFNET_WLOCK();
638	TAILQ_INSERT_TAIL(&V_ifnet, ifp, if_link);
639	IFNET_WUNLOCK();
640
641	if (domain_init_status >= 2)
642		if_attachdomain1(ifp);
643
644	EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
645	devctl_notify("IFNET", ifp->if_xname, "ATTACH", NULL);
646
647	/* Announce the interface. */
648	rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
649
650	if (ifp->if_watchdog != NULL)
651		if_printf(ifp,
652		    "WARNING: using obsoleted if_watchdog interface\n");
653	if (ifp->if_flags & IFF_NEEDSGIANT)
654		if_printf(ifp,
655		    "WARNING: using obsoleted IFF_NEEDSGIANT flag\n");
656}
657
658static void
659if_attachdomain(void *dummy)
660{
661	INIT_VNET_NET(curvnet);
662	struct ifnet *ifp;
663	int s;
664
665	s = splnet();
666	TAILQ_FOREACH(ifp, &V_ifnet, if_link)
667		if_attachdomain1(ifp);
668	splx(s);
669}
670SYSINIT(domainifattach, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_SECOND,
671    if_attachdomain, NULL);
672
673static void
674if_attachdomain1(struct ifnet *ifp)
675{
676	struct domain *dp;
677	int s;
678
679	s = splnet();
680
	/*
	 * Since dp->dom_ifattach calls malloc() with M_WAITOK, we
	 * cannot entirely lock ifp->if_afdata initialization.
	 */
685	if (IF_AFDATA_TRYLOCK(ifp) == 0) {
686		splx(s);
687		return;
688	}
689	if (ifp->if_afdata_initialized >= domain_init_status) {
690		IF_AFDATA_UNLOCK(ifp);
691		splx(s);
692		printf("if_attachdomain called more than once on %s\n",
693		    ifp->if_xname);
694		return;
695	}
696	ifp->if_afdata_initialized = domain_init_status;
697	IF_AFDATA_UNLOCK(ifp);
698
699	/* address family dependent data region */
700	bzero(ifp->if_afdata, sizeof(ifp->if_afdata));
701	for (dp = domains; dp; dp = dp->dom_next) {
702		if (dp->dom_ifattach)
703			ifp->if_afdata[dp->dom_family] =
704			    (*dp->dom_ifattach)(ifp);
705	}
706
707	splx(s);
708}
709
710/*
711 * Remove any unicast or broadcast network addresses from an interface.
712 */
713void
714if_purgeaddrs(struct ifnet *ifp)
715{
716	struct ifaddr *ifa, *next;
717
718	TAILQ_FOREACH_SAFE(ifa, &ifp->if_addrhead, ifa_link, next) {
719		if (ifa->ifa_addr->sa_family == AF_LINK)
720			continue;
721#ifdef INET
722		/* XXX: Ugly!! ad hoc just for INET */
723		if (ifa->ifa_addr->sa_family == AF_INET) {
724			struct ifaliasreq ifr;
725
726			bzero(&ifr, sizeof(ifr));
727			ifr.ifra_addr = *ifa->ifa_addr;
728			if (ifa->ifa_dstaddr)
729				ifr.ifra_broadaddr = *ifa->ifa_dstaddr;
730			if (in_control(NULL, SIOCDIFADDR, (caddr_t)&ifr, ifp,
731			    NULL) == 0)
732				continue;
733		}
734#endif /* INET */
735#ifdef INET6
736		if (ifa->ifa_addr->sa_family == AF_INET6) {
737			in6_purgeaddr(ifa);
738			/* ifp_addrhead is already updated */
739			continue;
740		}
741#endif /* INET6 */
742		TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
743		IFAFREE(ifa);
744	}
745}
746
747/*
748 * Remove any multicast network addresses from an interface.
749 */
750void
751if_purgemaddrs(struct ifnet *ifp)
752{
753	struct ifmultiaddr *ifma;
754	struct ifmultiaddr *next;
755
756	IF_ADDR_LOCK(ifp);
757	TAILQ_FOREACH_SAFE(ifma, &ifp->if_multiaddrs, ifma_link, next)
758		if_delmulti_locked(ifp, ifma, 1);
759	IF_ADDR_UNLOCK(ifp);
760}
761
762/*
763 * Detach an interface, removing it from the
764 * list of "active" interfaces.
765 *
766 * XXXRW: There are some significant questions about event ordering, and
767 * how to prevent things from starting to use the interface during detach.
768 */
769void
770if_detach(struct ifnet *ifp)
771{
772	INIT_VNET_NET(ifp->if_vnet);
773	struct ifaddr *ifa;
774	struct radix_node_head	*rnh;
775	int s, i, j;
776	struct domain *dp;
777 	struct ifnet *iter;
778 	int found = 0;
779
780	IFNET_WLOCK();
781	TAILQ_FOREACH(iter, &V_ifnet, if_link)
782		if (iter == ifp) {
783			TAILQ_REMOVE(&V_ifnet, ifp, if_link);
784			found = 1;
785			break;
786		}
787	IFNET_WUNLOCK();
788	if (!found)
789		return;
790
791	/*
792	 * Remove/wait for pending events.
793	 */
794	taskqueue_drain(taskqueue_swi, &ifp->if_linktask);
795
796	/*
797	 * Remove routes and flush queues.
798	 */
799	s = splnet();
800	if_down(ifp);
801#ifdef ALTQ
802	if (ALTQ_IS_ENABLED(&ifp->if_snd))
803		altq_disable(&ifp->if_snd);
804	if (ALTQ_IS_ATTACHED(&ifp->if_snd))
805		altq_detach(&ifp->if_snd);
806#endif
807
808	if_purgeaddrs(ifp);
809
810#ifdef INET
811	in_ifdetach(ifp);
812#endif
813
814#ifdef INET6
	/*
	 * Remove all IPv6 kernel structs related to ifp.  This should be done
	 * before removing routing entries below, since IPv6 interface direct
	 * routes are expected to be removed by the IPv6-specific kernel API.
	 * Otherwise, the kernel will detect the inconsistency and complain
	 * about it.
	 */
821	in6_ifdetach(ifp);
822#endif
823	if_purgemaddrs(ifp);
824
825	/*
826	 * Remove link ifaddr pointer and maybe decrement if_index.
827	 * Clean up all addresses.
828	 */
829	ifp->if_addr = NULL;
830	destroy_dev(ifdev_byindex(ifp->if_index));
831	ifdev_setbyindex(ifp->if_index, NULL);
832
833	/* We can now free link ifaddr. */
834	if (!TAILQ_EMPTY(&ifp->if_addrhead)) {
835		ifa = TAILQ_FIRST(&ifp->if_addrhead);
836		TAILQ_REMOVE(&ifp->if_addrhead, ifa, ifa_link);
837		IFAFREE(ifa);
838	}
839
	/*
	 * Delete all remaining routes using this interface.
	 * Unfortunately the only way to do this is to slog through
	 * the entire routing table looking for routes which point
	 * to this interface... oh well.
	 */
846	for (i = 1; i <= AF_MAX; i++) {
847		for (j = 0; j < rt_numfibs; j++) {
848			if ((rnh = V_rt_tables[j][i]) == NULL)
849				continue;
850			RADIX_NODE_HEAD_LOCK(rnh);
851			(void) rnh->rnh_walktree(rnh, if_rtdel, ifp);
852			RADIX_NODE_HEAD_UNLOCK(rnh);
853		}
854	}
855
856	/* Announce that the interface is gone. */
857	rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
858	EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
859	devctl_notify("IFNET", ifp->if_xname, "DETACH", NULL);
860
861	IF_AFDATA_LOCK(ifp);
862	for (dp = domains; dp; dp = dp->dom_next) {
863		if (dp->dom_ifdetach && ifp->if_afdata[dp->dom_family])
864			(*dp->dom_ifdetach)(ifp,
865			    ifp->if_afdata[dp->dom_family]);
866	}
867	IF_AFDATA_UNLOCK(ifp);
868
869#ifdef MAC
870	mac_ifnet_destroy(ifp);
871#endif /* MAC */
872	KNOTE_UNLOCKED(&ifp->if_klist, NOTE_EXIT);
873	knlist_clear(&ifp->if_klist, 0);
874	knlist_destroy(&ifp->if_klist);
875	ifq_detach(&ifp->if_snd);
876	IF_AFDATA_DESTROY(ifp);
877	splx(s);
878}
879
880/*
881 * Add a group to an interface
882 */
883int
884if_addgroup(struct ifnet *ifp, const char *groupname)
885{
886	INIT_VNET_NET(ifp->if_vnet);
887	struct ifg_list		*ifgl;
888	struct ifg_group	*ifg = NULL;
889	struct ifg_member	*ifgm;
890
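	/*
	 * Group names may not end in a digit (e.g. "egress" is accepted
	 * below, "em0" is rejected); this keeps group names from being
	 * mistaken for interface names.
	 */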
891	if (groupname[0] && groupname[strlen(groupname) - 1] >= '0' &&
892	    groupname[strlen(groupname) - 1] <= '9')
893		return (EINVAL);
894
895	IFNET_WLOCK();
896	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
897		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname)) {
898			IFNET_WUNLOCK();
899			return (EEXIST);
900		}
901
902	if ((ifgl = (struct ifg_list *)malloc(sizeof(struct ifg_list), M_TEMP,
903	    M_NOWAIT)) == NULL) {
904	    	IFNET_WUNLOCK();
905		return (ENOMEM);
906	}
907
908	if ((ifgm = (struct ifg_member *)malloc(sizeof(struct ifg_member),
909	    M_TEMP, M_NOWAIT)) == NULL) {
910		free(ifgl, M_TEMP);
911		IFNET_WUNLOCK();
912		return (ENOMEM);
913	}
914
915	TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
916		if (!strcmp(ifg->ifg_group, groupname))
917			break;
918
919	if (ifg == NULL) {
920		if ((ifg = (struct ifg_group *)malloc(sizeof(struct ifg_group),
921		    M_TEMP, M_NOWAIT)) == NULL) {
922			free(ifgl, M_TEMP);
923			free(ifgm, M_TEMP);
924			IFNET_WUNLOCK();
925			return (ENOMEM);
926		}
927		strlcpy(ifg->ifg_group, groupname, sizeof(ifg->ifg_group));
928		ifg->ifg_refcnt = 0;
929		TAILQ_INIT(&ifg->ifg_members);
930		EVENTHANDLER_INVOKE(group_attach_event, ifg);
931		TAILQ_INSERT_TAIL(&V_ifg_head, ifg, ifg_next);
932	}
933
934	ifg->ifg_refcnt++;
935	ifgl->ifgl_group = ifg;
936	ifgm->ifgm_ifp = ifp;
937
938	IF_ADDR_LOCK(ifp);
939	TAILQ_INSERT_TAIL(&ifg->ifg_members, ifgm, ifgm_next);
940	TAILQ_INSERT_TAIL(&ifp->if_groups, ifgl, ifgl_next);
941	IF_ADDR_UNLOCK(ifp);
942
943	IFNET_WUNLOCK();
944
945	EVENTHANDLER_INVOKE(group_change_event, groupname);
946
947	return (0);
948}
949
950/*
951 * Remove a group from an interface
952 */
953int
954if_delgroup(struct ifnet *ifp, const char *groupname)
955{
956	INIT_VNET_NET(ifp->if_vnet);
957	struct ifg_list		*ifgl;
958	struct ifg_member	*ifgm;
959
960	IFNET_WLOCK();
961	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
962		if (!strcmp(ifgl->ifgl_group->ifg_group, groupname))
963			break;
964	if (ifgl == NULL) {
965		IFNET_WUNLOCK();
966		return (ENOENT);
967	}
968
969	IF_ADDR_LOCK(ifp);
970	TAILQ_REMOVE(&ifp->if_groups, ifgl, ifgl_next);
971	IF_ADDR_UNLOCK(ifp);
972
973	TAILQ_FOREACH(ifgm, &ifgl->ifgl_group->ifg_members, ifgm_next)
974		if (ifgm->ifgm_ifp == ifp)
975			break;
976
977	if (ifgm != NULL) {
978		TAILQ_REMOVE(&ifgl->ifgl_group->ifg_members, ifgm, ifgm_next);
979		free(ifgm, M_TEMP);
980	}
981
982	if (--ifgl->ifgl_group->ifg_refcnt == 0) {
983		TAILQ_REMOVE(&V_ifg_head, ifgl->ifgl_group, ifg_next);
984		EVENTHANDLER_INVOKE(group_detach_event, ifgl->ifgl_group);
985		free(ifgl->ifgl_group, M_TEMP);
986	}
987	IFNET_WUNLOCK();
988
989	free(ifgl, M_TEMP);
990
991	EVENTHANDLER_INVOKE(group_change_event, groupname);
992
993	return (0);
994}
995
/*
 * Store all the groups an interface is a member of in the memory
 * pointed to by data.
 */
1000static int
1001if_getgroup(struct ifgroupreq *data, struct ifnet *ifp)
1002{
1003	int			 len, error;
1004	struct ifg_list		*ifgl;
1005	struct ifg_req		 ifgrq, *ifgp;
1006	struct ifgroupreq	*ifgr = data;
1007
1008	if (ifgr->ifgr_len == 0) {
1009		IF_ADDR_LOCK(ifp);
1010		TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next)
1011			ifgr->ifgr_len += sizeof(struct ifg_req);
1012		IF_ADDR_UNLOCK(ifp);
1013		return (0);
1014	}
1015
1016	len = ifgr->ifgr_len;
1017	ifgp = ifgr->ifgr_groups;
1018	/* XXX: wire */
1019	IF_ADDR_LOCK(ifp);
1020	TAILQ_FOREACH(ifgl, &ifp->if_groups, ifgl_next) {
1021		if (len < sizeof(ifgrq)) {
1022			IF_ADDR_UNLOCK(ifp);
1023			return (EINVAL);
1024		}
1025		bzero(&ifgrq, sizeof ifgrq);
1026		strlcpy(ifgrq.ifgrq_group, ifgl->ifgl_group->ifg_group,
1027		    sizeof(ifgrq.ifgrq_group));
1028		if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
1029		    	IF_ADDR_UNLOCK(ifp);
1030			return (error);
1031		}
1032		len -= sizeof(ifgrq);
1033		ifgp++;
1034	}
1035	IF_ADDR_UNLOCK(ifp);
1036
1037	return (0);
1038}
1039
/*
 * Store all the members of a group in the memory pointed to by data.
 */
1043static int
1044if_getgroupmembers(struct ifgroupreq *data)
1045{
1046	INIT_VNET_NET(curvnet);
1047	struct ifgroupreq	*ifgr = data;
1048	struct ifg_group	*ifg;
1049	struct ifg_member	*ifgm;
1050	struct ifg_req		 ifgrq, *ifgp;
1051	int			 len, error;
1052
1053	IFNET_RLOCK();
1054	TAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
1055		if (!strcmp(ifg->ifg_group, ifgr->ifgr_name))
1056			break;
1057	if (ifg == NULL) {
1058		IFNET_RUNLOCK();
1059		return (ENOENT);
1060	}
1061
1062	if (ifgr->ifgr_len == 0) {
1063		TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next)
1064			ifgr->ifgr_len += sizeof(ifgrq);
1065		IFNET_RUNLOCK();
1066		return (0);
1067	}
1068
1069	len = ifgr->ifgr_len;
1070	ifgp = ifgr->ifgr_groups;
1071	TAILQ_FOREACH(ifgm, &ifg->ifg_members, ifgm_next) {
1072		if (len < sizeof(ifgrq)) {
1073			IFNET_RUNLOCK();
1074			return (EINVAL);
1075		}
1076		bzero(&ifgrq, sizeof ifgrq);
1077		strlcpy(ifgrq.ifgrq_member, ifgm->ifgm_ifp->if_xname,
1078		    sizeof(ifgrq.ifgrq_member));
1079		if ((error = copyout(&ifgrq, ifgp, sizeof(struct ifg_req)))) {
1080			IFNET_RUNLOCK();
1081			return (error);
1082		}
1083		len -= sizeof(ifgrq);
1084		ifgp++;
1085	}
1086	IFNET_RUNLOCK();
1087
1088	return (0);
1089}
1090
1091/*
1092 * Delete Routes for a Network Interface
1093 *
1094 * Called for each routing entry via the rnh->rnh_walktree() call above
1095 * to delete all route entries referencing a detaching network interface.
1096 *
1097 * Arguments:
1098 *	rn	pointer to node in the routing table
1099 *	arg	argument passed to rnh->rnh_walktree() - detaching interface
1100 *
1101 * Returns:
1102 *	0	successful
1103 *	errno	failed - reason indicated
1104 *
1105 */
1106static int
1107if_rtdel(struct radix_node *rn, void *arg)
1108{
1109	struct rtentry	*rt = (struct rtentry *)rn;
1110	struct ifnet	*ifp = arg;
1111	int		err;
1112
1113	if (rt->rt_ifp == ifp) {
1114
1115		/*
1116		 * Protect (sorta) against walktree recursion problems
1117		 * with cloned routes
1118		 */
1119		if ((rt->rt_flags & RTF_UP) == 0)
1120			return (0);
1121
1122		err = rtrequest_fib(RTM_DELETE, rt_key(rt), rt->rt_gateway,
1123				rt_mask(rt), rt->rt_flags|RTF_RNH_LOCKED,
1124				(struct rtentry **) NULL, rt->rt_fibnum);
1125		if (err) {
1126			log(LOG_WARNING, "if_rtdel: error %d\n", err);
1127		}
1128	}
1129
1130	return (0);
1131}
1132
1133/*
1134 * XXX: Because sockaddr_dl has deeper structure than the sockaddr
1135 * structs used to represent other address families, it is necessary
1136 * to perform a different comparison.
1137 */
1138
1139#define	sa_equal(a1, a2)	\
1140	(bcmp((a1), (a2), ((a1))->sa_len) == 0)
1141
1142#define	sa_dl_equal(a1, a2)	\
1143	((((struct sockaddr_dl *)(a1))->sdl_len ==			\
1144	 ((struct sockaddr_dl *)(a2))->sdl_len) &&			\
1145	 (bcmp(LLADDR((struct sockaddr_dl *)(a1)),			\
1146	       LLADDR((struct sockaddr_dl *)(a2)),			\
1147	       ((struct sockaddr_dl *)(a1))->sdl_alen) == 0))
1148
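/*
 * Note that sa_equal() compares the whole sockaddr byte for byte,
 * while sa_dl_equal() compares only the hardware address bytes
 * (LLADDR) of two AF_LINK sockaddrs, ignoring the embedded interface
 * name.
 */
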
1149/*
1150 * Locate an interface based on a complete address.
1151 */
1152/*ARGSUSED*/
1153struct ifaddr *
1154ifa_ifwithaddr(struct sockaddr *addr)
1155{
1156	INIT_VNET_NET(curvnet);
1157	struct ifnet *ifp;
1158	struct ifaddr *ifa;
1159
1160	IFNET_RLOCK();
1161	TAILQ_FOREACH(ifp, &V_ifnet, if_link)
1162		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1163			if (ifa->ifa_addr->sa_family != addr->sa_family)
1164				continue;
1165			if (sa_equal(addr, ifa->ifa_addr))
1166				goto done;
1167			/* IP6 doesn't have broadcast */
1168			if ((ifp->if_flags & IFF_BROADCAST) &&
1169			    ifa->ifa_broadaddr &&
1170			    ifa->ifa_broadaddr->sa_len != 0 &&
1171			    sa_equal(ifa->ifa_broadaddr, addr))
1172				goto done;
1173		}
1174	ifa = NULL;
1175done:
1176	IFNET_RUNLOCK();
1177	return (ifa);
1178}
1179
1180/*
1181 * Locate an interface based on the broadcast address.
1182 */
1183/* ARGSUSED */
1184struct ifaddr *
1185ifa_ifwithbroadaddr(struct sockaddr *addr)
1186{
1187	INIT_VNET_NET(curvnet);
1188	struct ifnet *ifp;
1189	struct ifaddr *ifa;
1190
1191	IFNET_RLOCK();
1192	TAILQ_FOREACH(ifp, &V_ifnet, if_link)
1193		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1194			if (ifa->ifa_addr->sa_family != addr->sa_family)
1195				continue;
1196			if ((ifp->if_flags & IFF_BROADCAST) &&
1197			    ifa->ifa_broadaddr &&
1198			    ifa->ifa_broadaddr->sa_len != 0 &&
1199			    sa_equal(ifa->ifa_broadaddr, addr))
1200				goto done;
1201		}
1202	ifa = NULL;
1203done:
1204	IFNET_RUNLOCK();
1205	return (ifa);
1206}
1207
1208/*
1209 * Locate the point to point interface with a given destination address.
1210 */
1211/*ARGSUSED*/
1212struct ifaddr *
1213ifa_ifwithdstaddr(struct sockaddr *addr)
1214{
1215	INIT_VNET_NET(curvnet);
1216	struct ifnet *ifp;
1217	struct ifaddr *ifa;
1218
1219	IFNET_RLOCK();
1220	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1221		if ((ifp->if_flags & IFF_POINTOPOINT) == 0)
1222			continue;
1223		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1224			if (ifa->ifa_addr->sa_family != addr->sa_family)
1225				continue;
1226			if (ifa->ifa_dstaddr != NULL &&
1227			    sa_equal(addr, ifa->ifa_dstaddr))
1228				goto done;
1229		}
1230	}
1231	ifa = NULL;
1232done:
1233	IFNET_RUNLOCK();
1234	return (ifa);
1235}
1236
/*
 * Find an interface on a specific network.  If several match, the
 * most specific one found is chosen.
 */
1241struct ifaddr *
1242ifa_ifwithnet(struct sockaddr *addr)
1243{
1244	INIT_VNET_NET(curvnet);
1245	struct ifnet *ifp;
1246	struct ifaddr *ifa;
1247	struct ifaddr *ifa_maybe = (struct ifaddr *) 0;
1248	u_int af = addr->sa_family;
1249	char *addr_data = addr->sa_data, *cplim;
1250
1251	/*
1252	 * AF_LINK addresses can be looked up directly by their index number,
1253	 * so do that if we can.
1254	 */
1255	if (af == AF_LINK) {
1256	    struct sockaddr_dl *sdl = (struct sockaddr_dl *)addr;
1257	    if (sdl->sdl_index && sdl->sdl_index <= V_if_index)
1258		return (ifaddr_byindex(sdl->sdl_index));
1259	}
1260
	/*
	 * Scan through each interface, looking for ones that have
	 * addresses in this address family.
	 */
1265	IFNET_RLOCK();
1266	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1267		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1268			char *cp, *cp2, *cp3;
1269
1270			if (ifa->ifa_addr->sa_family != af)
1271next:				continue;
1272			if (af == AF_INET && ifp->if_flags & IFF_POINTOPOINT) {
1273				/*
1274				 * This is a bit broken as it doesn't
1275				 * take into account that the remote end may
1276				 * be a single node in the network we are
1277				 * looking for.
1278				 * The trouble is that we don't know the
1279				 * netmask for the remote end.
1280				 */
1281				if (ifa->ifa_dstaddr != NULL &&
1282				    sa_equal(addr, ifa->ifa_dstaddr))
1283					goto done;
1284			} else {
1285				/*
1286				 * if we have a special address handler,
1287				 * then use it instead of the generic one.
1288				 */
1289				if (ifa->ifa_claim_addr) {
1290					if ((*ifa->ifa_claim_addr)(ifa, addr))
1291						goto done;
1292					continue;
1293				}
1294
				/*
				 * Scan all the bits in the ifa's address.
				 * If a bit disagrees with what we are
				 * looking for, mask it with the netmask
				 * to see if it really matters.
				 * (A byte at a time.)
				 */
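				/*
				 * For example, an AF_INET ifa of
				 * 192.0.2.1 with netmask 255.255.255.0
				 * matches a candidate address of
				 * 192.0.2.7: every masked byte of
				 * (addr ^ ifa) is zero.
				 */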
1302				if (ifa->ifa_netmask == 0)
1303					continue;
1304				cp = addr_data;
1305				cp2 = ifa->ifa_addr->sa_data;
1306				cp3 = ifa->ifa_netmask->sa_data;
1307				cplim = ifa->ifa_netmask->sa_len
1308					+ (char *)ifa->ifa_netmask;
1309				while (cp3 < cplim)
1310					if ((*cp++ ^ *cp2++) & *cp3++)
1311						goto next; /* next address! */
1312				/*
1313				 * If the netmask of what we just found
1314				 * is more specific than what we had before
1315				 * (if we had one) then remember the new one
1316				 * before continuing to search
1317				 * for an even better one.
1318				 */
1319				if (ifa_maybe == 0 ||
1320				    rn_refines((caddr_t)ifa->ifa_netmask,
1321				    (caddr_t)ifa_maybe->ifa_netmask))
1322					ifa_maybe = ifa;
1323			}
1324		}
1325	}
1326	ifa = ifa_maybe;
1327done:
1328	IFNET_RUNLOCK();
1329	return (ifa);
1330}
1331
1332/*
1333 * Find an interface address specific to an interface best matching
1334 * a given address.
1335 */
1336struct ifaddr *
1337ifaof_ifpforaddr(struct sockaddr *addr, struct ifnet *ifp)
1338{
1339	struct ifaddr *ifa;
1340	char *cp, *cp2, *cp3;
1341	char *cplim;
1342	struct ifaddr *ifa_maybe = 0;
1343	u_int af = addr->sa_family;
1344
1345	if (af >= AF_MAX)
1346		return (0);
1347	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1348		if (ifa->ifa_addr->sa_family != af)
1349			continue;
1350		if (ifa_maybe == 0)
1351			ifa_maybe = ifa;
1352		if (ifa->ifa_netmask == 0) {
1353			if (sa_equal(addr, ifa->ifa_addr) ||
1354			    (ifa->ifa_dstaddr &&
1355			    sa_equal(addr, ifa->ifa_dstaddr)))
1356				goto done;
1357			continue;
1358		}
1359		if (ifp->if_flags & IFF_POINTOPOINT) {
1360			if (sa_equal(addr, ifa->ifa_dstaddr))
1361				goto done;
1362		} else {
1363			cp = addr->sa_data;
1364			cp2 = ifa->ifa_addr->sa_data;
1365			cp3 = ifa->ifa_netmask->sa_data;
1366			cplim = ifa->ifa_netmask->sa_len + (char *)ifa->ifa_netmask;
1367			for (; cp3 < cplim; cp3++)
1368				if ((*cp++ ^ *cp2++) & *cp3)
1369					break;
1370			if (cp3 == cplim)
1371				goto done;
1372		}
1373	}
1374	ifa = ifa_maybe;
1375done:
1376	return (ifa);
1377}
1378
1379#include <net/route.h>
1380#include <net/if_llatbl.h>
1381
1382/*
1383 * Default action when installing a route with a Link Level gateway.
1384 * Lookup an appropriate real ifa to point to.
1385 * This should be moved to /sys/net/link.c eventually.
1386 */
1387static void
1388link_rtrequest(int cmd, struct rtentry *rt, struct rt_addrinfo *info)
1389{
1390	struct ifaddr *ifa, *oifa;
1391	struct sockaddr *dst;
1392	struct ifnet *ifp;
1393
1394	RT_LOCK_ASSERT(rt);
1395
1396	if (cmd != RTM_ADD || ((ifa = rt->rt_ifa) == 0) ||
1397	    ((ifp = ifa->ifa_ifp) == 0) || ((dst = rt_key(rt)) == 0))
1398		return;
1399	ifa = ifaof_ifpforaddr(dst, ifp);
1400	if (ifa) {
1401		IFAREF(ifa);		/* XXX */
1402		oifa = rt->rt_ifa;
1403		rt->rt_ifa = ifa;
1404		IFAFREE(oifa);
1405		if (ifa->ifa_rtrequest && ifa->ifa_rtrequest != link_rtrequest)
1406			ifa->ifa_rtrequest(cmd, rt, info);
1407	}
1408}
1409
/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
1415static void
1416if_unroute(struct ifnet *ifp, int flag, int fam)
1417{
1418	struct ifaddr *ifa;
1419
1420	KASSERT(flag == IFF_UP, ("if_unroute: flag != IFF_UP"));
1421
1422	ifp->if_flags &= ~flag;
1423	getmicrotime(&ifp->if_lastchange);
1424	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
1425		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1426			pfctlinput(PRC_IFDOWN, ifa->ifa_addr);
1427	ifp->if_qflush(ifp);
1428
1429#ifdef DEV_CARP
1430	if (ifp->if_carp)
1431		carp_carpdev_state(ifp->if_carp);
1432#endif
1433	rt_ifmsg(ifp);
1434}
1435
/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
1441static void
1442if_route(struct ifnet *ifp, int flag, int fam)
1443{
1444	struct ifaddr *ifa;
1445
1446	KASSERT(flag == IFF_UP, ("if_route: flag != IFF_UP"));
1447
1448	ifp->if_flags |= flag;
1449	getmicrotime(&ifp->if_lastchange);
1450	TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link)
1451		if (fam == PF_UNSPEC || (fam == ifa->ifa_addr->sa_family))
1452			pfctlinput(PRC_IFUP, ifa->ifa_addr);
1453#ifdef DEV_CARP
1454	if (ifp->if_carp)
1455		carp_carpdev_state(ifp->if_carp);
1456#endif
1457	rt_ifmsg(ifp);
1458#ifdef INET6
1459	in6_if_up(ifp);
1460#endif
1461}
1462
1463void	(*vlan_link_state_p)(struct ifnet *, int);	/* XXX: private from if_vlan */
1464void	(*vlan_trunk_cap_p)(struct ifnet *);		/* XXX: private from if_vlan */
1465
1466/*
1467 * Handle a change in the interface link state. To avoid LORs
1468 * between driver lock and upper layer locks, as well as possible
1469 * recursions, we post event to taskqueue, and all job
1470 * is done in static do_link_state_change().
1471 */
1472void
1473if_link_state_change(struct ifnet *ifp, int link_state)
1474{
1475	/* Return if state hasn't changed. */
1476	if (ifp->if_link_state == link_state)
1477		return;
1478
1479	ifp->if_link_state = link_state;
1480
1481	taskqueue_enqueue(taskqueue_swi, &ifp->if_linktask);
1482}
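
/*
 * Sketch of a typical driver-side call (illustrative; "sc" is
 * driver-private state):
 *
 *	if_link_state_change(sc->ifp, LINK_STATE_UP);
 *
 * Because the actual notification runs later from taskqueue_swi via
 * if_linktask, a driver may safely report a state change while holding
 * its own lock, as described above.
 */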
1483
1484static void
1485do_link_state_change(void *arg, int pending)
1486{
1487	struct ifnet *ifp = (struct ifnet *)arg;
1488	int link_state = ifp->if_link_state;
1489	int link;
1490	CURVNET_SET(ifp->if_vnet);
1491
1492	/* Notify that the link state has changed. */
1493	rt_ifmsg(ifp);
1494	if (link_state == LINK_STATE_UP)
1495		link = NOTE_LINKUP;
1496	else if (link_state == LINK_STATE_DOWN)
1497		link = NOTE_LINKDOWN;
1498	else
1499		link = NOTE_LINKINV;
1500	KNOTE_UNLOCKED(&ifp->if_klist, link);
1501	if (ifp->if_vlantrunk != NULL)
1502		(*vlan_link_state_p)(ifp, link);
1503
1504	if ((ifp->if_type == IFT_ETHER || ifp->if_type == IFT_L2VLAN) &&
1505	    IFP2AC(ifp)->ac_netgraph != NULL)
1506		(*ng_ether_link_state_p)(ifp, link_state);
1507#ifdef DEV_CARP
1508	if (ifp->if_carp)
1509		carp_carpdev_state(ifp->if_carp);
1510#endif
1511	if (ifp->if_bridge) {
1512		KASSERT(bstp_linkstate_p != NULL,("if_bridge bstp not loaded!"));
1513		(*bstp_linkstate_p)(ifp, link_state);
1514	}
1515	if (ifp->if_lagg) {
1516		KASSERT(lagg_linkstate_p != NULL,("if_lagg not loaded!"));
1517		(*lagg_linkstate_p)(ifp, link_state);
1518	}
1519
1520	devctl_notify("IFNET", ifp->if_xname,
1521	    (link_state == LINK_STATE_UP) ? "LINK_UP" : "LINK_DOWN", NULL);
1522	if (pending > 1)
1523		if_printf(ifp, "%d link states coalesced\n", pending);
1524	if (log_link_state_change)
1525		log(LOG_NOTICE, "%s: link state changed to %s\n", ifp->if_xname,
1526		    (link_state == LINK_STATE_UP) ? "UP" : "DOWN" );
1527	CURVNET_RESTORE();
1528}
1529
/*
 * Mark an interface down and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
1535void
1536if_down(struct ifnet *ifp)
1537{
1538
1539	if_unroute(ifp, IFF_UP, AF_UNSPEC);
1540}
1541
/*
 * Mark an interface up and notify protocols of
 * the transition.
 * NOTE: must be called at splnet or equivalent.
 */
1547void
1548if_up(struct ifnet *ifp)
1549{
1550
1551	if_route(ifp, IFF_UP, AF_UNSPEC);
1552}
1553
1554/*
1555 * Flush an interface queue.
1556 */
1557static void
1558if_qflush(struct ifnet *ifp)
1559{
1560	struct mbuf *m, *n;
1561	struct ifaltq *ifq;
1562
1563	ifq = &ifp->if_snd;
1564	IFQ_LOCK(ifq);
1565#ifdef ALTQ
1566	if (ALTQ_IS_ENABLED(ifq))
1567		ALTQ_PURGE(ifq);
1568#endif
1569	n = ifq->ifq_head;
1570	while ((m = n) != 0) {
1571		n = m->m_act;
1572		m_freem(m);
1573	}
1574	ifq->ifq_head = 0;
1575	ifq->ifq_tail = 0;
1576	ifq->ifq_len = 0;
1577	IFQ_UNLOCK(ifq);
1578}
1579
1580/*
1581 * Handle interface watchdog timer routines.  Called
1582 * from softclock, we decrement timers (if set) and
1583 * call the appropriate interface routine on expiration.
1584 *
1585 * XXXRW: Note that because timeouts run with Giant, if_watchdog() is called
1586 * holding Giant.  If we switch to an MPSAFE callout, we likely need to grab
1587 * Giant before entering if_watchdog() on an IFF_NEEDSGIANT interface.
1588 */
1589static void
1590if_slowtimo(void *arg)
1591{
1592	VNET_ITERATOR_DECL(vnet_iter);
1593	struct ifnet *ifp;
1594	int s = splimp();
1595
1596	IFNET_RLOCK();
1597	VNET_LIST_RLOCK();
1598	VNET_FOREACH(vnet_iter) {
1599		CURVNET_SET(vnet_iter);
1600		INIT_VNET_NET(vnet_iter);
1601		TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1602			if (ifp->if_timer == 0 || --ifp->if_timer)
1603				continue;
1604			if (ifp->if_watchdog)
1605				(*ifp->if_watchdog)(ifp);
1606		}
1607		CURVNET_RESTORE();
1608	}
1609	VNET_LIST_RUNLOCK();
1610	IFNET_RUNLOCK();
1611	splx(s);
1612	timeout(if_slowtimo, (void *)0, hz / IFNET_SLOWHZ);
1613}
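
/*
 * Note: if_slowtimo() reschedules itself every hz / IFNET_SLOWHZ ticks,
 * so the legacy ifp->if_timer / if_watchdog mechanism (already flagged
 * as obsolete in if_attach()) runs with roughly one-second resolution.
 */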
1614
1615/*
1616 * Map interface name to
1617 * interface structure pointer.
1618 */
1619struct ifnet *
1620ifunit(const char *name)
1621{
1622	INIT_VNET_NET(curvnet);
1623	struct ifnet *ifp;
1624
1625	IFNET_RLOCK();
1626	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
1627		if (strncmp(name, ifp->if_xname, IFNAMSIZ) == 0)
1628			break;
1629	}
1630	IFNET_RUNLOCK();
1631	return (ifp);
1632}
1633
1634/*
1635 * Hardware specific interface ioctls.
1636 */
1637static int
1638ifhwioctl(u_long cmd, struct ifnet *ifp, caddr_t data, struct thread *td)
1639{
1640	struct ifreq *ifr;
1641	struct ifstat *ifs;
1642	int error = 0;
1643	int new_flags, temp_flags;
1644	size_t namelen, onamelen;
1645	char new_name[IFNAMSIZ];
1646	struct ifaddr *ifa;
1647	struct sockaddr_dl *sdl;
1648
1649	ifr = (struct ifreq *)data;
1650	switch (cmd) {
1651	case SIOCGIFINDEX:
1652		ifr->ifr_index = ifp->if_index;
1653		break;
1654
1655	case SIOCGIFFLAGS:
1656		temp_flags = ifp->if_flags | ifp->if_drv_flags;
1657		ifr->ifr_flags = temp_flags & 0xffff;
1658		ifr->ifr_flagshigh = temp_flags >> 16;
1659		break;
1660
1661	case SIOCGIFCAP:
1662		ifr->ifr_reqcap = ifp->if_capabilities;
1663		ifr->ifr_curcap = ifp->if_capenable;
1664		break;
1665
1666#ifdef MAC
1667	case SIOCGIFMAC:
1668		error = mac_ifnet_ioctl_get(td->td_ucred, ifr, ifp);
1669		break;
1670#endif
1671
1672	case SIOCGIFMETRIC:
1673		ifr->ifr_metric = ifp->if_metric;
1674		break;
1675
1676	case SIOCGIFMTU:
1677		ifr->ifr_mtu = ifp->if_mtu;
1678		break;
1679
1680	case SIOCGIFPHYS:
1681		ifr->ifr_phys = ifp->if_physical;
1682		break;
1683
1684	case SIOCSIFFLAGS:
1685		error = priv_check(td, PRIV_NET_SETIFFLAGS);
1686		if (error)
1687			return (error);
1688		/*
1689		 * Currently, no driver owned flags pass the IFF_CANTCHANGE
1690		 * check, so we don't need special handling here yet.
1691		 */
1692		new_flags = (ifr->ifr_flags & 0xffff) |
1693		    (ifr->ifr_flagshigh << 16);
1694		if (ifp->if_flags & IFF_SMART) {
1695			/* Smart drivers twiddle their own routes */
1696		} else if (ifp->if_flags & IFF_UP &&
1697		    (new_flags & IFF_UP) == 0) {
1698			int s = splimp();
1699			if_down(ifp);
1700			splx(s);
1701		} else if (new_flags & IFF_UP &&
1702		    (ifp->if_flags & IFF_UP) == 0) {
1703			int s = splimp();
1704			if_up(ifp);
1705			splx(s);
1706		}
1707		/* See if permanently promiscuous mode bit is about to flip */
1708		if ((ifp->if_flags ^ new_flags) & IFF_PPROMISC) {
1709			if (new_flags & IFF_PPROMISC)
1710				ifp->if_flags |= IFF_PROMISC;
1711			else if (ifp->if_pcount == 0)
1712				ifp->if_flags &= ~IFF_PROMISC;
1713			log(LOG_INFO, "%s: permanently promiscuous mode %s\n",
1714			    ifp->if_xname,
1715			    (new_flags & IFF_PPROMISC) ? "enabled" : "disabled");
1716		}
1717		ifp->if_flags = (ifp->if_flags & IFF_CANTCHANGE) |
1718			(new_flags &~ IFF_CANTCHANGE);
1719		if (ifp->if_ioctl) {
1720			IFF_LOCKGIANT(ifp);
1721			(void) (*ifp->if_ioctl)(ifp, cmd, data);
1722			IFF_UNLOCKGIANT(ifp);
1723		}
1724		getmicrotime(&ifp->if_lastchange);
1725		break;
1726
1727	case SIOCSIFCAP:
1728		error = priv_check(td, PRIV_NET_SETIFCAP);
1729		if (error)
1730			return (error);
1731		if (ifp->if_ioctl == NULL)
1732			return (EOPNOTSUPP);
1733		if (ifr->ifr_reqcap & ~ifp->if_capabilities)
1734			return (EINVAL);
1735		IFF_LOCKGIANT(ifp);
1736		error = (*ifp->if_ioctl)(ifp, cmd, data);
1737		IFF_UNLOCKGIANT(ifp);
1738		if (error == 0)
1739			getmicrotime(&ifp->if_lastchange);
1740		break;
1741
1742#ifdef MAC
1743	case SIOCSIFMAC:
1744		error = mac_ifnet_ioctl_set(td->td_ucred, ifr, ifp);
1745		break;
1746#endif
1747
1748	case SIOCSIFNAME:
1749		error = priv_check(td, PRIV_NET_SETIFNAME);
1750		if (error)
1751			return (error);
1752		error = copyinstr(ifr->ifr_data, new_name, IFNAMSIZ, NULL);
1753		if (error != 0)
1754			return (error);
1755		if (new_name[0] == '\0')
1756			return (EINVAL);
1757		if (ifunit(new_name) != NULL)
1758			return (EEXIST);
1759
1760		/* Announce the departure of the interface. */
1761		rt_ifannouncemsg(ifp, IFAN_DEPARTURE);
1762		EVENTHANDLER_INVOKE(ifnet_departure_event, ifp);
1763
1764		log(LOG_INFO, "%s: changing name to '%s'\n",
1765		    ifp->if_xname, new_name);
1766
1767		strlcpy(ifp->if_xname, new_name, sizeof(ifp->if_xname));
1768		ifa = ifp->if_addr;
1769		IFA_LOCK(ifa);
1770		sdl = (struct sockaddr_dl *)ifa->ifa_addr;
1771		namelen = strlen(new_name);
1772		onamelen = sdl->sdl_nlen;
1773		/*
1774		 * Move the address if needed.  This is safe because we
1775		 * allocate space for a name of length IFNAMSIZ when we
1776		 * create this in if_attach().
1777		 */
1778		if (namelen != onamelen) {
1779			bcopy(sdl->sdl_data + onamelen,
1780			    sdl->sdl_data + namelen, sdl->sdl_alen);
1781		}
1782		bcopy(new_name, sdl->sdl_data, namelen);
1783		sdl->sdl_nlen = namelen;
1784		sdl = (struct sockaddr_dl *)ifa->ifa_netmask;
1785		bzero(sdl->sdl_data, onamelen);
1786		while (namelen != 0)
1787			sdl->sdl_data[--namelen] = 0xff;
1788		IFA_UNLOCK(ifa);
1789
1790		EVENTHANDLER_INVOKE(ifnet_arrival_event, ifp);
1791		/* Announce the return of the interface. */
1792		rt_ifannouncemsg(ifp, IFAN_ARRIVAL);
1793		break;
1794
1795	case SIOCSIFMETRIC:
1796		error = priv_check(td, PRIV_NET_SETIFMETRIC);
1797		if (error)
1798			return (error);
1799		ifp->if_metric = ifr->ifr_metric;
1800		getmicrotime(&ifp->if_lastchange);
1801		break;
1802
1803	case SIOCSIFPHYS:
1804		error = priv_check(td, PRIV_NET_SETIFPHYS);
1805		if (error)
1806			return (error);
1807		if (ifp->if_ioctl == NULL)
1808			return (EOPNOTSUPP);
1809		IFF_LOCKGIANT(ifp);
1810		error = (*ifp->if_ioctl)(ifp, cmd, data);
1811		IFF_UNLOCKGIANT(ifp);
1812		if (error == 0)
1813			getmicrotime(&ifp->if_lastchange);
1814		break;
1815
1816	case SIOCSIFMTU:
1817	{
1818		u_long oldmtu = ifp->if_mtu;
1819
1820		error = priv_check(td, PRIV_NET_SETIFMTU);
1821		if (error)
1822			return (error);
1823		if (ifr->ifr_mtu < IF_MINMTU || ifr->ifr_mtu > IF_MAXMTU)
1824			return (EINVAL);
1825		if (ifp->if_ioctl == NULL)
1826			return (EOPNOTSUPP);
1827		IFF_LOCKGIANT(ifp);
1828		error = (*ifp->if_ioctl)(ifp, cmd, data);
1829		IFF_UNLOCKGIANT(ifp);
1830		if (error == 0) {
1831			getmicrotime(&ifp->if_lastchange);
1832			rt_ifmsg(ifp);
1833		}
		/*
		 * If the link MTU changed, perform network-layer-specific
		 * processing.
		 */
1837		if (ifp->if_mtu != oldmtu) {
1838#ifdef INET6
1839			nd6_setmtu(ifp);
1840#endif
1841		}
1842		break;
1843	}
1844
1845	case SIOCADDMULTI:
1846	case SIOCDELMULTI:
1847		if (cmd == SIOCADDMULTI)
1848			error = priv_check(td, PRIV_NET_ADDMULTI);
1849		else
1850			error = priv_check(td, PRIV_NET_DELMULTI);
1851		if (error)
1852			return (error);
1853
1854		/* Don't allow group membership on non-multicast interfaces. */
1855		if ((ifp->if_flags & IFF_MULTICAST) == 0)
1856			return (EOPNOTSUPP);
1857
1858		/* Don't let users screw up protocols' entries. */
1859		if (ifr->ifr_addr.sa_family != AF_LINK)
1860			return (EINVAL);
1861
1862		if (cmd == SIOCADDMULTI) {
1863			struct ifmultiaddr *ifma;
1864
1865			/*
1866			 * Userland is only permitted to join groups once
1867			 * via the if_addmulti() KPI, because it cannot hold
1868			 * struct ifmultiaddr * between calls. It may also
1869			 * lose a race while we check if the membership
1870			 * already exists.
1871			 */
1872			IF_ADDR_LOCK(ifp);
1873			ifma = if_findmulti(ifp, &ifr->ifr_addr);
1874			IF_ADDR_UNLOCK(ifp);
1875			if (ifma != NULL)
1876				error = EADDRINUSE;
1877			else
1878				error = if_addmulti(ifp, &ifr->ifr_addr, &ifma);
1879		} else {
1880			error = if_delmulti(ifp, &ifr->ifr_addr);
1881		}
1882		if (error == 0)
1883			getmicrotime(&ifp->if_lastchange);
1884		break;
1885
1886	case SIOCSIFPHYADDR:
1887	case SIOCDIFPHYADDR:
1888#ifdef INET6
1889	case SIOCSIFPHYADDR_IN6:
1890#endif
1891	case SIOCSLIFPHYADDR:
1892	case SIOCSIFMEDIA:
1893	case SIOCSIFGENERIC:
1894		error = priv_check(td, PRIV_NET_HWIOCTL);
1895		if (error)
1896			return (error);
1897		if (ifp->if_ioctl == NULL)
1898			return (EOPNOTSUPP);
1899		IFF_LOCKGIANT(ifp);
1900		error = (*ifp->if_ioctl)(ifp, cmd, data);
1901		IFF_UNLOCKGIANT(ifp);
1902		if (error == 0)
1903			getmicrotime(&ifp->if_lastchange);
1904		break;
1905
1906	case SIOCGIFSTATUS:
1907		ifs = (struct ifstat *)data;
1908		ifs->ascii[0] = '\0';
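		/* FALLTHROUGH: let the driver ioctl below fill in the status. */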
1909
1910	case SIOCGIFPSRCADDR:
1911	case SIOCGIFPDSTADDR:
1912	case SIOCGLIFPHYADDR:
1913	case SIOCGIFMEDIA:
1914	case SIOCGIFGENERIC:
1915		if (ifp->if_ioctl == NULL)
1916			return (EOPNOTSUPP);
1917		IFF_LOCKGIANT(ifp);
1918		error = (*ifp->if_ioctl)(ifp, cmd, data);
1919		IFF_UNLOCKGIANT(ifp);
1920		break;
1921
1922	case SIOCSIFLLADDR:
1923		error = priv_check(td, PRIV_NET_SETLLADDR);
1924		if (error)
1925			return (error);
1926		error = if_setlladdr(ifp,
1927		    ifr->ifr_addr.sa_data, ifr->ifr_addr.sa_len);
1928		break;
1929
1930	case SIOCAIFGROUP:
1931	{
1932		struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
1933
1934		error = priv_check(td, PRIV_NET_ADDIFGROUP);
1935		if (error)
1936			return (error);
1937		if ((error = if_addgroup(ifp, ifgr->ifgr_group)))
1938			return (error);
1939		break;
1940	}
1941
1942	case SIOCGIFGROUP:
1943		if ((error = if_getgroup((struct ifgroupreq *)ifr, ifp)))
1944			return (error);
1945		break;
1946
1947	case SIOCDIFGROUP:
1948	{
1949		struct ifgroupreq *ifgr = (struct ifgroupreq *)ifr;
1950
1951		error = priv_check(td, PRIV_NET_DELIFGROUP);
1952		if (error)
1953			return (error);
1954		if ((error = if_delgroup(ifp, ifgr->ifgr_group)))
1955			return (error);
1956		break;
1957	}
1958
1959	default:
1960		error = ENOIOCTL;
1961		break;
1962	}
1963	return (error);
1964}
1965
1966/*
1967 * Interface ioctls.
1968 */
1969int
1970ifioctl(struct socket *so, u_long cmd, caddr_t data, struct thread *td)
1971{
1972	struct ifnet *ifp;
1973	struct ifreq *ifr;
1974	int error;
1975	int oif_flags;
1976
1977	switch (cmd) {
1978	case SIOCGIFCONF:
1979	case OSIOCGIFCONF:
1980#ifdef __amd64__
1981	case SIOCGIFCONF32:
1982#endif
1983		return (ifconf(cmd, data));
1984	}
1985	ifr = (struct ifreq *)data;
1986
1987	switch (cmd) {
1988	case SIOCIFCREATE:
1989	case SIOCIFCREATE2:
1990		error = priv_check(td, PRIV_NET_IFCREATE);
1991		if (error)
1992			return (error);
1993		return (if_clone_create(ifr->ifr_name, sizeof(ifr->ifr_name),
1994			cmd == SIOCIFCREATE2 ? ifr->ifr_data : NULL));
1995	case SIOCIFDESTROY:
1996		error = priv_check(td, PRIV_NET_IFDESTROY);
1997		if (error)
1998			return (error);
1999		return if_clone_destroy(ifr->ifr_name);
2000
2001	case SIOCIFGCLONERS:
2002		return (if_clone_list((struct if_clonereq *)data));
2003	case SIOCGIFGMEMB:
2004		return (if_getgroupmembers((struct ifgroupreq *)data));
2005	}
2006
2007	ifp = ifunit(ifr->ifr_name);
2008	if (ifp == 0)
2009		return (ENXIO);
2010
2011	error = ifhwioctl(cmd, ifp, data, td);
2012	if (error != ENOIOCTL)
2013		return (error);
2014
2015	oif_flags = ifp->if_flags;
2016	if (so->so_proto == 0)
2017		return (EOPNOTSUPP);
2018#ifndef COMPAT_43
2019	error = ((*so->so_proto->pr_usrreqs->pru_control)(so, cmd,
2020								 data,
2021								 ifp, td));
2022#else
2023	{
2024		int ocmd = cmd;
2025
2026		switch (cmd) {
2027
2028		case SIOCSIFDSTADDR:
2029		case SIOCSIFADDR:
2030		case SIOCSIFBRDADDR:
2031		case SIOCSIFNETMASK:
2032#if BYTE_ORDER != BIG_ENDIAN
2033			if (ifr->ifr_addr.sa_family == 0 &&
2034			    ifr->ifr_addr.sa_len < 16) {
2035				ifr->ifr_addr.sa_family = ifr->ifr_addr.sa_len;
2036				ifr->ifr_addr.sa_len = 16;
2037			}
2038#else
2039			if (ifr->ifr_addr.sa_len == 0)
2040				ifr->ifr_addr.sa_len = 16;
2041#endif
2042			break;
2043
2044		case OSIOCGIFADDR:
2045			cmd = SIOCGIFADDR;
2046			break;
2047
2048		case OSIOCGIFDSTADDR:
2049			cmd = SIOCGIFDSTADDR;
2050			break;
2051
2052		case OSIOCGIFBRDADDR:
2053			cmd = SIOCGIFBRDADDR;
2054			break;
2055
2056		case OSIOCGIFNETMASK:
2057			cmd = SIOCGIFNETMASK;
2058		}
2059		error =  ((*so->so_proto->pr_usrreqs->pru_control)(so,
2060								   cmd,
2061								   data,
2062								   ifp, td));
2063		switch (ocmd) {
2064
2065		case OSIOCGIFADDR:
2066		case OSIOCGIFDSTADDR:
2067		case OSIOCGIFBRDADDR:
2068		case OSIOCGIFNETMASK:
2069			*(u_short *)&ifr->ifr_addr = ifr->ifr_addr.sa_family;
2070
2071		}
2072	}
2073#endif /* COMPAT_43 */
2074
2075	if ((oif_flags ^ ifp->if_flags) & IFF_UP) {
2076#ifdef INET6
2077		DELAY(100);/* XXX: temporary workaround for fxp issue*/
2078		if (ifp->if_flags & IFF_UP) {
2079			int s = splimp();
2080			in6_if_up(ifp);
2081			splx(s);
2082		}
2083#endif
2084	}
2085	return (error);
2086}
2087
2088/*
2089 * The code common to handling reference counted flags,
2090 * e.g., in ifpromisc() and if_allmulti().
2091 * The "pflag" argument can specify a permanent mode flag to check,
2092 * such as IFF_PPROMISC for promiscuous mode; should be 0 if none.
2093 *
2094 * Only to be used on stack-owned flags, not driver-owned flags.
2095 */
2096static int
2097if_setflag(struct ifnet *ifp, int flag, int pflag, int *refcount, int onswitch)
2098{
2099	struct ifreq ifr;
2100	int error;
2101	int oldflags, oldcount;
2102
2103	/* Sanity checks to catch programming errors */
2104	KASSERT((flag & (IFF_DRV_OACTIVE|IFF_DRV_RUNNING)) == 0,
2105	    ("%s: setting driver-owned flag %d", __func__, flag));
2106
2107	if (onswitch)
2108		KASSERT(*refcount >= 0,
2109		    ("%s: increment negative refcount %d for flag %d",
2110		    __func__, *refcount, flag));
2111	else
2112		KASSERT(*refcount > 0,
2113		    ("%s: decrement non-positive refcount %d for flag %d",
2114		    __func__, *refcount, flag));
2115
2116	/* In case this mode is permanent, just touch refcount */
2117	if (ifp->if_flags & pflag) {
2118		*refcount += onswitch ? 1 : -1;
2119		return (0);
2120	}
2121
2122	/* Save ifnet parameters in case if_ioctl() fails and we need to recover */
2123	oldcount = *refcount;
2124	oldflags = ifp->if_flags;
2125
2126	/*
2127	 * If we are neither the first to set nor the last to clear the
2128	 * interface flag, touching the refcount is enough; otherwise toggle it.
2129	 */
2130	if (onswitch) {
2131		if ((*refcount)++)
2132			return (0);
2133		ifp->if_flags |= flag;
2134	} else {
2135		if (--(*refcount))
2136			return (0);
2137		ifp->if_flags &= ~flag;
2138	}
2139
2140	/* Call down the driver since we've changed interface flags */
2141	if (ifp->if_ioctl == NULL) {
2142		error = EOPNOTSUPP;
2143		goto recover;
2144	}
2145	ifr.ifr_flags = ifp->if_flags & 0xffff;
2146	ifr.ifr_flagshigh = ifp->if_flags >> 16;
2147	IFF_LOCKGIANT(ifp);
2148	error = (*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
2149	IFF_UNLOCKGIANT(ifp);
2150	if (error)
2151		goto recover;
2152	/* Notify userland that interface flags have changed */
2153	rt_ifmsg(ifp);
2154	return (0);
2155
2156recover:
2157	/* Recover after driver error */
2158	*refcount = oldcount;
2159	ifp->if_flags = oldflags;
2160	return (error);
2161}
2162
2163/*
2164 * Set/clear promiscuous mode on interface ifp based on the truth value
2165 * of pswitch.  The calls are reference counted so that only the first
2166 * "on" request actually has an effect, as does the final "off" request.
2167 * Results are undefined if the "off" and "on" requests are not matched.
2168 */
2169int
2170ifpromisc(struct ifnet *ifp, int pswitch)
2171{
2172	int error;
2173	int oldflags = ifp->if_flags;
2174
2175	error = if_setflag(ifp, IFF_PROMISC, IFF_PPROMISC,
2176			   &ifp->if_pcount, pswitch);
2177	/* If promiscuous mode status has changed, log a message */
2178	if (error == 0 && ((ifp->if_flags ^ oldflags) & IFF_PROMISC))
2179		log(LOG_INFO, "%s: promiscuous mode %s\n",
2180		    ifp->if_xname,
2181		    (ifp->if_flags & IFF_PROMISC) ? "enabled" : "disabled");
2182	return (error);
2183}
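
/*
 * Editorial example (not part of the original file): a minimal sketch of the
 * matched on/off discipline required by ifpromisc(), as a hypothetical
 * kernel consumer such as a packet tap might use it.  The function name is
 * illustrative only.
 */
#if 0	/* Illustrative sketch; not compiled as part of this file. */
static int
example_promisc_session(struct ifnet *ifp)
{
	int error;

	/* Only the first "on" request actually sets IFF_PROMISC. */
	error = ifpromisc(ifp, 1);
	if (error != 0)
		return (error);

	/* ... receive traffic promiscuously ... */

	/* The matching "off" clears the flag once the count drops to zero. */
	return (ifpromisc(ifp, 0));
}
#endif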
2184
2185/*
2186 * Return interface configuration
2187 * of system.  List may be used
2188 * in later ioctl's (above) to get
2189 * other information.
2190 */
2191/*ARGSUSED*/
2192static int
2193ifconf(u_long cmd, caddr_t data)
2194{
2195	INIT_VNET_NET(curvnet);
2196	struct ifconf *ifc = (struct ifconf *)data;
2197#ifdef __amd64__
2198	struct ifconf32 *ifc32 = (struct ifconf32 *)data;
2199	struct ifconf ifc_swab;
2200#endif
2201	struct ifnet *ifp;
2202	struct ifaddr *ifa;
2203	struct ifreq ifr;
2204	struct sbuf *sb;
2205	int error, full = 0, valid_len, max_len;
2206
2207#ifdef __amd64__
2208	if (cmd == SIOCGIFCONF32) {
2209		ifc_swab.ifc_len = ifc32->ifc_len;
2210		ifc_swab.ifc_buf = (caddr_t)(uintptr_t)ifc32->ifc_buf;
2211		ifc = &ifc_swab;
2212	}
2213#endif
2214	/* Limit initial buffer size to MAXPHYS to avoid DoS from userspace. */
2215	max_len = MAXPHYS - 1;
2216
2217	/* Prevent hostile input from being able to crash the system */
2218	if (ifc->ifc_len <= 0)
2219		return (EINVAL);
2220
2221again:
2222	if (ifc->ifc_len <= max_len) {
2223		max_len = ifc->ifc_len;
2224		full = 1;
2225	}
2226	sb = sbuf_new(NULL, NULL, max_len + 1, SBUF_FIXEDLEN);
2227	max_len = 0;
2228	valid_len = 0;
2229
2230	IFNET_RLOCK();		/* could sleep XXX */
2231	TAILQ_FOREACH(ifp, &V_ifnet, if_link) {
2232		int addrs;
2233
2234		/*
2235		 * Zero the ifr_name buffer to make sure we don't
2236		 * disclose the contents of the stack.
2237		 */
2238		memset(ifr.ifr_name, 0, sizeof(ifr.ifr_name));
2239
2240		if (strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name))
2241		    >= sizeof(ifr.ifr_name)) {
2242			sbuf_delete(sb);
2243			IFNET_RUNLOCK();
2244			return (ENAMETOOLONG);
2245		}
2246
2247		addrs = 0;
2248		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
2249			struct sockaddr *sa = ifa->ifa_addr;
2250
2251			if (jailed(curthread->td_ucred) &&
2252			    !prison_if(curthread->td_ucred, sa))
2253				continue;
2254			addrs++;
2255#ifdef COMPAT_43
2256			if (cmd == OSIOCGIFCONF) {
2257				struct osockaddr *osa =
2258					 (struct osockaddr *)&ifr.ifr_addr;
2259				ifr.ifr_addr = *sa;
2260				osa->sa_family = sa->sa_family;
2261				sbuf_bcat(sb, &ifr, sizeof(ifr));
2262				max_len += sizeof(ifr);
2263			} else
2264#endif
2265			if (sa->sa_len <= sizeof(*sa)) {
2266				ifr.ifr_addr = *sa;
2267				sbuf_bcat(sb, &ifr, sizeof(ifr));
2268				max_len += sizeof(ifr);
2269			} else {
2270				sbuf_bcat(sb, &ifr,
2271				    offsetof(struct ifreq, ifr_addr));
2272				max_len += offsetof(struct ifreq, ifr_addr);
2273				sbuf_bcat(sb, sa, sa->sa_len);
2274				max_len += sa->sa_len;
2275			}
2276
2277			if (!sbuf_overflowed(sb))
2278				valid_len = sbuf_len(sb);
2279		}
2280		if (addrs == 0) {
2281			bzero((caddr_t)&ifr.ifr_addr, sizeof(ifr.ifr_addr));
2282			sbuf_bcat(sb, &ifr, sizeof(ifr));
2283			max_len += sizeof(ifr);
2284
2285			if (!sbuf_overflowed(sb))
2286				valid_len = sbuf_len(sb);
2287		}
2288	}
2289	IFNET_RUNLOCK();
2290
2291	/*
2292	 * If we didn't allocate enough space (uncommon), try again.  If
2293	 * we have already allocated as much space as we are allowed,
2294	 * return what we've got.
2295	 */
2296	if (valid_len != max_len && !full) {
2297		sbuf_delete(sb);
2298		goto again;
2299	}
2300
2301	ifc->ifc_len = valid_len;
2302#ifdef __amd64__
2303	if (cmd == SIOCGIFCONF32)
2304		ifc32->ifc_len = valid_len;
2305#endif
2306	sbuf_finish(sb);
2307	error = copyout(sbuf_data(sb), ifc->ifc_req, ifc->ifc_len);
2308	sbuf_delete(sb);
2309	return (error);
2310}
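
/*
 * Editorial example (not part of the original file): a minimal userland
 * sketch of how the SIOCGIFCONF output built above is typically walked.
 * Entries are variable length because ifconf() packs long sockaddrs with
 * only offsetof(struct ifreq, ifr_addr) bytes of header; an interface is
 * listed once per address.  Buffer size and names are illustrative only.
 */
#if 0	/* Illustrative userland sketch; not compiled as part of the kernel. */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <net/if.h>

#include <err.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct ifconf ifc;
	struct ifreq *ifr;
	char *buf, *cp, *end;
	size_t len;
	int s;

	if ((s = socket(AF_INET, SOCK_DGRAM, 0)) == -1)
		err(1, "socket");

	/* A generous fixed-size buffer; careful code retries if it fills. */
	ifc.ifc_len = 64 * 1024;
	if ((buf = malloc(ifc.ifc_len)) == NULL)
		err(1, "malloc");
	ifc.ifc_buf = buf;

	if (ioctl(s, SIOCGIFCONF, &ifc) == -1)
		err(1, "SIOCGIFCONF");

	/* Stride: name plus the larger of sizeof(struct sockaddr) and sa_len. */
	end = buf + ifc.ifc_len;
	for (cp = buf; cp < end; cp += len) {
		ifr = (struct ifreq *)cp;
		printf("%s\n", ifr->ifr_name);
		len = sizeof(ifr->ifr_name) +
		    (ifr->ifr_addr.sa_len > sizeof(struct sockaddr) ?
		    ifr->ifr_addr.sa_len : sizeof(struct sockaddr));
	}
	free(buf);
	return (0);
}
#endif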
2311
2312/*
2313 * Just like ifpromisc(), but for all-multicast-reception mode.
2314 */
2315int
2316if_allmulti(struct ifnet *ifp, int onswitch)
2317{
2318
2319	return (if_setflag(ifp, IFF_ALLMULTI, 0, &ifp->if_amcount, onswitch));
2320}
2321
2322struct ifmultiaddr *
2323if_findmulti(struct ifnet *ifp, struct sockaddr *sa)
2324{
2325	struct ifmultiaddr *ifma;
2326
2327	IF_ADDR_LOCK_ASSERT(ifp);
2328
2329	TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
2330		if (sa->sa_family == AF_LINK) {
2331			if (sa_dl_equal(ifma->ifma_addr, sa))
2332				break;
2333		} else {
2334			if (sa_equal(ifma->ifma_addr, sa))
2335				break;
2336		}
2337	}
2338
2339	return (ifma);
2340}
2341
2342/*
2343 * Allocate a new ifmultiaddr and initialize based on passed arguments.  We
2344 * make copies of passed sockaddrs.  The ifmultiaddr will not be added to
2345 * the ifnet multicast address list here, so the caller must do that and
2346 * other setup work (such as notifying the device driver).  The reference
2347 * count is initialized to 1.
2348 */
2349static struct ifmultiaddr *
2350if_allocmulti(struct ifnet *ifp, struct sockaddr *sa, struct sockaddr *llsa,
2351    int mflags)
2352{
2353	struct ifmultiaddr *ifma;
2354	struct sockaddr *dupsa;
2355
2356	ifma = malloc(sizeof *ifma, M_IFMADDR, mflags |
2357	    M_ZERO);
2358	if (ifma == NULL)
2359		return (NULL);
2360
2361	dupsa = malloc(sa->sa_len, M_IFMADDR, mflags);
2362	if (dupsa == NULL) {
2363		free(ifma, M_IFMADDR);
2364		return (NULL);
2365	}
2366	bcopy(sa, dupsa, sa->sa_len);
2367	ifma->ifma_addr = dupsa;
2368
2369	ifma->ifma_ifp = ifp;
2370	ifma->ifma_refcount = 1;
2371	ifma->ifma_protospec = NULL;
2372
2373	if (llsa == NULL) {
2374		ifma->ifma_lladdr = NULL;
2375		return (ifma);
2376	}
2377
2378	dupsa = malloc(llsa->sa_len, M_IFMADDR, mflags);
2379	if (dupsa == NULL) {
2380		free(ifma->ifma_addr, M_IFMADDR);
2381		free(ifma, M_IFMADDR);
2382		return (NULL);
2383	}
2384	bcopy(llsa, dupsa, llsa->sa_len);
2385	ifma->ifma_lladdr = dupsa;
2386
2387	return (ifma);
2388}
2389
2390/*
2391 * if_freemulti: free ifmultiaddr structure and possibly attached related
2392 * addresses.  The caller is responsible for implementing reference
2393 * counting, notifying the driver, handling routing messages, and releasing
2394 * any dependent link layer state.
2395 */
2396static void
2397if_freemulti(struct ifmultiaddr *ifma)
2398{
2399
2400	KASSERT(ifma->ifma_refcount == 0, ("if_freemulti: refcount %d",
2401	    ifma->ifma_refcount));
2402	KASSERT(ifma->ifma_protospec == NULL,
2403	    ("if_freemulti: protospec not NULL"));
2404
2405	if (ifma->ifma_lladdr != NULL)
2406		free(ifma->ifma_lladdr, M_IFMADDR);
2407	free(ifma->ifma_addr, M_IFMADDR);
2408	free(ifma, M_IFMADDR);
2409}
2410
2411/*
2412 * Register an additional multicast address with a network interface.
2413 *
2414 * - If the address is already present, bump the reference count on the
2415 *   address and return.
2416 * - If the address is not link-layer, look up a link layer address.
2417 * - Allocate address structures for one or both addresses, and attach to the
2418 *   multicast address list on the interface.  If automatically adding a link
2419 *   layer address, the protocol address will own a reference to the link
2420 *   layer address, to be freed when it is freed.
2421 * - Notify the network device driver of an addition to the multicast address
2422 *   list.
2423 *
2424 * 'sa' points to caller-owned memory with the desired multicast address.
2425 *
2426 * 'retifma' will be used to return a pointer to the resulting multicast
2427 * address reference, if desired.
2428 */
2429int
2430if_addmulti(struct ifnet *ifp, struct sockaddr *sa,
2431    struct ifmultiaddr **retifma)
2432{
2433	struct ifmultiaddr *ifma, *ll_ifma;
2434	struct sockaddr *llsa;
2435	int error;
2436
2437	/*
2438	 * If the address is already present, return a new reference to it;
2439	 * otherwise, allocate storage and set up a new address.
2440	 */
2441	IF_ADDR_LOCK(ifp);
2442	ifma = if_findmulti(ifp, sa);
2443	if (ifma != NULL) {
2444		ifma->ifma_refcount++;
2445		if (retifma != NULL)
2446			*retifma = ifma;
2447		IF_ADDR_UNLOCK(ifp);
2448		return (0);
2449	}
2450
2451	/*
2452	 * The address isn't already present; resolve the protocol address
2453	 * into a link layer address, then look that up and either bump its
2454	 * refcount or allocate an ifma for it as well.  If 'llsa' was
2455	 * returned, we will need to free it later.
2456	 */
2457	llsa = NULL;
2458	ll_ifma = NULL;
2459	if (ifp->if_resolvemulti != NULL) {
2460		error = ifp->if_resolvemulti(ifp, &llsa, sa);
2461		if (error)
2462			goto unlock_out;
2463	}
2464
2465	/*
2466	 * Allocate the new address.  Don't hook it up yet, as we may also
2467	 * need to allocate a link layer multicast address.
2468	 */
2469	ifma = if_allocmulti(ifp, sa, llsa, M_NOWAIT);
2470	if (ifma == NULL) {
2471		error = ENOMEM;
2472		goto free_llsa_out;
2473	}
2474
2475	/*
2476	 * If a link layer address is found, we'll need to see if it's
2477	 * already present in the address list, or allocate it as well.
2478	 * When this block finishes, the link layer address will be on the
2479	 * list.
2480	 */
2481	if (llsa != NULL) {
2482		ll_ifma = if_findmulti(ifp, llsa);
2483		if (ll_ifma == NULL) {
2484			ll_ifma = if_allocmulti(ifp, llsa, NULL, M_NOWAIT);
2485			if (ll_ifma == NULL) {
2486				--ifma->ifma_refcount;
2487				if_freemulti(ifma);
2488				error = ENOMEM;
2489				goto free_llsa_out;
2490			}
2491			TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ll_ifma,
2492			    ifma_link);
2493		} else
2494			ll_ifma->ifma_refcount++;
2495		ifma->ifma_llifma = ll_ifma;
2496	}
2497
2498	/*
2499	 * We now have a new multicast address, ifma, and possibly a new or
2500	 * referenced link layer address.  Add the primary address to the
2501	 * ifnet address list.
2502	 */
2503	TAILQ_INSERT_HEAD(&ifp->if_multiaddrs, ifma, ifma_link);
2504
2505	if (retifma != NULL)
2506		*retifma = ifma;
2507
2508	/*
2509	 * Must generate the message while holding the lock so that 'ifma'
2510	 * pointer is still valid.
2511	 */
2512	rt_newmaddrmsg(RTM_NEWMADDR, ifma);
2513	IF_ADDR_UNLOCK(ifp);
2514
2515	/*
2516	 * We are certain we have added something, so call down to the
2517	 * interface to let them know about it.
2518	 */
2519	if (ifp->if_ioctl != NULL) {
2520		IFF_LOCKGIANT(ifp);
2521		(void) (*ifp->if_ioctl)(ifp, SIOCADDMULTI, 0);
2522		IFF_UNLOCKGIANT(ifp);
2523	}
2524
2525	if (llsa != NULL)
2526		free(llsa, M_IFMADDR);
2527
2528	return (0);
2529
2530free_llsa_out:
2531	if (llsa != NULL)
2532		free(llsa, M_IFMADDR);
2533
2534unlock_out:
2535	IF_ADDR_UNLOCK(ifp);
2536	return (error);
2537}
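
/*
 * Editorial example (not part of the original file): a sketch of a
 * hypothetical INET kernel consumer joining and later leaving an IPv4
 * group through the routines above.  Network-layer protocol code would
 * instead keep the returned ifma and hand it to if_delmulti_ifma().  The
 * function name and group are illustrative; a valid attached ifp and
 * netinet/in.h (struct sockaddr_in) are assumed.
 */
#if 0	/* Illustrative sketch; not compiled as part of this file. */
static int
example_mcast_membership(struct ifnet *ifp)
{
	struct sockaddr_in sin;
	struct ifmultiaddr *ifma;
	int error;

	bzero(&sin, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(0xe0000001);	/* 224.0.0.1 */

	/* The first add allocates the ifma; later adds only gain a ref. */
	error = if_addmulti(ifp, (struct sockaddr *)&sin, &ifma);
	if (error != 0)
		return (error);

	/* ... membership in use ... */

	/* Matching delete; the last reference reprograms the hash filter. */
	return (if_delmulti(ifp, (struct sockaddr *)&sin));
}
#endif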
2538
2539/*
2540 * Delete a multicast group membership by network-layer group address.
2541 *
2542 * Returns ENOENT if the entry could not be found. If ifp no longer
2543 * exists, results are undefined. This entry point should only be used
2544 * from subsystems which do appropriate locking to hold ifp for the
2545 * duration of the call.
2546 * Network-layer protocol domains must use if_delmulti_ifma().
2547 */
2548int
2549if_delmulti(struct ifnet *ifp, struct sockaddr *sa)
2550{
2551	struct ifmultiaddr *ifma;
2552	int lastref;
2553#ifdef INVARIANTS
2554	struct ifnet *oifp;
2555	INIT_VNET_NET(ifp->if_vnet);
2556
2557	IFNET_RLOCK();
2558	TAILQ_FOREACH(oifp, &V_ifnet, if_link)
2559		if (ifp == oifp)
2560			break;
2561	if (ifp != oifp)
2562		ifp = NULL;
2563	IFNET_RUNLOCK();
2564
2565	KASSERT(ifp != NULL, ("%s: ifnet went away", __func__));
2566#endif
2567	if (ifp == NULL)
2568		return (ENOENT);
2569
2570	IF_ADDR_LOCK(ifp);
2571	lastref = 0;
2572	ifma = if_findmulti(ifp, sa);
2573	if (ifma != NULL)
2574		lastref = if_delmulti_locked(ifp, ifma, 0);
2575	IF_ADDR_UNLOCK(ifp);
2576
2577	if (ifma == NULL)
2578		return (ENOENT);
2579
2580	if (lastref && ifp->if_ioctl != NULL) {
2581		IFF_LOCKGIANT(ifp);
2582		(void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
2583		IFF_UNLOCKGIANT(ifp);
2584	}
2585
2586	return (0);
2587}
2588
2589/*
2590 * Delete a multicast group membership by group membership pointer.
2591 * Network-layer protocol domains must use this routine.
2592 *
2593 * It is safe to call this routine even if the ifp has disappeared. Callers should
2594 * hold IFF_LOCKGIANT() to avoid a LOR in case the hardware needs to be
2595 * reconfigured.
2596 */
2597void
2598if_delmulti_ifma(struct ifmultiaddr *ifma)
2599{
2600#ifdef DIAGNOSTIC
2601	INIT_VNET_NET(curvnet);
2602#endif
2603	struct ifnet *ifp;
2604	int lastref;
2605
2606	ifp = ifma->ifma_ifp;
2607#ifdef DIAGNOSTIC
2608	if (ifp == NULL) {
2609		printf("%s: ifma_ifp seems to be detached\n", __func__);
2610	} else {
2611		struct ifnet *oifp;
2612
2613		IFNET_RLOCK();
2614		TAILQ_FOREACH(oifp, &V_ifnet, if_link)
2615			if (ifp == oifp)
2616				break;
2617		if (ifp != oifp) {
2618			printf("%s: ifnet %p disappeared\n", __func__, ifp);
2619			ifp = NULL;
2620		}
2621		IFNET_RUNLOCK();
2622	}
2623#endif
2624	/*
2625	 * If and only if the ifnet instance exists: Acquire the address lock.
2626	 */
2627	if (ifp != NULL)
2628		IF_ADDR_LOCK(ifp);
2629
2630	lastref = if_delmulti_locked(ifp, ifma, 0);
2631
2632	if (ifp != NULL) {
2633		/*
2634		 * If and only if the ifnet instance exists:
2635		 *  Release the address lock.
2636		 *  If the group was left: update the hardware hash filter.
2637		 */
2638		IF_ADDR_UNLOCK(ifp);
2639		if (lastref && ifp->if_ioctl != NULL) {
2640			IFF_LOCKGIANT(ifp);
2641			(void)(*ifp->if_ioctl)(ifp, SIOCDELMULTI, 0);
2642			IFF_UNLOCKGIANT(ifp);
2643		}
2644	}
2645}
2646
2647/*
2648 * Perform deletion of network-layer and/or link-layer multicast address.
2649 *
2650 * Return 0 if the reference count was decremented.
2651 * Return 1 if the final reference was released, indicating that the
2652 * hardware hash filter should be reprogrammed.
2653 */
2654static int
2655if_delmulti_locked(struct ifnet *ifp, struct ifmultiaddr *ifma, int detaching)
2656{
2657	struct ifmultiaddr *ll_ifma;
2658
2659	if (ifp != NULL && ifma->ifma_ifp != NULL) {
2660		KASSERT(ifma->ifma_ifp == ifp,
2661		    ("%s: inconsistent ifp %p", __func__, ifp));
2662		IF_ADDR_LOCK_ASSERT(ifp);
2663	}
2664
2665	ifp = ifma->ifma_ifp;
2666
2667	/*
2668	 * If the ifnet is detaching, null out references to ifnet,
2669	 * so that upper protocol layers will notice, and not attempt
2670	 * to obtain locks for an ifnet which no longer exists. The
2671	 * routing socket announcement must happen before the ifnet
2672	 * instance is detached from the system.
2673	 */
2674	if (detaching) {
2675#ifdef DIAGNOSTIC
2676		printf("%s: detaching ifnet instance %p\n", __func__, ifp);
2677#endif
2678		/*
2679		 * ifp may already be nulled out if we are being reentered
2680		 * to delete the ll_ifma.
2681		 */
2682		if (ifp != NULL) {
2683			rt_newmaddrmsg(RTM_DELMADDR, ifma);
2684			ifma->ifma_ifp = NULL;
2685		}
2686	}
2687
2688	if (--ifma->ifma_refcount > 0)
2689		return (0);
2690
2691	/*
2692	 * If this ifma is a network-layer ifma, a link-layer ifma may
2693	 * have been associated with it. Release it first if so.
2694	 */
2695	ll_ifma = ifma->ifma_llifma;
2696	if (ll_ifma != NULL) {
2697		KASSERT(ifma->ifma_lladdr != NULL,
2698		    ("%s: llifma w/o lladdr", __func__));
2699		if (detaching)
2700			ll_ifma->ifma_ifp = NULL;	/* XXX */
2701		if (--ll_ifma->ifma_refcount == 0) {
2702			if (ifp != NULL) {
2703				TAILQ_REMOVE(&ifp->if_multiaddrs, ll_ifma,
2704				    ifma_link);
2705			}
2706			if_freemulti(ll_ifma);
2707		}
2708	}
2709
2710	if (ifp != NULL)
2711		TAILQ_REMOVE(&ifp->if_multiaddrs, ifma, ifma_link);
2712
2713	if_freemulti(ifma);
2714
2715	/*
2716	 * The last reference to this instance of struct ifmultiaddr
2717	 * was released; the hardware should be notified of this change.
2718	 */
2719	return (1);
2720}
2721
2722/*
2723 * Set the link layer address on an interface.
2724 *
2725 * At this time we only support certain types of interfaces,
2726 * and we don't allow the length of the address to change.
2727 */
2728int
2729if_setlladdr(struct ifnet *ifp, const u_char *lladdr, int len)
2730{
2731	struct sockaddr_dl *sdl;
2732	struct ifaddr *ifa;
2733	struct ifreq ifr;
2734
2735	ifa = ifp->if_addr;
2736	if (ifa == NULL)
2737		return (EINVAL);
2738	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
2739	if (sdl == NULL)
2740		return (EINVAL);
2741	if (len != sdl->sdl_alen)	/* don't allow length to change */
2742		return (EINVAL);
2743	switch (ifp->if_type) {
2744	case IFT_ETHER:
2745	case IFT_FDDI:
2746	case IFT_XETHER:
2747	case IFT_ISO88025:
2748	case IFT_L2VLAN:
2749	case IFT_BRIDGE:
2750	case IFT_ARCNET:
2751	case IFT_IEEE8023ADLAG:
2752		bcopy(lladdr, LLADDR(sdl), len);
2753		break;
2754	default:
2755		return (ENODEV);
2756	}
2757	/*
2758	 * If the interface is already up, we need
2759	 * to re-init it in order to reprogram its
2760	 * address filter.
2761	 */
2762	if ((ifp->if_flags & IFF_UP) != 0) {
2763		if (ifp->if_ioctl) {
2764			IFF_LOCKGIANT(ifp);
2765			ifp->if_flags &= ~IFF_UP;
2766			ifr.ifr_flags = ifp->if_flags & 0xffff;
2767			ifr.ifr_flagshigh = ifp->if_flags >> 16;
2768			(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
2769			ifp->if_flags |= IFF_UP;
2770			ifr.ifr_flags = ifp->if_flags & 0xffff;
2771			ifr.ifr_flagshigh = ifp->if_flags >> 16;
2772			(*ifp->if_ioctl)(ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
2773			IFF_UNLOCKGIANT(ifp);
2774		}
2775#ifdef INET
2776		/*
2777		 * Also send gratuitous ARPs to notify other nodes about
2778		 * the address change.
2779		 */
2780		TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
2781			if (ifa->ifa_addr->sa_family == AF_INET)
2782				arp_ifinit(ifp, ifa);
2783		}
2784#endif
2785	}
2786	return (0);
2787}
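
/*
 * Editorial example (not part of the original file): a sketch of a
 * hypothetical Ethernet driver replacing its station address through
 * if_setlladdr().  The address bytes and function name are illustrative;
 * the length must equal the current sdl_alen (6 for Ethernet).
 */
#if 0	/* Illustrative sketch; not compiled as part of this file. */
static int
example_set_lladdr(struct ifnet *ifp)
{
	static const u_char mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	/*
	 * if_setlladdr() copies the bytes into if_addr's sockaddr_dl and,
	 * if the interface is up, bounces it so that the filter is
	 * reprogrammed and gratuitous ARPs go out.
	 */
	return (if_setlladdr(ifp, mac, sizeof(mac)));
}
#endif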
2788
2789/*
2790 * The name argument must be a pointer to storage which will last as
2791 * long as the interface does.  For physical devices, the result of
2792 * device_get_name(dev) is a good choice and for pseudo-devices a
2793 * static string works well.
2794 */
2795void
2796if_initname(struct ifnet *ifp, const char *name, int unit)
2797{
2798	ifp->if_dname = name;
2799	ifp->if_dunit = unit;
2800	if (unit != IF_DUNIT_NONE)
2801		snprintf(ifp->if_xname, IFNAMSIZ, "%s%d", name, unit);
2802	else
2803		strlcpy(ifp->if_xname, name, IFNAMSIZ);
2804}
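
/*
 * Editorial example (not part of the original file): the attach-time
 * naming idiom described above, for a hypothetical new-bus driver and a
 * hypothetical pseudo-interface.  Names are illustrative only.
 */
#if 0	/* Illustrative sketch; not compiled as part of this file. */
static void
example_initname_newbus(struct ifnet *ifp, device_t dev)
{
	/* Physical device: name and unit come from new-bus and persist. */
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
}

static void
example_initname_pseudo(struct ifnet *ifp)
{
	/* Pseudo-device: a static string and IF_DUNIT_NONE also work. */
	if_initname(ifp, "examplepseudo", IF_DUNIT_NONE);
}
#endif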
2805
2806int
2807if_printf(struct ifnet *ifp, const char * fmt, ...)
2808{
2809	va_list ap;
2810	int retval;
2811
2812	retval = printf("%s: ", ifp->if_xname);
2813	va_start(ap, fmt);
2814	retval += vprintf(fmt, ap);
2815	va_end(ap);
2816	return (retval);
2817}
2818
2819/*
2820 * When an interface is marked IFF_NEEDSGIANT, its if_start() routine cannot
2821 * be called without Giant.  However, we often can't acquire the Giant lock
2822 * at those points; instead, we defer the call to a task queue that runs
2823 * if_start_deferred() with Giant held.
2824 *
2825 * XXXRW: We need to make sure that the ifnet isn't fully detached until any
2826 * outstanding if_start_deferred() tasks have completed; otherwise they will
2827 * run after the free.  This probably means waiting in if_detach().
2828 */
2829void
2830if_start(struct ifnet *ifp)
2831{
2832
2833	if (ifp->if_flags & IFF_NEEDSGIANT) {
2834		if (mtx_owned(&Giant))
2835			(*(ifp)->if_start)(ifp);
2836		else
2837			taskqueue_enqueue(taskqueue_swi_giant,
2838			    &ifp->if_starttask);
2839	} else
2840		(*(ifp)->if_start)(ifp);
2841}
2842
2843static void
2844if_start_deferred(void *context, int pending)
2845{
2846	struct ifnet *ifp;
2847
2848	GIANT_REQUIRED;
2849
2850	ifp = context;
2851	(ifp->if_start)(ifp);
2852}
2853
2854/*
2855 * Backwards-compatibility if_transmit() method for drivers
2856 * that have not implemented their own.
2857 */
2858static int
2859if_transmit(struct ifnet *ifp, struct mbuf *m)
2860{
2861	int error;
2862
2863	IFQ_HANDOFF(ifp, m, error);
2864	return (error);
2865}
2866
2867int
2868if_handoff(struct ifqueue *ifq, struct mbuf *m, struct ifnet *ifp, int adjust)
2869{
2870	int active = 0;
2871
2872	IF_LOCK(ifq);
2873	if (_IF_QFULL(ifq)) {
2874		_IF_DROP(ifq);
2875		IF_UNLOCK(ifq);
2876		m_freem(m);
2877		return (0);
2878	}
2879	if (ifp != NULL) {
2880		ifp->if_obytes += m->m_pkthdr.len + adjust;
2881		if (m->m_flags & (M_BCAST|M_MCAST))
2882			ifp->if_omcasts++;
2883		active = ifp->if_drv_flags & IFF_DRV_OACTIVE;
2884	}
2885	_IF_ENQUEUE(ifq, m);
2886	IF_UNLOCK(ifq);
2887	if (ifp != NULL && !active)
2888		if_start(ifp);
2889	return (1);
2890}
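
/*
 * Editorial example (not part of the original file): a sketch of a
 * hypothetical driver handing an outbound packet to a private ifqueue
 * with if_handoff().  Names and error handling are illustrative only.
 */
#if 0	/* Illustrative sketch; not compiled as part of this file. */
static int
example_enqueue(struct ifqueue *exq, struct mbuf *m, struct ifnet *ifp)
{
	/*
	 * if_handoff() returns 0 after dropping and freeing the mbuf when
	 * the queue is full; on success it returns 1 and, unless the
	 * interface is already marked active, kicks if_start().
	 */
	if (!if_handoff(exq, m, ifp, 0))
		return (ENOBUFS);
	return (0);
}
#endif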
2891
2892void
2893if_register_com_alloc(u_char type,
2894    if_com_alloc_t *a, if_com_free_t *f)
2895{
2896
2897	KASSERT(if_com_alloc[type] == NULL,
2898	    ("if_register_com_alloc: %d already registered", type));
2899	KASSERT(if_com_free[type] == NULL,
2900	    ("if_register_com_alloc: %d free already registered", type));
2901
2902	if_com_alloc[type] = a;
2903	if_com_free[type] = f;
2904}
2905
2906void
2907if_deregister_com_alloc(u_char type)
2908{
2909
2910	KASSERT(if_com_alloc[type] != NULL,
2911	    ("if_deregister_com_alloc: %d not registered", type));
2912	KASSERT(if_com_free[type] != NULL,
2913	    ("if_deregister_com_alloc: %d free not registered", type));
2914	if_com_alloc[type] = NULL;
2915	if_com_free[type] = NULL;
2916}
2917