1/*
2 * Copyright (c) 2004-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include "kpi_interface.h"
30
31#include <sys/queue.h>
32#include <sys/param.h>	/* for definition of NULL */
33#include <kern/debug.h> /* for panic */
34#include <sys/errno.h>
35#include <sys/socket.h>
36#include <sys/kern_event.h>
37#include <sys/kernel.h>
38#include <sys/malloc.h>
39#include <sys/kpi_mbuf.h>
40#include <sys/mcache.h>
41#include <sys/protosw.h>
42#include <sys/syslog.h>
43#include <net/if_var.h>
44#include <net/if_dl.h>
45#include <net/dlil.h>
46#include <net/if_types.h>
48#include <net/if_arp.h>
49#include <net/if_llreach.h>
50#include <net/if_ether.h>
51#include <net/route.h>
52#include <libkern/libkern.h>
53#include <libkern/OSAtomic.h>
54#include <kern/locks.h>
55#include <kern/clock.h>
56#include <sys/sockio.h>
57#include <sys/proc.h>
58#include <sys/sysctl.h>
59#include <sys/mbuf.h>
60#include <netinet/ip_var.h>
61#include <netinet/udp.h>
62#include <netinet/udp_var.h>
63#include <netinet/tcp.h>
64#include <netinet/tcp_var.h>
65#include <netinet/in_pcb.h>
66#ifdef INET
67#include <netinet/igmp_var.h>
68#endif
69#ifdef INET6
70#include <netinet6/mld6_var.h>
71#endif
72
73#include "net/net_str_id.h"
74
75#if CONFIG_MACF
76#include <sys/kauth.h>
77#include <security/mac_framework.h>
78#endif
79
80#define	TOUCHLASTCHANGE(__if_lastchange) {				\
81	(__if_lastchange)->tv_sec = net_uptime();			\
82	(__if_lastchange)->tv_usec = 0;					\
83}
84
85static errno_t ifnet_defrouter_llreachinfo(ifnet_t, int,
86    struct ifnet_llreach_info *);
87static void ifnet_kpi_free(ifnet_t);
88static errno_t ifnet_list_get_common(ifnet_family_t, boolean_t, ifnet_t **,
89    u_int32_t *);
90static errno_t ifnet_set_lladdr_internal(ifnet_t, const void *, size_t,
91    u_char, int);
92static errno_t ifnet_awdl_check_eflags(ifnet_t, u_int32_t *, u_int32_t *);
93
/*
 * Temporary workaround until we have real reference counting.
 *
 * We hide the details of calling dlil_if_release (which should really
 * be called "recycle") by invoking it from our if_free function
 * pointer. We have to stash the client's original detach function
 * somewhere so that we can still call it.
 */
102static void
103ifnet_kpi_free(ifnet_t ifp)
104{
105	ifnet_detached_func detach_func = ifp->if_kpi_storage;
106
107	if (detach_func != NULL)
108		detach_func(ifp);
109
110	if (ifp->if_broadcast.length > sizeof (ifp->if_broadcast.u.buffer)) {
111		FREE(ifp->if_broadcast.u.ptr, M_IFADDR);
112		ifp->if_broadcast.u.ptr = NULL;
113	}
114
115	dlil_if_release(ifp);
116}
117
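/*
 * Legacy interface allocation.  This simply maps the client-supplied
 * ifnet_init_params onto an ifnet_init_eparams structure marked
 * IFNET_INIT_LEGACY and defers to ifnet_allocate_extended() below.
 *
 * A minimal caller sketch (driver names here are hypothetical):
 *
 *	struct ifnet_init_params init;
 *	ifnet_t ifp;
 *	errno_t error;
 *
 *	bzero(&init, sizeof (init));
 *	init.name = "foo";
 *	init.unit = 0;
 *	init.family = IFNET_FAMILY_ETHERNET;
 *	init.type = IFT_ETHER;
 *	init.output = foo_output;
 *	init.demux = foo_demux;
 *	init.add_proto = foo_add_proto;
 *	init.del_proto = foo_del_proto;
 *	error = ifnet_allocate(&init, &ifp);
 */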
118errno_t
119ifnet_allocate(const struct ifnet_init_params *init, ifnet_t *interface)
120{
121	struct ifnet_init_eparams einit;
122
123	bzero(&einit, sizeof (einit));
124
125	einit.ver		= IFNET_INIT_CURRENT_VERSION;
126	einit.len		= sizeof (einit);
127	einit.flags		= IFNET_INIT_LEGACY;
128	einit.uniqueid		= init->uniqueid;
129	einit.uniqueid_len	= init->uniqueid_len;
130	einit.name		= init->name;
131	einit.unit		= init->unit;
132	einit.family		= init->family;
133	einit.type		= init->type;
134	einit.output		= init->output;
135	einit.demux		= init->demux;
136	einit.add_proto		= init->add_proto;
137	einit.del_proto		= init->del_proto;
138	einit.check_multi	= init->check_multi;
139	einit.framer		= init->framer;
140	einit.softc		= init->softc;
141	einit.ioctl		= init->ioctl;
142	einit.set_bpf_tap	= init->set_bpf_tap;
143	einit.detach		= init->detach;
144	einit.event		= init->event;
145	einit.broadcast_addr	= init->broadcast_addr;
146	einit.broadcast_len	= init->broadcast_len;
147
148	return (ifnet_allocate_extended(&einit, interface));
149}
150
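/*
 * Extended interface allocation.  The client parameters are validated
 * (version, length, family, name and type), the legacy and new-model
 * output paths are normalized, an ifnet is acquired from DLIL, and the
 * bandwidth, latency, framer and broadcast-address state is filled in.
 * With IFNET_INIT_LEGACY the caller must supply output() and must not
 * use the new-model callbacks; otherwise start() is mandatory and
 * outbound packets are staged through the if_snd queue.
 */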
151errno_t
152ifnet_allocate_extended(const struct ifnet_init_eparams *einit0,
153    ifnet_t *interface)
154{
155	struct ifnet_init_eparams einit;
156	struct ifnet *ifp = NULL;
157	int error;
158
159	einit = *einit0;
160
161	if (einit.ver != IFNET_INIT_CURRENT_VERSION ||
162	    einit.len < sizeof (einit))
163		return (EINVAL);
164
165	if (einit.family == 0 || einit.name == NULL ||
166	    strlen(einit.name) >= IFNAMSIZ ||
167	    (einit.type & 0xFFFFFF00) != 0 || einit.type == 0)
168		return (EINVAL);
169
170	if (einit.flags & IFNET_INIT_LEGACY) {
171		if (einit.output == NULL || einit.flags != IFNET_INIT_LEGACY)
172			return (EINVAL);
173
174		einit.pre_enqueue = NULL;
175		einit.start = NULL;
176		einit.output_ctl = NULL;
177		einit.output_sched_model = IFNET_SCHED_MODEL_NORMAL;
178		einit.input_poll = NULL;
179		einit.input_ctl = NULL;
180	} else {
181		if (einit.start == NULL)
182			return (EINVAL);
183
184		einit.output = NULL;
185		if (einit.output_sched_model >= IFNET_SCHED_MODEL_MAX)
186			return (EINVAL);
187
188		if (einit.flags & IFNET_INIT_INPUT_POLL) {
189			if (einit.input_poll == NULL || einit.input_ctl == NULL)
190				return (EINVAL);
191		} else {
192			einit.input_poll = NULL;
193			einit.input_ctl = NULL;
194		}
195	}
196
197	error = dlil_if_acquire(einit.family, einit.uniqueid,
198	    einit.uniqueid_len, &ifp);
199
200	if (error == 0) {
201		u_int64_t br;
202
203		/*
204		 * Cast ifp->if_name as non const. dlil_if_acquire sets it up
205		 * to point to storage of at least IFNAMSIZ bytes. It is safe
206		 * to write to this.
207		 */
208		strlcpy(__DECONST(char *, ifp->if_name), einit.name, IFNAMSIZ);
209		ifp->if_type		= einit.type;
210		ifp->if_family		= einit.family;
211		ifp->if_subfamily	= einit.subfamily;
212		ifp->if_unit		= einit.unit;
213		ifp->if_output		= einit.output;
214		ifp->if_pre_enqueue	= einit.pre_enqueue;
215		ifp->if_start		= einit.start;
216		ifp->if_output_ctl	= einit.output_ctl;
217		ifp->if_output_sched_model = einit.output_sched_model;
218		ifp->if_output_bw.eff_bw = einit.output_bw;
219		ifp->if_output_bw.max_bw = einit.output_bw_max;
220		ifp->if_output_lt.eff_lt = einit.output_lt;
221		ifp->if_output_lt.max_lt = einit.output_lt_max;
222		ifp->if_input_poll	= einit.input_poll;
223		ifp->if_input_ctl	= einit.input_ctl;
224		ifp->if_input_bw.eff_bw	= einit.input_bw;
225		ifp->if_input_bw.max_bw	= einit.input_bw_max;
226		ifp->if_input_lt.eff_lt	= einit.input_lt;
227		ifp->if_input_lt.max_lt	= einit.input_lt_max;
228		ifp->if_demux		= einit.demux;
229		ifp->if_add_proto	= einit.add_proto;
230		ifp->if_del_proto	= einit.del_proto;
231		ifp->if_check_multi	= einit.check_multi;
232		ifp->if_framer_legacy	= einit.framer;
233		ifp->if_framer		= einit.framer_extended;
234		ifp->if_softc		= einit.softc;
235		ifp->if_ioctl		= einit.ioctl;
236		ifp->if_set_bpf_tap	= einit.set_bpf_tap;
237		ifp->if_free		= ifnet_kpi_free;
238		ifp->if_event		= einit.event;
239		ifp->if_kpi_storage	= einit.detach;
240
241		/* Initialize external name (name + unit) */
242		snprintf(__DECONST(char *, ifp->if_xname), IFXNAMSIZ,
243		    "%s%d", ifp->if_name, ifp->if_unit);
244
245		/*
246		 * On embedded, framer() is already in the extended form;
247		 * we simply use it as is, unless the caller specifies
248		 * framer_extended() which will then override it.
249		 *
250		 * On non-embedded, framer() has long been exposed as part
251		 * of the public KPI, and therefore its signature must
252		 * remain the same (without the pre- and postpend length
253		 * parameters.)  We special case ether_frameout, such that
254		 * it gets mapped to its extended variant.  All other cases
255		 * utilize the stub routine which will simply return zeroes
256		 * for those new parameters.
257		 *
258		 * Internally, DLIL will only use the extended callback
259		 * variant which is represented by if_framer.
260		 */
261		if (ifp->if_framer == NULL && ifp->if_framer_legacy != NULL) {
262			if (ifp->if_framer_legacy == ether_frameout)
263				ifp->if_framer = ether_frameout_extended;
264			else
265				ifp->if_framer = ifnet_framer_stub;
266		}
267
268		if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
269			ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
270		else if (ifp->if_output_bw.eff_bw == 0)
271			ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
272
273		if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
274			ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
275		else if (ifp->if_input_bw.eff_bw == 0)
276			ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
277
278		if (ifp->if_output_bw.max_bw == 0)
279			ifp->if_output_bw = ifp->if_input_bw;
280		else if (ifp->if_input_bw.max_bw == 0)
281			ifp->if_input_bw = ifp->if_output_bw;
282
283		/* Pin if_baudrate to 32 bits */
284		br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
285		if (br != 0)
286			ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
287
288		if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
289			ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
290		else if (ifp->if_output_lt.eff_lt == 0)
291			ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
292
293		if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
294			ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
295		else if (ifp->if_input_lt.eff_lt == 0)
296			ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
297
298		if (ifp->if_output_lt.max_lt == 0)
299			ifp->if_output_lt = ifp->if_input_lt;
300		else if (ifp->if_input_lt.max_lt == 0)
301			ifp->if_input_lt = ifp->if_output_lt;
302
303		if (ifp->if_ioctl == NULL)
304			ifp->if_ioctl = ifp_if_ioctl;
305
306		if (ifp->if_start != NULL) {
307			ifp->if_eflags |= IFEF_TXSTART;
308			if (ifp->if_pre_enqueue == NULL)
309				ifp->if_pre_enqueue = ifnet_enqueue;
310			ifp->if_output = ifp->if_pre_enqueue;
311		} else {
312			ifp->if_eflags &= ~IFEF_TXSTART;
313		}
314
315		if (ifp->if_input_poll != NULL)
316			ifp->if_eflags |= IFEF_RXPOLL;
317		else
318			ifp->if_eflags &= ~IFEF_RXPOLL;
319
320		VERIFY(!(einit.flags & IFNET_INIT_LEGACY) ||
321		    (ifp->if_pre_enqueue == NULL && ifp->if_start == NULL &&
322		    ifp->if_output_ctl == NULL && ifp->if_input_poll == NULL &&
323		    ifp->if_input_ctl == NULL));
324		VERIFY(!(einit.flags & IFNET_INIT_INPUT_POLL) ||
325		    (ifp->if_input_poll != NULL && ifp->if_input_ctl != NULL));
326
327		if (einit.broadcast_len && einit.broadcast_addr) {
328			if (einit.broadcast_len >
329			    sizeof (ifp->if_broadcast.u.buffer)) {
330				MALLOC(ifp->if_broadcast.u.ptr, u_char *,
331				    einit.broadcast_len, M_IFADDR, M_NOWAIT);
332				if (ifp->if_broadcast.u.ptr == NULL) {
333					error = ENOMEM;
334				} else {
335					bcopy(einit.broadcast_addr,
336					    ifp->if_broadcast.u.ptr,
337					    einit.broadcast_len);
338				}
339			} else {
340				bcopy(einit.broadcast_addr,
341				    ifp->if_broadcast.u.buffer,
342				    einit.broadcast_len);
343			}
344			ifp->if_broadcast.length = einit.broadcast_len;
345		} else {
346			bzero(&ifp->if_broadcast, sizeof (ifp->if_broadcast));
347		}
348
349		IFCQ_TARGET_QDELAY(&ifp->if_snd) =
350		    einit.output_target_qdelay;
351		IFCQ_MAXLEN(&ifp->if_snd) = einit.sndq_maxlen;
352
353		if (error == 0) {
354			*interface = ifp;
355			// temporary - this should be done in dlil_if_acquire
356			ifnet_reference(ifp);
357		} else {
358			dlil_if_release(ifp);
359			*interface = NULL;
360		}
361	}
362
363	/*
364	 * Note: We should do something here to indicate that we haven't been
365	 * attached yet. By doing so, we can catch the case in ifnet_release
366	 * where the reference count reaches zero and call the recycle
367	 * function. If the interface is attached, the interface will be
368	 * recycled when the interface's if_free function is called. If the
369	 * interface is never attached, the if_free function will never be
370	 * called and the interface will never be recycled.
371	 */
372
373	return (error);
374}
375
376errno_t
377ifnet_reference(ifnet_t ifp)
378{
379	return (dlil_if_ref(ifp));
380}
381
382errno_t
383ifnet_release(ifnet_t ifp)
384{
385	return (dlil_if_free(ifp));
386}
387
388errno_t
389ifnet_interface_family_find(const char *module_string,
390    ifnet_family_t *family_id)
391{
392	if (module_string == NULL || family_id == NULL)
393		return (EINVAL);
394
395	return (net_str_id_find_internal(module_string, family_id,
396	    NSI_IF_FAM_ID, 1));
397}
398
399void *
400ifnet_softc(ifnet_t interface)
401{
402	return ((interface == NULL) ? NULL : interface->if_softc);
403}
404
405const char *
406ifnet_name(ifnet_t interface)
407{
408	return ((interface == NULL) ? NULL : interface->if_name);
409}
410
411ifnet_family_t
412ifnet_family(ifnet_t interface)
413{
414	return ((interface == NULL) ? 0 : interface->if_family);
415}
416
417ifnet_subfamily_t
418ifnet_subfamily(ifnet_t interface)
419{
420	return ((interface == NULL) ? 0 : interface->if_subfamily);
421}
422
423u_int32_t
424ifnet_unit(ifnet_t interface)
425{
426	return ((interface == NULL) ? (u_int32_t)0xffffffff :
427	    (u_int32_t)interface->if_unit);
428}
429
430u_int32_t
431ifnet_index(ifnet_t interface)
432{
433	return ((interface == NULL) ? (u_int32_t)0xffffffff :
434	    interface->if_index);
435}
436
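/*
 * Update if_flags under the ifnet exclusive lock.  Changes to IFF_UP
 * are routed through if_updown() so the up/down machinery runs, and
 * toggling IFF_MULTICAST (re)initializes IGMP/MLD silent mode on the
 * interface.
 */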
437errno_t
438ifnet_set_flags(ifnet_t interface, u_int16_t new_flags, u_int16_t mask)
439{
440	uint16_t old_flags;
441
442	if (interface == NULL)
443		return (EINVAL);
444
445	ifnet_lock_exclusive(interface);
446
447	/* If we are modifying the up/down state, call if_updown */
448	if ((mask & IFF_UP) != 0) {
449		if_updown(interface, (new_flags & IFF_UP) == IFF_UP);
450	}
451
452	old_flags = interface->if_flags;
453	interface->if_flags = (new_flags & mask) | (interface->if_flags & ~mask);
454	/* If we are modifying the multicast flag, set/unset the silent flag */
455	if ((old_flags & IFF_MULTICAST) !=
456	    (interface->if_flags & IFF_MULTICAST)) {
457#if INET
458		if (IGMP_IFINFO(interface) != NULL)
459			igmp_initsilent(interface, IGMP_IFINFO(interface));
460#endif /* INET */
461#if INET6
462		if (MLD_IFINFO(interface) != NULL)
463			mld6_initsilent(interface, MLD_IFINFO(interface));
464#endif /* INET6 */
465	}
466
467	ifnet_lock_done(interface);
468
469	return (0);
470}
471
472u_int16_t
473ifnet_flags(ifnet_t interface)
474{
475	return ((interface == NULL) ? 0 : interface->if_flags);
476}
477
478/*
479 * This routine ensures the following:
480 *
481 * If IFEF_AWDL is set by the caller, also set the rest of flags as
482 * defined in IFEF_AWDL_MASK.
483 *
484 * If IFEF_AWDL has been set on the interface and the caller attempts
485 * to clear one or more of the associated flags in IFEF_AWDL_MASK,
486 * return failure.
487 *
488 * If IFEF_AWDL_RESTRICTED is set by the caller, make sure IFEF_AWDL is set
489 * on the interface.
490 *
491 * All other flags not associated with AWDL are not affected.
492 *
493 * See <net/if.h> for current definition of IFEF_AWDL_MASK.
494 */
495static errno_t
496ifnet_awdl_check_eflags(ifnet_t ifp, u_int32_t *new_eflags, u_int32_t *mask)
497{
498	u_int32_t eflags;
499
500	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
501
502	eflags = (*new_eflags & *mask) | (ifp->if_eflags & ~(*mask));
503
504	if (ifp->if_eflags & IFEF_AWDL) {
505		if (eflags & IFEF_AWDL) {
506			if ((eflags & IFEF_AWDL_MASK) != IFEF_AWDL_MASK)
507				return (EINVAL);
508		} else {
509			*new_eflags &= ~IFEF_AWDL_MASK;
510			*mask |= IFEF_AWDL_MASK;
511		}
512	} else if (eflags & IFEF_AWDL) {
513		*new_eflags |= IFEF_AWDL_MASK;
514		*mask |= IFEF_AWDL_MASK;
515	} else if (eflags & IFEF_AWDL_RESTRICTED &&
516	    !(ifp->if_eflags & IFEF_AWDL))
517		return (EINVAL);
518
519	return (0);
520}
521
522errno_t
523ifnet_set_eflags(ifnet_t interface, u_int32_t new_flags, u_int32_t mask)
524{
525	uint32_t oeflags;
526	struct kev_msg ev_msg;
527	struct net_event_data ev_data;
528
529	if (interface == NULL)
530		return (EINVAL);
531
532	bzero(&ev_msg, sizeof(ev_msg));
533	ifnet_lock_exclusive(interface);
534	/*
535	 * Sanity checks for IFEF_AWDL and its related flags.
536	 */
537	if (ifnet_awdl_check_eflags(interface, &new_flags, &mask) != 0) {
538		ifnet_lock_done(interface);
539		return (EINVAL);
540	}
541	oeflags = interface->if_eflags;
542	interface->if_eflags =
543	    (new_flags & mask) | (interface->if_eflags & ~mask);
544	ifnet_lock_done(interface);
545	if (interface->if_eflags & IFEF_AWDL_RESTRICTED &&
546	    !(oeflags & IFEF_AWDL_RESTRICTED)) {
547		ev_msg.event_code = KEV_DL_AWDL_RESTRICTED;
548		/*
549		 * The interface is now restricted to applications that have
550		 * the entitlement.
551		 * The check for the entitlement will be done in the data
552		 * path, so we don't have to do anything here.
553		 */
554	} else if (oeflags & IFEF_AWDL_RESTRICTED &&
555	    !(interface->if_eflags & IFEF_AWDL_RESTRICTED))
556		ev_msg.event_code = KEV_DL_AWDL_UNRESTRICTED;
557	/*
558	 * Notify configd so that it has a chance to perform better
559	 * reachability detection.
560	 */
561	if (ev_msg.event_code) {
562		bzero(&ev_data, sizeof(ev_data));
563		ev_msg.vendor_code = KEV_VENDOR_APPLE;
564		ev_msg.kev_class = KEV_NETWORK_CLASS;
565		ev_msg.kev_subclass = KEV_DL_SUBCLASS;
566		strlcpy(ev_data.if_name, interface->if_name, IFNAMSIZ);
567		ev_data.if_family = interface->if_family;
568		ev_data.if_unit = interface->if_unit;
569		ev_msg.dv[0].data_length = sizeof(struct net_event_data);
570		ev_msg.dv[0].data_ptr = &ev_data;
571		ev_msg.dv[1].data_length = 0;
572		kev_post_msg(&ev_msg);
573	}
574
575	return (0);
576}
577
578u_int32_t
579ifnet_eflags(ifnet_t interface)
580{
581	return ((interface == NULL) ? 0 : interface->if_eflags);
582}
583
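/*
 * Update the interface's idle (auto-drain) flags.  The caller must hold
 * rnh_lock and the ifnet exclusive lock; ifnet_set_idle_flags() below
 * is the convenience wrapper that takes both.  If the interface is not
 * yet attached, the request is stashed and applied at attach time.
 */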
584errno_t
585ifnet_set_idle_flags_locked(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
586{
587	int before, after;
588
589	if (ifp == NULL)
590		return (EINVAL);
591
592	lck_mtx_assert(rnh_lock, LCK_MTX_ASSERT_OWNED);
593	ifnet_lock_assert(ifp, IFNET_LCK_ASSERT_EXCLUSIVE);
594
595	/*
596	 * If this is called prior to ifnet attach, the actual work will
597	 * be done at attach time.  Otherwise, if it is called after
598	 * ifnet detach, then it is a no-op.
599	 */
600	if (!ifnet_is_attached(ifp, 0)) {
601		ifp->if_idle_new_flags = new_flags;
602		ifp->if_idle_new_flags_mask = mask;
603		return (0);
604	} else {
605		ifp->if_idle_new_flags = ifp->if_idle_new_flags_mask = 0;
606	}
607
608	before = ifp->if_idle_flags;
609	ifp->if_idle_flags = (new_flags & mask) | (ifp->if_idle_flags & ~mask);
610	after = ifp->if_idle_flags;
611
612	if ((after - before) < 0 && ifp->if_idle_flags == 0 &&
613	    ifp->if_want_aggressive_drain != 0) {
614		ifp->if_want_aggressive_drain = 0;
615		if (ifnet_aggressive_drainers == 0)
616			panic("%s: ifp=%p negative aggdrain!", __func__, ifp);
617	} else if ((after - before) > 0 && ifp->if_want_aggressive_drain == 0) {
618		ifp->if_want_aggressive_drain++;
619		if (++ifnet_aggressive_drainers == 0)
620			panic("%s: ifp=%p wraparound aggdrain!", __func__, ifp);
621	}
622
623	return (0);
624}
625
626errno_t
627ifnet_set_idle_flags(ifnet_t ifp, u_int32_t new_flags, u_int32_t mask)
628{
629	errno_t err;
630
631	lck_mtx_lock(rnh_lock);
632	ifnet_lock_exclusive(ifp);
633	err = ifnet_set_idle_flags_locked(ifp, new_flags, mask);
634	ifnet_lock_done(ifp);
635	lck_mtx_unlock(rnh_lock);
636
637	return (err);
638}
639
640u_int32_t
641ifnet_idle_flags(ifnet_t ifp)
642{
643	return ((ifp == NULL) ? 0 : ifp->if_idle_flags);
644}
645
646errno_t
647ifnet_set_link_quality(ifnet_t ifp, int quality)
648{
649	errno_t err = 0;
650
651	if (ifp == NULL || quality < IFNET_LQM_MIN || quality > IFNET_LQM_MAX) {
652		err = EINVAL;
653		goto done;
654	}
655
656	if (!ifnet_is_attached(ifp, 0)) {
657		err = ENXIO;
658		goto done;
659	}
660
661	if_lqm_update(ifp, quality);
662
663done:
664	return (err);
665}
666
667int
668ifnet_link_quality(ifnet_t ifp)
669{
670	int lqm;
671
672	if (ifp == NULL)
673		return (IFNET_LQM_THRESH_OFF);
674
675	ifnet_lock_shared(ifp);
676	lqm = ifp->if_lqm;
677	ifnet_lock_done(ifp);
678
679	return (lqm);
680}
681
682static errno_t
683ifnet_defrouter_llreachinfo(ifnet_t ifp, int af,
684    struct ifnet_llreach_info *iflri)
685{
686	if (ifp == NULL || iflri == NULL)
687		return (EINVAL);
688
689	VERIFY(af == AF_INET || af == AF_INET6);
690
691	return (ifnet_llreach_get_defrouter(ifp, af, iflri));
692}
693
694errno_t
695ifnet_inet_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
696{
697	return (ifnet_defrouter_llreachinfo(ifp, AF_INET, iflri));
698}
699
700errno_t
701ifnet_inet6_defrouter_llreachinfo(ifnet_t ifp, struct ifnet_llreach_info *iflri)
702{
703	return (ifnet_defrouter_llreachinfo(ifp, AF_INET6, iflri));
704}
705
706errno_t
707ifnet_set_capabilities_supported(ifnet_t ifp, u_int32_t new_caps,
708    u_int32_t mask)
709{
710	errno_t error = 0;
711	int tmp;
712
713	if (ifp == NULL)
714		return (EINVAL);
715
716	ifnet_lock_exclusive(ifp);
717	tmp = (new_caps & mask) | (ifp->if_capabilities & ~mask);
718	if ((tmp & ~IFCAP_VALID))
719		error = EINVAL;
720	else
721		ifp->if_capabilities = tmp;
722	ifnet_lock_done(ifp);
723
724	return (error);
725}
726
727u_int32_t
728ifnet_capabilities_supported(ifnet_t ifp)
729{
730	return ((ifp == NULL) ? 0 : ifp->if_capabilities);
731}
732
733
734errno_t
735ifnet_set_capabilities_enabled(ifnet_t ifp, u_int32_t new_caps,
736    u_int32_t mask)
737{
738	errno_t error = 0;
739	int tmp;
740	struct kev_msg ev_msg;
741	struct net_event_data ev_data;
742
743	if (ifp == NULL)
744		return (EINVAL);
745
746	ifnet_lock_exclusive(ifp);
747	tmp = (new_caps & mask) | (ifp->if_capenable & ~mask);
748	if ((tmp & ~IFCAP_VALID) || (tmp & ~ifp->if_capabilities))
749		error = EINVAL;
750	else
751		ifp->if_capenable = tmp;
752	ifnet_lock_done(ifp);
753
754	/* Notify application of the change */
755	bzero(&ev_data, sizeof (struct net_event_data));
756	bzero(&ev_msg, sizeof (struct kev_msg));
757	ev_msg.vendor_code	= KEV_VENDOR_APPLE;
758	ev_msg.kev_class	= KEV_NETWORK_CLASS;
759	ev_msg.kev_subclass	= KEV_DL_SUBCLASS;
760
761	ev_msg.event_code	= KEV_DL_IFCAP_CHANGED;
762	strlcpy(&ev_data.if_name[0], ifp->if_name, IFNAMSIZ);
763	ev_data.if_family	= ifp->if_family;
764	ev_data.if_unit		= (u_int32_t)ifp->if_unit;
765	ev_msg.dv[0].data_length = sizeof (struct net_event_data);
766	ev_msg.dv[0].data_ptr = &ev_data;
767	ev_msg.dv[1].data_length = 0;
768	kev_post_msg(&ev_msg);
769
770	return (error);
771}
772
773u_int32_t
774ifnet_capabilities_enabled(ifnet_t ifp)
775{
776	return ((ifp == NULL) ? 0 : ifp->if_capenable);
777}
778
779static const ifnet_offload_t offload_mask =
780	(IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
781	IFNET_IP_FRAGMENT | IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 |
782	IFNET_IPV6_FRAGMENT | IFNET_CSUM_PARTIAL | IFNET_VLAN_TAGGING |
783	IFNET_VLAN_MTU | IFNET_MULTIPAGES | IFNET_TSO_IPV4 | IFNET_TSO_IPV6 |
784	IFNET_TX_STATUS);
785
786static const ifnet_offload_t any_offload_csum =
787	(IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP | IFNET_CSUM_FRAGMENT |
788	IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6 | IFNET_CSUM_PARTIAL);
789
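/*
 * Replace the interface's hardware-assist (offload) flags.  Only bits
 * in offload_mask are retained, and IFNET_CSUM_PARTIAL implies the
 * TCP/UDP checksum bits for both IPv4 and IPv6.  The matching IFCAP_*
 * bits are then derived and applied to both the supported and enabled
 * capability sets.
 *
 * A hypothetical driver advertising checksum and TSO support might do:
 *
 *	error = ifnet_set_offload(ifp,
 *	    IFNET_CSUM_IP | IFNET_CSUM_TCP | IFNET_CSUM_UDP |
 *	    IFNET_TSO_IPV4 | IFNET_TSO_IPV6);
 */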
790errno_t
791ifnet_set_offload(ifnet_t interface, ifnet_offload_t offload)
792{
793	u_int32_t ifcaps = 0;
794
795	if (interface == NULL)
796		return (EINVAL);
797
798	ifnet_lock_exclusive(interface);
799	interface->if_hwassist = (offload & offload_mask);
800	/*
801	 * Hardware capable of partial checksum offload is
802	 * flexible enough to handle any transports utilizing
803	 * Internet Checksumming.  Include those transports
804	 * here, and leave the final decision to IP.
805	 */
806	if (interface->if_hwassist & IFNET_CSUM_PARTIAL) {
807		interface->if_hwassist |= (IFNET_CSUM_TCP | IFNET_CSUM_UDP |
808		    IFNET_CSUM_TCPIPV6 | IFNET_CSUM_UDPIPV6);
809	}
810	if (dlil_verbose) {
811		log(LOG_DEBUG, "%s: set offload flags=%b\n",
812		    if_name(interface),
813		    interface->if_hwassist, IFNET_OFFLOADF_BITS);
814	}
815	ifnet_lock_done(interface);
816
817	if ((offload & any_offload_csum))
818		ifcaps |= IFCAP_HWCSUM;
819	if ((offload & IFNET_TSO_IPV4))
820		ifcaps |= IFCAP_TSO4;
821	if ((offload & IFNET_TSO_IPV6))
822		ifcaps |= IFCAP_TSO6;
823	if ((offload & IFNET_VLAN_MTU))
824		ifcaps |= IFCAP_VLAN_MTU;
825	if ((offload & IFNET_VLAN_TAGGING))
826		ifcaps |= IFCAP_VLAN_HWTAGGING;
	if ((offload & IFNET_TX_STATUS))
		ifcaps |= IFCAP_TXSTATUS;
829	if (ifcaps != 0) {
830		(void) ifnet_set_capabilities_supported(interface, ifcaps,
831		    IFCAP_VALID);
832		(void) ifnet_set_capabilities_enabled(interface, ifcaps,
833		    IFCAP_VALID);
834	}
835
836	return (0);
837}
838
839ifnet_offload_t
840ifnet_offload(ifnet_t interface)
841{
842	return ((interface == NULL) ?
843	    0 : (interface->if_hwassist & offload_mask));
844}
845
846errno_t
847ifnet_set_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t mtuLen)
848{
849	errno_t error = 0;
850
851	if (interface == NULL || mtuLen < interface->if_mtu)
852		return (EINVAL);
853
854	switch (family) {
855	case AF_INET:
856		if (interface->if_hwassist & IFNET_TSO_IPV4)
857			interface->if_tso_v4_mtu = mtuLen;
858		else
859			error = EINVAL;
860		break;
861
862	case AF_INET6:
863		if (interface->if_hwassist & IFNET_TSO_IPV6)
864			interface->if_tso_v6_mtu = mtuLen;
865		else
866			error = EINVAL;
867		break;
868
869	default:
870		error = EPROTONOSUPPORT;
871		break;
872	}
873
874	return (error);
875}
876
877errno_t
878ifnet_get_tso_mtu(ifnet_t interface, sa_family_t family, u_int32_t *mtuLen)
879{
880	errno_t error = 0;
881
882	if (interface == NULL || mtuLen == NULL)
883		return (EINVAL);
884
885	switch (family) {
886	case AF_INET:
887		if (interface->if_hwassist & IFNET_TSO_IPV4)
888			*mtuLen = interface->if_tso_v4_mtu;
889		else
890			error = EINVAL;
891		break;
892
893	case AF_INET6:
894		if (interface->if_hwassist & IFNET_TSO_IPV6)
895			*mtuLen = interface->if_tso_v6_mtu;
896		else
897			error = EINVAL;
898		break;
899
900	default:
901		error = EPROTONOSUPPORT;
902		break;
903	}
904
905	return (error);
906}
907
908errno_t
909ifnet_set_wake_flags(ifnet_t interface, u_int32_t properties, u_int32_t mask)
910{
911	struct kev_msg ev_msg;
912	struct net_event_data ev_data;
913
914	bzero(&ev_data, sizeof (struct net_event_data));
915	bzero(&ev_msg, sizeof (struct kev_msg));
916
917	if (interface == NULL)
918		return (EINVAL);
919
920	/* Do not accept wacky values */
921	if ((properties & mask) & ~IF_WAKE_VALID_FLAGS)
922		return (EINVAL);
923
924	ifnet_lock_exclusive(interface);
925
926	interface->if_wake_properties =
927	    (properties & mask) | (interface->if_wake_properties & ~mask);
928
929	ifnet_lock_done(interface);
930
931	(void) ifnet_touch_lastchange(interface);
932
933	/* Notify application of the change */
934	ev_msg.vendor_code	= KEV_VENDOR_APPLE;
935	ev_msg.kev_class	= KEV_NETWORK_CLASS;
936	ev_msg.kev_subclass	= KEV_DL_SUBCLASS;
937
938	ev_msg.event_code	= KEV_DL_WAKEFLAGS_CHANGED;
939	strlcpy(&ev_data.if_name[0], interface->if_name, IFNAMSIZ);
940	ev_data.if_family	= interface->if_family;
941	ev_data.if_unit		= (u_int32_t)interface->if_unit;
942	ev_msg.dv[0].data_length = sizeof (struct net_event_data);
943	ev_msg.dv[0].data_ptr	= &ev_data;
944	ev_msg.dv[1].data_length = 0;
945	kev_post_msg(&ev_msg);
946
947	return (0);
948}
949
950u_int32_t
951ifnet_get_wake_flags(ifnet_t interface)
952{
953	return ((interface == NULL) ? 0 : interface->if_wake_properties);
954}
955
956/*
957 * Should MIB data store a copy?
958 */
959errno_t
960ifnet_set_link_mib_data(ifnet_t interface, void *mibData, u_int32_t mibLen)
961{
962	if (interface == NULL)
963		return (EINVAL);
964
965	ifnet_lock_exclusive(interface);
966	interface->if_linkmib = (void*)mibData;
967	interface->if_linkmiblen = mibLen;
968	ifnet_lock_done(interface);
969	return (0);
970}
971
972errno_t
973ifnet_get_link_mib_data(ifnet_t interface, void *mibData, u_int32_t *mibLen)
974{
975	errno_t	result = 0;
976
977	if (interface == NULL)
978		return (EINVAL);
979
980	ifnet_lock_shared(interface);
981	if (*mibLen < interface->if_linkmiblen)
982		result = EMSGSIZE;
983	if (result == 0 && interface->if_linkmib == NULL)
984		result = ENOTSUP;
985
986	if (result == 0) {
987		*mibLen = interface->if_linkmiblen;
988		bcopy(interface->if_linkmib, mibData, *mibLen);
989	}
990	ifnet_lock_done(interface);
991
992	return (result);
993}
994
995u_int32_t
996ifnet_get_link_mib_data_length(ifnet_t interface)
997{
998	return ((interface == NULL) ? 0 : interface->if_linkmiblen);
999}
1000
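/*
 * Hand a packet (or packet chain) to DLIL for transmission.  The mbuf
 * chain is consumed in all cases; on parameter errors it is freed here
 * before EINVAL is returned.  ifnet_output_raw() sends the packet as
 * raw output, i.e. with the frame header expected to already be in
 * place.
 */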
1001errno_t
1002ifnet_output(ifnet_t interface, protocol_family_t protocol_family,
1003    mbuf_t m, void *route, const struct sockaddr *dest)
1004{
1005	if (interface == NULL || protocol_family == 0 || m == NULL) {
1006		if (m != NULL)
1007			mbuf_freem_list(m);
1008		return (EINVAL);
1009	}
1010	return (dlil_output(interface, protocol_family, m, route, dest, 0, NULL));
1011}
1012
1013errno_t
1014ifnet_output_raw(ifnet_t interface, protocol_family_t protocol_family, mbuf_t m)
1015{
1016	if (interface == NULL || m == NULL) {
1017		if (m != NULL)
1018			mbuf_freem_list(m);
1019		return (EINVAL);
1020	}
1021	return (dlil_output(interface, protocol_family, m, NULL, NULL, 1, NULL));
1022}
1023
1024errno_t
1025ifnet_set_mtu(ifnet_t interface, u_int32_t mtu)
1026{
1027	if (interface == NULL)
1028		return (EINVAL);
1029
1030	interface->if_mtu = mtu;
1031	return (0);
1032}
1033
1034u_int32_t
1035ifnet_mtu(ifnet_t interface)
1036{
1037	return ((interface == NULL) ? 0 : interface->if_mtu);
1038}
1039
1040u_char
1041ifnet_type(ifnet_t interface)
1042{
1043	return ((interface == NULL) ? 0 : interface->if_data.ifi_type);
1044}
1045
1046errno_t
1047ifnet_set_addrlen(ifnet_t interface, u_char addrlen)
1048{
1049	if (interface == NULL)
1050		return (EINVAL);
1051
1052	interface->if_data.ifi_addrlen = addrlen;
1053	return (0);
1054}
1055
1056u_char
1057ifnet_addrlen(ifnet_t interface)
1058{
1059	return ((interface == NULL) ? 0 : interface->if_data.ifi_addrlen);
1060}
1061
1062errno_t
1063ifnet_set_hdrlen(ifnet_t interface, u_char hdrlen)
1064{
1065	if (interface == NULL)
1066		return (EINVAL);
1067
1068	interface->if_data.ifi_hdrlen = hdrlen;
1069	return (0);
1070}
1071
1072u_char
1073ifnet_hdrlen(ifnet_t interface)
1074{
1075	return ((interface == NULL) ? 0 : interface->if_data.ifi_hdrlen);
1076}
1077
1078errno_t
1079ifnet_set_metric(ifnet_t interface, u_int32_t metric)
1080{
1081	if (interface == NULL)
1082		return (EINVAL);
1083
1084	interface->if_data.ifi_metric = metric;
1085	return (0);
1086}
1087
1088u_int32_t
1089ifnet_metric(ifnet_t interface)
1090{
1091	return ((interface == NULL) ? 0 : interface->if_data.ifi_metric);
1092}
1093
1094errno_t
1095ifnet_set_baudrate(struct ifnet *ifp, u_int64_t baudrate)
1096{
1097	if (ifp == NULL)
1098		return (EINVAL);
1099
1100	ifp->if_output_bw.max_bw = ifp->if_input_bw.max_bw =
1101	    ifp->if_output_bw.eff_bw = ifp->if_input_bw.eff_bw = baudrate;
1102
1103	/* Pin if_baudrate to 32 bits until we can change the storage size */
1104	ifp->if_baudrate = (baudrate > 0xFFFFFFFF) ? 0xFFFFFFFF : baudrate;
1105
1106	return (0);
1107}
1108
1109u_int64_t
1110ifnet_baudrate(struct ifnet *ifp)
1111{
1112	return ((ifp == NULL) ? 0 : ifp->if_baudrate);
1113}
1114
1115errno_t
1116ifnet_set_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1117    struct if_bandwidths *input_bw)
1118{
1119	if (ifp == NULL)
1120		return (EINVAL);
1121
1122	/* set input values first (if any), as output values depend on them */
1123	if (input_bw != NULL)
1124		(void) ifnet_set_input_bandwidths(ifp, input_bw);
1125
1126	if (output_bw != NULL)
1127		(void) ifnet_set_output_bandwidths(ifp, output_bw, FALSE);
1128
1129	return (0);
1130}
1131
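/*
 * Update the output bandwidth estimates.  Zero-valued fields in "bw"
 * leave the current values untouched; the maximum is raised to at least
 * the effective rate, an unset effective rate inherits the maximum,
 * if_baudrate is re-derived, and the send queue is notified whenever
 * the values actually change.
 */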
1132errno_t
1133ifnet_set_output_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw,
1134    boolean_t locked)
1135{
1136	struct if_bandwidths old_bw;
1137	struct ifclassq *ifq;
1138	u_int64_t br;
1139
1140	VERIFY(ifp != NULL && bw != NULL);
1141
1142	ifq = &ifp->if_snd;
1143	if (!locked)
1144		IFCQ_LOCK(ifq);
1145	IFCQ_LOCK_ASSERT_HELD(ifq);
1146
1147	old_bw = ifp->if_output_bw;
1148	if (bw->eff_bw != 0)
1149		ifp->if_output_bw.eff_bw = bw->eff_bw;
1150	if (bw->max_bw != 0)
1151		ifp->if_output_bw.max_bw = bw->max_bw;
1152	if (ifp->if_output_bw.eff_bw > ifp->if_output_bw.max_bw)
1153		ifp->if_output_bw.max_bw = ifp->if_output_bw.eff_bw;
1154	else if (ifp->if_output_bw.eff_bw == 0)
1155		ifp->if_output_bw.eff_bw = ifp->if_output_bw.max_bw;
1156
1157	/* Pin if_baudrate to 32 bits */
1158	br = MAX(ifp->if_output_bw.max_bw, ifp->if_input_bw.max_bw);
1159	if (br != 0)
1160		ifp->if_baudrate = (br > 0xFFFFFFFF) ? 0xFFFFFFFF : br;
1161
1162	/* Adjust queue parameters if needed */
1163	if (old_bw.eff_bw != ifp->if_output_bw.eff_bw ||
1164	    old_bw.max_bw != ifp->if_output_bw.max_bw)
1165		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_BANDWIDTH);
1166
1167	if (!locked)
1168		IFCQ_UNLOCK(ifq);
1169
1170	return (0);
1171}
1172
1173errno_t
1174ifnet_set_input_bandwidths(struct ifnet *ifp, struct if_bandwidths *bw)
1175{
1176	struct if_bandwidths old_bw;
1177
1178	VERIFY(ifp != NULL && bw != NULL);
1179
1180	old_bw = ifp->if_input_bw;
1181	if (bw->eff_bw != 0)
1182		ifp->if_input_bw.eff_bw = bw->eff_bw;
1183	if (bw->max_bw != 0)
1184		ifp->if_input_bw.max_bw = bw->max_bw;
1185	if (ifp->if_input_bw.eff_bw > ifp->if_input_bw.max_bw)
1186		ifp->if_input_bw.max_bw = ifp->if_input_bw.eff_bw;
1187	else if (ifp->if_input_bw.eff_bw == 0)
1188		ifp->if_input_bw.eff_bw = ifp->if_input_bw.max_bw;
1189
1190	if (old_bw.eff_bw != ifp->if_input_bw.eff_bw ||
1191	    old_bw.max_bw != ifp->if_input_bw.max_bw)
1192		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_BANDWIDTH);
1193
1194	return (0);
1195}
1196
1197u_int64_t
1198ifnet_output_linkrate(struct ifnet *ifp)
1199{
1200	struct ifclassq *ifq = &ifp->if_snd;
1201	u_int64_t rate;
1202
1203	IFCQ_LOCK_ASSERT_HELD(ifq);
1204
1205	rate = ifp->if_output_bw.eff_bw;
1206	if (IFCQ_TBR_IS_ENABLED(ifq)) {
1207		u_int64_t tbr_rate = ifp->if_snd.ifcq_tbr.tbr_rate_raw;
1208		VERIFY(tbr_rate > 0);
		rate = MIN(rate, tbr_rate);
1210	}
1211
1212	return (rate);
1213}
1214
1215u_int64_t
1216ifnet_input_linkrate(struct ifnet *ifp)
1217{
1218	return (ifp->if_input_bw.eff_bw);
1219}
1220
1221errno_t
1222ifnet_bandwidths(struct ifnet *ifp, struct if_bandwidths *output_bw,
1223    struct if_bandwidths *input_bw)
1224{
1225	if (ifp == NULL)
1226		return (EINVAL);
1227
1228	if (output_bw != NULL)
1229		*output_bw = ifp->if_output_bw;
1230	if (input_bw != NULL)
1231		*input_bw = ifp->if_input_bw;
1232
1233	return (0);
1234}
1235
1236errno_t
1237ifnet_set_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1238    struct if_latencies *input_lt)
1239{
1240	if (ifp == NULL)
1241		return (EINVAL);
1242
1243	if (output_lt != NULL)
1244		(void) ifnet_set_output_latencies(ifp, output_lt, FALSE);
1245
1246	if (input_lt != NULL)
1247		(void) ifnet_set_input_latencies(ifp, input_lt);
1248
1249	return (0);
1250}
1251
1252errno_t
1253ifnet_set_output_latencies(struct ifnet *ifp, struct if_latencies *lt,
1254    boolean_t locked)
1255{
1256	struct if_latencies old_lt;
1257	struct ifclassq *ifq;
1258
1259	VERIFY(ifp != NULL && lt != NULL);
1260
1261	ifq = &ifp->if_snd;
1262	if (!locked)
1263		IFCQ_LOCK(ifq);
1264	IFCQ_LOCK_ASSERT_HELD(ifq);
1265
1266	old_lt = ifp->if_output_lt;
1267	if (lt->eff_lt != 0)
1268		ifp->if_output_lt.eff_lt = lt->eff_lt;
1269	if (lt->max_lt != 0)
1270		ifp->if_output_lt.max_lt = lt->max_lt;
1271	if (ifp->if_output_lt.eff_lt > ifp->if_output_lt.max_lt)
1272		ifp->if_output_lt.max_lt = ifp->if_output_lt.eff_lt;
1273	else if (ifp->if_output_lt.eff_lt == 0)
1274		ifp->if_output_lt.eff_lt = ifp->if_output_lt.max_lt;
1275
1276	/* Adjust queue parameters if needed */
1277	if (old_lt.eff_lt != ifp->if_output_lt.eff_lt ||
1278	    old_lt.max_lt != ifp->if_output_lt.max_lt)
1279		ifnet_update_sndq(ifq, CLASSQ_EV_LINK_LATENCY);
1280
1281	if (!locked)
1282		IFCQ_UNLOCK(ifq);
1283
1284	return (0);
1285}
1286
1287errno_t
1288ifnet_set_input_latencies(struct ifnet *ifp, struct if_latencies *lt)
1289{
1290	struct if_latencies old_lt;
1291
1292	VERIFY(ifp != NULL && lt != NULL);
1293
1294	old_lt = ifp->if_input_lt;
1295	if (lt->eff_lt != 0)
1296		ifp->if_input_lt.eff_lt = lt->eff_lt;
1297	if (lt->max_lt != 0)
1298		ifp->if_input_lt.max_lt = lt->max_lt;
1299	if (ifp->if_input_lt.eff_lt > ifp->if_input_lt.max_lt)
1300		ifp->if_input_lt.max_lt = ifp->if_input_lt.eff_lt;
1301	else if (ifp->if_input_lt.eff_lt == 0)
1302		ifp->if_input_lt.eff_lt = ifp->if_input_lt.max_lt;
1303
1304	if (old_lt.eff_lt != ifp->if_input_lt.eff_lt ||
1305	    old_lt.max_lt != ifp->if_input_lt.max_lt)
1306		ifnet_update_rcv(ifp, CLASSQ_EV_LINK_LATENCY);
1307
1308	return (0);
1309}
1310
1311errno_t
1312ifnet_latencies(struct ifnet *ifp, struct if_latencies *output_lt,
1313    struct if_latencies *input_lt)
1314{
1315	if (ifp == NULL)
1316		return (EINVAL);
1317
1318	if (output_lt != NULL)
1319		*output_lt = ifp->if_output_lt;
1320	if (input_lt != NULL)
1321		*input_lt = ifp->if_input_lt;
1322
1323	return (0);
1324}
1325
1326errno_t
1327ifnet_set_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1328{
1329	errno_t err;
1330
1331	if (ifp == NULL)
1332		return (EINVAL);
1333	else if (!ifnet_is_attached(ifp, 1))
1334		return (ENXIO);
1335
1336	err = dlil_rxpoll_set_params(ifp, p, FALSE);
1337
1338	/* Release the io ref count */
1339	ifnet_decr_iorefcnt(ifp);
1340
1341	return (err);
1342}
1343
1344errno_t
1345ifnet_poll_params(struct ifnet *ifp, struct ifnet_poll_params *p)
1346{
1347	errno_t err;
1348
1349	if (ifp == NULL || p == NULL)
1350		return (EINVAL);
1351	else if (!ifnet_is_attached(ifp, 1))
1352		return (ENXIO);
1353
1354	err = dlil_rxpoll_get_params(ifp, p);
1355
1356	/* Release the io ref count */
1357	ifnet_decr_iorefcnt(ifp);
1358
1359	return (err);
1360}
1361
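/*
 * Interface statistics updates.  Counters are maintained with 64-bit
 * atomic adds, so drivers may call these from their data paths without
 * taking the ifnet lock; each update also touches if_lastchange.
 *
 * A hypothetical receive completion might account for a packet with:
 *
 *	ifnet_stat_increment_in(ifp, 1, mbuf_pkthdr_len(m), 0);
 */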
1362errno_t
1363ifnet_stat_increment(struct ifnet *ifp,
1364    const struct ifnet_stat_increment_param *s)
1365{
1366	if (ifp == NULL)
1367		return (EINVAL);
1368
1369	if (s->packets_in != 0)
1370		atomic_add_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1371	if (s->bytes_in != 0)
1372		atomic_add_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1373	if (s->errors_in != 0)
1374		atomic_add_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1375
1376	if (s->packets_out != 0)
1377		atomic_add_64(&ifp->if_data.ifi_opackets, s->packets_out);
1378	if (s->bytes_out != 0)
1379		atomic_add_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1380	if (s->errors_out != 0)
1381		atomic_add_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1382
1383	if (s->collisions != 0)
1384		atomic_add_64(&ifp->if_data.ifi_collisions, s->collisions);
1385	if (s->dropped != 0)
1386		atomic_add_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1387
1388	/* Touch the last change time. */
1389	TOUCHLASTCHANGE(&ifp->if_lastchange);
1390
1391	return (0);
1392}
1393
1394errno_t
1395ifnet_stat_increment_in(struct ifnet *ifp, u_int32_t packets_in,
1396    u_int32_t bytes_in, u_int32_t errors_in)
1397{
1398	if (ifp == NULL)
1399		return (EINVAL);
1400
1401	if (packets_in != 0)
1402		atomic_add_64(&ifp->if_data.ifi_ipackets, packets_in);
1403	if (bytes_in != 0)
1404		atomic_add_64(&ifp->if_data.ifi_ibytes, bytes_in);
1405	if (errors_in != 0)
1406		atomic_add_64(&ifp->if_data.ifi_ierrors, errors_in);
1407
1408	TOUCHLASTCHANGE(&ifp->if_lastchange);
1409
1410	return (0);
1411}
1412
1413errno_t
1414ifnet_stat_increment_out(struct ifnet *ifp, u_int32_t packets_out,
1415    u_int32_t bytes_out, u_int32_t errors_out)
1416{
1417	if (ifp == NULL)
1418		return (EINVAL);
1419
1420	if (packets_out != 0)
1421		atomic_add_64(&ifp->if_data.ifi_opackets, packets_out);
1422	if (bytes_out != 0)
1423		atomic_add_64(&ifp->if_data.ifi_obytes, bytes_out);
1424	if (errors_out != 0)
1425		atomic_add_64(&ifp->if_data.ifi_oerrors, errors_out);
1426
1427	TOUCHLASTCHANGE(&ifp->if_lastchange);
1428
1429	return (0);
1430}
1431
1432errno_t
1433ifnet_set_stat(struct ifnet *ifp, const struct ifnet_stats_param *s)
1434{
1435	if (ifp == NULL)
1436		return (EINVAL);
1437
1438	atomic_set_64(&ifp->if_data.ifi_ipackets, s->packets_in);
1439	atomic_set_64(&ifp->if_data.ifi_ibytes, s->bytes_in);
1440	atomic_set_64(&ifp->if_data.ifi_imcasts, s->multicasts_in);
1441	atomic_set_64(&ifp->if_data.ifi_ierrors, s->errors_in);
1442
1443	atomic_set_64(&ifp->if_data.ifi_opackets, s->packets_out);
1444	atomic_set_64(&ifp->if_data.ifi_obytes, s->bytes_out);
1445	atomic_set_64(&ifp->if_data.ifi_omcasts, s->multicasts_out);
1446	atomic_set_64(&ifp->if_data.ifi_oerrors, s->errors_out);
1447
1448	atomic_set_64(&ifp->if_data.ifi_collisions, s->collisions);
1449	atomic_set_64(&ifp->if_data.ifi_iqdrops, s->dropped);
1450	atomic_set_64(&ifp->if_data.ifi_noproto, s->no_protocol);
1451
1452	/* Touch the last change time. */
1453	TOUCHLASTCHANGE(&ifp->if_lastchange);
1454
1455	return (0);
1456}
1457
1458errno_t
1459ifnet_stat(struct ifnet *ifp, struct ifnet_stats_param *s)
1460{
1461	if (ifp == NULL)
1462		return (EINVAL);
1463
1464	atomic_get_64(s->packets_in, &ifp->if_data.ifi_ipackets);
1465	atomic_get_64(s->bytes_in, &ifp->if_data.ifi_ibytes);
1466	atomic_get_64(s->multicasts_in, &ifp->if_data.ifi_imcasts);
1467	atomic_get_64(s->errors_in, &ifp->if_data.ifi_ierrors);
1468
1469	atomic_get_64(s->packets_out, &ifp->if_data.ifi_opackets);
1470	atomic_get_64(s->bytes_out, &ifp->if_data.ifi_obytes);
1471	atomic_get_64(s->multicasts_out, &ifp->if_data.ifi_omcasts);
1472	atomic_get_64(s->errors_out, &ifp->if_data.ifi_oerrors);
1473
1474	atomic_get_64(s->collisions, &ifp->if_data.ifi_collisions);
1475	atomic_get_64(s->dropped, &ifp->if_data.ifi_iqdrops);
1476	atomic_get_64(s->no_protocol, &ifp->if_data.ifi_noproto);
1477
1478	return (0);
1479}
1480
1481errno_t
1482ifnet_touch_lastchange(ifnet_t interface)
1483{
1484	if (interface == NULL)
1485		return (EINVAL);
1486
1487	TOUCHLASTCHANGE(&interface->if_lastchange);
1488
1489	return (0);
1490}
1491
1492errno_t
1493ifnet_lastchange(ifnet_t interface, struct timeval *last_change)
1494{
1495	if (interface == NULL)
1496		return (EINVAL);
1497
1498	*last_change = interface->if_data.ifi_lastchange;
1499	/* Crude conversion from uptime to calendar time */
1500	last_change->tv_sec += boottime_sec();
1501
1502	return (0);
1503}
1504
1505errno_t
1506ifnet_get_address_list(ifnet_t interface, ifaddr_t **addresses)
1507{
1508	return (addresses == NULL ? EINVAL :
1509	    ifnet_get_address_list_family(interface, addresses, 0));
1510}
1511
1512struct ifnet_addr_list {
1513	SLIST_ENTRY(ifnet_addr_list)	ifal_le;
1514	struct ifaddr			*ifal_ifa;
1515};
1516
1517errno_t
1518ifnet_get_address_list_family(ifnet_t interface, ifaddr_t **addresses,
1519    sa_family_t family)
1520{
1521	return (ifnet_get_address_list_family_internal(interface, addresses,
1522	    family, 0, M_NOWAIT, 0));
1523}
1524
1525errno_t
1526ifnet_get_inuse_address_list(ifnet_t interface, ifaddr_t **addresses)
1527{
1528	return (addresses == NULL ? EINVAL :
1529		ifnet_get_address_list_family_internal(interface, addresses,
1530		0, 0, M_NOWAIT, 1));
1531}
1532
1533extern uint32_t tcp_find_anypcb_byaddr(struct ifaddr *ifa);
1534
1535extern uint32_t udp_find_anypcb_byaddr(struct ifaddr *ifa);
1536
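/*
 * Common address-list builder.  Each matching ifaddr is referenced and
 * collected on a temporary SLIST, then copied into a NULL-terminated
 * array handed back to the caller, who releases it with
 * ifnet_free_address_list().  When return_inuse_addrs is set, only
 * addresses with at least one TCP or UDP PCB bound to them are kept.
 */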
1537__private_extern__ errno_t
1538ifnet_get_address_list_family_internal(ifnet_t interface, ifaddr_t **addresses,
1539    sa_family_t family, int detached, int how, int return_inuse_addrs)
1540{
1541	SLIST_HEAD(, ifnet_addr_list) ifal_head;
1542	struct ifnet_addr_list *ifal, *ifal_tmp;
1543	struct ifnet *ifp;
1544	int count = 0;
1545	errno_t err = 0;
1546	int usecount = 0;
1547	int index = 0;
1548
1549	SLIST_INIT(&ifal_head);
1550
1551	if (addresses == NULL) {
1552		err = EINVAL;
1553		goto done;
1554	}
1555	*addresses = NULL;
1556
1557	if (detached) {
1558		/*
1559		 * Interface has been detached, so skip the lookup
1560		 * at ifnet_head and go directly to inner loop.
1561		 */
1562		ifp = interface;
1563		if (ifp == NULL) {
1564			err = EINVAL;
1565			goto done;
1566		}
1567		goto one;
1568	}
1569
1570	ifnet_head_lock_shared();
1571	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1572		if (interface != NULL && ifp != interface)
1573			continue;
1574one:
1575		ifnet_lock_shared(ifp);
1576		if (interface == NULL || interface == ifp) {
1577			struct ifaddr *ifa;
1578			TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) {
1579				IFA_LOCK(ifa);
1580				if (family != 0 &&
1581				    ifa->ifa_addr->sa_family != family) {
1582					IFA_UNLOCK(ifa);
1583					continue;
1584				}
1585				MALLOC(ifal, struct ifnet_addr_list *,
1586				    sizeof (*ifal), M_TEMP, how);
1587				if (ifal == NULL) {
1588					IFA_UNLOCK(ifa);
1589					ifnet_lock_done(ifp);
1590					if (!detached)
1591						ifnet_head_done();
1592					err = ENOMEM;
1593					goto done;
1594				}
1595				ifal->ifal_ifa = ifa;
1596				IFA_ADDREF_LOCKED(ifa);
1597				SLIST_INSERT_HEAD(&ifal_head, ifal, ifal_le);
1598				++count;
1599				IFA_UNLOCK(ifa);
1600			}
1601		}
1602		ifnet_lock_done(ifp);
1603		if (detached)
1604			break;
1605	}
1606	if (!detached)
1607		ifnet_head_done();
1608
1609	if (count == 0) {
1610		err = ENXIO;
1611		goto done;
1612	}
1613	MALLOC(*addresses, ifaddr_t *, sizeof (ifaddr_t) * (count + 1),
1614	    M_TEMP, how);
1615	if (*addresses == NULL) {
1616		err = ENOMEM;
1617		goto done;
1618	}
1619	bzero(*addresses, sizeof (ifaddr_t) * (count + 1));
1620
1621done:
1622	SLIST_FOREACH_SAFE(ifal, &ifal_head, ifal_le, ifal_tmp) {
1623		SLIST_REMOVE(&ifal_head, ifal, ifnet_addr_list, ifal_le);
1624		if (err == 0) {
1625			if (return_inuse_addrs) {
1626				usecount = tcp_find_anypcb_byaddr(ifal->ifal_ifa);
1627				usecount += udp_find_anypcb_byaddr(ifal->ifal_ifa);
1628				if (usecount) {
1629					(*addresses)[index] = ifal->ifal_ifa;
1630					index++;
1631				}
1632				else
1633					IFA_REMREF(ifal->ifal_ifa);
1634			} else {
1635				(*addresses)[--count] = ifal->ifal_ifa;
1636			}
1637		}
1638		else {
1639			IFA_REMREF(ifal->ifal_ifa);
1640		}
1641		FREE(ifal, M_TEMP);
1642	}
1643
1644	VERIFY(err == 0 || *addresses == NULL);
1645	if ((err == 0) && (count) && ((*addresses)[0] == NULL)) {
1646		VERIFY(return_inuse_addrs == 1);
1647		FREE(*addresses, M_TEMP);
1648		err = ENXIO;
1649	}
1650	return (err);
1651}
1652
1653void
1654ifnet_free_address_list(ifaddr_t *addresses)
1655{
1656	int i;
1657
1658	if (addresses == NULL)
1659		return;
1660
1661	for (i = 0; addresses[i] != NULL; i++)
1662		IFA_REMREF(addresses[i]);
1663
1664	FREE(addresses, M_TEMP);
1665}
1666
1667void *
1668ifnet_lladdr(ifnet_t interface)
1669{
1670	struct ifaddr *ifa;
1671	void *lladdr;
1672
1673	if (interface == NULL)
1674		return (NULL);
1675
1676	/*
1677	 * if_lladdr points to the permanent link address of
1678	 * the interface and it never gets deallocated; internal
1679	 * code should simply use IF_LLADDR() for performance.
1680	 */
1681	ifa = interface->if_lladdr;
1682	IFA_LOCK_SPIN(ifa);
1683	lladdr = LLADDR(SDL((void *)ifa->ifa_addr));
1684	IFA_UNLOCK(ifa);
1685
1686	return (lladdr);
1687}
1688
1689errno_t
1690ifnet_llbroadcast_copy_bytes(ifnet_t interface, void *addr, size_t buffer_len,
1691    size_t *out_len)
1692{
1693	if (interface == NULL || addr == NULL || out_len == NULL)
1694		return (EINVAL);
1695
1696	*out_len = interface->if_broadcast.length;
1697
1698	if (buffer_len < interface->if_broadcast.length)
1699		return (EMSGSIZE);
1700
1701	if (interface->if_broadcast.length == 0)
1702		return (ENXIO);
1703
1704	if (interface->if_broadcast.length <=
1705	    sizeof (interface->if_broadcast.u.buffer)) {
1706		bcopy(interface->if_broadcast.u.buffer, addr,
1707		    interface->if_broadcast.length);
1708	} else {
1709		bcopy(interface->if_broadcast.u.ptr, addr,
1710		    interface->if_broadcast.length);
1711	}
1712
1713	return (0);
1714}
1715
1716static errno_t
1717ifnet_lladdr_copy_bytes_internal(ifnet_t interface, void *lladdr,
1718    size_t lladdr_len, kauth_cred_t *credp)
1719{
1720	const u_int8_t *bytes;
1721	size_t bytes_len;
1722	struct ifaddr *ifa;
1723	uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
1724	errno_t error = 0;
1725
1726	/*
	 * Make sure to accommodate the largest possible
1728	 * size of SA(if_lladdr)->sa_len.
1729	 */
1730	_CASSERT(sizeof (sdlbuf) == (SOCK_MAXADDRLEN + 1));
1731
1732	if (interface == NULL || lladdr == NULL)
1733		return (EINVAL);
1734
1735	ifa = interface->if_lladdr;
1736	IFA_LOCK_SPIN(ifa);
1737	bcopy(ifa->ifa_addr, &sdlbuf, SDL(ifa->ifa_addr)->sdl_len);
1738	IFA_UNLOCK(ifa);
1739
1740	bytes = dlil_ifaddr_bytes(SDL(&sdlbuf), &bytes_len, credp);
1741	if (bytes_len != lladdr_len) {
1742		bzero(lladdr, lladdr_len);
1743		error = EMSGSIZE;
1744	} else {
1745		bcopy(bytes, lladdr, bytes_len);
1746	}
1747
1748	return (error);
1749}
1750
1751errno_t
1752ifnet_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1753{
1754	return (ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1755	    NULL));
1756}
1757
1758errno_t
1759ifnet_guarded_lladdr_copy_bytes(ifnet_t interface, void *lladdr, size_t length)
1760{
1761#if CONFIG_MACF
1762	kauth_cred_t cred;
1763	net_thread_marks_t marks;
1764#endif
1765	kauth_cred_t *credp;
1766	errno_t error;
1767
1768	credp = NULL;
1769#if CONFIG_MACF
1770	marks = net_thread_marks_push(NET_THREAD_CKREQ_LLADDR);
1771	cred = kauth_cred_proc_ref(current_proc());
1772	credp = &cred;
1773#else
1774	credp = NULL;
1775#endif
1776
1777	error = ifnet_lladdr_copy_bytes_internal(interface, lladdr, length,
1778	    credp);
1779
1780#if CONFIG_MACF
1781	kauth_cred_unref(credp);
1782	net_thread_marks_pop(marks);
1783#endif
1784
1785	return (error);
1786}
1787
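/*
 * Rewrite the link-layer address stored in the interface's sockaddr_dl
 * (and optionally its sdl_type), then post KEV_DL_LINK_ADDRESS_CHANGED
 * so listeners learn about the new address.  A zero lladdr_len clears
 * the address.
 */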
1788static errno_t
1789ifnet_set_lladdr_internal(ifnet_t interface, const void *lladdr,
1790    size_t lladdr_len, u_char new_type, int apply_type)
1791{
1792	struct ifaddr *ifa;
1793	errno_t	error = 0;
1794
1795	if (interface == NULL)
1796		return (EINVAL);
1797
1798	ifnet_head_lock_shared();
1799	ifnet_lock_exclusive(interface);
1800	if (lladdr_len != 0 &&
	    (lladdr_len != interface->if_addrlen || lladdr == NULL)) {
1802		ifnet_lock_done(interface);
1803		ifnet_head_done();
1804		return (EINVAL);
1805	}
1806	ifa = ifnet_addrs[interface->if_index - 1];
1807	if (ifa != NULL) {
1808		struct sockaddr_dl *sdl;
1809
1810		IFA_LOCK_SPIN(ifa);
1811		sdl = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
1812		if (lladdr_len != 0) {
1813			bcopy(lladdr, LLADDR(sdl), lladdr_len);
1814		} else {
1815			bzero(LLADDR(sdl), interface->if_addrlen);
1816		}
1817		sdl->sdl_alen = lladdr_len;
1818
1819		if (apply_type) {
1820			sdl->sdl_type = new_type;
1821		}
1822		IFA_UNLOCK(ifa);
1823	} else {
1824		error = ENXIO;
1825	}
1826	ifnet_lock_done(interface);
1827	ifnet_head_done();
1828
1829	/* Generate a kernel event */
1830	if (error == 0) {
1831		dlil_post_msg(interface, KEV_DL_SUBCLASS,
1832		    KEV_DL_LINK_ADDRESS_CHANGED, NULL, 0);
1833	}
1834
1835	return (error);
1836}
1837
1838errno_t
1839ifnet_set_lladdr(ifnet_t interface, const void* lladdr, size_t lladdr_len)
1840{
1841	return (ifnet_set_lladdr_internal(interface, lladdr, lladdr_len, 0, 0));
1842}
1843
1844errno_t
1845ifnet_set_lladdr_and_type(ifnet_t interface, const void* lladdr,
1846    size_t lladdr_len, u_char type)
1847{
1848	return (ifnet_set_lladdr_internal(interface, lladdr,
1849	    lladdr_len, type, 1));
1850}
1851
1852errno_t
1853ifnet_add_multicast(ifnet_t interface, const struct sockaddr *maddr,
1854    ifmultiaddr_t *ifmap)
1855{
1856	if (interface == NULL || maddr == NULL)
1857		return (EINVAL);
1858
1859	/* Don't let users screw up protocols' entries. */
1860	if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
1861		return (EINVAL);
1862
1863	return (if_addmulti_anon(interface, maddr, ifmap));
1864}
1865
1866errno_t
1867ifnet_remove_multicast(ifmultiaddr_t ifma)
1868{
1869	struct sockaddr *maddr;
1870
1871	if (ifma == NULL)
1872		return (EINVAL);
1873
1874	maddr = ifma->ifma_addr;
1875	/* Don't let users screw up protocols' entries. */
1876	if (maddr->sa_family != AF_UNSPEC && maddr->sa_family != AF_LINK)
1877		return (EINVAL);
1878
1879	return (if_delmulti_anon(ifma->ifma_ifp, maddr));
1880}
1881
1882errno_t
1883ifnet_get_multicast_list(ifnet_t ifp, ifmultiaddr_t **addresses)
1884{
1885	int count = 0;
1886	int cmax = 0;
1887	struct ifmultiaddr *addr;
1888
1889	if (ifp == NULL || addresses == NULL)
1890		return (EINVAL);
1891
1892	ifnet_lock_shared(ifp);
1893	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
1894		cmax++;
1895	}
1896
1897	MALLOC(*addresses, ifmultiaddr_t *, sizeof (ifmultiaddr_t) * (cmax + 1),
1898	    M_TEMP, M_NOWAIT);
1899	if (*addresses == NULL) {
1900		ifnet_lock_done(ifp);
1901		return (ENOMEM);
1902	}
1903
1904	LIST_FOREACH(addr, &ifp->if_multiaddrs, ifma_link) {
1905		if (count + 1 > cmax)
1906			break;
1907		(*addresses)[count] = (ifmultiaddr_t)addr;
1908		ifmaddr_reference((*addresses)[count]);
1909		count++;
1910	}
1911	(*addresses)[cmax] = NULL;
1912	ifnet_lock_done(ifp);
1913
1914	return (0);
1915}
1916
1917void
1918ifnet_free_multicast_list(ifmultiaddr_t *addresses)
1919{
1920	int i;
1921
1922	if (addresses == NULL)
1923		return;
1924
1925	for (i = 0; addresses[i] != NULL; i++)
1926		ifmaddr_release(addresses[i]);
1927
1928	FREE(addresses, M_TEMP);
1929}
1930
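/*
 * Look up an interface by its full name ("en0", "lo0", ...) using the
 * sdl_nlen/sdl_data of its link-level address.  On success the ifnet is
 * returned with an extra reference that the caller must drop with
 * ifnet_release().
 */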
1931errno_t
1932ifnet_find_by_name(const char *ifname, ifnet_t *ifpp)
1933{
1934	struct ifnet *ifp;
1935	int	namelen;
1936
1937	if (ifname == NULL)
1938		return (EINVAL);
1939
1940	namelen = strlen(ifname);
1941
1942	*ifpp = NULL;
1943
1944	ifnet_head_lock_shared();
1945	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
1946		struct ifaddr *ifa;
1947		struct sockaddr_dl *ll_addr;
1948
1949		ifa = ifnet_addrs[ifp->if_index - 1];
1950		if (ifa == NULL)
1951			continue;
1952
1953		IFA_LOCK(ifa);
1954		ll_addr = (struct sockaddr_dl *)(void *)ifa->ifa_addr;
1955
1956		if (namelen == ll_addr->sdl_nlen && strncmp(ll_addr->sdl_data,
1957		    ifname, ll_addr->sdl_nlen) == 0) {
1958			IFA_UNLOCK(ifa);
1959			*ifpp = ifp;
1960			ifnet_reference(*ifpp);
1961			break;
1962		}
1963		IFA_UNLOCK(ifa);
1964	}
1965	ifnet_head_done();
1966
1967	return ((ifp == NULL) ? ENXIO : 0);
1968}
1969
1970errno_t
1971ifnet_list_get(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
1972{
1973	return (ifnet_list_get_common(family, FALSE, list, count));
1974}
1975
1976__private_extern__ errno_t
1977ifnet_list_get_all(ifnet_family_t family, ifnet_t **list, u_int32_t *count)
1978{
1979	return (ifnet_list_get_common(family, TRUE, list, count));
1980}
1981
1982struct ifnet_list {
1983	SLIST_ENTRY(ifnet_list)	ifl_le;
1984	struct ifnet		*ifl_ifp;
1985};
1986
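/*
 * Build a NULL-terminated, reference-held array of interfaces matching
 * the given family (IFNET_FAMILY_ANY matches all).  The caller releases
 * the array and its references with ifnet_list_free().
 */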
1987static errno_t
1988ifnet_list_get_common(ifnet_family_t family, boolean_t get_all, ifnet_t **list,
1989    u_int32_t *count)
1990{
1991#pragma unused(get_all)
1992	SLIST_HEAD(, ifnet_list) ifl_head;
1993	struct ifnet_list *ifl, *ifl_tmp;
1994	struct ifnet *ifp;
1995	int cnt = 0;
1996	errno_t err = 0;
1997
1998	SLIST_INIT(&ifl_head);
1999
2000	if (list == NULL || count == NULL) {
2001		err = EINVAL;
2002		goto done;
2003	}
2004	*count = 0;
2005	*list = NULL;
2006
2007	ifnet_head_lock_shared();
2008	TAILQ_FOREACH(ifp, &ifnet_head, if_link) {
2009		if (family == IFNET_FAMILY_ANY || ifp->if_family == family) {
2010			MALLOC(ifl, struct ifnet_list *, sizeof (*ifl),
2011			    M_TEMP, M_NOWAIT);
2012			if (ifl == NULL) {
2013				ifnet_head_done();
2014				err = ENOMEM;
2015				goto done;
2016			}
2017			ifl->ifl_ifp = ifp;
2018			ifnet_reference(ifp);
2019			SLIST_INSERT_HEAD(&ifl_head, ifl, ifl_le);
2020			++cnt;
2021		}
2022	}
2023	ifnet_head_done();
2024
2025	if (cnt == 0) {
2026		err = ENXIO;
2027		goto done;
2028	}
2029
2030	MALLOC(*list, ifnet_t *, sizeof (ifnet_t) * (cnt + 1),
2031	    M_TEMP, M_NOWAIT);
2032	if (*list == NULL) {
2033		err = ENOMEM;
2034		goto done;
2035	}
2036	bzero(*list, sizeof (ifnet_t) * (cnt + 1));
2037	*count = cnt;
2038
2039done:
2040	SLIST_FOREACH_SAFE(ifl, &ifl_head, ifl_le, ifl_tmp) {
2041		SLIST_REMOVE(&ifl_head, ifl, ifnet_list, ifl_le);
2042		if (err == 0)
2043			(*list)[--cnt] = ifl->ifl_ifp;
2044		else
2045			ifnet_release(ifl->ifl_ifp);
2046		FREE(ifl, M_TEMP);
2047	}
2048
2049	return (err);
2050}
2051
2052void
2053ifnet_list_free(ifnet_t *interfaces)
2054{
2055	int i;
2056
2057	if (interfaces == NULL)
2058		return;
2059
2060	for (i = 0; interfaces[i]; i++)
2061		ifnet_release(interfaces[i]);
2062
2063	FREE(interfaces, M_TEMP);
2064}
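
/*
 * Illustrative sketch: ifnet_list_get() returns a NULL-terminated array in
 * which every entry holds a reference, so the whole array must eventually be
 * handed back to ifnet_list_free().  The helper is hypothetical and #if 0'd.
 */
#if 0
static void
example_walk_ethernet_interfaces(void)
{
	ifnet_t *list = NULL;
	u_int32_t count = 0, i;

	if (ifnet_list_get(IFNET_FAMILY_ETHERNET, &list, &count) != 0)
		return;

	for (i = 0; i < count; i++)
		printf("%s%u\n", ifnet_name(list[i]), ifnet_unit(list[i]));

	/* Releases each interface reference and frees the array */
	ifnet_list_free(list);
}
#endif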

void
ifnet_transmit_burst_start(ifnet_t ifp, mbuf_t pkt)
{
#if MEASURE_BW
	uint32_t orig_flags;

	if (ifp == NULL || !(pkt->m_flags & M_PKTHDR))
		return;

	orig_flags = OSBitOrAtomic(IF_MEASURED_BW_INPROGRESS,
	    &ifp->if_bw.flags);
	if (orig_flags & IF_MEASURED_BW_INPROGRESS) {
		/* There is already a measurement in progress; skip this one */
		return;
	}

	ifp->if_bw.start_seq = pkt->m_pkthdr.pkt_bwseq;
	ifp->if_bw.start_ts = mach_absolute_time();
#else /* !MEASURE_BW */
#pragma unused(ifp, pkt)
#endif /* !MEASURE_BW */
}

void
ifnet_transmit_burst_end(ifnet_t ifp, mbuf_t pkt)
{
#if MEASURE_BW
	uint64_t oseq, ots, bytes, ts, t;
	uint32_t flags;

	if (ifp == NULL || !(pkt->m_flags & M_PKTHDR))
		return;

	flags = OSBitOrAtomic(IF_MEASURED_BW_CALCULATION, &ifp->if_bw.flags);

	/* If a calculation is already in progress, just return */
	if (flags & IF_MEASURED_BW_CALCULATION)
		return;

	/* Check if a measurement was started at all */
	if (!(flags & IF_MEASURED_BW_INPROGRESS)) {
		/*
		 * It is an error to call burst_end before burst_start.
		 * Reset the calculation flag and return.
		 */
		goto done;
	}

	oseq = pkt->m_pkthdr.pkt_bwseq;
	ots = mach_absolute_time();

	if (ifp->if_bw.start_seq > 0 && oseq > ifp->if_bw.start_seq) {
		ts = ots - ifp->if_bw.start_ts;
		if (ts > 0) {
			absolutetime_to_nanoseconds(ts, &t);
			bytes = oseq - ifp->if_bw.start_seq;
			ifp->if_bw.bytes = bytes;
			ifp->if_bw.ts = ts;

			if (t > 0) {
				uint64_t bw = 0;

				/* Compute bandwidth as bytes/ms */
				bw = (bytes * NSEC_PER_MSEC) / t;
				if (bw > 0) {
					if (ifp->if_bw.bw > 0) {
						u_int32_t shft;

						shft = if_bw_smoothing_val;
						/* Compute EWMA of bw */
						ifp->if_bw.bw = (bw +
						    ((ifp->if_bw.bw << shft) -
						    ifp->if_bw.bw)) >> shft;
					} else {
						ifp->if_bw.bw = bw;
					}
				}
			}
			ifp->if_bw.last_seq = oseq;
			ifp->if_bw.last_ts = ots;
		}
	}

done:
	flags = ~(IF_MEASURED_BW_INPROGRESS | IF_MEASURED_BW_CALCULATION);
	OSBitAndAtomic(flags, &ifp->if_bw.flags);
#else /* !MEASURE_BW */
#pragma unused(ifp, pkt)
#endif /* !MEASURE_BW */
}
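
/*
 * Worked example of the smoothing above (the value of if_bw_smoothing_val
 * is a tunable; 3 is only an assumed value for illustration): with shft = 3,
 * a previous estimate of 800 bytes/ms and a new sample of 1600 bytes/ms,
 *
 *	new = (1600 + ((800 << 3) - 800)) >> 3
 *	    = (1600 + 5600) / 8
 *	    = 900 bytes/ms
 *
 * i.e. an exponentially weighted moving average that gives the new sample
 * a weight of 1/2^shft.
 */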

/****************************************************************************/
/* ifaddr_t accessors							    */
/****************************************************************************/

errno_t
ifaddr_reference(ifaddr_t ifa)
{
	if (ifa == NULL)
		return (EINVAL);

	IFA_ADDREF(ifa);
	return (0);
}

errno_t
ifaddr_release(ifaddr_t ifa)
{
	if (ifa == NULL)
		return (EINVAL);

	IFA_REMREF(ifa);
	return (0);
}

sa_family_t
ifaddr_address_family(ifaddr_t ifa)
{
	sa_family_t family = 0;

	if (ifa != NULL) {
		IFA_LOCK_SPIN(ifa);
		if (ifa->ifa_addr != NULL)
			family = ifa->ifa_addr->sa_family;
		IFA_UNLOCK(ifa);
	}
	return (family);
}

errno_t
ifaddr_address(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
{
	u_int32_t copylen;

	if (ifa == NULL || out_addr == NULL)
		return (EINVAL);

	IFA_LOCK_SPIN(ifa);
	if (ifa->ifa_addr == NULL) {
		IFA_UNLOCK(ifa);
		return (ENOTSUP);
	}

	copylen = (addr_size >= ifa->ifa_addr->sa_len) ?
	    ifa->ifa_addr->sa_len : addr_size;
	bcopy(ifa->ifa_addr, out_addr, copylen);

	if (ifa->ifa_addr->sa_len > addr_size) {
		IFA_UNLOCK(ifa);
		return (EMSGSIZE);
	}

	IFA_UNLOCK(ifa);
	return (0);
}

errno_t
ifaddr_dstaddress(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
{
	u_int32_t copylen;

	if (ifa == NULL || out_addr == NULL)
		return (EINVAL);

	IFA_LOCK_SPIN(ifa);
	if (ifa->ifa_dstaddr == NULL) {
		IFA_UNLOCK(ifa);
		return (ENOTSUP);
	}

	copylen = (addr_size >= ifa->ifa_dstaddr->sa_len) ?
	    ifa->ifa_dstaddr->sa_len : addr_size;
	bcopy(ifa->ifa_dstaddr, out_addr, copylen);

	if (ifa->ifa_dstaddr->sa_len > addr_size) {
		IFA_UNLOCK(ifa);
		return (EMSGSIZE);
	}

	IFA_UNLOCK(ifa);
	return (0);
}

errno_t
ifaddr_netmask(ifaddr_t ifa, struct sockaddr *out_addr, u_int32_t addr_size)
{
	u_int32_t copylen;

	if (ifa == NULL || out_addr == NULL)
		return (EINVAL);

	IFA_LOCK_SPIN(ifa);
	if (ifa->ifa_netmask == NULL) {
		IFA_UNLOCK(ifa);
		return (ENOTSUP);
	}

	copylen = addr_size >= ifa->ifa_netmask->sa_len ?
	    ifa->ifa_netmask->sa_len : addr_size;
	bcopy(ifa->ifa_netmask, out_addr, copylen);

	if (ifa->ifa_netmask->sa_len > addr_size) {
		IFA_UNLOCK(ifa);
		return (EMSGSIZE);
	}

	IFA_UNLOCK(ifa);
	return (0);
}
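
/*
 * Illustrative sketch: the ifaddr_*() copy-out routines above truncate into
 * the caller's buffer and then return EMSGSIZE, so a sockaddr_storage is
 * always large enough.  The lookup below assumes the usual convention that
 * ifaddr_withaddr() hands back a held reference; the helper itself is
 * hypothetical and #if 0'd.
 */
#if 0
static void
example_netmask_for_local_address(const struct sockaddr *local)
{
	ifaddr_t ifa;
	struct sockaddr_storage mask;

	if ((ifa = ifaddr_withaddr(local)) == NULL)
		return;

	if (ifaddr_netmask(ifa, (struct sockaddr *)&mask,
	    sizeof (mask)) == 0)
		printf("netmask family %u\n", mask.ss_family);

	ifaddr_release(ifa);	/* balance the reference from the lookup */
}
#endif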

ifnet_t
ifaddr_ifnet(ifaddr_t ifa)
{
	struct ifnet *ifp;

	if (ifa == NULL)
		return (NULL);

	/* ifa_ifp is set once at creation time; it is never changed */
	ifp = ifa->ifa_ifp;

	return (ifp);
}

ifaddr_t
ifaddr_withaddr(const struct sockaddr *address)
{
	if (address == NULL)
		return (NULL);

	return (ifa_ifwithaddr(address));
}

ifaddr_t
ifaddr_withdstaddr(const struct sockaddr *address)
{
	if (address == NULL)
		return (NULL);

	return (ifa_ifwithdstaddr(address));
}

ifaddr_t
ifaddr_withnet(const struct sockaddr *net)
{
	if (net == NULL)
		return (NULL);

	return (ifa_ifwithnet(net));
}

ifaddr_t
ifaddr_withroute(int flags, const struct sockaddr *destination,
    const struct sockaddr *gateway)
{
	if (destination == NULL || gateway == NULL)
		return (NULL);

	return (ifa_ifwithroute(flags, destination, gateway));
}

ifaddr_t
ifaddr_findbestforaddr(const struct sockaddr *addr, ifnet_t interface)
{
	if (addr == NULL || interface == NULL)
		return (NULL);

	return (ifaof_ifpforaddr(addr, interface));
}

errno_t
ifmaddr_reference(ifmultiaddr_t ifmaddr)
{
	if (ifmaddr == NULL)
		return (EINVAL);

	IFMA_ADDREF(ifmaddr);
	return (0);
}

errno_t
ifmaddr_release(ifmultiaddr_t ifmaddr)
{
	if (ifmaddr == NULL)
		return (EINVAL);

	IFMA_REMREF(ifmaddr);
	return (0);
}

errno_t
ifmaddr_address(ifmultiaddr_t ifma, struct sockaddr *out_addr,
    u_int32_t addr_size)
{
	u_int32_t copylen;

	if (ifma == NULL || out_addr == NULL)
		return (EINVAL);

	IFMA_LOCK(ifma);
	if (ifma->ifma_addr == NULL) {
		IFMA_UNLOCK(ifma);
		return (ENOTSUP);
	}

	copylen = (addr_size >= ifma->ifma_addr->sa_len ?
	    ifma->ifma_addr->sa_len : addr_size);
	bcopy(ifma->ifma_addr, out_addr, copylen);

	if (ifma->ifma_addr->sa_len > addr_size) {
		IFMA_UNLOCK(ifma);
		return (EMSGSIZE);
	}
	IFMA_UNLOCK(ifma);
	return (0);
}

errno_t
ifmaddr_lladdress(ifmultiaddr_t ifma, struct sockaddr *out_addr,
    u_int32_t addr_size)
{
	struct ifmultiaddr *ifma_ll;

	if (ifma == NULL || out_addr == NULL)
		return (EINVAL);
	if ((ifma_ll = ifma->ifma_ll) == NULL)
		return (ENOTSUP);

	return (ifmaddr_address(ifma_ll, out_addr, addr_size));
}

ifnet_t
ifmaddr_ifnet(ifmultiaddr_t ifma)
{
	return ((ifma == NULL) ? NULL : ifma->ifma_ifp);
}

/******************************************************************************/
/* interface cloner                                                           */
/******************************************************************************/

errno_t
ifnet_clone_attach(struct ifnet_clone_params *cloner_params,
    if_clone_t *ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = NULL;
	size_t namelen;

	if (cloner_params == NULL || ifcloner == NULL ||
	    cloner_params->ifc_name == NULL ||
	    cloner_params->ifc_create == NULL ||
	    cloner_params->ifc_destroy == NULL ||
	    (namelen = strlen(cloner_params->ifc_name)) >= IFNAMSIZ) {
		error = EINVAL;
		goto fail;
	}

	if (if_clone_lookup(cloner_params->ifc_name, NULL) != NULL) {
		printf("%s: already a cloner for %s\n", __func__,
		    cloner_params->ifc_name);
		error = EEXIST;
		goto fail;
	}

	/* Make room for name string */
	ifc = _MALLOC(sizeof (struct if_clone) + IFNAMSIZ + 1, M_CLONE,
	    M_WAITOK | M_ZERO);
	if (ifc == NULL) {
		printf("%s: _MALLOC failed\n", __func__);
		error = ENOBUFS;
		goto fail;
	}
	strlcpy((char *)(ifc + 1), cloner_params->ifc_name, IFNAMSIZ + 1);
	ifc->ifc_name = (char *)(ifc + 1);
	ifc->ifc_namelen = namelen;
	ifc->ifc_maxunit = IF_MAXUNIT;
	ifc->ifc_create = cloner_params->ifc_create;
	ifc->ifc_destroy = cloner_params->ifc_destroy;

	error = if_clone_attach(ifc);
	if (error != 0) {
		printf("%s: if_clone_attach failed %d\n", __func__, error);
		goto fail;
	}
	*ifcloner = ifc;

	return (0);
fail:
	if (ifc != NULL)
		FREE(ifc, M_CLONE);
	return (error);
}
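
/*
 * Illustrative sketch of registering a cloner through the KPI above.  The
 * create/destroy callback signatures are assumed from the ifnet_clone_params
 * declaration in kpi_interface.h, and the "myif" name and helpers are
 * hypothetical (#if 0'd out).
 */
#if 0
static errno_t
example_clone_create(if_clone_t ifc, u_int32_t unit, void *params)
{
#pragma unused(ifc, unit, params)
	/* allocate and attach an ifnet named "myif<unit>" here */
	return (0);
}

static errno_t
example_clone_destroy(ifnet_t ifp)
{
#pragma unused(ifp)
	/* detach and free the ifnet here */
	return (0);
}

static if_clone_t example_cloner = NULL;

static errno_t
example_register_cloner(void)
{
	struct ifnet_clone_params params = {
		.ifc_name	= "myif",
		.ifc_create	= example_clone_create,
		.ifc_destroy	= example_clone_destroy,
	};

	/* The returned if_clone_t is later passed to ifnet_clone_detach() */
	return (ifnet_clone_attach(&params, &example_cloner));
}
#endif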

errno_t
ifnet_clone_detach(if_clone_t ifcloner)
{
	errno_t error = 0;
	struct if_clone *ifc = ifcloner;

	if (ifc == NULL || ifc->ifc_name == NULL)
		return (EINVAL);

	if ((if_clone_lookup(ifc->ifc_name, NULL)) == NULL) {
		printf("%s: no cloner for %s\n", __func__, ifc->ifc_name);
		error = EINVAL;
		goto fail;
	}

	if_clone_detach(ifc);

	FREE(ifc, M_CLONE);

fail:
	return (error);
}

/******************************************************************************/
/* misc                                                                       */
/******************************************************************************/

errno_t
ifnet_get_local_ports_extended(ifnet_t ifp, protocol_family_t protocol,
    u_int32_t flags, u_int8_t *bitfield)
{
	u_int32_t ifindex;
	u_int32_t inp_flags = 0;

	inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_WILDCARDOK) ?
	    INPCB_GET_PORTS_USED_WILDCARDOK : 0);
	inp_flags |= ((flags & IFNET_GET_LOCAL_PORTS_NOWAKEUPOK) ?
	    INPCB_GET_PORTS_USED_NOWAKEUPOK : 0);

	if (bitfield == NULL)
		return (EINVAL);

	switch (protocol) {
	case PF_UNSPEC:
	case PF_INET:
	case PF_INET6:
		break;
	default:
		return (EINVAL);
	}

	/* bit string is long enough to hold 16-bit port values */
	bzero(bitfield, bitstr_size(65536));

	ifindex = (ifp != NULL) ? ifp->if_index : 0;

	if (!(flags & IFNET_GET_LOCAL_PORTS_TCPONLY))
		udp_get_ports_used(ifindex, protocol, inp_flags, bitfield);

	if (!(flags & IFNET_GET_LOCAL_PORTS_UDPONLY))
		tcp_get_ports_used(ifindex, protocol, inp_flags, bitfield);

	return (0);
}

errno_t
ifnet_get_local_ports(ifnet_t ifp, u_int8_t *bitfield)
{
	u_int32_t flags = IFNET_GET_LOCAL_PORTS_WILDCARDOK;

	return (ifnet_get_local_ports_extended(ifp, PF_UNSPEC, flags,
	    bitfield));
}
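
/*
 * Illustrative sketch: the caller provides a bit string covering all 2^16
 * ports (bitstr_size(65536) bytes, i.e. 8 KB) and can then test individual
 * ports.  bit_test() is assumed to come from the same <sys/bitstring.h> that
 * supplies bitstr_size(); the helper is hypothetical and #if 0'd.
 */
#if 0
static void
example_is_port_in_use(ifnet_t ifp, u_int16_t port)
{
	u_int8_t *bitfield;

	MALLOC(bitfield, u_int8_t *, bitstr_size(65536), M_TEMP,
	    M_WAITOK | M_ZERO);
	if (bitfield == NULL)
		return;

	if (ifnet_get_local_ports(ifp, bitfield) == 0 &&
	    bit_test(bitfield, port))
		printf("port %u is in use locally\n", port);

	FREE(bitfield, M_TEMP);
}
#endif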

errno_t
ifnet_notice_node_presence(ifnet_t ifp, struct sockaddr *sa, int32_t rssi,
    int lqm, int npm, u_int8_t srvinfo[48])
{
	if (ifp == NULL || sa == NULL || srvinfo == NULL)
		return (EINVAL);
	if (sa->sa_len > sizeof (struct sockaddr_storage))
		return (EINVAL);
	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
		return (EINVAL);

	dlil_node_present(ifp, sa, rssi, lqm, npm, srvinfo);
	return (0);
}

errno_t
ifnet_notice_node_absence(ifnet_t ifp, struct sockaddr *sa)
{
	if (ifp == NULL || sa == NULL)
		return (EINVAL);
	if (sa->sa_len > sizeof (struct sockaddr_storage))
		return (EINVAL);
	if (sa->sa_family != AF_LINK && sa->sa_family != AF_INET6)
		return (EINVAL);

	dlil_node_absent(ifp, sa);
	return (0);
}

errno_t
ifnet_notice_master_elected(ifnet_t ifp)
{
	if (ifp == NULL)
		return (EINVAL);

	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_MASTER_ELECTED, NULL, 0);
	return (0);
}

errno_t
ifnet_tx_compl_status(ifnet_t ifp, mbuf_t m, tx_compl_val_t val)
{
#pragma unused(ifp, m, val)
	/* Dummy function to be implemented XXX */
	return (0);
}

errno_t
ifnet_report_issues(ifnet_t ifp, u_int8_t modid[IFNET_MODIDLEN],
    u_int8_t info[IFNET_MODARGLEN])
{
	if (ifp == NULL || modid == NULL)
		return (EINVAL);

	dlil_report_issues(ifp, modid, info);
	return (0);
}

extern errno_t
ifnet_set_delegate(ifnet_t ifp, ifnet_t delegated_ifp)
{
	ifnet_t odifp = NULL;

	if (ifp == NULL)
		return (EINVAL);
	else if (!ifnet_is_attached(ifp, 1))
		return (ENXIO);

	ifnet_lock_exclusive(ifp);
	odifp = ifp->if_delegated.ifp;
	if (odifp != NULL && odifp == delegated_ifp) {
		/* delegate info is unchanged; nothing more to do */
		ifnet_lock_done(ifp);
		goto done;
	}
	bzero(&ifp->if_delegated, sizeof (ifp->if_delegated));
	if (delegated_ifp != NULL && ifp != delegated_ifp) {
		ifp->if_delegated.ifp = delegated_ifp;
		ifnet_reference(delegated_ifp);
		ifp->if_delegated.type = delegated_ifp->if_type;
		ifp->if_delegated.family = delegated_ifp->if_family;
		ifp->if_delegated.subfamily = delegated_ifp->if_subfamily;
		ifp->if_delegated.expensive =
		    delegated_ifp->if_eflags & IFEF_EXPENSIVE ? 1 : 0;
		printf("%s: is now delegating %s (type 0x%x, family %u, "
		    "sub-family %u)\n", ifp->if_xname, delegated_ifp->if_xname,
		    delegated_ifp->if_type, delegated_ifp->if_family,
		    delegated_ifp->if_subfamily);
	}
	ifnet_lock_done(ifp);

	if (odifp != NULL) {
		if (odifp != delegated_ifp) {
			printf("%s: is no longer delegating %s\n",
			    ifp->if_xname, odifp->if_xname);
		}
		ifnet_release(odifp);
	}

	/* Generate a kernel event */
	dlil_post_msg(ifp, KEV_DL_SUBCLASS, KEV_DL_IFDELEGATE_CHANGED, NULL, 0);

done:
	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return (0);
}

extern errno_t
ifnet_get_delegate(ifnet_t ifp, ifnet_t *pdelegated_ifp)
{
	if (ifp == NULL || pdelegated_ifp == NULL)
		return (EINVAL);
	else if (!ifnet_is_attached(ifp, 1))
		return (ENXIO);

	ifnet_lock_shared(ifp);
	if (ifp->if_delegated.ifp != NULL)
		ifnet_reference(ifp->if_delegated.ifp);
	*pdelegated_ifp = ifp->if_delegated.ifp;
	ifnet_lock_done(ifp);

	/* Release the io ref count */
	ifnet_decr_iorefcnt(ifp);

	return (0);
}
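
/*
 * Illustrative sketch: ifnet_get_delegate() takes a reference on the delegate
 * it reports, so a non-NULL result must be released by the caller.  The
 * helper below is hypothetical and #if 0'd out.
 */
#if 0
static void
example_log_delegate(ifnet_t ifp)
{
	ifnet_t delegate = NULL;

	if (ifnet_get_delegate(ifp, &delegate) == 0 && delegate != NULL) {
		printf("%s delegates to %s\n", ifp->if_xname,
		    delegate->if_xname);
		ifnet_release(delegate);	/* drop the reference taken above */
	}
}
#endif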

extern u_int32_t key_fill_offload_frames_for_savs(ifnet_t ifp,
    struct ipsec_offload_frame *frames_array, u_int32_t frames_array_count,
    size_t frame_data_offset);

extern errno_t
ifnet_get_ipsec_offload_frames(ifnet_t ifp,
    struct ipsec_offload_frame *frames_array, u_int32_t frames_array_count,
    size_t frame_data_offset, u_int32_t *used_frames_count)
{
	if (frames_array == NULL || used_frames_count == NULL) {
		return (EINVAL);
	}

	*used_frames_count = 0;

	if (frames_array_count == 0) {
		return (0);
	}

	*used_frames_count = key_fill_offload_frames_for_savs(ifp,
	    frames_array, frames_array_count, frame_data_offset);
	return (0);
}
