/*-
 * Copyright (c) 1982, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if_ethersubr.c	8.1 (Berkeley) 6/10/93
 * $FreeBSD: stable/11/sys/net/if_ethersubr.c 360299 2020-04-25 12:49:48Z kp $
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_netgraph.h"
#include "opt_mbuf_profiling.h"
#include "opt_rss.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/jail.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/random.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/uuid.h>

#include <net/ieee_oui.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/netisr.h>
#include <net/route.h>
#include <net/if_llc.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if_bridgevar.h>
#include <net/if_vlan_var.h>
#include <net/if_llatbl.h>
#include <net/pfil.h>
#include <net/rss_config.h>
#include <net/vnet.h>

#include <netpfil/pf/pf_mtag.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/if_ether.h>
#include <netinet/ip_carp.h>
#include <netinet/ip_var.h>
#endif
#ifdef INET6
#include <netinet6/nd6.h>
#endif
#include <security/mac/mac_framework.h>

#include <crypto/sha1.h>

#ifdef CTASSERT
CTASSERT(sizeof (struct ether_header) == ETHER_ADDR_LEN * 2 + 2);
CTASSERT(sizeof (struct ether_addr) == ETHER_ADDR_LEN);
#endif

VNET_DEFINE(struct pfil_head, link_pfil_hook);	/* Packet filter hooks */

/* netgraph node hooks for ng_ether(4) */
void	(*ng_ether_input_p)(struct ifnet *ifp, struct mbuf **mp);
void	(*ng_ether_input_orphan_p)(struct ifnet *ifp, struct mbuf *m);
int	(*ng_ether_output_p)(struct ifnet *ifp, struct mbuf **mp);
void	(*ng_ether_attach_p)(struct ifnet *ifp);
void	(*ng_ether_detach_p)(struct ifnet *ifp);

void	(*vlan_input_p)(struct ifnet *, struct mbuf *);

/* if_bridge(4) support */
struct mbuf *(*bridge_input_p)(struct ifnet *, struct mbuf *);
int	(*bridge_output_p)(struct ifnet *, struct mbuf *,
		struct sockaddr *, struct rtentry *);
void	(*bridge_dn_p)(struct mbuf *, struct ifnet *);

/* if_lagg(4) support */
struct mbuf *(*lagg_input_p)(struct ifnet *, struct mbuf *);

static const u_char etherbroadcastaddr[ETHER_ADDR_LEN] =
			{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

static	int ether_resolvemulti(struct ifnet *, struct sockaddr **,
		struct sockaddr *);
#ifdef VIMAGE
static	void ether_reassign(struct ifnet *, struct vnet *, char *);
#endif
static	int ether_requestencap(struct ifnet *, struct if_encap_req *);


#define senderr(e) do { error = (e); goto bad;} while (0)

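/*
 * Copy the relevant receive-side checksum flags from src to dst so that a
 * locally looped-back copy is treated as already verified and is not
 * checksummed again by the local stack.
 */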
static void
update_mbuf_csumflags(struct mbuf *src, struct mbuf *dst)
{
	int csum_flags = 0;

	if (src->m_pkthdr.csum_flags & CSUM_IP)
		csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID);
	if (src->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
		csum_flags |= (CSUM_DATA_VALID|CSUM_PSEUDO_HDR);
	if (src->m_pkthdr.csum_flags & CSUM_SCTP)
		csum_flags |= CSUM_SCTP_VALID;
	dst->m_pkthdr.csum_flags |= csum_flags;
	if (csum_flags & CSUM_DATA_VALID)
		dst->m_pkthdr.csum_data = 0xffff;
}

/*
 * Handle link-layer encapsulation requests.
 */
static int
ether_requestencap(struct ifnet *ifp, struct if_encap_req *req)
{
	struct ether_header *eh;
	struct arphdr *ah;
	uint16_t etype;
	const u_char *lladdr;

	if (req->rtype != IFENCAP_LL)
		return (EOPNOTSUPP);

	if (req->bufsize < ETHER_HDR_LEN)
		return (ENOMEM);

	eh = (struct ether_header *)req->buf;
	lladdr = req->lladdr;
	req->lladdr_off = 0;

	switch (req->family) {
	case AF_INET:
		etype = htons(ETHERTYPE_IP);
		break;
	case AF_INET6:
		etype = htons(ETHERTYPE_IPV6);
		break;
	case AF_ARP:
		ah = (struct arphdr *)req->hdata;
		ah->ar_hrd = htons(ARPHRD_ETHER);

		switch(ntohs(ah->ar_op)) {
		case ARPOP_REVREQUEST:
		case ARPOP_REVREPLY:
			etype = htons(ETHERTYPE_REVARP);
			break;
		case ARPOP_REQUEST:
		case ARPOP_REPLY:
		default:
			etype = htons(ETHERTYPE_ARP);
			break;
		}

		if (req->flags & IFENCAP_FLAG_BROADCAST)
			lladdr = ifp->if_broadcastaddr;
		break;
	default:
		return (EAFNOSUPPORT);
	}

	memcpy(&eh->ether_type, &etype, sizeof(eh->ether_type));
	memcpy(eh->ether_dhost, lladdr, ETHER_ADDR_LEN);
	memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
	req->bufsize = sizeof(struct ether_header);

	return (0);
}

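/*
 * Resolve the link-layer address for dst and construct the Ethernet header
 * in the buffer pointed to by phdr.  Unicast addresses are resolved via
 * ARP or ND6; broadcast and multicast addresses are mapped directly.
 */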
static int
ether_resolve_addr(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro, u_char *phdr,
	uint32_t *pflags, struct llentry **plle)
{
	struct ether_header *eh;
	uint32_t lleflags = 0;
	int error = 0;
#if defined(INET) || defined(INET6)
	uint16_t etype;
#endif

	if (plle)
		*plle = NULL;
	eh = (struct ether_header *)phdr;

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		if ((m->m_flags & (M_BCAST | M_MCAST)) == 0)
			error = arpresolve(ifp, 0, m, dst, phdr, &lleflags,
			    plle);
		else {
			if (m->m_flags & M_BCAST)
				memcpy(eh->ether_dhost, ifp->if_broadcastaddr,
				    ETHER_ADDR_LEN);
			else {
				const struct in_addr *a;
				a = &(((const struct sockaddr_in *)dst)->sin_addr);
				ETHER_MAP_IP_MULTICAST(a, eh->ether_dhost);
			}
			etype = htons(ETHERTYPE_IP);
			memcpy(&eh->ether_type, &etype, sizeof(etype));
			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
		}
		break;
#endif
#ifdef INET6
	case AF_INET6:
		if ((m->m_flags & M_MCAST) == 0)
			error = nd6_resolve(ifp, 0, m, dst, phdr, &lleflags,
			    plle);
		else {
			const struct in6_addr *a6;
			a6 = &(((const struct sockaddr_in6 *)dst)->sin6_addr);
			ETHER_MAP_IPV6_MULTICAST(a6, eh->ether_dhost);
			etype = htons(ETHERTYPE_IPV6);
			memcpy(&eh->ether_type, &etype, sizeof(etype));
			memcpy(eh->ether_shost, IF_LLADDR(ifp), ETHER_ADDR_LEN);
		}
		break;
#endif
	default:
		if_printf(ifp, "can't handle af%d\n", dst->sa_family);
		if (m != NULL)
			m_freem(m);
		return (EAFNOSUPPORT);
	}

	if (error == EHOSTDOWN) {
		if (ro != NULL && (ro->ro_flags & RT_HAS_GW) != 0)
			error = EHOSTUNREACH;
	}

	if (error != 0)
		return (error);

	*pflags = RT_MAY_LOOP;
	if (lleflags & LLE_IFADDR)
		*pflags |= RT_L2_ME;

	return (0);
}

/*
 * Ethernet output routine.
 * Encapsulate a packet of type family for the local net.
 */
int
ether_output(struct ifnet *ifp, struct mbuf *m,
	const struct sockaddr *dst, struct route *ro)
{
	int error = 0;
	char linkhdr[ETHER_HDR_LEN], *phdr;
	struct ether_header *eh;
	struct pf_mtag *t;
	int loop_copy = 1;
	int hlen;	/* link layer header length */
	uint32_t pflags;
	struct llentry *lle = NULL;
	struct rtentry *rt0 = NULL;
	int addref = 0;

	phdr = NULL;
	pflags = 0;
	if (ro != NULL) {
		/* XXX BPF uses ro_prepend */
		if (ro->ro_prepend != NULL) {
			phdr = ro->ro_prepend;
			hlen = ro->ro_plen;
		} else if (!(m->m_flags & (M_BCAST | M_MCAST))) {
			if ((ro->ro_flags & RT_LLE_CACHE) != 0) {
				lle = ro->ro_lle;
				if (lle != NULL &&
				    (lle->la_flags & LLE_VALID) == 0) {
					LLE_FREE(lle);
					lle = NULL;	/* redundant */
					ro->ro_lle = NULL;
				}
				if (lle == NULL) {
					/* if we lookup, keep cache */
					addref = 1;
				} else
					/*
					 * Notify LLE code that
					 * the entry was used
					 * by datapath.
					 */
					llentry_mark_used(lle);
			}
			if (lle != NULL) {
				phdr = lle->r_linkdata;
				hlen = lle->r_hdrlen;
				pflags = lle->r_flags;
			}
		}
		rt0 = ro->ro_rt;
	}

#ifdef MAC
	error = mac_ifnet_check_transmit(ifp, m);
	if (error)
		senderr(error);
#endif

	M_PROFILE(m);
	if (ifp->if_flags & IFF_MONITOR)
		senderr(ENETDOWN);
	if (!((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING)))
		senderr(ENETDOWN);

	if (phdr == NULL) {
		/* No prepend data supplied. Try to calculate ourselves. */
		phdr = linkhdr;
		hlen = ETHER_HDR_LEN;
		error = ether_resolve_addr(ifp, m, dst, ro, phdr, &pflags,
		    addref ? &lle : NULL);
		if (addref && lle != NULL)
			ro->ro_lle = lle;
		if (error != 0)
			return (error == EWOULDBLOCK ? 0 : error);
	}

	if ((pflags & RT_L2_ME) != 0) {
		update_mbuf_csumflags(m, m);
		return (if_simloop(ifp, m, dst->sa_family, 0));
	}
	loop_copy = pflags & RT_MAY_LOOP;

	/*
	 * Add local net header.  If no space in first mbuf,
	 * allocate another.
	 *
	 * Note that we do prepend regardless of RT_HAS_HEADER flag.
	 * This is done because BPF code shifts m_data pointer
	 * to the end of ethernet header prior to calling if_output().
	 */
	M_PREPEND(m, hlen, M_NOWAIT);
	if (m == NULL)
		senderr(ENOBUFS);
	if ((pflags & RT_HAS_HEADER) == 0) {
		eh = mtod(m, struct ether_header *);
		memcpy(eh, phdr, hlen);
	}

	/*
	 * If a simplex interface, and the packet is being sent to our
	 * Ethernet address or a broadcast address, loopback a copy.
	 * XXX To make a simplex device behave exactly like a duplex
	 * device, we should copy in the case of sending to our own
	 * ethernet address (thus letting the original actually appear
	 * on the wire). However, we don't do that here for security
	 * reasons and compatibility with the original behavior.
	 */
	if ((m->m_flags & M_BCAST) && loop_copy && (ifp->if_flags & IFF_SIMPLEX) &&
	    ((t = pf_find_mtag(m)) == NULL || !t->routed)) {
		struct mbuf *n;

		/*
		 * Because if_simloop() modifies the packet, we need a
		 * writable copy through m_dup() instead of a readonly
		 * one as m_copy[m] would give us. The alternative would
		 * be to modify if_simloop() to handle the readonly mbuf,
		 * but performancewise it is mostly equivalent (trading
		 * extra data copying vs. extra locking).
		 *
		 * XXX This is a local workaround.  A number of less
		 * often used kernel parts suffer from the same bug.
		 * See PR kern/105943 for a proposed general solution.
		 */
		if ((n = m_dup(m, M_NOWAIT)) != NULL) {
			update_mbuf_csumflags(m, n);
			(void)if_simloop(ifp, n, dst->sa_family, hlen);
		} else
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
	}

	/*
	 * Bridges require special output handling.
	 */
	if (ifp->if_bridge) {
		BRIDGE_OUTPUT(ifp, m, error);
		return (error);
	}

#if defined(INET) || defined(INET6)
	if (ifp->if_carp &&
	    (error = (*carp_output_p)(ifp, m, dst)))
		goto bad;
#endif

	/* Handle ng_ether(4) processing, if any */
	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_output_p != NULL,
		    ("ng_ether_output_p is NULL"));
		if ((error = (*ng_ether_output_p)(ifp, &m)) != 0) {
bad:			if (m != NULL)
				m_freem(m);
			return (error);
		}
		if (m == NULL)
			return (0);
	}

	/* Continue with link-layer output */
	return ether_output_frame(ifp, m);
}

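/*
 * Insert an 802.1Q header carrying only the given PCP value (VLAN ID 0)
 * unless the frame is already VLAN tagged.  Returns false and counts an
 * output error if the header cannot be prepended.
 */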
static bool
ether_set_pcp(struct mbuf **mp, struct ifnet *ifp, uint8_t pcp)
{
	struct ether_header *eh;

	eh = mtod(*mp, struct ether_header *);
	if (ntohs(eh->ether_type) == ETHERTYPE_VLAN ||
	    ether_8021q_frame(mp, ifp, ifp, 0, pcp))
		return (true);
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
	return (false);
}

/*
 * Ethernet link layer output routine to send a raw frame to the device.
 *
 * This assumes that the 14 byte Ethernet header is present and contiguous
 * in the first mbuf (if BRIDGE'ing).
 */
int
ether_output_frame(struct ifnet *ifp, struct mbuf *m)
{
	int error;
	uint8_t pcp;

	pcp = ifp->if_pcp;
	if (pcp != IFNET_PCP_NONE && !ether_set_pcp(&m, ifp, pcp))
		return (0);

	if (PFIL_HOOKED(&V_link_pfil_hook)) {
		error = pfil_run_hooks(&V_link_pfil_hook, &m, ifp,
		    PFIL_OUT, 0, NULL);
		if (error != 0)
			return (EACCES);

		if (m == NULL)
			return (0);
	}

	/*
	 * Queue message on interface, update output statistics if
	 * successful, and start output if interface not yet active.
	 */
	return ((ifp->if_transmit)(ifp, m));
}

/*
 * Process a received Ethernet packet; the packet is in the
 * mbuf chain m with the ethernet header at the front.
 */
static void
ether_input_internal(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	u_short etype;

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		return;
	}
#ifdef DIAGNOSTIC
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		if_printf(ifp, "discard frame at !IFF_DRV_RUNNING\n");
		m_freem(m);
		return;
	}
#endif
	if (m->m_len < ETHER_HDR_LEN) {
		/* XXX maybe should pullup? */
		if_printf(ifp, "discard frame w/o leading ethernet "
				"header (len %u pkt len %u)\n",
				m->m_len, m->m_pkthdr.len);
		if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
		m_freem(m);
		return;
	}
	eh = mtod(m, struct ether_header *);
	etype = ntohs(eh->ether_type);
	random_harvest_queue(m, sizeof(*m), 2, RANDOM_NET_ETHER);

	CURVNET_SET_QUIET(ifp->if_vnet);

	if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
		if (ETHER_IS_BROADCAST(eh->ether_dhost))
			m->m_flags |= M_BCAST;
		else
			m->m_flags |= M_MCAST;
		if_inc_counter(ifp, IFCOUNTER_IMCASTS, 1);
	}

#ifdef MAC
	/*
	 * Tag the mbuf with an appropriate MAC label before any other
	 * consumers can get to it.
	 */
	mac_ifnet_create_mbuf(ifp, m);
#endif

	/*
	 * Give bpf a chance at the packet.
	 */
	ETHER_BPF_MTAP(ifp, m);

	/*
	 * If the CRC is still on the packet, trim it off. We do this once
	 * and once only in case we are re-entered. Nothing else on the
	 * Ethernet receive path expects to see the FCS.
	 */
	if (m->m_flags & M_HASFCS) {
		m_adj(m, -ETHER_CRC_LEN);
		m->m_flags &= ~M_HASFCS;
	}

	if (!(ifp->if_capenable & IFCAP_HWSTATS))
		if_inc_counter(ifp, IFCOUNTER_IBYTES, m->m_pkthdr.len);

	/* Allow monitor mode to claim this frame, after stats are updated. */
	if (ifp->if_flags & IFF_MONITOR) {
		m_freem(m);
		CURVNET_RESTORE();
		return;
	}

	/* Handle input from a lagg(4) port */
	if (ifp->if_type == IFT_IEEE8023ADLAG) {
		KASSERT(lagg_input_p != NULL,
		    ("%s: if_lagg not loaded!", __func__));
		m = (*lagg_input_p)(ifp, m);
		if (m != NULL)
			ifp = m->m_pkthdr.rcvif;
		else {
			CURVNET_RESTORE();
			return;
		}
	}

	/*
	 * If the hardware did not process an 802.1Q tag, do this now,
	 * to allow 802.1P priority frames to be passed to the main input
	 * path correctly.
	 * TODO: Deal with Q-in-Q frames, but not arbitrary nesting levels.
	 */
	if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_VLAN) {
		struct ether_vlan_header *evl;

		if (m->m_len < sizeof(*evl) &&
		    (m = m_pullup(m, sizeof(*evl))) == NULL) {
#ifdef DIAGNOSTIC
			if_printf(ifp, "cannot pullup VLAN header\n");
#endif
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
			CURVNET_RESTORE();
			return;
		}

		evl = mtod(m, struct ether_vlan_header *);
		m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag);
		m->m_flags |= M_VLANTAG;

		bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN,
		    ETHER_HDR_LEN - ETHER_TYPE_LEN);
		m_adj(m, ETHER_VLAN_ENCAP_LEN);
		eh = mtod(m, struct ether_header *);
	}

	M_SETFIB(m, ifp->if_fib);

	/* Allow ng_ether(4) to claim this frame. */
	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_input_p != NULL,
		    ("%s: ng_ether_input_p is NULL", __func__));
		m->m_flags &= ~M_PROMISC;
		(*ng_ether_input_p)(ifp, &m);
		if (m == NULL) {
			CURVNET_RESTORE();
			return;
		}
		eh = mtod(m, struct ether_header *);
	}

	/*
	 * Allow if_bridge(4) to claim this frame.
	 * The BRIDGE_INPUT() macro will update ifp if the bridge changed it
	 * and the frame should be delivered locally.
	 */
	if (ifp->if_bridge != NULL) {
		m->m_flags &= ~M_PROMISC;
		BRIDGE_INPUT(ifp, m);
		if (m == NULL) {
			CURVNET_RESTORE();
			return;
		}
		eh = mtod(m, struct ether_header *);
	}

#if defined(INET) || defined(INET6)
	/*
	 * Clear M_PROMISC on frame so that carp(4) will see it when the
	 * mbuf flows up to Layer 3.
	 * FreeBSD's implementation of carp(4) uses the inprotosw
	 * to dispatch IPPROTO_CARP. carp(4) also allocates its own
	 * Ethernet addresses of the form 00:00:5e:00:01:xx, which
	 * is outside the scope of the M_PROMISC test below.
	 * TODO: Maintain a hash table of ethernet addresses other than
	 * ether_dhost which may be active on this ifp.
	 */
	if (ifp->if_carp && (*carp_forus_p)(ifp, eh->ether_dhost)) {
		m->m_flags &= ~M_PROMISC;
	} else
#endif
	{
		/*
		 * If the frame received was not for our MAC address, set the
		 * M_PROMISC flag on the mbuf chain. The frame may need to
		 * be seen by the rest of the Ethernet input path in case of
		 * re-entry (e.g. bridge, vlan, netgraph) but should not be
		 * seen by upper protocol layers.
		 */
		if (!ETHER_IS_MULTICAST(eh->ether_dhost) &&
		    bcmp(IF_LLADDR(ifp), eh->ether_dhost, ETHER_ADDR_LEN) != 0)
			m->m_flags |= M_PROMISC;
	}

	ether_demux(ifp, m);
	CURVNET_RESTORE();
}

/*
 * Ethernet input dispatch; by default, direct dispatch here regardless of
 * global configuration.  However, if RSS is enabled, hook up RSS affinity
 * so that when deferred or hybrid dispatch is enabled, we can redistribute
 * load based on RSS.
 *
 * XXXRW: Would be nice if the ifnet passed up a flag indicating whether or
 * not it had already done work distribution via multi-queue.  Then we could
 * direct dispatch in the event load balancing was already complete and
 * handle the case of interfaces with different capabilities better.
 *
 * XXXRW: Sort of want an M_DISTRIBUTED flag to avoid multiple distributions
 * at multiple layers?
 *
 * XXXRW: For now, enable all this only if RSS is compiled in, although it
 * works fine without RSS.  Need to characterise the performance overhead
 * of the detour through the netisr code in the event the result is always
 * direct dispatch.
 */
static void
ether_nh_input(struct mbuf *m)
{

	M_ASSERTPKTHDR(m);
	KASSERT(m->m_pkthdr.rcvif != NULL,
	    ("%s: NULL interface pointer", __func__));
	ether_input_internal(m->m_pkthdr.rcvif, m);
}

static struct netisr_handler	ether_nh = {
	.nh_name = "ether",
	.nh_handler = ether_nh_input,
	.nh_proto = NETISR_ETHER,
#ifdef RSS
	.nh_policy = NETISR_POLICY_CPU,
	.nh_dispatch = NETISR_DISPATCH_DIRECT,
	.nh_m2cpuid = rss_m2cpuid,
#else
	.nh_policy = NETISR_POLICY_SOURCE,
	.nh_dispatch = NETISR_DISPATCH_DIRECT,
#endif
};

static void
ether_init(__unused void *arg)
{

	netisr_register(&ether_nh);
}
SYSINIT(ether, SI_SUB_INIT_IF, SI_ORDER_ANY, ether_init, NULL);

static void
vnet_ether_init(__unused void *arg)
{
	int i;

	/* Initialize packet filter hooks. */
	V_link_pfil_hook.ph_type = PFIL_TYPE_AF;
	V_link_pfil_hook.ph_af = AF_LINK;
	if ((i = pfil_head_register(&V_link_pfil_hook)) != 0)
		printf("%s: WARNING: unable to register pfil link hook, "
			"error %d\n", __func__, i);
#ifdef VIMAGE
	netisr_register_vnet(&ether_nh);
#endif
}
VNET_SYSINIT(vnet_ether_init, SI_SUB_PROTO_IF, SI_ORDER_ANY,
    vnet_ether_init, NULL);

#ifdef VIMAGE
static void
vnet_ether_pfil_destroy(__unused void *arg)
{
	int i;

	if ((i = pfil_head_unregister(&V_link_pfil_hook)) != 0)
		printf("%s: WARNING: unable to unregister pfil link hook, "
			"error %d\n", __func__, i);
}
VNET_SYSUNINIT(vnet_ether_pfil_uninit, SI_SUB_PROTO_PFIL, SI_ORDER_ANY,
    vnet_ether_pfil_destroy, NULL);

static void
vnet_ether_destroy(__unused void *arg)
{

	netisr_unregister_vnet(&ether_nh);
}
VNET_SYSUNINIT(vnet_ether_uninit, SI_SUB_PROTO_IF, SI_ORDER_ANY,
    vnet_ether_destroy, NULL);
#endif

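/*
 * Ethernet input entry point, installed as ifp->if_input by ether_ifattach().
 * Frames are handed to the netisr framework one packet at a time.
 */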
static void
ether_input(struct ifnet *ifp, struct mbuf *m)
{

	struct mbuf *mn;

	/*
	 * The drivers are allowed to pass in a chain of packets linked with
	 * m_nextpkt. We split them up into separate packets here and pass
	 * them up. This allows the drivers to amortize the receive lock.
	 */
	while (m) {
		mn = m->m_nextpkt;
		m->m_nextpkt = NULL;

		/*
		 * We will rely on rcvif being set properly in the deferred
		 * context, so assert it is correct here.
		 */
		KASSERT(m->m_pkthdr.rcvif == ifp, ("%s: ifnet mismatch m %p "
		    "rcvif %p ifp %p", __func__, m, m->m_pkthdr.rcvif, ifp));
		CURVNET_SET_QUIET(ifp->if_vnet);
		netisr_dispatch(NETISR_ETHER, m);
		CURVNET_RESTORE();
		m = mn;
	}
}

/*
 * Upper layer processing for a received Ethernet packet.
 */
void
ether_demux(struct ifnet *ifp, struct mbuf *m)
{
	struct ether_header *eh;
	int i, isr;
	u_short ether_type;

	KASSERT(ifp != NULL, ("%s: NULL interface pointer", __func__));

	/* Do not grab PROMISC frames in case we are re-entered. */
	if (PFIL_HOOKED(&V_link_pfil_hook) && !(m->m_flags & M_PROMISC)) {
		i = pfil_run_hooks(&V_link_pfil_hook, &m, ifp, PFIL_IN, 0,
		    NULL);

		if (i != 0 || m == NULL)
			return;
	}

	eh = mtod(m, struct ether_header *);
	ether_type = ntohs(eh->ether_type);

	/*
	 * If this frame has a VLAN tag other than 0, call vlan_input()
	 * if its module is loaded. Otherwise, drop.
	 */
	if ((m->m_flags & M_VLANTAG) &&
	    EVL_VLANOFTAG(m->m_pkthdr.ether_vtag) != 0) {
		if (ifp->if_vlantrunk == NULL) {
			if_inc_counter(ifp, IFCOUNTER_NOPROTO, 1);
			m_freem(m);
			return;
		}
		KASSERT(vlan_input_p != NULL,("%s: VLAN not loaded!",
		    __func__));
		/* Clear before possibly re-entering ether_input(). */
		m->m_flags &= ~M_PROMISC;
		(*vlan_input_p)(ifp, m);
		return;
	}

	/*
	 * Pass promiscuously received frames to the upper layer if the user
	 * requested this by setting IFF_PPROMISC. Otherwise, drop them.
	 */
	if ((ifp->if_flags & IFF_PPROMISC) == 0 && (m->m_flags & M_PROMISC)) {
		m_freem(m);
		return;
	}

	/*
	 * Reset layer specific mbuf flags to avoid confusing upper layers.
	 * Strip off Ethernet header.
	 */
	m->m_flags &= ~M_VLANTAG;
	m_clrprotoflags(m);
	m_adj(m, ETHER_HDR_LEN);

	/*
	 * Dispatch frame to upper layer.
	 */
	switch (ether_type) {
#ifdef INET
	case ETHERTYPE_IP:
		isr = NETISR_IP;
		break;

	case ETHERTYPE_ARP:
		if (ifp->if_flags & IFF_NOARP) {
			/* Discard packet if ARP is disabled on interface */
			m_freem(m);
			return;
		}
		isr = NETISR_ARP;
		break;
#endif
#ifdef INET6
	case ETHERTYPE_IPV6:
		isr = NETISR_IPV6;
		break;
#endif
	default:
		goto discard;
	}
	netisr_dispatch(isr, m);
	return;

discard:
	/*
	 * Packet is to be discarded.  If netgraph is present,
	 * hand the packet to it for last chance processing;
	 * otherwise dispose of it.
	 */
	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_input_orphan_p != NULL,
		    ("ng_ether_input_orphan_p is NULL"));
		/*
		 * Put back the ethernet header so netgraph has a
		 * consistent view of inbound packets.
		 */
		M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
		(*ng_ether_input_orphan_p)(ifp, m);
		return;
	}
	m_freem(m);
}

/*
 * Convert Ethernet address to printable (loggable) representation.
 * This routine is for compatibility; it's better to just use
 *
 *	printf("%6D", <pointer to address>, ":");
 *
 * since there's no static buffer involved.
 */
char *
ether_sprintf(const u_char *ap)
{
	static char etherbuf[18];
	snprintf(etherbuf, sizeof (etherbuf), "%6D", ap, ":");
	return (etherbuf);
}

/*
 * Perform common duties while attaching to interface list
 */
void
ether_ifattach(struct ifnet *ifp, const u_int8_t *lla)
{
	int i;
	struct ifaddr *ifa;
	struct sockaddr_dl *sdl;

	ifp->if_addrlen = ETHER_ADDR_LEN;
	ifp->if_hdrlen = ETHER_HDR_LEN;
	if_attach(ifp);
	ifp->if_mtu = ETHERMTU;
	ifp->if_output = ether_output;
	ifp->if_input = ether_input;
	ifp->if_resolvemulti = ether_resolvemulti;
	ifp->if_requestencap = ether_requestencap;
#ifdef VIMAGE
	ifp->if_reassign = ether_reassign;
#endif
	if (ifp->if_baudrate == 0)
		ifp->if_baudrate = IF_Mbps(10);		/* just a default */
	ifp->if_broadcastaddr = etherbroadcastaddr;

	ifa = ifp->if_addr;
	KASSERT(ifa != NULL, ("%s: no lladdr!\n", __func__));
	sdl = (struct sockaddr_dl *)ifa->ifa_addr;
	sdl->sdl_type = IFT_ETHER;
	sdl->sdl_alen = ifp->if_addrlen;
	bcopy(lla, LLADDR(sdl), ifp->if_addrlen);

	if (ifp->if_hw_addr != NULL)
		bcopy(lla, ifp->if_hw_addr, ifp->if_addrlen);

	bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
	if (ng_ether_attach_p != NULL)
		(*ng_ether_attach_p)(ifp);

	/* Announce Ethernet MAC address if non-zero. */
	for (i = 0; i < ifp->if_addrlen; i++)
		if (lla[i] != 0)
			break;
	if (i != ifp->if_addrlen)
		if_printf(ifp, "Ethernet address: %6D\n", lla, ":");

	uuid_ether_add(LLADDR(sdl));

	/* All necessary bits are set up; announce it now. */
	EVENTHANDLER_INVOKE(ether_ifattach_event, ifp);
	if (IS_DEFAULT_VNET(curvnet))
		devctl_notify("ETHERNET", ifp->if_xname, "IFATTACH", NULL);
}

/*
 * Perform common duties while detaching an Ethernet interface
 */
void
ether_ifdetach(struct ifnet *ifp)
{
	struct sockaddr_dl *sdl;

	sdl = (struct sockaddr_dl *)(ifp->if_addr->ifa_addr);
	uuid_ether_del(LLADDR(sdl));

	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_detach_p != NULL,
		    ("ng_ether_detach_p is NULL"));
		(*ng_ether_detach_p)(ifp);
	}

	bpfdetach(ifp);
	if_detach(ifp);
}

#ifdef VIMAGE
void
ether_reassign(struct ifnet *ifp, struct vnet *new_vnet, char *unused __unused)
{

	if (ifp->if_l2com != NULL) {
		KASSERT(ng_ether_detach_p != NULL,
		    ("ng_ether_detach_p is NULL"));
		(*ng_ether_detach_p)(ifp);
	}

	if (ng_ether_attach_p != NULL) {
		CURVNET_SET_QUIET(new_vnet);
		(*ng_ether_attach_p)(ifp);
		CURVNET_RESTORE();
	}
}
#endif

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, IFT_ETHER, ether, CTLFLAG_RW, 0, "Ethernet");

#if 0
/*
 * This is for reference.  We have a table-driven version
 * of the little-endian crc32 generator, which is faster
 * than the double-loop.
 */
uint32_t
ether_crc32_le(const uint8_t *buf, size_t len)
{
	size_t i;
	uint32_t crc, carry;
	int bit;
	uint8_t data;

	crc = 0xffffffff;	/* initial value */

	for (i = 0; i < len; i++) {
		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = (crc ^ data) & 1;
			crc >>= 1;
			if (carry)
				crc = (crc ^ ETHER_CRC_POLY_LE);
		}
	}

	return (crc);
}
#else
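/*
 * Table-driven little-endian CRC32: the 16-entry table covers 4 bits at a
 * time, so each input byte takes two lookups.
 */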
uint32_t
ether_crc32_le(const uint8_t *buf, size_t len)
{
	static const uint32_t crctab[] = {
		0x00000000, 0x1db71064, 0x3b6e20c8, 0x26d930ac,
		0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
		0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c,
		0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c
	};
	size_t i;
	uint32_t crc;

	crc = 0xffffffff;	/* initial value */

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		crc = (crc >> 4) ^ crctab[crc & 0xf];
		crc = (crc >> 4) ^ crctab[crc & 0xf];
	}

	return (crc);
}
#endif

uint32_t
ether_crc32_be(const uint8_t *buf, size_t len)
{
	size_t i;
	uint32_t crc, carry;
	int bit;
	uint8_t data;

	crc = 0xffffffff;	/* initial value */

	for (i = 0; i < len; i++) {
		for (data = *buf++, bit = 0; bit < 8; bit++, data >>= 1) {
			carry = ((crc & 0x80000000) ? 1 : 0) ^ (data & 0x01);
			crc <<= 1;
			if (carry)
				crc = (crc ^ ETHER_CRC_POLY_BE) | carry;
		}
	}

	return (crc);
}

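/*
 * Default ioctl handling shared by Ethernet drivers: interface address
 * assignment, MTU limits and 802.1p PCP get/set requests.
 */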
int
ether_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int error = 0;

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;

		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			ifp->if_init(ifp->if_softc);	/* before arpwhohas */
			arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			ifp->if_init(ifp->if_softc);
			break;
		}
		break;

	case SIOCGIFADDR:
		bcopy(IF_LLADDR(ifp), &ifr->ifr_addr.sa_data[0],
		    ETHER_ADDR_LEN);
		break;

	case SIOCSIFMTU:
		/*
		 * Set the interface MTU.
		 */
		if (ifr->ifr_mtu > ETHERMTU) {
			error = EINVAL;
		} else {
			ifp->if_mtu = ifr->ifr_mtu;
		}
		break;

	case SIOCSLANPCP:
		error = priv_check(curthread, PRIV_NET_SETLANPCP);
		if (error != 0)
			break;
		if (ifr->ifr_lan_pcp > 7 &&
		    ifr->ifr_lan_pcp != IFNET_PCP_NONE) {
			error = EINVAL;
		} else {
			ifp->if_pcp = ifr->ifr_lan_pcp;
			/* broadcast event about PCP change */
			EVENTHANDLER_INVOKE(ifnet_event, ifp, IFNET_EVENT_PCP);
		}
		break;

	case SIOCGLANPCP:
		ifr->ifr_lan_pcp = ifp->if_pcp;
		break;

	default:
		error = EINVAL;			/* XXX netbsd has ENOTTY??? */
		break;
	}
	return (error);
}

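/*
 * Map a network-layer multicast address onto the corresponding Ethernet
 * multicast address, validating that the result (or the supplied link-layer
 * address) really is multicast.
 */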
static int
ether_resolvemulti(struct ifnet *ifp, struct sockaddr **llsa,
	struct sockaddr *sa)
{
	struct sockaddr_dl *sdl;
#ifdef INET
	struct sockaddr_in *sin;
#endif
#ifdef INET6
	struct sockaddr_in6 *sin6;
#endif
	u_char *e_addr;

	switch(sa->sa_family) {
	case AF_LINK:
		/*
		 * No mapping needed. Just check that it's a valid MC address.
		 */
		sdl = (struct sockaddr_dl *)sa;
		e_addr = LLADDR(sdl);
		if (!ETHER_IS_MULTICAST(e_addr))
			return EADDRNOTAVAIL;
		*llsa = NULL;
		return 0;

#ifdef INET
	case AF_INET:
		sin = (struct sockaddr_in *)sa;
		if (!IN_MULTICAST(ntohl(sin->sin_addr.s_addr)))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
		sdl->sdl_alen = ETHER_ADDR_LEN;
		e_addr = LLADDR(sdl);
		ETHER_MAP_IP_MULTICAST(&sin->sin_addr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif
#ifdef INET6
	case AF_INET6:
		sin6 = (struct sockaddr_in6 *)sa;
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			/*
			 * An IP6 address of 0 means listen to all
			 * of the Ethernet multicast address used for IP6.
			 * (This is used for multicast routers.)
			 */
			ifp->if_flags |= IFF_ALLMULTI;
			*llsa = NULL;
			return 0;
		}
		if (!IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr))
			return EADDRNOTAVAIL;
		sdl = link_init_sdl(ifp, *llsa, IFT_ETHER);
		sdl->sdl_alen = ETHER_ADDR_LEN;
		e_addr = LLADDR(sdl);
		ETHER_MAP_IPV6_MULTICAST(&sin6->sin6_addr, e_addr);
		*llsa = (struct sockaddr *)sdl;
		return 0;
#endif

	default:
		/*
		 * Well, the text isn't quite right, but it's the name
		 * that counts...
		 */
		return EAFNOSUPPORT;
	}
}

static moduledata_t ether_mod = {
	.name = "ether",
};

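/*
 * Tap a frame to BPF with its 802.1Q header reconstructed from the
 * ether_vtag stored in the packet header.  The mbuf's data pointer and
 * length are adjusted temporarily and restored before returning.
 */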
void
ether_vlan_mtap(struct bpf_if *bp, struct mbuf *m, void *data, u_int dlen)
{
	struct ether_vlan_header vlan;
	struct mbuf mv, mb;

	KASSERT((m->m_flags & M_VLANTAG) != 0,
	    ("%s: vlan information not present", __func__));
	KASSERT(m->m_len >= sizeof(struct ether_header),
	    ("%s: mbuf not large enough for header", __func__));
	bcopy(mtod(m, char *), &vlan, sizeof(struct ether_header));
	vlan.evl_proto = vlan.evl_encap_proto;
	vlan.evl_encap_proto = htons(ETHERTYPE_VLAN);
	vlan.evl_tag = htons(m->m_pkthdr.ether_vtag);
	m->m_len -= sizeof(struct ether_header);
	m->m_data += sizeof(struct ether_header);
	/*
	 * If a data link has been supplied by the caller, then we will need to
	 * re-create a stack allocated mbuf chain with the following structure:
	 *
	 * (1) mbuf #1 will contain the supplied data link
	 * (2) mbuf #2 will contain the vlan header
	 * (3) mbuf #3 will contain the original mbuf's packet data
	 *
	 * Otherwise, submit the packet and vlan header via bpf_mtap2().
	 */
	if (data != NULL) {
		mv.m_next = m;
		mv.m_data = (caddr_t)&vlan;
		mv.m_len = sizeof(vlan);
		mb.m_next = &mv;
		mb.m_data = data;
		mb.m_len = dlen;
		bpf_mtap(bp, &mb);
	} else
		bpf_mtap2(bp, &vlan, sizeof(vlan), m);
	m->m_len += sizeof(struct ether_header);
	m->m_data -= sizeof(struct ether_header);
}

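/*
 * Prepend an 802.1Q encapsulation header carrying the given tag to the
 * Ethernet frame in m.  Returns NULL if the mbuf chain could not be grown,
 * in which case the chain has been freed.
 */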
struct mbuf *
ether_vlanencap(struct mbuf *m, uint16_t tag)
{
	struct ether_vlan_header *evl;

	M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT);
	if (m == NULL)
		return (NULL);
	/* M_PREPEND takes care of m_len, m_pkthdr.len for us */

	if (m->m_len < sizeof(*evl)) {
		m = m_pullup(m, sizeof(*evl));
		if (m == NULL)
			return (NULL);
	}

	/*
	 * Transform the Ethernet header into an Ethernet header
	 * with 802.1Q encapsulation.
	 */
	evl = mtod(m, struct ether_vlan_header *);
	bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN,
	    (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN);
	evl->evl_encap_proto = htons(ETHERTYPE_VLAN);
	evl->evl_tag = htons(tag);
	return (m);
}

static SYSCTL_NODE(_net_link, IFT_L2VLAN, vlan, CTLFLAG_RW, 0,
    "IEEE 802.1Q VLAN");
static SYSCTL_NODE(_net_link_vlan, PF_LINK, link, CTLFLAG_RW, 0,
    "for consistency");

static VNET_DEFINE(int, soft_pad);
#define	V_soft_pad	VNET(soft_pad)
SYSCTL_INT(_net_link_vlan, OID_AUTO, soft_pad, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(soft_pad), 0,
    "pad short frames before tagging");

/*
 * For now, make preserving PCP via an mbuf tag optional, as it increases
 * per-packet memory allocations and frees.  In the future, it would be
 * preferable to reuse ether_vtag for this, or similar.
 */
int vlan_mtag_pcp = 0;
SYSCTL_INT(_net_link_vlan, OID_AUTO, mtag_pcp, CTLFLAG_RW,
    &vlan_mtag_pcp, 0,
    "Retain VLAN PCP information as packets are passed up the stack");

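/*
 * Tag an outbound frame for the given VLAN ID and priority: optionally
 * soft-pad short frames, then either request hardware tag insertion or
 * encapsulate the header in software.
 */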
bool
ether_8021q_frame(struct mbuf **mp, struct ifnet *ife, struct ifnet *p,
    uint16_t vid, uint8_t pcp)
{
	struct m_tag *mtag;
	int n;
	uint16_t tag;
	static const char pad[8];	/* just zeros */

	/*
	 * Pad the frame to the minimum size allowed if told to.
	 * This option is in accord with IEEE Std 802.1Q, 2003 Ed.,
	 * paragraph C.4.4.3.b.  It can help to work around buggy
	 * bridges that violate paragraph C.4.4.3.a from the same
	 * document, i.e., fail to pad short frames after untagging.
	 * E.g., a tagged frame 66 bytes long (incl. FCS) is OK, but
	 * untagging it will produce a 62-byte frame, which is a runt
	 * and requires padding.  There are VLAN-enabled network
	 * devices that just discard such runts instead or mishandle
	 * them somehow.
	 */
	if (V_soft_pad && p->if_type == IFT_ETHER) {
		for (n = ETHERMIN + ETHER_HDR_LEN - (*mp)->m_pkthdr.len;
		     n > 0; n -= sizeof(pad)) {
			if (!m_append(*mp, min(n, sizeof(pad)), pad))
				break;
		}
		if (n > 0) {
			m_freem(*mp);
			*mp = NULL;
			if_printf(ife, "cannot pad short frame\n");
			return (false);
		}
	}

	/*
	 * If underlying interface can do VLAN tag insertion itself,
	 * just pass the packet along. However, we need some way to
	 * tell the interface where the packet came from so that it
	 * knows how to find the VLAN tag to use, so we attach a
	 * packet tag that holds it.
	 */
	if (vlan_mtag_pcp && (mtag = m_tag_locate(*mp, MTAG_8021Q,
	    MTAG_8021Q_PCP_OUT, NULL)) != NULL)
		tag = EVL_MAKETAG(vid, *(uint8_t *)(mtag + 1), 0);
	else
		tag = EVL_MAKETAG(vid, pcp, 0);
	if (p->if_capenable & IFCAP_VLAN_HWTAGGING) {
		(*mp)->m_pkthdr.ether_vtag = tag;
		(*mp)->m_flags |= M_VLANTAG;
	} else {
		*mp = ether_vlanencap(*mp, tag);
		if (*mp == NULL) {
			if_printf(ife, "unable to prepend 802.1Q header\n");
			return (false);
		}
	}
	return (true);
}

/*
 * Allocate an address from the FreeBSD Foundation OUI.  This uses a
 * cryptographic hash function on the containing jail's name, UUID and the
 * interface name to attempt to provide a unique but stable address.
 * Pseudo-interfaces which require a MAC address should use this function to
 * allocate non-locally-administered addresses.
 */
void
ether_gen_addr(struct ifnet *ifp, struct ether_addr *hwaddr)
{
	SHA1_CTX ctx;
	char *buf;
	char uuid[HOSTUUIDLEN + 1];
	uint64_t addr;
	int i, sz;
	char digest[SHA1_RESULTLEN];
	char jailname[MAXHOSTNAMELEN];

	getcredhostuuid(curthread->td_ucred, uuid, sizeof(uuid));
	/* If each (vnet) jail would also have a unique hostuuid this would not
	 * be necessary. */
	getjailname(curthread->td_ucred, jailname, sizeof(jailname));
	sz = asprintf(&buf, M_TEMP, "%s-%s-%s", uuid, if_name(ifp),
	    jailname);
	if (sz < 0) {
		/* Fall back to a random mac address. */
		arc4rand(hwaddr, sizeof(*hwaddr), 0);
		hwaddr->octet[0] = 0x02;
		return;
	}

	SHA1Init(&ctx);
	SHA1Update(&ctx, buf, sz);
	SHA1Final(digest, &ctx);
	free(buf, M_TEMP);

	addr = ((digest[0] << 16) | (digest[1] << 8) | digest[2]) &
	    OUI_FREEBSD_GENERATED_MASK;
	addr = OUI_FREEBSD(addr);
	for (i = 0; i < ETHER_ADDR_LEN; ++i) {
		hwaddr->octet[i] = addr >> ((ETHER_ADDR_LEN - i - 1) * 8) &
		    0xFF;
	}
}

DECLARE_MODULE(ether, ether_mod, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(ether, 1);