if_lagg.c (251859) → if_lagg.c (252511)
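Summary of the change: relative to r251859 (delphij, 2013-06-17), r252511 (hrs, 2013-07-02) adds IPv6 awareness to lagg(4). It pulls in <netinet6/in6_var.h> and <netinet6/in6_ifattach.h> and extends lagg_port_create() with an INET6-only block that removes inet6 addresses from an interface before it becomes a lagg member, preventing link-local scope-zone violations. The rest of the file is unchanged between the two revisions.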
1/* $OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $ */
2
3/*
4 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
5 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include <sys/cdefs.h>
21__FBSDID("$FreeBSD: head/sys/net/if_lagg.c 251859 2013-06-17 19:31:03Z delphij $");
21__FBSDID("$FreeBSD: head/sys/net/if_lagg.c 252511 2013-07-02 16:58:15Z hrs $");
22
23#include "opt_inet.h"
24#include "opt_inet6.h"
25
26#include <sys/param.h>
27#include <sys/kernel.h>
28#include <sys/malloc.h>
29#include <sys/mbuf.h>
30#include <sys/queue.h>
31#include <sys/socket.h>
32#include <sys/sockio.h>
33#include <sys/sysctl.h>
34#include <sys/module.h>
35#include <sys/priv.h>
36#include <sys/systm.h>
37#include <sys/proc.h>
38#include <sys/hash.h>
39#include <sys/lock.h>
40#include <sys/rwlock.h>
41#include <sys/taskqueue.h>
42#include <sys/eventhandler.h>
43
44#include <net/ethernet.h>
45#include <net/if.h>
46#include <net/if_clone.h>
47#include <net/if_arp.h>
48#include <net/if_dl.h>
49#include <net/if_llc.h>
50#include <net/if_media.h>
51#include <net/if_types.h>
52#include <net/if_var.h>
53#include <net/bpf.h>
54
55#if defined(INET) || defined(INET6)
56#include <netinet/in.h>
57#endif
58#ifdef INET
59#include <netinet/in_systm.h>
60#include <netinet/if_ether.h>
61#include <netinet/ip.h>
62#endif
63
64#ifdef INET6
65#include <netinet/ip6.h>
66#include <netinet6/in6_var.h>
67#include <netinet6/in6_ifattach.h>
66#endif
67
68#include <net/if_vlan_var.h>
69#include <net/if_lagg.h>
70#include <net/ieee8023ad_lacp.h>
71
72/* Special flags we should propagate to the lagg ports. */
73static struct {
74 int flag;
75 int (*func)(struct ifnet *, int);
76} lagg_pflags[] = {
77 {IFF_PROMISC, ifpromisc},
78 {IFF_ALLMULTI, if_allmulti},
79 {0, NULL}
80};
81
82SLIST_HEAD(__trhead, lagg_softc) lagg_list; /* list of laggs */
83static struct mtx lagg_list_mtx;
84eventhandler_tag lagg_detach_cookie = NULL;
85
86static int lagg_clone_create(struct if_clone *, int, caddr_t);
87static void lagg_clone_destroy(struct ifnet *);
88static struct if_clone *lagg_cloner;
89static const char laggname[] = "lagg";
90
91static void lagg_lladdr(struct lagg_softc *, uint8_t *);
92static void lagg_capabilities(struct lagg_softc *);
93static void lagg_port_lladdr(struct lagg_port *, uint8_t *);
94static void lagg_port_setlladdr(void *, int);
95static int lagg_port_create(struct lagg_softc *, struct ifnet *);
96static int lagg_port_destroy(struct lagg_port *, int);
97static struct mbuf *lagg_input(struct ifnet *, struct mbuf *);
98static void lagg_linkstate(struct lagg_softc *);
99static void lagg_port_state(struct ifnet *, int);
100static int lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
101static int lagg_port_output(struct ifnet *, struct mbuf *,
102 const struct sockaddr *, struct route *);
103static void lagg_port_ifdetach(void *arg __unused, struct ifnet *);
104#ifdef LAGG_PORT_STACKING
105static int lagg_port_checkstacking(struct lagg_softc *);
106#endif
107static void lagg_port2req(struct lagg_port *, struct lagg_reqport *);
108static void lagg_init(void *);
109static void lagg_stop(struct lagg_softc *);
110static int lagg_ioctl(struct ifnet *, u_long, caddr_t);
111static int lagg_ether_setmulti(struct lagg_softc *);
112static int lagg_ether_cmdmulti(struct lagg_port *, int);
113static int lagg_setflag(struct lagg_port *, int, int,
114 int (*func)(struct ifnet *, int));
115static int lagg_setflags(struct lagg_port *, int status);
116static int lagg_transmit(struct ifnet *, struct mbuf *);
117static void lagg_qflush(struct ifnet *);
118static int lagg_media_change(struct ifnet *);
119static void lagg_media_status(struct ifnet *, struct ifmediareq *);
120static struct lagg_port *lagg_link_active(struct lagg_softc *,
121 struct lagg_port *);
122static const void *lagg_gethdr(struct mbuf *, u_int, u_int, void *);
123
124/* Simple round robin */
125static int lagg_rr_attach(struct lagg_softc *);
126static int lagg_rr_detach(struct lagg_softc *);
127static int lagg_rr_start(struct lagg_softc *, struct mbuf *);
128static struct mbuf *lagg_rr_input(struct lagg_softc *, struct lagg_port *,
129 struct mbuf *);
130
131/* Active failover */
132static int lagg_fail_attach(struct lagg_softc *);
133static int lagg_fail_detach(struct lagg_softc *);
134static int lagg_fail_start(struct lagg_softc *, struct mbuf *);
135static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
136 struct mbuf *);
137
138/* Loadbalancing */
139static int lagg_lb_attach(struct lagg_softc *);
140static int lagg_lb_detach(struct lagg_softc *);
141static int lagg_lb_port_create(struct lagg_port *);
142static void lagg_lb_port_destroy(struct lagg_port *);
143static int lagg_lb_start(struct lagg_softc *, struct mbuf *);
144static struct mbuf *lagg_lb_input(struct lagg_softc *, struct lagg_port *,
145 struct mbuf *);
146static int lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);
147
148/* 802.3ad LACP */
149static int lagg_lacp_attach(struct lagg_softc *);
150static int lagg_lacp_detach(struct lagg_softc *);
151static int lagg_lacp_start(struct lagg_softc *, struct mbuf *);
152static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
153 struct mbuf *);
154static void lagg_lacp_lladdr(struct lagg_softc *);
155
156static void lagg_callout(void *);
157
158/* lagg protocol table */
159static const struct {
160 int ti_proto;
161 int (*ti_attach)(struct lagg_softc *);
162} lagg_protos[] = {
163 { LAGG_PROTO_ROUNDROBIN, lagg_rr_attach },
164 { LAGG_PROTO_FAILOVER, lagg_fail_attach },
165 { LAGG_PROTO_LOADBALANCE, lagg_lb_attach },
166 { LAGG_PROTO_ETHERCHANNEL, lagg_lb_attach },
167 { LAGG_PROTO_LACP, lagg_lacp_attach },
168 { LAGG_PROTO_NONE, NULL }
169};
170
171SYSCTL_DECL(_net_link);
172static SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW, 0,
173 "Link Aggregation");
174
175static int lagg_failover_rx_all = 0; /* Allow input on any failover links */
176SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW,
177 &lagg_failover_rx_all, 0,
178 "Accept input from any interface in a failover lagg");
179static int def_use_flowid = 1; /* Default value for using M_FLOWID */
180TUNABLE_INT("net.link.lagg.default_use_flowid", &def_use_flowid);
181SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid, CTLFLAG_RW,
182 &def_use_flowid, 0,
183 "Default setting for using flow id for load sharing");
184
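/*
 * Note: because def_use_flowid is registered with TUNABLE_INT, the default
 * can be preset from loader.conf as net.link.lagg.default_use_flowid in
 * addition to being changed at runtime through the CTLFLAG_RW sysctl; each
 * lagg clone then snapshots this default into its own per-interface
 * use_flowid sysctl node in lagg_clone_create() below.
 */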
185static int
186lagg_modevent(module_t mod, int type, void *data)
187{
188
189 switch (type) {
190 case MOD_LOAD:
191 mtx_init(&lagg_list_mtx, "if_lagg list", NULL, MTX_DEF);
192 SLIST_INIT(&lagg_list);
193 lagg_cloner = if_clone_simple(laggname, lagg_clone_create,
194 lagg_clone_destroy, 0);
195 lagg_input_p = lagg_input;
196 lagg_linkstate_p = lagg_port_state;
197 lagg_detach_cookie = EVENTHANDLER_REGISTER(
198 ifnet_departure_event, lagg_port_ifdetach, NULL,
199 EVENTHANDLER_PRI_ANY);
200 break;
201 case MOD_UNLOAD:
202 EVENTHANDLER_DEREGISTER(ifnet_departure_event,
203 lagg_detach_cookie);
204 if_clone_detach(lagg_cloner);
205 lagg_input_p = NULL;
206 lagg_linkstate_p = NULL;
207 mtx_destroy(&lagg_list_mtx);
208 break;
209 default:
210 return (EOPNOTSUPP);
211 }
212 return (0);
213}
214
215static moduledata_t lagg_mod = {
216 "if_lagg",
217 lagg_modevent,
218 0
219};
220
221DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
222MODULE_VERSION(if_lagg, 1);
223
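/*
 * Note: the two global hooks published on MOD_LOAD are consumed elsewhere
 * in the stack: lagg_input_p is called from the generic Ethernet input
 * path to hand frames received on a member port back to the lagg, and
 * lagg_linkstate_p is invoked by the link-state change code so that the
 * aggregate can react when a member's link goes up or down.
 */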
224/*
 225 * This routine is run via a vlan
226 * config EVENT
227 */
228static void
229lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
230{
231 struct lagg_softc *sc = ifp->if_softc;
232 struct lagg_port *lp;
233
234 if (ifp->if_softc != arg) /* Not our event */
235 return;
236
237 LAGG_RLOCK(sc);
238 if (!SLIST_EMPTY(&sc->sc_ports)) {
239 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
240 EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
241 }
242 LAGG_RUNLOCK(sc);
243}
244
245/*
 246 * This routine is run via a vlan
247 * unconfig EVENT
248 */
249static void
250lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
251{
252 struct lagg_softc *sc = ifp->if_softc;
253 struct lagg_port *lp;
254
255 if (ifp->if_softc != arg) /* Not our event */
256 return;
257
258 LAGG_RLOCK(sc);
259 if (!SLIST_EMPTY(&sc->sc_ports)) {
260 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
261 EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
262 }
263 LAGG_RUNLOCK(sc);
264}
265
266static int
267lagg_clone_create(struct if_clone *ifc, int unit, caddr_t params)
268{
269 struct lagg_softc *sc;
270 struct ifnet *ifp;
271 int i, error = 0;
272 static const u_char eaddr[6]; /* 00:00:00:00:00:00 */
273 struct sysctl_oid *oid;
274 char num[14]; /* sufficient for 32 bits */
275
276 sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK|M_ZERO);
277 ifp = sc->sc_ifp = if_alloc(IFT_ETHER);
278 if (ifp == NULL) {
279 free(sc, M_DEVBUF);
280 return (ENOSPC);
281 }
282
283 sc->sc_ipackets = counter_u64_alloc(M_WAITOK);
284 sc->sc_opackets = counter_u64_alloc(M_WAITOK);
285 sc->sc_ibytes = counter_u64_alloc(M_WAITOK);
286 sc->sc_obytes = counter_u64_alloc(M_WAITOK);
287
288 sysctl_ctx_init(&sc->ctx);
289 snprintf(num, sizeof(num), "%u", unit);
290 sc->use_flowid = def_use_flowid;
291 oid = SYSCTL_ADD_NODE(&sc->ctx, &SYSCTL_NODE_CHILDREN(_net_link, lagg),
292 OID_AUTO, num, CTLFLAG_RD, NULL, "");
293 SYSCTL_ADD_INT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
294 "use_flowid", CTLTYPE_INT|CTLFLAG_RW, &sc->use_flowid, sc->use_flowid,
295 "Use flow id for load sharing");
296 SYSCTL_ADD_INT(&sc->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
297 "count", CTLTYPE_INT|CTLFLAG_RD, &sc->sc_count, sc->sc_count,
298 "Total number of ports");
299 /* Hash all layers by default */
300 sc->sc_flags = LAGG_F_HASHL2|LAGG_F_HASHL3|LAGG_F_HASHL4;
301
302 sc->sc_proto = LAGG_PROTO_NONE;
303 for (i = 0; lagg_protos[i].ti_proto != LAGG_PROTO_NONE; i++) {
304 if (lagg_protos[i].ti_proto == LAGG_PROTO_DEFAULT) {
305 sc->sc_proto = lagg_protos[i].ti_proto;
306 if ((error = lagg_protos[i].ti_attach(sc)) != 0) {
307 if_free(ifp);
308 free(sc, M_DEVBUF);
309 return (error);
310 }
311 break;
312 }
313 }
314 LAGG_LOCK_INIT(sc);
315 SLIST_INIT(&sc->sc_ports);
316 TASK_INIT(&sc->sc_lladdr_task, 0, lagg_port_setlladdr, sc);
317 callout_init_rw(&sc->sc_callout, &sc->sc_mtx, CALLOUT_SHAREDLOCK);
318
319 /* Initialise pseudo media types */
320 ifmedia_init(&sc->sc_media, 0, lagg_media_change,
321 lagg_media_status);
322 ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
323 ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);
324
325 if_initname(ifp, laggname, unit);
326 ifp->if_softc = sc;
327 ifp->if_transmit = lagg_transmit;
328 ifp->if_qflush = lagg_qflush;
329 ifp->if_init = lagg_init;
330 ifp->if_ioctl = lagg_ioctl;
331 ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
332
333 /*
 334 * Attach as an ordinary ethernet device; children will be attached
335 * as special device IFT_IEEE8023ADLAG.
336 */
337 ether_ifattach(ifp, eaddr);
338
339 sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
340 lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
341 sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
342 lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);
343
344 /* Insert into the global list of laggs */
345 mtx_lock(&lagg_list_mtx);
346 SLIST_INSERT_HEAD(&lagg_list, sc, sc_entries);
347 mtx_unlock(&lagg_list_mtx);
348
349 callout_reset(&sc->sc_callout, hz, lagg_callout, sc);
350
351 return (0);
352}
353
354static void
355lagg_clone_destroy(struct ifnet *ifp)
356{
357 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
358 struct lagg_port *lp;
359
360 LAGG_WLOCK(sc);
361
362 lagg_stop(sc);
363 ifp->if_flags &= ~IFF_UP;
364
365 EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
366 EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);
367
368 /* Shutdown and remove lagg ports */
369 while ((lp = SLIST_FIRST(&sc->sc_ports)) != NULL)
370 lagg_port_destroy(lp, 1);
371 /* Unhook the aggregation protocol */
372 if (sc->sc_detach != NULL)
373 (*sc->sc_detach)(sc);
374
375 LAGG_WUNLOCK(sc);
376
377 sysctl_ctx_free(&sc->ctx);
378 ifmedia_removeall(&sc->sc_media);
379 ether_ifdetach(ifp);
380 if_free(ifp);
381
382 callout_drain(&sc->sc_callout);
383 counter_u64_free(sc->sc_ipackets);
384 counter_u64_free(sc->sc_opackets);
385 counter_u64_free(sc->sc_ibytes);
386 counter_u64_free(sc->sc_obytes);
387
388 mtx_lock(&lagg_list_mtx);
389 SLIST_REMOVE(&lagg_list, sc, lagg_softc, sc_entries);
390 mtx_unlock(&lagg_list_mtx);
391
392 taskqueue_drain(taskqueue_swi, &sc->sc_lladdr_task);
393 LAGG_LOCK_DESTROY(sc);
394 free(sc, M_DEVBUF);
395}
396
397static void
398lagg_lladdr(struct lagg_softc *sc, uint8_t *lladdr)
399{
400 struct ifnet *ifp = sc->sc_ifp;
401
402 if (memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
403 return;
404
405 bcopy(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN);
406 /* Let the protocol know the MAC has changed */
407 if (sc->sc_lladdr != NULL)
408 (*sc->sc_lladdr)(sc);
409 EVENTHANDLER_INVOKE(iflladdr_event, ifp);
410}
411
412static void
413lagg_capabilities(struct lagg_softc *sc)
414{
415 struct lagg_port *lp;
416 int cap = ~0, ena = ~0;
417 u_long hwa = ~0UL;
418
419 LAGG_WLOCK_ASSERT(sc);
420
421 /* Get capabilities from the lagg ports */
422 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
423 cap &= lp->lp_ifp->if_capabilities;
424 ena &= lp->lp_ifp->if_capenable;
425 hwa &= lp->lp_ifp->if_hwassist;
426 }
427 cap = (cap == ~0 ? 0 : cap);
428 ena = (ena == ~0 ? 0 : ena);
429 hwa = (hwa == ~0 ? 0 : hwa);
430
431 if (sc->sc_ifp->if_capabilities != cap ||
432 sc->sc_ifp->if_capenable != ena ||
433 sc->sc_ifp->if_hwassist != hwa) {
434 sc->sc_ifp->if_capabilities = cap;
435 sc->sc_ifp->if_capenable = ena;
436 sc->sc_ifp->if_hwassist = hwa;
437 getmicrotime(&sc->sc_ifp->if_lastchange);
438
439 if (sc->sc_ifflags & IFF_DEBUG)
440 if_printf(sc->sc_ifp,
441 "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
442 }
443}
444
445static void
446lagg_port_lladdr(struct lagg_port *lp, uint8_t *lladdr)
447{
448 struct lagg_softc *sc = lp->lp_softc;
449 struct ifnet *ifp = lp->lp_ifp;
450 struct lagg_llq *llq;
451 int pending = 0;
452
453 LAGG_WLOCK_ASSERT(sc);
454
455 if (lp->lp_detaching ||
456 memcmp(lladdr, IF_LLADDR(ifp), ETHER_ADDR_LEN) == 0)
457 return;
458
 459 /* Check to make sure it's not already queued to be changed */
460 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
461 if (llq->llq_ifp == ifp) {
462 pending = 1;
463 break;
464 }
465 }
466
467 if (!pending) {
468 llq = malloc(sizeof(struct lagg_llq), M_DEVBUF, M_NOWAIT);
469 if (llq == NULL) /* XXX what to do */
470 return;
471 }
472
473 /* Update the lladdr even if pending, it may have changed */
474 llq->llq_ifp = ifp;
475 bcopy(lladdr, llq->llq_lladdr, ETHER_ADDR_LEN);
476
477 if (!pending)
478 SLIST_INSERT_HEAD(&sc->sc_llq_head, llq, llq_entries);
479
480 taskqueue_enqueue(taskqueue_swi, &sc->sc_lladdr_task);
481}
482
483/*
484 * Set the interface MAC address from a taskqueue to avoid a LOR.
485 */
486static void
487lagg_port_setlladdr(void *arg, int pending)
488{
489 struct lagg_softc *sc = (struct lagg_softc *)arg;
490 struct lagg_llq *llq, *head;
491 struct ifnet *ifp;
492 int error;
493
494 /* Grab a local reference of the queue and remove it from the softc */
495 LAGG_WLOCK(sc);
496 head = SLIST_FIRST(&sc->sc_llq_head);
497 SLIST_FIRST(&sc->sc_llq_head) = NULL;
498 LAGG_WUNLOCK(sc);
499
500 /*
501 * Traverse the queue and set the lladdr on each ifp. It is safe to do
502 * unlocked as we have the only reference to it.
503 */
504 for (llq = head; llq != NULL; llq = head) {
505 ifp = llq->llq_ifp;
506
507 /* Set the link layer address */
508 CURVNET_SET(ifp->if_vnet);
509 error = if_setlladdr(ifp, llq->llq_lladdr, ETHER_ADDR_LEN);
510 CURVNET_RESTORE();
511 if (error)
512 printf("%s: setlladdr failed on %s\n", __func__,
513 ifp->if_xname);
514
515 head = SLIST_NEXT(llq, llq_entries);
516 free(llq, M_DEVBUF);
517 }
518}
519
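/*
 * Design note: if_setlladdr() reinitializes a running interface and may
 * take per-driver locks, so calling it with the lagg rwlock held risks
 * the lock-order reversal (LOR) mentioned above. lagg_port_lladdr()
 * therefore only queues the new address under the write lock, and this
 * SWI task applies the queued changes with no lagg lock held.
 */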
520static int
521lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
522{
523 struct lagg_softc *sc_ptr;
524 struct lagg_port *lp;
525 int error = 0;
526
527 LAGG_WLOCK_ASSERT(sc);
528
529 /* Limit the maximal number of lagg ports */
530 if (sc->sc_count >= LAGG_MAX_PORTS)
531 return (ENOSPC);
532
533 /* Check if port has already been associated to a lagg */
534 if (ifp->if_lagg != NULL) {
535 /* Port is already in the current lagg? */
536 lp = (struct lagg_port *)ifp->if_lagg;
537 if (lp->lp_softc == sc)
538 return (EEXIST);
539 return (EBUSY);
540 }
541
542 /* XXX Disallow non-ethernet interfaces (this should be any of 802) */
543 if (ifp->if_type != IFT_ETHER)
544 return (EPROTONOSUPPORT);
545
548#ifdef INET6
549 /*
 550 * The member interface should not have an inet6 address because
 551 * two interfaces with a valid link-local scope zone must not be
 552 * merged in any form. This restriction is needed to
 553 * prevent violation of the link-local scope zone. Attempts to
 554 * add a member interface which has inet6 addresses trigger
555 * removal of all inet6 addresses on the member interface.
556 */
557 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
558 if (in6ifa_llaonifp(lp->lp_ifp)) {
559 in6_ifdetach(lp->lp_ifp);
560 if_printf(sc->sc_ifp,
561 "IPv6 addresses on %s have been removed "
562 "before adding it as a member to prevent "
563 "IPv6 address scope violation.\n",
564 lp->lp_ifp->if_xname);
565 }
566 }
567 if (in6ifa_llaonifp(ifp)) {
568 in6_ifdetach(ifp);
569 if_printf(sc->sc_ifp,
570 "IPv6 addresses on %s have been removed "
571 "before adding it as a member to prevent "
572 "IPv6 address scope violation.\n",
573 ifp->if_xname);
574 }
575#endif
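/*
 * Note: in6ifa_llaonifp(), declared in the newly added netinet6 headers,
 * looks up the link-local inet6 address on an interface, and
 * in6_ifdetach() tears down the interface's inet6 state. The check runs
 * over the existing members as well as the interface being added, so no
 * member of a lagg retains a link-local scope zone of its own.
 */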
546 /* Allow the first Ethernet member to define the MTU */
547 if (SLIST_EMPTY(&sc->sc_ports))
548 sc->sc_ifp->if_mtu = ifp->if_mtu;
549 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
550 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
551 ifp->if_xname);
552 return (EINVAL);
553 }
554
555 if ((lp = malloc(sizeof(struct lagg_port),
556 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
557 return (ENOMEM);
558
559 /* Check if port is a stacked lagg */
560 mtx_lock(&lagg_list_mtx);
561 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
562 if (ifp == sc_ptr->sc_ifp) {
563 mtx_unlock(&lagg_list_mtx);
564 free(lp, M_DEVBUF);
565 return (EINVAL);
 566 /* XXX disable stacking for the moment, it's untested */
567#ifdef LAGG_PORT_STACKING
568 lp->lp_flags |= LAGG_PORT_STACK;
569 if (lagg_port_checkstacking(sc_ptr) >=
570 LAGG_MAX_STACKING) {
571 mtx_unlock(&lagg_list_mtx);
572 free(lp, M_DEVBUF);
573 return (E2BIG);
574 }
575#endif
576 }
577 }
578 mtx_unlock(&lagg_list_mtx);
579
580 /* Change the interface type */
581 lp->lp_iftype = ifp->if_type;
582 ifp->if_type = IFT_IEEE8023ADLAG;
583 ifp->if_lagg = lp;
584 lp->lp_ioctl = ifp->if_ioctl;
585 ifp->if_ioctl = lagg_port_ioctl;
586 lp->lp_output = ifp->if_output;
587 ifp->if_output = lagg_port_output;
588
589 lp->lp_ifp = ifp;
590 lp->lp_softc = sc;
591
592 /* Save port link layer address */
593 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
594
595 if (SLIST_EMPTY(&sc->sc_ports)) {
596 sc->sc_primary = lp;
597 lagg_lladdr(sc, IF_LLADDR(ifp));
598 } else {
599 /* Update link layer address for this port */
600 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
601 }
602
603 /* Insert into the list of ports */
604 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
605 sc->sc_count++;
606
607 /* Update lagg capabilities */
608 lagg_capabilities(sc);
609 lagg_linkstate(sc);
610
611 /* Add multicast addresses and interface flags to this port */
612 lagg_ether_cmdmulti(lp, 1);
613 lagg_setflags(lp, 1);
614
615 if (sc->sc_port_create != NULL)
616 error = (*sc->sc_port_create)(lp);
617 if (error) {
618 /* remove the port again, without calling sc_port_destroy */
619 lagg_port_destroy(lp, 0);
620 return (error);
621 }
622
623 return (error);
624}
625
626#ifdef LAGG_PORT_STACKING
627static int
628lagg_port_checkstacking(struct lagg_softc *sc)
629{
630 struct lagg_softc *sc_ptr;
631 struct lagg_port *lp;
632 int m = 0;
633
634 LAGG_WLOCK_ASSERT(sc);
635
636 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
637 if (lp->lp_flags & LAGG_PORT_STACK) {
638 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
639 m = MAX(m, lagg_port_checkstacking(sc_ptr));
640 }
641 }
642
643 return (m + 1);
644}
645#endif
646
647static int
648lagg_port_destroy(struct lagg_port *lp, int runpd)
649{
650 struct lagg_softc *sc = lp->lp_softc;
651 struct lagg_port *lp_ptr;
652 struct lagg_llq *llq;
653 struct ifnet *ifp = lp->lp_ifp;
654
655 LAGG_WLOCK_ASSERT(sc);
656
657 if (runpd && sc->sc_port_destroy != NULL)
658 (*sc->sc_port_destroy)(lp);
659
660 /*
661 * Remove multicast addresses and interface flags from this port and
662 * reset the MAC address, skip if the interface is being detached.
663 */
664 if (!lp->lp_detaching) {
665 lagg_ether_cmdmulti(lp, 0);
666 lagg_setflags(lp, 0);
667 lagg_port_lladdr(lp, lp->lp_lladdr);
668 }
669
670 /* Restore interface */
671 ifp->if_type = lp->lp_iftype;
672 ifp->if_ioctl = lp->lp_ioctl;
673 ifp->if_output = lp->lp_output;
674 ifp->if_lagg = NULL;
675
676 /* Finally, remove the port from the lagg */
677 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
678 sc->sc_count--;
679
680 /* Update the primary interface */
681 if (lp == sc->sc_primary) {
682 uint8_t lladdr[ETHER_ADDR_LEN];
683
684 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
685 bzero(&lladdr, ETHER_ADDR_LEN);
686 } else {
687 bcopy(lp_ptr->lp_lladdr,
688 lladdr, ETHER_ADDR_LEN);
689 }
690 lagg_lladdr(sc, lladdr);
691 sc->sc_primary = lp_ptr;
692
693 /* Update link layer address for each port */
694 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
695 lagg_port_lladdr(lp_ptr, lladdr);
696 }
697
698 /* Remove any pending lladdr changes from the queue */
699 if (lp->lp_detaching) {
700 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
701 if (llq->llq_ifp == ifp) {
702 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
703 llq_entries);
704 free(llq, M_DEVBUF);
705 break; /* Only appears once */
706 }
707 }
708 }
709
710 if (lp->lp_ifflags)
711 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
712
713 free(lp, M_DEVBUF);
714
715 /* Update lagg capabilities */
716 lagg_capabilities(sc);
717 lagg_linkstate(sc);
718
719 return (0);
720}
721
722static int
723lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
724{
725 struct lagg_reqport *rp = (struct lagg_reqport *)data;
726 struct lagg_softc *sc;
727 struct lagg_port *lp = NULL;
728 int error = 0;
729
730 /* Should be checked by the caller */
731 if (ifp->if_type != IFT_IEEE8023ADLAG ||
732 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
733 goto fallback;
734
735 switch (cmd) {
736 case SIOCGLAGGPORT:
737 if (rp->rp_portname[0] == '\0' ||
738 ifunit(rp->rp_portname) != ifp) {
739 error = EINVAL;
740 break;
741 }
742
743 LAGG_RLOCK(sc);
744 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
745 error = ENOENT;
746 LAGG_RUNLOCK(sc);
747 break;
748 }
749
750 lagg_port2req(lp, rp);
751 LAGG_RUNLOCK(sc);
752 break;
753
754 case SIOCSIFCAP:
755 if (lp->lp_ioctl == NULL) {
756 error = EINVAL;
757 break;
758 }
759 error = (*lp->lp_ioctl)(ifp, cmd, data);
760 if (error)
761 break;
762
763 /* Update lagg interface capabilities */
764 LAGG_WLOCK(sc);
765 lagg_capabilities(sc);
766 LAGG_WUNLOCK(sc);
767 break;
768
769 case SIOCSIFMTU:
770 /* Do not allow the MTU to be changed once joined */
771 error = EINVAL;
772 break;
773
774 default:
775 goto fallback;
776 }
777
778 return (error);
779
780fallback:
 781 if (lp != NULL && lp->lp_ioctl != NULL)
782 return ((*lp->lp_ioctl)(ifp, cmd, data));
783
784 return (EINVAL);
785}
786
787/*
788 * For direct output to child ports.
789 */
790static int
791lagg_port_output(struct ifnet *ifp, struct mbuf *m,
792 const struct sockaddr *dst, struct route *ro)
793{
794 struct lagg_port *lp = ifp->if_lagg;
795
796 switch (dst->sa_family) {
797 case pseudo_AF_HDRCMPLT:
798 case AF_UNSPEC:
799 return ((*lp->lp_output)(ifp, m, dst, ro));
800 }
801
802 /* drop any other frames */
803 m_freem(m);
804 return (ENETDOWN);
805}
806
807static void
808lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
809{
810 struct lagg_port *lp;
811 struct lagg_softc *sc;
812
813 if ((lp = ifp->if_lagg) == NULL)
814 return;
815 /* If the ifnet is just being renamed, don't do anything. */
816 if (ifp->if_flags & IFF_RENAMING)
817 return;
818
819 sc = lp->lp_softc;
820
821 LAGG_WLOCK(sc);
822 lp->lp_detaching = 1;
823 lagg_port_destroy(lp, 1);
824 LAGG_WUNLOCK(sc);
825}
826
827static void
828lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
829{
830 struct lagg_softc *sc = lp->lp_softc;
831
832 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
833 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
834 rp->rp_prio = lp->lp_prio;
835 rp->rp_flags = lp->lp_flags;
836 if (sc->sc_portreq != NULL)
837 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
838
839 /* Add protocol specific flags */
840 switch (sc->sc_proto) {
841 case LAGG_PROTO_FAILOVER:
842 if (lp == sc->sc_primary)
843 rp->rp_flags |= LAGG_PORT_MASTER;
844 if (lp == lagg_link_active(sc, sc->sc_primary))
845 rp->rp_flags |= LAGG_PORT_ACTIVE;
846 break;
847
848 case LAGG_PROTO_ROUNDROBIN:
849 case LAGG_PROTO_LOADBALANCE:
850 case LAGG_PROTO_ETHERCHANNEL:
851 if (LAGG_PORTACTIVE(lp))
852 rp->rp_flags |= LAGG_PORT_ACTIVE;
853 break;
854
855 case LAGG_PROTO_LACP:
856 /* LACP has a different definition of active */
857 if (lacp_isactive(lp))
858 rp->rp_flags |= LAGG_PORT_ACTIVE;
859 if (lacp_iscollecting(lp))
860 rp->rp_flags |= LAGG_PORT_COLLECTING;
861 if (lacp_isdistributing(lp))
862 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
863 break;
864 }
865
866}
867
868static void
869lagg_init(void *xsc)
870{
871 struct lagg_softc *sc = (struct lagg_softc *)xsc;
872 struct lagg_port *lp;
873 struct ifnet *ifp = sc->sc_ifp;
874
875 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
876 return;
877
878 LAGG_WLOCK(sc);
879
880 ifp->if_drv_flags |= IFF_DRV_RUNNING;
881 /* Update the port lladdrs */
882 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
883 lagg_port_lladdr(lp, IF_LLADDR(ifp));
884
885 if (sc->sc_init != NULL)
886 (*sc->sc_init)(sc);
887
888 LAGG_WUNLOCK(sc);
889}
890
891static void
892lagg_stop(struct lagg_softc *sc)
893{
894 struct ifnet *ifp = sc->sc_ifp;
895
896 LAGG_WLOCK_ASSERT(sc);
897
898 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
899 return;
900
901 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
902
903 if (sc->sc_stop != NULL)
904 (*sc->sc_stop)(sc);
905}
906
907static int
908lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
909{
910 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
911 struct lagg_reqall *ra = (struct lagg_reqall *)data;
912 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
913 struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
914 struct ifreq *ifr = (struct ifreq *)data;
915 struct lagg_port *lp;
916 struct ifnet *tpif;
917 struct thread *td = curthread;
918 char *buf, *outbuf;
919 int count, buflen, len, error = 0;
920
921 bzero(&rpbuf, sizeof(rpbuf));
922
923 switch (cmd) {
924 case SIOCGLAGG:
925 LAGG_RLOCK(sc);
926 count = 0;
927 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
928 count++;
929 buflen = count * sizeof(struct lagg_reqport);
930 LAGG_RUNLOCK(sc);
931
932 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
933
934 LAGG_RLOCK(sc);
935 ra->ra_proto = sc->sc_proto;
936 if (sc->sc_req != NULL)
937 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
938
939 count = 0;
940 buf = outbuf;
941 len = min(ra->ra_size, buflen);
942 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
943 if (len < sizeof(rpbuf))
944 break;
945
946 lagg_port2req(lp, &rpbuf);
947 memcpy(buf, &rpbuf, sizeof(rpbuf));
948 count++;
949 buf += sizeof(rpbuf);
950 len -= sizeof(rpbuf);
951 }
952 LAGG_RUNLOCK(sc);
953 ra->ra_ports = count;
954 ra->ra_size = count * sizeof(rpbuf);
955 error = copyout(outbuf, ra->ra_port, ra->ra_size);
956 free(outbuf, M_TEMP);
957 break;
958 case SIOCSLAGG:
959 error = priv_check(td, PRIV_NET_LAGG);
960 if (error)
961 break;
962 if (ra->ra_proto >= LAGG_PROTO_MAX) {
963 error = EPROTONOSUPPORT;
964 break;
965 }
966 LAGG_WLOCK(sc);
967 if (sc->sc_proto != LAGG_PROTO_NONE) {
968 /* Reset protocol first in case detach unlocks */
969 sc->sc_proto = LAGG_PROTO_NONE;
970 error = sc->sc_detach(sc);
971 sc->sc_detach = NULL;
972 sc->sc_start = NULL;
973 sc->sc_input = NULL;
974 sc->sc_port_create = NULL;
975 sc->sc_port_destroy = NULL;
976 sc->sc_linkstate = NULL;
977 sc->sc_init = NULL;
978 sc->sc_stop = NULL;
979 sc->sc_lladdr = NULL;
980 sc->sc_req = NULL;
981 sc->sc_portreq = NULL;
982 } else if (sc->sc_input != NULL) {
983 /* Still detaching */
984 error = EBUSY;
985 }
986 if (error != 0) {
987 LAGG_WUNLOCK(sc);
988 break;
989 }
990 for (int i = 0; i < (sizeof(lagg_protos) /
991 sizeof(lagg_protos[0])); i++) {
992 if (lagg_protos[i].ti_proto == ra->ra_proto) {
993 if (sc->sc_ifflags & IFF_DEBUG)
994 printf("%s: using proto %u\n",
995 sc->sc_ifname,
996 lagg_protos[i].ti_proto);
997 sc->sc_proto = lagg_protos[i].ti_proto;
998 if (sc->sc_proto != LAGG_PROTO_NONE)
999 error = lagg_protos[i].ti_attach(sc);
1000 LAGG_WUNLOCK(sc);
1001 return (error);
1002 }
1003 }
1004 LAGG_WUNLOCK(sc);
1005 error = EPROTONOSUPPORT;
1006 break;
1007 case SIOCGLAGGFLAGS:
1008 rf->rf_flags = sc->sc_flags;
1009 break;
1010 case SIOCSLAGGHASH:
1011 error = priv_check(td, PRIV_NET_LAGG);
1012 if (error)
1013 break;
1014 if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
1015 error = EINVAL;
1016 break;
1017 }
1018 LAGG_WLOCK(sc);
1019 sc->sc_flags &= ~LAGG_F_HASHMASK;
1020 sc->sc_flags |= rf->rf_flags & LAGG_F_HASHMASK;
1021 LAGG_WUNLOCK(sc);
1022 break;
1023 case SIOCGLAGGPORT:
1024 if (rp->rp_portname[0] == '\0' ||
1025 (tpif = ifunit(rp->rp_portname)) == NULL) {
1026 error = EINVAL;
1027 break;
1028 }
1029
1030 LAGG_RLOCK(sc);
1031 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1032 lp->lp_softc != sc) {
1033 error = ENOENT;
1034 LAGG_RUNLOCK(sc);
1035 break;
1036 }
1037
1038 lagg_port2req(lp, rp);
1039 LAGG_RUNLOCK(sc);
1040 break;
1041 case SIOCSLAGGPORT:
1042 error = priv_check(td, PRIV_NET_LAGG);
1043 if (error)
1044 break;
1045 if (rp->rp_portname[0] == '\0' ||
1046 (tpif = ifunit(rp->rp_portname)) == NULL) {
1047 error = EINVAL;
1048 break;
1049 }
1050 LAGG_WLOCK(sc);
1051 error = lagg_port_create(sc, tpif);
1052 LAGG_WUNLOCK(sc);
1053 break;
1054 case SIOCSLAGGDELPORT:
1055 error = priv_check(td, PRIV_NET_LAGG);
1056 if (error)
1057 break;
1058 if (rp->rp_portname[0] == '\0' ||
1059 (tpif = ifunit(rp->rp_portname)) == NULL) {
1060 error = EINVAL;
1061 break;
1062 }
1063
1064 LAGG_WLOCK(sc);
1065 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1066 lp->lp_softc != sc) {
1067 error = ENOENT;
1068 LAGG_WUNLOCK(sc);
1069 break;
1070 }
1071
1072 error = lagg_port_destroy(lp, 1);
1073 LAGG_WUNLOCK(sc);
1074 break;
1075 case SIOCSIFFLAGS:
1076 /* Set flags on ports too */
1077 LAGG_WLOCK(sc);
1078 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1079 lagg_setflags(lp, 1);
1080 }
1081 LAGG_WUNLOCK(sc);
1082
1083 if (!(ifp->if_flags & IFF_UP) &&
1084 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1085 /*
1086 * If interface is marked down and it is running,
1087 * then stop and disable it.
1088 */
1089 LAGG_WLOCK(sc);
1090 lagg_stop(sc);
1091 LAGG_WUNLOCK(sc);
1092 } else if ((ifp->if_flags & IFF_UP) &&
1093 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1094 /*
1095 * If interface is marked up and it is stopped, then
1096 * start it.
1097 */
1098 (*ifp->if_init)(sc);
1099 }
1100 break;
1101 case SIOCADDMULTI:
1102 case SIOCDELMULTI:
1103 LAGG_WLOCK(sc);
1104 error = lagg_ether_setmulti(sc);
1105 LAGG_WUNLOCK(sc);
1106 break;
1107 case SIOCSIFMEDIA:
1108 case SIOCGIFMEDIA:
1109 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1110 break;
1111
1112 case SIOCSIFCAP:
1113 case SIOCSIFMTU:
1114 /* Do not allow the MTU or caps to be directly changed */
1115 error = EINVAL;
1116 break;
1117
1118 default:
1119 error = ether_ioctl(ifp, cmd, data);
1120 break;
1121 }
1122 return (error);
1123}
1124
1125static int
1126lagg_ether_setmulti(struct lagg_softc *sc)
1127{
1128 struct lagg_port *lp;
1129
1130 LAGG_WLOCK_ASSERT(sc);
1131
1132 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1133 /* First, remove any existing filter entries. */
1134 lagg_ether_cmdmulti(lp, 0);
1135 /* copy all addresses from the lagg interface to the port */
1136 lagg_ether_cmdmulti(lp, 1);
1137 }
1138 return (0);
1139}
1140
1141static int
1142lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1143{
1144 struct lagg_softc *sc = lp->lp_softc;
1145 struct ifnet *ifp = lp->lp_ifp;
1146 struct ifnet *scifp = sc->sc_ifp;
1147 struct lagg_mc *mc;
1148 struct ifmultiaddr *ifma, *rifma = NULL;
1149 struct sockaddr_dl sdl;
1150 int error;
1151
1152 LAGG_WLOCK_ASSERT(sc);
1153
1154 bzero((char *)&sdl, sizeof(sdl));
1155 sdl.sdl_len = sizeof(sdl);
1156 sdl.sdl_family = AF_LINK;
1157 sdl.sdl_type = IFT_ETHER;
1158 sdl.sdl_alen = ETHER_ADDR_LEN;
1159 sdl.sdl_index = ifp->if_index;
1160
1161 if (set) {
1162 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1163 if (ifma->ifma_addr->sa_family != AF_LINK)
1164 continue;
1165 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1166 LLADDR(&sdl), ETHER_ADDR_LEN);
1167
1168 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1169 if (error)
1170 return (error);
1171 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1172 if (mc == NULL)
1173 return (ENOMEM);
1174 mc->mc_ifma = rifma;
1175 SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
1176 }
1177 } else {
1178 while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
1179 SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
1180 if_delmulti_ifma(mc->mc_ifma);
1181 free(mc, M_DEVBUF);
1182 }
1183 }
1184 return (0);
1185}
1186
1187/* Handle a ref counted flag that should be set on the lagg port as well */
1188static int
1189lagg_setflag(struct lagg_port *lp, int flag, int status,
1190 int (*func)(struct ifnet *, int))
1191{
1192 struct lagg_softc *sc = lp->lp_softc;
1193 struct ifnet *scifp = sc->sc_ifp;
1194 struct ifnet *ifp = lp->lp_ifp;
1195 int error;
1196
1197 LAGG_WLOCK_ASSERT(sc);
1198
1199 status = status ? (scifp->if_flags & flag) : 0;
1200 /* Now "status" contains the flag value or 0 */
1201
1202 /*
1203 * See if recorded ports status is different from what
1204 * we want it to be. If it is, flip it. We record ports
1205 * status in lp_ifflags so that we won't clear ports flag
1206 * we haven't set. In fact, we don't clear or set ports
1207 * flags directly, but get or release references to them.
1208 * That's why we can be sure that recorded flags still are
1209 * in accord with actual ports flags.
1210 */
1211 if (status != (lp->lp_ifflags & flag)) {
1212 error = (*func)(ifp, status);
1213 if (error)
1214 return (error);
1215 lp->lp_ifflags &= ~flag;
1216 lp->lp_ifflags |= status;
1217 }
1218 return (0);
1219}
1220
1221/*
1222 * Handle IFF_* flags that require certain changes on the lagg port
1223 * if "status" is true, update ports flags respective to the lagg
1224 * if "status" is false, forcedly clear the flags set on port.
1225 */
1226static int
1227lagg_setflags(struct lagg_port *lp, int status)
1228{
1229 int error, i;
1230
1231 for (i = 0; lagg_pflags[i].flag; i++) {
1232 error = lagg_setflag(lp, lagg_pflags[i].flag,
1233 status, lagg_pflags[i].func);
1234 if (error)
576 /* Allow the first Ethernet member to define the MTU */
577 if (SLIST_EMPTY(&sc->sc_ports))
578 sc->sc_ifp->if_mtu = ifp->if_mtu;
579 else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
580 if_printf(sc->sc_ifp, "invalid MTU for %s\n",
581 ifp->if_xname);
582 return (EINVAL);
583 }
584
585 if ((lp = malloc(sizeof(struct lagg_port),
586 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
587 return (ENOMEM);
588
589 /* Check if port is a stacked lagg */
590 mtx_lock(&lagg_list_mtx);
591 SLIST_FOREACH(sc_ptr, &lagg_list, sc_entries) {
592 if (ifp == sc_ptr->sc_ifp) {
593 mtx_unlock(&lagg_list_mtx);
594 free(lp, M_DEVBUF);
595 return (EINVAL);
596 /* XXX disable stacking for the moment, it's untested */
597#ifdef LAGG_PORT_STACKING
598 lp->lp_flags |= LAGG_PORT_STACK;
599 if (lagg_port_checkstacking(sc_ptr) >=
600 LAGG_MAX_STACKING) {
601 mtx_unlock(&lagg_list_mtx);
602 free(lp, M_DEVBUF);
603 return (E2BIG);
604 }
605#endif
606 }
607 }
608 mtx_unlock(&lagg_list_mtx);
609
610 /* Change the interface type */
611 lp->lp_iftype = ifp->if_type;
612 ifp->if_type = IFT_IEEE8023ADLAG;
613 ifp->if_lagg = lp;
614 lp->lp_ioctl = ifp->if_ioctl;
615 ifp->if_ioctl = lagg_port_ioctl;
616 lp->lp_output = ifp->if_output;
617 ifp->if_output = lagg_port_output;
618
619 lp->lp_ifp = ifp;
620 lp->lp_softc = sc;
621
622 /* Save port link layer address */
623 bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ETHER_ADDR_LEN);
624
625 if (SLIST_EMPTY(&sc->sc_ports)) {
626 sc->sc_primary = lp;
627 lagg_lladdr(sc, IF_LLADDR(ifp));
628 } else {
629 /* Update link layer address for this port */
630 lagg_port_lladdr(lp, IF_LLADDR(sc->sc_ifp));
631 }
632
633 /* Insert into the list of ports */
634 SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
635 sc->sc_count++;
636
637 /* Update lagg capabilities */
638 lagg_capabilities(sc);
639 lagg_linkstate(sc);
640
641 /* Add multicast addresses and interface flags to this port */
642 lagg_ether_cmdmulti(lp, 1);
643 lagg_setflags(lp, 1);
644
645 if (sc->sc_port_create != NULL)
646 error = (*sc->sc_port_create)(lp);
647 if (error) {
648 /* remove the port again, without calling sc_port_destroy */
649 lagg_port_destroy(lp, 0);
650 return (error);
651 }
652
653 return (error);
654}
655
656#ifdef LAGG_PORT_STACKING
657static int
658lagg_port_checkstacking(struct lagg_softc *sc)
659{
660 struct lagg_softc *sc_ptr;
661 struct lagg_port *lp;
662 int m = 0;
663
664 LAGG_WLOCK_ASSERT(sc);
665
666 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
667 if (lp->lp_flags & LAGG_PORT_STACK) {
668 sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
669 m = MAX(m, lagg_port_checkstacking(sc_ptr));
670 }
671 }
672
673 return (m + 1);
674}
675#endif
676
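/*
 * Detach a port from the lagg: run the protocol's port-destroy hook
 * (when "runpd" is set), undo the multicast, flag and link-layer
 * address changes, restore the original ifnet methods, unlink the
 * port and, if it was the primary, promote the next port and
 * reprogram the MAC address on all remaining ports.
 */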
677static int
678lagg_port_destroy(struct lagg_port *lp, int runpd)
679{
680 struct lagg_softc *sc = lp->lp_softc;
681 struct lagg_port *lp_ptr;
682 struct lagg_llq *llq;
683 struct ifnet *ifp = lp->lp_ifp;
684
685 LAGG_WLOCK_ASSERT(sc);
686
687 if (runpd && sc->sc_port_destroy != NULL)
688 (*sc->sc_port_destroy)(lp);
689
690 /*
691 * Remove multicast addresses and interface flags from this port and
692 * reset the MAC address; skip this if the interface is being detached.
693 */
694 if (!lp->lp_detaching) {
695 lagg_ether_cmdmulti(lp, 0);
696 lagg_setflags(lp, 0);
697 lagg_port_lladdr(lp, lp->lp_lladdr);
698 }
699
700 /* Restore interface */
701 ifp->if_type = lp->lp_iftype;
702 ifp->if_ioctl = lp->lp_ioctl;
703 ifp->if_output = lp->lp_output;
704 ifp->if_lagg = NULL;
705
706 /* Finally, remove the port from the lagg */
707 SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
708 sc->sc_count--;
709
710 /* Update the primary interface */
711 if (lp == sc->sc_primary) {
712 uint8_t lladdr[ETHER_ADDR_LEN];
713
714 if ((lp_ptr = SLIST_FIRST(&sc->sc_ports)) == NULL) {
715 bzero(&lladdr, ETHER_ADDR_LEN);
716 } else {
717 bcopy(lp_ptr->lp_lladdr,
718 lladdr, ETHER_ADDR_LEN);
719 }
720 lagg_lladdr(sc, lladdr);
721 sc->sc_primary = lp_ptr;
722
723 /* Update link layer address for each port */
724 SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
725 lagg_port_lladdr(lp_ptr, lladdr);
726 }
727
728 /* Remove any pending lladdr changes from the queue */
729 if (lp->lp_detaching) {
730 SLIST_FOREACH(llq, &sc->sc_llq_head, llq_entries) {
731 if (llq->llq_ifp == ifp) {
732 SLIST_REMOVE(&sc->sc_llq_head, llq, lagg_llq,
733 llq_entries);
734 free(llq, M_DEVBUF);
735 break; /* Only appears once */
736 }
737 }
738 }
739
740 if (lp->lp_ifflags)
741 if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);
742
743 free(lp, M_DEVBUF);
744
745 /* Update lagg capabilities */
746 lagg_capabilities(sc);
747 lagg_linkstate(sc);
748
749 return (0);
750}
751
752static int
753lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
754{
755 struct lagg_reqport *rp = (struct lagg_reqport *)data;
756 struct lagg_softc *sc;
757 struct lagg_port *lp = NULL;
758 int error = 0;
759
760 /* Should be checked by the caller */
761 if (ifp->if_type != IFT_IEEE8023ADLAG ||
762 (lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
763 goto fallback;
764
765 switch (cmd) {
766 case SIOCGLAGGPORT:
767 if (rp->rp_portname[0] == '\0' ||
768 ifunit(rp->rp_portname) != ifp) {
769 error = EINVAL;
770 break;
771 }
772
773 LAGG_RLOCK(sc);
774 if ((lp = ifp->if_lagg) == NULL || lp->lp_softc != sc) {
775 error = ENOENT;
776 LAGG_RUNLOCK(sc);
777 break;
778 }
779
780 lagg_port2req(lp, rp);
781 LAGG_RUNLOCK(sc);
782 break;
783
784 case SIOCSIFCAP:
785 if (lp->lp_ioctl == NULL) {
786 error = EINVAL;
787 break;
788 }
789 error = (*lp->lp_ioctl)(ifp, cmd, data);
790 if (error)
791 break;
792
793 /* Update lagg interface capabilities */
794 LAGG_WLOCK(sc);
795 lagg_capabilities(sc);
796 LAGG_WUNLOCK(sc);
797 break;
798
799 case SIOCSIFMTU:
800 /* Do not allow the MTU to be changed once joined */
801 error = EINVAL;
802 break;
803
804 default:
805 goto fallback;
806 }
807
808 return (error);
809
810fallback:
811 if (lp != NULL && lp->lp_ioctl != NULL)
812 return ((*lp->lp_ioctl)(ifp, cmd, data));
813
814 return (EINVAL);
815}
816
817/*
818 * For direct output to child ports.
819 */
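/*
 * Only raw link-layer output (e.g. bpf(4) writes, which use AF_UNSPEC
 * or pseudo_AF_HDRCMPLT addresses) is passed through to the member
 * port; any other frame is dropped so that the stack cannot bypass
 * the lagg interface.
 */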
820static int
821lagg_port_output(struct ifnet *ifp, struct mbuf *m,
822 const struct sockaddr *dst, struct route *ro)
823{
824 struct lagg_port *lp = ifp->if_lagg;
825
826 switch (dst->sa_family) {
827 case pseudo_AF_HDRCMPLT:
828 case AF_UNSPEC:
829 return ((*lp->lp_output)(ifp, m, dst, ro));
830 }
831
832 /* drop any other frames */
833 m_freem(m);
834 return (ENETDOWN);
835}
836
837static void
838lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
839{
840 struct lagg_port *lp;
841 struct lagg_softc *sc;
842
843 if ((lp = ifp->if_lagg) == NULL)
844 return;
845 /* If the ifnet is just being renamed, don't do anything. */
846 if (ifp->if_flags & IFF_RENAMING)
847 return;
848
849 sc = lp->lp_softc;
850
851 LAGG_WLOCK(sc);
852 lp->lp_detaching = 1;
853 lagg_port_destroy(lp, 1);
854 LAGG_WUNLOCK(sc);
855}
856
857static void
858lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
859{
860 struct lagg_softc *sc = lp->lp_softc;
861
862 strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
863 strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
864 rp->rp_prio = lp->lp_prio;
865 rp->rp_flags = lp->lp_flags;
866 if (sc->sc_portreq != NULL)
867 (*sc->sc_portreq)(lp, (caddr_t)&rp->rp_psc);
868
869 /* Add protocol specific flags */
870 switch (sc->sc_proto) {
871 case LAGG_PROTO_FAILOVER:
872 if (lp == sc->sc_primary)
873 rp->rp_flags |= LAGG_PORT_MASTER;
874 if (lp == lagg_link_active(sc, sc->sc_primary))
875 rp->rp_flags |= LAGG_PORT_ACTIVE;
876 break;
877
878 case LAGG_PROTO_ROUNDROBIN:
879 case LAGG_PROTO_LOADBALANCE:
880 case LAGG_PROTO_ETHERCHANNEL:
881 if (LAGG_PORTACTIVE(lp))
882 rp->rp_flags |= LAGG_PORT_ACTIVE;
883 break;
884
885 case LAGG_PROTO_LACP:
886 /* LACP has a different definition of active */
887 if (lacp_isactive(lp))
888 rp->rp_flags |= LAGG_PORT_ACTIVE;
889 if (lacp_iscollecting(lp))
890 rp->rp_flags |= LAGG_PORT_COLLECTING;
891 if (lacp_isdistributing(lp))
892 rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
893 break;
894 }
895
896}
897
898static void
899lagg_init(void *xsc)
900{
901 struct lagg_softc *sc = (struct lagg_softc *)xsc;
902 struct lagg_port *lp;
903 struct ifnet *ifp = sc->sc_ifp;
904
905 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
906 return;
907
908 LAGG_WLOCK(sc);
909
910 ifp->if_drv_flags |= IFF_DRV_RUNNING;
911 /* Update the port lladdrs */
912 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
913 lagg_port_lladdr(lp, IF_LLADDR(ifp));
914
915 if (sc->sc_init != NULL)
916 (*sc->sc_init)(sc);
917
918 LAGG_WUNLOCK(sc);
919}
920
921static void
922lagg_stop(struct lagg_softc *sc)
923{
924 struct ifnet *ifp = sc->sc_ifp;
925
926 LAGG_WLOCK_ASSERT(sc);
927
928 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
929 return;
930
931 ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
932
933 if (sc->sc_stop != NULL)
934 (*sc->sc_stop)(sc);
935}
936
937static int
938lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
939{
940 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
941 struct lagg_reqall *ra = (struct lagg_reqall *)data;
942 struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
943 struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
944 struct ifreq *ifr = (struct ifreq *)data;
945 struct lagg_port *lp;
946 struct ifnet *tpif;
947 struct thread *td = curthread;
948 char *buf, *outbuf;
949 int count, buflen, len, error = 0;
950
951 bzero(&rpbuf, sizeof(rpbuf));
952
953 switch (cmd) {
954 case SIOCGLAGG:
955 LAGG_RLOCK(sc);
956 count = 0;
957 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
958 count++;
959 buflen = count * sizeof(struct lagg_reqport);
960 LAGG_RUNLOCK(sc);
961
962 outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
963
964 LAGG_RLOCK(sc);
965 ra->ra_proto = sc->sc_proto;
966 if (sc->sc_req != NULL)
967 (*sc->sc_req)(sc, (caddr_t)&ra->ra_psc);
968
969 count = 0;
970 buf = outbuf;
971 len = min(ra->ra_size, buflen);
972 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
973 if (len < sizeof(rpbuf))
974 break;
975
976 lagg_port2req(lp, &rpbuf);
977 memcpy(buf, &rpbuf, sizeof(rpbuf));
978 count++;
979 buf += sizeof(rpbuf);
980 len -= sizeof(rpbuf);
981 }
982 LAGG_RUNLOCK(sc);
983 ra->ra_ports = count;
984 ra->ra_size = count * sizeof(rpbuf);
985 error = copyout(outbuf, ra->ra_port, ra->ra_size);
986 free(outbuf, M_TEMP);
987 break;
988 case SIOCSLAGG:
989 error = priv_check(td, PRIV_NET_LAGG);
990 if (error)
991 break;
992 if (ra->ra_proto >= LAGG_PROTO_MAX) {
993 error = EPROTONOSUPPORT;
994 break;
995 }
996 LAGG_WLOCK(sc);
997 if (sc->sc_proto != LAGG_PROTO_NONE) {
998 /* Reset protocol first in case detach unlocks */
999 sc->sc_proto = LAGG_PROTO_NONE;
1000 error = sc->sc_detach(sc);
1001 sc->sc_detach = NULL;
1002 sc->sc_start = NULL;
1003 sc->sc_input = NULL;
1004 sc->sc_port_create = NULL;
1005 sc->sc_port_destroy = NULL;
1006 sc->sc_linkstate = NULL;
1007 sc->sc_init = NULL;
1008 sc->sc_stop = NULL;
1009 sc->sc_lladdr = NULL;
1010 sc->sc_req = NULL;
1011 sc->sc_portreq = NULL;
1012 } else if (sc->sc_input != NULL) {
1013 /* Still detaching */
1014 error = EBUSY;
1015 }
1016 if (error != 0) {
1017 LAGG_WUNLOCK(sc);
1018 break;
1019 }
1020 for (int i = 0; i < (sizeof(lagg_protos) /
1021 sizeof(lagg_protos[0])); i++) {
1022 if (lagg_protos[i].ti_proto == ra->ra_proto) {
1023 if (sc->sc_ifflags & IFF_DEBUG)
1024 printf("%s: using proto %u\n",
1025 sc->sc_ifname,
1026 lagg_protos[i].ti_proto);
1027 sc->sc_proto = lagg_protos[i].ti_proto;
1028 if (sc->sc_proto != LAGG_PROTO_NONE)
1029 error = lagg_protos[i].ti_attach(sc);
1030 LAGG_WUNLOCK(sc);
1031 return (error);
1032 }
1033 }
1034 LAGG_WUNLOCK(sc);
1035 error = EPROTONOSUPPORT;
1036 break;
1037 case SIOCGLAGGFLAGS:
1038 rf->rf_flags = sc->sc_flags;
1039 break;
1040 case SIOCSLAGGHASH:
1041 error = priv_check(td, PRIV_NET_LAGG);
1042 if (error)
1043 break;
1044 if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
1045 error = EINVAL;
1046 break;
1047 }
1048 LAGG_WLOCK(sc);
1049 sc->sc_flags &= ~LAGG_F_HASHMASK;
1050 sc->sc_flags |= rf->rf_flags & LAGG_F_HASHMASK;
1051 LAGG_WUNLOCK(sc);
1052 break;
1053 case SIOCGLAGGPORT:
1054 if (rp->rp_portname[0] == '\0' ||
1055 (tpif = ifunit(rp->rp_portname)) == NULL) {
1056 error = EINVAL;
1057 break;
1058 }
1059
1060 LAGG_RLOCK(sc);
1061 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1062 lp->lp_softc != sc) {
1063 error = ENOENT;
1064 LAGG_RUNLOCK(sc);
1065 break;
1066 }
1067
1068 lagg_port2req(lp, rp);
1069 LAGG_RUNLOCK(sc);
1070 break;
1071 case SIOCSLAGGPORT:
1072 error = priv_check(td, PRIV_NET_LAGG);
1073 if (error)
1074 break;
1075 if (rp->rp_portname[0] == '\0' ||
1076 (tpif = ifunit(rp->rp_portname)) == NULL) {
1077 error = EINVAL;
1078 break;
1079 }
1080 LAGG_WLOCK(sc);
1081 error = lagg_port_create(sc, tpif);
1082 LAGG_WUNLOCK(sc);
1083 break;
1084 case SIOCSLAGGDELPORT:
1085 error = priv_check(td, PRIV_NET_LAGG);
1086 if (error)
1087 break;
1088 if (rp->rp_portname[0] == '\0' ||
1089 (tpif = ifunit(rp->rp_portname)) == NULL) {
1090 error = EINVAL;
1091 break;
1092 }
1093
1094 LAGG_WLOCK(sc);
1095 if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
1096 lp->lp_softc != sc) {
1097 error = ENOENT;
1098 LAGG_WUNLOCK(sc);
1099 break;
1100 }
1101
1102 error = lagg_port_destroy(lp, 1);
1103 LAGG_WUNLOCK(sc);
1104 break;
1105 case SIOCSIFFLAGS:
1106 /* Set flags on ports too */
1107 LAGG_WLOCK(sc);
1108 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1109 lagg_setflags(lp, 1);
1110 }
1111 LAGG_WUNLOCK(sc);
1112
1113 if (!(ifp->if_flags & IFF_UP) &&
1114 (ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1115 /*
1116 * If interface is marked down and it is running,
1117 * then stop and disable it.
1118 */
1119 LAGG_WLOCK(sc);
1120 lagg_stop(sc);
1121 LAGG_WUNLOCK(sc);
1122 } else if ((ifp->if_flags & IFF_UP) &&
1123 !(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
1124 /*
1125 * If interface is marked up and it is stopped, then
1126 * start it.
1127 */
1128 (*ifp->if_init)(sc);
1129 }
1130 break;
1131 case SIOCADDMULTI:
1132 case SIOCDELMULTI:
1133 LAGG_WLOCK(sc);
1134 error = lagg_ether_setmulti(sc);
1135 LAGG_WUNLOCK(sc);
1136 break;
1137 case SIOCSIFMEDIA:
1138 case SIOCGIFMEDIA:
1139 error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
1140 break;
1141
1142 case SIOCSIFCAP:
1143 case SIOCSIFMTU:
1144 /* Do not allow the MTU or caps to be directly changed */
1145 error = EINVAL;
1146 break;
1147
1148 default:
1149 error = ether_ioctl(ifp, cmd, data);
1150 break;
1151 }
1152 return (error);
1153}
1154
1155static int
1156lagg_ether_setmulti(struct lagg_softc *sc)
1157{
1158 struct lagg_port *lp;
1159
1160 LAGG_WLOCK_ASSERT(sc);
1161
1162 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1163 /* First, remove any existing filter entries. */
1164 lagg_ether_cmdmulti(lp, 0);
1165 /* copy all addresses from the lagg interface to the port */
1166 lagg_ether_cmdmulti(lp, 1);
1167 }
1168 return (0);
1169}
1170
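/*
 * Install (set != 0) or remove (set == 0) the lagg interface's
 * link-layer multicast addresses on a member port.  Each reference
 * obtained via if_addmulti() is remembered in lp_mc_head so it can
 * be released again with if_delmulti_ifma().
 */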
1171static int
1172lagg_ether_cmdmulti(struct lagg_port *lp, int set)
1173{
1174 struct lagg_softc *sc = lp->lp_softc;
1175 struct ifnet *ifp = lp->lp_ifp;
1176 struct ifnet *scifp = sc->sc_ifp;
1177 struct lagg_mc *mc;
1178 struct ifmultiaddr *ifma, *rifma = NULL;
1179 struct sockaddr_dl sdl;
1180 int error;
1181
1182 LAGG_WLOCK_ASSERT(sc);
1183
1184 bzero((char *)&sdl, sizeof(sdl));
1185 sdl.sdl_len = sizeof(sdl);
1186 sdl.sdl_family = AF_LINK;
1187 sdl.sdl_type = IFT_ETHER;
1188 sdl.sdl_alen = ETHER_ADDR_LEN;
1189 sdl.sdl_index = ifp->if_index;
1190
1191 if (set) {
1192 TAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
1193 if (ifma->ifma_addr->sa_family != AF_LINK)
1194 continue;
1195 bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr),
1196 LLADDR(&sdl), ETHER_ADDR_LEN);
1197
1198 error = if_addmulti(ifp, (struct sockaddr *)&sdl, &rifma);
1199 if (error)
1200 return (error);
1201 mc = malloc(sizeof(struct lagg_mc), M_DEVBUF, M_NOWAIT);
1202 if (mc == NULL)
1203 return (ENOMEM);
1204 mc->mc_ifma = rifma;
1205 SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
1206 }
1207 } else {
1208 while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
1209 SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
1210 if_delmulti_ifma(mc->mc_ifma);
1211 free(mc, M_DEVBUF);
1212 }
1213 }
1214 return (0);
1215}
1216
1217/* Handle a ref-counted flag that should be set on the lagg port as well */
1218static int
1219lagg_setflag(struct lagg_port *lp, int flag, int status,
1220 int (*func)(struct ifnet *, int))
1221{
1222 struct lagg_softc *sc = lp->lp_softc;
1223 struct ifnet *scifp = sc->sc_ifp;
1224 struct ifnet *ifp = lp->lp_ifp;
1225 int error;
1226
1227 LAGG_WLOCK_ASSERT(sc);
1228
1229 status = status ? (scifp->if_flags & flag) : 0;
1230 /* Now "status" contains the flag value or 0 */
1231
1232 /*
1233 * See if the recorded port status differs from what we
1234 * want it to be; if it does, flip it. We record the port
1235 * status in lp_ifflags so that we never clear a port flag
1236 * we did not set. In fact, we don't set or clear port
1237 * flags directly, but acquire or release references to
1238 * them, which is why the recorded flags always stay in
1239 * accord with the actual port flags.
1240 */
1241 if (status != (lp->lp_ifflags & flag)) {
1242 error = (*func)(ifp, status);
1243 if (error)
1244 return (error);
1245 lp->lp_ifflags &= ~flag;
1246 lp->lp_ifflags |= status;
1247 }
1248 return (0);
1249}
1250
1251/*
1252 * Handle IFF_* flags that require certain changes on the lagg port:
1253 * if "status" is true, update the port's flags to match the lagg's;
1254 * if "status" is false, forcibly clear the flags set on the port.
1255 */
1256static int
1257lagg_setflags(struct lagg_port *lp, int status)
1258{
1259 int error, i;
1260
1261 for (i = 0; lagg_pflags[i].flag; i++) {
1262 error = lagg_setflag(lp, lagg_pflags[i].flag,
1263 status, lagg_pflags[i].func);
1264 if (error)
1265 return (error);
1266 }
1267 return (0);
1268}
1269
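/*
 * if_transmit method of the lagg interface.  The packet length and
 * multicast flag are sampled before the protocol start routine
 * consumes the mbuf, so the counters can still be updated afterwards.
 */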
1270static int
1271lagg_transmit(struct ifnet *ifp, struct mbuf *m)
1272{
1273 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1274 int error, len, mcast;
1275
1276 len = m->m_pkthdr.len;
1277 mcast = (m->m_flags & (M_MCAST | M_BCAST)) ? 1 : 0;
1278
1279 LAGG_RLOCK(sc);
1280 /* We need a Tx algorithm and at least one port */
1281 if (sc->sc_proto == LAGG_PROTO_NONE || sc->sc_count == 0) {
1282 LAGG_RUNLOCK(sc);
1283 m_freem(m);
1284 ifp->if_oerrors++;
1285 return (ENXIO);
1286 }
1287
1288 ETHER_BPF_MTAP(ifp, m);
1289
1290 error = (*sc->sc_start)(sc, m);
1291 LAGG_RUNLOCK(sc);
1292
1293 if (error == 0) {
1294 counter_u64_add(sc->sc_opackets, 1);
1295 counter_u64_add(sc->sc_obytes, len);
1296 ifp->if_omcasts += mcast;
1297 } else
1298 ifp->if_oerrors++;
1299
1300 return (error);
1301}
1302
1303/*
1304 * The ifp->if_qflush entry point for lagg(4) is a no-op.
1305 */
1306static void
1307lagg_qflush(struct ifnet *ifp __unused)
1308{
1309}
1310
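/*
 * Input path for frames received on a member port (reached through
 * the port's if_lagg back-pointer).  Frames are dropped unless the
 * lagg is running, the port is enabled and a protocol is attached;
 * in monitor mode the frame is counted and then freed.
 */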
1311static struct mbuf *
1312lagg_input(struct ifnet *ifp, struct mbuf *m)
1313{
1314 struct lagg_port *lp = ifp->if_lagg;
1315 struct lagg_softc *sc = lp->lp_softc;
1316 struct ifnet *scifp = sc->sc_ifp;
1317
1318 LAGG_RLOCK(sc);
1319 if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1320 (lp->lp_flags & LAGG_PORT_DISABLED) ||
1321 sc->sc_proto == LAGG_PROTO_NONE) {
1322 LAGG_RUNLOCK(sc);
1323 m_freem(m);
1324 return (NULL);
1325 }
1326
1327 ETHER_BPF_MTAP(scifp, m);
1328
1329 m = (*sc->sc_input)(sc, lp, m);
1330
1331 if (m != NULL) {
1332 counter_u64_add(sc->sc_ipackets, 1);
1333 counter_u64_add(sc->sc_ibytes, m->m_pkthdr.len);
1334
1335 if (scifp->if_flags & IFF_MONITOR) {
1336 m_freem(m);
1337 m = NULL;
1338 }
1339 }
1340
1341 LAGG_RUNLOCK(sc);
1342 return (m);
1343}
1344
1345static int
1346lagg_media_change(struct ifnet *ifp)
1347{
1348 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1349
1350 if (sc->sc_ifflags & IFF_DEBUG)
1351 printf("%s\n", __func__);
1352
1353 /* Ignore */
1354 return (0);
1355}
1356
1357static void
1358lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
1359{
1360 struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
1361 struct lagg_port *lp;
1362
1363 imr->ifm_status = IFM_AVALID;
1364 imr->ifm_active = IFM_ETHER | IFM_AUTO;
1365
1366 LAGG_RLOCK(sc);
1367 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1368 if (LAGG_PORTACTIVE(lp))
1369 imr->ifm_status |= IFM_ACTIVE;
1370 }
1371 LAGG_RUNLOCK(sc);
1372}
1373
1374static void
1375lagg_linkstate(struct lagg_softc *sc)
1376{
1377 struct lagg_port *lp;
1378 int new_link = LINK_STATE_DOWN;
1379 uint64_t speed;
1380
1381 /* Our link is considered up if at least one of our ports is active */
1382 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
1383 if (lp->lp_link_state == LINK_STATE_UP) {
1384 new_link = LINK_STATE_UP;
1385 break;
1386 }
1387 }
1388 if_link_state_change(sc->sc_ifp, new_link);
1389
1390 /* Update if_baudrate to reflect the max possible speed */
1391 switch (sc->sc_proto) {
1392 case LAGG_PROTO_FAILOVER:
1393 sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
1394 sc->sc_primary->lp_ifp->if_baudrate : 0;
1395 break;
1396 case LAGG_PROTO_ROUNDROBIN:
1397 case LAGG_PROTO_LOADBALANCE:
1398 case LAGG_PROTO_ETHERCHANNEL:
1399 speed = 0;
1400 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1401 speed += lp->lp_ifp->if_baudrate;
1402 sc->sc_ifp->if_baudrate = speed;
1403 break;
1404 case LAGG_PROTO_LACP:
1405 /* LACP updates if_baudrate itself */
1406 break;
1407 }
1408}
1409
1410static void
1411lagg_port_state(struct ifnet *ifp, int state)
1412{
1413 struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
1414 struct lagg_softc *sc = NULL;
1415
1416 if (lp != NULL)
1417 sc = lp->lp_softc;
1418 if (sc == NULL)
1419 return;
1420
1421 LAGG_WLOCK(sc);
1422 lagg_linkstate(sc);
1423 if (sc->sc_linkstate != NULL)
1424 (*sc->sc_linkstate)(lp);
1425 LAGG_WUNLOCK(sc);
1426}
1427
1428struct lagg_port *
1429lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
1430{
1431 struct lagg_port *lp_next, *rval = NULL;
1432 // int new_link = LINK_STATE_DOWN;
1433
1434 LAGG_RLOCK_ASSERT(sc);
1435 /*
1436 * Search for a port which reports an active link state.
1437 */
1438
1439 if (lp == NULL)
1440 goto search;
1441 if (LAGG_PORTACTIVE(lp)) {
1442 rval = lp;
1443 goto found;
1444 }
1445 if ((lp_next = SLIST_NEXT(lp, lp_entries)) != NULL &&
1446 LAGG_PORTACTIVE(lp_next)) {
1447 rval = lp_next;
1448 goto found;
1449 }
1450
1451search:
1452 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1453 if (LAGG_PORTACTIVE(lp_next)) {
1454 rval = lp_next;
1455 goto found;
1456 }
1457 }
1458
1459found:
1460 if (rval != NULL) {
1461 /*
1462 * The IEEE 802.1D standard assumes that a lagg with
1463 * multiple ports is always full duplex. This is valid
1464 * for load sharing laggs and if at least two links
1465 * are active. Unfortunately, checking the latter would
1466 * be too expensive at this point.
1467 XXX
1468 if ((sc->sc_capabilities & IFCAP_LAGG_FULLDUPLEX) &&
1469 (sc->sc_count > 1))
1470 new_link = LINK_STATE_FULL_DUPLEX;
1471 else
1472 new_link = rval->lp_link_state;
1473 */
1474 }
1475
1476 return (rval);
1477}
1478
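/*
 * Return a pointer to "len" bytes of header data at offset "off",
 * copying them into the caller's buffer only when they are not
 * contiguous in the first mbuf; returns NULL if the packet is too
 * short to contain them.
 */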
1479static const void *
1480lagg_gethdr(struct mbuf *m, u_int off, u_int len, void *buf)
1481{
1482 if (m->m_pkthdr.len < (off + len)) {
1483 return (NULL);
1484 } else if (m->m_len < (off + len)) {
1485 m_copydata(m, off, len, buf);
1486 return (buf);
1487 }
1488 return (mtod(m, char *) + off);
1489}
1490
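/*
 * Hash an outbound packet for port selection, seeded with "key".
 * Depending on the LAGG_F_HASHL2/L3/L4 flags this folds in the
 * Ethernet addresses and VLAN tag, the IPv4/IPv6 addresses (plus the
 * IPv6 flow label) and the 32-bit TCP/UDP/SCTP source/destination
 * port pair.
 */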
1491uint32_t
1492lagg_hashmbuf(struct lagg_softc *sc, struct mbuf *m, uint32_t key)
1493{
1494 uint16_t etype;
1495 uint32_t p = key;
1496 int off;
1497 struct ether_header *eh;
1498 const struct ether_vlan_header *vlan;
1499#ifdef INET
1500 const struct ip *ip;
1501 const uint32_t *ports;
1502 int iphlen;
1503#endif
1504#ifdef INET6
1505 const struct ip6_hdr *ip6;
1506 uint32_t flow;
1507#endif
1508 union {
1509#ifdef INET
1510 struct ip ip;
1511#endif
1512#ifdef INET6
1513 struct ip6_hdr ip6;
1514#endif
1515 struct ether_vlan_header vlan;
1516 uint32_t port;
1517 } buf;
1518
1520 off = sizeof(*eh);
1521 if (m->m_len < off)
1522 goto out;
1523 eh = mtod(m, struct ether_header *);
1524 etype = ntohs(eh->ether_type);
1525 if (sc->sc_flags & LAGG_F_HASHL2) {
1526 p = hash32_buf(&eh->ether_shost, ETHER_ADDR_LEN, p);
1527 p = hash32_buf(&eh->ether_dhost, ETHER_ADDR_LEN, p);
1528 }
1529
1530 /* Special handling for encapsulating VLAN frames */
1531 if ((m->m_flags & M_VLANTAG) && (sc->sc_flags & LAGG_F_HASHL2)) {
1532 p = hash32_buf(&m->m_pkthdr.ether_vtag,
1533 sizeof(m->m_pkthdr.ether_vtag), p);
1534 } else if (etype == ETHERTYPE_VLAN) {
1535 vlan = lagg_gethdr(m, off, sizeof(*vlan), &buf);
1536 if (vlan == NULL)
1537 goto out;
1538
1539 if (sc->sc_flags & LAGG_F_HASHL2)
1540 p = hash32_buf(&vlan->evl_tag, sizeof(vlan->evl_tag), p);
1541 etype = ntohs(vlan->evl_proto);
1542 off += sizeof(*vlan) - sizeof(*eh);
1543 }
1544
1545 switch (etype) {
1546#ifdef INET
1547 case ETHERTYPE_IP:
1548 ip = lagg_gethdr(m, off, sizeof(*ip), &buf);
1549 if (ip == NULL)
1550 goto out;
1551
1552 if (sc->sc_flags & LAGG_F_HASHL3) {
1553 p = hash32_buf(&ip->ip_src, sizeof(struct in_addr), p);
1554 p = hash32_buf(&ip->ip_dst, sizeof(struct in_addr), p);
1555 }
1556 if (!(sc->sc_flags & LAGG_F_HASHL4))
1557 break;
1558 switch (ip->ip_p) {
1559 case IPPROTO_TCP:
1560 case IPPROTO_UDP:
1561 case IPPROTO_SCTP:
1562 iphlen = ip->ip_hl << 2;
1563 if (iphlen < sizeof(*ip))
1564 break;
1565 off += iphlen;
1566 ports = lagg_gethdr(m, off, sizeof(*ports), &buf);
1567 if (ports == NULL)
1568 break;
1569 p = hash32_buf(ports, sizeof(*ports), p);
1570 break;
1571 }
1572 break;
1573#endif
1574#ifdef INET6
1575 case ETHERTYPE_IPV6:
1576 if (!(sc->sc_flags & LAGG_F_HASHL3))
1577 break;
1578 ip6 = lagg_gethdr(m, off, sizeof(*ip6), &buf);
1579 if (ip6 == NULL)
1580 goto out;
1581
1582 p = hash32_buf(&ip6->ip6_src, sizeof(struct in6_addr), p);
1583 p = hash32_buf(&ip6->ip6_dst, sizeof(struct in6_addr), p);
1584 flow = ip6->ip6_flow & IPV6_FLOWLABEL_MASK;
1585 p = hash32_buf(&flow, sizeof(flow), p); /* IPv6 flow label */
1586 break;
1587#endif
1588 }
1589out:
1590 return (p);
1591}
1592
1593int
1594lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
1595{
1596
1597 return (ifp->if_transmit)(ifp, m);
1598}
1599
1600/*
1601 * Simple round-robin aggregation
1602 */
1603
1604static int
1605lagg_rr_attach(struct lagg_softc *sc)
1606{
1607 sc->sc_detach = lagg_rr_detach;
1608 sc->sc_start = lagg_rr_start;
1609 sc->sc_input = lagg_rr_input;
1610 sc->sc_port_create = NULL;
1611 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1612 sc->sc_seq = 0;
1613
1614 return (0);
1615}
1616
1617static int
1618lagg_rr_detach(struct lagg_softc *sc)
1619{
1620 return (0);
1621}
1622
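/*
 * Pick the output port by taking an atomically incremented sequence
 * number modulo the port count and walking the port list that far;
 * lagg_link_active() then substitutes the next active port if the
 * chosen link happens to be down.
 */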
1623static int
1624lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
1625{
1626 struct lagg_port *lp;
1627 uint32_t p;
1628
1629 p = atomic_fetchadd_32(&sc->sc_seq, 1);
1630 p %= sc->sc_count;
1631 lp = SLIST_FIRST(&sc->sc_ports);
1632 while (p--)
1633 lp = SLIST_NEXT(lp, lp_entries);
1634
1635 /*
1636 * Check the port's link state; lagg_link_active() will return
1637 * the next active port if this link is down or the port is NULL.
1638 */
1639 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1640 m_freem(m);
1641 return (ENETDOWN);
1642 }
1643
1644 /* Send mbuf */
1645 return (lagg_enqueue(lp->lp_ifp, m));
1646}
1647
1648static struct mbuf *
1649lagg_rr_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1650{
1651 struct ifnet *ifp = sc->sc_ifp;
1652
1653 /* Just pass the packet up to our lagg device */
1654 m->m_pkthdr.rcvif = ifp;
1655
1656 return (m);
1657}
1658
1659/*
1660 * Active failover
1661 */
1662
1663static int
1664lagg_fail_attach(struct lagg_softc *sc)
1665{
1666 sc->sc_detach = lagg_fail_detach;
1667 sc->sc_start = lagg_fail_start;
1668 sc->sc_input = lagg_fail_input;
1669 sc->sc_port_create = NULL;
1670 sc->sc_port_destroy = NULL;
1671
1672 return (0);
1673}
1674
1675static int
1676lagg_fail_detach(struct lagg_softc *sc)
1677{
1678 return (0);
1679}
1680
1681static int
1682lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
1683{
1684 struct lagg_port *lp;
1685
1686 /* Use the master port if it is active, otherwise the next available port */
1687 if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
1688 m_freem(m);
1689 return (ENETDOWN);
1690 }
1691
1692 /* Send mbuf */
1693 return (lagg_enqueue(lp->lp_ifp, m));
1694}
1695
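/*
 * In failover mode, only frames received on the primary port (or on
 * the port currently standing in for a down primary) are accepted,
 * unless lagg_failover_rx_all is set, in which case every member
 * port's traffic is passed up.
 */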
1696static struct mbuf *
1697lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1698{
1699 struct ifnet *ifp = sc->sc_ifp;
1700 struct lagg_port *tmp_tp;
1701
1702 if (lp == sc->sc_primary || lagg_failover_rx_all) {
1703 m->m_pkthdr.rcvif = ifp;
1704 return (m);
1705 }
1706
1707 if (!LAGG_PORTACTIVE(sc->sc_primary)) {
1708 tmp_tp = lagg_link_active(sc, sc->sc_primary);
1709 /*
1710 * If tmp_tp is NULL, we've received a packet when all
1711 * our links are down. Weird, but process it anyway.
1712 */
1713 if ((tmp_tp == NULL || tmp_tp == lp)) {
1714 m->m_pkthdr.rcvif = ifp;
1715 return (m);
1716 }
1717 }
1718
1719 m_freem(m);
1720 return (NULL);
1721}
1722
1723/*
1724 * Load balancing
1725 */
1726
1727static int
1728lagg_lb_attach(struct lagg_softc *sc)
1729{
1730 struct lagg_port *lp;
1731 struct lagg_lb *lb;
1732
1733 if ((lb = (struct lagg_lb *)malloc(sizeof(struct lagg_lb),
1734 M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
1735 return (ENOMEM);
1736
1737 sc->sc_detach = lagg_lb_detach;
1738 sc->sc_start = lagg_lb_start;
1739 sc->sc_input = lagg_lb_input;
1740 sc->sc_port_create = lagg_lb_port_create;
1741 sc->sc_port_destroy = lagg_lb_port_destroy;
1742 sc->sc_capabilities = IFCAP_LAGG_FULLDUPLEX;
1743
1744 lb->lb_key = arc4random();
1745 sc->sc_psc = (caddr_t)lb;
1746
1747 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1748 lagg_lb_port_create(lp);
1749
1750 return (0);
1751}
1752
1753static int
1754lagg_lb_detach(struct lagg_softc *sc)
1755{
1756 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1757 if (lb != NULL)
1758 free(lb, M_DEVBUF);
1759 return (0);
1760}
1761
1762static int
1763lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
1764{
1765 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1766 struct lagg_port *lp_next;
1767 int i = 0;
1768
1769 bzero(&lb->lb_ports, sizeof(lb->lb_ports));
1770 SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
1771 if (lp_next == lp)
1772 continue;
1773 if (i >= LAGG_MAX_PORTS)
1774 return (EINVAL);
1775 if (sc->sc_ifflags & IFF_DEBUG)
1776 printf("%s: port %s at index %d\n",
1777 sc->sc_ifname, lp_next->lp_ifname, i);
1778 lb->lb_ports[i++] = lp_next;
1779 }
1780
1781 return (0);
1782}
1783
1784static int
1785lagg_lb_port_create(struct lagg_port *lp)
1786{
1787 struct lagg_softc *sc = lp->lp_softc;
1788 return (lagg_lb_porttable(sc, NULL));
1789}
1790
1791static void
1792lagg_lb_port_destroy(struct lagg_port *lp)
1793{
1794 struct lagg_softc *sc = lp->lp_softc;
1795 lagg_lb_porttable(sc, lp);
1796}
1797
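/*
 * Pick the output port from the port table, indexed either by the
 * hardware-supplied flowid (when use_flowid is enabled and M_FLOWID
 * is set) or by lagg_hashmbuf(), taken modulo the port count.
 */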
1798static int
1799lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
1800{
1801 struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
1802 struct lagg_port *lp = NULL;
1803 uint32_t p = 0;
1804
1805 if (sc->use_flowid && (m->m_flags & M_FLOWID))
1806 p = m->m_pkthdr.flowid;
1807 else
1808 p = lagg_hashmbuf(sc, m, lb->lb_key);
1809 p %= sc->sc_count;
1810 lp = lb->lb_ports[p];
1811
1812 /*
1813 * Check the port's link state; lagg_link_active() will return
1814 * the next active port if this link is down or the port is NULL.
1815 */
1816 if ((lp = lagg_link_active(sc, lp)) == NULL) {
1817 m_freem(m);
1818 return (ENETDOWN);
1819 }
1820
1821 /* Send mbuf */
1822 return (lagg_enqueue(lp->lp_ifp, m));
1823}
1824
1825static struct mbuf *
1826lagg_lb_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1827{
1828 struct ifnet *ifp = sc->sc_ifp;
1829
1831 /* Just pass the packet up to our lagg device */
1831 m->m_pkthdr.rcvif = ifp;
1832
1833 return (m);
1834}
1835
1836/*
1837 * 802.3ad LACP
1838 */
1839
1840static int
1841lagg_lacp_attach(struct lagg_softc *sc)
1842{
1843 struct lagg_port *lp;
1844 int error;
1845
1846 sc->sc_detach = lagg_lacp_detach;
1847 sc->sc_port_create = lacp_port_create;
1848 sc->sc_port_destroy = lacp_port_destroy;
1849 sc->sc_linkstate = lacp_linkstate;
1850 sc->sc_start = lagg_lacp_start;
1851 sc->sc_input = lagg_lacp_input;
1852 sc->sc_init = lacp_init;
1853 sc->sc_stop = lacp_stop;
1854 sc->sc_lladdr = lagg_lacp_lladdr;
1855 sc->sc_req = lacp_req;
1856 sc->sc_portreq = lacp_portreq;
1857
1858 error = lacp_attach(sc);
1859 if (error)
1860 return (error);
1861
1862 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1863 lacp_port_create(lp);
1864
1865 return (error);
1866}
1867
1868static int
1869lagg_lacp_detach(struct lagg_softc *sc)
1870{
1871 struct lagg_port *lp;
1872 int error;
1873
1874 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1875 lacp_port_destroy(lp);
1876
1877 /* unlocking is safe here */
1878 LAGG_WUNLOCK(sc);
1879 error = lacp_detach(sc);
1880 LAGG_WLOCK(sc);
1881
1882 return (error);
1883}
1884
1885static void
1886lagg_lacp_lladdr(struct lagg_softc *sc)
1887{
1888 struct lagg_port *lp;
1889
1890 /* purge all the lacp ports */
1891 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1892 lacp_port_destroy(lp);
1893
1894 /* add them back in */
1895 SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
1896 lacp_port_create(lp);
1897}
1898
1899static int
1900lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
1901{
1902 struct lagg_port *lp;
1903
1904 lp = lacp_select_tx_port(sc, m);
1905 if (lp == NULL) {
1906 m_freem(m);
1907 return (ENETDOWN);
1908 }
1909
1910 /* Send mbuf */
1911 return (lagg_enqueue(lp->lp_ifp, m));
1912}
1913
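/*
 * Divert untagged LACP control frames (ETHERTYPE_SLOW) into the LACP
 * state machinery; ordinary frames are accepted only while the port
 * is collecting and a member of the active aggregator.
 */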
1914static struct mbuf *
1915lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
1916{
1917 struct ifnet *ifp = sc->sc_ifp;
1918 struct ether_header *eh;
1919 u_short etype;
1920
1921 eh = mtod(m, struct ether_header *);
1922 etype = ntohs(eh->ether_type);
1923
1924 /* Tap off LACP control messages */
1925 if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
1926 m = lacp_input(lp, m);
1927 if (m == NULL)
1928 return (NULL);
1929 }
1930
1931 /*
1932 * If the port is not collecting or not in the active aggregator then
1933 * free and return.
1934 */
1935 if (lacp_iscollecting(lp) == 0 || lacp_isactive(lp) == 0) {
1936 m_freem(m);
1937 return (NULL);
1938 }
1939
1940 m->m_pkthdr.rcvif = ifp;
1941 return (m);
1942}
1943
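/*
 * Once per second, fold the per-CPU counter(9) statistics back into
 * the legacy ifnet counters so that consumers of if_data see current
 * values.
 */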
1944static void
1945lagg_callout(void *arg)
1946{
1947 struct lagg_softc *sc = (struct lagg_softc *)arg;
1948 struct ifnet *ifp = sc->sc_ifp;
1949
1950 ifp->if_ipackets = counter_u64_fetch(sc->sc_ipackets);
1951 ifp->if_opackets = counter_u64_fetch(sc->sc_opackets);
1952 ifp->if_ibytes = counter_u64_fetch(sc->sc_ibytes);
1953 ifp->if_obytes = counter_u64_fetch(sc->sc_obytes);
1954
1955 callout_reset(&sc->sc_callout, hz, lagg_callout, sc);
1956}