1/* $NetBSD: if_l2tp.c,v 1.49 2023/11/02 09:43:46 yamaguchi Exp $ */ 2 3/* 4 * Copyright (c) 2017 Internet Initiative Japan Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 17 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 18 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 20 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 26 * POSSIBILITY OF SUCH DAMAGE. 
 */

/*
 * L2TPv3 kernel interface
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_l2tp.c,v 1.49 2023/11/02 09:43:46 yamaguchi Exp $");

#ifdef _KERNEL_OPT
#include "opt_inet.h"
#include "opt_net_mpsafe.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/kauth.h>
#include <sys/cpu.h>
#include <sys/cprng.h>
#include <sys/intr.h>
#include <sys/kmem.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/pserialize.h>
#include <sys/device.h>
#include <sys/module.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_ether.h>
#include <net/if_types.h>
#include <net/route.h>
#include <net/bpf.h>
#include <net/if_vlanvar.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_encap.h>
#ifdef INET
#include <netinet/in_var.h>
#include <netinet/in_l2tp.h>
#endif /* INET */
#ifdef INET6
#include <netinet6/in6_l2tp.h>
#endif

#include <net/if_l2tp.h>

/* NOTE(review): <net/if_vlanvar.h> is already included above; this second
 * include is harmless but redundant. */
#include <net/if_vlanvar.h>

/* TODO: IP_TCPMSS support */
#undef IP_TCPMSS
#ifdef IP_TCPMSS
#include <netinet/ip_tcpmss.h>
#endif

/*
 * l2tp global variable definitions
 */
/* All existing l2tp softcs, guarded by l2tp_softcs.lock. */
static struct {
	LIST_HEAD(l2tp_sclist, l2tp_softc) list;
	kmutex_t lock;
} l2tp_softcs __cacheline_aligned;


#if !defined(L2TP_ID_HASH_SIZE)
#define L2TP_ID_HASH_SIZE 64
#endif
/*
 * Hash table mapping local session id -> softc.  Writers take
 * l2tp_hash.lock; readers walk the pslists under pserialize.
 */
static struct {
	kmutex_t lock;
	struct pslist_head *lists;
	u_long mask;
} l2tp_hash __cacheline_aligned = {
	.lists = NULL,
};

/* Pserialize/psref machinery protecting sc->l2tp_var swaps. */
pserialize_t l2tp_psz __read_mostly;
struct psref_class *lv_psref_class __read_mostly;

static void l2tp_ifq_init_pc(void *, void *,
    struct cpu_info *);
static void l2tp_ifq_fini_pc(void *, void *, struct cpu_info *);

static int l2tp_clone_create(struct if_clone *, int);
static int l2tp_clone_destroy(struct ifnet *);

struct if_clone l2tp_cloner =
    IF_CLONE_INITIALIZER("l2tp", l2tp_clone_create, l2tp_clone_destroy);

static int l2tp_tx_enqueue(struct l2tp_variant *, struct mbuf *);
static int l2tp_output(struct ifnet *, struct mbuf *,
    const struct sockaddr *, const struct rtentry *);
static void l2tp_sendit(struct l2tp_variant *, struct mbuf *);
static void l2tpintr(struct l2tp_variant *);
static void l2tpintr_softint(void *);

static void l2tp_hash_init(void);
static int l2tp_hash_fini(void);

static void l2tp_start(struct ifnet *);
static int l2tp_transmit(struct ifnet *, struct mbuf *);

static int l2tp_set_tunnel(struct ifnet *, struct sockaddr *,
    struct sockaddr *);
static void l2tp_delete_tunnel(struct ifnet *);

static int id_hash_func(uint32_t, u_long);

static void l2tp_variant_update(struct l2tp_softc *, struct l2tp_variant *);
static int l2tp_set_session(struct l2tp_softc *, uint32_t, uint32_t);
static int l2tp_clear_session(struct l2tp_softc *);
static int l2tp_set_cookie(struct l2tp_softc *, uint64_t, u_int, uint64_t, u_int);
static void l2tp_clear_cookie(struct l2tp_softc *);
static void l2tp_set_state(struct l2tp_softc *, int);
static int l2tp_encap_attach(struct l2tp_variant *);
static int l2tp_encap_detach(struct l2tp_variant *);

/*
 * Dereference the per-CPU pointer-to-ifqueue slot.  Caller must pair with
 * l2tp_ifq_percpu_putref() (or raw percpu_putref()).
 */
static inline struct ifqueue *
l2tp_ifq_percpu_getref(percpu_t *pc)
{

	return *(struct ifqueue **)percpu_getref(pc);
}

/* Release the reference taken by l2tp_ifq_percpu_getref(). */
static inline void
l2tp_ifq_percpu_putref(percpu_t *pc)
{

	percpu_putref(pc);
}

#ifndef MAX_L2TP_NEST
/*
 * This macro controls the upper limitation on nesting of l2tp tunnels.
 * Since, setting a large value to this macro with a careless configuration
 * may introduce system crash, we don't allow any nestings by default.
 * If you need to configure nested l2tp tunnels, you can define this macro
 * in your kernel configuration file. However, if you do so, please be
 * careful to configure the tunnels so that it won't make a loop.
 */
/*
 * XXX
 * Currently, if in_l2tp_output recursively calls, it causes locking against
 * myself of struct l2tp_ro->lr_lock. So, nested l2tp tunnels is prohibited.
 */
#define MAX_L2TP_NEST 0
#endif

static int max_l2tp_nesting = MAX_L2TP_NEST;

/* ARGSUSED */
void
l2tpattach(int count)
{
	/*
	 * Nothing to do here, initialization is handled by the
	 * module initialization code in l2tpinit() below).
	 */
}

/*
 * Module initialization: set up global locks, the psref/pserialize
 * machinery, the session-id hash, and register the "l2tp" cloner.
 */
static void
l2tpinit(void)
{

	mutex_init(&l2tp_softcs.lock, MUTEX_DEFAULT, IPL_NONE);
	LIST_INIT(&l2tp_softcs.list);

	mutex_init(&l2tp_hash.lock, MUTEX_DEFAULT, IPL_NONE);
	l2tp_psz = pserialize_create();
	lv_psref_class = psref_class_create("l2tpvar", IPL_SOFTNET);
	if_clone_attach(&l2tp_cloner);

	l2tp_hash_init();
}

/*
 * Module teardown.  Fails with EBUSY while any l2tp interface still
 * exists or the hash still has entries; otherwise undoes l2tpinit().
 */
static int
l2tpdetach(void)
{
	int error;

	mutex_enter(&l2tp_softcs.lock);
	if (!LIST_EMPTY(&l2tp_softcs.list)) {
		mutex_exit(&l2tp_softcs.lock);
		return EBUSY;
	}
	mutex_exit(&l2tp_softcs.lock);

	error = l2tp_hash_fini();
	if (error)
		return error;

	if_clone_detach(&l2tp_cloner);
	psref_class_destroy(lv_psref_class);
	pserialize_destroy(l2tp_psz);
	mutex_destroy(&l2tp_hash.lock);

	mutex_destroy(&l2tp_softcs.lock);

	return error;
}

/*
 * Cloner create: allocate the softc and its initial (down, cookie-off)
 * variant, set up per-softc lock/pserialize, per-CPU route cache and
 * tx queues, the tx softint, and link the softc into the global list.
 */
static int
l2tp_clone_create(struct if_clone *ifc, int unit)
{
	struct l2tp_softc *sc;
	struct l2tp_variant *var;
	int rv;
	u_int si_flags = SOFTINT_NET;
#ifdef NET_MPSAFE
	si_flags |= SOFTINT_MPSAFE;
#endif
	sc = kmem_zalloc(sizeof(struct l2tp_softc), KM_SLEEP);
	if_initname(&sc->l2tp_ec.ec_if, ifc->ifc_name, unit);
	rv = l2tpattach0(sc);
	if (rv != 0) {
		kmem_free(sc, sizeof(struct l2tp_softc));
		return rv;
	}

	var = kmem_zalloc(sizeof(struct l2tp_variant), KM_SLEEP);
	var->lv_softc = sc;
	var->lv_state = L2TP_STATE_DOWN;
	var->lv_use_cookie = L2TP_COOKIE_OFF;
	psref_target_init(&var->lv_psref, lv_psref_class);

	sc->l2tp_var = var;
	mutex_init(&sc->l2tp_lock, MUTEX_DEFAULT, IPL_NONE);
	sc->l2tp_psz = pserialize_create();
	PSLIST_ENTRY_INIT(sc, l2tp_hash);

	sc->l2tp_ro_percpu = if_tunnel_alloc_ro_percpu();

	sc->l2tp_ifq_percpu = percpu_create(sizeof(struct ifqueue *),
	    l2tp_ifq_init_pc, l2tp_ifq_fini_pc, NULL);
	sc->l2tp_si = softint_establish(si_flags, l2tpintr_softint, sc);

	mutex_enter(&l2tp_softcs.lock);
	LIST_INSERT_HEAD(&l2tp_softcs.list, sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	return (0);
}

/*
 * Attach the ethernet-over-L2TPv3 interface: ethernet attach first, then
 * override the ifnet methods/type for the tunnel personality.
 */
int
l2tpattach0(struct l2tp_softc *sc)
{

	sc->l2tp_ec.ec_capabilities = 0;
	sc->l2tp_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;
	sc->l2tp_ec.ec_capabilities |= ETHERCAP_JUMBO_MTU;

	ether_ifattach(&sc->l2tp_ec.ec_if, NULL);

	sc->l2tp_ec.ec_if.if_addrlen = 0;
	sc->l2tp_ec.ec_if.if_mtu = L2TP_MTU;
	sc->l2tp_ec.ec_if.if_flags = IFF_POINTOPOINT|IFF_MULTICAST|IFF_SIMPLEX;
#ifdef NET_MPSAFE
	sc->l2tp_ec.ec_if.if_extflags = IFEF_MPSAFE;
#endif
	sc->l2tp_ec.ec_if.if_ioctl = l2tp_ioctl;
	sc->l2tp_ec.ec_if.if_output = l2tp_output;
	sc->l2tp_ec.ec_if.if_type = IFT_L2TP;
	sc->l2tp_ec.ec_if.if_dlt = DLT_NULL;
	sc->l2tp_ec.ec_if.if_start = l2tp_start;
	sc->l2tp_ec.ec_if.if_transmit = l2tp_transmit;
	sc->l2tp_ec.ec_if._if_input = ether_input;
	IFQ_SET_READY(&sc->l2tp_ec.ec_if.if_snd);

	/* XXX
	 * It may improve performance to use if_initialize()/if_register()
	 * so that l2tp_input() calls if_input()
instead of
	 * if_percpuq_enqueue(). However, that causes recursive softnet_lock
	 * when NET_MPSAFE is not set.
	 */
	if_attach(&sc->l2tp_ec.ec_if);
	if_link_state_change(&sc->l2tp_ec.ec_if, LINK_STATE_DOWN);
	if_alloc_sadl(&sc->l2tp_ec.ec_if);

	return 0;
}

/*
 * percpu constructor: allocate one ifqueue per CPU for the tx path.
 */
void
l2tp_ifq_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct ifqueue **ifqp = p;

	*ifqp = kmem_zalloc(sizeof(**ifqp), KM_SLEEP);
	(*ifqp)->ifq_maxlen = IFQ_MAXLEN;
}

/* percpu destructor: free the per-CPU ifqueue. */
void
l2tp_ifq_fini_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct ifqueue **ifqp = p;

	kmem_free(*ifqp, sizeof(**ifqp));
}

/*
 * Cloner destroy: tear down session/tunnel state, publish a NULL variant
 * so concurrent readers drain, then free all per-softc resources.
 */
static int
l2tp_clone_destroy(struct ifnet *ifp)
{
	struct l2tp_variant *var;
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);

	ether_ifdetach(ifp);

	l2tp_clear_session(sc);
	l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
	/*
	 * To avoid for l2tp_transmit() and l2tpintr_softint() to access
	 * sc->l2tp_var after free it.
 */
	mutex_enter(&sc->l2tp_lock);
	var = sc->l2tp_var;
	/* Publish NULL; waits for readers via pserialize/psref inside. */
	l2tp_variant_update(sc, NULL);
	mutex_exit(&sc->l2tp_lock);

	softint_disestablish(sc->l2tp_si);
	percpu_free(sc->l2tp_ifq_percpu, sizeof(struct ifqueue *));

	mutex_enter(&l2tp_softcs.lock);
	LIST_REMOVE(sc, l2tp_list);
	mutex_exit(&l2tp_softcs.lock);

	if_detach(ifp);

	if_tunnel_free_ro_percpu(sc->l2tp_ro_percpu);

	kmem_free(var, sizeof(struct l2tp_variant));
	pserialize_destroy(sc->l2tp_psz);
	mutex_destroy(&sc->l2tp_lock);
	kmem_free(sc, sizeof(struct l2tp_softc));

	return 0;
}

/*
 * Queue an outbound mbuf on the calling CPU's tx queue and kick the
 * softint.  Returns ENOBUFS (mbuf freed) when the queue is full.
 * Caller must hold a psref on var.
 */
static int
l2tp_tx_enqueue(struct l2tp_variant *var, struct mbuf *m)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct ifqueue *ifq;
	int s;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	s = splsoftnet();
	ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
	if (IF_QFULL(ifq)) {
		if_statinc(ifp, if_oerrors);
		l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);
		splx(s);
		m_freem(m);
		return ENOBUFS;
	}

	IF_ENQUEUE(ifq, m);
	/* NOTE(review): raw percpu_putref() here vs the wrapper above --
	 * equivalent, but inconsistent style. */
	percpu_putref(sc->l2tp_ifq_percpu);
	softint_schedule(sc->l2tp_si);
	/* counter is incremented in l2tpintr() */
	splx(s);
	return 0;
}

/*
 * ifnet if_output: prepend the DLT_NULL address-family word and hand the
 * packet to the per-CPU tx queue.  Drops with ENETDOWN when the variant,
 * interface, or tunnel endpoints are not up.
 */
static int
l2tp_output(struct ifnet *ifp, struct mbuf *m, const struct sockaddr *dst,
    const struct rtentry *rt)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var;
	struct psref psref;
	int error = 0;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL) {
		m_freem(m);
		return ENETDOWN;
	}

	IFQ_CLASSIFY(&ifp->if_snd, m, dst->sa_family);

	m->m_flags &= ~(M_BCAST|M_MCAST);

	if ((ifp->if_flags & IFF_UP) == 0) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
		m_freem(m);
		error = ENETDOWN;
		goto end;
	}

	/* XXX should we check if our outer source is legal? */

	/* use DLT_NULL encapsulation here to pass inner af type */
	M_PREPEND(m, sizeof(int), M_DONTWAIT);
	if (!m) {
		error = ENOBUFS;
		goto end;
	}
	*mtod(m, int *) = dst->sa_family;

	error = l2tp_tx_enqueue(var, m);
end:
	l2tp_putref_variant(var, &psref);
	if (error)
		if_statinc(ifp, if_oerrors);

	return error;
}

/*
 * Encapsulate and transmit one mbuf via the inner-family output routine
 * (in_l2tp_output()/in6_l2tp_output()).  Updates opackets/obytes on
 * success, oerrors on failure.  Caller must hold a psref on var.
 */
static void
l2tp_sendit(struct l2tp_variant *var, struct mbuf *m)
{
	int len;
	int error;
	struct l2tp_softc *sc;
	struct ifnet *ifp;

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* Snapshot the length before the output routine consumes m. */
	len = m->m_pkthdr.len;
	m->m_flags &= ~(M_BCAST|M_MCAST);
	bpf_mtap(ifp, m, BPF_D_OUT);

	switch (var->lv_psrc->sa_family) {
#ifdef INET
	case AF_INET:
		error = in_l2tp_output(var, m);
		break;
#endif
#ifdef INET6
	case AF_INET6:
		error = in6_l2tp_output(var, m);
		break;
#endif
	default:
		m_freem(m);
		error = ENETDOWN;
		break;
	}
	if (error) {
		if_statinc(ifp, if_oerrors);
	} else {
		if_statadd2(ifp, if_opackets, 1, if_obytes, len);
	}
}

/*
 * Drain the calling CPU's tx queue (and, on CPU 0 only, the shared
 * if_snd queue) through l2tp_sendit().  If the session is not fully
 * configured, the queues are purged instead.
 */
static void
l2tpintr(struct l2tp_variant *var)
{
	struct l2tp_softc *sc;
	struct ifnet *ifp;
	struct mbuf *m;
	struct ifqueue *ifq;
	u_int cpuid = cpu_index(curcpu());

	KASSERT(psref_held(&var->lv_psref, lv_psref_class));

	sc = var->lv_softc;
	ifp = &sc->l2tp_ec.ec_if;

	/* output processing */
	if (var->lv_my_sess_id == 0 || var->lv_peer_sess_id == 0) {
		ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
		IF_PURGE(ifq);
		l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);
		/* Only one CPU needs to purge the shared if_snd queue. */
		if (cpuid == 0)
			IFQ_PURGE(&ifp->if_snd);
		return;
	}

	/* Currently, l2tpintr() is always called in softint context.
 */
	ifq = l2tp_ifq_percpu_getref(sc->l2tp_ifq_percpu);
	for (;;) {
		IF_DEQUEUE(ifq, m);
		if (m != NULL)
			l2tp_sendit(var, m);
		else
			break;
	}
	l2tp_ifq_percpu_putref(sc->l2tp_ifq_percpu);

	if (cpuid == 0) {
		for (;;) {
			IFQ_DEQUEUE(&ifp->if_snd, m);
			if (m != NULL)
				l2tp_sendit(var, m);
			else
				break;
		}
	}
}

/*
 * Softint handler: take a psref on the current variant and run the
 * tx drain; silently does nothing while the variant is being torn down.
 */
static void
l2tpintr_softint(void *arg)
{
	struct l2tp_variant *var;
	struct psref psref;
	struct l2tp_softc *sc = arg;

	var = l2tp_getref_variant(sc, &psref);
	if (var == NULL)
		return;

	l2tpintr(var);
	l2tp_putref_variant(var, &psref);
}

/*
 * Decapsulated-packet input: ensure the inner ethernet header is 4-byte
 * aligned (ALIGNED_POINTER-style, offset 2) by copying the head into a
 * fresh mbuf when necessary, then feed the chain to the percpuq.
 */
void
l2tp_input(struct mbuf *m, struct ifnet *ifp)
{
	vaddr_t addr;

	KASSERT(ifp != NULL);

	/*
	 * Currently, l2tp(4) supports only ethernet as inner protocol.
	 */
	if (m->m_pkthdr.len < sizeof(struct ether_header)) {
		m_freem(m);
		return;
	}

	/*
	 * If the head of the payload is not aligned, align it.
	 */
	addr = mtod(m, vaddr_t);
	if ((addr & 0x03) != 0x2) {
		/* copy and align head of payload */
		struct mbuf *m_head;
		int copy_length;
		u_int pad = roundup(sizeof(struct ether_header), 4)
		    - sizeof(struct ether_header);

#define L2TP_COPY_LENGTH 60

		if (m->m_pkthdr.len < L2TP_COPY_LENGTH) {
			copy_length = m->m_pkthdr.len;
		} else {
			copy_length = L2TP_COPY_LENGTH;
		}

		if (m->m_len < copy_length) {
			m = m_pullup(m, copy_length);
			if (m == NULL)
				return;
		}

		MGETHDR(m_head, M_DONTWAIT, MT_HEADER);
		if (m_head == NULL) {
			m_freem(m);
			return;
		}
		m_move_pkthdr(m_head, m);

		/*
		 * m_head should be:
		 *                             L2TP_COPY_LENGTH
		 *                          <-  + roundup(pad, 4) - pad ->
		 *   +-------+--------+-----+--------------+-------------+
		 *   | m_hdr | pkthdr | ...
| ether header | payload | 610 * +-------+--------+-----+--------------+-------------+ 611 * ^ ^ 612 * m_data 4 byte aligned 613 */ 614 m_align(m_head, L2TP_COPY_LENGTH + roundup(pad, 4)); 615 m_head->m_data += pad; 616 617 memcpy(mtod(m_head, void *), mtod(m, void *), copy_length); 618 m_head->m_len = copy_length; 619 m->m_data += copy_length; 620 m->m_len -= copy_length; 621 622 /* construct chain */ 623 if (m->m_len == 0) { 624 m_head->m_next = m_free(m); 625 } else { 626 m_head->m_next = m; 627 } 628 629 /* override m */ 630 m = m_head; 631 } 632 633 m_set_rcvif(m, ifp); 634 635 /* 636 * bpf_mtap() and ifp->if_ipackets++ is done in if_input() 637 * 638 * obytes is incremented at ether_output() or bridge_enqueue(). 639 */ 640 if_percpuq_enqueue(ifp->if_percpuq, m); 641} 642 643void 644l2tp_start(struct ifnet *ifp) 645{ 646 struct psref psref; 647 struct l2tp_variant *var; 648 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc, 649 l2tp_ec.ec_if); 650 651 var = l2tp_getref_variant(sc, &psref); 652 if (var == NULL) 653 return; 654 655 if (var->lv_psrc == NULL || var->lv_pdst == NULL) 656 return; 657 658 kpreempt_disable(); 659 softint_schedule(sc->l2tp_si); 660 kpreempt_enable(); 661 l2tp_putref_variant(var, &psref); 662} 663 664int 665l2tp_transmit(struct ifnet *ifp, struct mbuf *m) 666{ 667 int error; 668 struct psref psref; 669 struct l2tp_variant *var; 670 struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc, 671 l2tp_ec.ec_if); 672 673 var = l2tp_getref_variant(sc, &psref); 674 if (var == NULL) { 675 m_freem(m); 676 return ENETDOWN; 677 } 678 679 if (var->lv_psrc == NULL || var->lv_pdst == NULL) { 680 m_freem(m); 681 error = ENETDOWN; 682 goto out; 683 } 684 685 m->m_flags &= ~(M_BCAST|M_MCAST); 686 687 error = l2tp_tx_enqueue(var, m); 688out: 689 l2tp_putref_variant(var, &psref); 690 return error; 691} 692 693/* XXX how should we handle IPv6 scope on SIOC[GS]IFPHYADDR? 
 */
/*
 * ifnet if_ioctl: handles interface flags/MTU, tunnel endpoint
 * (SIOC[SGD]*PHYADDR*) and L2TPv3 session/cookie/state (SIOC*L2TP*)
 * requests.  Address-returning cases take a psref on the current
 * variant for the duration of the copy.
 */
int
l2tp_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct l2tp_variant *var, *var_tmp;
	struct ifreq *ifr = data;
	int error = 0, size;
	struct sockaddr *dst, *src;
	struct l2tp_req l2tpr;
	u_long mtu;
	int bound;
	struct psref psref;

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		break;

	case SIOCSIFDSTADDR:
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		switch (ifr->ifr_addr.sa_family) {
#ifdef INET
		case AF_INET:	/* IP supports Multicast */
			break;
#endif /* INET */
#ifdef INET6
		case AF_INET6:	/* IP6 supports Multicast */
			break;
#endif /* INET6 */
		default:  /* Other protocols doesn't support Multicast */
			error = EAFNOSUPPORT;
			break;
		}
		break;

	case SIOCSIFMTU:
		mtu = ifr->ifr_mtu;
		if (mtu < L2TP_MTU_MIN || mtu > L2TP_MTU_MAX)
			return (EINVAL);
		ifp->if_mtu = mtu;
		break;

#ifdef INET
	case SIOCSIFPHYADDR:
		/* Set IPv4 tunnel endpoints; both must be sockaddr_in. */
		src = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET || dst->sa_family != AF_INET)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in)
		    || dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET */
#ifdef INET6
	case SIOCSIFPHYADDR_IN6:
		/* Set IPv6 tunnel endpoints; both must be sockaddr_in6. */
		src = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_addr);
		dst = (struct sockaddr *)
			&(((struct in6_aliasreq *)data)->ifra_dstaddr);
		if (src->sa_family != AF_INET6 || dst->sa_family != AF_INET6)
			return EAFNOSUPPORT;
		else if (src->sa_len != sizeof(struct sockaddr_in6)
		    || dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

#endif /* INET6 */
	case SIOCSLIFPHYADDR:
		/* Generic form: families must match and lengths be sane. */
		src = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_family != dst->sa_family)
			return EINVAL;
		else if (src->sa_family == AF_INET
		    && src->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (src->sa_family == AF_INET6
		    && src->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;
		else if (dst->sa_family == AF_INET
		    && dst->sa_len != sizeof(struct sockaddr_in))
			return EINVAL;
		else if (dst->sa_family == AF_INET6
		    && dst->sa_len != sizeof(struct sockaddr_in6))
			return EINVAL;

		error = l2tp_set_tunnel(&sc->l2tp_ec.ec_if, src, dst);
		break;

	case SIOCDIFPHYADDR:
		l2tp_delete_tunnel(&sc->l2tp_ec.ec_if);
		break;

	case SIOCGIFPSRCADDR:
#ifdef INET6
	case SIOCGIFPSRCADDR_IN6:
#endif /* INET6 */
		/* Copy out the local (outer) tunnel address. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_psrc;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPSRCADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPSRCADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGIFPDSTADDR:
#ifdef INET6
	case SIOCGIFPDSTADDR_IN6:
#endif /* INET6 */
		/* Copy out the remote (outer) tunnel address. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		src = var->lv_pdst;
		switch (cmd) {
#ifdef INET
		case SIOCGIFPDSTADDR:
			dst = &ifr->ifr_addr;
			size = sizeof(ifr->ifr_addr);
			break;
#endif /* INET */
#ifdef INET6
		case SIOCGIFPDSTADDR_IN6:
			dst = (struct sockaddr *)
				&(((struct in6_ifreq *)data)->ifr_addr);
			size = sizeof(((struct in6_ifreq *)data)->ifr_addr);
			break;
#endif /* INET6 */
		default:
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCGLIFPHYADDR:
		/* Copy out both endpoints in one if_laddrreq. */
		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}
		if (var->lv_psrc == NULL || var->lv_pdst == NULL) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		/* copy src */
		src = var->lv_psrc;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->addr);
		size = sizeof(((struct if_laddrreq *)data)->addr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);

		/* copy dst */
		src = var->lv_pdst;
		dst = (struct sockaddr *)
			&(((struct if_laddrreq *)data)->dstaddr);
		size = sizeof(((struct if_laddrreq *)data)->dstaddr);
		if (src->sa_len > size) {
			l2tp_putref_variant(var, &psref);
			curlwp_bindx(bound);
			return EINVAL;
		}
		sockaddr_copy(dst, src->sa_len, src);
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);
		break;

	case SIOCSL2TPSESSION:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		/* session id must not zero */
		if (l2tpr.my_sess_id == 0 || l2tpr.peer_sess_id == 0)
			return EINVAL;

		/* Refuse a local session id already claimed elsewhere. */
		bound = curlwp_bind();
		var_tmp = l2tp_lookup_session_ref(l2tpr.my_sess_id, &psref);
		if (var_tmp != NULL) {
			/* duplicate session id */
			log(LOG_WARNING, "%s: duplicate session id %" PRIu32 " of %s\n",
			    sc->l2tp_ec.ec_if.if_xname, l2tpr.my_sess_id,
			    var_tmp->lv_softc->l2tp_ec.ec_if.if_xname);
			psref_release(&psref, &var_tmp->lv_psref,
			    lv_psref_class);
			curlwp_bindx(bound);
			return EINVAL;
		}
		curlwp_bindx(bound);

		error = l2tp_set_session(sc, l2tpr.my_sess_id, l2tpr.peer_sess_id);
		break;
	case SIOCDL2TPSESSION:
		l2tp_clear_session(sc);
		break;
	case SIOCSL2TPCOOKIE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		error = l2tp_set_cookie(sc, l2tpr.my_cookie, l2tpr.my_cookie_len,
		    l2tpr.peer_cookie, l2tpr.peer_cookie_len);
		break;
	case SIOCDL2TPCOOKIE:
		l2tp_clear_cookie(sc);
		break;
	case SIOCSL2TPSTATE:
		if ((error = copyin(ifr->ifr_data, &l2tpr, sizeof(l2tpr))) != 0)
			break;

		l2tp_set_state(sc, l2tpr.state);
		break;
	case SIOCGL2TP:
		/* get L2TPV3 session info */
		memset(&l2tpr, 0, sizeof(l2tpr));

		bound = curlwp_bind();
		var = l2tp_getref_variant(sc, &psref);
		if (var == NULL) {
			curlwp_bindx(bound);
			error = EADDRNOTAVAIL;
			goto bad;
		}

		l2tpr.state = var->lv_state;
		l2tpr.my_sess_id = var->lv_my_sess_id;
		l2tpr.peer_sess_id = var->lv_peer_sess_id;
		l2tpr.my_cookie = var->lv_my_cookie;
		l2tpr.my_cookie_len = var->lv_my_cookie_len;
		l2tpr.peer_cookie = var->lv_peer_cookie;
		l2tpr.peer_cookie_len = var->lv_peer_cookie_len;
		l2tp_putref_variant(var, &psref);
		curlwp_bindx(bound);

		error = copyout(&l2tpr, ifr->ifr_data, sizeof(l2tpr));
		break;

	default:
		error = ifioctl_common(ifp, cmd, data);
		break;
	}
 bad:
	return error;
}

/*
 * Install new tunnel endpoints by publishing a copied variant
 * (copy-update via l2tp_variant_update()); the old variant and its
 * addresses are freed after all readers have drained.
 */
static int
l2tp_set_tunnel(struct ifnet *ifp, struct sockaddr *src, struct sockaddr *dst)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct sockaddr *nsrc, *ndst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nsrc = sockaddr_dup(src, M_WAITOK);
	ndst = sockaddr_dup(dst, M_WAITOK);

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error)
		goto error;

	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = nsrc;
	nvar->lv_pdst = ndst;
	error = l2tp_encap_attach(nvar);
	if (error) {
		mutex_exit(&sc->l2tp_lock);
		encap_lock_exit();
		goto error;
	}
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
	return 0;

error:
	sockaddr_free(nsrc);
	sockaddr_free(ndst);
	kmem_free(nvar, sizeof(*nvar));

	return error;
}

/*
 * Remove the tunnel endpoints: publish a copied variant with NULL
 * src/dst, then free the old variant and addresses once drained.
 */
static void
l2tp_delete_tunnel(struct ifnet *ifp)
{
	struct l2tp_softc *sc = container_of(ifp, struct l2tp_softc,
	    l2tp_ec.ec_if);
	struct sockaddr *osrc, *odst;
	struct l2tp_variant *ovar, *nvar;
	int error;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	error = encap_lock_enter();
	if (error) {
		kmem_free(nvar, sizeof(*nvar));
		return;
	}
	mutex_enter(&sc->l2tp_lock);

	ovar = sc->l2tp_var;
	osrc = ovar->lv_psrc;
	odst = ovar->lv_pdst;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_psrc = NULL;
	nvar->lv_pdst = NULL;
	l2tp_variant_update(sc, nvar);

	mutex_exit(&sc->l2tp_lock);

	(void)l2tp_encap_detach(ovar);
	encap_lock_exit();

	if (osrc)
		sockaddr_free(osrc);
	if (odst)
		sockaddr_free(odst);
	kmem_free(ovar, sizeof(*ovar));
}

/*
 * Fold a 32-bit session id into a hash bucket index (mask is the
 * hashinit()-provided bucket mask).
 */
static int
id_hash_func(uint32_t id, u_long mask)
{
	uint32_t hash;

	hash = (id >> 16) ^ id;
	hash = (hash >> 4) ^ hash;

	return hash & mask;
}

/* Allocate the session-id hash (L2TP_ID_HASH_SIZE buckets). */
static void
l2tp_hash_init(void)
{

	l2tp_hash.lists = hashinit(L2TP_ID_HASH_SIZE, HASH_PSLIST, true,
	    &l2tp_hash.mask);
}

/*
 * Destroy the session-id hash; fails with EBUSY while any bucket is
 * non-empty.
 */
static int
l2tp_hash_fini(void)
{
	int i;

	mutex_enter(&l2tp_hash.lock);

	for (i = 0; i < l2tp_hash.mask + 1; i++) {
		if (PSLIST_WRITER_FIRST(&l2tp_hash.lists[i], struct l2tp_softc,
			l2tp_hash) != NULL) {
			mutex_exit(&l2tp_hash.lock);
			return EBUSY;
		}
	}
	for (i = 0; i < l2tp_hash.mask + 1; i++)
		PSLIST_DESTROY(&l2tp_hash.lists[i]);

	mutex_exit(&l2tp_hash.lock);

	hashdone(l2tp_hash.lists, HASH_PSLIST, l2tp_hash.mask);

	return 0;
}

/*
 * Set local/peer session ids: publish a copied variant, remove the
 * softc from its old hash bucket (waiting out readers), then insert it
 * into the bucket for the new local id.  Always returns 0.
 */
static int
l2tp_set_session(struct l2tp_softc *sc, uint32_t my_sess_id,
    uint32_t peer_sess_id)
{
	uint32_t idx;
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;
	struct ifnet *ifp = &sc->l2tp_ec.ec_if;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = my_sess_id;
	nvar->lv_peer_sess_id = peer_sess_id;

	mutex_enter(&l2tp_hash.lock);
	/* Only hashed if a full session was previously configured. */
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);
	PSLIST_ENTRY_DESTROY(sc, l2tp_hash);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);

	idx = id_hash_func(nvar->lv_my_sess_id, l2tp_hash.mask);
	if ((ifp->if_flags & IFF_DEBUG) != 0)
		log(LOG_DEBUG, "%s: add hash entry: sess_id=%" PRIu32 ", idx=%" PRIu32 "\n",
		    sc->l2tp_ec.ec_if.if_xname, nvar->lv_my_sess_id, idx);

	/* Re-init the entry destroyed above before re-inserting. */
	PSLIST_ENTRY_INIT(sc, l2tp_hash);
	mutex_enter(&l2tp_hash.lock);
	PSLIST_WRITER_INSERT_HEAD(&l2tp_hash.lists[idx], sc, l2tp_hash);
	mutex_exit(&l2tp_hash.lock);

	kmem_free(ovar, sizeof(*ovar));
	return 0;
}

/*
 * Clear the session ids: publish a copied variant with zero ids and
 * unhash the softc if it was hashed.  Always returns 0.
 */
static int
l2tp_clear_session(struct l2tp_softc *sc)
{
	struct l2tp_variant *nvar;
	struct l2tp_variant *ovar;

	nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP);

	mutex_enter(&sc->l2tp_lock);
	ovar = sc->l2tp_var;
	*nvar = *ovar;
	psref_target_init(&nvar->lv_psref, lv_psref_class);
	nvar->lv_my_sess_id = 0;
	nvar->lv_peer_sess_id = 0;

	mutex_enter(&l2tp_hash.lock);
	if (ovar->lv_my_sess_id > 0 && ovar->lv_peer_sess_id > 0) {
		PSLIST_WRITER_REMOVE(sc, l2tp_hash);
		pserialize_perform(l2tp_psz);
	}
	mutex_exit(&l2tp_hash.lock);

	l2tp_variant_update(sc, nvar);
	mutex_exit(&sc->l2tp_lock);
	kmem_free(ovar, sizeof(*ovar));
	return 0;
}

/*
 * Look up the variant whose local session id equals "id".  On success
 * returns the variant with a psref held (caller releases); NULL if not
 * found.  Caller must be bound to a CPU (curlwp_bind()).
 */
struct l2tp_variant *
l2tp_lookup_session_ref(uint32_t id, struct psref *psref)
{
	int idx;
	int s;
	struct l2tp_softc *sc;

	idx = id_hash_func(id, l2tp_hash.mask);

	s = pserialize_read_enter();
PSLIST_READER_FOREACH(sc, &l2tp_hash.lists[idx], struct l2tp_softc, 1233 l2tp_hash) { 1234 struct l2tp_variant *var = atomic_load_consume(&sc->l2tp_var); 1235 if (var == NULL) 1236 continue; 1237 if (var->lv_my_sess_id != id) 1238 continue; 1239 psref_acquire(psref, &var->lv_psref, lv_psref_class); 1240 pserialize_read_exit(s); 1241 return var; 1242 } 1243 pserialize_read_exit(s); 1244 return NULL; 1245} 1246 1247/* 1248 * l2tp_variant update API. 1249 * 1250 * Assumption: 1251 * reader side dereferences sc->l2tp_var in reader critical section only, 1252 * that is, all of reader sides do not reader the sc->l2tp_var after 1253 * pserialize_perform(). 1254 */ 1255static void 1256l2tp_variant_update(struct l2tp_softc *sc, struct l2tp_variant *nvar) 1257{ 1258 struct ifnet *ifp = &sc->l2tp_ec.ec_if; 1259 struct l2tp_variant *ovar = sc->l2tp_var; 1260 1261 KASSERT(mutex_owned(&sc->l2tp_lock)); 1262 1263 atomic_store_release(&sc->l2tp_var, nvar); 1264 pserialize_perform(sc->l2tp_psz); 1265 psref_target_destroy(&ovar->lv_psref, lv_psref_class); 1266 1267 if (nvar != NULL) { 1268 if (nvar->lv_psrc != NULL && nvar->lv_pdst != NULL) 1269 ifp->if_flags |= IFF_RUNNING; 1270 else 1271 ifp->if_flags &= ~IFF_RUNNING; 1272 } 1273} 1274 1275static int 1276l2tp_set_cookie(struct l2tp_softc *sc, uint64_t my_cookie, u_int my_cookie_len, 1277 uint64_t peer_cookie, u_int peer_cookie_len) 1278{ 1279 struct l2tp_variant *nvar; 1280 1281 if (my_cookie == 0 || peer_cookie == 0) 1282 return EINVAL; 1283 1284 if (my_cookie_len != 4 && my_cookie_len != 8 1285 && peer_cookie_len != 4 && peer_cookie_len != 8) 1286 return EINVAL; 1287 1288 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP); 1289 1290 mutex_enter(&sc->l2tp_lock); 1291 1292 *nvar = *sc->l2tp_var; 1293 psref_target_init(&nvar->lv_psref, lv_psref_class); 1294 nvar->lv_my_cookie = my_cookie; 1295 nvar->lv_my_cookie_len = my_cookie_len; 1296 nvar->lv_peer_cookie = peer_cookie; 1297 nvar->lv_peer_cookie_len = peer_cookie_len; 1298 
nvar->lv_use_cookie = L2TP_COOKIE_ON; 1299 l2tp_variant_update(sc, nvar); 1300 1301 mutex_exit(&sc->l2tp_lock); 1302 1303 struct ifnet *ifp = &sc->l2tp_ec.ec_if; 1304 if ((ifp->if_flags & IFF_DEBUG) != 0) { 1305 log(LOG_DEBUG, 1306 "%s: set cookie: " 1307 "local cookie_len=%u local cookie=%" PRIu64 ", " 1308 "remote cookie_len=%u remote cookie=%" PRIu64 "\n", 1309 ifp->if_xname, my_cookie_len, my_cookie, 1310 peer_cookie_len, peer_cookie); 1311 } 1312 1313 return 0; 1314} 1315 1316static void 1317l2tp_clear_cookie(struct l2tp_softc *sc) 1318{ 1319 struct l2tp_variant *nvar; 1320 1321 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP); 1322 1323 mutex_enter(&sc->l2tp_lock); 1324 1325 *nvar = *sc->l2tp_var; 1326 psref_target_init(&nvar->lv_psref, lv_psref_class); 1327 nvar->lv_my_cookie = 0; 1328 nvar->lv_my_cookie_len = 0; 1329 nvar->lv_peer_cookie = 0; 1330 nvar->lv_peer_cookie_len = 0; 1331 nvar->lv_use_cookie = L2TP_COOKIE_OFF; 1332 l2tp_variant_update(sc, nvar); 1333 1334 mutex_exit(&sc->l2tp_lock); 1335} 1336 1337static void 1338l2tp_set_state(struct l2tp_softc *sc, int state) 1339{ 1340 struct ifnet *ifp = &sc->l2tp_ec.ec_if; 1341 struct l2tp_variant *nvar; 1342 int ostate; 1343 1344 nvar = kmem_alloc(sizeof(*nvar), KM_SLEEP); 1345 1346 mutex_enter(&sc->l2tp_lock); 1347 1348 *nvar = *sc->l2tp_var; 1349 psref_target_init(&nvar->lv_psref, lv_psref_class); 1350 ostate = nvar->lv_state; 1351 nvar->lv_state = state; 1352 l2tp_variant_update(sc, nvar); 1353 mutex_exit(&sc->l2tp_lock); 1354 1355 if (ostate != state) { 1356 int lstate; 1357 1358 if (state == L2TP_STATE_UP) 1359 lstate = LINK_STATE_UP; 1360 else 1361 lstate = LINK_STATE_DOWN; 1362 1363 if_link_state_change(ifp, lstate); 1364 } 1365 1366#ifdef NOTYET 1367 vlan_linkstate_notify(ifp, ifp->if_link_state); 1368#endif 1369} 1370 1371static int 1372l2tp_encap_attach(struct l2tp_variant *var) 1373{ 1374 int error; 1375 1376 if (var == NULL || var->lv_psrc == NULL) 1377 return EINVAL; 1378 1379 switch 
(var->lv_psrc->sa_family) { 1380#ifdef INET 1381 case AF_INET: 1382 error = in_l2tp_attach(var); 1383 break; 1384#endif 1385#ifdef INET6 1386 case AF_INET6: 1387 error = in6_l2tp_attach(var); 1388 break; 1389#endif 1390 default: 1391 error = EINVAL; 1392 break; 1393 } 1394 1395 return error; 1396} 1397 1398static int 1399l2tp_encap_detach(struct l2tp_variant *var) 1400{ 1401 int error; 1402 1403 if (var == NULL || var->lv_psrc == NULL) 1404 return EINVAL; 1405 1406 switch (var->lv_psrc->sa_family) { 1407#ifdef INET 1408 case AF_INET: 1409 error = in_l2tp_detach(var); 1410 break; 1411#endif 1412#ifdef INET6 1413 case AF_INET6: 1414 error = in6_l2tp_detach(var); 1415 break; 1416#endif 1417 default: 1418 error = EINVAL; 1419 break; 1420 } 1421 1422 return error; 1423} 1424 1425int 1426l2tp_check_nesting(struct ifnet *ifp, struct mbuf *m) 1427{ 1428 1429 return if_tunnel_check_nesting(ifp, m, max_l2tp_nesting); 1430} 1431 1432/* 1433 * Module infrastructure 1434 */ 1435#include "if_module.h" 1436 1437IF_MODULE(MODULE_CLASS_DRIVER, l2tp, NULL) 1438 1439 1440/* TODO: IP_TCPMSS support */ 1441#ifdef IP_TCPMSS 1442static int l2tp_need_tcpmss_clamp(struct ifnet *); 1443#ifdef INET 1444static struct mbuf *l2tp_tcpmss4_clamp(struct ifnet *, struct mbuf *); 1445#endif 1446#ifdef INET6 1447static struct mbuf *l2tp_tcpmss6_clamp(struct ifnet *, struct mbuf *); 1448#endif 1449 1450struct mbuf * 1451l2tp_tcpmss_clamp(struct ifnet *ifp, struct mbuf *m) 1452{ 1453 struct ether_header *eh; 1454 struct ether_vlan_header evh; 1455 1456 if (!l2tp_need_tcpmss_clamp(ifp)) { 1457 return m; 1458 } 1459 1460 if (m->m_pkthdr.len < sizeof(evh)) { 1461 m_freem(m); 1462 return NULL; 1463 } 1464 1465 /* save ether header */ 1466 m_copydata(m, 0, sizeof(evh), (void *)&evh); 1467 eh = (struct ether_header *)&evh; 1468 1469 switch (ntohs(eh->ether_type)) { 1470 case ETHERTYPE_VLAN: /* Ether + VLAN */ 1471 if (m->m_pkthdr.len <= sizeof(struct ether_vlan_header)) 1472 break; 1473 m_adj(m, 
sizeof(struct ether_vlan_header)); 1474 switch (ntohs(evh.evl_proto)) { 1475#ifdef INET 1476 case ETHERTYPE_IP: /* Ether + VLAN + IPv4 */ 1477 m = l2tp_tcpmss4_clamp(ifp, m); 1478 if (m == NULL) 1479 return NULL; 1480 break; 1481#endif /* INET */ 1482#ifdef INET6 1483 case ETHERTYPE_IPV6: /* Ether + VLAN + IPv6 */ 1484 m = l2tp_tcpmss6_clamp(ifp, m); 1485 if (m == NULL) 1486 return NULL; 1487 break; 1488#endif /* INET6 */ 1489 default: 1490 break; 1491 } 1492 1493 /* restore ether header */ 1494 M_PREPEND(m, sizeof(struct ether_vlan_header), 1495 M_DONTWAIT); 1496 if (m == NULL) 1497 return NULL; 1498 *mtod(m, struct ether_vlan_header *) = evh; 1499 break; 1500 1501#ifdef INET 1502 case ETHERTYPE_IP: /* Ether + IPv4 */ 1503 if (m->m_pkthdr.len <= sizeof(struct ether_header)) 1504 break; 1505 m_adj(m, sizeof(struct ether_header)); 1506 m = l2tp_tcpmss4_clamp(ifp, m); 1507 if (m == NULL) 1508 return NULL; 1509 /* restore ether header */ 1510 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT); 1511 if (m == NULL) 1512 return NULL; 1513 *mtod(m, struct ether_header *) = *eh; 1514 break; 1515#endif /* INET */ 1516 1517#ifdef INET6 1518 case ETHERTYPE_IPV6: /* Ether + IPv6 */ 1519 if (m->m_pkthdr.len <= sizeof(struct ether_header)) 1520 break; 1521 m_adj(m, sizeof(struct ether_header)); 1522 m = l2tp_tcpmss6_clamp(ifp, m); 1523 if (m == NULL) 1524 return NULL; 1525 /* restore ether header */ 1526 M_PREPEND(m, sizeof(struct ether_header), M_DONTWAIT); 1527 if (m == NULL) 1528 return NULL; 1529 *mtod(m, struct ether_header *) = *eh; 1530 break; 1531#endif /* INET6 */ 1532 1533 default: 1534 break; 1535 } 1536 1537 return m; 1538} 1539 1540static int 1541l2tp_need_tcpmss_clamp(struct ifnet *ifp) 1542{ 1543 int ret = 0; 1544 1545#ifdef INET 1546 if (ifp->if_tcpmss != 0) 1547 ret = 1; 1548#endif 1549 1550#ifdef INET6 1551 if (ifp->if_tcpmss6 != 0) 1552 ret = 1; 1553#endif 1554 1555 return ret; 1556} 1557 1558#ifdef INET 1559static struct mbuf * 
1560l2tp_tcpmss4_clamp(struct ifnet *ifp, struct mbuf *m) 1561{ 1562 1563 if (ifp->if_tcpmss != 0) { 1564 return ip_tcpmss(m, (ifp->if_tcpmss < 0) ? 1565 ifp->if_mtu - IP_TCPMSS_EXTLEN : 1566 ifp->if_tcpmss); 1567 } 1568 return m; 1569} 1570#endif /* INET */ 1571 1572#ifdef INET6 1573static struct mbuf * 1574l2tp_tcpmss6_clamp(struct ifnet *ifp, struct mbuf *m) 1575{ 1576 int ip6hdrlen; 1577 1578 if (ifp->if_tcpmss6 != 0 && 1579 ip6_tcpmss_applicable(m, &ip6hdrlen)) { 1580 return ip6_tcpmss(m, ip6hdrlen, 1581 (ifp->if_tcpmss6 < 0) ? 1582 ifp->if_mtu - IP6_TCPMSS_EXTLEN : 1583 ifp->if_tcpmss6); 1584 } 1585 return m; 1586} 1587#endif /* INET6 */ 1588 1589#endif /* IP_TCPMSS */ 1590