/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/tom/t4_tom.c 355242 2019-11-30 19:33:14Z np $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/limits.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/refcount.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/scope6_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_tcb.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
#include "tom/t4_tls.h"

static struct protosw toe_protosw;
static struct pr_usrreqs toe_usrreqs;

static struct protosw toe6_protosw;
static struct pr_usrreqs toe6_usrreqs;

/* Module ops */
static int t4_tom_mod_load(void);
static int t4_tom_mod_unload(void);
static int t4_tom_modevent(module_t, int, void *);

/* ULD ops and helpers */
static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);

static struct uld_info tom_uld_info = {
	.uld_id = ULD_TOM,
	.activate = t4_tom_activate,
	.deactivate = t4_tom_deactivate,
};

static void release_offload_resources(struct toepcb *);
static int alloc_tid_tabs(struct tid_info *);
static void free_tid_tabs(struct tid_info *);
static void free_tom_data(struct adapter *, struct tom_data *);
static void reclaim_wr_resources(void *, int);
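
/*
 * Allocate a TOE PCB for a connection on the given VI and bind it to the
 * requested offload tx and rx queues.  Returns NULL if memory for the
 * toepcb and its tx descriptors cannot be allocated.
 */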
struct toepcb *
alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
{
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct toepcb *toep;
	int tx_credits, txsd_total, len;

	/*
	 * The firmware counts tx work request credits in units of 16 bytes
	 * each.  Reserve room for an ABORT_REQ so the driver never has to
	 * worry about tx credits if it wants to abort a connection.
	 */
	tx_credits = sc->params.ofldq_wr_cred;
	tx_credits -= howmany(sizeof(struct cpl_abort_req), 16);

	/*
	 * The shortest possible tx work request is a fw_ofld_tx_data_wr + 1
	 * byte of immediate payload, and the firmware counts tx work request
	 * credits in units of 16 bytes.  Calculate the maximum number of work
	 * requests possible.
	 */
	txsd_total = tx_credits /
	    howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16);

	KASSERT(txqid >= vi->first_ofld_txq &&
	    txqid < vi->first_ofld_txq + vi->nofldtxq,
	    ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi,
	    vi->first_ofld_txq, vi->nofldtxq));

	KASSERT(rxqid >= vi->first_ofld_rxq &&
	    rxqid < vi->first_ofld_rxq + vi->nofldrxq,
	    ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi,
	    vi->first_ofld_rxq, vi->nofldrxq));

	len = offsetof(struct toepcb, txsd) +
	    txsd_total * sizeof(struct ofld_tx_sdesc);

	toep = malloc(len, M_CXGBE, M_ZERO | flags);
	if (toep == NULL)
		return (NULL);

	refcount_init(&toep->refcount, 1);
	toep->td = sc->tom_softc;
	toep->vi = vi;
	toep->tc_idx = -1;
	toep->tx_total = tx_credits;
	toep->tx_credits = tx_credits;
	toep->ofld_txq = &sc->sge.ofld_txq[txqid];
	toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid];
	toep->ctrlq = &sc->sge.ctrlq[pi->port_id];
	mbufq_init(&toep->ulp_pduq, INT_MAX);
	mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX);
	toep->txsd_total = txsd_total;
	toep->txsd_avail = txsd_total;
	toep->txsd_pidx = 0;
	toep->txsd_cidx = 0;
	aiotx_init_toep(toep);

	return (toep);
}

struct toepcb *
hold_toepcb(struct toepcb *toep)
{

	refcount_acquire(&toep->refcount);
	return (toep);
}

void
free_toepcb(struct toepcb *toep)
{

	if (refcount_release(&toep->refcount) == 0)
		return;

	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: attached to an inpcb", __func__));
	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: CPL pending", __func__));

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_uninit_toep(toep);
	tls_uninit_toep(toep);
	free(toep, M_CXGBE);
}
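
/*
 * A toepcb starts out with a single reference (from alloc_toepcb()).
 * hold_toepcb() takes extra references and free_toepcb() drops one,
 * freeing the structure only when the last reference goes away.
 */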

/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	if (inp->inp_vflag & INP_IPV6)
		so->so_proto = &toe6_protosw;
	else
		so->so_proto = &toe_protosw;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;

	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}

static void
release_offload_resources(struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	int tid = toep->tid;

	KASSERT(!(toep->flags & TPF_CPL_PENDING),
	    ("%s: %p has CPL pending.", __func__, toep));
	KASSERT(!(toep->flags & TPF_ATTACHED),
	    ("%s: %p is still attached.", __func__, toep));

	CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)",
	    __func__, toep, tid, toep->l2te, toep->ce);

	/*
	 * These queues should have been emptied at approximately the same time
	 * that a normal connection's socket's so_snd would have been purged or
	 * drained.  Do _not_ clean up here.
	 */
	MPASS(mbufq_len(&toep->ulp_pduq) == 0);
	MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0);
#ifdef INVARIANTS
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_assert_empty(toep);
#endif

	if (toep->l2te)
		t4_l2t_release(toep->l2te);

	if (tid >= 0) {
		remove_tid(sc, tid, toep->ce ? 2 : 1);
		release_tid(sc, tid, toep->ctrlq);
	}

	if (toep->ce)
		t4_release_lip(sc, toep->ce);

	if (toep->tc_idx != -1)
		t4_release_cl_rl(sc, toep->vi->pi->port_id, toep->tc_idx);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	free_toepcb(toep);
}
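
/*
 * Note that release_offload_resources() is reached either from
 * t4_pcb_detach() (when no CPL is pending) or from final_cpl_received()
 * (when the toepcb is no longer attached to an inpcb); whichever of the
 * two events happens second performs the release.
 */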

/*
 * The kernel is done with the TCP PCB and this is our opportunity to unhook
 * the toepcb hanging off of it.  If the TOE driver is also done with the
 * toepcb (no pending CPL) then it is time to release all resources tied to
 * the toepcb.
 *
 * Also gets called when an offloaded active open fails and the TOM wants the
 * kernel to take the TCP PCB back.
 */
static void
t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp)
{
#if defined(KTR) || defined(INVARIANTS)
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->flags & TPF_ATTACHED,
	    ("%s: not attached", __func__));

#ifdef KTR
	if (tp->t_state == TCPS_SYN_SENT) {
		CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)",
		    __func__, toep->tid, toep, toep->flags, inp,
		    inp->inp_flags);
	} else {
		CTR6(KTR_CXGBE,
		    "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)",
		    toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp,
		    inp->inp_flags);
	}
#endif

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->flags &= ~TPF_ATTACHED;

	if (!(toep->flags & TPF_CPL_PENDING))
		release_offload_resources(toep);
}

/*
 * setsockopt handler.
 */
static void
t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name)
{
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;

	if (dir == SOPT_GET)
		return;

	CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name);

	switch (name) {
	case TCP_NODELAY:
		if (tp->t_state != TCPS_ESTABLISHED)
			break;
		t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
		    V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1),
		    0, 0);
		break;
	default:
		break;
	}
}

static inline int
get_tcb_bit(u_char *tcb, int bit)
{
	int ix, shift;

	ix = 127 - (bit >> 3);
	shift = bit & 0x7;

	return ((tcb[ix] >> shift) & 1);
}

static inline uint64_t
get_tcb_bits(u_char *tcb, int hi, int lo)
{
	uint64_t rc = 0;

	while (hi >= lo) {
		rc = (rc << 1) | get_tcb_bit(tcb, hi);
		--hi;
	}

	return (rc);
}
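
/*
 * Example: TCB bit 115 lives in tcb[127 - (115 >> 3)] = tcb[113] at shift 3,
 * so get_tcb_bits(tcb, 115, 112) assembles the 4-bit field that
 * t4_tcp_info() below reports as tcpi_state.
 */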

/*
 * Called by the kernel to allow the TOE driver to "refine" values filled in
 * the tcp_info for an offloaded connection.
 */
static void
t4_tcp_info(struct toedev *tod, struct tcpcb *tp, struct tcp_info *ti)
{
	int i, j, k, rc;
	struct adapter *sc = tod->tod_softc;
	struct toepcb *toep = tp->t_toe;
	uint32_t addr, v;
	uint32_t buf[TCB_SIZE / sizeof(uint32_t)];
	u_char *tcb, tmp;

	INP_WLOCK_ASSERT(tp->t_inpcb);
	MPASS(ti != NULL);

	addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + toep->tid * TCB_SIZE;
	rc = read_via_memwin(sc, 2, addr, &buf[0], TCB_SIZE);
	if (rc != 0)
		return;

	tcb = (u_char *)&buf[0];
	for (i = 0, j = TCB_SIZE - 16; i < j; i += 16, j -= 16) {
		for (k = 0; k < 16; k++) {
			tmp = tcb[i + k];
			tcb[i + k] = tcb[j + k];
			tcb[j + k] = tmp;
		}
	}

	ti->tcpi_state = get_tcb_bits(tcb, 115, 112);

	v = get_tcb_bits(tcb, 271, 256);
	ti->tcpi_rtt = tcp_ticks_to_us(sc, v);

	v = get_tcb_bits(tcb, 287, 272);
	ti->tcpi_rttvar = tcp_ticks_to_us(sc, v);

	ti->tcpi_snd_ssthresh = get_tcb_bits(tcb, 487, 460);
	ti->tcpi_snd_cwnd = get_tcb_bits(tcb, 459, 432);
	ti->tcpi_rcv_nxt = get_tcb_bits(tcb, 553, 522);

	ti->tcpi_snd_nxt = get_tcb_bits(tcb, 319, 288) -
	    get_tcb_bits(tcb, 375, 348);

	/* Receive window being advertised by us. */
	ti->tcpi_rcv_space = get_tcb_bits(tcb, 581, 554);

	/* Send window ceiling. */
	v = get_tcb_bits(tcb, 159, 144) << get_tcb_bits(tcb, 131, 128);
	ti->tcpi_snd_wnd = min(v, ti->tcpi_snd_cwnd);
}

/*
 * The TOE driver will not receive any more CPLs for the tid associated with
 * the toepcb; release the hold on the inpcb.
 */
void
final_cpl_received(struct toepcb *toep)
{
	struct inpcb *inp = toep->inp;

	KASSERT(inp != NULL, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);
	KASSERT(toep->flags & TPF_CPL_PENDING,
	    ("%s: CPL not pending already?", __func__));

	CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)",
	    __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags);

	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		release_ddp_resources(toep);
	toep->inp = NULL;
	toep->flags &= ~TPF_CPL_PENDING;
	mbufq_drain(&toep->ulp_pdu_reclaimq);

	if (!(toep->flags & TPF_ATTACHED))
		release_offload_resources(toep);

	if (!in_pcbrele_wlocked(inp))
		INP_WUNLOCK(inp);
}

void
insert_tid(struct adapter *sc, int tid, void *ctx, int ntids)
{
	struct tid_info *t = &sc->tids;

	MPASS(tid >= t->tid_base);
	MPASS(tid - t->tid_base < t->ntids);

	t->tid_tab[tid - t->tid_base] = ctx;
	atomic_add_int(&t->tids_in_use, ntids);
}

void *
lookup_tid(struct adapter *sc, int tid)
{
	struct tid_info *t = &sc->tids;

	return (t->tid_tab[tid - t->tid_base]);
}

void
update_tid(struct adapter *sc, int tid, void *ctx)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = ctx;
}

void
remove_tid(struct adapter *sc, int tid, int ntids)
{
	struct tid_info *t = &sc->tids;

	t->tid_tab[tid - t->tid_base] = NULL;
	atomic_subtract_int(&t->tids_in_use, ntids);
}

/*
 * What mtu_idx to use, given a 4-tuple.  Note that both s->mss and tcp_mssopt
 * have the MSS that we should advertise in our SYN.  The advertised MSS doesn't
 * account for any TCP options, so the effective MSS (payload only, no headers
 * or options) could be different.
 */
int
find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc,
    struct offload_settings *s)
{
	unsigned short *mtus = &sc->params.mtus[0];
	int i, mss, mtu;

	MPASS(inc != NULL);

	mss = s->mss > 0 ? s->mss : tcp_mssopt(inc);
	if (inc->inc_flags & INC_ISIPV6)
		mtu = mss + sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
	else
		mtu = mss + sizeof(struct ip) + sizeof(struct tcphdr);

	for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mtu; i++)
		continue;

	return (i);
}

/*
 * Determine the receive window size for a socket.
 */
u_long
select_rcv_wnd(struct socket *so)
{
	unsigned long wnd;

	SOCKBUF_LOCK_ASSERT(&so->so_rcv);

	wnd = sbspace(&so->so_rcv);
	if (wnd < MIN_RCV_WND)
		wnd = MIN_RCV_WND;

	return min(wnd, MAX_RCV_WND);
}

int
select_rcv_wscale(void)
{
	int wscale = 0;
	unsigned long space = sb_max;

	if (space > MAX_RCV_WND)
		space = MAX_RCV_WND;

	while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space)
		wscale++;

	return (wscale);
}

/*
 * The socket 'so' could be a listening socket too.
 */
uint64_t
calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e,
    int mtu_idx, int rscale, int rx_credits, int ulp_mode,
    struct offload_settings *s)
{
	int keepalive;
	uint64_t opt0;

	MPASS(so != NULL);
	MPASS(vi != NULL);
	KASSERT(rx_credits <= M_RCV_BUFSIZ,
	    ("%s: rcv_bufsiz too high", __func__));

	opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) |
	    V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits) |
	    V_L2T_IDX(e->idx) | V_SMAC_SEL(vi->smt_idx) |
	    V_TX_CHAN(vi->pi->tx_chan);

	keepalive = tcp_always_keepalive || so_options_get(so) & SO_KEEPALIVE;
	opt0 |= V_KEEP_ALIVE(keepalive != 0);

	if (s->nagle < 0) {
		struct inpcb *inp = sotoinpcb(so);
		struct tcpcb *tp = intotcpcb(inp);

		opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0);
	} else
		opt0 |= V_NAGLE(s->nagle != 0);

	return htobe64(opt0);
}

uint64_t
select_ntuple(struct vi_info *vi, struct l2t_entry *e)
{
	struct adapter *sc = vi->pi->adapter;
	struct tp_params *tp = &sc->params.tp;
	uint64_t ntuple = 0;

	/*
	 * Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE)
		ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift;

	if (tp->port_shift >= 0)
		ntuple |= (uint64_t)e->lport << tp->port_shift;

	if (tp->protocol_shift >= 0)
		ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift;

	if (tp->vnic_shift >= 0 && tp->ingress_config & F_VNIC) {
		ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vi->vin) |
		    V_FT_VNID_ID_PF(sc->pf) | V_FT_VNID_ID_VLD(vi->vfvld)) <<
		    tp->vnic_shift;
	}

	if (is_t4(sc))
		return (htobe32((uint32_t)ntuple));
	else
		return (htobe64(V_FILTER_TUPLE(ntuple)));
}
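
/*
 * A connection is treated as a TLS socket if either its local or its foreign
 * port matches one of the ports listed in tt.tls_rx_ports.
 */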
static int
is_tls_sock(struct socket *so, struct adapter *sc)
{
	struct inpcb *inp = sotoinpcb(so);
	int i, rc;

	/* XXX: Eventually add a SO_WANT_TLS socket option perhaps? */
	rc = 0;
	ADAPTER_LOCK(sc);
	for (i = 0; i < sc->tt.num_tls_rx_ports; i++) {
		if (inp->inp_lport == htons(sc->tt.tls_rx_ports[i]) ||
		    inp->inp_fport == htons(sc->tt.tls_rx_ports[i])) {
			rc = 1;
			break;
		}
	}
	ADAPTER_UNLOCK(sc);
	return (rc);
}

int
select_ulp_mode(struct socket *so, struct adapter *sc,
    struct offload_settings *s)
{

	if (can_tls_offload(sc) &&
	    (s->tls > 0 || (s->tls < 0 && is_tls_sock(so, sc))))
		return (ULP_MODE_TLS);
	else if (s->ddp > 0 ||
	    (s->ddp < 0 && sc->tt.ddp && (so->so_options & SO_NO_DDP) == 0))
		return (ULP_MODE_TCPDDP);
	else
		return (ULP_MODE_NONE);
}

void
set_ulp_mode(struct toepcb *toep, int ulp_mode)
{

	CTR4(KTR_CXGBE, "%s: toep %p (tid %d) ulp_mode %d",
	    __func__, toep, toep->tid, ulp_mode);
	toep->ulp_mode = ulp_mode;
	tls_init_toep(toep);
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		ddp_init_toep(toep);
}

int
negative_advice(int status)
{

	return (status == CPL_ERR_RTX_NEG_ADVICE ||
	    status == CPL_ERR_PERSIST_NEG_ADVICE ||
	    status == CPL_ERR_KEEPALV_NEG_ADVICE);
}

static int
alloc_tid_tab(struct tid_info *t, int flags)
{

	MPASS(t->ntids > 0);
	MPASS(t->tid_tab == NULL);

	t->tid_tab = malloc(t->ntids * sizeof(*t->tid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->tid_tab == NULL)
		return (ENOMEM);
	atomic_store_rel_int(&t->tids_in_use, 0);

	return (0);
}

static void
free_tid_tab(struct tid_info *t)
{

	KASSERT(t->tids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->tids_in_use));

	free(t->tid_tab, M_CXGBE);
	t->tid_tab = NULL;
}

static int
alloc_stid_tab(struct tid_info *t, int flags)
{

	MPASS(t->nstids > 0);
	MPASS(t->stid_tab == NULL);

	t->stid_tab = malloc(t->nstids * sizeof(*t->stid_tab), M_CXGBE,
	    M_ZERO | flags);
	if (t->stid_tab == NULL)
		return (ENOMEM);
	mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF);
	t->stids_in_use = 0;
	TAILQ_INIT(&t->stids);
	t->nstids_free_head = t->nstids;

	return (0);
}

static void
free_stid_tab(struct tid_info *t)
{

	KASSERT(t->stids_in_use == 0,
	    ("%s: %d tids still in use.", __func__, t->stids_in_use));

	if (mtx_initialized(&t->stid_lock))
		mtx_destroy(&t->stid_lock);
	free(t->stid_tab, M_CXGBE);
	t->stid_tab = NULL;
}

static void
free_tid_tabs(struct tid_info *t)
{

	free_tid_tab(t);
	free_atid_tab(t);
	free_stid_tab(t);
}

static int
alloc_tid_tabs(struct tid_info *t)
{
	int rc;

	rc = alloc_tid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_atid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	rc = alloc_stid_tab(t, M_NOWAIT);
	if (rc != 0)
		goto failed;

	return (0);
failed:
	free_tid_tabs(t);
	return (rc);
}

static void
free_tom_data(struct adapter *sc, struct tom_data *td)
{

	ASSERT_SYNCHRONIZED_OP(sc);

	KASSERT(TAILQ_EMPTY(&td->toep_list),
	    ("%s: TOE PCB list is not empty.", __func__));
	KASSERT(td->lctx_count == 0,
	    ("%s: lctx hash table is not empty.", __func__));

	t4_free_ppod_region(&td->pr);

	if (td->listen_mask != 0)
		hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask);

	if (mtx_initialized(&td->unsent_wr_lock))
		mtx_destroy(&td->unsent_wr_lock);
	if (mtx_initialized(&td->lctx_hash_lock))
		mtx_destroy(&td->lctx_hash_lock);
	if (mtx_initialized(&td->toep_list_lock))
		mtx_destroy(&td->toep_list_lock);

	free_tid_tabs(&sc->tids);
	free(td, M_CXGBE);
}

static char *
prepare_pkt(int open_type, uint16_t vtag, struct inpcb *inp, int *pktlen,
    int *buflen)
{
	char *pkt;
	struct tcphdr *th;
	int ipv6, len;
	const int maxlen =
	    max(sizeof(struct ether_header), sizeof(struct ether_vlan_header)) +
	    max(sizeof(struct ip), sizeof(struct ip6_hdr)) +
	    sizeof(struct tcphdr);

	MPASS(open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN);

	pkt = malloc(maxlen, M_CXGBE, M_ZERO | M_NOWAIT);
	if (pkt == NULL)
		return (NULL);

	ipv6 = inp->inp_vflag & INP_IPV6;
	len = 0;

	if (vtag == 0xffff) {
		struct ether_header *eh = (void *)pkt;

		if (ipv6)
			eh->ether_type = htons(ETHERTYPE_IPV6);
		else
			eh->ether_type = htons(ETHERTYPE_IP);

		len += sizeof(*eh);
	} else {
		struct ether_vlan_header *evh = (void *)pkt;

		evh->evl_encap_proto = htons(ETHERTYPE_VLAN);
		evh->evl_tag = htons(vtag);
		if (ipv6)
			evh->evl_proto = htons(ETHERTYPE_IPV6);
		else
			evh->evl_proto = htons(ETHERTYPE_IP);

		len += sizeof(*evh);
	}

	if (ipv6) {
		struct ip6_hdr *ip6 = (void *)&pkt[len];

		ip6->ip6_vfc = IPV6_VERSION;
		ip6->ip6_plen = htons(sizeof(struct tcphdr));
		ip6->ip6_nxt = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = inp->in6p_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip6->ip6_src = inp->in6p_laddr;
			ip6->ip6_dst = ip6->ip6_src;
		}

		len += sizeof(*ip6);
	} else {
		struct ip *ip = (void *)&pkt[len];

		ip->ip_v = IPVERSION;
		ip->ip_hl = sizeof(*ip) >> 2;
		ip->ip_tos = inp->inp_ip_tos;
		ip->ip_len = htons(sizeof(struct ip) + sizeof(struct tcphdr));
		ip->ip_ttl = inp->inp_ip_ttl;
		ip->ip_p = IPPROTO_TCP;
		if (open_type == OPEN_TYPE_ACTIVE) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = inp->inp_faddr;
		} else if (open_type == OPEN_TYPE_LISTEN) {
			ip->ip_src = inp->inp_laddr;
			ip->ip_dst = ip->ip_src;
		}

		len += sizeof(*ip);
	}

	th = (void *)&pkt[len];
	if (open_type == OPEN_TYPE_ACTIVE) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = inp->inp_fport;	/* ditto */
	} else if (open_type == OPEN_TYPE_LISTEN) {
		th->th_sport = inp->inp_lport;	/* network byte order already */
		th->th_dport = th->th_sport;
	}
	len += sizeof(*th);

	*pktlen = *buflen = len;
	return (pkt);
}
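
/*
 * Look up the offload policy to apply to a connection.  The synthesized
 * (active open / listen) or received (passive open) packet is run through the
 * BPF program of each rule in the policy, and the settings of the first
 * matching rule are returned.
 */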
const struct offload_settings *
lookup_offload_policy(struct adapter *sc, int open_type, struct mbuf *m,
    uint16_t vtag, struct inpcb *inp)
{
	const struct t4_offload_policy *op;
	char *pkt;
	struct offload_rule *r;
	int i, matched, pktlen, buflen;
	static const struct offload_settings allow_offloading_settings = {
		.offload = 1,
		.rx_coalesce = -1,
		.cong_algo = -1,
		.sched_class = -1,
		.tstamp = -1,
		.sack = -1,
		.nagle = -1,
		.ecn = -1,
		.ddp = -1,
		.tls = -1,
		.txq = -1,
		.rxq = -1,
		.mss = -1,
	};
	static const struct offload_settings disallow_offloading_settings = {
		.offload = 0,
		/* rest is irrelevant when offload is off. */
	};

	rw_assert(&sc->policy_lock, RA_LOCKED);

	/*
	 * If there's no Connection Offloading Policy attached to the device
	 * then we need to return a default static policy.  If
	 * "cop_managed_offloading" is true, then we need to disallow
	 * offloading until a COP is attached to the device.  Otherwise we
	 * allow offloading.
	 */
	op = sc->policy;
	if (op == NULL) {
		if (sc->tt.cop_managed_offloading)
			return (&disallow_offloading_settings);
		else
			return (&allow_offloading_settings);
	}

	switch (open_type) {
	case OPEN_TYPE_ACTIVE:
	case OPEN_TYPE_LISTEN:
		pkt = prepare_pkt(open_type, vtag, inp, &pktlen, &buflen);
		break;
	case OPEN_TYPE_PASSIVE:
		MPASS(m != NULL);
		pkt = mtod(m, char *);
		MPASS(*pkt == CPL_PASS_ACCEPT_REQ);
		pkt += sizeof(struct cpl_pass_accept_req);
		pktlen = m->m_pkthdr.len - sizeof(struct cpl_pass_accept_req);
		buflen = m->m_len - sizeof(struct cpl_pass_accept_req);
		break;
	default:
		MPASS(0);
		return (&disallow_offloading_settings);
	}

	if (pkt == NULL || pktlen == 0 || buflen == 0)
		return (&disallow_offloading_settings);

	matched = 0;
	r = &op->rule[0];
	for (i = 0; i < op->nrules; i++, r++) {
		if (r->open_type != open_type &&
		    r->open_type != OPEN_TYPE_DONTCARE) {
			continue;
		}
		matched = bpf_filter(r->bpf_prog.bf_insns, pkt, pktlen, buflen);
		if (matched)
			break;
	}

	if (open_type == OPEN_TYPE_ACTIVE || open_type == OPEN_TYPE_LISTEN)
		free(pkt, M_CXGBE);

	return (matched ? &r->settings : &disallow_offloading_settings);
}
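
/*
 * Task handler that disposes of work requests left on the unsent_wr_list,
 * i.e. active opens and SYN-ACK replies whose L2 resolution did not complete.
 */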
static void
reclaim_wr_resources(void *arg, int count)
{
	struct tom_data *td = arg;
	STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list);
	struct cpl_act_open_req *cpl;
	u_int opcode, atid, tid;
	struct wrqe *wr;
	struct adapter *sc = td_adapter(td);

	mtx_lock(&td->unsent_wr_lock);
	STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe);
	mtx_unlock(&td->unsent_wr_lock);

	while ((wr = STAILQ_FIRST(&twr_list)) != NULL) {
		STAILQ_REMOVE_HEAD(&twr_list, link);

		cpl = wrtod(wr);
		opcode = GET_OPCODE(cpl);

		switch (opcode) {
		case CPL_ACT_OPEN_REQ:
		case CPL_ACT_OPEN_REQ6:
			atid = G_TID_TID(be32toh(OPCODE_TID(cpl)));
			CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid);
			act_open_failure_cleanup(sc, atid, EHOSTUNREACH);
			free(wr, M_CXGBE);
			break;
		case CPL_PASS_ACCEPT_RPL:
			tid = GET_TID(cpl);
			CTR2(KTR_CXGBE, "%s: tid %u ", __func__, tid);
			synack_failure_cleanup(sc, tid);
			free(wr, M_CXGBE);
			break;
		default:
			log(LOG_ERR, "%s: leaked work request %p, wr_len %d, "
			    "opcode %x\n", __func__, wr, wr->wr_len, opcode);
			/* WR not freed here; go look at it with a debugger. */
		}
	}
}

/*
 * Ground control to Major TOM
 * Commencing countdown, engines on
 */
static int
t4_tom_activate(struct adapter *sc)
{
	struct tom_data *td;
	struct toedev *tod;
	struct vi_info *vi;
	int i, rc, v;

	ASSERT_SYNCHRONIZED_OP(sc);

	/* per-adapter softc for TOM */
	td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT);
	if (td == NULL)
		return (ENOMEM);

	/* List of TOE PCBs and associated lock */
	mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF);
	TAILQ_INIT(&td->toep_list);

	/* Listen context */
	mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF);
	td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE,
	    &td->listen_mask, HASH_NOWAIT);

	/* List of WRs for which L2 resolution failed */
	mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF);
	STAILQ_INIT(&td->unsent_wr_list);
	TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td);

	/* TID tables */
	rc = alloc_tid_tabs(&sc->tids);
	if (rc != 0)
		goto done;

	rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp,
	    t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods");
	if (rc != 0)
		goto done;
	t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK,
	    V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask);

	/* toedev ops */
	tod = &td->tod;
	init_toedev(tod);
	tod->tod_softc = sc;
	tod->tod_connect = t4_connect;
	tod->tod_listen_start = t4_listen_start;
	tod->tod_listen_stop = t4_listen_stop;
	tod->tod_rcvd = t4_rcvd;
	tod->tod_output = t4_tod_output;
	tod->tod_send_rst = t4_send_rst;
	tod->tod_send_fin = t4_send_fin;
	tod->tod_pcb_detach = t4_pcb_detach;
	tod->tod_l2_update = t4_l2_update;
	tod->tod_syncache_added = t4_syncache_added;
	tod->tod_syncache_removed = t4_syncache_removed;
	tod->tod_syncache_respond = t4_syncache_respond;
	tod->tod_offload_socket = t4_offload_socket;
	tod->tod_ctloutput = t4_ctloutput;
#if 0
	tod->tod_tcp_info = t4_tcp_info;
#else
	(void)&t4_tcp_info;
#endif

	for_each_port(sc, i) {
		for_each_vi(sc->port[i], v, vi) {
			TOEDEV(vi->ifp) = &td->tod;
		}
	}

	sc->tom_softc = td;
	register_toedev(sc->tom_softc);

done:
	if (rc != 0)
		free_tom_data(sc, td);
	return (rc);
}
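
/*
 * Deactivation fails with EBUSY while any port still has TOE enabled, while
 * iWARP or iSCSI is still using the TOE, or while offloaded connections,
 * listeners, or unsent work requests remain.
 */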
static int
t4_tom_deactivate(struct adapter *sc)
{
	int rc = 0;
	struct tom_data *td = sc->tom_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (td == NULL)
		return (0);	/* XXX. KASSERT? */

	if (sc->offload_map != 0)
		return (EBUSY);	/* at least one port has IFCAP_TOE enabled */

	if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI))
		return (EBUSY);	/* both iWARP and iSCSI rely on the TOE. */

	mtx_lock(&td->toep_list_lock);
	if (!TAILQ_EMPTY(&td->toep_list))
		rc = EBUSY;
	mtx_unlock(&td->toep_list_lock);

	mtx_lock(&td->lctx_hash_lock);
	if (td->lctx_count > 0)
		rc = EBUSY;
	mtx_unlock(&td->lctx_hash_lock);

	taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources);
	mtx_lock(&td->unsent_wr_lock);
	if (!STAILQ_EMPTY(&td->unsent_wr_list))
		rc = EBUSY;
	mtx_unlock(&td->unsent_wr_lock);

	if (rc == 0) {
		unregister_toedev(sc->tom_softc);
		free_tom_data(sc, td);
		sc->tom_softc = NULL;
	}

	return (rc);
}

static int
t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	int error;

	if (toep->ulp_mode == ULP_MODE_TCPDDP) {
		error = t4_aio_queue_ddp(so, job);
		if (error != EOPNOTSUPP)
			return (error);
	}

	return (t4_aio_queue_aiotx(so, job));
}

static int
t4_ctloutput_tom(struct socket *so, struct sockopt *sopt)
{

	if (sopt->sopt_level != IPPROTO_TCP)
		return (tcp_ctloutput(so, sopt));

	switch (sopt->sopt_name) {
	case TCP_TLSOM_SET_TLS_CONTEXT:
	case TCP_TLSOM_GET_TLS_TOM:
	case TCP_TLSOM_CLR_TLS_TOM:
	case TCP_TLSOM_CLR_QUIES:
		return (t4_ctloutput_tls(so, sopt));
	default:
		return (tcp_ctloutput(so, sopt));
	}
}

static int
t4_tom_mod_load(void)
{
	struct protosw *tcp_protosw, *tcp6_protosw;

	/* CPL handlers */
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl2,
	    CPL_COOKIE_TOM);
	t4_init_connect_cpl_handlers();
	t4_init_listen_cpl_handlers();
	t4_init_cpl_io_handlers();

	t4_ddp_mod_load();
	t4_tls_mod_load();

	tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
	if (tcp_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
	bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
	toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe_protosw.pr_usrreqs = &toe_usrreqs;

	tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
	if (tcp6_protosw == NULL)
		return (ENOPROTOOPT);
	bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
	bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
	toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
	toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
	toe6_protosw.pr_usrreqs = &toe6_usrreqs;

	return (t4_register_uld(&tom_uld_info));
}

static void
tom_uninit(struct adapter *sc, void *arg __unused)
{
	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun"))
		return;

	/* Try to free resources (works only if no port has IFCAP_TOE) */
	if (uld_active(sc, ULD_TOM))
		t4_deactivate_uld(sc, ULD_TOM);

	end_synchronized_op(sc, 0);
}

static int
t4_tom_mod_unload(void)
{
	t4_iterate(tom_uninit, NULL);

	if (t4_unregister_uld(&tom_uld_info) == EBUSY)
		return (EBUSY);

	t4_tls_mod_unload();
	t4_ddp_mod_unload();

	t4_uninit_connect_cpl_handlers();
	t4_uninit_listen_cpl_handlers();
	t4_uninit_cpl_io_handlers();
	t4_register_shared_cpl_handler(CPL_L2T_WRITE_RPL, NULL, CPL_COOKIE_TOM);

	return (0);
}
#endif	/* TCP_OFFLOAD */

static int
t4_tom_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = t4_tom_mod_load();
		break;

	case MOD_UNLOAD:
		rc = t4_tom_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("t4_tom: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif
	return (rc);
}

static moduledata_t t4_tom_moddata = {
	"t4_tom",
	t4_tom_modevent,
	0
};

MODULE_VERSION(t4_tom, 1);
MODULE_DEPEND(t4_tom, toecore, 1, 1, 1);
MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1);
DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY);