1/*- 2 * Copyright (c) 2012 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/tom/t4_tom.c 330303 2018-03-03 00:54:12Z jhb $"); 30 31#include "opt_inet.h" 32#include "opt_inet6.h" 33 34#include <sys/param.h> 35#include <sys/types.h> 36#include <sys/systm.h> 37#include <sys/kernel.h> 38#include <sys/ktr.h> 39#include <sys/limits.h> 40#include <sys/module.h> 41#include <sys/protosw.h> 42#include <sys/domain.h> 43#include <sys/socket.h> 44#include <sys/socketvar.h> 45#include <sys/taskqueue.h> 46#include <net/if.h> 47#include <netinet/in.h> 48#include <netinet/in_pcb.h> 49#include <netinet/in_var.h> 50#include <netinet/ip.h> 51#include <netinet/ip6.h> 52#include <netinet/tcp_var.h> 53#include <netinet6/scope6_var.h> 54#define TCPSTATES 55#include <netinet/tcp_fsm.h> 56#include <netinet/tcp_timer.h> 57#include <netinet/toecore.h> 58 59#ifdef TCP_OFFLOAD 60#include "common/common.h" 61#include "common/t4_msg.h" 62#include "common/t4_regs.h" 63#include "common/t4_regs_values.h" 64#include "common/t4_tcb.h" 65#include "tom/t4_tom_l2t.h" 66#include "tom/t4_tom.h" 67 68static struct protosw ddp_protosw; 69static struct pr_usrreqs ddp_usrreqs; 70 71static struct protosw ddp6_protosw; 72static struct pr_usrreqs ddp6_usrreqs; 73 74/* Module ops */ 75static int t4_tom_mod_load(void); 76static int t4_tom_mod_unload(void); 77static int t4_tom_modevent(module_t, int, void *); 78 79/* ULD ops and helpers */ 80static int t4_tom_activate(struct adapter *); 81static int t4_tom_deactivate(struct adapter *); 82 83static struct uld_info tom_uld_info = { 84 .uld_id = ULD_TOM, 85 .activate = t4_tom_activate, 86 .deactivate = t4_tom_deactivate, 87}; 88 89static void queue_tid_release(struct adapter *, int); 90static void release_offload_resources(struct toepcb *); 91static int alloc_tid_tabs(struct tid_info *); 92static void free_tid_tabs(struct tid_info *); 93static int add_lip(struct adapter *, struct in6_addr *); 94static int delete_lip(struct adapter *, struct in6_addr *); 95static struct clip_entry *search_lip(struct tom_data *, struct in6_addr *); 96static void init_clip_table(struct adapter *, struct tom_data *); 97static void update_clip(struct adapter *, void *); 98static void t4_clip_task(void *, int); 99static void update_clip_table(struct adapter *, struct tom_data *); 100static void destroy_clip_table(struct adapter *, struct tom_data *); 101static void free_tom_data(struct adapter *, struct tom_data *); 102static void reclaim_wr_resources(void *, int); 103 104static int in6_ifaddr_gen; 105static eventhandler_tag ifaddr_evhandler; 106static struct timeout_task clip_task; 107 108static void 109mbufq_init(struct mbufq *q, int limit) 110{ 111 112 q->head = q->tail = NULL; 113} 114 115static void 116mbufq_drain(struct mbufq *q) 117{ 118 struct mbuf *m; 119 120 while ((m = q->head) != NULL) { 121 q->head = m->m_nextpkt; 122 m_freem(m); 123 } 124 q->tail = NULL; 125} 126 127#ifdef INVARIANTS 128static inline int 129mbufq_len(const struct mbufq *q) 130{ 131 struct mbuf *m; 132 int len; 133 134 len = 0; 135 for (m = q->head; m != NULL; m = m->m_nextpkt) 136 len++; 137 return (len); 138} 139#endif 140 141struct toepcb * 142alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags) 143{ 144 struct port_info *pi = vi->pi; 145 struct adapter *sc = pi->adapter; 146 struct toepcb *toep; 147 int tx_credits, txsd_total, len; 148 149 /* 150 * The firmware counts tx work request credits in units of 16 bytes 151 * each. Reserve room for an ABORT_REQ so the driver never has to worry 152 * about tx credits if it wants to abort a connection. 153 */ 154 tx_credits = sc->params.ofldq_wr_cred; 155 tx_credits -= howmany(sizeof(struct cpl_abort_req), 16); 156 157 /* 158 * Shortest possible tx work request is a fw_ofld_tx_data_wr + 1 byte 159 * immediate payload, and firmware counts tx work request credits in 160 * units of 16 byte. Calculate the maximum work requests possible. 161 */ 162 txsd_total = tx_credits / 163 howmany(sizeof(struct fw_ofld_tx_data_wr) + 1, 16); 164 165 if (txqid < 0) 166 txqid = (arc4random() % vi->nofldtxq) + vi->first_ofld_txq; 167 KASSERT(txqid >= vi->first_ofld_txq && 168 txqid < vi->first_ofld_txq + vi->nofldtxq, 169 ("%s: txqid %d for vi %p (first %d, n %d)", __func__, txqid, vi, 170 vi->first_ofld_txq, vi->nofldtxq)); 171 172 if (rxqid < 0) 173 rxqid = (arc4random() % vi->nofldrxq) + vi->first_ofld_rxq; 174 KASSERT(rxqid >= vi->first_ofld_rxq && 175 rxqid < vi->first_ofld_rxq + vi->nofldrxq, 176 ("%s: rxqid %d for vi %p (first %d, n %d)", __func__, rxqid, vi, 177 vi->first_ofld_rxq, vi->nofldrxq)); 178 179 len = offsetof(struct toepcb, txsd) + 180 txsd_total * sizeof(struct ofld_tx_sdesc); 181 182 toep = malloc(len, M_CXGBE, M_ZERO | flags); 183 if (toep == NULL) 184 return (NULL); 185 186 toep->td = sc->tom_softc; 187 toep->vi = vi; 188 toep->tx_total = tx_credits; 189 toep->tx_credits = tx_credits; 190 toep->ofld_txq = &sc->sge.ofld_txq[txqid]; 191 toep->ofld_rxq = &sc->sge.ofld_rxq[rxqid]; 192 toep->ctrlq = &sc->sge.ctrlq[pi->port_id]; 193 mbufq_init(&toep->ulp_pduq, INT_MAX); 194 mbufq_init(&toep->ulp_pdu_reclaimq, INT_MAX); 195 toep->txsd_total = txsd_total; 196 toep->txsd_avail = txsd_total; 197 toep->txsd_pidx = 0; 198 toep->txsd_cidx = 0; 199 200 return (toep); 201} 202 203void 204free_toepcb(struct toepcb *toep) 205{ 206 207 KASSERT(!(toep->flags & TPF_ATTACHED), 208 ("%s: attached to an inpcb", __func__)); 209 KASSERT(!(toep->flags & TPF_CPL_PENDING), 210 ("%s: CPL pending", __func__)); 211 212 free(toep, M_CXGBE); 213} 214 215/* 216 * Set up the socket for TCP offload. 217 */ 218void 219offload_socket(struct socket *so, struct toepcb *toep) 220{ 221 struct tom_data *td = toep->td; 222 struct inpcb *inp = sotoinpcb(so); 223 struct tcpcb *tp = intotcpcb(inp); 224 struct sockbuf *sb; 225 226 INP_WLOCK_ASSERT(inp); 227 228 /* Update socket */ 229 sb = &so->so_snd; 230 SOCKBUF_LOCK(sb); 231 sb->sb_flags |= SB_NOCOALESCE; 232 SOCKBUF_UNLOCK(sb); 233 sb = &so->so_rcv; 234 SOCKBUF_LOCK(sb); 235 sb->sb_flags |= SB_NOCOALESCE; 236 if (toep->ulp_mode == ULP_MODE_TCPDDP) { 237 if (inp->inp_vflag & INP_IPV6) 238 so->so_proto = &ddp6_protosw; 239 else 240 so->so_proto = &ddp_protosw; 241 } 242 SOCKBUF_UNLOCK(sb); 243 244 /* Update TCP PCB */ 245 tp->tod = &td->tod; 246 tp->t_toe = toep; 247 tp->t_flags |= TF_TOE; 248 249 /* Install an extra hold on inp */ 250 toep->inp = inp; 251 toep->flags |= TPF_ATTACHED; 252 in_pcbref(inp); 253 254 /* Add the TOE PCB to the active list */ 255 mtx_lock(&td->toep_list_lock); 256 TAILQ_INSERT_HEAD(&td->toep_list, toep, link); 257 mtx_unlock(&td->toep_list_lock); 258} 259 260/* This is _not_ the normal way to "unoffload" a socket. */ 261void 262undo_offload_socket(struct socket *so) 263{ 264 struct inpcb *inp = sotoinpcb(so); 265 struct tcpcb *tp = intotcpcb(inp); 266 struct toepcb *toep = tp->t_toe; 267 struct tom_data *td = toep->td; 268 struct sockbuf *sb; 269 270 INP_WLOCK_ASSERT(inp); 271 272 sb = &so->so_snd; 273 SOCKBUF_LOCK(sb); 274 sb->sb_flags &= ~SB_NOCOALESCE; 275 SOCKBUF_UNLOCK(sb); 276 sb = &so->so_rcv; 277 SOCKBUF_LOCK(sb); 278 sb->sb_flags &= ~SB_NOCOALESCE; 279 SOCKBUF_UNLOCK(sb); 280 281 tp->tod = NULL; 282 tp->t_toe = NULL; 283 tp->t_flags &= ~TF_TOE; 284 285 toep->inp = NULL; 286 toep->flags &= ~TPF_ATTACHED; 287 if (in_pcbrele_wlocked(inp)) 288 panic("%s: inp freed.", __func__); 289 290 mtx_lock(&td->toep_list_lock); 291 TAILQ_REMOVE(&td->toep_list, toep, link); 292 mtx_unlock(&td->toep_list_lock); 293} 294 295static void 296release_offload_resources(struct toepcb *toep) 297{ 298 struct tom_data *td = toep->td; 299 struct adapter *sc = td_adapter(td); 300 int tid = toep->tid; 301 302 KASSERT(!(toep->flags & TPF_CPL_PENDING), 303 ("%s: %p has CPL pending.", __func__, toep)); 304 KASSERT(!(toep->flags & TPF_ATTACHED), 305 ("%s: %p is still attached.", __func__, toep)); 306 307 CTR5(KTR_CXGBE, "%s: toep %p (tid %d, l2te %p, ce %p)", 308 __func__, toep, tid, toep->l2te, toep->ce); 309 310 /* 311 * These queues should have been emptied at approximately the same time 312 * that a normal connection's socket's so_snd would have been purged or 313 * drained. Do _not_ clean up here. 314 */ 315 MPASS(mbufq_len(&toep->ulp_pduq) == 0); 316 MPASS(mbufq_len(&toep->ulp_pdu_reclaimq) == 0); 317 318 if (toep->ulp_mode == ULP_MODE_TCPDDP) 319 release_ddp_resources(toep); 320 321 if (toep->l2te) 322 t4_l2t_release(toep->l2te); 323 324 if (tid >= 0) { 325 remove_tid(sc, tid, toep->ce ? 2 : 1); 326 release_tid(sc, tid, toep->ctrlq); 327 } 328 329 if (toep->ce) 330 release_lip(td, toep->ce); 331 332 mtx_lock(&td->toep_list_lock); 333 TAILQ_REMOVE(&td->toep_list, toep, link); 334 mtx_unlock(&td->toep_list_lock); 335 336 free_toepcb(toep); 337} 338 339/* 340 * The kernel is done with the TCP PCB and this is our opportunity to unhook the 341 * toepcb hanging off of it. If the TOE driver is also done with the toepcb (no 342 * pending CPL) then it is time to release all resources tied to the toepcb. 343 * 344 * Also gets called when an offloaded active open fails and the TOM wants the 345 * kernel to take the TCP PCB back. 346 */ 347static void 348t4_pcb_detach(struct toedev *tod __unused, struct tcpcb *tp) 349{ 350#if defined(KTR) || defined(INVARIANTS) 351 struct inpcb *inp = tp->t_inpcb; 352#endif 353 struct toepcb *toep = tp->t_toe; 354 355 INP_WLOCK_ASSERT(inp); 356 357 KASSERT(toep != NULL, ("%s: toep is NULL", __func__)); 358 KASSERT(toep->flags & TPF_ATTACHED, 359 ("%s: not attached", __func__)); 360 361#ifdef KTR 362 if (tp->t_state == TCPS_SYN_SENT) { 363 CTR6(KTR_CXGBE, "%s: atid %d, toep %p (0x%x), inp %p (0x%x)", 364 __func__, toep->tid, toep, toep->flags, inp, 365 inp->inp_flags); 366 } else { 367 CTR6(KTR_CXGBE, 368 "t4_pcb_detach: tid %d (%s), toep %p (0x%x), inp %p (0x%x)", 369 toep->tid, tcpstates[tp->t_state], toep, toep->flags, inp, 370 inp->inp_flags); 371 } 372#endif 373 374 tp->t_toe = NULL; 375 tp->t_flags &= ~TF_TOE; 376 toep->flags &= ~TPF_ATTACHED; 377 378 if (!(toep->flags & TPF_CPL_PENDING)) 379 release_offload_resources(toep); 380} 381 382/* 383 * setsockopt handler. 384 */ 385static void 386t4_ctloutput(struct toedev *tod, struct tcpcb *tp, int dir, int name) 387{ 388 struct adapter *sc = tod->tod_softc; 389 struct toepcb *toep = tp->t_toe; 390 391 if (dir == SOPT_GET) 392 return; 393 394 CTR4(KTR_CXGBE, "%s: tp %p, dir %u, name %u", __func__, tp, dir, name); 395 396 switch (name) { 397 case TCP_NODELAY: 398 t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_T_FLAGS, 399 V_TF_NAGLE(1), V_TF_NAGLE(tp->t_flags & TF_NODELAY ? 0 : 1), 400 0, 0, toep->ofld_rxq->iq.abs_id); 401 break; 402 default: 403 break; 404 } 405} 406 407/* 408 * The TOE driver will not receive any more CPLs for the tid associated with the 409 * toepcb; release the hold on the inpcb. 410 */ 411void 412final_cpl_received(struct toepcb *toep) 413{ 414 struct inpcb *inp = toep->inp; 415 416 KASSERT(inp != NULL, ("%s: inp is NULL", __func__)); 417 INP_WLOCK_ASSERT(inp); 418 KASSERT(toep->flags & TPF_CPL_PENDING, 419 ("%s: CPL not pending already?", __func__)); 420 421 CTR6(KTR_CXGBE, "%s: tid %d, toep %p (0x%x), inp %p (0x%x)", 422 __func__, toep->tid, toep, toep->flags, inp, inp->inp_flags); 423 424 toep->inp = NULL; 425 toep->flags &= ~TPF_CPL_PENDING; 426 mbufq_drain(&toep->ulp_pdu_reclaimq); 427 428 if (!(toep->flags & TPF_ATTACHED)) 429 release_offload_resources(toep); 430 431 if (!in_pcbrele_wlocked(inp)) 432 INP_WUNLOCK(inp); 433} 434 435void 436insert_tid(struct adapter *sc, int tid, void *ctx, int ntids) 437{ 438 struct tid_info *t = &sc->tids; 439 440 t->tid_tab[tid] = ctx; 441 atomic_add_int(&t->tids_in_use, ntids); 442} 443 444void * 445lookup_tid(struct adapter *sc, int tid) 446{ 447 struct tid_info *t = &sc->tids; 448 449 return (t->tid_tab[tid]); 450} 451 452void 453update_tid(struct adapter *sc, int tid, void *ctx) 454{ 455 struct tid_info *t = &sc->tids; 456 457 t->tid_tab[tid] = ctx; 458} 459 460void 461remove_tid(struct adapter *sc, int tid, int ntids) 462{ 463 struct tid_info *t = &sc->tids; 464 465 t->tid_tab[tid] = NULL; 466 atomic_subtract_int(&t->tids_in_use, ntids); 467} 468 469void 470release_tid(struct adapter *sc, int tid, struct sge_wrq *ctrlq) 471{ 472 struct wrqe *wr; 473 struct cpl_tid_release *req; 474 475 wr = alloc_wrqe(sizeof(*req), ctrlq); 476 if (wr == NULL) { 477 queue_tid_release(sc, tid); /* defer */ 478 return; 479 } 480 req = wrtod(wr); 481 482 INIT_TP_WR_MIT_CPL(req, CPL_TID_RELEASE, tid); 483 484 t4_wrq_tx(sc, wr); 485} 486 487static void 488queue_tid_release(struct adapter *sc, int tid) 489{ 490 491 CXGBE_UNIMPLEMENTED("deferred tid release"); 492} 493 494/* 495 * What mtu_idx to use, given a 4-tuple and/or an MSS cap 496 */ 497int 498find_best_mtu_idx(struct adapter *sc, struct in_conninfo *inc, int pmss) 499{ 500 unsigned short *mtus = &sc->params.mtus[0]; 501 int i, mss, n; 502 503 KASSERT(inc != NULL || pmss > 0, 504 ("%s: at least one of inc/pmss must be specified", __func__)); 505 506 mss = inc ? tcp_mssopt(inc) : pmss; 507 if (pmss > 0 && mss > pmss) 508 mss = pmss; 509 510 if (inc->inc_flags & INC_ISIPV6) 511 n = sizeof(struct ip6_hdr) + sizeof(struct tcphdr); 512 else 513 n = sizeof(struct ip) + sizeof(struct tcphdr); 514 515 for (i = 0; i < NMTUS - 1 && mtus[i + 1] <= mss + n; i++) 516 continue; 517 518 return (i); 519} 520 521/* 522 * Determine the receive window size for a socket. 523 */ 524u_long 525select_rcv_wnd(struct socket *so) 526{ 527 unsigned long wnd; 528 529 SOCKBUF_LOCK_ASSERT(&so->so_rcv); 530 531 wnd = sbspace(&so->so_rcv); 532 if (wnd < MIN_RCV_WND) 533 wnd = MIN_RCV_WND; 534 535 return min(wnd, MAX_RCV_WND); 536} 537 538int 539select_rcv_wscale(void) 540{ 541 int wscale = 0; 542 unsigned long space = sb_max; 543 544 if (space > MAX_RCV_WND) 545 space = MAX_RCV_WND; 546 547 while (wscale < TCP_MAX_WINSHIFT && (TCP_MAXWIN << wscale) < space) 548 wscale++; 549 550 return (wscale); 551} 552 553/* 554 * socket so could be a listening socket too. 555 */ 556uint64_t 557calc_opt0(struct socket *so, struct vi_info *vi, struct l2t_entry *e, 558 int mtu_idx, int rscale, int rx_credits, int ulp_mode) 559{ 560 uint64_t opt0; 561 562 KASSERT(rx_credits <= M_RCV_BUFSIZ, 563 ("%s: rcv_bufsiz too high", __func__)); 564 565 opt0 = F_TCAM_BYPASS | V_WND_SCALE(rscale) | V_MSS_IDX(mtu_idx) | 566 V_ULP_MODE(ulp_mode) | V_RCV_BUFSIZ(rx_credits); 567 568 if (so != NULL) { 569 struct inpcb *inp = sotoinpcb(so); 570 struct tcpcb *tp = intotcpcb(inp); 571 int keepalive = tcp_always_keepalive || 572 so_options_get(so) & SO_KEEPALIVE; 573 574 opt0 |= V_NAGLE((tp->t_flags & TF_NODELAY) == 0); 575 opt0 |= V_KEEP_ALIVE(keepalive != 0); 576 } 577 578 if (e != NULL) 579 opt0 |= V_L2T_IDX(e->idx); 580 581 if (vi != NULL) { 582 opt0 |= V_SMAC_SEL(vi->smt_idx); 583 opt0 |= V_TX_CHAN(vi->pi->tx_chan); 584 } 585 586 return htobe64(opt0); 587} 588 589uint64_t 590select_ntuple(struct vi_info *vi, struct l2t_entry *e) 591{ 592 struct adapter *sc = vi->pi->adapter; 593 struct tp_params *tp = &sc->params.tp; 594 uint16_t viid = vi->viid; 595 uint64_t ntuple = 0; 596 597 /* 598 * Initialize each of the fields which we care about which are present 599 * in the Compressed Filter Tuple. 600 */ 601 if (tp->vlan_shift >= 0 && e->vlan != CPL_L2T_VLAN_NONE) 602 ntuple |= (uint64_t)(F_FT_VLAN_VLD | e->vlan) << tp->vlan_shift; 603 604 if (tp->port_shift >= 0) 605 ntuple |= (uint64_t)e->lport << tp->port_shift; 606 607 if (tp->protocol_shift >= 0) 608 ntuple |= (uint64_t)IPPROTO_TCP << tp->protocol_shift; 609 610 if (tp->vnic_shift >= 0) { 611 uint32_t vf = G_FW_VIID_VIN(viid); 612 uint32_t pf = G_FW_VIID_PFN(viid); 613 uint32_t vld = G_FW_VIID_VIVLD(viid); 614 615 ntuple |= (uint64_t)(V_FT_VNID_ID_VF(vf) | V_FT_VNID_ID_PF(pf) | 616 V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift; 617 } 618 619 if (is_t4(sc)) 620 return (htobe32((uint32_t)ntuple)); 621 else 622 return (htobe64(V_FILTER_TUPLE(ntuple))); 623} 624 625void 626set_tcpddp_ulp_mode(struct toepcb *toep) 627{ 628 629 toep->ulp_mode = ULP_MODE_TCPDDP; 630 toep->ddp_flags = DDP_OK; 631 toep->ddp_score = DDP_LOW_SCORE; 632} 633 634int 635negative_advice(int status) 636{ 637 638 return (status == CPL_ERR_RTX_NEG_ADVICE || 639 status == CPL_ERR_PERSIST_NEG_ADVICE || 640 status == CPL_ERR_KEEPALV_NEG_ADVICE); 641} 642 643static int 644alloc_tid_tabs(struct tid_info *t) 645{ 646 size_t size; 647 unsigned int i; 648 649 size = t->ntids * sizeof(*t->tid_tab) + 650 t->natids * sizeof(*t->atid_tab) + 651 t->nstids * sizeof(*t->stid_tab); 652 653 t->tid_tab = malloc(size, M_CXGBE, M_ZERO | M_NOWAIT); 654 if (t->tid_tab == NULL) 655 return (ENOMEM); 656 657 mtx_init(&t->atid_lock, "atid lock", NULL, MTX_DEF); 658 t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids]; 659 t->afree = t->atid_tab; 660 t->atids_in_use = 0; 661 for (i = 1; i < t->natids; i++) 662 t->atid_tab[i - 1].next = &t->atid_tab[i]; 663 t->atid_tab[t->natids - 1].next = NULL; 664 665 mtx_init(&t->stid_lock, "stid lock", NULL, MTX_DEF); 666 t->stid_tab = (struct listen_ctx **)&t->atid_tab[t->natids]; 667 t->stids_in_use = 0; 668 TAILQ_INIT(&t->stids); 669 t->nstids_free_head = t->nstids; 670 671 atomic_store_rel_int(&t->tids_in_use, 0); 672 673 return (0); 674} 675 676static void 677free_tid_tabs(struct tid_info *t) 678{ 679 KASSERT(t->tids_in_use == 0, 680 ("%s: %d tids still in use.", __func__, t->tids_in_use)); 681 KASSERT(t->atids_in_use == 0, 682 ("%s: %d atids still in use.", __func__, t->atids_in_use)); 683 KASSERT(t->stids_in_use == 0, 684 ("%s: %d tids still in use.", __func__, t->stids_in_use)); 685 686 free(t->tid_tab, M_CXGBE); 687 t->tid_tab = NULL; 688 689 if (mtx_initialized(&t->atid_lock)) 690 mtx_destroy(&t->atid_lock); 691 if (mtx_initialized(&t->stid_lock)) 692 mtx_destroy(&t->stid_lock); 693} 694 695static int 696add_lip(struct adapter *sc, struct in6_addr *lip) 697{ 698 struct fw_clip_cmd c; 699 700 ASSERT_SYNCHRONIZED_OP(sc); 701 /* mtx_assert(&td->clip_table_lock, MA_OWNED); */ 702 703 memset(&c, 0, sizeof(c)); 704 c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST | 705 F_FW_CMD_WRITE); 706 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_ALLOC | FW_LEN16(c)); 707 c.ip_hi = *(uint64_t *)&lip->s6_addr[0]; 708 c.ip_lo = *(uint64_t *)&lip->s6_addr[8]; 709 710 return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c)); 711} 712 713static int 714delete_lip(struct adapter *sc, struct in6_addr *lip) 715{ 716 struct fw_clip_cmd c; 717 718 ASSERT_SYNCHRONIZED_OP(sc); 719 /* mtx_assert(&td->clip_table_lock, MA_OWNED); */ 720 721 memset(&c, 0, sizeof(c)); 722 c.op_to_write = htonl(V_FW_CMD_OP(FW_CLIP_CMD) | F_FW_CMD_REQUEST | 723 F_FW_CMD_READ); 724 c.alloc_to_len16 = htonl(F_FW_CLIP_CMD_FREE | FW_LEN16(c)); 725 c.ip_hi = *(uint64_t *)&lip->s6_addr[0]; 726 c.ip_lo = *(uint64_t *)&lip->s6_addr[8]; 727 728 return (-t4_wr_mbox_ns(sc, sc->mbox, &c, sizeof(c), &c)); 729} 730 731static struct clip_entry * 732search_lip(struct tom_data *td, struct in6_addr *lip) 733{ 734 struct clip_entry *ce; 735 736 mtx_assert(&td->clip_table_lock, MA_OWNED); 737 738 TAILQ_FOREACH(ce, &td->clip_table, link) { 739 if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) 740 return (ce); 741 } 742 743 return (NULL); 744} 745 746struct clip_entry * 747hold_lip(struct tom_data *td, struct in6_addr *lip, struct clip_entry *ce) 748{ 749 750 mtx_lock(&td->clip_table_lock); 751 if (ce == NULL) 752 ce = search_lip(td, lip); 753 if (ce != NULL) 754 ce->refcount++; 755 mtx_unlock(&td->clip_table_lock); 756 757 return (ce); 758} 759 760void 761release_lip(struct tom_data *td, struct clip_entry *ce) 762{ 763 764 mtx_lock(&td->clip_table_lock); 765 KASSERT(search_lip(td, &ce->lip) == ce, 766 ("%s: CLIP entry %p p not in CLIP table.", __func__, ce)); 767 KASSERT(ce->refcount > 0, 768 ("%s: CLIP entry %p has refcount 0", __func__, ce)); 769 --ce->refcount; 770 mtx_unlock(&td->clip_table_lock); 771} 772 773static void 774init_clip_table(struct adapter *sc, struct tom_data *td) 775{ 776 777 ASSERT_SYNCHRONIZED_OP(sc); 778 779 mtx_init(&td->clip_table_lock, "CLIP table lock", NULL, MTX_DEF); 780 TAILQ_INIT(&td->clip_table); 781 td->clip_gen = -1; 782 783 update_clip_table(sc, td); 784} 785 786static void 787update_clip(struct adapter *sc, void *arg __unused) 788{ 789 790 if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4tomuc")) 791 return; 792 793 if (uld_active(sc, ULD_TOM)) 794 update_clip_table(sc, sc->tom_softc); 795 796 end_synchronized_op(sc, LOCK_HELD); 797} 798 799static void 800t4_clip_task(void *arg, int count) 801{ 802 803 t4_iterate(update_clip, NULL); 804} 805 806static void 807update_clip_table(struct adapter *sc, struct tom_data *td) 808{ 809 struct in6_ifaddr *ia; 810 struct in6_addr *lip, tlip; 811 struct clip_head stale; 812 struct clip_entry *ce, *ce_temp; 813 struct vi_info *vi; 814 int rc, gen, i, j; 815 uintptr_t last_vnet; 816 817 ASSERT_SYNCHRONIZED_OP(sc); 818 819 IN6_IFADDR_RLOCK(); 820 mtx_lock(&td->clip_table_lock); 821 822 gen = atomic_load_acq_int(&in6_ifaddr_gen); 823 if (gen == td->clip_gen) 824 goto done; 825 826 TAILQ_INIT(&stale); 827 TAILQ_CONCAT(&stale, &td->clip_table, link); 828 829 /* 830 * last_vnet optimizes the common cases where all if_vnet = NULL (no 831 * VIMAGE) or all if_vnet = vnet0. 832 */ 833 last_vnet = (uintptr_t)(-1); 834 for_each_port(sc, i) 835 for_each_vi(sc->port[i], j, vi) { 836 if (last_vnet == (uintptr_t)vi->ifp->if_vnet) 837 continue; 838 839 /* XXX: races with if_vmove */ 840 CURVNET_SET(vi->ifp->if_vnet); 841 TAILQ_FOREACH(ia, &V_in6_ifaddrhead, ia_link) { 842 lip = &ia->ia_addr.sin6_addr; 843 844 KASSERT(!IN6_IS_ADDR_MULTICAST(lip), 845 ("%s: mcast address in in6_ifaddr list", __func__)); 846 847 if (IN6_IS_ADDR_LOOPBACK(lip)) 848 continue; 849 if (IN6_IS_SCOPE_EMBED(lip)) { 850 /* Remove the embedded scope */ 851 tlip = *lip; 852 lip = &tlip; 853 in6_clearscope(lip); 854 } 855 /* 856 * XXX: how to weed out the link local address for the 857 * loopback interface? It's fe80::1 usually (always?). 858 */ 859 860 /* 861 * If it's in the main list then we already know it's 862 * not stale. 863 */ 864 TAILQ_FOREACH(ce, &td->clip_table, link) { 865 if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) 866 goto next; 867 } 868 869 /* 870 * If it's in the stale list we should move it to the 871 * main list. 872 */ 873 TAILQ_FOREACH(ce, &stale, link) { 874 if (IN6_ARE_ADDR_EQUAL(&ce->lip, lip)) { 875 TAILQ_REMOVE(&stale, ce, link); 876 TAILQ_INSERT_TAIL(&td->clip_table, ce, 877 link); 878 goto next; 879 } 880 } 881 882 /* A new IP6 address; add it to the CLIP table */ 883 ce = malloc(sizeof(*ce), M_CXGBE, M_NOWAIT); 884 memcpy(&ce->lip, lip, sizeof(ce->lip)); 885 ce->refcount = 0; 886 rc = add_lip(sc, lip); 887 if (rc == 0) 888 TAILQ_INSERT_TAIL(&td->clip_table, ce, link); 889 else { 890 char ip[INET6_ADDRSTRLEN]; 891 892 inet_ntop(AF_INET6, &ce->lip, &ip[0], 893 sizeof(ip)); 894 log(LOG_ERR, "%s: could not add %s (%d)\n", 895 __func__, ip, rc); 896 free(ce, M_CXGBE); 897 } 898next: 899 continue; 900 } 901 CURVNET_RESTORE(); 902 last_vnet = (uintptr_t)vi->ifp->if_vnet; 903 } 904 905 /* 906 * Remove stale addresses (those no longer in V_in6_ifaddrhead) that are 907 * no longer referenced by the driver. 908 */ 909 TAILQ_FOREACH_SAFE(ce, &stale, link, ce_temp) { 910 if (ce->refcount == 0) { 911 rc = delete_lip(sc, &ce->lip); 912 if (rc == 0) { 913 TAILQ_REMOVE(&stale, ce, link); 914 free(ce, M_CXGBE); 915 } else { 916 char ip[INET6_ADDRSTRLEN]; 917 918 inet_ntop(AF_INET6, &ce->lip, &ip[0], 919 sizeof(ip)); 920 log(LOG_ERR, "%s: could not delete %s (%d)\n", 921 __func__, ip, rc); 922 } 923 } 924 } 925 /* The ones that are still referenced need to stay in the CLIP table */ 926 TAILQ_CONCAT(&td->clip_table, &stale, link); 927 928 td->clip_gen = gen; 929done: 930 mtx_unlock(&td->clip_table_lock); 931 IN6_IFADDR_RUNLOCK(); 932} 933 934static void 935destroy_clip_table(struct adapter *sc, struct tom_data *td) 936{ 937 struct clip_entry *ce, *ce_temp; 938 939 if (mtx_initialized(&td->clip_table_lock)) { 940 mtx_lock(&td->clip_table_lock); 941 TAILQ_FOREACH_SAFE(ce, &td->clip_table, link, ce_temp) { 942 KASSERT(ce->refcount == 0, 943 ("%s: CLIP entry %p still in use (%d)", __func__, 944 ce, ce->refcount)); 945 TAILQ_REMOVE(&td->clip_table, ce, link); 946 delete_lip(sc, &ce->lip); 947 free(ce, M_CXGBE); 948 } 949 mtx_unlock(&td->clip_table_lock); 950 mtx_destroy(&td->clip_table_lock); 951 } 952} 953 954static void 955free_tom_data(struct adapter *sc, struct tom_data *td) 956{ 957 958 ASSERT_SYNCHRONIZED_OP(sc); 959 960 KASSERT(TAILQ_EMPTY(&td->toep_list), 961 ("%s: TOE PCB list is not empty.", __func__)); 962 KASSERT(td->lctx_count == 0, 963 ("%s: lctx hash table is not empty.", __func__)); 964 965 t4_free_ppod_region(&td->pr); 966 destroy_clip_table(sc, td); 967 968 if (td->listen_mask != 0) 969 hashdestroy(td->listen_hash, M_CXGBE, td->listen_mask); 970 971 if (mtx_initialized(&td->unsent_wr_lock)) 972 mtx_destroy(&td->unsent_wr_lock); 973 if (mtx_initialized(&td->lctx_hash_lock)) 974 mtx_destroy(&td->lctx_hash_lock); 975 if (mtx_initialized(&td->toep_list_lock)) 976 mtx_destroy(&td->toep_list_lock); 977 978 free_tid_tabs(&sc->tids); 979 free(td, M_CXGBE); 980} 981 982static void 983reclaim_wr_resources(void *arg, int count) 984{ 985 struct tom_data *td = arg; 986 STAILQ_HEAD(, wrqe) twr_list = STAILQ_HEAD_INITIALIZER(twr_list); 987 struct cpl_act_open_req *cpl; 988 u_int opcode, atid; 989 struct wrqe *wr; 990 struct adapter *sc; 991 992 mtx_lock(&td->unsent_wr_lock); 993 STAILQ_SWAP(&td->unsent_wr_list, &twr_list, wrqe); 994 mtx_unlock(&td->unsent_wr_lock); 995 996 while ((wr = STAILQ_FIRST(&twr_list)) != NULL) { 997 STAILQ_REMOVE_HEAD(&twr_list, link); 998 999 cpl = wrtod(wr); 1000 opcode = GET_OPCODE(cpl); 1001 1002 switch (opcode) { 1003 case CPL_ACT_OPEN_REQ: 1004 case CPL_ACT_OPEN_REQ6: 1005 atid = G_TID_TID(be32toh(OPCODE_TID(cpl))); 1006 sc = td_adapter(td); 1007 1008 CTR2(KTR_CXGBE, "%s: atid %u ", __func__, atid); 1009 act_open_failure_cleanup(sc, atid, EHOSTUNREACH); 1010 free(wr, M_CXGBE); 1011 break; 1012 default: 1013 log(LOG_ERR, "%s: leaked work request %p, wr_len %d, " 1014 "opcode %x\n", __func__, wr, wr->wr_len, opcode); 1015 /* WR not freed here; go look at it with a debugger. */ 1016 } 1017 } 1018} 1019 1020/* 1021 * Ground control to Major TOM 1022 * Commencing countdown, engines on 1023 */ 1024static int 1025t4_tom_activate(struct adapter *sc) 1026{ 1027 struct tom_data *td; 1028 struct toedev *tod; 1029 struct vi_info *vi; 1030 struct sge_ofld_rxq *ofld_rxq; 1031 int i, j, rc, v; 1032 1033 ASSERT_SYNCHRONIZED_OP(sc); 1034 1035 /* per-adapter softc for TOM */ 1036 td = malloc(sizeof(*td), M_CXGBE, M_ZERO | M_NOWAIT); 1037 if (td == NULL) 1038 return (ENOMEM); 1039 1040 /* List of TOE PCBs and associated lock */ 1041 mtx_init(&td->toep_list_lock, "PCB list lock", NULL, MTX_DEF); 1042 TAILQ_INIT(&td->toep_list); 1043 1044 /* Listen context */ 1045 mtx_init(&td->lctx_hash_lock, "lctx hash lock", NULL, MTX_DEF); 1046 td->listen_hash = hashinit_flags(LISTEN_HASH_SIZE, M_CXGBE, 1047 &td->listen_mask, HASH_NOWAIT); 1048 1049 /* List of WRs for which L2 resolution failed */ 1050 mtx_init(&td->unsent_wr_lock, "Unsent WR list lock", NULL, MTX_DEF); 1051 STAILQ_INIT(&td->unsent_wr_list); 1052 TASK_INIT(&td->reclaim_wr_resources, 0, reclaim_wr_resources, td); 1053 1054 /* TID tables */ 1055 rc = alloc_tid_tabs(&sc->tids); 1056 if (rc != 0) 1057 goto done; 1058 1059 rc = t4_init_ppod_region(&td->pr, &sc->vres.ddp, 1060 t4_read_reg(sc, A_ULP_RX_TDDP_PSZ), "TDDP page pods"); 1061 if (rc != 0) 1062 goto done; 1063 t4_set_reg_field(sc, A_ULP_RX_TDDP_TAGMASK, 1064 V_TDDPTAGMASK(M_TDDPTAGMASK), td->pr.pr_tag_mask); 1065 1066 /* CLIP table for IPv6 offload */ 1067 init_clip_table(sc, td); 1068 1069 /* toedev ops */ 1070 tod = &td->tod; 1071 init_toedev(tod); 1072 tod->tod_softc = sc; 1073 tod->tod_connect = t4_connect; 1074 tod->tod_listen_start = t4_listen_start; 1075 tod->tod_listen_stop = t4_listen_stop; 1076 tod->tod_rcvd = t4_rcvd; 1077 tod->tod_output = t4_tod_output; 1078 tod->tod_send_rst = t4_send_rst; 1079 tod->tod_send_fin = t4_send_fin; 1080 tod->tod_pcb_detach = t4_pcb_detach; 1081 tod->tod_l2_update = t4_l2_update; 1082 tod->tod_syncache_added = t4_syncache_added; 1083 tod->tod_syncache_removed = t4_syncache_removed; 1084 tod->tod_syncache_respond = t4_syncache_respond; 1085 tod->tod_offload_socket = t4_offload_socket; 1086 tod->tod_ctloutput = t4_ctloutput; 1087 1088 for_each_port(sc, i) { 1089 for_each_vi(sc->port[i], v, vi) { 1090 TOEDEV(vi->ifp) = &td->tod; 1091 for_each_ofld_rxq(vi, j, ofld_rxq) { 1092 ofld_rxq->iq.set_tcb_rpl = do_set_tcb_rpl; 1093 ofld_rxq->iq.l2t_write_rpl = do_l2t_write_rpl2; 1094 } 1095 } 1096 } 1097 1098 sc->tom_softc = td; 1099 register_toedev(sc->tom_softc); 1100 1101done: 1102 if (rc != 0) 1103 free_tom_data(sc, td); 1104 return (rc); 1105} 1106 1107static int 1108t4_tom_deactivate(struct adapter *sc) 1109{ 1110 int rc = 0; 1111 struct tom_data *td = sc->tom_softc; 1112 1113 ASSERT_SYNCHRONIZED_OP(sc); 1114 1115 if (td == NULL) 1116 return (0); /* XXX. KASSERT? */ 1117 1118 if (sc->offload_map != 0) 1119 return (EBUSY); /* at least one port has IFCAP_TOE enabled */ 1120 1121 if (uld_active(sc, ULD_IWARP) || uld_active(sc, ULD_ISCSI)) 1122 return (EBUSY); /* both iWARP and iSCSI rely on the TOE. */ 1123 1124 mtx_lock(&td->toep_list_lock); 1125 if (!TAILQ_EMPTY(&td->toep_list)) 1126 rc = EBUSY; 1127 mtx_unlock(&td->toep_list_lock); 1128 1129 mtx_lock(&td->lctx_hash_lock); 1130 if (td->lctx_count > 0) 1131 rc = EBUSY; 1132 mtx_unlock(&td->lctx_hash_lock); 1133 1134 taskqueue_drain(taskqueue_thread, &td->reclaim_wr_resources); 1135 mtx_lock(&td->unsent_wr_lock); 1136 if (!STAILQ_EMPTY(&td->unsent_wr_list)) 1137 rc = EBUSY; 1138 mtx_unlock(&td->unsent_wr_lock); 1139 1140 if (rc == 0) { 1141 unregister_toedev(sc->tom_softc); 1142 free_tom_data(sc, td); 1143 sc->tom_softc = NULL; 1144 } 1145 1146 return (rc); 1147} 1148 1149static void 1150t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp) 1151{ 1152 1153 atomic_add_rel_int(&in6_ifaddr_gen, 1); 1154 taskqueue_enqueue_timeout(taskqueue_thread, &clip_task, -hz / 4); 1155} 1156 1157static int 1158t4_tom_mod_load(void) 1159{ 1160 int rc; 1161 struct protosw *tcp_protosw, *tcp6_protosw; 1162 1163 /* CPL handlers */ 1164 t4_init_connect_cpl_handlers(); 1165 t4_init_listen_cpl_handlers(); 1166 t4_init_cpl_io_handlers(); 1167 1168 rc = t4_ddp_mod_load(); 1169 if (rc != 0) 1170 return (rc); 1171 1172 tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM); 1173 if (tcp_protosw == NULL) 1174 return (ENOPROTOOPT); 1175 bcopy(tcp_protosw, &ddp_protosw, sizeof(ddp_protosw)); 1176 bcopy(tcp_protosw->pr_usrreqs, &ddp_usrreqs, sizeof(ddp_usrreqs)); 1177 ddp_usrreqs.pru_soreceive = t4_soreceive_ddp; 1178 ddp_protosw.pr_usrreqs = &ddp_usrreqs; 1179 1180 tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM); 1181 if (tcp6_protosw == NULL) 1182 return (ENOPROTOOPT); 1183 bcopy(tcp6_protosw, &ddp6_protosw, sizeof(ddp6_protosw)); 1184 bcopy(tcp6_protosw->pr_usrreqs, &ddp6_usrreqs, sizeof(ddp6_usrreqs)); 1185 ddp6_usrreqs.pru_soreceive = t4_soreceive_ddp; 1186 ddp6_protosw.pr_usrreqs = &ddp6_usrreqs; 1187 1188 TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL); 1189 ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event, 1190 t4_tom_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY); 1191 1192 rc = t4_register_uld(&tom_uld_info); 1193 if (rc != 0) 1194 t4_tom_mod_unload(); 1195 1196 return (rc); 1197} 1198 1199static void 1200tom_uninit(struct adapter *sc, void *arg __unused) 1201{ 1202 if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4tomun")) 1203 return; 1204 1205 /* Try to free resources (works only if no port has IFCAP_TOE) */ 1206 if (uld_active(sc, ULD_TOM)) 1207 t4_deactivate_uld(sc, ULD_TOM); 1208 1209 end_synchronized_op(sc, 0); 1210} 1211 1212static int 1213t4_tom_mod_unload(void) 1214{ 1215 t4_iterate(tom_uninit, NULL); 1216 1217 if (t4_unregister_uld(&tom_uld_info) == EBUSY) 1218 return (EBUSY); 1219 1220 if (ifaddr_evhandler) { 1221 EVENTHANDLER_DEREGISTER(ifaddr_event, ifaddr_evhandler); 1222 taskqueue_cancel_timeout(taskqueue_thread, &clip_task, NULL); 1223 } 1224 1225 t4_ddp_mod_unload(); 1226 1227 t4_uninit_connect_cpl_handlers(); 1228 t4_uninit_listen_cpl_handlers(); 1229 t4_uninit_cpl_io_handlers(); 1230 1231 return (0); 1232} 1233#endif /* TCP_OFFLOAD */ 1234 1235static int 1236t4_tom_modevent(module_t mod, int cmd, void *arg) 1237{ 1238 int rc = 0; 1239 1240#ifdef TCP_OFFLOAD 1241 switch (cmd) { 1242 case MOD_LOAD: 1243 rc = t4_tom_mod_load(); 1244 break; 1245 1246 case MOD_UNLOAD: 1247 rc = t4_tom_mod_unload(); 1248 break; 1249 1250 default: 1251 rc = EINVAL; 1252 } 1253#else 1254 printf("t4_tom: compiled without TCP_OFFLOAD support.\n"); 1255 rc = EOPNOTSUPP; 1256#endif 1257 return (rc); 1258} 1259 1260static moduledata_t t4_tom_moddata= { 1261 "t4_tom", 1262 t4_tom_modevent, 1263 0 1264}; 1265 1266MODULE_VERSION(t4_tom, 1); 1267MODULE_DEPEND(t4_tom, toecore, 1, 1, 1); 1268MODULE_DEPEND(t4_tom, t4nex, 1, 1, 1); 1269DECLARE_MODULE(t4_tom, t4_tom_moddata, SI_SUB_EXEC, SI_ORDER_ANY); 1270