t4_sge.c revision 220649
1/*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: head/sys/dev/cxgbe/t4_sge.c 220649 2011-04-15 03:09:27Z np $"); 30 31#include "opt_inet.h" 32 33#include <sys/types.h> 34#include <sys/mbuf.h> 35#include <sys/socket.h> 36#include <sys/kernel.h> 37#include <sys/malloc.h> 38#include <sys/queue.h> 39#include <sys/taskqueue.h> 40#include <sys/sysctl.h> 41#include <net/bpf.h> 42#include <net/ethernet.h> 43#include <net/if.h> 44#include <net/if_vlan_var.h> 45#include <netinet/in.h> 46#include <netinet/ip.h> 47#include <netinet/tcp.h> 48 49#include "common/common.h" 50#include "common/t4_regs.h" 51#include "common/t4_regs_values.h" 52#include "common/t4_msg.h" 53#include "common/t4fw_interface.h" 54 55struct fl_buf_info { 56 int size; 57 int type; 58 uma_zone_t zone; 59}; 60 61/* Filled up by t4_sge_modload */ 62static struct fl_buf_info fl_buf_info[FL_BUF_SIZES]; 63 64#define FL_BUF_SIZE(x) (fl_buf_info[x].size) 65#define FL_BUF_TYPE(x) (fl_buf_info[x].type) 66#define FL_BUF_ZONE(x) (fl_buf_info[x].zone) 67 68enum { 69 FL_PKTSHIFT = 2 70}; 71 72#define FL_ALIGN min(CACHE_LINE_SIZE, 32) 73#if CACHE_LINE_SIZE > 64 74#define SPG_LEN 128 75#else 76#define SPG_LEN 64 77#endif 78 79/* Used to track coalesced tx work request */ 80struct txpkts { 81 uint64_t *flitp; /* ptr to flit where next pkt should start */ 82 uint8_t npkt; /* # of packets in this work request */ 83 uint8_t nflits; /* # of flits used by this work request */ 84 uint16_t plen; /* total payload (sum of all packets) */ 85}; 86 87/* A packet's SGL. This + m_pkthdr has all info needed for tx */ 88struct sgl { 89 int nsegs; /* # of segments in the SGL, 0 means imm. 
tx */ 90 int nflits; /* # of flits needed for the SGL */ 91 bus_dma_segment_t seg[TX_SGL_SEGS]; 92}; 93 94static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int, 95 int, iq_intr_handler_t *, char *); 96static inline void init_fl(struct sge_fl *, int, char *); 97static inline void init_txq(struct sge_txq *, int, char *); 98static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 99 bus_addr_t *, void **); 100static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 101 void *); 102static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *, 103 int); 104static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *); 105static int alloc_iq(struct sge_iq *, int); 106static int free_iq(struct sge_iq *); 107static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int); 108static int free_rxq(struct port_info *, struct sge_rxq *); 109static int alloc_txq(struct port_info *, struct sge_txq *, int); 110static int free_txq(struct port_info *, struct sge_txq *); 111static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 112static inline bool is_new_response(const struct sge_iq *, struct rsp_ctrl **); 113static inline void iq_next(struct sge_iq *); 114static inline void ring_fl_db(struct adapter *, struct sge_fl *); 115static void refill_fl(struct sge_fl *, int); 116static int alloc_fl_sdesc(struct sge_fl *); 117static void free_fl_sdesc(struct sge_fl *); 118static int alloc_eq_maps(struct sge_eq *); 119static void free_eq_maps(struct sge_eq *); 120static void set_fl_tag_idx(struct sge_fl *, int); 121 122static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int); 123static int free_pkt_sgl(struct sge_txq *, struct sgl *); 124static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *, 125 struct sgl *); 126static int add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *, 127 struct mbuf *, struct sgl *); 128static void write_txpkts_wr(struct sge_txq *, struct txpkts *); 129static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *, 130 struct txpkts *, struct mbuf *, struct sgl *); 131static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *); 132static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 133static inline void ring_tx_db(struct adapter *, struct sge_eq *); 134static inline int reclaimable(struct sge_eq *); 135static int reclaim_tx_descs(struct sge_eq *, int, int); 136static void write_eqflush_wr(struct sge_eq *); 137static __be64 get_flit(bus_dma_segment_t *, int, int); 138static int handle_sge_egr_update(struct adapter *, 139 const struct cpl_sge_egr_update *); 140 141/* 142 * Called on MOD_LOAD and fills up fl_buf_info[]. 143 */ 144void 145t4_sge_modload(void) 146{ 147 int i; 148 int bufsize[FL_BUF_SIZES] = { 149 MCLBYTES, 150#if MJUMPAGESIZE != MCLBYTES 151 MJUMPAGESIZE, 152#endif 153 MJUM9BYTES, 154 MJUM16BYTES 155 }; 156 157 for (i = 0; i < FL_BUF_SIZES; i++) { 158 FL_BUF_SIZE(i) = bufsize[i]; 159 FL_BUF_TYPE(i) = m_gettype(bufsize[i]); 160 FL_BUF_ZONE(i) = m_getzone(bufsize[i]); 161 } 162} 163 164/** 165 * t4_sge_init - initialize SGE 166 * @sc: the adapter 167 * 168 * Performs SGE initialization needed every time after a chip reset. 169 * We do not initialize any of the queues here, instead the driver 170 * top-level must request them individually. 
171 */ 172void 173t4_sge_init(struct adapter *sc) 174{ 175 struct sge *s = &sc->sge; 176 int i; 177 178 t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT) | 179 V_INGPADBOUNDARY(M_INGPADBOUNDARY) | 180 F_EGRSTATUSPAGESIZE, 181 V_INGPADBOUNDARY(ilog2(FL_ALIGN) - 5) | 182 V_PKTSHIFT(FL_PKTSHIFT) | 183 F_RXPKTCPLMODE | 184 V_EGRSTATUSPAGESIZE(SPG_LEN == 128)); 185 t4_set_reg_field(sc, A_SGE_HOST_PAGE_SIZE, 186 V_HOSTPAGESIZEPF0(M_HOSTPAGESIZEPF0), 187 V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10)); 188 189 for (i = 0; i < FL_BUF_SIZES; i++) { 190 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 191 FL_BUF_SIZE(i)); 192 } 193 194 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, 195 V_THRESHOLD_0(s->counter_val[0]) | 196 V_THRESHOLD_1(s->counter_val[1]) | 197 V_THRESHOLD_2(s->counter_val[2]) | 198 V_THRESHOLD_3(s->counter_val[3])); 199 200 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, 201 V_TIMERVALUE0(us_to_core_ticks(sc, s->timer_val[0])) | 202 V_TIMERVALUE1(us_to_core_ticks(sc, s->timer_val[1]))); 203 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, 204 V_TIMERVALUE2(us_to_core_ticks(sc, s->timer_val[2])) | 205 V_TIMERVALUE3(us_to_core_ticks(sc, s->timer_val[3]))); 206 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, 207 V_TIMERVALUE4(us_to_core_ticks(sc, s->timer_val[4])) | 208 V_TIMERVALUE5(us_to_core_ticks(sc, s->timer_val[5]))); 209} 210 211int 212t4_create_dma_tag(struct adapter *sc) 213{ 214 int rc; 215 216 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 217 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 218 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 219 NULL, &sc->dmat); 220 if (rc != 0) { 221 device_printf(sc->dev, 222 "failed to create main DMA tag: %d\n", rc); 223 } 224 225 return (rc); 226} 227 228int 229t4_destroy_dma_tag(struct adapter *sc) 230{ 231 if (sc->dmat) 232 bus_dma_tag_destroy(sc->dmat); 233 234 return (0); 235} 236 237/* 238 * Allocate and initialize the firmware event queue and the forwarded interrupt 239 * queues, if any. The adapter owns all these queues as they are not associated 240 * with any particular port. 241 * 242 * Returns errno on failure. Resources allocated up to that point may still be 243 * allocated. Caller is responsible for cleanup in case this function fails. 244 */ 245int 246t4_setup_adapter_iqs(struct adapter *sc) 247{ 248 int i, rc; 249 struct sge_iq *iq, *fwq; 250 iq_intr_handler_t *handler; 251 char name[16]; 252 253 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 254 255 fwq = &sc->sge.fwq; 256 if (sc->flags & INTR_FWD) { 257 iq = &sc->sge.fiq[0]; 258 259 /* 260 * Forwarded interrupt queues - allocate 1 if there's only 1 261 * vector available, one less than the number of vectors 262 * otherwise (the first vector is reserved for the error 263 * interrupt in that case). 264 */ 265 i = sc->intr_count > 1 ? 
1 : 0; 266 for (; i < sc->intr_count; i++, iq++) { 267 268 snprintf(name, sizeof(name), "%s fiq%d", 269 device_get_nameunit(sc->dev), i); 270 init_iq(iq, sc, 0, 0, (sc->sge.nrxq + 1) * 2, 16, NULL, 271 name); 272 273 rc = alloc_iq(iq, i); 274 if (rc != 0) { 275 device_printf(sc->dev, 276 "failed to create fwd intr queue %d: %d\n", 277 i, rc); 278 return (rc); 279 } 280 } 281 282 handler = t4_evt_rx; 283 i = 0; /* forward fwq's interrupt to the first fiq */ 284 } else { 285 handler = NULL; 286 i = 1; /* fwq should use vector 1 (0 is used by error) */ 287 } 288 289 snprintf(name, sizeof(name), "%s fwq", device_get_nameunit(sc->dev)); 290 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE, FW_IQ_ESIZE, handler, name); 291 rc = alloc_iq(fwq, i); 292 if (rc != 0) { 293 device_printf(sc->dev, 294 "failed to create firmware event queue: %d\n", rc); 295 } 296 297 return (rc); 298} 299 300/* 301 * Idempotent 302 */ 303int 304t4_teardown_adapter_iqs(struct adapter *sc) 305{ 306 int i; 307 struct sge_iq *iq; 308 309 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 310 311 iq = &sc->sge.fwq; 312 free_iq(iq); 313 if (sc->flags & INTR_FWD) { 314 for (i = 0; i < NFIQ(sc); i++) { 315 iq = &sc->sge.fiq[i]; 316 free_iq(iq); 317 } 318 } 319 320 return (0); 321} 322 323int 324t4_setup_eth_queues(struct port_info *pi) 325{ 326 int rc = 0, i, intr_idx; 327 struct sge_rxq *rxq; 328 struct sge_txq *txq; 329 char name[16]; 330 struct adapter *sc = pi->adapter; 331 332 if (sysctl_ctx_init(&pi->ctx) == 0) { 333 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 334 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 335 336 pi->oid_rxq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, 337 "rxq", CTLFLAG_RD, NULL, "rx queues"); 338 pi->oid_txq = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, 339 "txq", CTLFLAG_RD, NULL, "tx queues"); 340 } 341 342 for_each_rxq(pi, i, rxq) { 343 344 snprintf(name, sizeof(name), "%s rxq%d-iq", 345 device_get_nameunit(pi->dev), i); 346 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 347 pi->qsize_rxq, RX_IQ_ESIZE, 348 sc->flags & INTR_FWD ? 
t4_eth_rx : NULL, name); 349 350 snprintf(name, sizeof(name), "%s rxq%d-fl", 351 device_get_nameunit(pi->dev), i); 352 init_fl(&rxq->fl, pi->qsize_rxq / 8, name); 353 354 if (sc->flags & INTR_FWD) 355 intr_idx = (pi->first_rxq + i) % NFIQ(sc); 356 else 357 intr_idx = pi->first_rxq + i + 2; 358 359 rc = alloc_rxq(pi, rxq, intr_idx, i); 360 if (rc != 0) 361 goto done; 362 363 intr_idx++; 364 } 365 366 for_each_txq(pi, i, txq) { 367 368 snprintf(name, sizeof(name), "%s txq%d", 369 device_get_nameunit(pi->dev), i); 370 init_txq(txq, pi->qsize_txq, name); 371 372 rc = alloc_txq(pi, txq, i); 373 if (rc != 0) 374 goto done; 375 } 376 377done: 378 if (rc) 379 t4_teardown_eth_queues(pi); 380 381 return (rc); 382} 383 384/* 385 * Idempotent 386 */ 387int 388t4_teardown_eth_queues(struct port_info *pi) 389{ 390 int i; 391 struct sge_rxq *rxq; 392 struct sge_txq *txq; 393 394 /* Do this before freeing the queues */ 395 if (pi->oid_txq || pi->oid_rxq) { 396 sysctl_ctx_free(&pi->ctx); 397 pi->oid_txq = pi->oid_rxq = NULL; 398 } 399 400 for_each_txq(pi, i, txq) { 401 free_txq(pi, txq); 402 } 403 404 for_each_rxq(pi, i, rxq) { 405 free_rxq(pi, rxq); 406 } 407 408 return (0); 409} 410 411/* Deals with errors and forwarded interrupts */ 412void 413t4_intr_all(void *arg) 414{ 415 struct adapter *sc = arg; 416 417 t4_intr_err(arg); 418 t4_intr_fwd(&sc->sge.fiq[0]); 419} 420 421/* Deals with forwarded interrupts on the given ingress queue */ 422void 423t4_intr_fwd(void *arg) 424{ 425 struct sge_iq *iq = arg, *q; 426 struct adapter *sc = iq->adapter; 427 struct rsp_ctrl *ctrl; 428 int ndesc_pending = 0, ndesc_total = 0; 429 int qid; 430 431 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY)) 432 return; 433 434 while (is_new_response(iq, &ctrl)) { 435 436 rmb(); 437 438 /* Only interrupt muxing expected on this queue */ 439 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_INTR, 440 ("unexpected event on forwarded interrupt queue: %x", 441 G_RSPD_TYPE(ctrl->u.type_gen))); 442 443 qid = ntohl(ctrl->pldbuflen_qid) - sc->sge.iq_start; 444 q = sc->sge.iqmap[qid]; 445 446 q->handler(q); 447 448 ndesc_total++; 449 if (++ndesc_pending >= iq->qsize / 4) { 450 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 451 V_CIDXINC(ndesc_pending) | 452 V_INGRESSQID(iq->cntxt_id) | 453 V_SEINTARM( 454 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 455 ndesc_pending = 0; 456 } 457 458 iq_next(iq); 459 } 460 461 if (ndesc_total > 0) { 462 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 463 V_CIDXINC(ndesc_pending) | V_INGRESSQID((u32)iq->cntxt_id) | 464 V_SEINTARM(iq->intr_params)); 465 } 466 467 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE); 468} 469 470/* Deals with error interrupts */ 471void 472t4_intr_err(void *arg) 473{ 474 struct adapter *sc = arg; 475 476 if (sc->intr_type == INTR_INTX) 477 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 478 479 t4_slow_intr_handler(sc); 480} 481 482/* Deals with the firmware event queue */ 483void 484t4_intr_evt(void *arg) 485{ 486 struct sge_iq *iq = arg; 487 488 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY)) 489 return; 490 491 t4_evt_rx(arg); 492 493 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE); 494} 495 496void 497t4_intr_data(void *arg) 498{ 499 struct sge_iq *iq = arg; 500 501 if (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_BUSY)) 502 return; 503 504 t4_eth_rx(arg); 505 506 atomic_cmpset_32(&iq->state, IQS_BUSY, IQS_IDLE); 507} 508 509void 510t4_evt_rx(void *arg) 511{ 512 struct sge_iq *iq = arg; 513 struct adapter *sc = iq->adapter; 514 struct rsp_ctrl *ctrl; 515 const struct 
rss_header *rss; 516 int ndesc_pending = 0, ndesc_total = 0; 517 518 KASSERT(iq == &sc->sge.fwq, ("%s: unexpected ingress queue", __func__)); 519 520 while (is_new_response(iq, &ctrl)) { 521 522 rmb(); 523 524 rss = (const void *)iq->cdesc; 525 526 /* Should only get CPL on this queue */ 527 KASSERT(G_RSPD_TYPE(ctrl->u.type_gen) == X_RSPD_TYPE_CPL, 528 ("%s: unexpected type %d", __func__, 529 G_RSPD_TYPE(ctrl->u.type_gen))); 530 531 switch (rss->opcode) { 532 case CPL_FW4_MSG: 533 case CPL_FW6_MSG: { 534 const struct cpl_fw6_msg *cpl; 535 536 cpl = (const void *)(rss + 1); 537 if (cpl->type == FW6_TYPE_CMD_RPL) 538 t4_handle_fw_rpl(sc, cpl->data); 539 540 break; 541 } 542 case CPL_SGE_EGR_UPDATE: 543 handle_sge_egr_update(sc, (const void *)(rss + 1)); 544 break; 545 546 default: 547 device_printf(sc->dev, 548 "can't handle CPL opcode %d.", rss->opcode); 549 } 550 551 ndesc_total++; 552 if (++ndesc_pending >= iq->qsize / 4) { 553 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 554 V_CIDXINC(ndesc_pending) | 555 V_INGRESSQID(iq->cntxt_id) | 556 V_SEINTARM( 557 V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 558 ndesc_pending = 0; 559 } 560 iq_next(iq); 561 } 562 563 if (ndesc_total > 0) { 564 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 565 V_CIDXINC(ndesc_pending) | V_INGRESSQID(iq->cntxt_id) | 566 V_SEINTARM(iq->intr_params)); 567 } 568} 569 570void 571t4_eth_rx(void *arg) 572{ 573 struct sge_rxq *rxq = arg; 574 struct sge_iq *iq = arg; 575 struct adapter *sc = iq->adapter; 576 struct rsp_ctrl *ctrl; 577 struct ifnet *ifp = rxq->ifp; 578 struct sge_fl *fl = &rxq->fl; 579 struct fl_sdesc *sd = &fl->sdesc[fl->cidx], *sd_next; 580 const struct rss_header *rss; 581 const struct cpl_rx_pkt *cpl; 582 uint32_t len; 583 int ndescs = 0, i; 584 struct mbuf *m0, *m; 585#ifdef INET 586 struct lro_ctrl *lro = &rxq->lro; 587 struct lro_entry *l; 588#endif 589 590 prefetch(sd->m); 591 prefetch(sd->cl); 592 593 iq->intr_next = iq->intr_params; 594 while (is_new_response(iq, &ctrl)) { 595 596 rmb(); 597 598 rss = (const void *)iq->cdesc; 599 i = G_RSPD_TYPE(ctrl->u.type_gen); 600 601 if (__predict_false(i == X_RSPD_TYPE_CPL)) { 602 603 /* Can't be anything except an egress update */ 604 KASSERT(rss->opcode == CPL_SGE_EGR_UPDATE, 605 ("%s: unexpected CPL %x", __func__, rss->opcode)); 606 607 handle_sge_egr_update(sc, (const void *)(rss + 1)); 608 goto nextdesc; 609 } 610 KASSERT(i == X_RSPD_TYPE_FLBUF && rss->opcode == CPL_RX_PKT, 611 ("%s: unexpected CPL %x rsp %d", __func__, rss->opcode, i)); 612 613 sd_next = sd + 1; 614 if (__predict_false(fl->cidx + 1 == fl->cap)) 615 sd_next = fl->sdesc; 616 prefetch(sd_next->m); 617 prefetch(sd_next->cl); 618 619 cpl = (const void *)(rss + 1); 620 621 m0 = sd->m; 622 sd->m = NULL; /* consumed */ 623 624 len = be32toh(ctrl->pldbuflen_qid); 625 if (__predict_false((len & F_RSPD_NEWBUF) == 0)) 626 panic("%s: cannot handle packed frames", __func__); 627 len = G_RSPD_LEN(len); 628 629 bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, 630 BUS_DMASYNC_POSTREAD); 631 632 m_init(m0, NULL, 0, M_NOWAIT, MT_DATA, M_PKTHDR); 633 if (len < MINCLSIZE) { 634 /* copy data to mbuf, buffer will be recycled */ 635 bcopy(sd->cl, mtod(m0, caddr_t), len); 636 m0->m_len = len; 637 } else { 638 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map); 639 m_cljset(m0, sd->cl, FL_BUF_TYPE(sd->tag_idx)); 640 sd->cl = NULL; /* consumed */ 641 m0->m_len = min(len, FL_BUF_SIZE(sd->tag_idx)); 642 } 643 644 len -= FL_PKTSHIFT; 645 m0->m_len -= FL_PKTSHIFT; 646 m0->m_data += FL_PKTSHIFT; 647 648 m0->m_pkthdr.len = len; 649 
m0->m_pkthdr.rcvif = ifp; 650 m0->m_flags |= M_FLOWID; 651 m0->m_pkthdr.flowid = rss->hash_val; 652 653 if (cpl->csum_calc && !cpl->err_vec && 654 ifp->if_capenable & IFCAP_RXCSUM) { 655 m0->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | 656 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 657 if (cpl->ip_frag) 658 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 659 else 660 m0->m_pkthdr.csum_data = 0xffff; 661 rxq->rxcsum++; 662 } 663 664 if (cpl->vlan_ex) { 665 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 666 m0->m_flags |= M_VLANTAG; 667 rxq->vlan_extraction++; 668 } 669 670 i = 1; /* # of fl sdesc used */ 671 sd = sd_next; 672 if (__predict_false(++fl->cidx == fl->cap)) 673 fl->cidx = 0; 674 675 len -= m0->m_len; 676 m = m0; 677 while (len) { 678 i++; 679 680 sd_next = sd + 1; 681 if (__predict_false(fl->cidx + 1 == fl->cap)) 682 sd_next = fl->sdesc; 683 prefetch(sd_next->m); 684 prefetch(sd_next->cl); 685 686 m->m_next = sd->m; 687 sd->m = NULL; /* consumed */ 688 m = m->m_next; 689 690 bus_dmamap_sync(fl->tag[sd->tag_idx], sd->map, 691 BUS_DMASYNC_POSTREAD); 692 693 m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 0); 694 if (len <= MLEN) { 695 bcopy(sd->cl, mtod(m, caddr_t), len); 696 m->m_len = len; 697 } else { 698 bus_dmamap_unload(fl->tag[sd->tag_idx], 699 sd->map); 700 m_cljset(m, sd->cl, FL_BUF_TYPE(sd->tag_idx)); 701 sd->cl = NULL; /* consumed */ 702 m->m_len = min(len, FL_BUF_SIZE(sd->tag_idx)); 703 } 704 705 i++; 706 sd = sd_next; 707 if (__predict_false(++fl->cidx == fl->cap)) 708 fl->cidx = 0; 709 710 len -= m->m_len; 711 } 712 713#ifdef INET 714 if (cpl->l2info & htobe32(F_RXF_LRO) && 715 rxq->flags & RXQ_LRO_ENABLED && 716 tcp_lro_rx(lro, m0, 0) == 0) { 717 /* queued for LRO */ 718 } else 719#endif 720 ifp->if_input(ifp, m0); 721 722 FL_LOCK(fl); 723 fl->needed += i; 724 if (fl->needed >= 32) 725 refill_fl(fl, 64); 726 if (fl->pending >= 32) 727 ring_fl_db(sc, fl); 728 FL_UNLOCK(fl); 729 730nextdesc: ndescs++; 731 iq_next(iq); 732 733 if (ndescs > 32) { 734 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 735 V_CIDXINC(ndescs) | 736 V_INGRESSQID((u32)iq->cntxt_id) | 737 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 738 ndescs = 0; 739 } 740 } 741 742#ifdef INET 743 while (!SLIST_EMPTY(&lro->lro_active)) { 744 l = SLIST_FIRST(&lro->lro_active); 745 SLIST_REMOVE_HEAD(&lro->lro_active, next); 746 tcp_lro_flush(lro, l); 747 } 748#endif 749 750 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) | 751 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next)); 752 753 FL_LOCK(fl); 754 if (fl->needed >= 32) 755 refill_fl(fl, 128); 756 if (fl->pending >= 8) 757 ring_fl_db(sc, fl); 758 FL_UNLOCK(fl); 759} 760 761/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */ 762#define TXPKTS_PKT_HDR ((\ 763 sizeof(struct ulp_txpkt) + \ 764 sizeof(struct ulptx_idata) + \ 765 sizeof(struct cpl_tx_pkt_core) \ 766 ) / 8) 767 768/* Header of a coalesced tx WR, before SGL of first packet (in flits) */ 769#define TXPKTS_WR_HDR (\ 770 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \ 771 TXPKTS_PKT_HDR) 772 773/* Header of a tx WR, before SGL of first packet (in flits) */ 774#define TXPKT_WR_HDR ((\ 775 sizeof(struct fw_eth_tx_pkt_wr) + \ 776 sizeof(struct cpl_tx_pkt_core) \ 777 ) / 8 ) 778 779/* Header of a tx LSO WR, before SGL of first packet (in flits) */ 780#define TXPKT_LSO_WR_HDR ((\ 781 sizeof(struct fw_eth_tx_pkt_wr) + \ 782 sizeof(struct cpl_tx_pkt_lso) + \ 783 sizeof(struct cpl_tx_pkt_core) \ 784 ) / 8 ) 785 786int 787t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, 
struct mbuf *m) 788{ 789 struct port_info *pi = (void *)ifp->if_softc; 790 struct adapter *sc = pi->adapter; 791 struct sge_eq *eq = &txq->eq; 792 struct buf_ring *br = eq->br; 793 struct mbuf *next; 794 int rc, coalescing, can_reclaim; 795 struct txpkts txpkts; 796 struct sgl sgl; 797 798 TXQ_LOCK_ASSERT_OWNED(txq); 799 KASSERT(m, ("%s: called with nothing to do.", __func__)); 800 801 prefetch(&eq->desc[eq->pidx]); 802 prefetch(&eq->sdesc[eq->pidx]); 803 804 txpkts.npkt = 0;/* indicates there's nothing in txpkts */ 805 coalescing = 0; 806 807 if (eq->avail < 8) 808 reclaim_tx_descs(eq, 0, 8); 809 810 for (; m; m = next ? next : drbr_dequeue(ifp, br)) { 811 812 if (eq->avail < 8) 813 break; 814 815 next = m->m_nextpkt; 816 m->m_nextpkt = NULL; 817 818 if (next || buf_ring_peek(br)) 819 coalescing = 1; 820 821 rc = get_pkt_sgl(txq, &m, &sgl, coalescing); 822 if (rc != 0) { 823 if (rc == ENOMEM) { 824 825 /* Short of resources, suspend tx */ 826 827 m->m_nextpkt = next; 828 break; 829 } 830 831 /* 832 * Unrecoverable error for this packet, throw it away 833 * and move on to the next. get_pkt_sgl may already 834 * have freed m (it will be NULL in that case and the 835 * m_freem here is still safe). 836 */ 837 838 m_freem(m); 839 continue; 840 } 841 842 if (coalescing && 843 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) { 844 845 /* Successfully absorbed into txpkts */ 846 847 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl); 848 goto doorbell; 849 } 850 851 /* 852 * We weren't coalescing to begin with, or current frame could 853 * not be coalesced (add_to_txpkts flushes txpkts if a frame 854 * given to it can't be coalesced). Either way there should be 855 * nothing in txpkts. 856 */ 857 KASSERT(txpkts.npkt == 0, 858 ("%s: txpkts not empty: %d", __func__, txpkts.npkt)); 859 860 /* We're sending out individual packets now */ 861 coalescing = 0; 862 863 if (eq->avail < 8) 864 reclaim_tx_descs(eq, 0, 8); 865 rc = write_txpkt_wr(pi, txq, m, &sgl); 866 if (rc != 0) { 867 868 /* Short of hardware descriptors, suspend tx */ 869 870 /* 871 * This is an unlikely but expensive failure. We've 872 * done all the hard work (DMA mappings etc.) and now we 873 * can't send out the packet. What's worse, we have to 874 * spend even more time freeing up everything in sgl. 875 */ 876 txq->no_desc++; 877 free_pkt_sgl(txq, &sgl); 878 879 m->m_nextpkt = next; 880 break; 881 } 882 883 ETHER_BPF_MTAP(ifp, m); 884 if (sgl.nsegs == 0) 885 m_freem(m); 886 887doorbell: 888 /* Fewer and fewer doorbells as the queue fills up */ 889 if (eq->pending >= (1 << (fls(eq->qsize - eq->avail) / 2))) 890 ring_tx_db(sc, eq); 891 892 can_reclaim = reclaimable(eq); 893 if (can_reclaim >= 32) 894 reclaim_tx_descs(eq, can_reclaim, 32); 895 } 896 897 if (txpkts.npkt > 0) 898 write_txpkts_wr(txq, &txpkts); 899 900 /* 901 * m not NULL means there was an error but we haven't thrown it away. 902 * This can happen when we're short of tx descriptors (no_desc) or maybe 903 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim 904 * will get things going again. 905 * 906 * If eq->avail is already 0 we know a credit flush was requested in the 907 * WR that reduced it to 0 so we don't need another flush (we don't have 908 * any descriptor for a flush WR anyway, duh). 
909 */ 910 if (m && eq->avail > 0 && !(eq->flags & EQ_CRFLUSHED)) 911 write_eqflush_wr(eq); 912 txq->m = m; 913 914 if (eq->pending) 915 ring_tx_db(sc, eq); 916 917 can_reclaim = reclaimable(eq); 918 if (can_reclaim >= 32) 919 reclaim_tx_descs(eq, can_reclaim, 128); 920 921 return (0); 922} 923 924void 925t4_update_fl_bufsize(struct ifnet *ifp) 926{ 927 struct port_info *pi = ifp->if_softc; 928 struct sge_rxq *rxq; 929 struct sge_fl *fl; 930 int i; 931 932 for_each_rxq(pi, i, rxq) { 933 fl = &rxq->fl; 934 935 FL_LOCK(fl); 936 set_fl_tag_idx(fl, ifp->if_mtu); 937 FL_UNLOCK(fl); 938 } 939} 940 941/* 942 * A non-NULL handler indicates this iq will not receive direct interrupts, the 943 * handler will be invoked by a forwarded interrupt queue. 944 */ 945static inline void 946init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 947 int qsize, int esize, iq_intr_handler_t *handler, char *name) 948{ 949 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 950 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 951 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 952 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 953 954 iq->flags = 0; 955 iq->adapter = sc; 956 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx) | 957 V_QINTR_CNT_EN(pktc_idx >= 0); 958 iq->intr_pktc_idx = pktc_idx; 959 iq->qsize = roundup(qsize, 16); /* See FW_IQ_CMD/iqsize */ 960 iq->esize = max(esize, 16); /* See FW_IQ_CMD/iqesize */ 961 iq->handler = handler; 962 strlcpy(iq->lockname, name, sizeof(iq->lockname)); 963} 964 965static inline void 966init_fl(struct sge_fl *fl, int qsize, char *name) 967{ 968 fl->qsize = qsize; 969 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 970} 971 972static inline void 973init_txq(struct sge_txq *txq, int qsize, char *name) 974{ 975 txq->eq.qsize = qsize; 976 strlcpy(txq->eq.lockname, name, sizeof(txq->eq.lockname)); 977} 978 979static int 980alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 981 bus_dmamap_t *map, bus_addr_t *pa, void **va) 982{ 983 int rc; 984 985 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 986 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 987 if (rc != 0) { 988 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 989 goto done; 990 } 991 992 rc = bus_dmamem_alloc(*tag, va, 993 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 994 if (rc != 0) { 995 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 996 goto done; 997 } 998 999 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 1000 if (rc != 0) { 1001 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 1002 goto done; 1003 } 1004done: 1005 if (rc) 1006 free_ring(sc, *tag, *map, *pa, *va); 1007 1008 return (rc); 1009} 1010 1011static int 1012free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 1013 bus_addr_t pa, void *va) 1014{ 1015 if (pa) 1016 bus_dmamap_unload(tag, map); 1017 if (va) 1018 bus_dmamem_free(tag, va, map); 1019 if (tag) 1020 bus_dma_tag_destroy(tag); 1021 1022 return (0); 1023} 1024 1025/* 1026 * Allocates the ring for an ingress queue and an optional freelist. If the 1027 * freelist is specified it will be allocated and then associated with the 1028 * ingress queue. 1029 * 1030 * Returns errno on failure. Resources allocated up to that point may still be 1031 * allocated. Caller is responsible for cleanup in case this function fails. 
1032 * 1033 * If the ingress queue will take interrupts directly (iq->handler == NULL) then 1034 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 1035 * the index of the queue to which its interrupts will be forwarded. 1036 */ 1037static int 1038alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 1039 int intr_idx) 1040{ 1041 int rc, i, cntxt_id; 1042 size_t len; 1043 struct fw_iq_cmd c; 1044 struct adapter *sc = iq->adapter; 1045 __be32 v = 0; 1046 1047 /* The adapter queues are nominally allocated in port[0]'s name */ 1048 if (pi == NULL) 1049 pi = sc->port[0]; 1050 1051 len = iq->qsize * iq->esize; 1052 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 1053 (void **)&iq->desc); 1054 if (rc != 0) 1055 return (rc); 1056 1057 bzero(&c, sizeof(c)); 1058 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 1059 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 1060 V_FW_IQ_CMD_VFN(0)); 1061 1062 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 1063 FW_LEN16(c)); 1064 1065 /* Special handling for firmware event queue */ 1066 if (iq == &sc->sge.fwq) 1067 v |= F_FW_IQ_CMD_IQASYNCH; 1068 1069 if (iq->handler) { 1070 KASSERT(intr_idx < NFIQ(sc), 1071 ("%s: invalid indirect intr_idx %d", __func__, intr_idx)); 1072 v |= F_FW_IQ_CMD_IQANDST; 1073 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fiq[intr_idx].abs_id); 1074 } else { 1075 KASSERT(intr_idx < sc->intr_count, 1076 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 1077 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 1078 } 1079 1080 c.type_to_iqandstindex = htobe32(v | 1081 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 1082 V_FW_IQ_CMD_VIID(pi->viid) | 1083 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 1084 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 1085 F_FW_IQ_CMD_IQGTSMODE | 1086 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 1087 V_FW_IQ_CMD_IQESIZE(ilog2(iq->esize) - 4)); 1088 c.iqsize = htobe16(iq->qsize); 1089 c.iqaddr = htobe64(iq->ba); 1090 1091 if (fl) { 1092 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 1093 1094 for (i = 0; i < FL_BUF_SIZES; i++) { 1095 1096 /* 1097 * A freelist buffer must be 16 byte aligned as the SGE 1098 * uses the low 4 bits of the bus addr to figure out the 1099 * buffer size. 1100 */ 1101 rc = bus_dma_tag_create(sc->dmat, 16, 0, 1102 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 1103 FL_BUF_SIZE(i), 1, FL_BUF_SIZE(i), BUS_DMA_ALLOCNOW, 1104 NULL, NULL, &fl->tag[i]); 1105 if (rc != 0) { 1106 device_printf(sc->dev, 1107 "failed to create fl DMA tag[%d]: %d\n", 1108 i, rc); 1109 return (rc); 1110 } 1111 } 1112 len = fl->qsize * RX_FL_ESIZE; 1113 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 1114 &fl->ba, (void **)&fl->desc); 1115 if (rc) 1116 return (rc); 1117 1118 /* Allocate space for one software descriptor per buffer. 
*/ 1119 fl->cap = (fl->qsize - SPG_LEN / RX_FL_ESIZE) * 8; 1120 FL_LOCK(fl); 1121 set_fl_tag_idx(fl, pi->ifp->if_mtu); 1122 rc = alloc_fl_sdesc(fl); 1123 FL_UNLOCK(fl); 1124 if (rc != 0) { 1125 device_printf(sc->dev, 1126 "failed to setup fl software descriptors: %d\n", 1127 rc); 1128 return (rc); 1129 } 1130 fl->needed = fl->cap - 1; /* one less to avoid cidx = pidx */ 1131 1132 c.iqns_to_fl0congen = 1133 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE)); 1134 c.fl0dcaen_to_fl0cidxfthresh = 1135 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | 1136 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 1137 c.fl0size = htobe16(fl->qsize); 1138 c.fl0addr = htobe64(fl->ba); 1139 } 1140 1141 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 1142 if (rc != 0) { 1143 device_printf(sc->dev, 1144 "failed to create ingress queue: %d\n", rc); 1145 return (rc); 1146 } 1147 1148 iq->cdesc = iq->desc; 1149 iq->cidx = 0; 1150 iq->gen = 1; 1151 iq->intr_next = iq->intr_params; 1152 iq->cntxt_id = be16toh(c.iqid); 1153 iq->abs_id = be16toh(c.physiqid); 1154 iq->flags |= (IQ_ALLOCATED | IQ_STARTED); 1155 1156 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 1157 KASSERT(cntxt_id < sc->sge.niq, 1158 ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 1159 cntxt_id, sc->sge.niq - 1)); 1160 sc->sge.iqmap[cntxt_id] = iq; 1161 1162 if (fl) { 1163 fl->cntxt_id = be16toh(c.fl0id); 1164 fl->pidx = fl->cidx = 0; 1165 1166 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 1167 KASSERT(cntxt_id < sc->sge.neq, 1168 ("%s: fl->cntxt_id (%d) more than the max (%d)", __func__, 1169 cntxt_id, sc->sge.neq - 1)); 1170 sc->sge.eqmap[cntxt_id] = (void *)fl; 1171 1172 FL_LOCK(fl); 1173 refill_fl(fl, -1); 1174 if (fl->pending >= 8) 1175 ring_fl_db(sc, fl); 1176 FL_UNLOCK(fl); 1177 } 1178 1179 /* Enable IQ interrupts */ 1180 atomic_store_rel_32(&iq->state, IQS_IDLE); 1181 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 1182 V_INGRESSQID(iq->cntxt_id)); 1183 1184 return (0); 1185} 1186 1187/* 1188 * This can be called with the iq/fl in any state - fully allocated and 1189 * functional, partially allocated, even all-zeroed out. 1190 */ 1191static int 1192free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 1193{ 1194 int i, rc; 1195 struct adapter *sc = iq->adapter; 1196 device_t dev; 1197 1198 if (sc == NULL) 1199 return (0); /* nothing to do */ 1200 1201 dev = pi ? pi->dev : sc->dev; 1202 1203 if (iq->flags & IQ_STARTED) { 1204 rc = -t4_iq_start_stop(sc, sc->mbox, 0, sc->pf, 0, 1205 iq->cntxt_id, fl ? fl->cntxt_id : 0xffff, 0xffff); 1206 if (rc != 0) { 1207 device_printf(dev, 1208 "failed to stop queue %p: %d\n", iq, rc); 1209 return (rc); 1210 } 1211 iq->flags &= ~IQ_STARTED; 1212 1213 /* Synchronize with the interrupt handler */ 1214 while (!atomic_cmpset_32(&iq->state, IQS_IDLE, IQS_DISABLED)) 1215 pause("iqfree", hz / 1000); 1216 } 1217 1218 if (iq->flags & IQ_ALLOCATED) { 1219 1220 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 1221 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 1222 fl ? 
fl->cntxt_id : 0xffff, 0xffff); 1223 if (rc != 0) { 1224 device_printf(dev, 1225 "failed to free queue %p: %d\n", iq, rc); 1226 return (rc); 1227 } 1228 iq->flags &= ~IQ_ALLOCATED; 1229 } 1230 1231 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 1232 1233 bzero(iq, sizeof(*iq)); 1234 1235 if (fl) { 1236 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 1237 fl->desc); 1238 1239 if (fl->sdesc) { 1240 FL_LOCK(fl); 1241 free_fl_sdesc(fl); 1242 FL_UNLOCK(fl); 1243 } 1244 1245 if (mtx_initialized(&fl->fl_lock)) 1246 mtx_destroy(&fl->fl_lock); 1247 1248 for (i = 0; i < FL_BUF_SIZES; i++) { 1249 if (fl->tag[i]) 1250 bus_dma_tag_destroy(fl->tag[i]); 1251 } 1252 1253 bzero(fl, sizeof(*fl)); 1254 } 1255 1256 return (0); 1257} 1258 1259static int 1260alloc_iq(struct sge_iq *iq, int intr_idx) 1261{ 1262 return alloc_iq_fl(NULL, iq, NULL, intr_idx); 1263} 1264 1265static int 1266free_iq(struct sge_iq *iq) 1267{ 1268 return free_iq_fl(NULL, iq, NULL); 1269} 1270 1271static int 1272alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx) 1273{ 1274 int rc; 1275 struct sysctl_oid *oid; 1276 struct sysctl_oid_list *children; 1277 char name[16]; 1278 1279 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx); 1280 if (rc != 0) 1281 return (rc); 1282 1283#ifdef INET 1284 rc = tcp_lro_init(&rxq->lro); 1285 if (rc != 0) 1286 return (rc); 1287 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 1288 1289 if (pi->ifp->if_capenable & IFCAP_LRO) 1290 rxq->flags |= RXQ_LRO_ENABLED; 1291#endif 1292 rxq->ifp = pi->ifp; 1293 1294 children = SYSCTL_CHILDREN(pi->oid_rxq); 1295 1296 snprintf(name, sizeof(name), "%d", idx); 1297 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 1298 NULL, "rx queue"); 1299 children = SYSCTL_CHILDREN(oid); 1300 1301#ifdef INET 1302 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 1303 &rxq->lro.lro_queued, 0, NULL); 1304 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 1305 &rxq->lro.lro_flushed, 0, NULL); 1306#endif 1307 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 1308 &rxq->rxcsum, "# of times hardware assisted with checksum"); 1309 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 1310 CTLFLAG_RD, &rxq->vlan_extraction, 1311 "# of times hardware extracted 802.1Q tag"); 1312 1313 return (rc); 1314} 1315 1316static int 1317free_rxq(struct port_info *pi, struct sge_rxq *rxq) 1318{ 1319 int rc; 1320 1321#ifdef INET 1322 if (rxq->lro.ifp) { 1323 tcp_lro_free(&rxq->lro); 1324 rxq->lro.ifp = NULL; 1325 } 1326#endif 1327 1328 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 1329 if (rc == 0) 1330 bzero(rxq, sizeof(*rxq)); 1331 1332 return (rc); 1333} 1334 1335static int 1336alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx) 1337{ 1338 int rc, cntxt_id; 1339 size_t len; 1340 struct adapter *sc = pi->adapter; 1341 struct fw_eq_eth_cmd c; 1342 struct sge_eq *eq = &txq->eq; 1343 char name[16]; 1344 struct sysctl_oid *oid; 1345 struct sysctl_oid_list *children; 1346 1347 txq->ifp = pi->ifp; 1348 TASK_INIT(&txq->resume_tx, 0, cxgbe_txq_start, txq); 1349 1350 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 1351 1352 len = eq->qsize * TX_EQ_ESIZE; 1353 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 1354 &eq->ba, (void **)&eq->desc); 1355 if (rc) 1356 return (rc); 1357 1358 eq->cap = eq->qsize - SPG_LEN / TX_EQ_ESIZE; 1359 eq->spg = (void *)&eq->desc[eq->cap]; 1360 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */ 1361 eq->sdesc = 
malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE, 1362 M_ZERO | M_WAITOK); 1363 eq->br = buf_ring_alloc(eq->qsize, M_CXGBE, M_WAITOK, &eq->eq_lock); 1364 eq->iqid = sc->sge.rxq[pi->first_rxq].iq.cntxt_id; 1365 1366 rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR, 1367 BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS, 1368 BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &eq->tx_tag); 1369 if (rc != 0) { 1370 device_printf(sc->dev, 1371 "failed to create tx DMA tag: %d\n", rc); 1372 return (rc); 1373 } 1374 1375 rc = alloc_eq_maps(eq); 1376 if (rc != 0) { 1377 device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc); 1378 return (rc); 1379 } 1380 1381 bzero(&c, sizeof(c)); 1382 1383 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 1384 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 1385 V_FW_EQ_ETH_CMD_VFN(0)); 1386 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 1387 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 1388 c.viid_pkd = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid)); 1389 c.fetchszm_to_iqid = 1390 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 1391 V_FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | 1392 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 1393 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 1394 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 1395 V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 1396 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize)); 1397 c.eqaddr = htobe64(eq->ba); 1398 1399 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 1400 if (rc != 0) { 1401 device_printf(pi->dev, 1402 "failed to create egress queue: %d\n", rc); 1403 return (rc); 1404 } 1405 1406 eq->pidx = eq->cidx = 0; 1407 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 1408 eq->flags |= (EQ_ALLOCATED | EQ_STARTED); 1409 1410 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 1411 KASSERT(cntxt_id < sc->sge.neq, 1412 ("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 1413 cntxt_id, sc->sge.neq - 1)); 1414 sc->sge.eqmap[cntxt_id] = eq; 1415 1416 children = SYSCTL_CHILDREN(pi->oid_txq); 1417 1418 snprintf(name, sizeof(name), "%d", idx); 1419 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 1420 NULL, "tx queue"); 1421 children = SYSCTL_CHILDREN(oid); 1422 1423 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 1424 &txq->txcsum, "# of times hardware assisted with checksum"); 1425 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 1426 CTLFLAG_RD, &txq->vlan_insertion, 1427 "# of times hardware inserted 802.1Q tag"); 1428 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 1429 &txq->tso_wrs, "# of IPv4 TSO work requests"); 1430 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 1431 &txq->imm_wrs, "# of work requests with immediate data"); 1432 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 1433 &txq->sgl_wrs, "# of work requests with direct SGL"); 1434 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 1435 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 1436 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD, 1437 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)"); 1438 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD, 1439 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests"); 1440 1441 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD, 1442 &txq->no_dmamap, 0, "# of times txq ran out of DMA 
maps"); 1443 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 1444 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors"); 1445 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD, 1446 &txq->egr_update, 0, "egress update notifications from the SGE"); 1447 1448 return (rc); 1449} 1450 1451static int 1452free_txq(struct port_info *pi, struct sge_txq *txq) 1453{ 1454 int rc; 1455 struct adapter *sc = pi->adapter; 1456 struct sge_eq *eq = &txq->eq; 1457 1458 if (eq->flags & (EQ_ALLOCATED | EQ_STARTED)) { 1459 1460 /* 1461 * Wait for the response to a credit flush if there's one 1462 * pending. Clearing the flag tells handle_sge_egr_update or 1463 * cxgbe_txq_start (depending on how far the response has made 1464 * it) that they should ignore the response and wake up free_txq 1465 * instead. 1466 * 1467 * The interface has been marked down by the time we get here 1468 * (both IFF_UP and IFF_DRV_RUNNING cleared). qflush has 1469 * emptied the tx buf_rings and we know nothing new is being 1470 * queued for tx so we don't have to worry about a new credit 1471 * flush request. 1472 */ 1473 TXQ_LOCK(txq); 1474 if (eq->flags & EQ_CRFLUSHED) { 1475 eq->flags &= ~EQ_CRFLUSHED; 1476 msleep(txq, &eq->eq_lock, 0, "crflush", 0); 1477 } 1478 TXQ_UNLOCK(txq); 1479 1480 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, eq->cntxt_id); 1481 if (rc != 0) { 1482 device_printf(pi->dev, 1483 "failed to free egress queue %p: %d\n", eq, rc); 1484 return (rc); 1485 } 1486 eq->flags &= ~(EQ_ALLOCATED | EQ_STARTED); 1487 } 1488 1489 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 1490 1491 free(eq->sdesc, M_CXGBE); 1492 1493 if (eq->maps) 1494 free_eq_maps(eq); 1495 1496 buf_ring_free(eq->br, M_CXGBE); 1497 1498 if (eq->tx_tag) 1499 bus_dma_tag_destroy(eq->tx_tag); 1500 1501 if (mtx_initialized(&eq->eq_lock)) 1502 mtx_destroy(&eq->eq_lock); 1503 1504 bzero(txq, sizeof(*txq)); 1505 return (0); 1506} 1507 1508static void 1509oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 1510{ 1511 bus_addr_t *ba = arg; 1512 1513 KASSERT(nseg == 1, 1514 ("%s meant for single segment mappings only.", __func__)); 1515 1516 *ba = error ? 
0 : segs->ds_addr; 1517} 1518 1519static inline bool 1520is_new_response(const struct sge_iq *iq, struct rsp_ctrl **ctrl) 1521{ 1522 *ctrl = (void *)((uintptr_t)iq->cdesc + 1523 (iq->esize - sizeof(struct rsp_ctrl))); 1524 1525 return (((*ctrl)->u.type_gen >> S_RSPD_GEN) == iq->gen); 1526} 1527 1528static inline void 1529iq_next(struct sge_iq *iq) 1530{ 1531 iq->cdesc = (void *) ((uintptr_t)iq->cdesc + iq->esize); 1532 if (__predict_false(++iq->cidx == iq->qsize - 1)) { 1533 iq->cidx = 0; 1534 iq->gen ^= 1; 1535 iq->cdesc = iq->desc; 1536 } 1537} 1538 1539static inline void 1540ring_fl_db(struct adapter *sc, struct sge_fl *fl) 1541{ 1542 int ndesc = fl->pending / 8; 1543 1544 /* Caller responsible for ensuring there's something useful to do */ 1545 KASSERT(ndesc > 0, ("%s called with no useful work to do.", __func__)); 1546 1547 wmb(); 1548 1549 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), F_DBPRIO | 1550 V_QID(fl->cntxt_id) | V_PIDX(ndesc)); 1551 1552 fl->pending &= 7; 1553} 1554 1555static void 1556refill_fl(struct sge_fl *fl, int nbufs) 1557{ 1558 __be64 *d = &fl->desc[fl->pidx]; 1559 struct fl_sdesc *sd = &fl->sdesc[fl->pidx]; 1560 bus_dma_tag_t tag; 1561 bus_addr_t pa; 1562 caddr_t cl; 1563 int rc; 1564 1565 FL_LOCK_ASSERT_OWNED(fl); 1566 1567 if (nbufs < 0 || nbufs > fl->needed) 1568 nbufs = fl->needed; 1569 1570 while (nbufs--) { 1571 1572 if (sd->cl != NULL) { 1573 1574 /* 1575 * This happens when a frame small enough to fit 1576 * entirely in an mbuf was received in cl last time. 1577 * We'd held on to cl and can reuse it now. Note that 1578 * we reuse a cluster of the old size if fl->tag_idx is 1579 * no longer the same as sd->tag_idx. 1580 */ 1581 1582 KASSERT(*d == sd->ba_tag, 1583 ("%s: recyling problem at pidx %d", 1584 __func__, fl->pidx)); 1585 1586 d++; 1587 goto recycled; 1588 } 1589 1590 1591 if (fl->tag_idx != sd->tag_idx) { 1592 bus_dmamap_t map; 1593 bus_dma_tag_t newtag = fl->tag[fl->tag_idx]; 1594 bus_dma_tag_t oldtag = fl->tag[sd->tag_idx]; 1595 1596 /* 1597 * An MTU change can get us here. Discard the old map 1598 * which was created with the old tag, but only if 1599 * we're able to get a new one. 
1600 */ 1601 rc = bus_dmamap_create(newtag, 0, &map); 1602 if (rc == 0) { 1603 bus_dmamap_destroy(oldtag, sd->map); 1604 sd->map = map; 1605 sd->tag_idx = fl->tag_idx; 1606 } 1607 } 1608 1609 tag = fl->tag[sd->tag_idx]; 1610 1611 cl = m_cljget(NULL, M_NOWAIT, FL_BUF_SIZE(sd->tag_idx)); 1612 if (cl == NULL) 1613 break; 1614 1615 rc = bus_dmamap_load(tag, sd->map, cl, FL_BUF_SIZE(sd->tag_idx), 1616 oneseg_dma_callback, &pa, 0); 1617 if (rc != 0 || pa == 0) { 1618 fl->dmamap_failed++; 1619 uma_zfree(FL_BUF_ZONE(sd->tag_idx), cl); 1620 break; 1621 } 1622 1623 sd->cl = cl; 1624 *d++ = htobe64(pa | sd->tag_idx); 1625 1626#ifdef INVARIANTS 1627 sd->ba_tag = htobe64(pa | sd->tag_idx); 1628#endif 1629 1630recycled: 1631 /* sd->m is never recycled, should always be NULL */ 1632 KASSERT(sd->m == NULL, ("%s: stray mbuf", __func__)); 1633 1634 sd->m = m_gethdr(M_NOWAIT, MT_NOINIT); 1635 if (sd->m == NULL) 1636 break; 1637 1638 fl->pending++; 1639 fl->needed--; 1640 sd++; 1641 if (++fl->pidx == fl->cap) { 1642 fl->pidx = 0; 1643 sd = fl->sdesc; 1644 d = fl->desc; 1645 } 1646 } 1647} 1648 1649static int 1650alloc_fl_sdesc(struct sge_fl *fl) 1651{ 1652 struct fl_sdesc *sd; 1653 bus_dma_tag_t tag; 1654 int i, rc; 1655 1656 FL_LOCK_ASSERT_OWNED(fl); 1657 1658 fl->sdesc = malloc(fl->cap * sizeof(struct fl_sdesc), M_CXGBE, 1659 M_ZERO | M_WAITOK); 1660 1661 tag = fl->tag[fl->tag_idx]; 1662 sd = fl->sdesc; 1663 for (i = 0; i < fl->cap; i++, sd++) { 1664 1665 sd->tag_idx = fl->tag_idx; 1666 rc = bus_dmamap_create(tag, 0, &sd->map); 1667 if (rc != 0) 1668 goto failed; 1669 } 1670 1671 return (0); 1672failed: 1673 while (--i >= 0) { 1674 sd--; 1675 bus_dmamap_destroy(tag, sd->map); 1676 if (sd->m) { 1677 m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0); 1678 m_free(sd->m); 1679 sd->m = NULL; 1680 } 1681 } 1682 KASSERT(sd == fl->sdesc, ("%s: EDOOFUS", __func__)); 1683 1684 free(fl->sdesc, M_CXGBE); 1685 fl->sdesc = NULL; 1686 1687 return (rc); 1688} 1689 1690static void 1691free_fl_sdesc(struct sge_fl *fl) 1692{ 1693 struct fl_sdesc *sd; 1694 int i; 1695 1696 FL_LOCK_ASSERT_OWNED(fl); 1697 1698 sd = fl->sdesc; 1699 for (i = 0; i < fl->cap; i++, sd++) { 1700 1701 if (sd->m) { 1702 m_init(sd->m, NULL, 0, M_NOWAIT, MT_DATA, 0); 1703 m_free(sd->m); 1704 sd->m = NULL; 1705 } 1706 1707 if (sd->cl) { 1708 bus_dmamap_unload(fl->tag[sd->tag_idx], sd->map); 1709 uma_zfree(FL_BUF_ZONE(sd->tag_idx), sd->cl); 1710 sd->cl = NULL; 1711 } 1712 1713 bus_dmamap_destroy(fl->tag[sd->tag_idx], sd->map); 1714 } 1715 1716 free(fl->sdesc, M_CXGBE); 1717 fl->sdesc = NULL; 1718} 1719 1720static int 1721alloc_eq_maps(struct sge_eq *eq) 1722{ 1723 struct tx_map *txm; 1724 int i, rc, count; 1725 1726 /* 1727 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE 1728 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is 1729 * sized for the worst case. 
1730 */ 1731 count = eq->qsize * 10 / 8; 1732 eq->map_total = eq->map_avail = count; 1733 eq->map_cidx = eq->map_pidx = 0; 1734 1735 eq->maps = malloc(count * sizeof(struct tx_map), M_CXGBE, 1736 M_ZERO | M_WAITOK); 1737 1738 txm = eq->maps; 1739 for (i = 0; i < count; i++, txm++) { 1740 rc = bus_dmamap_create(eq->tx_tag, 0, &txm->map); 1741 if (rc != 0) 1742 goto failed; 1743 } 1744 1745 return (0); 1746failed: 1747 while (--i >= 0) { 1748 txm--; 1749 bus_dmamap_destroy(eq->tx_tag, txm->map); 1750 } 1751 KASSERT(txm == eq->maps, ("%s: EDOOFUS", __func__)); 1752 1753 free(eq->maps, M_CXGBE); 1754 eq->maps = NULL; 1755 1756 return (rc); 1757} 1758 1759static void 1760free_eq_maps(struct sge_eq *eq) 1761{ 1762 struct tx_map *txm; 1763 int i; 1764 1765 txm = eq->maps; 1766 for (i = 0; i < eq->map_total; i++, txm++) { 1767 1768 if (txm->m) { 1769 bus_dmamap_unload(eq->tx_tag, txm->map); 1770 m_freem(txm->m); 1771 txm->m = NULL; 1772 } 1773 1774 bus_dmamap_destroy(eq->tx_tag, txm->map); 1775 } 1776 1777 free(eq->maps, M_CXGBE); 1778 eq->maps = NULL; 1779} 1780 1781/* 1782 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're 1783 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes 1784 * of immediate data. 1785 */ 1786#define IMM_LEN ( \ 1787 2 * TX_EQ_ESIZE \ 1788 - sizeof(struct fw_eth_tx_pkt_wr) \ 1789 - sizeof(struct cpl_tx_pkt_core)) 1790 1791/* 1792 * Returns non-zero on failure, no need to cleanup anything in that case. 1793 * 1794 * Note 1: We always try to defrag the mbuf if required and return EFBIG only 1795 * if the resulting chain still won't fit in a tx descriptor. 1796 * 1797 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf 1798 * does not have the TCP header in it. 1799 */ 1800static int 1801get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl, 1802 int sgl_only) 1803{ 1804 struct mbuf *m = *fp; 1805 struct sge_eq *eq = &txq->eq; 1806 struct tx_map *txm; 1807 int rc, defragged = 0, n; 1808 1809 TXQ_LOCK_ASSERT_OWNED(txq); 1810 1811 if (m->m_pkthdr.tso_segsz) 1812 sgl_only = 1; /* Do not allow immediate data with LSO */ 1813 1814start: sgl->nsegs = 0; 1815 1816 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only) 1817 return (0); /* nsegs = 0 tells caller to use imm. tx */ 1818 1819 if (eq->map_avail == 0) { 1820 txq->no_dmamap++; 1821 return (ENOMEM); 1822 } 1823 txm = &eq->maps[eq->map_pidx]; 1824 1825 if (m->m_pkthdr.tso_segsz && m->m_len < 50) { 1826 *fp = m_pullup(m, 50); 1827 m = *fp; 1828 if (m == NULL) 1829 return (ENOBUFS); 1830 } 1831 1832 rc = bus_dmamap_load_mbuf_sg(eq->tx_tag, txm->map, m, sgl->seg, 1833 &sgl->nsegs, BUS_DMA_NOWAIT); 1834 if (rc == EFBIG && defragged == 0) { 1835 m = m_defrag(m, M_DONTWAIT); 1836 if (m == NULL) 1837 return (EFBIG); 1838 1839 defragged = 1; 1840 *fp = m; 1841 goto start; 1842 } 1843 if (rc != 0) 1844 return (rc); 1845 1846 txm->m = m; 1847 eq->map_avail--; 1848 if (++eq->map_pidx == eq->map_total) 1849 eq->map_pidx = 0; 1850 1851 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS, 1852 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs)); 1853 1854 /* 1855 * Store the # of flits required to hold this frame's SGL in nflits. An 1856 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by 1857 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used 1858 * then len1 must be set to 0. 
1859 */ 1860 n = sgl->nsegs - 1; 1861 sgl->nflits = (3 * n) / 2 + (n & 1) + 2; 1862 1863 return (0); 1864} 1865 1866 1867/* 1868 * Releases all the txq resources used up in the specified sgl. 1869 */ 1870static int 1871free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl) 1872{ 1873 struct sge_eq *eq = &txq->eq; 1874 struct tx_map *txm; 1875 1876 TXQ_LOCK_ASSERT_OWNED(txq); 1877 1878 if (sgl->nsegs == 0) 1879 return (0); /* didn't use any map */ 1880 1881 /* 1 pkt uses exactly 1 map, back it out */ 1882 1883 eq->map_avail++; 1884 if (eq->map_pidx > 0) 1885 eq->map_pidx--; 1886 else 1887 eq->map_pidx = eq->map_total - 1; 1888 1889 txm = &eq->maps[eq->map_pidx]; 1890 bus_dmamap_unload(eq->tx_tag, txm->map); 1891 txm->m = NULL; 1892 1893 return (0); 1894} 1895 1896static int 1897write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m, 1898 struct sgl *sgl) 1899{ 1900 struct sge_eq *eq = &txq->eq; 1901 struct fw_eth_tx_pkt_wr *wr; 1902 struct cpl_tx_pkt_core *cpl; 1903 uint32_t ctrl; /* used in many unrelated places */ 1904 uint64_t ctrl1; 1905 int nflits, ndesc, pktlen; 1906 struct tx_sdesc *txsd; 1907 caddr_t dst; 1908 1909 TXQ_LOCK_ASSERT_OWNED(txq); 1910 1911 pktlen = m->m_pkthdr.len; 1912 1913 /* 1914 * Do we have enough flits to send this frame out? 1915 */ 1916 ctrl = sizeof(struct cpl_tx_pkt_core); 1917 if (m->m_pkthdr.tso_segsz) { 1918 nflits = TXPKT_LSO_WR_HDR; 1919 ctrl += sizeof(struct cpl_tx_pkt_lso); 1920 } else 1921 nflits = TXPKT_WR_HDR; 1922 if (sgl->nsegs > 0) 1923 nflits += sgl->nflits; 1924 else { 1925 nflits += howmany(pktlen, 8); 1926 ctrl += pktlen; 1927 } 1928 ndesc = howmany(nflits, 8); 1929 if (ndesc > eq->avail) 1930 return (ENOMEM); 1931 1932 /* Firmware work request header */ 1933 wr = (void *)&eq->desc[eq->pidx]; 1934 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 1935 V_FW_WR_IMMDLEN(ctrl)); 1936 ctrl = V_FW_WR_LEN16(howmany(nflits, 2)); 1937 if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) { 1938 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 1939 eq->flags |= EQ_CRFLUSHED; 1940 } 1941 1942 wr->equiq_to_len16 = htobe32(ctrl); 1943 wr->r3 = 0; 1944 1945 if (m->m_pkthdr.tso_segsz) { 1946 struct cpl_tx_pkt_lso *lso = (void *)(wr + 1); 1947 struct ether_header *eh; 1948 struct ip *ip; 1949 struct tcphdr *tcp; 1950 1951 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 1952 F_LSO_LAST_SLICE; 1953 1954 eh = mtod(m, struct ether_header *); 1955 if (eh->ether_type == htons(ETHERTYPE_VLAN)) { 1956 ctrl |= V_LSO_ETHHDR_LEN(1); 1957 ip = (void *)((struct ether_vlan_header *)eh + 1); 1958 } else 1959 ip = (void *)(eh + 1); 1960 1961 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4); 1962 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) | 1963 V_LSO_TCPHDR_LEN(tcp->th_off); 1964 1965 lso->lso_ctrl = htobe32(ctrl); 1966 lso->ipid_ofst = htobe16(0); 1967 lso->mss = htobe16(m->m_pkthdr.tso_segsz); 1968 lso->seqno_offset = htobe32(0); 1969 lso->len = htobe32(pktlen); 1970 1971 cpl = (void *)(lso + 1); 1972 1973 txq->tso_wrs++; 1974 } else 1975 cpl = (void *)(wr + 1); 1976 1977 /* Checksum offload */ 1978 ctrl1 = 0; 1979 if (!(m->m_pkthdr.csum_flags & CSUM_IP)) 1980 ctrl1 |= F_TXPKT_IPCSUM_DIS; 1981 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))) 1982 ctrl1 |= F_TXPKT_L4CSUM_DIS; 1983 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP)) 1984 txq->txcsum++; /* some hardware assistance provided */ 1985 1986 /* VLAN tag insertion */ 1987 if (m->m_flags & M_VLANTAG) { 1988 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 1989 
txq->vlan_insertion++; 1990 } 1991 1992 /* CPL header */ 1993 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 1994 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 1995 cpl->pack = 0; 1996 cpl->len = htobe16(pktlen); 1997 cpl->ctrl1 = htobe64(ctrl1); 1998 1999 /* Software descriptor */ 2000 txsd = &eq->sdesc[eq->pidx]; 2001 txsd->desc_used = ndesc; 2002 2003 eq->pending += ndesc; 2004 eq->avail -= ndesc; 2005 eq->pidx += ndesc; 2006 if (eq->pidx >= eq->cap) 2007 eq->pidx -= eq->cap; 2008 2009 /* SGL */ 2010 dst = (void *)(cpl + 1); 2011 if (sgl->nsegs > 0) { 2012 txsd->map_used = 1; 2013 txq->sgl_wrs++; 2014 write_sgl_to_txd(eq, sgl, &dst); 2015 } else { 2016 txsd->map_used = 0; 2017 txq->imm_wrs++; 2018 for (; m; m = m->m_next) { 2019 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 2020#ifdef INVARIANTS 2021 pktlen -= m->m_len; 2022#endif 2023 } 2024#ifdef INVARIANTS 2025 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 2026#endif 2027 2028 } 2029 2030 txq->txpkt_wrs++; 2031 return (0); 2032} 2033 2034/* 2035 * Returns 0 to indicate that m has been accepted into a coalesced tx work 2036 * request. It has either been folded into txpkts or txpkts was flushed and m 2037 * has started a new coalesced work request (as the first frame in a fresh 2038 * txpkts). 2039 * 2040 * Returns non-zero to indicate a failure - caller is responsible for 2041 * transmitting m, if there was anything in txpkts it has been flushed. 2042 */ 2043static int 2044add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts, 2045 struct mbuf *m, struct sgl *sgl) 2046{ 2047 struct sge_eq *eq = &txq->eq; 2048 int can_coalesce; 2049 struct tx_sdesc *txsd; 2050 int flits; 2051 2052 TXQ_LOCK_ASSERT_OWNED(txq); 2053 2054 if (txpkts->npkt > 0) { 2055 flits = TXPKTS_PKT_HDR + sgl->nflits; 2056 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 2057 txpkts->nflits + flits <= TX_WR_FLITS && 2058 txpkts->nflits + flits <= eq->avail * 8 && 2059 txpkts->plen + m->m_pkthdr.len < 65536; 2060 2061 if (can_coalesce) { 2062 txpkts->npkt++; 2063 txpkts->nflits += flits; 2064 txpkts->plen += m->m_pkthdr.len; 2065 2066 txsd = &eq->sdesc[eq->pidx]; 2067 txsd->map_used++; 2068 2069 return (0); 2070 } 2071 2072 /* 2073 * Couldn't coalesce m into txpkts. The first order of business 2074 * is to send txpkts on its way. Then we'll revisit m. 2075 */ 2076 write_txpkts_wr(txq, txpkts); 2077 } 2078 2079 /* 2080 * Check if we can start a new coalesced tx work request with m as 2081 * the first packet in it. 2082 */ 2083 2084 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__)); 2085 2086 flits = TXPKTS_WR_HDR + sgl->nflits; 2087 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 2088 flits <= eq->avail * 8 && flits <= TX_WR_FLITS; 2089 2090 if (can_coalesce == 0) 2091 return (EINVAL); 2092 2093 /* 2094 * Start a fresh coalesced tx WR with m as the first frame in it. 2095 */ 2096 txpkts->npkt = 1; 2097 txpkts->nflits = flits; 2098 txpkts->flitp = &eq->desc[eq->pidx].flit[2]; 2099 txpkts->plen = m->m_pkthdr.len; 2100 2101 txsd = &eq->sdesc[eq->pidx]; 2102 txsd->map_used = 1; 2103 2104 return (0); 2105} 2106 2107/* 2108 * Note that write_txpkts_wr can never run out of hardware descriptors (but 2109 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for 2110 * coalescing only if sufficient hardware descriptors are available. 
 */
static void
write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts)
{
	struct sge_eq *eq = &txq->eq;
	struct fw_eth_tx_pkts_wr *wr;
	struct tx_sdesc *txsd;
	uint32_t ctrl;
	int ndesc;

	TXQ_LOCK_ASSERT_OWNED(txq);

	ndesc = howmany(txpkts->nflits, 8);

	wr = (void *)&eq->desc[eq->pidx];
	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR) |
	    V_FW_WR_IMMDLEN(0)); /* immdlen does not matter in this WR */
	ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2));
	if (eq->avail == ndesc && !(eq->flags & EQ_CRFLUSHED)) {
		ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ;
		eq->flags |= EQ_CRFLUSHED;
	}
	wr->equiq_to_len16 = htobe32(ctrl);
	wr->plen = htobe16(txpkts->plen);
	wr->npkt = txpkts->npkt;
	wr->r3 = wr->r4 = 0;

	/* Everything else already written */

	txsd = &eq->sdesc[eq->pidx];
	txsd->desc_used = ndesc;

	KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__));

	eq->pending += ndesc;
	eq->avail -= ndesc;
	eq->pidx += ndesc;
	if (eq->pidx >= eq->cap)
		eq->pidx -= eq->cap;

	txq->txpkts_pkts += txpkts->npkt;
	txq->txpkts_wrs++;
	txpkts->npkt = 0;	/* emptied */
}

static inline void
write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq,
    struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl)
{
	struct ulp_txpkt *ulpmc;
	struct ulptx_idata *ulpsc;
	struct cpl_tx_pkt_core *cpl;
	struct sge_eq *eq = &txq->eq;
	uintptr_t flitp, start, end;
	uint64_t ctrl;
	caddr_t dst;

	KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__));

	start = (uintptr_t)eq->desc;
	end = (uintptr_t)eq->spg;

	/* Checksum offload */
	ctrl = 0;
	if (!(m->m_pkthdr.csum_flags & CSUM_IP))
		ctrl |= F_TXPKT_IPCSUM_DIS;
	if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP)))
		ctrl |= F_TXPKT_L4CSUM_DIS;
	if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP))
		txq->txcsum++;	/* some hardware assistance provided */

	/* VLAN tag insertion */
	if (m->m_flags & M_VLANTAG) {
		ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag);
		txq->vlan_insertion++;
	}

	/*
	 * The previous packet's SGL must have ended at a 16 byte boundary (this
	 * is required by the firmware/hardware).  It follows that flitp cannot
	 * wrap around between the ULPTX master command and ULPTX subcommand (8
	 * bytes each), and that it can not wrap around in the middle of the
	 * cpl_tx_pkt_core either.
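	 *
	 * (The only place a wrap can happen is at eq->spg, the end of the
	 * descriptor ring.  Both flitp and eq->spg are 16 byte aligned, so
	 * the remaining distance to the end of the ring is always a multiple
	 * of 16 bytes; the 8 + 8 byte ulp_txpkt/ulptx_idata pair and the
	 * 16 byte cpl_tx_pkt_core therefore always fit entirely before the
	 * end.)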
	 */
	flitp = (uintptr_t)txpkts->flitp;
	KASSERT((flitp & 0xf) == 0,
	    ("%s: last SGL did not end at 16 byte boundary: %p",
	    __func__, txpkts->flitp));

	/* ULP master command */
	ulpmc = (void *)flitp;
	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) |
	    V_ULP_TXPKT_FID(eq->iqid));
	ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) +
	    sizeof(*cpl) + 8 * sgl->nflits, 16));

	/* ULP subcommand */
	ulpsc = (void *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) |
	    F_ULP_TX_SC_MORE);
	ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core));

	flitp += sizeof(*ulpmc) + sizeof(*ulpsc);
	if (flitp == end)
		flitp = start;

	/* CPL_TX_PKT */
	cpl = (void *)flitp;
	cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) |
	    V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf));
	cpl->pack = 0;
	cpl->len = htobe16(m->m_pkthdr.len);
	cpl->ctrl1 = htobe64(ctrl);

	flitp += sizeof(*cpl);
	if (flitp == end)
		flitp = start;

	/* SGL for this frame */
	dst = (caddr_t)flitp;
	txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst);
	txpkts->flitp = (void *)dst;

	KASSERT(((uintptr_t)dst & 0xf) == 0,
	    ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst));
}

/*
 * If the SGL ends on an address that is not 16 byte aligned, this function will
 * add a 0 filled flit at the end.  It returns 1 in that case.
 */
static int
write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to)
{
	__be64 *flitp, *end;
	struct ulptx_sgl *usgl;
	bus_dma_segment_t *seg;
	int i, padded;

	KASSERT(sgl->nsegs > 0 && sgl->nflits > 0,
	    ("%s: bad SGL - nsegs=%d, nflits=%d",
	    __func__, sgl->nsegs, sgl->nflits));

	KASSERT(((uintptr_t)(*to) & 0xf) == 0,
	    ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));

	flitp = (__be64 *)(*to);
	end = flitp + sgl->nflits;
	seg = &sgl->seg[0];
	usgl = (void *)flitp;

	/*
	 * We start at a 16 byte boundary somewhere inside the tx descriptor
	 * ring, so we're at least 16 bytes away from the status page.  There is
	 * no chance of a wrap around in the middle of usgl (which is 16 bytes).
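	 *
	 * Layout reminder: the first two flits hold the command word + len0
	 * and addr0 of the first segment.  After that, every pair of segments
	 * takes three flits (two lengths packed into one flit plus two 8 byte
	 * addresses) and a final unpaired segment takes two, which is where
	 * the nflits = 3 * (nsegs - 1) / 2 + ((nsegs - 1) & 1) + 2 figure
	 * computed earlier comes from.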
	 */

	usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
	    V_ULPTX_NSGE(sgl->nsegs));
	usgl->len0 = htobe32(seg->ds_len);
	usgl->addr0 = htobe64(seg->ds_addr);
	seg++;

	if ((uintptr_t)end <= (uintptr_t)eq->spg) {

		/* Won't wrap around at all */

		for (i = 0; i < sgl->nsegs - 1; i++, seg++) {
			usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len);
			usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	} else {

		/* Will wrap somewhere in the rest of the SGL */

		/* 2 flits already written, write the rest flit by flit */
		flitp = (void *)(usgl + 1);
		for (i = 0; i < sgl->nflits - 2; i++) {
			if ((uintptr_t)flitp == (uintptr_t)eq->spg)
				flitp = (void *)eq->desc;
			*flitp++ = get_flit(seg, sgl->nsegs - 1, i);
		}
		end = flitp;
	}

	if ((uintptr_t)end & 0xf) {
		*(uint64_t *)end = 0;
		end++;
		padded = 1;
	} else
		padded = 0;

	if ((uintptr_t)end == (uintptr_t)eq->spg)
		*to = (void *)eq->desc;
	else
		*to = (void *)end;

	return (padded);
}

static inline void
copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
{
	if ((uintptr_t)(*to) + len <= (uintptr_t)eq->spg) {
		bcopy(from, *to, len);
		(*to) += len;
	} else {
		int portion = (uintptr_t)eq->spg - (uintptr_t)(*to);

		bcopy(from, *to, portion);
		from += portion;
		portion = len - portion;	/* remaining */
		bcopy(from, (void *)eq->desc, portion);
		(*to) = (caddr_t)eq->desc + portion;
	}
}

static inline void
ring_tx_db(struct adapter *sc, struct sge_eq *eq)
{
	wmb();
	t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
	    V_QID(eq->cntxt_id) | V_PIDX(eq->pending));
	eq->pending = 0;
}

static inline int
reclaimable(struct sge_eq *eq)
{
	unsigned int cidx;

	cidx = eq->spg->cidx;	/* stable snapshot */
	cidx = be16_to_cpu(cidx);

	if (cidx >= eq->cidx)
		return (cidx - eq->cidx);
	else
		return (cidx + eq->cap - eq->cidx);
}

/*
 * There are "can_reclaim" tx descriptors ready to be reclaimed.  Reclaim as
 * many as possible but stop when there are around "n" mbufs to free.
 *
 * The actual number reclaimed is provided as the return value.
 */
static int
reclaim_tx_descs(struct sge_eq *eq, int can_reclaim, int n)
{
	struct tx_sdesc *txsd;
	struct tx_map *txm;
	unsigned int reclaimed, maps;

	EQ_LOCK_ASSERT_OWNED(eq);

	if (can_reclaim == 0)
		can_reclaim = reclaimable(eq);

	maps = reclaimed = 0;
	while (can_reclaim && maps < n) {
		int ndesc;

		txsd = &eq->sdesc[eq->cidx];
		ndesc = txsd->desc_used;

		/*
		 * Firmware doesn't return "partial" credits.
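		 * The returned credits never stop in the middle of a work
		 * request, so can_reclaim always covers at least the
		 * desc_used of the WR at the current cidx.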
		 */
		KASSERT(can_reclaim >= ndesc,
		    ("%s: unexpected number of credits: %d, %d",
		    __func__, can_reclaim, ndesc));

		maps += txsd->map_used;

		reclaimed += ndesc;
		can_reclaim -= ndesc;

		eq->cidx += ndesc;
		if (__predict_false(eq->cidx >= eq->cap))
			eq->cidx -= eq->cap;
	}

	txm = &eq->maps[eq->map_cidx];
	if (maps)
		prefetch(txm->m);

	eq->avail += reclaimed;
	KASSERT(eq->avail < eq->cap,	/* avail tops out at (cap - 1) */
	    ("%s: too many descriptors available", __func__));

	eq->map_avail += maps;
	KASSERT(eq->map_avail <= eq->map_total,
	    ("%s: too many maps available", __func__));

	while (maps--) {
		struct tx_map *next;

		next = txm + 1;
		if (__predict_false(eq->map_cidx + 1 == eq->map_total))
			next = eq->maps;
		prefetch(next->m);

		bus_dmamap_unload(eq->tx_tag, txm->map);
		m_freem(txm->m);
		txm->m = NULL;

		txm = next;
		if (__predict_false(++eq->map_cidx == eq->map_total))
			eq->map_cidx = 0;
	}

	return (reclaimed);
}

static void
write_eqflush_wr(struct sge_eq *eq)
{
	struct fw_eq_flush_wr *wr;
	struct tx_sdesc *txsd;

	EQ_LOCK_ASSERT_OWNED(eq);
	KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__));

	wr = (void *)&eq->desc[eq->pidx];
	bzero(wr, sizeof(*wr));
	wr->opcode = FW_EQ_FLUSH_WR;
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) |
	    F_FW_WR_EQUEQ | F_FW_WR_EQUIQ);

	txsd = &eq->sdesc[eq->pidx];
	txsd->desc_used = 1;
	txsd->map_used = 0;

	eq->flags |= EQ_CRFLUSHED;
	eq->pending++;
	eq->avail--;
	if (++eq->pidx == eq->cap)
		eq->pidx = 0;
}

static __be64
get_flit(bus_dma_segment_t *sgl, int nsegs, int idx)
{
	int i = (idx / 3) * 2;

	switch (idx % 3) {
	case 0: {
		__be64 rc;

		rc = htobe32(sgl[i].ds_len);
		if (i + 1 < nsegs)
			rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32;

		return (rc);
	}
	case 1:
		return htobe64(sgl[i].ds_addr);
	case 2:
		return htobe64(sgl[i + 1].ds_addr);
	}

	return (0);
}

static void
set_fl_tag_idx(struct sge_fl *fl, int mtu)
{
	int i;

	FL_LOCK_ASSERT_OWNED(fl);

	for (i = 0; i < FL_BUF_SIZES - 1; i++) {
		if (FL_BUF_SIZE(i) >= (mtu + FL_PKTSHIFT))
			break;
	}

	fl->tag_idx = i;
}

static int
handle_sge_egr_update(struct adapter *sc, const struct cpl_sge_egr_update *cpl)
{
	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
	struct sge *s = &sc->sge;
	struct sge_txq *txq;
	struct port_info *pi;

	txq = (void *)s->eqmap[qid - s->eq_start];
	TXQ_LOCK(txq);
	if (txq->eq.flags & EQ_CRFLUSHED) {
		pi = txq->ifp->if_softc;
		taskqueue_enqueue(pi->tq, &txq->resume_tx);
		txq->egr_update++;
	} else
		wakeup_one(txq);	/* txq is going away, wakeup free_txq */
	TXQ_UNLOCK(txq);

	return (0);
}
