t4_sge.c revision 346861
1/*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/11/sys/dev/cxgbe/t4_sge.c 346861 2019-04-29 00:22:34Z np $"); 30 31#include "opt_inet.h" 32#include "opt_inet6.h" 33 34#include <sys/types.h> 35#include <sys/eventhandler.h> 36#include <sys/mbuf.h> 37#include <sys/socket.h> 38#include <sys/kernel.h> 39#include <sys/malloc.h> 40#include <sys/queue.h> 41#include <sys/sbuf.h> 42#include <sys/taskqueue.h> 43#include <sys/time.h> 44#include <sys/sglist.h> 45#include <sys/sysctl.h> 46#include <sys/smp.h> 47#include <sys/counter.h> 48#include <net/bpf.h> 49#include <net/ethernet.h> 50#include <net/if.h> 51#include <net/if_vlan_var.h> 52#include <netinet/in.h> 53#include <netinet/ip.h> 54#include <netinet/ip6.h> 55#include <netinet/tcp.h> 56#include <machine/in_cksum.h> 57#include <machine/md_var.h> 58#include <vm/vm.h> 59#include <vm/pmap.h> 60#ifdef DEV_NETMAP 61#include <machine/bus.h> 62#include <sys/selinfo.h> 63#include <net/if_var.h> 64#include <net/netmap.h> 65#include <dev/netmap/netmap_kern.h> 66#endif 67 68#include "common/common.h" 69#include "common/t4_regs.h" 70#include "common/t4_regs_values.h" 71#include "common/t4_msg.h" 72#include "t4_l2t.h" 73#include "t4_mp_ring.h" 74 75#ifdef T4_PKT_TIMESTAMP 76#define RX_COPY_THRESHOLD (MINCLSIZE - 8) 77#else 78#define RX_COPY_THRESHOLD MINCLSIZE 79#endif 80 81/* 82 * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 83 * 0-7 are valid values. 84 */ 85static int fl_pktshift = 2; 86SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0, 87 "payload DMA offset in rx buffer (bytes)"); 88 89/* 90 * Pad ethernet payload up to this boundary. 91 * -1: driver should figure out a good value. 92 * 0: disable padding. 93 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 94 */ 95int fl_pad = -1; 96SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0, 97 "payload pad boundary (bytes)"); 98 99/* 100 * Status page length. 101 * -1: driver should figure out a good value. 102 * 64 or 128 are the only other valid values. 
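 *
 * Like the other hw.cxgbe knobs here, this is a CTLFLAG_RDTUN tunable and
 * is set at boot.  A sketch of typical /boot/loader.conf entries (example
 * values only, each subject to the constraints documented above):
 *
 *	hw.cxgbe.fl_pktshift="2"
 *	hw.cxgbe.fl_pad="64"
 *	hw.cxgbe.spg_len="64"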
103 */ 104 static int spg_len = -1; 105 SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0, 106 "status page size (bytes)"); 107 108 /* 109 * Congestion drops. 110 * -1: no congestion feedback (not recommended). 111 * 0: backpressure the channel instead of dropping packets right away. 112 * 1: no backpressure, drop packets for the congested queue immediately. 113 */ 114 static int cong_drop = 0; 115 SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0, 116 "Congestion control for RX queues (0 = backpressure, 1 = drop)"); 117 118 /* 119 * Deliver multiple frames in the same free list buffer if they fit. 120 * -1: let the driver decide whether to enable buffer packing or not. 121 * 0: disable buffer packing. 122 * 1: enable buffer packing. 123 */ 124 static int buffer_packing = -1; 125 SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing, 126 0, "Enable buffer packing"); 127 128 /* 129 * Start next frame in a packed buffer at this boundary. 130 * -1: driver should figure out a good value. 131 * T4: driver will ignore this and use the same value as fl_pad above. 132 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. 133 */ 134 static int fl_pack = -1; 135 SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0, 136 "payload pack boundary (bytes)"); 137 138 /* 139 * Allow the driver to create mbuf(s) in a cluster allocated for rx. 140 * 0: never; always allocate mbufs from the zone_mbuf UMA zone. 141 * 1: ok to create mbuf(s) within a cluster if there is room. 142 */ 143 static int allow_mbufs_in_cluster = 1; 144 SYSCTL_INT(_hw_cxgbe, OID_AUTO, allow_mbufs_in_cluster, CTLFLAG_RDTUN, 145 &allow_mbufs_in_cluster, 0, 146 "Allow driver to create mbufs within a rx cluster"); 147 148 /* 149 * Largest rx cluster size that the driver is allowed to allocate. 150 */ 151 static int largest_rx_cluster = MJUM16BYTES; 152 SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN, 153 &largest_rx_cluster, 0, "Largest rx cluster (bytes)"); 154 155 /* 156 * Size of cluster allocation that's most likely to succeed. The driver will 157 * fall back to this size if it fails to allocate clusters larger than this. 158 */ 159 static int safest_rx_cluster = PAGE_SIZE; 160 SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN, 161 &safest_rx_cluster, 0, "Safe rx cluster (bytes)"); 162 163 /* 164 * The interrupt holdoff timers are multiplied by this value on T6+. 165 * 1 and 3-17 (both inclusive) are legal values. 166 */ 167 static int tscale = 1; 168 SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0, 169 "Interrupt holdoff timer scale on T6+"); 170 171 /* 172 * Number of LRO entries in the lro_ctrl structure per rx queue. 173 */ 174 static int lro_entries = TCP_LRO_ENTRIES; 175 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0, 176 "Number of LRO entries per RX queue"); 177 178 /* 179 * This enables presorting of frames before they're fed into tcp_lro_rx. 180 */ 181 static int lro_mbufs = 0; 182 SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0, 183 "Enable presorting of LRO frames"); 184 185 struct txpkts { 186 u_int wr_type; /* type 0 or type 1 */ 187 u_int npkt; /* # of packets in this work request */ 188 u_int plen; /* total payload (sum of all packets) */ 189 u_int len16; /* # of 16B pieces used by this work request */ 190 }; 191 192 /* A packet's SGL.
This + m_pkthdr has all info needed for tx */ 193struct sgl { 194 struct sglist sg; 195 struct sglist_seg seg[TX_SGL_SEGS]; 196}; 197 198static int service_iq(struct sge_iq *, int); 199static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 200static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); 201static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); 202static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 203static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t, 204 uint16_t, char *); 205static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 206 bus_addr_t *, void **); 207static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 208 void *); 209static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *, 210 int, int); 211static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *); 212static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 213 struct sge_iq *); 214static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *, 215 struct sysctl_oid *, struct sge_fl *); 216static int alloc_fwq(struct adapter *); 217static int free_fwq(struct adapter *); 218static int alloc_mgmtq(struct adapter *); 219static int free_mgmtq(struct adapter *); 220static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int, 221 struct sysctl_oid *); 222static int free_rxq(struct vi_info *, struct sge_rxq *); 223#ifdef TCP_OFFLOAD 224static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int, 225 struct sysctl_oid *); 226static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *); 227#endif 228#ifdef DEV_NETMAP 229static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int, 230 struct sysctl_oid *); 231static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *); 232static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int, 233 struct sysctl_oid *); 234static int free_nm_txq(struct vi_info *, struct sge_nm_txq *); 235#endif 236static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 237static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 238#ifdef TCP_OFFLOAD 239static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *); 240#endif 241static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *); 242static int free_eq(struct adapter *, struct sge_eq *); 243static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *, 244 struct sysctl_oid *); 245static int free_wrq(struct adapter *, struct sge_wrq *); 246static int alloc_txq(struct vi_info *, struct sge_txq *, int, 247 struct sysctl_oid *); 248static int free_txq(struct vi_info *, struct sge_txq *); 249static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 250static inline void ring_fl_db(struct adapter *, struct sge_fl *); 251static int refill_fl(struct adapter *, struct sge_fl *, int); 252static void refill_sfl(void *); 253static int alloc_fl_sdesc(struct sge_fl *); 254static void free_fl_sdesc(struct adapter *, struct sge_fl *); 255static void find_best_refill_source(struct adapter *, struct sge_fl *, int); 256static void find_safe_refill_source(struct adapter *, struct sge_fl *); 257static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 258 259static inline void get_pkt_gl(struct mbuf *, struct sglist *); 260static inline u_int txpkt_len16(u_int, u_int); 261static inline u_int 
txpkt_vm_len16(u_int, u_int); 262static inline u_int txpkts0_len16(u_int); 263static inline u_int txpkts1_len16(void); 264static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *, 265 struct mbuf *, u_int); 266static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *, 267 struct fw_eth_tx_pkt_vm_wr *, struct mbuf *, u_int); 268static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int); 269static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int); 270static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *, 271 struct mbuf *, const struct txpkts *, u_int); 272static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int); 273static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 274static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int); 275static inline uint16_t read_hw_cidx(struct sge_eq *); 276static inline u_int reclaimable_tx_desc(struct sge_eq *); 277static inline u_int total_available_tx_desc(struct sge_eq *); 278static u_int reclaim_tx_descs(struct sge_txq *, u_int); 279static void tx_reclaim(void *, int); 280static __be64 get_flit(struct sglist_seg *, int, int); 281static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 282 struct mbuf *); 283static int handle_fw_msg(struct sge_iq *, const struct rss_header *, 284 struct mbuf *); 285static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *); 286static void wrq_tx_drain(void *, int); 287static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *); 288 289static int sysctl_uint16(SYSCTL_HANDLER_ARGS); 290static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 291static int sysctl_tc(SYSCTL_HANDLER_ARGS); 292 293static counter_u64_t extfree_refs; 294static counter_u64_t extfree_rels; 295 296an_handler_t t4_an_handler; 297fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES]; 298cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS]; 299cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES]; 300cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES]; 301cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES]; 302cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES]; 303 304void 305t4_register_an_handler(an_handler_t h) 306{ 307 uintptr_t *loc; 308 309 MPASS(h == NULL || t4_an_handler == NULL); 310 311 loc = (uintptr_t *)&t4_an_handler; 312 atomic_store_rel_ptr(loc, (uintptr_t)h); 313} 314 315void 316t4_register_fw_msg_handler(int type, fw_msg_handler_t h) 317{ 318 uintptr_t *loc; 319 320 MPASS(type < nitems(t4_fw_msg_handler)); 321 MPASS(h == NULL || t4_fw_msg_handler[type] == NULL); 322 /* 323 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 324 * handler dispatch table. Reject any attempt to install a handler for 325 * this subtype. 
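 *
 * For reference, t4_sge_modload() below uses this interface as intended:
 *
 *	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
 *	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);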
326 */ 327 MPASS(type != FW_TYPE_RSSCPL); 328 MPASS(type != FW6_TYPE_RSSCPL); 329 330 loc = (uintptr_t *)&t4_fw_msg_handler[type]; 331 atomic_store_rel_ptr(loc, (uintptr_t)h); 332} 333 334void 335t4_register_cpl_handler(int opcode, cpl_handler_t h) 336{ 337 uintptr_t *loc; 338 339 MPASS(opcode < nitems(t4_cpl_handler)); 340 MPASS(h == NULL || t4_cpl_handler[opcode] == NULL); 341 342 loc = (uintptr_t *)&t4_cpl_handler[opcode]; 343 atomic_store_rel_ptr(loc, (uintptr_t)h); 344} 345 346static int 347set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, 348 struct mbuf *m) 349{ 350 const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 351 u_int tid; 352 int cookie; 353 354 MPASS(m == NULL); 355 356 tid = GET_TID(cpl); 357 if (is_ftid(iq->adapter, tid)) { 358 /* 359 * The return code for filter-write is put in the CPL cookie so 360 * we have to rely on the hardware tid (is_ftid) to determine 361 * that this is a response to a filter. 362 */ 363 cookie = CPL_COOKIE_FILTER; 364 } else { 365 cookie = G_COOKIE(cpl->cookie); 366 } 367 MPASS(cookie > CPL_COOKIE_RESERVED); 368 MPASS(cookie < nitems(set_tcb_rpl_handlers)); 369 370 return (set_tcb_rpl_handlers[cookie](iq, rss, m)); 371} 372 373static int 374l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, 375 struct mbuf *m) 376{ 377 const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1); 378 unsigned int cookie; 379 380 MPASS(m == NULL); 381 382 cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER; 383 return (l2t_write_rpl_handlers[cookie](iq, rss, m)); 384} 385 386static int 387act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss, 388 struct mbuf *m) 389{ 390 const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1); 391 u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status))); 392 393 MPASS(m == NULL); 394 MPASS(cookie != CPL_COOKIE_RESERVED); 395 396 return (act_open_rpl_handlers[cookie](iq, rss, m)); 397} 398 399static int 400abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss, 401 struct mbuf *m) 402{ 403 struct adapter *sc = iq->adapter; 404 u_int cookie; 405 406 MPASS(m == NULL); 407 if (is_hashfilter(sc)) 408 cookie = CPL_COOKIE_HASHFILTER; 409 else 410 cookie = CPL_COOKIE_TOM; 411 412 return (abort_rpl_rss_handlers[cookie](iq, rss, m)); 413} 414 415static void 416t4_init_shared_cpl_handlers(void) 417{ 418 419 t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler); 420 t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler); 421 t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler); 422 t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler); 423} 424 425void 426t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie) 427{ 428 uintptr_t *loc; 429 430 MPASS(opcode < nitems(t4_cpl_handler)); 431 MPASS(cookie > CPL_COOKIE_RESERVED); 432 MPASS(cookie < NUM_CPL_COOKIES); 433 MPASS(t4_cpl_handler[opcode] != NULL); 434 435 switch (opcode) { 436 case CPL_SET_TCB_RPL: 437 loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie]; 438 break; 439 case CPL_L2T_WRITE_RPL: 440 loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie]; 441 break; 442 case CPL_ACT_OPEN_RPL: 443 loc = (uintptr_t *)&act_open_rpl_handlers[cookie]; 444 break; 445 case CPL_ABORT_RPL_RSS: 446 loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie]; 447 break; 448 default: 449 MPASS(0); 450 return; 451 } 452 MPASS(h == NULL || *loc == (uintptr_t)NULL); 453 atomic_store_rel_ptr(loc, (uintptr_t)h); 454} 455 456/* 457 * Called on MOD_LOAD. 
Validates and calculates the SGE tunables. 458 */ 459void 460t4_sge_modload(void) 461{ 462 463 if (fl_pktshift < 0 || fl_pktshift > 7) { 464 printf("Invalid hw.cxgbe.fl_pktshift value (%d)," 465 " using 2 instead.\n", fl_pktshift); 466 fl_pktshift = 2; 467 } 468 469 if (spg_len != 64 && spg_len != 128) { 470 int len; 471 472#if defined(__i386__) || defined(__amd64__) 473 len = cpu_clflush_line_size > 64 ? 128 : 64; 474#else 475 len = 64; 476#endif 477 if (spg_len != -1) { 478 printf("Invalid hw.cxgbe.spg_len value (%d)," 479 " using %d instead.\n", spg_len, len); 480 } 481 spg_len = len; 482 } 483 484 if (cong_drop < -1 || cong_drop > 1) { 485 printf("Invalid hw.cxgbe.cong_drop value (%d)," 486 " using 0 instead.\n", cong_drop); 487 cong_drop = 0; 488 } 489 490 if (tscale != 1 && (tscale < 3 || tscale > 17)) { 491 printf("Invalid hw.cxgbe.tscale value (%d)," 492 " using 1 instead.\n", tscale); 493 tscale = 1; 494 } 495 496 extfree_refs = counter_u64_alloc(M_WAITOK); 497 extfree_rels = counter_u64_alloc(M_WAITOK); 498 counter_u64_zero(extfree_refs); 499 counter_u64_zero(extfree_rels); 500 501 t4_init_shared_cpl_handlers(); 502 t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg); 503 t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg); 504 t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update); 505 t4_register_cpl_handler(CPL_RX_PKT, t4_eth_rx); 506 t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); 507 t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl); 508} 509 510void 511t4_sge_modunload(void) 512{ 513 514 counter_u64_free(extfree_refs); 515 counter_u64_free(extfree_rels); 516} 517 518uint64_t 519t4_sge_extfree_refs(void) 520{ 521 uint64_t refs, rels; 522 523 rels = counter_u64_fetch(extfree_rels); 524 refs = counter_u64_fetch(extfree_refs); 525 526 return (refs - rels); 527} 528 529static inline void 530setup_pad_and_pack_boundaries(struct adapter *sc) 531{ 532 uint32_t v, m; 533 int pad, pack, pad_shift; 534 535 pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT : 536 X_INGPADBOUNDARY_SHIFT; 537 pad = fl_pad; 538 if (fl_pad < (1 << pad_shift) || 539 fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) || 540 !powerof2(fl_pad)) { 541 /* 542 * If there is any chance that we might use buffer packing and 543 * the chip is a T4, then pick 64 as the pad/pack boundary. Set 544 * it to the minimum allowed in all other cases. 545 */ 546 pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift; 547 548 /* 549 * For fl_pad = 0 we'll still write a reasonable value to the 550 * register but all the freelists will opt out of padding. 551 * We'll complain here only if the user tried to set it to a 552 * value greater than 0 that was invalid. 553 */ 554 if (fl_pad > 0) { 555 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" 556 " (%d), using %d instead.\n", fl_pad, pad); 557 } 558 } 559 m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); 560 v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift); 561 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 562 563 if (is_t4(sc)) { 564 if (fl_pack != -1 && fl_pack != pad) { 565 /* Complain but carry on. 
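 * T4 lacks a separate pack boundary register (it has no SGE_CONTROL2),
 * so the pad boundary programmed above also serves as the pack boundary.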
*/ 566 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," 567 " using %d instead.\n", fl_pack, pad); 568 } 569 return; 570 } 571 572 pack = fl_pack; 573 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || 574 !powerof2(fl_pack)) { 575 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); 576 MPASS(powerof2(pack)); 577 if (pack < 16) 578 pack = 16; 579 if (pack == 32) 580 pack = 64; 581 if (pack > 4096) 582 pack = 4096; 583 if (fl_pack != -1) { 584 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" 585 " (%d), using %d instead.\n", fl_pack, pack); 586 } 587 } 588 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 589 if (pack == 16) 590 v = V_INGPACKBOUNDARY(0); 591 else 592 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); 593 594 MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ 595 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); 596} 597 598/* 599 * adap->params.vpd.cclk must be set up before this is called. 600 */ 601void 602t4_tweak_chip_settings(struct adapter *sc) 603{ 604 int i; 605 uint32_t v, m; 606 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 607 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; 608 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 609 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 610 static int sge_flbuf_sizes[] = { 611 MCLBYTES, 612#if MJUMPAGESIZE != MCLBYTES 613 MJUMPAGESIZE, 614 MJUMPAGESIZE - CL_METADATA_SIZE, 615 MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, 616#endif 617 MJUM9BYTES, 618 MJUM16BYTES, 619 MCLBYTES - MSIZE - CL_METADATA_SIZE, 620 MJUM9BYTES - CL_METADATA_SIZE, 621 MJUM16BYTES - CL_METADATA_SIZE, 622 }; 623 624 KASSERT(sc->flags & MASTER_PF, 625 ("%s: trying to change chip settings when not master.", __func__)); 626 627 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 628 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 629 V_EGRSTATUSPAGESIZE(spg_len == 128); 630 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 631 632 setup_pad_and_pack_boundaries(sc); 633 634 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 635 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 636 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 637 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 638 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 639 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 640 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 641 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 642 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); 643 644 KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, 645 ("%s: hw buffer size table too big", __func__)); 646 for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { 647 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 648 sge_flbuf_sizes[i]); 649 } 650 651 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | 652 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); 653 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); 654 655 KASSERT(intr_timer[0] <= timer_max, 656 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], 657 timer_max)); 658 for (i = 1; i < nitems(intr_timer); i++) { 659 KASSERT(intr_timer[i] >= intr_timer[i - 1], 660 ("%s: timers not listed in increasing order (%d)", 661 __func__, i)); 662 663 while (intr_timer[i] > timer_max) { 664 if (i == nitems(intr_timer) - 1) { 665 intr_timer[i] = timer_max; 666 break; 667 } 668 intr_timer[i] += intr_timer[i - 1]; 669 intr_timer[i] /= 2; 670 } 671 } 672 673 v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | 674 V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); 675 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); 676 v = 
V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | 677 V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); 678 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); 679 v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | 680 V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); 681 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); 682 683 if (chip_id(sc) >= CHELSIO_T6) { 684 m = V_TSCALE(M_TSCALE); 685 if (tscale == 1) 686 v = 0; 687 else 688 v = V_TSCALE(tscale - 2); 689 t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v); 690 691 if (sc->debug_flags & DF_DISABLE_TCB_CACHE) { 692 m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN | 693 V_WRTHRTHRESH(M_WRTHRTHRESH); 694 t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1); 695 v &= ~m; 696 v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN | 697 V_WRTHRTHRESH(16); 698 t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1); 699 } 700 } 701 702 /* 4K, 16K, 64K, 256K DDP "page sizes" */ 703 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 704 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); 705 706 m = v = F_TDDPTAGTCB; 707 t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); 708 709 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 710 F_RESETDDPOFFSET; 711 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 712 t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); 713} 714 715/* 716 * SGE wants the buffer to be at least 64B and then a multiple of 16. If 717 * padding is in use, the buffer's start and end need to be aligned to the pad 718 * boundary as well. We'll just make sure that the size is a multiple of the 719 * boundary here, it is up to the buffer allocation code to make sure the start 720 * of the buffer is aligned as well. 721 */ 722static inline int 723hwsz_ok(struct adapter *sc, int hwsz) 724{ 725 int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1; 726 727 return (hwsz >= 64 && (hwsz & mask) == 0); 728} 729 730/* 731 * XXX: driver really should be able to deal with unexpected settings. 732 */ 733int 734t4_read_chip_settings(struct adapter *sc) 735{ 736 struct sge *s = &sc->sge; 737 struct sge_params *sp = &sc->params.sge; 738 int i, j, n, rc = 0; 739 uint32_t m, v, r; 740 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 741 static int sw_buf_sizes[] = { /* Sorted by size */ 742 MCLBYTES, 743#if MJUMPAGESIZE != MCLBYTES 744 MJUMPAGESIZE, 745#endif 746 MJUM9BYTES, 747 MJUM16BYTES 748 }; 749 struct sw_zone_info *swz, *safe_swz; 750 struct hw_buf_info *hwb; 751 752 m = F_RXPKTCPLMODE; 753 v = F_RXPKTCPLMODE; 754 r = sc->params.sge.sge_control; 755 if ((r & m) != v) { 756 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 757 rc = EINVAL; 758 } 759 760 /* 761 * If this changes then every single use of PAGE_SHIFT in the driver 762 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift. 763 */ 764 if (sp->page_shift != PAGE_SHIFT) { 765 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 766 rc = EINVAL; 767 } 768 769 /* Filter out unusable hw buffer sizes entirely (mark with -2). */ 770 hwb = &s->hw_buf_info[0]; 771 for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { 772 r = sc->params.sge.sge_fl_buffer_size[i]; 773 hwb->size = r; 774 hwb->zidx = hwsz_ok(sc, r) ? -1 : -2; 775 hwb->next = -1; 776 } 777 778 /* 779 * Create a sorted list in decreasing order of hw buffer sizes (and so 780 * increasing order of spare area) for each software zone. 
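 * For example, when the MJUMPAGESIZE zone is usable it can be backed by
 * both the MJUMPAGESIZE and MJUMPAGESIZE - CL_METADATA_SIZE hw sizes
 * programmed by t4_tweak_chip_settings(); the smaller one has more spare
 * area and therefore sorts later in the zone's list.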
781 * 782 * If padding is enabled then the start and end of the buffer must align 783 * to the pad boundary; if packing is enabled then they must align with 784 * the pack boundary as well. Allocations from the cluster zones are 785 * aligned to min(size, 4K), so the buffer starts at that alignment and 786 * ends at hwb->size alignment. If mbuf inlining is allowed the 787 * starting alignment will be reduced to MSIZE and the driver will 788 * exercise appropriate caution when deciding on the best buffer layout 789 * to use. 790 */ 791 n = 0; /* no usable buffer size to begin with */ 792 swz = &s->sw_zone_info[0]; 793 safe_swz = NULL; 794 for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { 795 int8_t head = -1, tail = -1; 796 797 swz->size = sw_buf_sizes[i]; 798 swz->zone = m_getzone(swz->size); 799 swz->type = m_gettype(swz->size); 800 801 if (swz->size < PAGE_SIZE) { 802 MPASS(powerof2(swz->size)); 803 if (fl_pad && (swz->size % sp->pad_boundary != 0)) 804 continue; 805 } 806 807 if (swz->size == safest_rx_cluster) 808 safe_swz = swz; 809 810 hwb = &s->hw_buf_info[0]; 811 for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { 812 if (hwb->zidx != -1 || hwb->size > swz->size) 813 continue; 814#ifdef INVARIANTS 815 if (fl_pad) 816 MPASS(hwb->size % sp->pad_boundary == 0); 817#endif 818 hwb->zidx = i; 819 if (head == -1) 820 head = tail = j; 821 else if (hwb->size < s->hw_buf_info[tail].size) { 822 s->hw_buf_info[tail].next = j; 823 tail = j; 824 } else { 825 int8_t *cur; 826 struct hw_buf_info *t; 827 828 for (cur = &head; *cur != -1; cur = &t->next) { 829 t = &s->hw_buf_info[*cur]; 830 if (hwb->size == t->size) { 831 hwb->zidx = -2; 832 break; 833 } 834 if (hwb->size > t->size) { 835 hwb->next = *cur; 836 *cur = j; 837 break; 838 } 839 } 840 } 841 } 842 swz->head_hwidx = head; 843 swz->tail_hwidx = tail; 844 845 if (tail != -1) { 846 n++; 847 if (swz->size - s->hw_buf_info[tail].size >= 848 CL_METADATA_SIZE) 849 sc->flags |= BUF_PACKING_OK; 850 } 851 } 852 if (n == 0) { 853 device_printf(sc->dev, "no usable SGE FL buffer size.\n"); 854 rc = EINVAL; 855 } 856 857 s->safe_hwidx1 = -1; 858 s->safe_hwidx2 = -1; 859 if (safe_swz != NULL) { 860 s->safe_hwidx1 = safe_swz->head_hwidx; 861 for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { 862 int spare; 863 864 hwb = &s->hw_buf_info[i]; 865#ifdef INVARIANTS 866 if (fl_pad) 867 MPASS(hwb->size % sp->pad_boundary == 0); 868#endif 869 spare = safe_swz->size - hwb->size; 870 if (spare >= CL_METADATA_SIZE) { 871 s->safe_hwidx2 = i; 872 break; 873 } 874 } 875 } 876 877 if (sc->flags & IS_VF) 878 return (0); 879 880 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 881 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 882 if (r != v) { 883 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 884 rc = EINVAL; 885 } 886 887 m = v = F_TDDPTAGTCB; 888 r = t4_read_reg(sc, A_ULP_RX_CTL); 889 if ((r & m) != v) { 890 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 891 rc = EINVAL; 892 } 893 894 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 895 F_RESETDDPOFFSET; 896 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 897 r = t4_read_reg(sc, A_TP_PARA_REG5); 898 if ((r & m) != v) { 899 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 900 rc = EINVAL; 901 } 902 903 t4_init_tp_params(sc, 1); 904 905 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 906 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); 907 908 return (rc); 909} 910 911int 912t4_create_dma_tag(struct adapter *sc) 913{ 914 int rc; 915 916 rc = 
bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 917 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 918 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 919 NULL, &sc->dmat); 920 if (rc != 0) { 921 device_printf(sc->dev, 922 "failed to create main DMA tag: %d\n", rc); 923 } 924 925 return (rc); 926} 927 928void 929t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 930 struct sysctl_oid_list *children) 931{ 932 struct sge_params *sp = &sc->params.sge; 933 934 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", 935 CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", 936 "freelist buffer sizes"); 937 938 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, 939 NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)"); 940 941 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, 942 NULL, sp->pad_boundary, "payload pad boundary (bytes)"); 943 944 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, 945 NULL, sp->spg_len, "status page size (bytes)"); 946 947 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, 948 NULL, cong_drop, "congestion drop setting"); 949 950 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, 951 NULL, sp->pack_boundary, "payload pack boundary (bytes)"); 952} 953 954int 955t4_destroy_dma_tag(struct adapter *sc) 956{ 957 if (sc->dmat) 958 bus_dma_tag_destroy(sc->dmat); 959 960 return (0); 961} 962 963/* 964 * Allocate and initialize the firmware event queue and the management queue. 965 * 966 * Returns errno on failure. Resources allocated up to that point may still be 967 * allocated. Caller is responsible for cleanup in case this function fails. 968 */ 969int 970t4_setup_adapter_queues(struct adapter *sc) 971{ 972 int rc; 973 974 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 975 976 sysctl_ctx_init(&sc->ctx); 977 sc->flags |= ADAP_SYSCTL_CTX; 978 979 /* 980 * Firmware event queue 981 */ 982 rc = alloc_fwq(sc); 983 if (rc != 0) 984 return (rc); 985 986 /* 987 * Management queue. This is just a control queue that uses the fwq as 988 * its associated iq. 989 */ 990 if (!(sc->flags & IS_VF)) 991 rc = alloc_mgmtq(sc); 992 993 return (rc); 994} 995 996/* 997 * Idempotent 998 */ 999int 1000t4_teardown_adapter_queues(struct adapter *sc) 1001{ 1002 1003 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 1004 1005 /* Do this before freeing the queue */ 1006 if (sc->flags & ADAP_SYSCTL_CTX) { 1007 sysctl_ctx_free(&sc->ctx); 1008 sc->flags &= ~ADAP_SYSCTL_CTX; 1009 } 1010 1011 free_mgmtq(sc); 1012 free_fwq(sc); 1013 1014 return (0); 1015} 1016 1017/* Maximum payload that can be delivered with a single iq descriptor */ 1018static inline int 1019mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) 1020{ 1021 int payload; 1022 1023#ifdef TCP_OFFLOAD 1024 if (toe) { 1025 int rxcs = G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)); 1026 1027 /* Note that COP can set rx_coalesce on/off per connection. 
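 * With coalescing on, one iq descriptor can deliver up to the chip's
 * RXCOALESCESIZE worth of payload rather than a single frame, hence the
 * max(mtu, rxcs) below.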
*/ 1028 payload = max(mtu, rxcs); 1029 } else { 1030#endif 1031 /* large enough even when hw VLAN extraction is disabled */ 1032 payload = sc->params.sge.fl_pktshift + ETHER_HDR_LEN + 1033 ETHER_VLAN_ENCAP_LEN + mtu; 1034#ifdef TCP_OFFLOAD 1035 } 1036#endif 1037 1038 return (payload); 1039} 1040 1041int 1042t4_setup_vi_queues(struct vi_info *vi) 1043{ 1044 int rc = 0, i, intr_idx, iqidx; 1045 struct sge_rxq *rxq; 1046 struct sge_txq *txq; 1047 struct sge_wrq *ctrlq; 1048#ifdef TCP_OFFLOAD 1049 struct sge_ofld_rxq *ofld_rxq; 1050 struct sge_wrq *ofld_txq; 1051#endif 1052#ifdef DEV_NETMAP 1053 int saved_idx; 1054 struct sge_nm_rxq *nm_rxq; 1055 struct sge_nm_txq *nm_txq; 1056#endif 1057 char name[16]; 1058 struct port_info *pi = vi->pi; 1059 struct adapter *sc = pi->adapter; 1060 struct ifnet *ifp = vi->ifp; 1061 struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev); 1062 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 1063 int maxp, mtu = ifp->if_mtu; 1064 1065 /* Interrupt vector to start from (when using multiple vectors) */ 1066 intr_idx = vi->first_intr; 1067 1068#ifdef DEV_NETMAP 1069 saved_idx = intr_idx; 1070 if (ifp->if_capabilities & IFCAP_NETMAP) { 1071 1072 /* netmap is supported with direct interrupts only. */ 1073 MPASS(!forwarding_intr_to_fwq(sc)); 1074 1075 /* 1076 * We don't have buffers to back the netmap rx queues 1077 * right now so we create the queues in a way that 1078 * doesn't set off any congestion signal in the chip. 1079 */ 1080 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq", 1081 CTLFLAG_RD, NULL, "rx queues"); 1082 for_each_nm_rxq(vi, i, nm_rxq) { 1083 rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid); 1084 if (rc != 0) 1085 goto done; 1086 intr_idx++; 1087 } 1088 1089 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq", 1090 CTLFLAG_RD, NULL, "tx queues"); 1091 for_each_nm_txq(vi, i, nm_txq) { 1092 iqidx = vi->first_nm_rxq + (i % vi->nnmrxq); 1093 rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid); 1094 if (rc != 0) 1095 goto done; 1096 } 1097 } 1098 1099 /* Normal rx queues and netmap rx queues share the same interrupts. */ 1100 intr_idx = saved_idx; 1101#endif 1102 1103 /* 1104 * Allocate rx queues first because a default iqid is required when 1105 * creating a tx queue. 1106 */ 1107 maxp = mtu_to_max_payload(sc, mtu, 0); 1108 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq", 1109 CTLFLAG_RD, NULL, "rx queues"); 1110 for_each_rxq(vi, i, rxq) { 1111 1112 init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq); 1113 1114 snprintf(name, sizeof(name), "%s rxq%d-fl", 1115 device_get_nameunit(vi->dev), i); 1116 init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name); 1117 1118 rc = alloc_rxq(vi, rxq, 1119 forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid); 1120 if (rc != 0) 1121 goto done; 1122 intr_idx++; 1123 } 1124#ifdef DEV_NETMAP 1125 if (ifp->if_capabilities & IFCAP_NETMAP) 1126 intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq); 1127#endif 1128#ifdef TCP_OFFLOAD 1129 maxp = mtu_to_max_payload(sc, mtu, 1); 1130 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq", 1131 CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections"); 1132 for_each_ofld_rxq(vi, i, ofld_rxq) { 1133 1134 init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx, 1135 vi->qsize_rxq); 1136 1137 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1138 device_get_nameunit(vi->dev), i); 1139 init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name); 1140 1141 rc = alloc_ofld_rxq(vi, ofld_rxq, 1142 forwarding_intr_to_fwq(sc) ? 
-1 : intr_idx, i, oid); 1143 if (rc != 0) 1144 goto done; 1145 intr_idx++; 1146 } 1147#endif 1148 1149 /* 1150 * Now the tx queues. 1151 */ 1152 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, 1153 NULL, "tx queues"); 1154 for_each_txq(vi, i, txq) { 1155 iqidx = vi->first_rxq + (i % vi->nrxq); 1156 snprintf(name, sizeof(name), "%s txq%d", 1157 device_get_nameunit(vi->dev), i); 1158 init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan, 1159 sc->sge.rxq[iqidx].iq.cntxt_id, name); 1160 1161 rc = alloc_txq(vi, txq, i, oid); 1162 if (rc != 0) 1163 goto done; 1164 } 1165#ifdef TCP_OFFLOAD 1166 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq", 1167 CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); 1168 for_each_ofld_txq(vi, i, ofld_txq) { 1169 struct sysctl_oid *oid2; 1170 1171 iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq); 1172 snprintf(name, sizeof(name), "%s ofld_txq%d", 1173 device_get_nameunit(vi->dev), i); 1174 init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq, pi->tx_chan, 1175 sc->sge.ofld_rxq[iqidx].iq.cntxt_id, name); 1176 1177 snprintf(name, sizeof(name), "%d", i); 1178 oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 1179 name, CTLFLAG_RD, NULL, "offload tx queue"); 1180 1181 rc = alloc_wrq(sc, vi, ofld_txq, oid2); 1182 if (rc != 0) 1183 goto done; 1184 } 1185#endif 1186 1187 /* 1188 * Finally, the control queue. 1189 */ 1190 if (!IS_MAIN_VI(vi) || sc->flags & IS_VF) 1191 goto done; 1192 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, 1193 NULL, "ctrl queue"); 1194 ctrlq = &sc->sge.ctrlq[pi->port_id]; 1195 snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(vi->dev)); 1196 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, 1197 sc->sge.rxq[vi->first_rxq].iq.cntxt_id, name); 1198 rc = alloc_wrq(sc, vi, ctrlq, oid); 1199 1200done: 1201 if (rc) 1202 t4_teardown_vi_queues(vi); 1203 1204 return (rc); 1205} 1206 1207/* 1208 * Idempotent 1209 */ 1210int 1211t4_teardown_vi_queues(struct vi_info *vi) 1212{ 1213 int i; 1214 struct port_info *pi = vi->pi; 1215 struct adapter *sc = pi->adapter; 1216 struct sge_rxq *rxq; 1217 struct sge_txq *txq; 1218#ifdef TCP_OFFLOAD 1219 struct sge_ofld_rxq *ofld_rxq; 1220 struct sge_wrq *ofld_txq; 1221#endif 1222#ifdef DEV_NETMAP 1223 struct sge_nm_rxq *nm_rxq; 1224 struct sge_nm_txq *nm_txq; 1225#endif 1226 1227 /* Do this before freeing the queues */ 1228 if (vi->flags & VI_SYSCTL_CTX) { 1229 sysctl_ctx_free(&vi->ctx); 1230 vi->flags &= ~VI_SYSCTL_CTX; 1231 } 1232 1233#ifdef DEV_NETMAP 1234 if (vi->ifp->if_capabilities & IFCAP_NETMAP) { 1235 for_each_nm_txq(vi, i, nm_txq) { 1236 free_nm_txq(vi, nm_txq); 1237 } 1238 1239 for_each_nm_rxq(vi, i, nm_rxq) { 1240 free_nm_rxq(vi, nm_rxq); 1241 } 1242 } 1243#endif 1244 1245 /* 1246 * Take down all the tx queues first, as they reference the rx queues 1247 * (for egress updates, etc.). 1248 */ 1249 1250 if (IS_MAIN_VI(vi) && !(sc->flags & IS_VF)) 1251 free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 1252 1253 for_each_txq(vi, i, txq) { 1254 free_txq(vi, txq); 1255 } 1256#ifdef TCP_OFFLOAD 1257 for_each_ofld_txq(vi, i, ofld_txq) { 1258 free_wrq(sc, ofld_txq); 1259 } 1260#endif 1261 1262 /* 1263 * Then take down the rx queues. 1264 */ 1265 1266 for_each_rxq(vi, i, rxq) { 1267 free_rxq(vi, rxq); 1268 } 1269#ifdef TCP_OFFLOAD 1270 for_each_ofld_rxq(vi, i, ofld_rxq) { 1271 free_ofld_rxq(vi, ofld_rxq); 1272 } 1273#endif 1274 1275 return (0); 1276} 1277 1278/* 1279 * Deals with errors and the firmware event queue. 
All data rx queues forward 1280 * their interrupt to the firmware event queue. 1281 */ 1282void 1283t4_intr_all(void *arg) 1284{ 1285 struct adapter *sc = arg; 1286 struct sge_iq *fwq = &sc->sge.fwq; 1287 1288 t4_intr_err(arg); 1289 if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { 1290 service_iq(fwq, 0); 1291 atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); 1292 } 1293} 1294 1295/* Deals with error interrupts */ 1296void 1297t4_intr_err(void *arg) 1298{ 1299 struct adapter *sc = arg; 1300 1301 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1302 t4_slow_intr_handler(sc); 1303} 1304 1305void 1306t4_intr_evt(void *arg) 1307{ 1308 struct sge_iq *iq = arg; 1309 1310 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1311 service_iq(iq, 0); 1312 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1313 } 1314} 1315 1316void 1317t4_intr(void *arg) 1318{ 1319 struct sge_iq *iq = arg; 1320 1321 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1322 service_iq(iq, 0); 1323 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1324 } 1325} 1326 1327void 1328t4_vi_intr(void *arg) 1329{ 1330 struct irq *irq = arg; 1331 1332#ifdef DEV_NETMAP 1333 if (atomic_cmpset_int(&irq->nm_state, NM_ON, NM_BUSY)) { 1334 t4_nm_intr(irq->nm_rxq); 1335 atomic_cmpset_int(&irq->nm_state, NM_BUSY, NM_ON); 1336 } 1337#endif 1338 if (irq->rxq != NULL) 1339 t4_intr(irq->rxq); 1340} 1341 1342static inline int 1343sort_before_lro(struct lro_ctrl *lro) 1344{ 1345 1346 return (lro->lro_mbuf_max != 0); 1347} 1348 1349/* 1350 * Deals with anything and everything on the given ingress queue. 1351 */ 1352static int 1353service_iq(struct sge_iq *iq, int budget) 1354{ 1355 struct sge_iq *q; 1356 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ 1357 struct sge_fl *fl; /* Use iff IQ_HAS_FL */ 1358 struct adapter *sc = iq->adapter; 1359 struct iq_desc *d = &iq->desc[iq->cidx]; 1360 int ndescs = 0, limit; 1361 int rsp_type, refill; 1362 uint32_t lq; 1363 uint16_t fl_hw_cidx; 1364 struct mbuf *m0; 1365 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1366#if defined(INET) || defined(INET6) 1367 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1368 struct lro_ctrl *lro = &rxq->lro; 1369#endif 1370 1371 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1372 1373 limit = budget ? budget : iq->qsize / 16; 1374 1375 if (iq->flags & IQ_HAS_FL) { 1376 fl = &rxq->fl; 1377 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 1378 } else { 1379 fl = NULL; 1380 fl_hw_cidx = 0; /* to silence gcc warning */ 1381 } 1382 1383#if defined(INET) || defined(INET6) 1384 if (iq->flags & IQ_ADJ_CREDIT) { 1385 MPASS(sort_before_lro(lro)); 1386 iq->flags &= ~IQ_ADJ_CREDIT; 1387 if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) { 1388 tcp_lro_flush_all(lro); 1389 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) | 1390 V_INGRESSQID((u32)iq->cntxt_id) | 1391 V_SEINTARM(iq->intr_params)); 1392 return (0); 1393 } 1394 ndescs = 1; 1395 } 1396#else 1397 MPASS((iq->flags & IQ_ADJ_CREDIT) == 0); 1398#endif 1399 1400 /* 1401 * We always come back and check the descriptor ring for new indirect 1402 * interrupts and other responses after running a single handler. 
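 * Nested queues that cannot be fully serviced within their budget are
 * parked on the iql list and revisited at the bottom of this loop.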
1403 */ 1404 for (;;) { 1405 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1406 1407 rmb(); 1408 1409 refill = 0; 1410 m0 = NULL; 1411 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1412 lq = be32toh(d->rsp.pldbuflen_qid); 1413 1414 switch (rsp_type) { 1415 case X_RSPD_TYPE_FLBUF: 1416 1417 KASSERT(iq->flags & IQ_HAS_FL, 1418 ("%s: data for an iq (%p) with no freelist", 1419 __func__, iq)); 1420 1421 m0 = get_fl_payload(sc, fl, lq); 1422 if (__predict_false(m0 == NULL)) 1423 goto process_iql; 1424 refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; 1425#ifdef T4_PKT_TIMESTAMP 1426 /* 1427 * 60 bit timestamp for the payload is 1428 * *(uint64_t *)m0->m_pktdat. Note that it is 1429 * in the leading free-space in the mbuf. The 1430 * kernel can clobber it during a pullup, 1431 * m_copymdata, etc. You need to make sure that 1432 * the mbuf reaches you unmolested if you care 1433 * about the timestamp. 1434 */ 1435 *(uint64_t *)m0->m_pktdat = 1436 be64toh(d->rsp.u.last_flit) & 1437 0xfffffffffffffff; 1438#endif 1439 1440 /* fall through */ 1441 1442 case X_RSPD_TYPE_CPL: 1443 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1444 ("%s: bad opcode %02x.", __func__, 1445 d->rss.opcode)); 1446 t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0); 1447 break; 1448 1449 case X_RSPD_TYPE_INTR: 1450 1451 /* 1452 * Interrupts should be forwarded only to queues 1453 * that are not forwarding their interrupts. 1454 * This means service_iq can recurse but only 1 1455 * level deep. 1456 */ 1457 KASSERT(budget == 0, 1458 ("%s: budget %u, rsp_type %u", __func__, 1459 budget, rsp_type)); 1460 1461 /* 1462 * There are 1K interrupt-capable queues (qids 0 1463 * through 1023). A response type indicating a 1464 * forwarded interrupt with a qid >= 1K is an 1465 * iWARP async notification. 1466 */ 1467 if (lq >= 1024) { 1468 t4_an_handler(iq, &d->rsp); 1469 break; 1470 } 1471 1472 q = sc->sge.iqmap[lq - sc->sge.iq_start - 1473 sc->sge.iq_base]; 1474 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1475 IQS_BUSY)) { 1476 if (service_iq(q, q->qsize / 16) == 0) { 1477 atomic_cmpset_int(&q->state, 1478 IQS_BUSY, IQS_IDLE); 1479 } else { 1480 STAILQ_INSERT_TAIL(&iql, q, 1481 link); 1482 } 1483 } 1484 break; 1485 1486 default: 1487 KASSERT(0, 1488 ("%s: illegal response type %d on iq %p", 1489 __func__, rsp_type, iq)); 1490 log(LOG_ERR, 1491 "%s: illegal response type %d on iq %p", 1492 device_get_nameunit(sc->dev), rsp_type, iq); 1493 break; 1494 } 1495 1496 d++; 1497 if (__predict_false(++iq->cidx == iq->sidx)) { 1498 iq->cidx = 0; 1499 iq->gen ^= F_RSPD_GEN; 1500 d = &iq->desc[0]; 1501 } 1502 if (__predict_false(++ndescs == limit)) { 1503 t4_write_reg(sc, sc->sge_gts_reg, 1504 V_CIDXINC(ndescs) | 1505 V_INGRESSQID(iq->cntxt_id) | 1506 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1507 ndescs = 0; 1508 1509#if defined(INET) || defined(INET6) 1510 if (iq->flags & IQ_LRO_ENABLED && 1511 !sort_before_lro(lro) && 1512 sc->lro_timeout != 0) { 1513 tcp_lro_flush_inactive(lro, 1514 &lro_timeout); 1515 } 1516#endif 1517 1518 if (budget) { 1519 if (iq->flags & IQ_HAS_FL) { 1520 FL_LOCK(fl); 1521 refill_fl(sc, fl, 32); 1522 FL_UNLOCK(fl); 1523 } 1524 return (EINPROGRESS); 1525 } 1526 } 1527 if (refill) { 1528 FL_LOCK(fl); 1529 refill_fl(sc, fl, 32); 1530 FL_UNLOCK(fl); 1531 fl_hw_cidx = fl->hw_cidx; 1532 } 1533 } 1534 1535process_iql: 1536 if (STAILQ_EMPTY(&iql)) 1537 break; 1538 1539 /* 1540 * Process the head only, and send it to the back of the list if 1541 * it's still not done.
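 * (service_iq() returns 0 once a queue is idle again, and EINPROGRESS
 * if it had to stop early because of its budget.)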
1542 */ 1543 q = STAILQ_FIRST(&iql); 1544 STAILQ_REMOVE_HEAD(&iql, link); 1545 if (service_iq(q, q->qsize / 8) == 0) 1546 atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1547 else 1548 STAILQ_INSERT_TAIL(&iql, q, link); 1549 } 1550 1551#if defined(INET) || defined(INET6) 1552 if (iq->flags & IQ_LRO_ENABLED) { 1553 if (ndescs > 0 && lro->lro_mbuf_count > 8) { 1554 MPASS(sort_before_lro(lro)); 1555 /* hold back one credit and don't flush LRO state */ 1556 iq->flags |= IQ_ADJ_CREDIT; 1557 ndescs--; 1558 } else { 1559 tcp_lro_flush_all(lro); 1560 } 1561 } 1562#endif 1563 1564 t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) | 1565 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1566 1567 if (iq->flags & IQ_HAS_FL) { 1568 int starved; 1569 1570 FL_LOCK(fl); 1571 starved = refill_fl(sc, fl, 64); 1572 FL_UNLOCK(fl); 1573 if (__predict_false(starved != 0)) 1574 add_fl_to_sfl(sc, fl); 1575 } 1576 1577 return (0); 1578} 1579 1580static inline int 1581cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) 1582{ 1583 int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; 1584 1585 if (rc) 1586 MPASS(cll->region3 >= CL_METADATA_SIZE); 1587 1588 return (rc); 1589} 1590 1591static inline struct cluster_metadata * 1592cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, 1593 caddr_t cl) 1594{ 1595 1596 if (cl_has_metadata(fl, cll)) { 1597 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1598 1599 return ((struct cluster_metadata *)(cl + swz->size) - 1); 1600 } 1601 return (NULL); 1602} 1603 1604static void 1605rxb_free(struct mbuf *m, void *arg1, void *arg2) 1606{ 1607 uma_zone_t zone = arg1; 1608 caddr_t cl = arg2; 1609 1610 uma_zfree(zone, cl); 1611 counter_u64_add(extfree_rels, 1); 1612} 1613 1614/* 1615 * The mbuf returned by this function could be allocated from zone_mbuf or 1616 * constructed in spare room in the cluster. 1617 * 1618 * The mbuf carries the payload in one of these ways 1619 * a) frame inside the mbuf (mbuf from zone_mbuf) 1620 * b) m_cljset (for clusters without metadata) zone_mbuf 1621 * c) m_extaddref (cluster with metadata) inline mbuf 1622 * d) m_extaddref (cluster with metadata) zone_mbuf 1623 */ 1624static struct mbuf * 1625get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1626 int remaining) 1627{ 1628 struct mbuf *m; 1629 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1630 struct cluster_layout *cll = &sd->cll; 1631 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1632 struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; 1633 struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); 1634 int len, blen; 1635 caddr_t payload; 1636 1637 blen = hwb->size - fl->rx_offset; /* max possible in this buf */ 1638 len = min(remaining, blen); 1639 payload = sd->cl + cll->region1 + fl->rx_offset; 1640 if (fl->flags & FL_BUF_PACKING) { 1641 const u_int l = fr_offset + len; 1642 const u_int pad = roundup2(l, fl->buf_boundary) - l; 1643 1644 if (fl->rx_offset + len + pad < hwb->size) 1645 blen = len + pad; 1646 MPASS(fl->rx_offset + blen <= hwb->size); 1647 } else { 1648 MPASS(fl->rx_offset == 0); /* not packing */ 1649 } 1650 1651 1652 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1653 1654 /* 1655 * Copy payload into a freshly allocated mbuf. 1656 */ 1657 1658 m = fr_offset == 0 ? 
1659 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1660 if (m == NULL) 1661 return (NULL); 1662 fl->mbuf_allocated++; 1663#ifdef T4_PKT_TIMESTAMP 1664 /* Leave room for a timestamp */ 1665 m->m_data += 8; 1666#endif 1667 /* copy data to mbuf */ 1668 bcopy(payload, mtod(m, caddr_t), len); 1669 1670 } else if (sd->nmbuf * MSIZE < cll->region1) { 1671 1672 /* 1673 * There's spare room in the cluster for an mbuf. Create one 1674 * and associate it with the payload that's in the cluster. 1675 */ 1676 1677 MPASS(clm != NULL); 1678 m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); 1679 /* No bzero required */ 1680 if (m_init(m, M_NOWAIT, MT_DATA, 1681 fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE)) 1682 return (NULL); 1683 fl->mbuf_inlined++; 1684 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, 1685 swz->zone, sd->cl); 1686 if (sd->nmbuf++ == 0) 1687 counter_u64_add(extfree_refs, 1); 1688 1689 } else { 1690 1691 /* 1692 * Grab an mbuf from zone_mbuf and associate it with the 1693 * payload in the cluster. 1694 */ 1695 1696 m = fr_offset == 0 ? 1697 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1698 if (m == NULL) 1699 return (NULL); 1700 fl->mbuf_allocated++; 1701 if (clm != NULL) { 1702 m_extaddref(m, payload, blen, &clm->refcount, 1703 rxb_free, swz->zone, sd->cl); 1704 if (sd->nmbuf++ == 0) 1705 counter_u64_add(extfree_refs, 1); 1706 } else { 1707 m_cljset(m, sd->cl, swz->type); 1708 sd->cl = NULL; /* consumed, not a recycle candidate */ 1709 } 1710 } 1711 if (fr_offset == 0) 1712 m->m_pkthdr.len = remaining; 1713 m->m_len = len; 1714 1715 if (fl->flags & FL_BUF_PACKING) { 1716 fl->rx_offset += blen; 1717 MPASS(fl->rx_offset <= hwb->size); 1718 if (fl->rx_offset < hwb->size) 1719 return (m); /* without advancing the cidx */ 1720 } 1721 1722 if (__predict_false(++fl->cidx % 8 == 0)) { 1723 uint16_t cidx = fl->cidx / 8; 1724 1725 if (__predict_false(cidx == fl->sidx)) 1726 fl->cidx = cidx = 0; 1727 fl->hw_cidx = cidx; 1728 } 1729 fl->rx_offset = 0; 1730 1731 return (m); 1732} 1733 1734static struct mbuf * 1735get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) 1736{ 1737 struct mbuf *m0, *m, **pnext; 1738 u_int remaining; 1739 const u_int total = G_RSPD_LEN(len_newbuf); 1740 1741 if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1742 M_ASSERTPKTHDR(fl->m0); 1743 MPASS(fl->m0->m_pkthdr.len == total); 1744 MPASS(fl->remaining < total); 1745 1746 m0 = fl->m0; 1747 pnext = fl->pnext; 1748 remaining = fl->remaining; 1749 fl->flags &= ~FL_BUF_RESUME; 1750 goto get_segment; 1751 } 1752 1753 if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1754 fl->rx_offset = 0; 1755 if (__predict_false(++fl->cidx % 8 == 0)) { 1756 uint16_t cidx = fl->cidx / 8; 1757 1758 if (__predict_false(cidx == fl->sidx)) 1759 fl->cidx = cidx = 0; 1760 fl->hw_cidx = cidx; 1761 } 1762 } 1763 1764 /* 1765 * Payload starts at rx_offset in the current hw buffer. Its length is 1766 * 'len' and it may span multiple hw buffers. 
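 * A frame that spans hw buffers is returned as an mbuf chain, built with
 * one get_scatter_segment() call per buffer.  If an mbuf allocation fails
 * partway through, the partial chain is stashed in the fl and
 * FL_BUF_RESUME is set so that a later call resumes where this one
 * stopped.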
1767 */ 1768 1769 m0 = get_scatter_segment(sc, fl, 0, total); 1770 if (m0 == NULL) 1771 return (NULL); 1772 remaining = total - m0->m_len; 1773 pnext = &m0->m_next; 1774 while (remaining > 0) { 1775get_segment: 1776 MPASS(fl->rx_offset == 0); 1777 m = get_scatter_segment(sc, fl, total - remaining, remaining); 1778 if (__predict_false(m == NULL)) { 1779 fl->m0 = m0; 1780 fl->pnext = pnext; 1781 fl->remaining = remaining; 1782 fl->flags |= FL_BUF_RESUME; 1783 return (NULL); 1784 } 1785 *pnext = m; 1786 pnext = &m->m_next; 1787 remaining -= m->m_len; 1788 } 1789 *pnext = NULL; 1790 1791 M_ASSERTPKTHDR(m0); 1792 return (m0); 1793} 1794 1795static int 1796t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1797{ 1798 struct sge_rxq *rxq = iq_to_rxq(iq); 1799 struct ifnet *ifp = rxq->ifp; 1800 struct adapter *sc = iq->adapter; 1801 const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1802#if defined(INET) || defined(INET6) 1803 struct lro_ctrl *lro = &rxq->lro; 1804#endif 1805 static const int sw_hashtype[4][2] = { 1806 {M_HASHTYPE_NONE, M_HASHTYPE_NONE}, 1807 {M_HASHTYPE_RSS_IPV4, M_HASHTYPE_RSS_IPV6}, 1808 {M_HASHTYPE_RSS_TCP_IPV4, M_HASHTYPE_RSS_TCP_IPV6}, 1809 {M_HASHTYPE_RSS_UDP_IPV4, M_HASHTYPE_RSS_UDP_IPV6}, 1810 }; 1811 1812 KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 1813 rss->opcode)); 1814 1815 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; 1816 m0->m_len -= sc->params.sge.fl_pktshift; 1817 m0->m_data += sc->params.sge.fl_pktshift; 1818 1819 m0->m_pkthdr.rcvif = ifp; 1820 M_HASHTYPE_SET(m0, sw_hashtype[rss->hash_type][rss->ipv6]); 1821 m0->m_pkthdr.flowid = be32toh(rss->hash_val); 1822 1823 if (cpl->csum_calc && !(cpl->err_vec & sc->params.tp.err_vec_mask)) { 1824 if (ifp->if_capenable & IFCAP_RXCSUM && 1825 cpl->l2info & htobe32(F_RXF_IP)) { 1826 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1827 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1828 rxq->rxcsum++; 1829 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1830 cpl->l2info & htobe32(F_RXF_IP6)) { 1831 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1832 CSUM_PSEUDO_HDR); 1833 rxq->rxcsum++; 1834 } 1835 1836 if (__predict_false(cpl->ip_frag)) 1837 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1838 else 1839 m0->m_pkthdr.csum_data = 0xffff; 1840 } 1841 1842 if (cpl->vlan_ex) { 1843 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1844 m0->m_flags |= M_VLANTAG; 1845 rxq->vlan_extraction++; 1846 } 1847 1848#if defined(INET) || defined(INET6) 1849 if (iq->flags & IQ_LRO_ENABLED) { 1850 if (sort_before_lro(lro)) { 1851 tcp_lro_queue_mbuf(lro, m0); 1852 return (0); /* queued for sort, then LRO */ 1853 } 1854 if (tcp_lro_rx(lro, m0, 0) == 0) 1855 return (0); /* queued for LRO */ 1856 } 1857#endif 1858 ifp->if_input(ifp, m0); 1859 1860 return (0); 1861} 1862 1863/* 1864 * Must drain the wrq or make sure that someone else will. 
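 * (t4_wrq_tx_locked() drains the list inline when it can; this deferred
 * path picks up work that was queued behind incomplete WRs, which
 * commit_wrq_wr is otherwise expected to flush.)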
1865 */ 1866static void 1867wrq_tx_drain(void *arg, int n) 1868{ 1869 struct sge_wrq *wrq = arg; 1870 struct sge_eq *eq = &wrq->eq; 1871 1872 EQ_LOCK(eq); 1873 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 1874 drain_wrq_wr_list(wrq->adapter, wrq); 1875 EQ_UNLOCK(eq); 1876} 1877 1878static void 1879drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 1880{ 1881 struct sge_eq *eq = &wrq->eq; 1882 u_int available, dbdiff; /* # of hardware descriptors */ 1883 u_int n; 1884 struct wrqe *wr; 1885 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 1886 1887 EQ_LOCK_ASSERT_OWNED(eq); 1888 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 1889 wr = STAILQ_FIRST(&wrq->wr_list); 1890 MPASS(wr != NULL); /* Must be called with something useful to do */ 1891 MPASS(eq->pidx == eq->dbidx); 1892 dbdiff = 0; 1893 1894 do { 1895 eq->cidx = read_hw_cidx(eq); 1896 if (eq->pidx == eq->cidx) 1897 available = eq->sidx - 1; 1898 else 1899 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 1900 1901 MPASS(wr->wrq == wrq); 1902 n = howmany(wr->wr_len, EQ_ESIZE); 1903 if (available < n) 1904 break; 1905 1906 dst = (void *)&eq->desc[eq->pidx]; 1907 if (__predict_true(eq->sidx - eq->pidx > n)) { 1908 /* Won't wrap, won't end exactly at the status page. */ 1909 bcopy(&wr->wr[0], dst, wr->wr_len); 1910 eq->pidx += n; 1911 } else { 1912 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 1913 1914 bcopy(&wr->wr[0], dst, first_portion); 1915 if (wr->wr_len > first_portion) { 1916 bcopy(&wr->wr[first_portion], &eq->desc[0], 1917 wr->wr_len - first_portion); 1918 } 1919 eq->pidx = n - (eq->sidx - eq->pidx); 1920 } 1921 wrq->tx_wrs_copied++; 1922 1923 if (available < eq->sidx / 4 && 1924 atomic_cmpset_int(&eq->equiq, 0, 1)) { 1925 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 1926 F_FW_WR_EQUEQ); 1927 eq->equeqidx = eq->pidx; 1928 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 1929 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1930 eq->equeqidx = eq->pidx; 1931 } 1932 1933 dbdiff += n; 1934 if (dbdiff >= 16) { 1935 ring_eq_db(sc, eq, dbdiff); 1936 dbdiff = 0; 1937 } 1938 1939 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1940 free_wrqe(wr); 1941 MPASS(wrq->nwr_pending > 0); 1942 wrq->nwr_pending--; 1943 MPASS(wrq->ndesc_needed >= n); 1944 wrq->ndesc_needed -= n; 1945 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 1946 1947 if (dbdiff) 1948 ring_eq_db(sc, eq, dbdiff); 1949} 1950 1951/* 1952 * Doesn't fail. Holds on to work requests it can't send right away. 1953 */ 1954void 1955t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 1956{ 1957#ifdef INVARIANTS 1958 struct sge_eq *eq = &wrq->eq; 1959#endif 1960 1961 EQ_LOCK_ASSERT_OWNED(eq); 1962 MPASS(wr != NULL); 1963 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 1964 MPASS((wr->wr_len & 0x7) == 0); 1965 1966 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 1967 wrq->nwr_pending++; 1968 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 1969 1970 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 1971 return; /* commit_wrq_wr will drain wr_list as well. */ 1972 1973 drain_wrq_wr_list(sc, wrq); 1974 1975 /* Doorbell must have caught up to the pidx. 
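 * (drain_wrq_wr_list() rings the doorbell for everything it copies into
 * the descriptor ring, so dbidx has caught up with pidx by the time it
 * returns.)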
*/ 1976 MPASS(eq->pidx == eq->dbidx); 1977} 1978 1979void 1980t4_update_fl_bufsize(struct ifnet *ifp) 1981{ 1982 struct vi_info *vi = ifp->if_softc; 1983 struct adapter *sc = vi->pi->adapter; 1984 struct sge_rxq *rxq; 1985#ifdef TCP_OFFLOAD 1986 struct sge_ofld_rxq *ofld_rxq; 1987#endif 1988 struct sge_fl *fl; 1989 int i, maxp, mtu = ifp->if_mtu; 1990 1991 maxp = mtu_to_max_payload(sc, mtu, 0); 1992 for_each_rxq(vi, i, rxq) { 1993 fl = &rxq->fl; 1994 1995 FL_LOCK(fl); 1996 find_best_refill_source(sc, fl, maxp); 1997 FL_UNLOCK(fl); 1998 } 1999#ifdef TCP_OFFLOAD 2000 maxp = mtu_to_max_payload(sc, mtu, 1); 2001 for_each_ofld_rxq(vi, i, ofld_rxq) { 2002 fl = &ofld_rxq->fl; 2003 2004 FL_LOCK(fl); 2005 find_best_refill_source(sc, fl, maxp); 2006 FL_UNLOCK(fl); 2007 } 2008#endif 2009} 2010 2011static inline int 2012mbuf_nsegs(struct mbuf *m) 2013{ 2014 2015 M_ASSERTPKTHDR(m); 2016 KASSERT(m->m_pkthdr.l5hlen > 0, 2017 ("%s: mbuf %p missing information on # of segments.", __func__, m)); 2018 2019 return (m->m_pkthdr.l5hlen); 2020} 2021 2022static inline void 2023set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 2024{ 2025 2026 M_ASSERTPKTHDR(m); 2027 m->m_pkthdr.l5hlen = nsegs; 2028} 2029 2030static inline int 2031mbuf_len16(struct mbuf *m) 2032{ 2033 int n; 2034 2035 M_ASSERTPKTHDR(m); 2036 n = m->m_pkthdr.PH_loc.eight[0]; 2037 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2038 2039 return (n); 2040} 2041 2042static inline void 2043set_mbuf_len16(struct mbuf *m, uint8_t len16) 2044{ 2045 2046 M_ASSERTPKTHDR(m); 2047 m->m_pkthdr.PH_loc.eight[0] = len16; 2048} 2049 2050static inline int 2051needs_tso(struct mbuf *m) 2052{ 2053 2054 M_ASSERTPKTHDR(m); 2055 2056 return (m->m_pkthdr.csum_flags & CSUM_TSO); 2057} 2058 2059static inline int 2060needs_l3_csum(struct mbuf *m) 2061{ 2062 2063 M_ASSERTPKTHDR(m); 2064 2065 return (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)); 2066} 2067 2068static inline int 2069needs_l4_csum(struct mbuf *m) 2070{ 2071 2072 M_ASSERTPKTHDR(m); 2073 2074 return (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 2075 CSUM_TCP_IPV6 | CSUM_TSO)); 2076} 2077 2078static inline int 2079needs_vlan_insertion(struct mbuf *m) 2080{ 2081 2082 M_ASSERTPKTHDR(m); 2083 2084 return (m->m_flags & M_VLANTAG); 2085} 2086 2087static void * 2088m_advance(struct mbuf **pm, int *poffset, int len) 2089{ 2090 struct mbuf *m = *pm; 2091 int offset = *poffset; 2092 uintptr_t p = 0; 2093 2094 MPASS(len > 0); 2095 2096 for (;;) { 2097 if (offset + len < m->m_len) { 2098 offset += len; 2099 p = mtod(m, uintptr_t) + offset; 2100 break; 2101 } 2102 len -= m->m_len - offset; 2103 m = m->m_next; 2104 offset = 0; 2105 MPASS(m != NULL); 2106 } 2107 *poffset = offset; 2108 *pm = m; 2109 return ((void *)p); 2110} 2111 2112/* 2113 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2114 * must have at least one mbuf that's not empty. 2115 */ 2116static inline int 2117count_mbuf_nsegs(struct mbuf *m) 2118{ 2119 vm_paddr_t lastb, next; 2120 vm_offset_t va; 2121 int len, nsegs; 2122 2123 MPASS(m != NULL); 2124 2125 nsegs = 0; 2126 lastb = 0; 2127 for (; m; m = m->m_next) { 2128 2129 len = m->m_len; 2130 if (__predict_false(len == 0)) 2131 continue; 2132 va = mtod(m, vm_offset_t); 2133 next = pmap_kextract(va); 2134 nsegs += sglist_count(m->m_data, len); 2135 if (lastb + 1 == next) 2136 nsegs--; 2137 lastb = pmap_kextract(va + len - 1); 2138 } 2139 2140 MPASS(nsegs > 0); 2141 return (nsegs); 2142} 2143 2144/* 2145 * Analyze the mbuf to determine its tx needs. 
The mbuf passed in may change: 2146 * a) caller can assume it's been freed if this function returns with an error. 2147 * b) it may get defragged if the gather list is too long for the hardware. 2148 */ 2149 int 2150 parse_pkt(struct adapter *sc, struct mbuf **mp) 2151 { 2152 struct mbuf *m0 = *mp, *m; 2153 int rc, nsegs, defragged = 0, offset; 2154 struct ether_header *eh; 2155 void *l3hdr; 2156#if defined(INET) || defined(INET6) 2157 struct tcphdr *tcp; 2158#endif 2159 uint16_t eh_type; 2160 2161 M_ASSERTPKTHDR(m0); 2162 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2163 rc = EINVAL; 2164fail: 2165 m_freem(m0); 2166 *mp = NULL; 2167 return (rc); 2168 } 2169restart: 2170 /* 2171 * First count the number of gather list segments in the payload. 2172 * Defrag the mbuf if nsegs exceeds the hardware limit. 2173 */ 2174 M_ASSERTPKTHDR(m0); 2175 MPASS(m0->m_pkthdr.len > 0); 2176 nsegs = count_mbuf_nsegs(m0); 2177 if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { 2178 if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { 2179 rc = EFBIG; 2180 goto fail; 2181 } 2182 *mp = m0 = m; /* update caller's copy after defrag */ 2183 goto restart; 2184 } 2185 2186 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) { 2187 m0 = m_pullup(m0, m0->m_pkthdr.len); 2188 if (m0 == NULL) { 2189 /* Should have left well enough alone. */ 2190 rc = EFBIG; 2191 goto fail; 2192 } 2193 *mp = m0; /* update caller's copy after pullup */ 2194 goto restart; 2195 } 2196 set_mbuf_nsegs(m0, nsegs); 2197 if (sc->flags & IS_VF) 2198 set_mbuf_len16(m0, txpkt_vm_len16(nsegs, needs_tso(m0))); 2199 else 2200 set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); 2201 2202 if (!needs_tso(m0) && 2203 !(sc->flags & IS_VF && (needs_l3_csum(m0) || needs_l4_csum(m0)))) 2204 return (0); 2205 2206 m = m0; 2207 eh = mtod(m, struct ether_header *); 2208 eh_type = ntohs(eh->ether_type); 2209 if (eh_type == ETHERTYPE_VLAN) { 2210 struct ether_vlan_header *evh = (void *)eh; 2211 2212 eh_type = ntohs(evh->evl_proto); 2213 m0->m_pkthdr.l2hlen = sizeof(*evh); 2214 } else 2215 m0->m_pkthdr.l2hlen = sizeof(*eh); 2216 2217 offset = 0; 2218 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2219 2220 switch (eh_type) { 2221#ifdef INET6 2222 case ETHERTYPE_IPV6: 2223 { 2224 struct ip6_hdr *ip6 = l3hdr; 2225 2226 MPASS(!needs_tso(m0) || ip6->ip6_nxt == IPPROTO_TCP); 2227 2228 m0->m_pkthdr.l3hlen = sizeof(*ip6); 2229 break; 2230 } 2231#endif 2232#ifdef INET 2233 case ETHERTYPE_IP: 2234 { 2235 struct ip *ip = l3hdr; 2236 2237 m0->m_pkthdr.l3hlen = ip->ip_hl * 4; 2238 break; 2239 } 2240#endif 2241 default: 2242 panic("%s: ethertype 0x%04x unknown.
if_cxgbe must be compiled" 2243 " with the same INET/INET6 options as the kernel.", 2244 __func__, eh_type); 2245 } 2246 2247#if defined(INET) || defined(INET6) 2248 if (needs_tso(m0)) { 2249 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2250 m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2251 } 2252#endif 2253 MPASS(m0 == *mp); 2254 return (0); 2255} 2256 2257void * 2258start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2259{ 2260 struct sge_eq *eq = &wrq->eq; 2261 struct adapter *sc = wrq->adapter; 2262 int ndesc, available; 2263 struct wrqe *wr; 2264 void *w; 2265 2266 MPASS(len16 > 0); 2267 ndesc = howmany(len16, EQ_ESIZE / 16); 2268 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2269 2270 EQ_LOCK(eq); 2271 2272 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2273 drain_wrq_wr_list(sc, wrq); 2274 2275 if (!STAILQ_EMPTY(&wrq->wr_list)) { 2276slowpath: 2277 EQ_UNLOCK(eq); 2278 wr = alloc_wrqe(len16 * 16, wrq); 2279 if (__predict_false(wr == NULL)) 2280 return (NULL); 2281 cookie->pidx = -1; 2282 cookie->ndesc = ndesc; 2283 return (&wr->wr); 2284 } 2285 2286 eq->cidx = read_hw_cidx(eq); 2287 if (eq->pidx == eq->cidx) 2288 available = eq->sidx - 1; 2289 else 2290 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2291 if (available < ndesc) 2292 goto slowpath; 2293 2294 cookie->pidx = eq->pidx; 2295 cookie->ndesc = ndesc; 2296 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2297 2298 w = &eq->desc[eq->pidx]; 2299 IDXINCR(eq->pidx, ndesc, eq->sidx); 2300 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 2301 w = &wrq->ss[0]; 2302 wrq->ss_pidx = cookie->pidx; 2303 wrq->ss_len = len16 * 16; 2304 } 2305 2306 EQ_UNLOCK(eq); 2307 2308 return (w); 2309} 2310 2311void 2312commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2313{ 2314 struct sge_eq *eq = &wrq->eq; 2315 struct adapter *sc = wrq->adapter; 2316 int ndesc, pidx; 2317 struct wrq_cookie *prev, *next; 2318 2319 if (cookie->pidx == -1) { 2320 struct wrqe *wr = __containerof(w, struct wrqe, wr); 2321 2322 t4_wrq_tx(sc, wr); 2323 return; 2324 } 2325 2326 if (__predict_false(w == &wrq->ss[0])) { 2327 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2328 2329 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 2330 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2331 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2332 wrq->tx_wrs_ss++; 2333 } else 2334 wrq->tx_wrs_direct++; 2335 2336 EQ_LOCK(eq); 2337 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 2338 pidx = cookie->pidx; 2339 MPASS(pidx >= 0 && pidx < eq->sidx); 2340 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2341 next = TAILQ_NEXT(cookie, link); 2342 if (prev == NULL) { 2343 MPASS(pidx == eq->dbidx); 2344 if (next == NULL || ndesc >= 16) { 2345 int available; 2346 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2347 2348 /* 2349 * Note that the WR via which we'll request tx updates 2350 * is at pidx and not eq->pidx, which has moved on 2351 * already. 
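 *
 * Worked example (numbers assumed): with eq->sidx = 512, a WR that
 * started at pidx = 500 and used ndesc = 16 descriptors leaves eq->pidx
 * at (500 + 16) % 512 = 4, but the EQUIQ/EQUEQ bits must be set in the
 * WR header at descriptor 500, where this WR actually begins.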
2352 */ 2353 dst = (void *)&eq->desc[pidx]; 2354 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2355 if (available < eq->sidx / 4 && 2356 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2357 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2358 F_FW_WR_EQUEQ); 2359 eq->equeqidx = pidx; 2360 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 2361 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2362 eq->equeqidx = pidx; 2363 } 2364 2365 ring_eq_db(wrq->adapter, eq, ndesc); 2366 } else { 2367 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 2368 next->pidx = pidx; 2369 next->ndesc += ndesc; 2370 } 2371 } else { 2372 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 2373 prev->ndesc += ndesc; 2374 } 2375 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 2376 2377 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2378 drain_wrq_wr_list(sc, wrq); 2379 2380#ifdef INVARIANTS 2381 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 2382 /* Doorbell must have caught up to the pidx. */ 2383 MPASS(wrq->eq.pidx == wrq->eq.dbidx); 2384 } 2385#endif 2386 EQ_UNLOCK(eq); 2387} 2388 2389static u_int 2390can_resume_eth_tx(struct mp_ring *r) 2391{ 2392 struct sge_eq *eq = r->cookie; 2393 2394 return (total_available_tx_desc(eq) > eq->sidx / 8); 2395} 2396 2397static inline int 2398cannot_use_txpkts(struct mbuf *m) 2399{ 2400 /* maybe put a GL limit too, to avoid silliness? */ 2401 2402 return (needs_tso(m)); 2403} 2404 2405static inline int 2406discard_tx(struct sge_eq *eq) 2407{ 2408 2409 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 2410} 2411 2412/* 2413 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 2414 * be consumed. Return the actual number consumed. 0 indicates a stall. 2415 */ 2416static u_int 2417eth_tx(struct mp_ring *r, u_int cidx, u_int pidx) 2418{ 2419 struct sge_txq *txq = r->cookie; 2420 struct sge_eq *eq = &txq->eq; 2421 struct ifnet *ifp = txq->ifp; 2422 struct vi_info *vi = ifp->if_softc; 2423 struct port_info *pi = vi->pi; 2424 struct adapter *sc = pi->adapter; 2425 u_int total, remaining; /* # of packets */ 2426 u_int available, dbdiff; /* # of hardware descriptors */ 2427 u_int n, next_cidx; 2428 struct mbuf *m0, *tail; 2429 struct txpkts txp; 2430 struct fw_eth_tx_pkts_wr *wr; /* any fw WR struct will do */ 2431 2432 remaining = IDXDIFF(pidx, cidx, r->size); 2433 MPASS(remaining > 0); /* Must not be called without work to do. */ 2434 total = 0; 2435 2436 TXQ_LOCK(txq); 2437 if (__predict_false(discard_tx(eq))) { 2438 while (cidx != pidx) { 2439 m0 = r->items[cidx]; 2440 m_freem(m0); 2441 if (++cidx == r->size) 2442 cidx = 0; 2443 } 2444 reclaim_tx_descs(txq, 2048); 2445 total = remaining; 2446 goto done; 2447 } 2448 2449 /* How many hardware descriptors do we have readily available. 
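 * One slot is always held back so that hw pidx == hw cidx means "empty"
 * rather than "full".  Worked example (numbers assumed): with sidx = 1024,
 * pidx = 90, and cidx = 100, IDXDIFF(cidx, pidx, sidx) is 10 and 9
 * descriptors are usable.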
*/ 2450 if (eq->pidx == eq->cidx) 2451 available = eq->sidx - 1; 2452 else 2453 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2454 dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); 2455 2456 while (remaining > 0) { 2457 2458 m0 = r->items[cidx]; 2459 M_ASSERTPKTHDR(m0); 2460 MPASS(m0->m_nextpkt == NULL); 2461 2462 if (available < SGE_MAX_WR_NDESC) { 2463 available += reclaim_tx_descs(txq, 64); 2464 if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16)) 2465 break; /* out of descriptors */ 2466 } 2467 2468 next_cidx = cidx + 1; 2469 if (__predict_false(next_cidx == r->size)) 2470 next_cidx = 0; 2471 2472 wr = (void *)&eq->desc[eq->pidx]; 2473 if (sc->flags & IS_VF) { 2474 total++; 2475 remaining--; 2476 ETHER_BPF_MTAP(ifp, m0); 2477 n = write_txpkt_vm_wr(sc, txq, (void *)wr, m0, 2478 available); 2479 } else if (remaining > 1 && 2480 try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) { 2481 2482 /* pkts at cidx, next_cidx should both be in txp. */ 2483 MPASS(txp.npkt == 2); 2484 tail = r->items[next_cidx]; 2485 MPASS(tail->m_nextpkt == NULL); 2486 ETHER_BPF_MTAP(ifp, m0); 2487 ETHER_BPF_MTAP(ifp, tail); 2488 m0->m_nextpkt = tail; 2489 2490 if (__predict_false(++next_cidx == r->size)) 2491 next_cidx = 0; 2492 2493 while (next_cidx != pidx) { 2494 if (add_to_txpkts(r->items[next_cidx], &txp, 2495 available) != 0) 2496 break; 2497 tail->m_nextpkt = r->items[next_cidx]; 2498 tail = tail->m_nextpkt; 2499 ETHER_BPF_MTAP(ifp, tail); 2500 if (__predict_false(++next_cidx == r->size)) 2501 next_cidx = 0; 2502 } 2503 2504 n = write_txpkts_wr(txq, wr, m0, &txp, available); 2505 total += txp.npkt; 2506 remaining -= txp.npkt; 2507 } else { 2508 total++; 2509 remaining--; 2510 ETHER_BPF_MTAP(ifp, m0); 2511 n = write_txpkt_wr(txq, (void *)wr, m0, available); 2512 } 2513 MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC); 2514 2515 available -= n; 2516 dbdiff += n; 2517 IDXINCR(eq->pidx, n, eq->sidx); 2518 2519 if (total_available_tx_desc(eq) < eq->sidx / 4 && 2520 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2521 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2522 F_FW_WR_EQUEQ); 2523 eq->equeqidx = eq->pidx; 2524 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 2525 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2526 eq->equeqidx = eq->pidx; 2527 } 2528 2529 if (dbdiff >= 16 && remaining >= 4) { 2530 ring_eq_db(sc, eq, dbdiff); 2531 available += reclaim_tx_descs(txq, 4 * dbdiff); 2532 dbdiff = 0; 2533 } 2534 2535 cidx = next_cidx; 2536 } 2537 if (dbdiff != 0) { 2538 ring_eq_db(sc, eq, dbdiff); 2539 reclaim_tx_descs(txq, 32); 2540 } 2541done: 2542 TXQ_UNLOCK(txq); 2543 2544 return (total); 2545} 2546 2547static inline void 2548init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2549 int qsize) 2550{ 2551 2552 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 2553 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2554 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2555 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 2556 2557 iq->flags = 0; 2558 iq->adapter = sc; 2559 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2560 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2561 if (pktc_idx >= 0) { 2562 iq->intr_params |= F_QINTR_CNT_EN; 2563 iq->intr_pktc_idx = pktc_idx; 2564 } 2565 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2566 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; 2567} 2568 2569static inline void 2570init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 2571{ 2572 2573 fl->qsize = qsize; 2574 
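/*
 * A worked example for the line below (values assumed): with qsize = 1024,
 * EQ_ESIZE = 64, and a 64-byte status page, sidx = 1024 - 64 / 64 = 1023;
 * the last 64-byte slot of the ring holds the status page instead of
 * freelist entries.
 */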
fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 2575 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2576 if (sc->flags & BUF_PACKING_OK && 2577 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 2578 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 2579 fl->flags |= FL_BUF_PACKING; 2580 find_best_refill_source(sc, fl, maxp); 2581 find_safe_refill_source(sc, fl); 2582} 2583 2584static inline void 2585init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, 2586 uint8_t tx_chan, uint16_t iqid, char *name) 2587{ 2588 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2589 2590 eq->flags = eqtype & EQ_TYPEMASK; 2591 eq->tx_chan = tx_chan; 2592 eq->iqid = iqid; 2593 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 2594 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2595} 2596 2597static int 2598alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 2599 bus_dmamap_t *map, bus_addr_t *pa, void **va) 2600{ 2601 int rc; 2602 2603 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2604 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2605 if (rc != 0) { 2606 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2607 goto done; 2608 } 2609 2610 rc = bus_dmamem_alloc(*tag, va, 2611 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2612 if (rc != 0) { 2613 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2614 goto done; 2615 } 2616 2617 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 2618 if (rc != 0) { 2619 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 2620 goto done; 2621 } 2622done: 2623 if (rc) 2624 free_ring(sc, *tag, *map, *pa, *va); 2625 2626 return (rc); 2627} 2628 2629static int 2630free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 2631 bus_addr_t pa, void *va) 2632{ 2633 if (pa) 2634 bus_dmamap_unload(tag, map); 2635 if (va) 2636 bus_dmamem_free(tag, va, map); 2637 if (tag) 2638 bus_dma_tag_destroy(tag); 2639 2640 return (0); 2641} 2642 2643/* 2644 * Allocates the ring for an ingress queue and an optional freelist. If the 2645 * freelist is specified it will be allocated and then associated with the 2646 * ingress queue. 2647 * 2648 * Returns errno on failure. Resources allocated up to that point may still be 2649 * allocated. Caller is responsible for cleanup in case this function fails. 2650 * 2651 * If the ingress queue will take interrupts directly then the intr_idx 2652 * specifies the vector, starting from 0. -1 means the interrupts for this 2653 * queue should be forwarded to the fwq. 
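 *
 * A minimal sketch of the calling convention (error handling elided;
 * alloc_fwq() and alloc_rxq() below are the kind of real callers meant):
 *
 *	rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, -1, tnl_cong(pi, cong_drop));
 *	if (rc != 0)
 *		... caller unwinds, e.g. via free_iq_fl() ...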
2654 */ 2655static int 2656alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, 2657 int intr_idx, int cong) 2658{ 2659 int rc, i, cntxt_id; 2660 size_t len; 2661 struct fw_iq_cmd c; 2662 struct port_info *pi = vi->pi; 2663 struct adapter *sc = iq->adapter; 2664 struct sge_params *sp = &sc->params.sge; 2665 __be32 v = 0; 2666 2667 len = iq->qsize * IQ_ESIZE; 2668 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 2669 (void **)&iq->desc); 2670 if (rc != 0) 2671 return (rc); 2672 2673 bzero(&c, sizeof(c)); 2674 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 2675 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 2676 V_FW_IQ_CMD_VFN(0)); 2677 2678 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 2679 FW_LEN16(c)); 2680 2681 /* Special handling for firmware event queue */ 2682 if (iq == &sc->sge.fwq) 2683 v |= F_FW_IQ_CMD_IQASYNCH; 2684 2685 if (intr_idx < 0) { 2686 /* Forwarded interrupts, all headed to fwq */ 2687 v |= F_FW_IQ_CMD_IQANDST; 2688 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); 2689 } else { 2690 KASSERT(intr_idx < sc->intr_count, 2691 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 2692 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 2693 } 2694 2695 c.type_to_iqandstindex = htobe32(v | 2696 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2697 V_FW_IQ_CMD_VIID(vi->viid) | 2698 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 2699 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 2700 F_FW_IQ_CMD_IQGTSMODE | 2701 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 2702 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 2703 c.iqsize = htobe16(iq->qsize); 2704 c.iqaddr = htobe64(iq->ba); 2705 if (cong >= 0) 2706 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 2707 2708 if (fl) { 2709 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 2710 2711 len = fl->qsize * EQ_ESIZE; 2712 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 2713 &fl->ba, (void **)&fl->desc); 2714 if (rc) 2715 return (rc); 2716 2717 /* Allocate space for one software descriptor per buffer. */ 2718 rc = alloc_fl_sdesc(fl); 2719 if (rc != 0) { 2720 device_printf(sc->dev, 2721 "failed to setup fl software descriptors: %d\n", 2722 rc); 2723 return (rc); 2724 } 2725 2726 if (fl->flags & FL_BUF_PACKING) { 2727 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); 2728 fl->buf_boundary = sp->pack_boundary; 2729 } else { 2730 fl->lowat = roundup2(sp->fl_starve_threshold, 8); 2731 fl->buf_boundary = 16; 2732 } 2733 if (fl_pad && fl->buf_boundary < sp->pad_boundary) 2734 fl->buf_boundary = sp->pad_boundary; 2735 2736 c.iqns_to_fl0congen |= 2737 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 2738 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 2739 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 2740 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 2741 0)); 2742 if (cong >= 0) { 2743 c.iqns_to_fl0congen |= 2744 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 2745 F_FW_IQ_CMD_FL0CONGCIF | 2746 F_FW_IQ_CMD_FL0CONGEN); 2747 } 2748 c.fl0dcaen_to_fl0cidxfthresh = 2749 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 2750 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B) | 2751 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
2752 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 2753 c.fl0size = htobe16(fl->qsize); 2754 c.fl0addr = htobe64(fl->ba); 2755 } 2756 2757 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2758 if (rc != 0) { 2759 device_printf(sc->dev, 2760 "failed to create ingress queue: %d\n", rc); 2761 return (rc); 2762 } 2763 2764 iq->cidx = 0; 2765 iq->gen = F_RSPD_GEN; 2766 iq->intr_next = iq->intr_params; 2767 iq->cntxt_id = be16toh(c.iqid); 2768 iq->abs_id = be16toh(c.physiqid); 2769 iq->flags |= IQ_ALLOCATED; 2770 2771 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 2772 if (cntxt_id >= sc->sge.niq) { 2773 panic("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 2774 cntxt_id, sc->sge.niq - 1); 2775 } 2776 sc->sge.iqmap[cntxt_id] = iq; 2777 2778 if (fl) { 2779 u_int qid; 2780 2781 iq->flags |= IQ_HAS_FL; 2782 fl->cntxt_id = be16toh(c.fl0id); 2783 fl->pidx = fl->cidx = 0; 2784 2785 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 2786 if (cntxt_id >= sc->sge.neq) { 2787 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 2788 __func__, cntxt_id, sc->sge.neq - 1); 2789 } 2790 sc->sge.eqmap[cntxt_id] = (void *)fl; 2791 2792 qid = fl->cntxt_id; 2793 if (isset(&sc->doorbells, DOORBELL_UDB)) { 2794 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 2795 uint32_t mask = (1 << s_qpp) - 1; 2796 volatile uint8_t *udb; 2797 2798 udb = sc->udbs_base + UDBS_DB_OFFSET; 2799 udb += (qid >> s_qpp) << PAGE_SHIFT; 2800 qid &= mask; 2801 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 2802 udb += qid << UDBS_SEG_SHIFT; 2803 qid = 0; 2804 } 2805 fl->udb = (volatile void *)udb; 2806 } 2807 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; 2808 2809 FL_LOCK(fl); 2810 /* Enough to make sure the SGE doesn't think it's starved */ 2811 refill_fl(sc, fl, fl->lowat); 2812 FL_UNLOCK(fl); 2813 } 2814 2815 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) { 2816 uint32_t param, val; 2817 2818 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2819 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 2820 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 2821 if (cong == 0) 2822 val = 1 << 19; 2823 else { 2824 val = 2 << 19; 2825 for (i = 0; i < 4; i++) { 2826 if (cong & (1 << i)) 2827 val |= 1 << (i << 2); 2828 } 2829 } 2830 2831 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2832 if (rc != 0) { 2833 /* report error but carry on */ 2834 device_printf(sc->dev, 2835 "failed to set congestion manager context for " 2836 "ingress queue %d: %d\n", iq->cntxt_id, rc); 2837 } 2838 } 2839 2840 /* Enable IQ interrupts */ 2841 atomic_store_rel_int(&iq->state, IQS_IDLE); 2842 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | 2843 V_INGRESSQID(iq->cntxt_id)); 2844 2845 return (0); 2846} 2847 2848static int 2849free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl) 2850{ 2851 int rc; 2852 struct adapter *sc = iq->adapter; 2853 device_t dev; 2854 2855 if (sc == NULL) 2856 return (0); /* nothing to do */ 2857 2858 dev = vi ? vi->dev : sc->dev; 2859 2860 if (iq->flags & IQ_ALLOCATED) { 2861 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 2862 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 2863 fl ?
fl->cntxt_id : 0xffff, 0xffff); 2864 if (rc != 0) { 2865 device_printf(dev, 2866 "failed to free queue %p: %d\n", iq, rc); 2867 return (rc); 2868 } 2869 iq->flags &= ~IQ_ALLOCATED; 2870 } 2871 2872 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 2873 2874 bzero(iq, sizeof(*iq)); 2875 2876 if (fl) { 2877 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 2878 fl->desc); 2879 2880 if (fl->sdesc) 2881 free_fl_sdesc(sc, fl); 2882 2883 if (mtx_initialized(&fl->fl_lock)) 2884 mtx_destroy(&fl->fl_lock); 2885 2886 bzero(fl, sizeof(*fl)); 2887 } 2888 2889 return (0); 2890} 2891 2892static void 2893add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 2894 struct sge_iq *iq) 2895{ 2896 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2897 2898 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, 2899 "bus address of descriptor ring"); 2900 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 2901 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); 2902 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 2903 CTLTYPE_INT | CTLFLAG_RD, &iq->abs_id, 0, sysctl_uint16, "I", 2904 "absolute id of the queue"); 2905 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2906 CTLTYPE_INT | CTLFLAG_RD, &iq->cntxt_id, 0, sysctl_uint16, "I", 2907 "SGE context id of the queue"); 2908 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 2909 CTLTYPE_INT | CTLFLAG_RD, &iq->cidx, 0, sysctl_uint16, "I", 2910 "consumer index"); 2911} 2912 2913static void 2914add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 2915 struct sysctl_oid *oid, struct sge_fl *fl) 2916{ 2917 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2918 2919 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2920 "freelist"); 2921 children = SYSCTL_CHILDREN(oid); 2922 2923 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 2924 &fl->ba, "bus address of descriptor ring"); 2925 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 2926 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, 2927 "desc ring size in bytes"); 2928 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2929 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 2930 "SGE context id of the freelist"); 2931 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 2932 fl_pad ? 1 : 0, "padding enabled"); 2933 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 2934 fl->flags & FL_BUF_PACKING ? 
1 : 0, "packing enabled"); 2935 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 2936 0, "consumer index"); 2937 if (fl->flags & FL_BUF_PACKING) { 2938 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 2939 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 2940 } 2941 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 2942 0, "producer index"); 2943 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 2944 CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 2945 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 2946 CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 2947 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 2948 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 2949 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 2950 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 2951 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 2952 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 2953} 2954 2955static int 2956alloc_fwq(struct adapter *sc) 2957{ 2958 int rc, intr_idx; 2959 struct sge_iq *fwq = &sc->sge.fwq; 2960 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2961 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2962 2963 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 2964 if (sc->flags & IS_VF) 2965 intr_idx = 0; 2966 else 2967 intr_idx = sc->intr_count > 1 ? 1 : 0; 2968 rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); 2969 if (rc != 0) { 2970 device_printf(sc->dev, 2971 "failed to create firmware event queue: %d\n", rc); 2972 return (rc); 2973 } 2974 2975 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 2976 NULL, "firmware event queue"); 2977 add_iq_sysctls(&sc->ctx, oid, fwq); 2978 2979 return (0); 2980} 2981 2982static int 2983free_fwq(struct adapter *sc) 2984{ 2985 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 2986} 2987 2988static int 2989alloc_mgmtq(struct adapter *sc) 2990{ 2991 int rc; 2992 struct sge_wrq *mgmtq = &sc->sge.mgmtq; 2993 char name[16]; 2994 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2995 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2996 2997 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, 2998 NULL, "management queue"); 2999 3000 snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); 3001 init_eq(sc, &mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, 3002 sc->sge.fwq.cntxt_id, name); 3003 rc = alloc_wrq(sc, NULL, mgmtq, oid); 3004 if (rc != 0) { 3005 device_printf(sc->dev, 3006 "failed to create management queue: %d\n", rc); 3007 return (rc); 3008 } 3009 3010 return (0); 3011} 3012 3013static int 3014free_mgmtq(struct adapter *sc) 3015{ 3016 3017 return free_wrq(sc, &sc->sge.mgmtq); 3018} 3019 3020int 3021tnl_cong(struct port_info *pi, int drop) 3022{ 3023 3024 if (drop == -1) 3025 return (-1); 3026 else if (drop == 1) 3027 return (0); 3028 else 3029 return (pi->rx_e_chan_map); 3030} 3031 3032static int 3033alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx, 3034 struct sysctl_oid *oid) 3035{ 3036 int rc; 3037 struct adapter *sc = vi->pi->adapter; 3038 struct sysctl_oid_list *children; 3039 char name[16]; 3040 3041 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx, 3042 tnl_cong(vi->pi, cong_drop)); 3043 if (rc != 0) 3044 return (rc); 3045 3046 if (idx == 0) 3047 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; 3048 else 3049 KASSERT(rxq->iq.cntxt_id + 
sc->sge.iq_base == rxq->iq.abs_id, 3050 ("iq_base mismatch")); 3051 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, 3052 ("PF with non-zero iq_base")); 3053 3054 /* 3055 * The freelist is just barely above the starvation threshold right now, 3056 * fill it up a bit more. 3057 */ 3058 FL_LOCK(&rxq->fl); 3059 refill_fl(sc, &rxq->fl, 128); 3060 FL_UNLOCK(&rxq->fl); 3061 3062#if defined(INET) || defined(INET6) 3063 rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs); 3064 if (rc != 0) 3065 return (rc); 3066 MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */ 3067 3068 if (vi->ifp->if_capenable & IFCAP_LRO) 3069 rxq->iq.flags |= IQ_LRO_ENABLED; 3070#endif 3071 rxq->ifp = vi->ifp; 3072 3073 children = SYSCTL_CHILDREN(oid); 3074 3075 snprintf(name, sizeof(name), "%d", idx); 3076 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3077 NULL, "rx queue"); 3078 children = SYSCTL_CHILDREN(oid); 3079 3080 add_iq_sysctls(&vi->ctx, oid, &rxq->iq); 3081#if defined(INET) || defined(INET6) 3082 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 3083 &rxq->lro.lro_queued, 0, NULL); 3084 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 3085 &rxq->lro.lro_flushed, 0, NULL); 3086#endif 3087 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 3088 &rxq->rxcsum, "# of times hardware assisted with checksum"); 3089 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", 3090 CTLFLAG_RD, &rxq->vlan_extraction, 3091 "# of times hardware extracted 802.1Q tag"); 3092 3093 add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); 3094 3095 return (rc); 3096} 3097 3098static int 3099free_rxq(struct vi_info *vi, struct sge_rxq *rxq) 3100{ 3101 int rc; 3102 3103#if defined(INET) || defined(INET6) 3104 if (rxq->lro.ifp) { 3105 tcp_lro_free(&rxq->lro); 3106 rxq->lro.ifp = NULL; 3107 } 3108#endif 3109 3110 rc = free_iq_fl(vi, &rxq->iq, &rxq->fl); 3111 if (rc == 0) 3112 bzero(rxq, sizeof(*rxq)); 3113 3114 return (rc); 3115} 3116 3117#ifdef TCP_OFFLOAD 3118static int 3119alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, 3120 int intr_idx, int idx, struct sysctl_oid *oid) 3121{ 3122 struct port_info *pi = vi->pi; 3123 int rc; 3124 struct sysctl_oid_list *children; 3125 char name[16]; 3126 3127 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0); 3128 if (rc != 0) 3129 return (rc); 3130 3131 children = SYSCTL_CHILDREN(oid); 3132 3133 snprintf(name, sizeof(name), "%d", idx); 3134 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3135 NULL, "rx queue"); 3136 add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq); 3137 add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); 3138 3139 return (rc); 3140} 3141 3142static int 3143free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) 3144{ 3145 int rc; 3146 3147 rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl); 3148 if (rc == 0) 3149 bzero(ofld_rxq, sizeof(*ofld_rxq)); 3150 3151 return (rc); 3152} 3153#endif 3154 3155#ifdef DEV_NETMAP 3156static int 3157alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 3158 int idx, struct sysctl_oid *oid) 3159{ 3160 int rc; 3161 struct sysctl_oid_list *children; 3162 struct sysctl_ctx_list *ctx; 3163 char name[16]; 3164 size_t len; 3165 struct adapter *sc = vi->pi->adapter; 3166 struct netmap_adapter *na = NA(vi->ifp); 3167 3168 MPASS(na != NULL); 3169 3170 len = vi->qsize_rxq * IQ_ESIZE; 3171 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 3172 
&nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 3173 if (rc != 0) 3174 return (rc); 3175 3176 len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3177 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 3178 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 3179 if (rc != 0) 3180 return (rc); 3181 3182 nm_rxq->vi = vi; 3183 nm_rxq->nid = idx; 3184 nm_rxq->iq_cidx = 0; 3185 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 3186 nm_rxq->iq_gen = F_RSPD_GEN; 3187 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 3188 nm_rxq->fl_sidx = na->num_rx_desc; 3189 nm_rxq->intr_idx = intr_idx; 3190 3191 ctx = &vi->ctx; 3192 children = SYSCTL_CHILDREN(oid); 3193 3194 snprintf(name, sizeof(name), "%d", idx); 3195 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 3196 "rx queue"); 3197 children = SYSCTL_CHILDREN(oid); 3198 3199 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3200 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 3201 "I", "absolute id of the queue"); 3202 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3203 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 3204 "I", "SGE context id of the queue"); 3205 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3206 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 3207 "consumer index"); 3208 3209 children = SYSCTL_CHILDREN(oid); 3210 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3211 "freelist"); 3212 children = SYSCTL_CHILDREN(oid); 3213 3214 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3215 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 3216 "I", "SGE context id of the freelist"); 3217 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 3218 &nm_rxq->fl_cidx, 0, "consumer index"); 3219 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 3220 &nm_rxq->fl_pidx, 0, "producer index"); 3221 3222 return (rc); 3223} 3224 3225 3226static int 3227free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 3228{ 3229 struct adapter *sc = vi->pi->adapter; 3230 3231 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 3232 nm_rxq->iq_desc); 3233 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 3234 nm_rxq->fl_desc); 3235 3236 return (0); 3237} 3238 3239static int 3240alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 3241 struct sysctl_oid *oid) 3242{ 3243 int rc; 3244 size_t len; 3245 struct port_info *pi = vi->pi; 3246 struct adapter *sc = pi->adapter; 3247 struct netmap_adapter *na = NA(vi->ifp); 3248 char name[16]; 3249 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3250 3251 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3252 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 3253 &nm_txq->ba, (void **)&nm_txq->desc); 3254 if (rc) 3255 return (rc); 3256 3257 nm_txq->pidx = nm_txq->cidx = 0; 3258 nm_txq->sidx = na->num_tx_desc; 3259 nm_txq->nid = idx; 3260 nm_txq->iqidx = iqidx; 3261 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3262 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | 3263 V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | 3264 V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); 3265 3266 snprintf(name, sizeof(name), "%d", idx); 3267 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3268 NULL, "netmap tx queue"); 3269 children = SYSCTL_CHILDREN(oid); 3270 3271 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3272 
&nm_txq->cntxt_id, 0, "SGE context id of the queue"); 3273 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 3274 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 3275 "consumer index"); 3276 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 3277 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 3278 "producer index"); 3279 3280 return (rc); 3281} 3282 3283static int 3284free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 3285{ 3286 struct adapter *sc = vi->pi->adapter; 3287 3288 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 3289 nm_txq->desc); 3290 3291 return (0); 3292} 3293#endif 3294 3295static int 3296ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 3297{ 3298 int rc, cntxt_id; 3299 struct fw_eq_ctrl_cmd c; 3300 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3301 3302 bzero(&c, sizeof(c)); 3303 3304 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 3305 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 3306 V_FW_EQ_CTRL_CMD_VFN(0)); 3307 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 3308 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 3309 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 3310 c.physeqid_pkd = htobe32(0); 3311 c.fetchszm_to_iqid = 3312 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 3313 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 3314 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 3315 c.dcaen_to_eqsize = 3316 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3317 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3318 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 3319 V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 3320 c.eqaddr = htobe64(eq->ba); 3321 3322 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3323 if (rc != 0) { 3324 device_printf(sc->dev, 3325 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 3326 return (rc); 3327 } 3328 eq->flags |= EQ_ALLOCATED; 3329 3330 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 3331 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3332 if (cntxt_id >= sc->sge.neq) 3333 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3334 cntxt_id, sc->sge.neq - 1); 3335 sc->sge.eqmap[cntxt_id] = eq; 3336 3337 return (rc); 3338} 3339 3340static int 3341eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3342{ 3343 int rc, cntxt_id; 3344 struct fw_eq_eth_cmd c; 3345 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3346 3347 bzero(&c, sizeof(c)); 3348 3349 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 3350 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 3351 V_FW_EQ_ETH_CMD_VFN(0)); 3352 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 3353 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 3354 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 3355 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 3356 c.fetchszm_to_iqid = 3357 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3358 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 3359 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 3360 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3361 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3362 V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 3363 c.eqaddr = htobe64(eq->ba); 3364 3365 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3366 if (rc != 0) { 3367 device_printf(vi->dev, 3368 "failed to create Ethernet egress queue: %d\n", rc); 3369 return (rc); 3370 } 3371 eq->flags 
|= EQ_ALLOCATED; 3372 3373 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 3374 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 3375 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3376 if (cntxt_id >= sc->sge.neq) 3377 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3378 cntxt_id, sc->sge.neq - 1); 3379 sc->sge.eqmap[cntxt_id] = eq; 3380 3381 return (rc); 3382} 3383 3384#ifdef TCP_OFFLOAD 3385static int 3386ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3387{ 3388 int rc, cntxt_id; 3389 struct fw_eq_ofld_cmd c; 3390 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3391 3392 bzero(&c, sizeof(c)); 3393 3394 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 3395 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 3396 V_FW_EQ_OFLD_CMD_VFN(0)); 3397 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 3398 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 3399 c.fetchszm_to_iqid = 3400 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3401 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 3402 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 3403 c.dcaen_to_eqsize = 3404 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3405 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3406 V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 3407 c.eqaddr = htobe64(eq->ba); 3408 3409 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3410 if (rc != 0) { 3411 device_printf(vi->dev, 3412 "failed to create egress queue for TCP offload: %d\n", rc); 3413 return (rc); 3414 } 3415 eq->flags |= EQ_ALLOCATED; 3416 3417 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 3418 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3419 if (cntxt_id >= sc->sge.neq) 3420 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3421 cntxt_id, sc->sge.neq - 1); 3422 sc->sge.eqmap[cntxt_id] = eq; 3423 3424 return (rc); 3425} 3426#endif 3427 3428static int 3429alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 3430{ 3431 int rc, qsize; 3432 size_t len; 3433 3434 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 3435 3436 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 3437 len = qsize * EQ_ESIZE; 3438 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 3439 &eq->ba, (void **)&eq->desc); 3440 if (rc) 3441 return (rc); 3442 3443 eq->pidx = eq->cidx = 0; 3444 eq->equeqidx = eq->dbidx = 0; 3445 eq->doorbells = sc->doorbells; 3446 3447 switch (eq->flags & EQ_TYPEMASK) { 3448 case EQ_CTRL: 3449 rc = ctrl_eq_alloc(sc, eq); 3450 break; 3451 3452 case EQ_ETH: 3453 rc = eth_eq_alloc(sc, vi, eq); 3454 break; 3455 3456#ifdef TCP_OFFLOAD 3457 case EQ_OFLD: 3458 rc = ofld_eq_alloc(sc, vi, eq); 3459 break; 3460#endif 3461 3462 default: 3463 panic("%s: invalid eq type %d.", __func__, 3464 eq->flags & EQ_TYPEMASK); 3465 } 3466 if (rc != 0) { 3467 device_printf(sc->dev, 3468 "failed to allocate egress queue(%d): %d\n", 3469 eq->flags & EQ_TYPEMASK, rc); 3470 } 3471 3472 if (isset(&eq->doorbells, DOORBELL_UDB) || 3473 isset(&eq->doorbells, DOORBELL_UDBWC) || 3474 isset(&eq->doorbells, DOORBELL_WCWR)) { 3475 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 3476 uint32_t mask = (1 << s_qpp) - 1; 3477 volatile uint8_t *udb; 3478 3479 udb = sc->udbs_base + UDBS_DB_OFFSET; 3480 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 3481 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 3482 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 3483 clrbit(&eq->doorbells, DOORBELL_WCWR); 3484 else { 3485 udb += eq->udb_qid << 
UDBS_SEG_SHIFT; /* seg offset */ 3486 eq->udb_qid = 0; 3487 } 3488 eq->udb = (volatile void *)udb; 3489 } 3490 3491 return (rc); 3492} 3493 3494static int 3495free_eq(struct adapter *sc, struct sge_eq *eq) 3496{ 3497 int rc; 3498 3499 if (eq->flags & EQ_ALLOCATED) { 3500 switch (eq->flags & EQ_TYPEMASK) { 3501 case EQ_CTRL: 3502 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 3503 eq->cntxt_id); 3504 break; 3505 3506 case EQ_ETH: 3507 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 3508 eq->cntxt_id); 3509 break; 3510 3511#ifdef TCP_OFFLOAD 3512 case EQ_OFLD: 3513 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 3514 eq->cntxt_id); 3515 break; 3516#endif 3517 3518 default: 3519 panic("%s: invalid eq type %d.", __func__, 3520 eq->flags & EQ_TYPEMASK); 3521 } 3522 if (rc != 0) { 3523 device_printf(sc->dev, 3524 "failed to free egress queue (%d): %d\n", 3525 eq->flags & EQ_TYPEMASK, rc); 3526 return (rc); 3527 } 3528 eq->flags &= ~EQ_ALLOCATED; 3529 } 3530 3531 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 3532 3533 if (mtx_initialized(&eq->eq_lock)) 3534 mtx_destroy(&eq->eq_lock); 3535 3536 bzero(eq, sizeof(*eq)); 3537 return (0); 3538} 3539 3540static int 3541alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, 3542 struct sysctl_oid *oid) 3543{ 3544 int rc; 3545 struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx; 3546 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3547 3548 rc = alloc_eq(sc, vi, &wrq->eq); 3549 if (rc) 3550 return (rc); 3551 3552 wrq->adapter = sc; 3553 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 3554 TAILQ_INIT(&wrq->incomplete_wrs); 3555 STAILQ_INIT(&wrq->wr_list); 3556 wrq->nwr_pending = 0; 3557 wrq->ndesc_needed = 0; 3558 3559 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3560 &wrq->eq.ba, "bus address of descriptor ring"); 3561 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3562 wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, 3563 "desc ring size in bytes"); 3564 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3565 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 3566 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3567 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 3568 "consumer index"); 3569 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 3570 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 3571 "producer index"); 3572 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 3573 wrq->eq.sidx, "status page index"); 3574 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 3575 &wrq->tx_wrs_direct, "# of work requests (direct)"); 3576 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 3577 &wrq->tx_wrs_copied, "# of work requests (copied)"); 3578 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, 3579 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); 3580 3581 return (rc); 3582} 3583 3584static int 3585free_wrq(struct adapter *sc, struct sge_wrq *wrq) 3586{ 3587 int rc; 3588 3589 rc = free_eq(sc, &wrq->eq); 3590 if (rc) 3591 return (rc); 3592 3593 bzero(wrq, sizeof(*wrq)); 3594 return (0); 3595} 3596 3597static int 3598alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, 3599 struct sysctl_oid *oid) 3600{ 3601 int rc; 3602 struct port_info *pi = vi->pi; 3603 struct adapter *sc = pi->adapter; 3604 struct sge_eq *eq = &txq->eq; 3605 char name[16]; 3606 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3607 3608 rc = 
mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 3609 M_CXGBE, M_WAITOK); 3610 if (rc != 0) { 3611 device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 3612 return (rc); 3613 } 3614 3615 rc = alloc_eq(sc, vi, eq); 3616 if (rc != 0) { 3617 mp_ring_free(txq->r); 3618 txq->r = NULL; 3619 return (rc); 3620 } 3621 3622 /* Can't fail after this point. */ 3623 3624 if (idx == 0) 3625 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; 3626 else 3627 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, 3628 ("eq_base mismatch")); 3629 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, 3630 ("PF with non-zero eq_base")); 3631 3632 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 3633 txq->ifp = vi->ifp; 3634 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 3635 if (sc->flags & IS_VF) 3636 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 3637 V_TXPKT_INTF(pi->tx_chan)); 3638 else 3639 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3640 V_TXPKT_INTF(pi->tx_chan) | 3641 V_TXPKT_PF(G_FW_VIID_PFN(vi->viid)) | 3642 V_TXPKT_VF(G_FW_VIID_VIN(vi->viid)) | 3643 V_TXPKT_VF_VLD(G_FW_VIID_VIVLD(vi->viid))); 3644 txq->tc_idx = -1; 3645 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 3646 M_ZERO | M_WAITOK); 3647 3648 snprintf(name, sizeof(name), "%d", idx); 3649 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3650 NULL, "tx queue"); 3651 children = SYSCTL_CHILDREN(oid); 3652 3653 SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3654 &eq->ba, "bus address of descriptor ring"); 3655 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3656 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, 3657 "desc ring size in bytes"); 3658 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 3659 &eq->abs_id, 0, "absolute id of the queue"); 3660 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3661 &eq->cntxt_id, 0, "SGE context id of the queue"); 3662 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 3663 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 3664 "consumer index"); 3665 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 3666 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 3667 "producer index"); 3668 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 3669 eq->sidx, "status page index"); 3670 3671 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", 3672 CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I", 3673 "traffic class (-1 means none)"); 3674 3675 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 3676 &txq->txcsum, "# of times hardware assisted with checksum"); 3677 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", 3678 CTLFLAG_RD, &txq->vlan_insertion, 3679 "# of times hardware inserted 802.1Q tag"); 3680 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 3681 &txq->tso_wrs, "# of TSO work requests"); 3682 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 3683 &txq->imm_wrs, "# of work requests with immediate data"); 3684 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 3685 &txq->sgl_wrs, "# of work requests with direct SGL"); 3686 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 3687 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 3688 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", 3689 CTLFLAG_RD, &txq->txpkts0_wrs, 3690 "# of txpkts (type 0) work requests"); 3691 
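/*
 * "type 0" and "type 1" here are the two coalesced-WR layouts sized by
 * txpkts0_len16() and txpkts1_len16() later in this file: a type 0 WR
 * spends an ulp_txpkt + ulptx_idata header on every packet it carries,
 * while type 1 needs only a cpl_tx_pkt_core + SGL per packet.
 */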
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", 3692 CTLFLAG_RD, &txq->txpkts1_wrs, 3693 "# of txpkts (type 1) work requests"); 3694 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", 3695 CTLFLAG_RD, &txq->txpkts0_pkts, 3696 "# of frames tx'd using type0 txpkts work requests"); 3697 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", 3698 CTLFLAG_RD, &txq->txpkts1_pkts, 3699 "# of frames tx'd using type1 txpkts work requests"); 3700 3701 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_enqueues", 3702 CTLFLAG_RD, &txq->r->enqueues, 3703 "# of enqueues to the mp_ring for this queue"); 3704 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_drops", 3705 CTLFLAG_RD, &txq->r->drops, 3706 "# of drops in the mp_ring for this queue"); 3707 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_starts", 3708 CTLFLAG_RD, &txq->r->starts, 3709 "# of normal consumer starts in the mp_ring for this queue"); 3710 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_stalls", 3711 CTLFLAG_RD, &txq->r->stalls, 3712 "# of consumer stalls in the mp_ring for this queue"); 3713 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_restarts", 3714 CTLFLAG_RD, &txq->r->restarts, 3715 "# of consumer restarts in the mp_ring for this queue"); 3716 SYSCTL_ADD_COUNTER_U64(&vi->ctx, children, OID_AUTO, "r_abdications", 3717 CTLFLAG_RD, &txq->r->abdications, 3718 "# of consumer abdications in the mp_ring for this queue"); 3719 3720 return (0); 3721} 3722 3723static int 3724free_txq(struct vi_info *vi, struct sge_txq *txq) 3725{ 3726 int rc; 3727 struct adapter *sc = vi->pi->adapter; 3728 struct sge_eq *eq = &txq->eq; 3729 3730 rc = free_eq(sc, eq); 3731 if (rc) 3732 return (rc); 3733 3734 sglist_free(txq->gl); 3735 free(txq->sdesc, M_CXGBE); 3736 mp_ring_free(txq->r); 3737 3738 bzero(txq, sizeof(*txq)); 3739 return (0); 3740} 3741 3742static void 3743oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3744{ 3745 bus_addr_t *ba = arg; 3746 3747 KASSERT(nseg == 1, 3748 ("%s meant for single segment mappings only.", __func__)); 3749 3750 *ba = error ? 0 : segs->ds_addr; 3751} 3752 3753static inline void 3754ring_fl_db(struct adapter *sc, struct sge_fl *fl) 3755{ 3756 uint32_t n, v; 3757 3758 n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); 3759 MPASS(n > 0); 3760 3761 wmb(); 3762 v = fl->dbval | V_PIDX(n); 3763 if (fl->udb) 3764 *fl->udb = htole32(v); 3765 else 3766 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); 3767 IDXINCR(fl->dbidx, n, fl->sidx); 3768} 3769 3770/* 3771 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are 3772 * recycled do not count towards this allocation budget. 3773 * 3774 * Returns non-zero to indicate that this freelist should be added to the list 3775 * of starving freelists. 3776 */ 3777static int 3778refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 3779{ 3780 __be64 *d; 3781 struct fl_sdesc *sd; 3782 uintptr_t pa; 3783 caddr_t cl; 3784 struct cluster_layout *cll; 3785 struct sw_zone_info *swz; 3786 struct cluster_metadata *clm; 3787 uint16_t max_pidx; 3788 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 3789 3790 FL_LOCK_ASSERT_OWNED(fl); 3791 3792 /* 3793 * We always stop at the beginning of the hardware descriptor that's just 3794 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 3795 * which would mean an empty freelist to the chip. 3796 */ 3797 max_pidx = __predict_false(hw_cidx == 0) ? 
fl->sidx - 1 : hw_cidx - 1; 3798 if (fl->pidx == max_pidx * 8) 3799 return (0); 3800 3801 d = &fl->desc[fl->pidx]; 3802 sd = &fl->sdesc[fl->pidx]; 3803 cll = &fl->cll_def; /* default layout */ 3804 swz = &sc->sge.sw_zone_info[cll->zidx]; 3805 3806 while (n > 0) { 3807 3808 if (sd->cl != NULL) { 3809 3810 if (sd->nmbuf == 0) { 3811 /* 3812 * Fast recycle without involving any atomics on 3813 * the cluster's metadata (if the cluster has 3814 * metadata). This happens when all frames 3815 * received in the cluster were small enough to 3816 * fit within a single mbuf each. 3817 */ 3818 fl->cl_fast_recycled++; 3819#ifdef INVARIANTS 3820 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3821 if (clm != NULL) 3822 MPASS(clm->refcount == 1); 3823#endif 3824 goto recycled_fast; 3825 } 3826 3827 /* 3828 * Cluster is guaranteed to have metadata. Clusters 3829 * without metadata always take the fast recycle path 3830 * when they're recycled. 3831 */ 3832 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3833 MPASS(clm != NULL); 3834 3835 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3836 fl->cl_recycled++; 3837 counter_u64_add(extfree_rels, 1); 3838 goto recycled; 3839 } 3840 sd->cl = NULL; /* gave up my reference */ 3841 } 3842 MPASS(sd->cl == NULL); 3843alloc: 3844 cl = uma_zalloc(swz->zone, M_NOWAIT); 3845 if (__predict_false(cl == NULL)) { 3846 if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 3847 fl->cll_def.zidx == fl->cll_alt.zidx) 3848 break; 3849 3850 /* fall back to the safe zone */ 3851 cll = &fl->cll_alt; 3852 swz = &sc->sge.sw_zone_info[cll->zidx]; 3853 goto alloc; 3854 } 3855 fl->cl_allocated++; 3856 n--; 3857 3858 pa = pmap_kextract((vm_offset_t)cl); 3859 pa += cll->region1; 3860 sd->cl = cl; 3861 sd->cll = *cll; 3862 *d = htobe64(pa | cll->hwidx); 3863 clm = cl_metadata(sc, fl, cll, cl); 3864 if (clm != NULL) { 3865recycled: 3866#ifdef INVARIANTS 3867 clm->sd = sd; 3868#endif 3869 clm->refcount = 1; 3870 } 3871 sd->nmbuf = 0; 3872recycled_fast: 3873 d++; 3874 sd++; 3875 if (__predict_false(++fl->pidx % 8 == 0)) { 3876 uint16_t pidx = fl->pidx / 8; 3877 3878 if (__predict_false(pidx == fl->sidx)) { 3879 fl->pidx = 0; 3880 pidx = 0; 3881 sd = fl->sdesc; 3882 d = fl->desc; 3883 } 3884 if (pidx == max_pidx) 3885 break; 3886 3887 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 3888 ring_fl_db(sc, fl); 3889 } 3890 } 3891 3892 if (fl->pidx / 8 != fl->dbidx) 3893 ring_fl_db(sc, fl); 3894 3895 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 3896} 3897 3898/* 3899 * Attempt to refill all starving freelists. 
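 * This is the sfl_callout handler; as long as any freelist stays on the
 * starving list it reschedules itself every hz / 5 ticks (200 ms at
 * hz = 1000).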
3900 */ 3901static void 3902refill_sfl(void *arg) 3903{ 3904 struct adapter *sc = arg; 3905 struct sge_fl *fl, *fl_temp; 3906 3907 mtx_assert(&sc->sfl_lock, MA_OWNED); 3908 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 3909 FL_LOCK(fl); 3910 refill_fl(sc, fl, 64); 3911 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 3912 TAILQ_REMOVE(&sc->sfl, fl, link); 3913 fl->flags &= ~FL_STARVING; 3914 } 3915 FL_UNLOCK(fl); 3916 } 3917 3918 if (!TAILQ_EMPTY(&sc->sfl)) 3919 callout_schedule(&sc->sfl_callout, hz / 5); 3920} 3921 3922static int 3923alloc_fl_sdesc(struct sge_fl *fl) 3924{ 3925 3926 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 3927 M_ZERO | M_WAITOK); 3928 3929 return (0); 3930} 3931 3932static void 3933free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 3934{ 3935 struct fl_sdesc *sd; 3936 struct cluster_metadata *clm; 3937 struct cluster_layout *cll; 3938 int i; 3939 3940 sd = fl->sdesc; 3941 for (i = 0; i < fl->sidx * 8; i++, sd++) { 3942 if (sd->cl == NULL) 3943 continue; 3944 3945 cll = &sd->cll; 3946 clm = cl_metadata(sc, fl, cll, sd->cl); 3947 if (sd->nmbuf == 0) 3948 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3949 else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3950 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3951 counter_u64_add(extfree_rels, 1); 3952 } 3953 sd->cl = NULL; 3954 } 3955 3956 free(fl->sdesc, M_CXGBE); 3957 fl->sdesc = NULL; 3958} 3959 3960static inline void 3961get_pkt_gl(struct mbuf *m, struct sglist *gl) 3962{ 3963 int rc; 3964 3965 M_ASSERTPKTHDR(m); 3966 3967 sglist_reset(gl); 3968 rc = sglist_append_mbuf(gl, m); 3969 if (__predict_false(rc != 0)) { 3970 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 3971 "with %d.", __func__, m, mbuf_nsegs(m), rc); 3972 } 3973 3974 KASSERT(gl->sg_nseg == mbuf_nsegs(m), 3975 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 3976 mbuf_nsegs(m), gl->sg_nseg)); 3977 KASSERT(gl->sg_nseg > 0 && 3978 gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), 3979 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 3980 gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); 3981} 3982 3983/* 3984 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 3985 */ 3986static inline u_int 3987txpkt_len16(u_int nsegs, u_int tso) 3988{ 3989 u_int n; 3990 3991 MPASS(nsegs > 0); 3992 3993 nsegs--; /* first segment is part of ulptx_sgl */ 3994 n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + 3995 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 3996 if (tso) 3997 n += sizeof(struct cpl_tx_pkt_lso_core); 3998 3999 return (howmany(n, 16)); 4000} 4001 4002/* 4003 * len16 for a txpkt_vm WR with a GL. Includes the firmware work 4004 * request header. 4005 */ 4006static inline u_int 4007txpkt_vm_len16(u_int nsegs, u_int tso) 4008{ 4009 u_int n; 4010 4011 MPASS(nsegs > 0); 4012 4013 nsegs--; /* first segment is part of ulptx_sgl */ 4014 n = sizeof(struct fw_eth_tx_pkt_vm_wr) + 4015 sizeof(struct cpl_tx_pkt_core) + 4016 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4017 if (tso) 4018 n += sizeof(struct cpl_tx_pkt_lso_core); 4019 4020 return (howmany(n, 16)); 4021} 4022 4023/* 4024 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 4025 * request header. 
4026 */ 4027static inline u_int 4028txpkts0_len16(u_int nsegs) 4029{ 4030 u_int n; 4031 4032 MPASS(nsegs > 0); 4033 4034 nsegs--; /* first segment is part of ulptx_sgl */ 4035 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 4036 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 4037 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4038 4039 return (howmany(n, 16)); 4040} 4041 4042/* 4043 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 4044 * request header. 4045 */ 4046static inline u_int 4047txpkts1_len16(void) 4048{ 4049 u_int n; 4050 4051 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 4052 4053 return (howmany(n, 16)); 4054} 4055 4056static inline u_int 4057imm_payload(u_int ndesc) 4058{ 4059 u_int n; 4060 4061 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 4062 sizeof(struct cpl_tx_pkt_core); 4063 4064 return (n); 4065} 4066 4067/* 4068 * Write a VM txpkt WR for this packet to the hardware descriptors, update the 4069 * software descriptor, and advance the pidx. It is guaranteed that enough 4070 * descriptors are available. 4071 * 4072 * The return value is the # of hardware descriptors used. 4073 */ 4074static u_int 4075write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, 4076 struct fw_eth_tx_pkt_vm_wr *wr, struct mbuf *m0, u_int available) 4077{ 4078 struct sge_eq *eq = &txq->eq; 4079 struct tx_sdesc *txsd; 4080 struct cpl_tx_pkt_core *cpl; 4081 uint32_t ctrl; /* used in many unrelated places */ 4082 uint64_t ctrl1; 4083 int csum_type, len16, ndesc, pktlen, nsegs; 4084 caddr_t dst; 4085 4086 TXQ_LOCK_ASSERT_OWNED(txq); 4087 M_ASSERTPKTHDR(m0); 4088 MPASS(available > 0 && available < eq->sidx); 4089 4090 len16 = mbuf_len16(m0); 4091 nsegs = mbuf_nsegs(m0); 4092 pktlen = m0->m_pkthdr.len; 4093 ctrl = sizeof(struct cpl_tx_pkt_core); 4094 if (needs_tso(m0)) 4095 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4096 ndesc = howmany(len16, EQ_ESIZE / 16); 4097 MPASS(ndesc <= available); 4098 4099 /* Firmware work request header */ 4100 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4101 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 4102 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 4103 4104 ctrl = V_FW_WR_LEN16(len16); 4105 wr->equiq_to_len16 = htobe32(ctrl); 4106 wr->r3[0] = 0; 4107 wr->r3[1] = 0; 4108 4109 /* 4110 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. 4111 * vlantci is ignored unless the ethtype is 0x8100, so it's 4112 * simpler to always copy it rather than making it 4113 * conditional. Also, it seems that we do not have to set 4114 * vlantci or fake the ethtype when doing VLAN tag insertion. 
4115 */ 4116 m_copydata(m0, 0, sizeof(struct ether_header) + 2, wr->ethmacdst); 4117 4118 csum_type = -1; 4119 if (needs_tso(m0)) { 4120 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 4121 4122 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4123 m0->m_pkthdr.l4hlen > 0, 4124 ("%s: mbuf %p needs TSO but missing header lengths", 4125 __func__, m0)); 4126 4127 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4128 F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) 4129 | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4130 if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) 4131 ctrl |= V_LSO_ETHHDR_LEN(1); 4132 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4133 ctrl |= F_LSO_IPV6; 4134 4135 lso->lso_ctrl = htobe32(ctrl); 4136 lso->ipid_ofst = htobe16(0); 4137 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4138 lso->seqno_offset = htobe32(0); 4139 lso->len = htobe32(pktlen); 4140 4141 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4142 csum_type = TX_CSUM_TCPIP6; 4143 else 4144 csum_type = TX_CSUM_TCPIP; 4145 4146 cpl = (void *)(lso + 1); 4147 4148 txq->tso_wrs++; 4149 } else { 4150 if (m0->m_pkthdr.csum_flags & CSUM_IP_TCP) 4151 csum_type = TX_CSUM_TCPIP; 4152 else if (m0->m_pkthdr.csum_flags & CSUM_IP_UDP) 4153 csum_type = TX_CSUM_UDPIP; 4154 else if (m0->m_pkthdr.csum_flags & CSUM_IP6_TCP) 4155 csum_type = TX_CSUM_TCPIP6; 4156 else if (m0->m_pkthdr.csum_flags & CSUM_IP6_UDP) 4157 csum_type = TX_CSUM_UDPIP6; 4158#if defined(INET) 4159 else if (m0->m_pkthdr.csum_flags & CSUM_IP) { 4160 /* 4161 * XXX: The firmware appears to stomp on the 4162 * fragment/flags field of the IP header when 4163 * using TX_CSUM_IP. Fall back to doing 4164 * software checksums. 4165 */ 4166 u_short *sump; 4167 struct mbuf *m; 4168 int offset; 4169 4170 m = m0; 4171 offset = 0; 4172 sump = m_advance(&m, &offset, m0->m_pkthdr.l2hlen + 4173 offsetof(struct ip, ip_sum)); 4174 *sump = in_cksum_skip(m0, m0->m_pkthdr.l2hlen + 4175 m0->m_pkthdr.l3hlen, m0->m_pkthdr.l2hlen); 4176 m0->m_pkthdr.csum_flags &= ~CSUM_IP; 4177 } 4178#endif 4179 4180 cpl = (void *)(wr + 1); 4181 } 4182 4183 /* Checksum offload */ 4184 ctrl1 = 0; 4185 if (needs_l3_csum(m0) == 0) 4186 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4187 if (csum_type >= 0) { 4188 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0, 4189 ("%s: mbuf %p needs checksum offload but missing header lengths", 4190 __func__, m0)); 4191 4192 if (chip_id(sc) <= CHELSIO_T5) { 4193 ctrl1 |= V_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - 4194 ETHER_HDR_LEN); 4195 } else { 4196 ctrl1 |= V_T6_TXPKT_ETHHDR_LEN(m0->m_pkthdr.l2hlen - 4197 ETHER_HDR_LEN); 4198 } 4199 ctrl1 |= V_TXPKT_IPHDR_LEN(m0->m_pkthdr.l3hlen); 4200 ctrl1 |= V_TXPKT_CSUM_TYPE(csum_type); 4201 } else 4202 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4203 if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4204 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4205 txq->txcsum++; /* some hardware assistance provided */ 4206 4207 /* VLAN tag insertion */ 4208 if (needs_vlan_insertion(m0)) { 4209 ctrl1 |= F_TXPKT_VLAN_VLD | 4210 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 4211 txq->vlan_insertion++; 4212 } 4213 4214 /* CPL header */ 4215 cpl->ctrl0 = txq->cpl_ctrl0; 4216 cpl->pack = 0; 4217 cpl->len = htobe16(pktlen); 4218 cpl->ctrl1 = htobe64(ctrl1); 4219 4220 /* SGL */ 4221 dst = (void *)(cpl + 1); 4222 4223 /* 4224 * A packet using TSO will use up an entire descriptor for the 4225 * firmware work request header, LSO CPL, and TX_PKT_XT CPL. 
4226 * If this descriptor is the last descriptor in the ring, wrap 4227 * around to the front of the ring explicitly for the start of 4228 * the sgl. 4229 */ 4230 if (dst == (void *)&eq->desc[eq->sidx]) { 4231 dst = (void *)&eq->desc[0]; 4232 write_gl_to_txd(txq, m0, &dst, 0); 4233 } else 4234 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4235 txq->sgl_wrs++; 4236 4237 txq->txpkt_wrs++; 4238 4239 txsd = &txq->sdesc[eq->pidx]; 4240 txsd->m = m0; 4241 txsd->desc_used = ndesc; 4242 4243 return (ndesc); 4244} 4245 4246/* 4247 * Write a txpkt WR for this packet to the hardware descriptors, update the 4248 * software descriptor, and advance the pidx. It is guaranteed that enough 4249 * descriptors are available. 4250 * 4251 * The return value is the # of hardware descriptors used. 4252 */ 4253static u_int 4254write_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr, 4255 struct mbuf *m0, u_int available) 4256{ 4257 struct sge_eq *eq = &txq->eq; 4258 struct tx_sdesc *txsd; 4259 struct cpl_tx_pkt_core *cpl; 4260 uint32_t ctrl; /* used in many unrelated places */ 4261 uint64_t ctrl1; 4262 int len16, ndesc, pktlen, nsegs; 4263 caddr_t dst; 4264 4265 TXQ_LOCK_ASSERT_OWNED(txq); 4266 M_ASSERTPKTHDR(m0); 4267 MPASS(available > 0 && available < eq->sidx); 4268 4269 len16 = mbuf_len16(m0); 4270 nsegs = mbuf_nsegs(m0); 4271 pktlen = m0->m_pkthdr.len; 4272 ctrl = sizeof(struct cpl_tx_pkt_core); 4273 if (needs_tso(m0)) 4274 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 4275 else if (pktlen <= imm_payload(2) && available >= 2) { 4276 /* Immediate data. Recalculate len16 and set nsegs to 0. */ 4277 ctrl += pktlen; 4278 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 4279 sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 4280 nsegs = 0; 4281 } 4282 ndesc = howmany(len16, EQ_ESIZE / 16); 4283 MPASS(ndesc <= available); 4284 4285 /* Firmware work request header */ 4286 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4287 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 4288 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 4289 4290 ctrl = V_FW_WR_LEN16(len16); 4291 wr->equiq_to_len16 = htobe32(ctrl); 4292 wr->r3 = 0; 4293 4294 if (needs_tso(m0)) { 4295 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 4296 4297 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4298 m0->m_pkthdr.l4hlen > 0, 4299 ("%s: mbuf %p needs TSO but missing header lengths", 4300 __func__, m0)); 4301 4302 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 4303 F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) 4304 | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4305 if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) 4306 ctrl |= V_LSO_ETHHDR_LEN(1); 4307 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4308 ctrl |= F_LSO_IPV6; 4309 4310 lso->lso_ctrl = htobe32(ctrl); 4311 lso->ipid_ofst = htobe16(0); 4312 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4313 lso->seqno_offset = htobe32(0); 4314 lso->len = htobe32(pktlen); 4315 4316 cpl = (void *)(lso + 1); 4317 4318 txq->tso_wrs++; 4319 } else 4320 cpl = (void *)(wr + 1); 4321 4322 /* Checksum offload */ 4323 ctrl1 = 0; 4324 if (needs_l3_csum(m0) == 0) 4325 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4326 if (needs_l4_csum(m0) == 0) 4327 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4328 if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4329 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4330 txq->txcsum++; /* some hardware assistance provided */ 4331 4332 /* VLAN tag insertion */ 4333 if (needs_vlan_insertion(m0)) { 4334 ctrl1 |= F_TXPKT_VLAN_VLD | 
V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 4335 txq->vlan_insertion++; 4336 } 4337 4338 /* CPL header */ 4339 cpl->ctrl0 = txq->cpl_ctrl0; 4340 cpl->pack = 0; 4341 cpl->len = htobe16(pktlen); 4342 cpl->ctrl1 = htobe64(ctrl1); 4343 4344 /* SGL */ 4345 dst = (void *)(cpl + 1); 4346 if (nsegs > 0) { 4347 4348 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4349 txq->sgl_wrs++; 4350 } else { 4351 struct mbuf *m; 4352 4353 for (m = m0; m != NULL; m = m->m_next) { 4354 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4355#ifdef INVARIANTS 4356 pktlen -= m->m_len; 4357#endif 4358 } 4359#ifdef INVARIANTS 4360 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 4361#endif 4362 txq->imm_wrs++; 4363 } 4364 4365 txq->txpkt_wrs++; 4366 4367 txsd = &txq->sdesc[eq->pidx]; 4368 txsd->m = m0; 4369 txsd->desc_used = ndesc; 4370 4371 return (ndesc); 4372} 4373 4374static int 4375try_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available) 4376{ 4377 u_int needed, nsegs1, nsegs2, l1, l2; 4378 4379 if (cannot_use_txpkts(m) || cannot_use_txpkts(n)) 4380 return (1); 4381 4382 nsegs1 = mbuf_nsegs(m); 4383 nsegs2 = mbuf_nsegs(n); 4384 if (nsegs1 + nsegs2 == 2) { 4385 txp->wr_type = 1; 4386 l1 = l2 = txpkts1_len16(); 4387 } else { 4388 txp->wr_type = 0; 4389 l1 = txpkts0_len16(nsegs1); 4390 l2 = txpkts0_len16(nsegs2); 4391 } 4392 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2; 4393 needed = howmany(txp->len16, EQ_ESIZE / 16); 4394 if (needed > SGE_MAX_WR_NDESC || needed > available) 4395 return (1); 4396 4397 txp->plen = m->m_pkthdr.len + n->m_pkthdr.len; 4398 if (txp->plen > 65535) 4399 return (1); 4400 4401 txp->npkt = 2; 4402 set_mbuf_len16(m, l1); 4403 set_mbuf_len16(n, l2); 4404 4405 return (0); 4406} 4407 4408static int 4409add_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available) 4410{ 4411 u_int plen, len16, needed, nsegs; 4412 4413 MPASS(txp->wr_type == 0 || txp->wr_type == 1); 4414 4415 nsegs = mbuf_nsegs(m); 4416 if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1)) 4417 return (1); 4418 4419 plen = txp->plen + m->m_pkthdr.len; 4420 if (plen > 65535) 4421 return (1); 4422 4423 if (txp->wr_type == 0) 4424 len16 = txpkts0_len16(nsegs); 4425 else 4426 len16 = txpkts1_len16(); 4427 needed = howmany(txp->len16 + len16, EQ_ESIZE / 16); 4428 if (needed > SGE_MAX_WR_NDESC || needed > available) 4429 return (1); 4430 4431 txp->npkt++; 4432 txp->plen = plen; 4433 txp->len16 += len16; 4434 set_mbuf_len16(m, len16); 4435 4436 return (0); 4437} 4438 4439/* 4440 * Write a txpkts WR for the packets in txp to the hardware descriptors, update 4441 * the software descriptor, and advance the pidx. It is guaranteed that enough 4442 * descriptors are available. 4443 * 4444 * The return value is the # of hardware descriptors used. 
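 *
 * Worked example (editorial): a type 1 WR carrying npkt single-segment
 * packets costs howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) +
 * npkt * txpkts1_len16() = 1 + 2 * npkt units of 16 bytes, so seven
 * coalesced packets fit in howmany(15, EQ_ESIZE / 16) = 4 hardware
 * descriptors, versus 7 if each went out in its own txpkt WR.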
4445 */ 4446static u_int 4447write_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr, 4448 struct mbuf *m0, const struct txpkts *txp, u_int available) 4449{ 4450 struct sge_eq *eq = &txq->eq; 4451 struct tx_sdesc *txsd; 4452 struct cpl_tx_pkt_core *cpl; 4453 uint32_t ctrl; 4454 uint64_t ctrl1; 4455 int ndesc, checkwrap; 4456 struct mbuf *m; 4457 void *flitp; 4458 4459 TXQ_LOCK_ASSERT_OWNED(txq); 4460 MPASS(txp->npkt > 0); 4461 MPASS(txp->plen < 65536); 4462 MPASS(m0 != NULL); 4463 MPASS(m0->m_nextpkt != NULL); 4464 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 4465 MPASS(available > 0 && available < eq->sidx); 4466 4467 ndesc = howmany(txp->len16, EQ_ESIZE / 16); 4468 MPASS(ndesc <= available); 4469 4470 MPASS(wr == (void *)&eq->desc[eq->pidx]); 4471 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 4472 ctrl = V_FW_WR_LEN16(txp->len16); 4473 wr->equiq_to_len16 = htobe32(ctrl); 4474 wr->plen = htobe16(txp->plen); 4475 wr->npkt = txp->npkt; 4476 wr->r3 = 0; 4477 wr->type = txp->wr_type; 4478 flitp = wr + 1; 4479 4480 /* 4481 * At this point we are 16B into a hardware descriptor. If checkwrap is 4482 * set then we know the WR is going to wrap around somewhere. We'll 4483 * check for that at appropriate points. 4484 */ 4485 checkwrap = eq->sidx - ndesc < eq->pidx; 4486 for (m = m0; m != NULL; m = m->m_nextpkt) { 4487 if (txp->wr_type == 0) { 4488 struct ulp_txpkt *ulpmc; 4489 struct ulptx_idata *ulpsc; 4490 4491 /* ULP master command */ 4492 ulpmc = flitp; 4493 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 4494 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 4495 ulpmc->len = htobe32(mbuf_len16(m)); 4496 4497 /* ULP subcommand */ 4498 ulpsc = (void *)(ulpmc + 1); 4499 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 4500 F_ULP_TX_SC_MORE); 4501 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 4502 4503 cpl = (void *)(ulpsc + 1); 4504 if (checkwrap && 4505 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 4506 cpl = (void *)&eq->desc[0]; 4507 } else { 4508 cpl = flitp; 4509 } 4510 4511 /* Checksum offload */ 4512 ctrl1 = 0; 4513 if (needs_l3_csum(m) == 0) 4514 ctrl1 |= F_TXPKT_IPCSUM_DIS; 4515 if (needs_l4_csum(m) == 0) 4516 ctrl1 |= F_TXPKT_L4CSUM_DIS; 4517 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4518 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4519 txq->txcsum++; /* some hardware assistance provided */ 4520 4521 /* VLAN tag insertion */ 4522 if (needs_vlan_insertion(m)) { 4523 ctrl1 |= F_TXPKT_VLAN_VLD | 4524 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 4525 txq->vlan_insertion++; 4526 } 4527 4528 /* CPL header */ 4529 cpl->ctrl0 = txq->cpl_ctrl0; 4530 cpl->pack = 0; 4531 cpl->len = htobe16(m->m_pkthdr.len); 4532 cpl->ctrl1 = htobe64(ctrl1); 4533 4534 flitp = cpl + 1; 4535 if (checkwrap && 4536 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 4537 flitp = (void *)&eq->desc[0]; 4538 4539 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 4540 4541 } 4542 4543 if (txp->wr_type == 0) { 4544 txq->txpkts0_pkts += txp->npkt; 4545 txq->txpkts0_wrs++; 4546 } else { 4547 txq->txpkts1_pkts += txp->npkt; 4548 txq->txpkts1_wrs++; 4549 } 4550 4551 txsd = &txq->sdesc[eq->pidx]; 4552 txsd->m = m0; 4553 txsd->desc_used = ndesc; 4554 4555 return (ndesc); 4556} 4557 4558/* 4559 * If the SGL ends on an address that is not 16 byte aligned, this function will 4560 * add a 0 filled flit at the end. 
4561 */ 4562static void 4563write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 4564{ 4565 struct sge_eq *eq = &txq->eq; 4566 struct sglist *gl = txq->gl; 4567 struct sglist_seg *seg; 4568 __be64 *flitp, *wrap; 4569 struct ulptx_sgl *usgl; 4570 int i, nflits, nsegs; 4571 4572 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 4573 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 4574 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 4575 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 4576 4577 get_pkt_gl(m, gl); 4578 nsegs = gl->sg_nseg; 4579 MPASS(nsegs > 0); 4580 4581 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 4582 flitp = (__be64 *)(*to); 4583 wrap = (__be64 *)(&eq->desc[eq->sidx]); 4584 seg = &gl->sg_segs[0]; 4585 usgl = (void *)flitp; 4586 4587 /* 4588 * We start at a 16 byte boundary somewhere inside the tx descriptor 4589 * ring, so we're at least 16 bytes away from the status page. There is 4590 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 4591 */ 4592 4593 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 4594 V_ULPTX_NSGE(nsegs)); 4595 usgl->len0 = htobe32(seg->ss_len); 4596 usgl->addr0 = htobe64(seg->ss_paddr); 4597 seg++; 4598 4599 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 4600 4601 /* Won't wrap around at all */ 4602 4603 for (i = 0; i < nsegs - 1; i++, seg++) { 4604 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 4605 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 4606 } 4607 if (i & 1) 4608 usgl->sge[i / 2].len[1] = htobe32(0); 4609 flitp += nflits; 4610 } else { 4611 4612 /* Will wrap somewhere in the rest of the SGL */ 4613 4614 /* 2 flits already written, write the rest flit by flit */ 4615 flitp = (void *)(usgl + 1); 4616 for (i = 0; i < nflits - 2; i++) { 4617 if (flitp == wrap) 4618 flitp = (void *)eq->desc; 4619 *flitp++ = get_flit(seg, nsegs - 1, i); 4620 } 4621 } 4622 4623 if (nflits & 1) { 4624 MPASS(((uintptr_t)flitp) & 0xf); 4625 *flitp++ = 0; 4626 } 4627 4628 MPASS((((uintptr_t)flitp) & 0xf) == 0); 4629 if (__predict_false(flitp == wrap)) 4630 *to = (void *)eq->desc; 4631 else 4632 *to = (void *)flitp; 4633} 4634 4635static inline void 4636copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 4637{ 4638 4639 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 4640 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 4641 4642 if (__predict_true((uintptr_t)(*to) + len <= 4643 (uintptr_t)&eq->desc[eq->sidx])) { 4644 bcopy(from, *to, len); 4645 (*to) += len; 4646 } else { 4647 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 4648 4649 bcopy(from, *to, portion); 4650 from += portion; 4651 portion = len - portion; /* remaining */ 4652 bcopy(from, (void *)eq->desc, portion); 4653 (*to) = (caddr_t)eq->desc + portion; 4654 } 4655} 4656 4657static inline void 4658ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 4659{ 4660 u_int db; 4661 4662 MPASS(n > 0); 4663 4664 db = eq->doorbells; 4665 if (n > 1) 4666 clrbit(&db, DOORBELL_WCWR); 4667 wmb(); 4668 4669 switch (ffs(db) - 1) { 4670 case DOORBELL_UDB: 4671 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 4672 break; 4673 4674 case DOORBELL_WCWR: { 4675 volatile uint64_t *dst, *src; 4676 int i; 4677 4678 /* 4679 * Queues whose 128B doorbell segment fits in the page do not 4680 * use relative qid (udb_qid is always 0). Only queues with 4681 * doorbell segments can do WCWR. 
4682 */ 4683 KASSERT(eq->udb_qid == 0 && n == 1, 4684 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 4685 __func__, eq->doorbells, n, eq->dbidx, eq)); 4686 4687 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 4688 UDBS_DB_OFFSET); 4689 i = eq->dbidx; 4690 src = (void *)&eq->desc[i]; 4691 while (src != (void *)&eq->desc[i + 1]) 4692 *dst++ = *src++; 4693 wmb(); 4694 break; 4695 } 4696 4697 case DOORBELL_UDBWC: 4698 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 4699 wmb(); 4700 break; 4701 4702 case DOORBELL_KDB: 4703 t4_write_reg(sc, sc->sge_kdoorbell_reg, 4704 V_QID(eq->cntxt_id) | V_PIDX(n)); 4705 break; 4706 } 4707 4708 IDXINCR(eq->dbidx, n, eq->sidx); 4709} 4710 4711static inline u_int 4712reclaimable_tx_desc(struct sge_eq *eq) 4713{ 4714 uint16_t hw_cidx; 4715 4716 hw_cidx = read_hw_cidx(eq); 4717 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 4718} 4719 4720static inline u_int 4721total_available_tx_desc(struct sge_eq *eq) 4722{ 4723 uint16_t hw_cidx, pidx; 4724 4725 hw_cidx = read_hw_cidx(eq); 4726 pidx = eq->pidx; 4727 4728 if (pidx == hw_cidx) 4729 return (eq->sidx - 1); 4730 else 4731 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 4732} 4733 4734static inline uint16_t 4735read_hw_cidx(struct sge_eq *eq) 4736{ 4737 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4738 uint16_t cidx = spg->cidx; /* stable snapshot */ 4739 4740 return (be16toh(cidx)); 4741} 4742 4743/* 4744 * Reclaim 'n' descriptors approximately. 4745 */ 4746static u_int 4747reclaim_tx_descs(struct sge_txq *txq, u_int n) 4748{ 4749 struct tx_sdesc *txsd; 4750 struct sge_eq *eq = &txq->eq; 4751 u_int can_reclaim, reclaimed; 4752 4753 TXQ_LOCK_ASSERT_OWNED(txq); 4754 MPASS(n > 0); 4755 4756 reclaimed = 0; 4757 can_reclaim = reclaimable_tx_desc(eq); 4758 while (can_reclaim && reclaimed < n) { 4759 int ndesc; 4760 struct mbuf *m, *nextpkt; 4761 4762 txsd = &txq->sdesc[eq->cidx]; 4763 ndesc = txsd->desc_used; 4764 4765 /* Firmware doesn't return "partial" credits. 
*/ 4766 KASSERT(can_reclaim >= ndesc, 4767 ("%s: unexpected number of credits: %d, %d", 4768 __func__, can_reclaim, ndesc)); 4769 KASSERT(ndesc != 0, 4770 ("%s: descriptor with no credits: cidx %d", 4771 __func__, eq->cidx)); 4772 4773 for (m = txsd->m; m != NULL; m = nextpkt) { 4774 nextpkt = m->m_nextpkt; 4775 m->m_nextpkt = NULL; 4776 m_freem(m); 4777 } 4778 reclaimed += ndesc; 4779 can_reclaim -= ndesc; 4780 IDXINCR(eq->cidx, ndesc, eq->sidx); 4781 } 4782 4783 return (reclaimed); 4784} 4785 4786static void 4787tx_reclaim(void *arg, int n) 4788{ 4789 struct sge_txq *txq = arg; 4790 struct sge_eq *eq = &txq->eq; 4791 4792 do { 4793 if (TXQ_TRYLOCK(txq) == 0) 4794 break; 4795 n = reclaim_tx_descs(txq, 32); 4796 if (eq->cidx == eq->pidx) 4797 eq->equeqidx = eq->pidx; 4798 TXQ_UNLOCK(txq); 4799 } while (n > 0); 4800} 4801 4802static __be64 4803get_flit(struct sglist_seg *segs, int nsegs, int idx) 4804{ 4805 int i = (idx / 3) * 2; 4806 4807 switch (idx % 3) { 4808 case 0: { 4809 uint64_t rc; 4810 4811 rc = (uint64_t)segs[i].ss_len << 32; 4812 if (i + 1 < nsegs) 4813 rc |= (uint64_t)(segs[i + 1].ss_len); 4814 4815 return (htobe64(rc)); 4816 } 4817 case 1: 4818 return (htobe64(segs[i].ss_paddr)); 4819 case 2: 4820 return (htobe64(segs[i + 1].ss_paddr)); 4821 } 4822 4823 return (0); 4824} 4825 4826static void 4827find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) 4828{ 4829 int8_t zidx, hwidx, idx; 4830 uint16_t region1, region3; 4831 int spare, spare_needed, n; 4832 struct sw_zone_info *swz; 4833 struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; 4834 4835 /* 4836 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize 4837 * large enough for the max payload and cluster metadata. Otherwise 4838 * settle for the largest bufsize that leaves enough room in the cluster 4839 * for metadata. 4840 * 4841 * Without buffer packing: Look for the smallest zone which has a 4842 * bufsize large enough for the max payload. Settle for the largest 4843 * bufsize available if there's nothing big enough for max payload. 4844 */ 4845 spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; 4846 swz = &sc->sge.sw_zone_info[0]; 4847 hwidx = -1; 4848 for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { 4849 if (swz->size > largest_rx_cluster) { 4850 if (__predict_true(hwidx != -1)) 4851 break; 4852 4853 /* 4854 * This is a misconfiguration. largest_rx_cluster is 4855 * preventing us from finding a refill source. See 4856 * dev.t5nex.<n>.buffer_sizes to figure out why. 4857 */ 4858 device_printf(sc->dev, "largest_rx_cluster=%u leaves no" 4859 " refill source for fl %p (dma %u). Ignored.\n", 4860 largest_rx_cluster, fl, maxp); 4861 } 4862 for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { 4863 hwb = &hwb_list[idx]; 4864 spare = swz->size - hwb->size; 4865 if (spare < spare_needed) 4866 continue; 4867 4868 hwidx = idx; /* best option so far */ 4869 if (hwb->size >= maxp) { 4870 4871 if ((fl->flags & FL_BUF_PACKING) == 0) 4872 goto done; /* stop looking (not packing) */ 4873 4874 if (swz->size >= safest_rx_cluster) 4875 goto done; /* stop looking (packing) */ 4876 } 4877 break; /* keep looking, next zone */ 4878 } 4879 } 4880done: 4881 /* A usable hwidx has been located. 
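The search above always keeps the best candidate seen so far; the editorial assumption is that at least one zone passes the spare test, hence the MPASS.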
*/ 4882 MPASS(hwidx != -1); 4883 hwb = &hwb_list[hwidx]; 4884 zidx = hwb->zidx; 4885 swz = &sc->sge.sw_zone_info[zidx]; 4886 region1 = 0; 4887 region3 = swz->size - hwb->size; 4888 4889 /* 4890 * Stay within this zone and see if there is a better match when mbuf 4891 * inlining is allowed. Remember that the hwidx's are sorted in 4892 * decreasing order of size (so in increasing order of spare area). 4893 */ 4894 for (idx = hwidx; idx != -1; idx = hwb->next) { 4895 hwb = &hwb_list[idx]; 4896 spare = swz->size - hwb->size; 4897 4898 if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) 4899 break; 4900 4901 /* 4902 * Do not inline mbufs if doing so would violate the pad/pack 4903 * boundary alignment requirement. 4904 */ 4905 if (fl_pad && (MSIZE % sc->params.sge.pad_boundary) != 0) 4906 continue; 4907 if (fl->flags & FL_BUF_PACKING && 4908 (MSIZE % sc->params.sge.pack_boundary) != 0) 4909 continue; 4910 4911 if (spare < CL_METADATA_SIZE + MSIZE) 4912 continue; 4913 n = (spare - CL_METADATA_SIZE) / MSIZE; 4914 if (n > howmany(hwb->size, maxp)) 4915 break; 4916 4917 hwidx = idx; 4918 if (fl->flags & FL_BUF_PACKING) { 4919 region1 = n * MSIZE; 4920 region3 = spare - region1; 4921 } else { 4922 region1 = MSIZE; 4923 region3 = spare - region1; 4924 break; 4925 } 4926 } 4927 4928 KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, 4929 ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); 4930 KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, 4931 ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); 4932 KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == 4933 sc->sge.sw_zone_info[zidx].size, 4934 ("%s: bad buffer layout for fl %p, maxp %d. " 4935 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4936 sc->sge.sw_zone_info[zidx].size, region1, 4937 sc->sge.hw_buf_info[hwidx].size, region3)); 4938 if (fl->flags & FL_BUF_PACKING || region1 > 0) { 4939 KASSERT(region3 >= CL_METADATA_SIZE, 4940 ("%s: no room for metadata. fl %p, maxp %d; " 4941 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4942 sc->sge.sw_zone_info[zidx].size, region1, 4943 sc->sge.hw_buf_info[hwidx].size, region3)); 4944 KASSERT(region1 % MSIZE == 0, 4945 ("%s: bad mbuf region for fl %p, maxp %d. 
" 4946 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4947 sc->sge.sw_zone_info[zidx].size, region1, 4948 sc->sge.hw_buf_info[hwidx].size, region3)); 4949 } 4950 4951 fl->cll_def.zidx = zidx; 4952 fl->cll_def.hwidx = hwidx; 4953 fl->cll_def.region1 = region1; 4954 fl->cll_def.region3 = region3; 4955} 4956 4957static void 4958find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) 4959{ 4960 struct sge *s = &sc->sge; 4961 struct hw_buf_info *hwb; 4962 struct sw_zone_info *swz; 4963 int spare; 4964 int8_t hwidx; 4965 4966 if (fl->flags & FL_BUF_PACKING) 4967 hwidx = s->safe_hwidx2; /* with room for metadata */ 4968 else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { 4969 hwidx = s->safe_hwidx2; 4970 hwb = &s->hw_buf_info[hwidx]; 4971 swz = &s->sw_zone_info[hwb->zidx]; 4972 spare = swz->size - hwb->size; 4973 4974 /* no good if there isn't room for an mbuf as well */ 4975 if (spare < CL_METADATA_SIZE + MSIZE) 4976 hwidx = s->safe_hwidx1; 4977 } else 4978 hwidx = s->safe_hwidx1; 4979 4980 if (hwidx == -1) { 4981 /* No fallback source */ 4982 fl->cll_alt.hwidx = -1; 4983 fl->cll_alt.zidx = -1; 4984 4985 return; 4986 } 4987 4988 hwb = &s->hw_buf_info[hwidx]; 4989 swz = &s->sw_zone_info[hwb->zidx]; 4990 spare = swz->size - hwb->size; 4991 fl->cll_alt.hwidx = hwidx; 4992 fl->cll_alt.zidx = hwb->zidx; 4993 if (allow_mbufs_in_cluster && 4994 (fl_pad == 0 || (MSIZE % sc->params.sge.pad_boundary) == 0)) 4995 fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; 4996 else 4997 fl->cll_alt.region1 = 0; 4998 fl->cll_alt.region3 = spare - fl->cll_alt.region1; 4999} 5000 5001static void 5002add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 5003{ 5004 mtx_lock(&sc->sfl_lock); 5005 FL_LOCK(fl); 5006 if ((fl->flags & FL_DOOMED) == 0) { 5007 fl->flags |= FL_STARVING; 5008 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 5009 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 5010 } 5011 FL_UNLOCK(fl); 5012 mtx_unlock(&sc->sfl_lock); 5013} 5014 5015static void 5016handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) 5017{ 5018 struct sge_wrq *wrq = (void *)eq; 5019 5020 atomic_readandclear_int(&eq->equiq); 5021 taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); 5022} 5023 5024static void 5025handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) 5026{ 5027 struct sge_txq *txq = (void *)eq; 5028 5029 MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH); 5030 5031 atomic_readandclear_int(&eq->equiq); 5032 mp_ring_check_drainage(txq->r, 0); 5033 taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); 5034} 5035 5036static int 5037handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 5038 struct mbuf *m) 5039{ 5040 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 5041 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 5042 struct adapter *sc = iq->adapter; 5043 struct sge *s = &sc->sge; 5044 struct sge_eq *eq; 5045 static void (*h[])(struct adapter *, struct sge_eq *) = {NULL, 5046 &handle_wrq_egr_update, &handle_eth_egr_update, 5047 &handle_wrq_egr_update}; 5048 5049 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 5050 rss->opcode)); 5051 5052 eq = s->eqmap[qid - s->eq_start - s->eq_base]; 5053 (*h[eq->flags & EQ_TYPEMASK])(sc, eq); 5054 5055 return (0); 5056} 5057 5058/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 5059CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 5060 offsetof(struct cpl_fw6_msg, data)); 5061 5062static int 5063handle_fw_msg(struct sge_iq *iq, const struct 
rss_header *rss, struct mbuf *m) 5064{ 5065 struct adapter *sc = iq->adapter; 5066 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 5067 5068 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 5069 rss->opcode)); 5070 5071 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 5072 const struct rss_header *rss2; 5073 5074 rss2 = (const struct rss_header *)&cpl->data[0]; 5075 return (t4_cpl_handler[rss2->opcode](iq, rss2, m)); 5076 } 5077 5078 return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0])); 5079} 5080 5081/** 5082 * t4_handle_wrerr_rpl - process a FW work request error message 5083 * @adap: the adapter 5084 * @rpl: start of the FW message 5085 */ 5086static int 5087t4_handle_wrerr_rpl(struct adapter *adap, const __be64 *rpl) 5088{ 5089 u8 opcode = *(const u8 *)rpl; 5090 const struct fw_error_cmd *e = (const void *)rpl; 5091 unsigned int i; 5092 5093 if (opcode != FW_ERROR_CMD) { 5094 log(LOG_ERR, 5095 "%s: Received WRERR_RPL message with opcode %#x\n", 5096 device_get_nameunit(adap->dev), opcode); 5097 return (EINVAL); 5098 } 5099 log(LOG_ERR, "%s: FW_ERROR (%s) ", device_get_nameunit(adap->dev), 5100 G_FW_ERROR_CMD_FATAL(be32toh(e->op_to_type)) ? "fatal" : 5101 "non-fatal"); 5102 switch (G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))) { 5103 case FW_ERROR_TYPE_EXCEPTION: 5104 log(LOG_ERR, "exception info:\n"); 5105 for (i = 0; i < nitems(e->u.exception.info); i++) 5106 log(LOG_ERR, "%s%08x", i == 0 ? "\t" : " ", 5107 be32toh(e->u.exception.info[i])); 5108 log(LOG_ERR, "\n"); 5109 break; 5110 case FW_ERROR_TYPE_HWMODULE: 5111 log(LOG_ERR, "HW module regaddr %08x regval %08x\n", 5112 be32toh(e->u.hwmodule.regaddr), 5113 be32toh(e->u.hwmodule.regval)); 5114 break; 5115 case FW_ERROR_TYPE_WR: 5116 log(LOG_ERR, "WR cidx %d PF %d VF %d eqid %d hdr:\n", 5117 be16toh(e->u.wr.cidx), 5118 G_FW_ERROR_CMD_PFN(be16toh(e->u.wr.pfn_vfn)), 5119 G_FW_ERROR_CMD_VFN(be16toh(e->u.wr.pfn_vfn)), 5120 be32toh(e->u.wr.eqid)); 5121 for (i = 0; i < nitems(e->u.wr.wrhdr); i++) 5122 log(LOG_ERR, "%s%02x", i == 0 ? "\t" : " ", 5123 e->u.wr.wrhdr[i]); 5124 log(LOG_ERR, "\n"); 5125 break; 5126 case FW_ERROR_TYPE_ACL: 5127 log(LOG_ERR, "ACL cidx %d PF %d VF %d eqid %d %s", 5128 be16toh(e->u.acl.cidx), 5129 G_FW_ERROR_CMD_PFN(be16toh(e->u.acl.pfn_vfn)), 5130 G_FW_ERROR_CMD_VFN(be16toh(e->u.acl.pfn_vfn)), 5131 be32toh(e->u.acl.eqid), 5132 G_FW_ERROR_CMD_MV(be16toh(e->u.acl.mv_pkd)) ? 
"vlanid" : 5133 "MAC"); 5134 for (i = 0; i < nitems(e->u.acl.val); i++) 5135 log(LOG_ERR, " %02x", e->u.acl.val[i]); 5136 log(LOG_ERR, "\n"); 5137 break; 5138 default: 5139 log(LOG_ERR, "type %#x\n", 5140 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); 5141 return (EINVAL); 5142 } 5143 return (0); 5144} 5145 5146static int 5147sysctl_uint16(SYSCTL_HANDLER_ARGS) 5148{ 5149 uint16_t *id = arg1; 5150 int i = *id; 5151 5152 return sysctl_handle_int(oidp, &i, 0, req); 5153} 5154 5155static int 5156sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 5157{ 5158 struct sge *s = arg1; 5159 struct hw_buf_info *hwb = &s->hw_buf_info[0]; 5160 struct sw_zone_info *swz = &s->sw_zone_info[0]; 5161 int i, rc; 5162 struct sbuf sb; 5163 char c; 5164 5165 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 5166 for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { 5167 if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) 5168 c = '*'; 5169 else 5170 c = '\0'; 5171 5172 sbuf_printf(&sb, "%u%c ", hwb->size, c); 5173 } 5174 sbuf_trim(&sb); 5175 sbuf_finish(&sb); 5176 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 5177 sbuf_delete(&sb); 5178 return (rc); 5179} 5180 5181static int 5182sysctl_tc(SYSCTL_HANDLER_ARGS) 5183{ 5184 struct vi_info *vi = arg1; 5185 struct port_info *pi; 5186 struct adapter *sc; 5187 struct sge_txq *txq; 5188 struct tx_cl_rl_params *tc; 5189 int qidx = arg2, rc, tc_idx; 5190 uint32_t fw_queue, fw_class; 5191 5192 MPASS(qidx >= 0 && qidx < vi->ntxq); 5193 pi = vi->pi; 5194 sc = pi->adapter; 5195 txq = &sc->sge.txq[vi->first_txq + qidx]; 5196 5197 tc_idx = txq->tc_idx; 5198 rc = sysctl_handle_int(oidp, &tc_idx, 0, req); 5199 if (rc != 0 || req->newptr == NULL) 5200 return (rc); 5201 5202 if (sc->flags & IS_VF) 5203 return (EPERM); 5204 5205 /* Note that -1 is legitimate input (it means unbind). */ 5206 if (tc_idx < -1 || tc_idx >= sc->chip_params->nsched_cls) 5207 return (EINVAL); 5208 5209 mtx_lock(&sc->tc_lock); 5210 if (tc_idx == txq->tc_idx) { 5211 rc = 0; /* No change, nothing to do. */ 5212 goto done; 5213 } 5214 5215 fw_queue = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 5216 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH) | 5217 V_FW_PARAMS_PARAM_YZ(txq->eq.cntxt_id); 5218 5219 if (tc_idx == -1) 5220 fw_class = 0xffffffff; /* Unbind. */ 5221 else { 5222 /* 5223 * Bind to a different class. 5224 */ 5225 tc = &pi->sched_params->cl_rl[tc_idx]; 5226 if (tc->flags & TX_CLRL_ERROR) { 5227 /* Previous attempt to set the cl-rl params failed. */ 5228 rc = EIO; 5229 goto done; 5230 } else { 5231 /* 5232 * Ok to proceed. Place a reference on the new class 5233 * while still holding on to the reference on the 5234 * previous class, if any. 5235 */ 5236 fw_class = tc_idx; 5237 tc->refcount++; 5238 } 5239 } 5240 mtx_unlock(&sc->tc_lock); 5241 5242 rc = begin_synchronized_op(sc, vi, SLEEP_OK | INTR_OK, "t4stc"); 5243 if (rc) 5244 return (rc); 5245 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &fw_queue, &fw_class); 5246 end_synchronized_op(sc, 0); 5247 5248 mtx_lock(&sc->tc_lock); 5249 if (rc == 0) { 5250 if (txq->tc_idx != -1) { 5251 tc = &pi->sched_params->cl_rl[txq->tc_idx]; 5252 MPASS(tc->refcount > 0); 5253 tc->refcount--; 5254 } 5255 txq->tc_idx = tc_idx; 5256 } else if (tc_idx != -1) { 5257 tc = &pi->sched_params->cl_rl[tc_idx]; 5258 MPASS(tc->refcount > 0); 5259 tc->refcount--; 5260 } 5261done: 5262 mtx_unlock(&sc->tc_lock); 5263 return (rc); 5264} 5265