t4_sge.c revision 281253
1/*- 2 * Copyright (c) 2011 Chelsio Communications, Inc. 3 * All rights reserved. 4 * Written by: Navdeep Parhar <np@FreeBSD.org> 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/10/sys/dev/cxgbe/t4_sge.c 281253 2015-04-08 01:43:29Z np $"); 30 31#include "opt_inet.h" 32#include "opt_inet6.h" 33 34#include <sys/types.h> 35#include <sys/mbuf.h> 36#include <sys/socket.h> 37#include <sys/kernel.h> 38#include <sys/kdb.h> 39#include <sys/malloc.h> 40#include <sys/queue.h> 41#include <sys/sbuf.h> 42#include <sys/taskqueue.h> 43#include <sys/time.h> 44#include <sys/sysctl.h> 45#include <sys/smp.h> 46#include <sys/counter.h> 47#include <net/bpf.h> 48#include <net/ethernet.h> 49#include <net/if.h> 50#include <net/if_vlan_var.h> 51#include <netinet/in.h> 52#include <netinet/ip.h> 53#include <netinet/ip6.h> 54#include <netinet/tcp.h> 55#include <machine/md_var.h> 56#include <vm/vm.h> 57#include <vm/pmap.h> 58#ifdef DEV_NETMAP 59#include <machine/bus.h> 60#include <sys/selinfo.h> 61#include <net/if_var.h> 62#include <net/netmap.h> 63#include <dev/netmap/netmap_kern.h> 64#endif 65 66#include "common/common.h" 67#include "common/t4_regs.h" 68#include "common/t4_regs_values.h" 69#include "common/t4_msg.h" 70 71#ifdef T4_PKT_TIMESTAMP 72#define RX_COPY_THRESHOLD (MINCLSIZE - 8) 73#else 74#define RX_COPY_THRESHOLD MINCLSIZE 75#endif 76 77/* 78 * Ethernet frames are DMA'd at this byte offset into the freelist buffer. 79 * 0-7 are valid values. 80 */ 81int fl_pktshift = 2; 82TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift); 83 84/* 85 * Pad ethernet payload up to this boundary. 86 * -1: driver should figure out a good value. 87 * 0: disable padding. 88 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value. 89 */ 90int fl_pad = -1; 91TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad); 92 93/* 94 * Status page length. 95 * -1: driver should figure out a good value. 96 * 64 or 128 are the only other valid values. 97 */ 98int spg_len = -1; 99TUNABLE_INT("hw.cxgbe.spg_len", &spg_len); 100 101/* 102 * Congestion drops. 103 * -1: no congestion feedback (not recommended). 104 * 0: backpressure the channel instead of dropping packets right away. 
105 * 1: no backpressure, drop packets for the congested queue immediately. 106 */ 107static int cong_drop = 0; 108TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop); 109 110/* 111 * Deliver multiple frames in the same free list buffer if they fit. 112 * -1: let the driver decide whether to enable buffer packing or not. 113 * 0: disable buffer packing. 114 * 1: enable buffer packing. 115 */ 116static int buffer_packing = -1; 117TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing); 118 119/* 120 * Start next frame in a packed buffer at this boundary. 121 * -1: driver should figure out a good value. 122 * T4: driver will ignore this and use the same value as fl_pad above. 123 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value. 124 */ 125static int fl_pack = -1; 126TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack); 127 128/* 129 * Allow the driver to create mbuf(s) in a cluster allocated for rx. 130 * 0: never; always allocate mbufs from the zone_mbuf UMA zone. 131 * 1: ok to create mbuf(s) within a cluster if there is room. 132 */ 133static int allow_mbufs_in_cluster = 1; 134TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster); 135 136/* 137 * Largest rx cluster size that the driver is allowed to allocate. 138 */ 139static int largest_rx_cluster = MJUM16BYTES; 140TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster); 141 142/* 143 * Size of cluster allocation that's most likely to succeed. The driver will 144 * fall back to this size if it fails to allocate clusters larger than this. 145 */ 146static int safest_rx_cluster = PAGE_SIZE; 147TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster); 148 149/* Used to track coalesced tx work request */ 150struct txpkts { 151 uint64_t *flitp; /* ptr to flit where next pkt should start */ 152 uint8_t npkt; /* # of packets in this work request */ 153 uint8_t nflits; /* # of flits used by this work request */ 154 uint16_t plen; /* total payload (sum of all packets) */ 155}; 156 157/* A packet's SGL. This + m_pkthdr has all info needed for tx */ 158struct sgl { 159 int nsegs; /* # of segments in the SGL, 0 means imm. 
tx */ 160 int nflits; /* # of flits needed for the SGL */ 161 bus_dma_segment_t seg[TX_SGL_SEGS]; 162}; 163 164static int service_iq(struct sge_iq *, int); 165static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t); 166static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *); 167static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int); 168static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *); 169static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t, 170 char *); 171static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *, 172 bus_addr_t *, void **); 173static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, 174 void *); 175static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *, 176 int, int); 177static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *); 178static void add_fl_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *, 179 struct sge_fl *); 180static int alloc_fwq(struct adapter *); 181static int free_fwq(struct adapter *); 182static int alloc_mgmtq(struct adapter *); 183static int free_mgmtq(struct adapter *); 184static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int, 185 struct sysctl_oid *); 186static int free_rxq(struct port_info *, struct sge_rxq *); 187#ifdef TCP_OFFLOAD 188static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int, 189 struct sysctl_oid *); 190static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *); 191#endif 192#ifdef DEV_NETMAP 193static int alloc_nm_rxq(struct port_info *, struct sge_nm_rxq *, int, int, 194 struct sysctl_oid *); 195static int free_nm_rxq(struct port_info *, struct sge_nm_rxq *); 196static int alloc_nm_txq(struct port_info *, struct sge_nm_txq *, int, int, 197 struct sysctl_oid *); 198static int free_nm_txq(struct port_info *, struct sge_nm_txq *); 199#endif 200static int ctrl_eq_alloc(struct adapter *, struct sge_eq *); 201static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 202#ifdef TCP_OFFLOAD 203static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *); 204#endif 205static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *); 206static int free_eq(struct adapter *, struct sge_eq *); 207static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *, 208 struct sysctl_oid *); 209static int free_wrq(struct adapter *, struct sge_wrq *); 210static int alloc_txq(struct port_info *, struct sge_txq *, int, 211 struct sysctl_oid *); 212static int free_txq(struct port_info *, struct sge_txq *); 213static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int); 214static inline void ring_fl_db(struct adapter *, struct sge_fl *); 215static int refill_fl(struct adapter *, struct sge_fl *, int); 216static void refill_sfl(void *); 217static int alloc_fl_sdesc(struct sge_fl *); 218static void free_fl_sdesc(struct adapter *, struct sge_fl *); 219static void find_best_refill_source(struct adapter *, struct sge_fl *, int); 220static void find_safe_refill_source(struct adapter *, struct sge_fl *); 221static void add_fl_to_sfl(struct adapter *, struct sge_fl *); 222 223static int get_pkt_sgl(struct sge_txq *, struct mbuf **, struct sgl *, int); 224static int free_pkt_sgl(struct sge_txq *, struct sgl *); 225static int write_txpkt_wr(struct port_info *, struct sge_txq *, struct mbuf *, 226 struct sgl *); 227static int 
add_to_txpkts(struct port_info *, struct sge_txq *, struct txpkts *, 228 struct mbuf *, struct sgl *); 229static void write_txpkts_wr(struct sge_txq *, struct txpkts *); 230static inline void write_ulp_cpl_sgl(struct port_info *, struct sge_txq *, 231 struct txpkts *, struct mbuf *, struct sgl *); 232static int write_sgl_to_txd(struct sge_eq *, struct sgl *, caddr_t *); 233static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int); 234static inline void ring_eq_db(struct adapter *, struct sge_eq *); 235static inline int reclaimable(struct sge_eq *); 236static int reclaim_tx_descs(struct sge_txq *, int, int); 237static void write_eqflush_wr(struct sge_eq *); 238static __be64 get_flit(bus_dma_segment_t *, int, int); 239static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *, 240 struct mbuf *); 241static int handle_fw_msg(struct sge_iq *, const struct rss_header *, 242 struct mbuf *); 243 244static int sysctl_uint16(SYSCTL_HANDLER_ARGS); 245static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS); 246 247static counter_u64_t extfree_refs; 248static counter_u64_t extfree_rels; 249 250/* 251 * Called on MOD_LOAD. Validates and calculates the SGE tunables. 252 */ 253void 254t4_sge_modload(void) 255{ 256 257 if (fl_pktshift < 0 || fl_pktshift > 7) { 258 printf("Invalid hw.cxgbe.fl_pktshift value (%d)," 259 " using 2 instead.\n", fl_pktshift); 260 fl_pktshift = 2; 261 } 262 263 if (spg_len != 64 && spg_len != 128) { 264 int len; 265 266#if defined(__i386__) || defined(__amd64__) 267 len = cpu_clflush_line_size > 64 ? 128 : 64; 268#else 269 len = 64; 270#endif 271 if (spg_len != -1) { 272 printf("Invalid hw.cxgbe.spg_len value (%d)," 273 " using %d instead.\n", spg_len, len); 274 } 275 spg_len = len; 276 } 277 278 if (cong_drop < -1 || cong_drop > 1) { 279 printf("Invalid hw.cxgbe.cong_drop value (%d)," 280 " using 0 instead.\n", cong_drop); 281 cong_drop = 0; 282 } 283 284 extfree_refs = counter_u64_alloc(M_WAITOK); 285 extfree_rels = counter_u64_alloc(M_WAITOK); 286 counter_u64_zero(extfree_refs); 287 counter_u64_zero(extfree_rels); 288} 289 290void 291t4_sge_modunload(void) 292{ 293 294 counter_u64_free(extfree_refs); 295 counter_u64_free(extfree_rels); 296} 297 298uint64_t 299t4_sge_extfree_refs(void) 300{ 301 uint64_t refs, rels; 302 303 rels = counter_u64_fetch(extfree_rels); 304 refs = counter_u64_fetch(extfree_refs); 305 306 return (refs - rels); 307} 308 309void 310t4_init_sge_cpl_handlers(struct adapter *sc) 311{ 312 313 t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg); 314 t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg); 315 t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update); 316 t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx); 317 t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl); 318} 319 320static inline void 321setup_pad_and_pack_boundaries(struct adapter *sc) 322{ 323 uint32_t v, m; 324 int pad, pack; 325 326 pad = fl_pad; 327 if (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad)) { 328 /* 329 * If there is any chance that we might use buffer packing and 330 * the chip is a T4, then pick 64 as the pad/pack boundary. Set 331 * it to 32 in all other cases. 332 */ 333 pad = is_t4(sc) && buffer_packing ? 64 : 32; 334 335 /* 336 * For fl_pad = 0 we'll still write a reasonable value to the 337 * register but all the freelists will opt out of padding. 338 * We'll complain here only if the user tried to set it to a 339 * value greater than 0 that was invalid. 
340 */ 341 if (fl_pad > 0) { 342 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value" 343 " (%d), using %d instead.\n", fl_pad, pad); 344 } 345 } 346 m = V_INGPADBOUNDARY(M_INGPADBOUNDARY); 347 v = V_INGPADBOUNDARY(ilog2(pad) - 5); 348 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 349 350 if (is_t4(sc)) { 351 if (fl_pack != -1 && fl_pack != pad) { 352 /* Complain but carry on. */ 353 device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored," 354 " using %d instead.\n", fl_pack, pad); 355 } 356 return; 357 } 358 359 pack = fl_pack; 360 if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 || 361 !powerof2(fl_pack)) { 362 pack = max(sc->params.pci.mps, CACHE_LINE_SIZE); 363 MPASS(powerof2(pack)); 364 if (pack < 16) 365 pack = 16; 366 if (pack == 32) 367 pack = 64; 368 if (pack > 4096) 369 pack = 4096; 370 if (fl_pack != -1) { 371 device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value" 372 " (%d), using %d instead.\n", fl_pack, pack); 373 } 374 } 375 m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY); 376 if (pack == 16) 377 v = V_INGPACKBOUNDARY(0); 378 else 379 v = V_INGPACKBOUNDARY(ilog2(pack) - 5); 380 381 MPASS(!is_t4(sc)); /* T4 doesn't have SGE_CONTROL2 */ 382 t4_set_reg_field(sc, A_SGE_CONTROL2, m, v); 383} 384 385/* 386 * adap->params.vpd.cclk must be set up before this is called. 387 */ 388void 389t4_tweak_chip_settings(struct adapter *sc) 390{ 391 int i; 392 uint32_t v, m; 393 int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200}; 394 int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk; 395 int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 396 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 397 static int sge_flbuf_sizes[] = { 398 MCLBYTES, 399#if MJUMPAGESIZE != MCLBYTES 400 MJUMPAGESIZE, 401 MJUMPAGESIZE - CL_METADATA_SIZE, 402 MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE, 403#endif 404 MJUM9BYTES, 405 MJUM16BYTES, 406 MCLBYTES - MSIZE - CL_METADATA_SIZE, 407 MJUM9BYTES - CL_METADATA_SIZE, 408 MJUM16BYTES - CL_METADATA_SIZE, 409 }; 410 411 KASSERT(sc->flags & MASTER_PF, 412 ("%s: trying to change chip settings when not master.", __func__)); 413 414 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 415 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 416 V_EGRSTATUSPAGESIZE(spg_len == 128); 417 t4_set_reg_field(sc, A_SGE_CONTROL, m, v); 418 419 setup_pad_and_pack_boundaries(sc); 420 421 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 422 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 423 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 424 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 425 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 426 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 427 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 428 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 429 t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v); 430 431 KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES, 432 ("%s: hw buffer size table too big", __func__)); 433 for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) { 434 t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i), 435 sge_flbuf_sizes[i]); 436 } 437 438 v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) | 439 V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]); 440 t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v); 441 442 KASSERT(intr_timer[0] <= timer_max, 443 ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0], 444 timer_max)); 445 for (i = 1; i < nitems(intr_timer); i++) { 446 KASSERT(intr_timer[i] >= intr_timer[i - 1], 447 ("%s: timers not listed in increasing order (%d)", 448 __func__, i)); 449 450 while (intr_timer[i] > 
timer_max) { 451 if (i == nitems(intr_timer) - 1) { 452 intr_timer[i] = timer_max; 453 break; 454 } 455 intr_timer[i] += intr_timer[i - 1]; 456 intr_timer[i] /= 2; 457 } 458 } 459 460 v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) | 461 V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1])); 462 t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v); 463 v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) | 464 V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3])); 465 t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v); 466 v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) | 467 V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5])); 468 t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v); 469 470 if (cong_drop == 0) { 471 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 472 F_TUNNELCNGDROP3; 473 t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0); 474 } 475 476 /* 4K, 16K, 64K, 256K DDP "page sizes" */ 477 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 478 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v); 479 480 m = v = F_TDDPTAGTCB; 481 t4_set_reg_field(sc, A_ULP_RX_CTL, m, v); 482 483 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 484 F_RESETDDPOFFSET; 485 v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 486 t4_set_reg_field(sc, A_TP_PARA_REG5, m, v); 487} 488 489/* 490 * SGE wants the buffer to be at least 64B and then a multiple of 16. If 491 * padding is is use the buffer's start and end need to be aligned to the pad 492 * boundary as well. We'll just make sure that the size is a multiple of the 493 * boundary here, it is up to the buffer allocation code to make sure the start 494 * of the buffer is aligned as well. 495 */ 496static inline int 497hwsz_ok(struct adapter *sc, int hwsz) 498{ 499 int mask = fl_pad ? sc->sge.pad_boundary - 1 : 16 - 1; 500 501 return (hwsz >= 64 && (hwsz & mask) == 0); 502} 503 504/* 505 * XXX: driver really should be able to deal with unexpected settings. 
506 */ 507int 508t4_read_chip_settings(struct adapter *sc) 509{ 510 struct sge *s = &sc->sge; 511 int i, j, n, rc = 0; 512 uint32_t m, v, r; 513 uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE); 514 static int sw_buf_sizes[] = { /* Sorted by size */ 515 MCLBYTES, 516#if MJUMPAGESIZE != MCLBYTES 517 MJUMPAGESIZE, 518#endif 519 MJUM9BYTES, 520 MJUM16BYTES 521 }; 522 struct sw_zone_info *swz, *safe_swz; 523 struct hw_buf_info *hwb; 524 525 m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE; 526 v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE | 527 V_EGRSTATUSPAGESIZE(spg_len == 128); 528 r = t4_read_reg(sc, A_SGE_CONTROL); 529 if ((r & m) != v) { 530 device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r); 531 rc = EINVAL; 532 } 533 s->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5); 534 535 if (is_t4(sc)) 536 s->pack_boundary = s->pad_boundary; 537 else { 538 r = t4_read_reg(sc, A_SGE_CONTROL2); 539 if (G_INGPACKBOUNDARY(r) == 0) 540 s->pack_boundary = 16; 541 else 542 s->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5); 543 } 544 545 v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) | 546 V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) | 547 V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) | 548 V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) | 549 V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) | 550 V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) | 551 V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) | 552 V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10); 553 r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE); 554 if (r != v) { 555 device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r); 556 rc = EINVAL; 557 } 558 559 /* Filter out unusable hw buffer sizes entirely (mark with -2). */ 560 hwb = &s->hw_buf_info[0]; 561 for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) { 562 r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i)); 563 hwb->size = r; 564 hwb->zidx = hwsz_ok(sc, r) ? -1 : -2; 565 hwb->next = -1; 566 } 567 568 /* 569 * Create a sorted list in decreasing order of hw buffer sizes (and so 570 * increasing order of spare area) for each software zone. 571 * 572 * If padding is enabled then the start and end of the buffer must align 573 * to the pad boundary; if packing is enabled then they must align with 574 * the pack boundary as well. Allocations from the cluster zones are 575 * aligned to min(size, 4K), so the buffer starts at that alignment and 576 * ends at hwb->size alignment. If mbuf inlining is allowed the 577 * starting alignment will be reduced to MSIZE and the driver will 578 * exercise appropriate caution when deciding on the best buffer layout 579 * to use. 
580 */ 581 n = 0; /* no usable buffer size to begin with */ 582 swz = &s->sw_zone_info[0]; 583 safe_swz = NULL; 584 for (i = 0; i < SW_ZONE_SIZES; i++, swz++) { 585 int8_t head = -1, tail = -1; 586 587 swz->size = sw_buf_sizes[i]; 588 swz->zone = m_getzone(swz->size); 589 swz->type = m_gettype(swz->size); 590 591 if (swz->size < PAGE_SIZE) { 592 MPASS(powerof2(swz->size)); 593 if (fl_pad && (swz->size % sc->sge.pad_boundary != 0)) 594 continue; 595 } 596 597 if (swz->size == safest_rx_cluster) 598 safe_swz = swz; 599 600 hwb = &s->hw_buf_info[0]; 601 for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) { 602 if (hwb->zidx != -1 || hwb->size > swz->size) 603 continue; 604#ifdef INVARIANTS 605 if (fl_pad) 606 MPASS(hwb->size % sc->sge.pad_boundary == 0); 607#endif 608 hwb->zidx = i; 609 if (head == -1) 610 head = tail = j; 611 else if (hwb->size < s->hw_buf_info[tail].size) { 612 s->hw_buf_info[tail].next = j; 613 tail = j; 614 } else { 615 int8_t *cur; 616 struct hw_buf_info *t; 617 618 for (cur = &head; *cur != -1; cur = &t->next) { 619 t = &s->hw_buf_info[*cur]; 620 if (hwb->size == t->size) { 621 hwb->zidx = -2; 622 break; 623 } 624 if (hwb->size > t->size) { 625 hwb->next = *cur; 626 *cur = j; 627 break; 628 } 629 } 630 } 631 } 632 swz->head_hwidx = head; 633 swz->tail_hwidx = tail; 634 635 if (tail != -1) { 636 n++; 637 if (swz->size - s->hw_buf_info[tail].size >= 638 CL_METADATA_SIZE) 639 sc->flags |= BUF_PACKING_OK; 640 } 641 } 642 if (n == 0) { 643 device_printf(sc->dev, "no usable SGE FL buffer size.\n"); 644 rc = EINVAL; 645 } 646 647 s->safe_hwidx1 = -1; 648 s->safe_hwidx2 = -1; 649 if (safe_swz != NULL) { 650 s->safe_hwidx1 = safe_swz->head_hwidx; 651 for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) { 652 int spare; 653 654 hwb = &s->hw_buf_info[i]; 655#ifdef INVARIANTS 656 if (fl_pad) 657 MPASS(hwb->size % sc->sge.pad_boundary == 0); 658#endif 659 spare = safe_swz->size - hwb->size; 660 if (spare >= CL_METADATA_SIZE) { 661 s->safe_hwidx2 = i; 662 break; 663 } 664 } 665 } 666 667 r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD); 668 s->counter_val[0] = G_THRESHOLD_0(r); 669 s->counter_val[1] = G_THRESHOLD_1(r); 670 s->counter_val[2] = G_THRESHOLD_2(r); 671 s->counter_val[3] = G_THRESHOLD_3(r); 672 673 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1); 674 s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc); 675 s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc); 676 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3); 677 s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc); 678 s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc); 679 r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5); 680 s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc); 681 s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc); 682 683 if (cong_drop == 0) { 684 m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 | 685 F_TUNNELCNGDROP3; 686 r = t4_read_reg(sc, A_TP_PARA_REG3); 687 if (r & m) { 688 device_printf(sc->dev, 689 "invalid TP_PARA_REG3(0x%x)\n", r); 690 rc = EINVAL; 691 } 692 } 693 694 v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6); 695 r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ); 696 if (r != v) { 697 device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r); 698 rc = EINVAL; 699 } 700 701 m = v = F_TDDPTAGTCB; 702 r = t4_read_reg(sc, A_ULP_RX_CTL); 703 if ((r & m) != v) { 704 device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r); 705 rc = EINVAL; 706 } 707 708 m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET | 709 F_RESETDDPOFFSET; 710 v = 
V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET; 711 r = t4_read_reg(sc, A_TP_PARA_REG5); 712 if ((r & m) != v) { 713 device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r); 714 rc = EINVAL; 715 } 716 717 r = t4_read_reg(sc, A_SGE_CONM_CTRL); 718 s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1; 719 if (is_t4(sc)) 720 s->fl_starve_threshold2 = s->fl_starve_threshold; 721 else 722 s->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1; 723 724 /* egress queues: log2 of # of doorbells per BAR2 page */ 725 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF); 726 r >>= S_QUEUESPERPAGEPF0 + 727 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 728 s->eq_s_qpp = r & M_QUEUESPERPAGEPF0; 729 730 /* ingress queues: log2 of # of doorbells per BAR2 page */ 731 r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF); 732 r >>= S_QUEUESPERPAGEPF0 + 733 (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf; 734 s->iq_s_qpp = r & M_QUEUESPERPAGEPF0; 735 736 t4_init_tp_params(sc); 737 738 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 739 t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd); 740 741 return (rc); 742} 743 744int 745t4_create_dma_tag(struct adapter *sc) 746{ 747 int rc; 748 749 rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0, 750 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 751 BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, 752 NULL, &sc->dmat); 753 if (rc != 0) { 754 device_printf(sc->dev, 755 "failed to create main DMA tag: %d\n", rc); 756 } 757 758 return (rc); 759} 760 761void 762t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 763 struct sysctl_oid_list *children) 764{ 765 766 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes", 767 CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A", 768 "freelist buffer sizes"); 769 770 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD, 771 NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)"); 772 773 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD, 774 NULL, sc->sge.pad_boundary, "payload pad boundary (bytes)"); 775 776 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD, 777 NULL, spg_len, "status page size (bytes)"); 778 779 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD, 780 NULL, cong_drop, "congestion drop setting"); 781 782 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD, 783 NULL, sc->sge.pack_boundary, "payload pack boundary (bytes)"); 784} 785 786int 787t4_destroy_dma_tag(struct adapter *sc) 788{ 789 if (sc->dmat) 790 bus_dma_tag_destroy(sc->dmat); 791 792 return (0); 793} 794 795/* 796 * Allocate and initialize the firmware event queue and the management queue. 797 * 798 * Returns errno on failure. Resources allocated up to that point may still be 799 * allocated. Caller is responsible for cleanup in case this function fails. 800 */ 801int 802t4_setup_adapter_queues(struct adapter *sc) 803{ 804 int rc; 805 806 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 807 808 sysctl_ctx_init(&sc->ctx); 809 sc->flags |= ADAP_SYSCTL_CTX; 810 811 /* 812 * Firmware event queue 813 */ 814 rc = alloc_fwq(sc); 815 if (rc != 0) 816 return (rc); 817 818 /* 819 * Management queue. This is just a control queue that uses the fwq as 820 * its associated iq. 
821 */ 822 rc = alloc_mgmtq(sc); 823 824 return (rc); 825} 826 827/* 828 * Idempotent 829 */ 830int 831t4_teardown_adapter_queues(struct adapter *sc) 832{ 833 834 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 835 836 /* Do this before freeing the queue */ 837 if (sc->flags & ADAP_SYSCTL_CTX) { 838 sysctl_ctx_free(&sc->ctx); 839 sc->flags &= ~ADAP_SYSCTL_CTX; 840 } 841 842 free_mgmtq(sc); 843 free_fwq(sc); 844 845 return (0); 846} 847 848static inline int 849port_intr_count(struct port_info *pi) 850{ 851 int rc = 0; 852 853 if (pi->flags & INTR_RXQ) 854 rc += pi->nrxq; 855#ifdef TCP_OFFLOAD 856 if (pi->flags & INTR_OFLD_RXQ) 857 rc += pi->nofldrxq; 858#endif 859#ifdef DEV_NETMAP 860 if (pi->flags & INTR_NM_RXQ) 861 rc += pi->nnmrxq; 862#endif 863 return (rc); 864} 865 866static inline int 867first_vector(struct port_info *pi) 868{ 869 struct adapter *sc = pi->adapter; 870 int rc = T4_EXTRA_INTR, i; 871 872 if (sc->intr_count == 1) 873 return (0); 874 875 for_each_port(sc, i) { 876 if (i == pi->port_id) 877 break; 878 879 rc += port_intr_count(sc->port[i]); 880 } 881 882 return (rc); 883} 884 885/* 886 * Given an arbitrary "index," come up with an iq that can be used by other 887 * queues (of this port) for interrupt forwarding, SGE egress updates, etc. 888 * The iq returned is guaranteed to be something that takes direct interrupts. 889 */ 890static struct sge_iq * 891port_intr_iq(struct port_info *pi, int idx) 892{ 893 struct adapter *sc = pi->adapter; 894 struct sge *s = &sc->sge; 895 struct sge_iq *iq = NULL; 896 int nintr, i; 897 898 if (sc->intr_count == 1) 899 return (&sc->sge.fwq); 900 901 nintr = port_intr_count(pi); 902 KASSERT(nintr != 0, 903 ("%s: pi %p has no exclusive interrupts, total interrupts = %d", 904 __func__, pi, sc->intr_count)); 905#ifdef DEV_NETMAP 906 /* Exclude netmap queues as they can't take anyone else's interrupts */ 907 if (pi->flags & INTR_NM_RXQ) 908 nintr -= pi->nnmrxq; 909 KASSERT(nintr > 0, 910 ("%s: pi %p has nintr %d after netmap adjustment of %d", __func__, 911 pi, nintr, pi->nnmrxq)); 912#endif 913 i = idx % nintr; 914 915 if (pi->flags & INTR_RXQ) { 916 if (i < pi->nrxq) { 917 iq = &s->rxq[pi->first_rxq + i].iq; 918 goto done; 919 } 920 i -= pi->nrxq; 921 } 922#ifdef TCP_OFFLOAD 923 if (pi->flags & INTR_OFLD_RXQ) { 924 if (i < pi->nofldrxq) { 925 iq = &s->ofld_rxq[pi->first_ofld_rxq + i].iq; 926 goto done; 927 } 928 i -= pi->nofldrxq; 929 } 930#endif 931 panic("%s: pi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__, 932 pi, pi->flags & INTR_ALL, idx, nintr); 933done: 934 MPASS(iq != NULL); 935 KASSERT(iq->flags & IQ_INTR, 936 ("%s: iq %p (port %p, intr_flags 0x%lx, idx %d)", __func__, iq, pi, 937 pi->flags & INTR_ALL, idx)); 938 return (iq); 939} 940 941/* Maximum payload that can be delivered with a single iq descriptor */ 942static inline int 943mtu_to_max_payload(struct adapter *sc, int mtu, const int toe) 944{ 945 int payload; 946 947#ifdef TCP_OFFLOAD 948 if (toe) { 949 payload = sc->tt.rx_coalesce ? 
950 G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu; 951 } else { 952#endif 953 /* large enough even when hw VLAN extraction is disabled */ 954 payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 955 mtu; 956#ifdef TCP_OFFLOAD 957 } 958#endif 959 960 return (payload); 961} 962 963int 964t4_setup_port_queues(struct port_info *pi) 965{ 966 int rc = 0, i, j, intr_idx, iqid; 967 struct sge_rxq *rxq; 968 struct sge_txq *txq; 969 struct sge_wrq *ctrlq; 970#ifdef TCP_OFFLOAD 971 struct sge_ofld_rxq *ofld_rxq; 972 struct sge_wrq *ofld_txq; 973#endif 974#ifdef DEV_NETMAP 975 struct sge_nm_rxq *nm_rxq; 976 struct sge_nm_txq *nm_txq; 977#endif 978 char name[16]; 979 struct adapter *sc = pi->adapter; 980 struct ifnet *ifp = pi->ifp; 981 struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 982 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 983 int maxp, mtu = ifp->if_mtu; 984 985 /* Interrupt vector to start from (when using multiple vectors) */ 986 intr_idx = first_vector(pi); 987 988 /* 989 * First pass over all NIC and TOE rx queues: 990 * a) initialize iq and fl 991 * b) allocate queue iff it will take direct interrupts. 992 */ 993 maxp = mtu_to_max_payload(sc, mtu, 0); 994 if (pi->flags & INTR_RXQ) { 995 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 996 CTLFLAG_RD, NULL, "rx queues"); 997 } 998 for_each_rxq(pi, i, rxq) { 999 1000 init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq); 1001 1002 snprintf(name, sizeof(name), "%s rxq%d-fl", 1003 device_get_nameunit(pi->dev), i); 1004 init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, name); 1005 1006 if (pi->flags & INTR_RXQ) { 1007 rxq->iq.flags |= IQ_INTR; 1008 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1009 if (rc != 0) 1010 goto done; 1011 intr_idx++; 1012 } 1013 } 1014#ifdef TCP_OFFLOAD 1015 maxp = mtu_to_max_payload(sc, mtu, 1); 1016 if (is_offload(sc) && pi->flags & INTR_OFLD_RXQ) { 1017 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1018 CTLFLAG_RD, NULL, 1019 "rx queues for offloaded TCP connections"); 1020 } 1021 for_each_ofld_rxq(pi, i, ofld_rxq) { 1022 1023 init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 1024 pi->qsize_rxq); 1025 1026 snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1027 device_get_nameunit(pi->dev), i); 1028 init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, name); 1029 1030 if (pi->flags & INTR_OFLD_RXQ) { 1031 ofld_rxq->iq.flags |= IQ_INTR; 1032 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1033 if (rc != 0) 1034 goto done; 1035 intr_idx++; 1036 } 1037 } 1038#endif 1039#ifdef DEV_NETMAP 1040 /* 1041 * We don't have buffers to back the netmap rx queues right now so we 1042 * create the queues in a way that doesn't set off any congestion signal 1043 * in the chip. 1044 */ 1045 if (pi->flags & INTR_NM_RXQ) { 1046 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_rxq", 1047 CTLFLAG_RD, NULL, "rx queues for netmap"); 1048 for_each_nm_rxq(pi, i, nm_rxq) { 1049 rc = alloc_nm_rxq(pi, nm_rxq, intr_idx, i, oid); 1050 if (rc != 0) 1051 goto done; 1052 intr_idx++; 1053 } 1054 } 1055#endif 1056 1057 /* 1058 * Second pass over all NIC and TOE rx queues. The queues forwarding 1059 * their interrupts are allocated now. 
1060 */ 1061 j = 0; 1062 if (!(pi->flags & INTR_RXQ)) { 1063 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 1064 CTLFLAG_RD, NULL, "rx queues"); 1065 for_each_rxq(pi, i, rxq) { 1066 MPASS(!(rxq->iq.flags & IQ_INTR)); 1067 1068 intr_idx = port_intr_iq(pi, j)->abs_id; 1069 1070 rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1071 if (rc != 0) 1072 goto done; 1073 j++; 1074 } 1075 } 1076#ifdef TCP_OFFLOAD 1077 if (is_offload(sc) && !(pi->flags & INTR_OFLD_RXQ)) { 1078 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1079 CTLFLAG_RD, NULL, 1080 "rx queues for offloaded TCP connections"); 1081 for_each_ofld_rxq(pi, i, ofld_rxq) { 1082 MPASS(!(ofld_rxq->iq.flags & IQ_INTR)); 1083 1084 intr_idx = port_intr_iq(pi, j)->abs_id; 1085 1086 rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1087 if (rc != 0) 1088 goto done; 1089 j++; 1090 } 1091 } 1092#endif 1093#ifdef DEV_NETMAP 1094 if (!(pi->flags & INTR_NM_RXQ)) 1095 CXGBE_UNIMPLEMENTED(__func__); 1096#endif 1097 1098 /* 1099 * Now the tx queues. Only one pass needed. 1100 */ 1101 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, 1102 NULL, "tx queues"); 1103 j = 0; 1104 for_each_txq(pi, i, txq) { 1105 iqid = port_intr_iq(pi, j)->cntxt_id; 1106 snprintf(name, sizeof(name), "%s txq%d", 1107 device_get_nameunit(pi->dev), i); 1108 init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid, 1109 name); 1110 1111 rc = alloc_txq(pi, txq, i, oid); 1112 if (rc != 0) 1113 goto done; 1114 j++; 1115 } 1116#ifdef TCP_OFFLOAD 1117 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq", 1118 CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); 1119 for_each_ofld_txq(pi, i, ofld_txq) { 1120 struct sysctl_oid *oid2; 1121 1122 iqid = port_intr_iq(pi, j)->cntxt_id; 1123 snprintf(name, sizeof(name), "%s ofld_txq%d", 1124 device_get_nameunit(pi->dev), i); 1125 init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan, 1126 iqid, name); 1127 1128 snprintf(name, sizeof(name), "%d", i); 1129 oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 1130 name, CTLFLAG_RD, NULL, "offload tx queue"); 1131 1132 rc = alloc_wrq(sc, pi, ofld_txq, oid2); 1133 if (rc != 0) 1134 goto done; 1135 j++; 1136 } 1137#endif 1138#ifdef DEV_NETMAP 1139 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_txq", 1140 CTLFLAG_RD, NULL, "tx queues for netmap use"); 1141 for_each_nm_txq(pi, i, nm_txq) { 1142 iqid = pi->first_nm_rxq + (j % pi->nnmrxq); 1143 rc = alloc_nm_txq(pi, nm_txq, iqid, i, oid); 1144 if (rc != 0) 1145 goto done; 1146 j++; 1147 } 1148#endif 1149 1150 /* 1151 * Finally, the control queue. 
1152 */ 1153 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, 1154 NULL, "ctrl queue"); 1155 ctrlq = &sc->sge.ctrlq[pi->port_id]; 1156 iqid = port_intr_iq(pi, 0)->cntxt_id; 1157 snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev)); 1158 init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name); 1159 rc = alloc_wrq(sc, pi, ctrlq, oid); 1160 1161done: 1162 if (rc) 1163 t4_teardown_port_queues(pi); 1164 1165 return (rc); 1166} 1167 1168/* 1169 * Idempotent 1170 */ 1171int 1172t4_teardown_port_queues(struct port_info *pi) 1173{ 1174 int i; 1175 struct adapter *sc = pi->adapter; 1176 struct sge_rxq *rxq; 1177 struct sge_txq *txq; 1178#ifdef TCP_OFFLOAD 1179 struct sge_ofld_rxq *ofld_rxq; 1180 struct sge_wrq *ofld_txq; 1181#endif 1182#ifdef DEV_NETMAP 1183 struct sge_nm_rxq *nm_rxq; 1184 struct sge_nm_txq *nm_txq; 1185#endif 1186 1187 /* Do this before freeing the queues */ 1188 if (pi->flags & PORT_SYSCTL_CTX) { 1189 sysctl_ctx_free(&pi->ctx); 1190 pi->flags &= ~PORT_SYSCTL_CTX; 1191 } 1192 1193 /* 1194 * Take down all the tx queues first, as they reference the rx queues 1195 * (for egress updates, etc.). 1196 */ 1197 1198 free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 1199 1200 for_each_txq(pi, i, txq) { 1201 free_txq(pi, txq); 1202 } 1203#ifdef TCP_OFFLOAD 1204 for_each_ofld_txq(pi, i, ofld_txq) { 1205 free_wrq(sc, ofld_txq); 1206 } 1207#endif 1208#ifdef DEV_NETMAP 1209 for_each_nm_txq(pi, i, nm_txq) 1210 free_nm_txq(pi, nm_txq); 1211#endif 1212 1213 /* 1214 * Then take down the rx queues that forward their interrupts, as they 1215 * reference other rx queues. 1216 */ 1217 1218 for_each_rxq(pi, i, rxq) { 1219 if ((rxq->iq.flags & IQ_INTR) == 0) 1220 free_rxq(pi, rxq); 1221 } 1222#ifdef TCP_OFFLOAD 1223 for_each_ofld_rxq(pi, i, ofld_rxq) { 1224 if ((ofld_rxq->iq.flags & IQ_INTR) == 0) 1225 free_ofld_rxq(pi, ofld_rxq); 1226 } 1227#endif 1228#ifdef DEV_NETMAP 1229 for_each_nm_rxq(pi, i, nm_rxq) 1230 free_nm_rxq(pi, nm_rxq); 1231#endif 1232 1233 /* 1234 * Then take down the rx queues that take direct interrupts. 1235 */ 1236 1237 for_each_rxq(pi, i, rxq) { 1238 if (rxq->iq.flags & IQ_INTR) 1239 free_rxq(pi, rxq); 1240 } 1241#ifdef TCP_OFFLOAD 1242 for_each_ofld_rxq(pi, i, ofld_rxq) { 1243 if (ofld_rxq->iq.flags & IQ_INTR) 1244 free_ofld_rxq(pi, ofld_rxq); 1245 } 1246#endif 1247#ifdef DEV_NETMAP 1248 CXGBE_UNIMPLEMENTED(__func__); 1249#endif 1250 1251 return (0); 1252} 1253 1254/* 1255 * Deals with errors and the firmware event queue. All data rx queues forward 1256 * their interrupt to the firmware event queue. 
1257 */ 1258void 1259t4_intr_all(void *arg) 1260{ 1261 struct adapter *sc = arg; 1262 struct sge_iq *fwq = &sc->sge.fwq; 1263 1264 t4_intr_err(arg); 1265 if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { 1266 service_iq(fwq, 0); 1267 atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); 1268 } 1269} 1270 1271/* Deals with error interrupts */ 1272void 1273t4_intr_err(void *arg) 1274{ 1275 struct adapter *sc = arg; 1276 1277 t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1278 t4_slow_intr_handler(sc); 1279} 1280 1281void 1282t4_intr_evt(void *arg) 1283{ 1284 struct sge_iq *iq = arg; 1285 1286 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1287 service_iq(iq, 0); 1288 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1289 } 1290} 1291 1292void 1293t4_intr(void *arg) 1294{ 1295 struct sge_iq *iq = arg; 1296 1297 if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1298 service_iq(iq, 0); 1299 atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1300 } 1301} 1302 1303/* 1304 * Deals with anything and everything on the given ingress queue. 1305 */ 1306static int 1307service_iq(struct sge_iq *iq, int budget) 1308{ 1309 struct sge_iq *q; 1310 struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ 1311 struct sge_fl *fl; /* Use iff IQ_HAS_FL */ 1312 struct adapter *sc = iq->adapter; 1313 struct iq_desc *d = &iq->desc[iq->cidx]; 1314 int ndescs = 0, limit; 1315 int rsp_type, refill; 1316 uint32_t lq; 1317 uint16_t fl_hw_cidx; 1318 struct mbuf *m0; 1319 STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1320#if defined(INET) || defined(INET6) 1321 const struct timeval lro_timeout = {0, sc->lro_timeout}; 1322#endif 1323 1324 KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1325 1326 limit = budget ? budget : iq->qsize / 16; 1327 1328 if (iq->flags & IQ_HAS_FL) { 1329 fl = &rxq->fl; 1330 fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 1331 } else { 1332 fl = NULL; 1333 fl_hw_cidx = 0; /* to silence gcc warning */ 1334 } 1335 1336 /* 1337 * We always come back and check the descriptor ring for new indirect 1338 * interrupts and other responses after running a single handler. 1339 */ 1340 for (;;) { 1341 while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1342 1343 rmb(); 1344 1345 refill = 0; 1346 m0 = NULL; 1347 rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1348 lq = be32toh(d->rsp.pldbuflen_qid); 1349 1350 switch (rsp_type) { 1351 case X_RSPD_TYPE_FLBUF: 1352 1353 KASSERT(iq->flags & IQ_HAS_FL, 1354 ("%s: data for an iq (%p) with no freelist", 1355 __func__, iq)); 1356 1357 m0 = get_fl_payload(sc, fl, lq); 1358 if (__predict_false(m0 == NULL)) 1359 goto process_iql; 1360 refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; 1361#ifdef T4_PKT_TIMESTAMP 1362 /* 1363 * 60 bit timestamp for the payload is 1364 * *(uint64_t *)m0->m_pktdat. Note that it is 1365 * in the leading free-space in the mbuf. The 1366 * kernel can clobber it during a pullup, 1367 * m_copymdata, etc. You need to make sure that 1368 * the mbuf reaches you unmolested if you care 1369 * about the timestamp. 
1370 */ 1371 *(uint64_t *)m0->m_pktdat = 1372 be64toh(ctrl->u.last_flit) & 1373 0xfffffffffffffff; 1374#endif 1375 1376 /* fall through */ 1377 1378 case X_RSPD_TYPE_CPL: 1379 KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1380 ("%s: bad opcode %02x.", __func__, 1381 d->rss.opcode)); 1382 sc->cpl_handler[d->rss.opcode](iq, &d->rss, m0); 1383 break; 1384 1385 case X_RSPD_TYPE_INTR: 1386 1387 /* 1388 * Interrupts should be forwarded only to queues 1389 * that are not forwarding their interrupts. 1390 * This means service_iq can recurse but only 1 1391 * level deep. 1392 */ 1393 KASSERT(budget == 0, 1394 ("%s: budget %u, rsp_type %u", __func__, 1395 budget, rsp_type)); 1396 1397 /* 1398 * There are 1K interrupt-capable queues (qids 0 1399 * through 1023). A response type indicating a 1400 * forwarded interrupt with a qid >= 1K is an 1401 * iWARP async notification. 1402 */ 1403 if (lq >= 1024) { 1404 sc->an_handler(iq, &d->rsp); 1405 break; 1406 } 1407 1408 q = sc->sge.iqmap[lq - sc->sge.iq_start]; 1409 if (atomic_cmpset_int(&q->state, IQS_IDLE, 1410 IQS_BUSY)) { 1411 if (service_iq(q, q->qsize / 16) == 0) { 1412 atomic_cmpset_int(&q->state, 1413 IQS_BUSY, IQS_IDLE); 1414 } else { 1415 STAILQ_INSERT_TAIL(&iql, q, 1416 link); 1417 } 1418 } 1419 break; 1420 1421 default: 1422 KASSERT(0, 1423 ("%s: illegal response type %d on iq %p", 1424 __func__, rsp_type, iq)); 1425 log(LOG_ERR, 1426 "%s: illegal response type %d on iq %p", 1427 device_get_nameunit(sc->dev), rsp_type, iq); 1428 break; 1429 } 1430 1431 d++; 1432 if (__predict_false(++iq->cidx == iq->sidx)) { 1433 iq->cidx = 0; 1434 iq->gen ^= F_RSPD_GEN; 1435 d = &iq->desc[0]; 1436 } 1437 if (__predict_false(++ndescs == limit)) { 1438 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 1439 V_CIDXINC(ndescs) | 1440 V_INGRESSQID(iq->cntxt_id) | 1441 V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1442 ndescs = 0; 1443 1444#if defined(INET) || defined(INET6) 1445 if (iq->flags & IQ_LRO_ENABLED && 1446 sc->lro_timeout != 0) { 1447 tcp_lro_flush_inactive(&rxq->lro, 1448 &lro_timeout); 1449 } 1450#endif 1451 1452 if (budget) { 1453 if (iq->flags & IQ_HAS_FL) { 1454 FL_LOCK(fl); 1455 refill_fl(sc, fl, 32); 1456 FL_UNLOCK(fl); 1457 } 1458 return (EINPROGRESS); 1459 } 1460 } 1461 if (refill) { 1462 FL_LOCK(fl); 1463 refill_fl(sc, fl, 32); 1464 FL_UNLOCK(fl); 1465 fl_hw_cidx = fl->hw_cidx; 1466 } 1467 } 1468 1469process_iql: 1470 if (STAILQ_EMPTY(&iql)) 1471 break; 1472 1473 /* 1474 * Process the head only, and send it to the back of the list if 1475 * it's still not done. 
1476 */ 1477 q = STAILQ_FIRST(&iql); 1478 STAILQ_REMOVE_HEAD(&iql, link); 1479 if (service_iq(q, q->qsize / 8) == 0) 1480 atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1481 else 1482 STAILQ_INSERT_TAIL(&iql, q, link); 1483 } 1484 1485#if defined(INET) || defined(INET6) 1486 if (iq->flags & IQ_LRO_ENABLED) { 1487 struct lro_ctrl *lro = &rxq->lro; 1488 struct lro_entry *l; 1489 1490 while (!SLIST_EMPTY(&lro->lro_active)) { 1491 l = SLIST_FIRST(&lro->lro_active); 1492 SLIST_REMOVE_HEAD(&lro->lro_active, next); 1493 tcp_lro_flush(lro, l); 1494 } 1495 } 1496#endif 1497 1498 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) | 1499 V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1500 1501 if (iq->flags & IQ_HAS_FL) { 1502 int starved; 1503 1504 FL_LOCK(fl); 1505 starved = refill_fl(sc, fl, 64); 1506 FL_UNLOCK(fl); 1507 if (__predict_false(starved != 0)) 1508 add_fl_to_sfl(sc, fl); 1509 } 1510 1511 return (0); 1512} 1513 1514static inline int 1515cl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) 1516{ 1517 int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; 1518 1519 if (rc) 1520 MPASS(cll->region3 >= CL_METADATA_SIZE); 1521 1522 return (rc); 1523} 1524 1525static inline struct cluster_metadata * 1526cl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, 1527 caddr_t cl) 1528{ 1529 1530 if (cl_has_metadata(fl, cll)) { 1531 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1532 1533 return ((struct cluster_metadata *)(cl + swz->size) - 1); 1534 } 1535 return (NULL); 1536} 1537 1538static int 1539rxb_free(struct mbuf *m, void *arg1, void *arg2) 1540{ 1541 uma_zone_t zone = arg1; 1542 caddr_t cl = arg2; 1543 1544 uma_zfree(zone, cl); 1545 counter_u64_add(extfree_rels, 1); 1546 1547 return (EXT_FREE_OK); 1548} 1549 1550/* 1551 * The mbuf returned by this function could be allocated from zone_mbuf or 1552 * constructed in spare room in the cluster. 1553 * 1554 * The mbuf carries the payload in one of these ways 1555 * a) frame inside the mbuf (mbuf from zone_mbuf) 1556 * b) m_cljset (for clusters without metadata) zone_mbuf 1557 * c) m_extaddref (cluster with metadata) inline mbuf 1558 * d) m_extaddref (cluster with metadata) zone_mbuf 1559 */ 1560static struct mbuf * 1561get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1562 int remaining) 1563{ 1564 struct mbuf *m; 1565 struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1566 struct cluster_layout *cll = &sd->cll; 1567 struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1568 struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; 1569 struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); 1570 int len, blen; 1571 caddr_t payload; 1572 1573 blen = hwb->size - fl->rx_offset; /* max possible in this buf */ 1574 len = min(remaining, blen); 1575 payload = sd->cl + cll->region1 + fl->rx_offset; 1576 if (fl->flags & FL_BUF_PACKING) { 1577 const u_int l = fr_offset + len; 1578 const u_int pad = roundup2(l, fl->buf_boundary) - l; 1579 1580 if (fl->rx_offset + len + pad < hwb->size) 1581 blen = len + pad; 1582 MPASS(fl->rx_offset + blen <= hwb->size); 1583 } else { 1584 MPASS(fl->rx_offset == 0); /* not packing */ 1585 } 1586 1587 1588 if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1589 1590 /* 1591 * Copy payload into a freshly allocated mbuf. 1592 */ 1593 1594 m = fr_offset == 0 ? 
1595 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1596 if (m == NULL) 1597 return (NULL); 1598 fl->mbuf_allocated++; 1599#ifdef T4_PKT_TIMESTAMP 1600 /* Leave room for a timestamp */ 1601 m->m_data += 8; 1602#endif 1603 /* copy data to mbuf */ 1604 bcopy(payload, mtod(m, caddr_t), len); 1605 1606 } else if (sd->nmbuf * MSIZE < cll->region1) { 1607 1608 /* 1609 * There's spare room in the cluster for an mbuf. Create one 1610 * and associate it with the payload that's in the cluster. 1611 */ 1612 1613 MPASS(clm != NULL); 1614 m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); 1615 /* No bzero required */ 1616 if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 1617 fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE)) 1618 return (NULL); 1619 fl->mbuf_inlined++; 1620 m_extaddref(m, payload, blen, &clm->refcount, rxb_free, 1621 swz->zone, sd->cl); 1622 if (sd->nmbuf++ == 0) 1623 counter_u64_add(extfree_refs, 1); 1624 1625 } else { 1626 1627 /* 1628 * Grab an mbuf from zone_mbuf and associate it with the 1629 * payload in the cluster. 1630 */ 1631 1632 m = fr_offset == 0 ? 1633 m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1634 if (m == NULL) 1635 return (NULL); 1636 fl->mbuf_allocated++; 1637 if (clm != NULL) { 1638 m_extaddref(m, payload, blen, &clm->refcount, 1639 rxb_free, swz->zone, sd->cl); 1640 if (sd->nmbuf++ == 0) 1641 counter_u64_add(extfree_refs, 1); 1642 } else { 1643 m_cljset(m, sd->cl, swz->type); 1644 sd->cl = NULL; /* consumed, not a recycle candidate */ 1645 } 1646 } 1647 if (fr_offset == 0) 1648 m->m_pkthdr.len = remaining; 1649 m->m_len = len; 1650 1651 if (fl->flags & FL_BUF_PACKING) { 1652 fl->rx_offset += blen; 1653 MPASS(fl->rx_offset <= hwb->size); 1654 if (fl->rx_offset < hwb->size) 1655 return (m); /* without advancing the cidx */ 1656 } 1657 1658 if (__predict_false(++fl->cidx % 8 == 0)) { 1659 uint16_t cidx = fl->cidx / 8; 1660 1661 if (__predict_false(cidx == fl->sidx)) 1662 fl->cidx = cidx = 0; 1663 fl->hw_cidx = cidx; 1664 } 1665 fl->rx_offset = 0; 1666 1667 return (m); 1668} 1669 1670static struct mbuf * 1671get_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) 1672{ 1673 struct mbuf *m0, *m, **pnext; 1674 u_int remaining; 1675 const u_int total = G_RSPD_LEN(len_newbuf); 1676 1677 if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1678 M_ASSERTPKTHDR(fl->m0); 1679 MPASS(fl->m0->m_pkthdr.len == total); 1680 MPASS(fl->remaining < total); 1681 1682 m0 = fl->m0; 1683 pnext = fl->pnext; 1684 remaining = fl->remaining; 1685 fl->flags &= ~FL_BUF_RESUME; 1686 goto get_segment; 1687 } 1688 1689 if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1690 fl->rx_offset = 0; 1691 if (__predict_false(++fl->cidx % 8 == 0)) { 1692 uint16_t cidx = fl->cidx / 8; 1693 1694 if (__predict_false(cidx == fl->sidx)) 1695 fl->cidx = cidx = 0; 1696 fl->hw_cidx = cidx; 1697 } 1698 } 1699 1700 /* 1701 * Payload starts at rx_offset in the current hw buffer. Its length is 1702 * 'len' and it may span multiple hw buffers. 
1703 */ 1704 1705 m0 = get_scatter_segment(sc, fl, 0, total); 1706 if (m0 == NULL) 1707 return (NULL); 1708 remaining = total - m0->m_len; 1709 pnext = &m0->m_next; 1710 while (remaining > 0) { 1711get_segment: 1712 MPASS(fl->rx_offset == 0); 1713 m = get_scatter_segment(sc, fl, total - remaining, remaining); 1714 if (__predict_false(m == NULL)) { 1715 fl->m0 = m0; 1716 fl->pnext = pnext; 1717 fl->remaining = remaining; 1718 fl->flags |= FL_BUF_RESUME; 1719 return (NULL); 1720 } 1721 *pnext = m; 1722 pnext = &m->m_next; 1723 remaining -= m->m_len; 1724 } 1725 *pnext = NULL; 1726 1727 return (m0); 1728} 1729 1730static int 1731t4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1732{ 1733 struct sge_rxq *rxq = iq_to_rxq(iq); 1734 struct ifnet *ifp = rxq->ifp; 1735 const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1736#if defined(INET) || defined(INET6) 1737 struct lro_ctrl *lro = &rxq->lro; 1738#endif 1739 1740 KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 1741 rss->opcode)); 1742 1743 m0->m_pkthdr.len -= fl_pktshift; 1744 m0->m_len -= fl_pktshift; 1745 m0->m_data += fl_pktshift; 1746 1747 m0->m_pkthdr.rcvif = ifp; 1748 m0->m_flags |= M_FLOWID; 1749 m0->m_pkthdr.flowid = be32toh(rss->hash_val); 1750 1751 if (cpl->csum_calc && !cpl->err_vec) { 1752 if (ifp->if_capenable & IFCAP_RXCSUM && 1753 cpl->l2info & htobe32(F_RXF_IP)) { 1754 m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1755 CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1756 rxq->rxcsum++; 1757 } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1758 cpl->l2info & htobe32(F_RXF_IP6)) { 1759 m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1760 CSUM_PSEUDO_HDR); 1761 rxq->rxcsum++; 1762 } 1763 1764 if (__predict_false(cpl->ip_frag)) 1765 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1766 else 1767 m0->m_pkthdr.csum_data = 0xffff; 1768 } 1769 1770 if (cpl->vlan_ex) { 1771 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1772 m0->m_flags |= M_VLANTAG; 1773 rxq->vlan_extraction++; 1774 } 1775 1776#if defined(INET) || defined(INET6) 1777 if (cpl->l2info & htobe32(F_RXF_LRO) && 1778 iq->flags & IQ_LRO_ENABLED && 1779 tcp_lro_rx(lro, m0, 0) == 0) { 1780 /* queued for LRO */ 1781 } else 1782#endif 1783 ifp->if_input(ifp, m0); 1784 1785 return (0); 1786} 1787 1788/* 1789 * Doesn't fail. Holds on to work requests it can't send right away. 
1790 */ 1791void 1792t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 1793{ 1794 struct sge_eq *eq = &wrq->eq; 1795 int can_reclaim; 1796 caddr_t dst; 1797 1798 TXQ_LOCK_ASSERT_OWNED(wrq); 1799#ifdef TCP_OFFLOAD 1800 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_OFLD || 1801 (eq->flags & EQ_TYPEMASK) == EQ_CTRL, 1802 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1803#else 1804 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_CTRL, 1805 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1806#endif 1807 1808 if (__predict_true(wr != NULL)) 1809 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 1810 1811 can_reclaim = reclaimable(eq); 1812 if (__predict_false(eq->flags & EQ_STALLED)) { 1813 if (eq->avail + can_reclaim < tx_resume_threshold(eq)) 1814 return; 1815 eq->flags &= ~EQ_STALLED; 1816 eq->unstalled++; 1817 } 1818 eq->cidx += can_reclaim; 1819 eq->avail += can_reclaim; 1820 if (__predict_false(eq->cidx >= eq->cap)) 1821 eq->cidx -= eq->cap; 1822 1823 while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL) { 1824 int ndesc; 1825 1826 if (__predict_false(wr->wr_len < 0 || 1827 wr->wr_len > SGE_MAX_WR_LEN || (wr->wr_len & 0x7))) { 1828 1829#ifdef INVARIANTS 1830 panic("%s: work request with length %d", __func__, 1831 wr->wr_len); 1832#endif 1833#ifdef KDB 1834 kdb_backtrace(); 1835#endif 1836 log(LOG_ERR, "%s: %s work request with length %d", 1837 device_get_nameunit(sc->dev), __func__, wr->wr_len); 1838 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1839 free_wrqe(wr); 1840 continue; 1841 } 1842 1843 ndesc = howmany(wr->wr_len, EQ_ESIZE); 1844 if (eq->avail < ndesc) { 1845 wrq->no_desc++; 1846 break; 1847 } 1848 1849 dst = (void *)&eq->desc[eq->pidx]; 1850 copy_to_txd(eq, wrtod(wr), &dst, wr->wr_len); 1851 1852 eq->pidx += ndesc; 1853 eq->avail -= ndesc; 1854 if (__predict_false(eq->pidx >= eq->cap)) 1855 eq->pidx -= eq->cap; 1856 1857 eq->pending += ndesc; 1858 if (eq->pending >= 8) 1859 ring_eq_db(sc, eq); 1860 1861 wrq->tx_wrs++; 1862 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1863 free_wrqe(wr); 1864 1865 if (eq->avail < 8) { 1866 can_reclaim = reclaimable(eq); 1867 eq->cidx += can_reclaim; 1868 eq->avail += can_reclaim; 1869 if (__predict_false(eq->cidx >= eq->cap)) 1870 eq->cidx -= eq->cap; 1871 } 1872 } 1873 1874 if (eq->pending) 1875 ring_eq_db(sc, eq); 1876 1877 if (wr != NULL) { 1878 eq->flags |= EQ_STALLED; 1879 if (callout_pending(&eq->tx_callout) == 0) 1880 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq); 1881 } 1882} 1883 1884/* Per-packet header in a coalesced tx WR, before the SGL starts (in flits) */ 1885#define TXPKTS_PKT_HDR ((\ 1886 sizeof(struct ulp_txpkt) + \ 1887 sizeof(struct ulptx_idata) + \ 1888 sizeof(struct cpl_tx_pkt_core) \ 1889 ) / 8) 1890 1891/* Header of a coalesced tx WR, before SGL of first packet (in flits) */ 1892#define TXPKTS_WR_HDR (\ 1893 sizeof(struct fw_eth_tx_pkts_wr) / 8 + \ 1894 TXPKTS_PKT_HDR) 1895 1896/* Header of a tx WR, before SGL of first packet (in flits) */ 1897#define TXPKT_WR_HDR ((\ 1898 sizeof(struct fw_eth_tx_pkt_wr) + \ 1899 sizeof(struct cpl_tx_pkt_core) \ 1900 ) / 8 ) 1901 1902/* Header of a tx LSO WR, before SGL of first packet (in flits) */ 1903#define TXPKT_LSO_WR_HDR ((\ 1904 sizeof(struct fw_eth_tx_pkt_wr) + \ 1905 sizeof(struct cpl_tx_pkt_lso_core) + \ 1906 sizeof(struct cpl_tx_pkt_core) \ 1907 ) / 8 ) 1908 1909int 1910t4_eth_tx(struct ifnet *ifp, struct sge_txq *txq, struct mbuf *m) 1911{ 1912 struct port_info *pi = (void *)ifp->if_softc; 1913 struct adapter *sc = pi->adapter; 1914 struct sge_eq *eq = 
&txq->eq; 1915 struct buf_ring *br = txq->br; 1916 struct mbuf *next; 1917 int rc, coalescing, can_reclaim; 1918 struct txpkts txpkts; 1919 struct sgl sgl; 1920 1921 TXQ_LOCK_ASSERT_OWNED(txq); 1922 KASSERT(m, ("%s: called with nothing to do.", __func__)); 1923 KASSERT((eq->flags & EQ_TYPEMASK) == EQ_ETH, 1924 ("%s: eq type %d", __func__, eq->flags & EQ_TYPEMASK)); 1925 1926 prefetch(&eq->desc[eq->pidx]); 1927 prefetch(&txq->sdesc[eq->pidx]); 1928 1929 txpkts.npkt = 0;/* indicates there's nothing in txpkts */ 1930 coalescing = 0; 1931 1932 can_reclaim = reclaimable(eq); 1933 if (__predict_false(eq->flags & EQ_STALLED)) { 1934 if (eq->avail + can_reclaim < tx_resume_threshold(eq)) { 1935 txq->m = m; 1936 return (0); 1937 } 1938 eq->flags &= ~EQ_STALLED; 1939 eq->unstalled++; 1940 } 1941 1942 if (__predict_false(eq->flags & EQ_DOOMED)) { 1943 m_freem(m); 1944 while ((m = buf_ring_dequeue_sc(txq->br)) != NULL) 1945 m_freem(m); 1946 return (ENETDOWN); 1947 } 1948 1949 if (eq->avail < 8 && can_reclaim) 1950 reclaim_tx_descs(txq, can_reclaim, 32); 1951 1952 for (; m; m = next ? next : drbr_dequeue(ifp, br)) { 1953 1954 if (eq->avail < 8) 1955 break; 1956 1957 next = m->m_nextpkt; 1958 m->m_nextpkt = NULL; 1959 1960 if (next || buf_ring_peek(br)) 1961 coalescing = 1; 1962 1963 rc = get_pkt_sgl(txq, &m, &sgl, coalescing); 1964 if (rc != 0) { 1965 if (rc == ENOMEM) { 1966 1967 /* Short of resources, suspend tx */ 1968 1969 m->m_nextpkt = next; 1970 break; 1971 } 1972 1973 /* 1974 * Unrecoverable error for this packet, throw it away 1975 * and move on to the next. get_pkt_sgl may already 1976 * have freed m (it will be NULL in that case and the 1977 * m_freem here is still safe). 1978 */ 1979 1980 m_freem(m); 1981 continue; 1982 } 1983 1984 if (coalescing && 1985 add_to_txpkts(pi, txq, &txpkts, m, &sgl) == 0) { 1986 1987 /* Successfully absorbed into txpkts */ 1988 1989 write_ulp_cpl_sgl(pi, txq, &txpkts, m, &sgl); 1990 goto doorbell; 1991 } 1992 1993 /* 1994 * We weren't coalescing to begin with, or current frame could 1995 * not be coalesced (add_to_txpkts flushes txpkts if a frame 1996 * given to it can't be coalesced). Either way there should be 1997 * nothing in txpkts. 1998 */ 1999 KASSERT(txpkts.npkt == 0, 2000 ("%s: txpkts not empty: %d", __func__, txpkts.npkt)); 2001 2002 /* We're sending out individual packets now */ 2003 coalescing = 0; 2004 2005 if (eq->avail < 8) 2006 reclaim_tx_descs(txq, 0, 8); 2007 rc = write_txpkt_wr(pi, txq, m, &sgl); 2008 if (rc != 0) { 2009 2010 /* Short of hardware descriptors, suspend tx */ 2011 2012 /* 2013 * This is an unlikely but expensive failure. We've 2014 * done all the hard work (DMA mappings etc.) and now we 2015 * can't send out the packet. What's worse, we have to 2016 * spend even more time freeing up everything in sgl. 2017 */ 2018 txq->no_desc++; 2019 free_pkt_sgl(txq, &sgl); 2020 2021 m->m_nextpkt = next; 2022 break; 2023 } 2024 2025 ETHER_BPF_MTAP(ifp, m); 2026 if (sgl.nsegs == 0) 2027 m_freem(m); 2028doorbell: 2029 if (eq->pending >= 8) 2030 ring_eq_db(sc, eq); 2031 2032 can_reclaim = reclaimable(eq); 2033 if (can_reclaim >= 32) 2034 reclaim_tx_descs(txq, can_reclaim, 64); 2035 } 2036 2037 if (txpkts.npkt > 0) 2038 write_txpkts_wr(txq, &txpkts); 2039 2040 /* 2041 * m not NULL means there was an error but we haven't thrown it away. 2042 * This can happen when we're short of tx descriptors (no_desc) or maybe 2043 * even DMA maps (no_dmamap). Either way, a credit flush and reclaim 2044 * will get things going again. 
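	 * The leftover frame is stashed in txq->m so the next call can resume
	 * with it, and (unless a WR in this batch already asked for a credit
	 * flush) an eqflush WR is written below so that the resulting egress
	 * update from the SGE gets tx going again.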
2045 */ 2046 if (m && !(eq->flags & EQ_CRFLUSHED)) { 2047 struct tx_sdesc *txsd = &txq->sdesc[eq->pidx]; 2048 2049 /* 2050 * If EQ_CRFLUSHED is not set then we know we have at least one 2051 * available descriptor because any WR that reduces eq->avail to 2052 * 0 also sets EQ_CRFLUSHED. 2053 */ 2054 KASSERT(eq->avail > 0, ("%s: no space for eqflush.", __func__)); 2055 2056 txsd->desc_used = 1; 2057 txsd->credits = 0; 2058 write_eqflush_wr(eq); 2059 } 2060 txq->m = m; 2061 2062 if (eq->pending) 2063 ring_eq_db(sc, eq); 2064 2065 reclaim_tx_descs(txq, 0, 128); 2066 2067 if (eq->flags & EQ_STALLED && callout_pending(&eq->tx_callout) == 0) 2068 callout_reset(&eq->tx_callout, 1, t4_tx_callout, eq); 2069 2070 return (0); 2071} 2072 2073void 2074t4_update_fl_bufsize(struct ifnet *ifp) 2075{ 2076 struct port_info *pi = ifp->if_softc; 2077 struct adapter *sc = pi->adapter; 2078 struct sge_rxq *rxq; 2079#ifdef TCP_OFFLOAD 2080 struct sge_ofld_rxq *ofld_rxq; 2081#endif 2082 struct sge_fl *fl; 2083 int i, maxp, mtu = ifp->if_mtu; 2084 2085 maxp = mtu_to_max_payload(sc, mtu, 0); 2086 for_each_rxq(pi, i, rxq) { 2087 fl = &rxq->fl; 2088 2089 FL_LOCK(fl); 2090 find_best_refill_source(sc, fl, maxp); 2091 FL_UNLOCK(fl); 2092 } 2093#ifdef TCP_OFFLOAD 2094 maxp = mtu_to_max_payload(sc, mtu, 1); 2095 for_each_ofld_rxq(pi, i, ofld_rxq) { 2096 fl = &ofld_rxq->fl; 2097 2098 FL_LOCK(fl); 2099 find_best_refill_source(sc, fl, maxp); 2100 FL_UNLOCK(fl); 2101 } 2102#endif 2103} 2104 2105int 2106can_resume_tx(struct sge_eq *eq) 2107{ 2108 2109 return (eq->avail + reclaimable(eq) >= tx_resume_threshold(eq)); 2110} 2111 2112static inline void 2113init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2114 int qsize) 2115{ 2116 2117 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 2118 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2119 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2120 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 2121 2122 iq->flags = 0; 2123 iq->adapter = sc; 2124 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2125 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2126 if (pktc_idx >= 0) { 2127 iq->intr_params |= F_QINTR_CNT_EN; 2128 iq->intr_pktc_idx = pktc_idx; 2129 } 2130 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2131 iq->sidx = iq->qsize - spg_len / IQ_ESIZE; 2132} 2133 2134static inline void 2135init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 2136{ 2137 2138 fl->qsize = qsize; 2139 fl->sidx = qsize - spg_len / EQ_ESIZE; 2140 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2141 if (sc->flags & BUF_PACKING_OK && 2142 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 2143 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 2144 fl->flags |= FL_BUF_PACKING; 2145 find_best_refill_source(sc, fl, maxp); 2146 find_safe_refill_source(sc, fl); 2147} 2148 2149static inline void 2150init_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, 2151 uint16_t iqid, char *name) 2152{ 2153 KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan)); 2154 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2155 2156 eq->flags = eqtype & EQ_TYPEMASK; 2157 eq->tx_chan = tx_chan; 2158 eq->iqid = iqid; 2159 eq->qsize = qsize; 2160 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2161 2162 TASK_INIT(&eq->tx_task, 0, t4_tx_task, eq); 2163 callout_init(&eq->tx_callout, CALLOUT_MPSAFE); 2164} 2165 2166static int 2167alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 2168 
bus_dmamap_t *map, bus_addr_t *pa, void **va) 2169{ 2170 int rc; 2171 2172 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2173 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2174 if (rc != 0) { 2175 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2176 goto done; 2177 } 2178 2179 rc = bus_dmamem_alloc(*tag, va, 2180 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2181 if (rc != 0) { 2182 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2183 goto done; 2184 } 2185 2186 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 2187 if (rc != 0) { 2188 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 2189 goto done; 2190 } 2191done: 2192 if (rc) 2193 free_ring(sc, *tag, *map, *pa, *va); 2194 2195 return (rc); 2196} 2197 2198static int 2199free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 2200 bus_addr_t pa, void *va) 2201{ 2202 if (pa) 2203 bus_dmamap_unload(tag, map); 2204 if (va) 2205 bus_dmamem_free(tag, va, map); 2206 if (tag) 2207 bus_dma_tag_destroy(tag); 2208 2209 return (0); 2210} 2211 2212/* 2213 * Allocates the ring for an ingress queue and an optional freelist. If the 2214 * freelist is specified it will be allocated and then associated with the 2215 * ingress queue. 2216 * 2217 * Returns errno on failure. Resources allocated up to that point may still be 2218 * allocated. Caller is responsible for cleanup in case this function fails. 2219 * 2220 * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then 2221 * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 2222 * the abs_id of the ingress queue to which its interrupts should be forwarded. 2223 */ 2224static int 2225alloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 2226 int intr_idx, int cong) 2227{ 2228 int rc, i, cntxt_id; 2229 size_t len; 2230 struct fw_iq_cmd c; 2231 struct adapter *sc = iq->adapter; 2232 __be32 v = 0; 2233 2234 len = iq->qsize * IQ_ESIZE; 2235 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 2236 (void **)&iq->desc); 2237 if (rc != 0) 2238 return (rc); 2239 2240 bzero(&c, sizeof(c)); 2241 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 2242 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 2243 V_FW_IQ_CMD_VFN(0)); 2244 2245 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 2246 FW_LEN16(c)); 2247 2248 /* Special handling for firmware event queue */ 2249 if (iq == &sc->sge.fwq) 2250 v |= F_FW_IQ_CMD_IQASYNCH; 2251 2252 if (iq->flags & IQ_INTR) { 2253 KASSERT(intr_idx < sc->intr_count, 2254 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 2255 } else 2256 v |= F_FW_IQ_CMD_IQANDST; 2257 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 2258 2259 c.type_to_iqandstindex = htobe32(v | 2260 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2261 V_FW_IQ_CMD_VIID(pi->viid) | 2262 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 2263 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 2264 F_FW_IQ_CMD_IQGTSMODE | 2265 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 2266 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 2267 c.iqsize = htobe16(iq->qsize); 2268 c.iqaddr = htobe64(iq->ba); 2269 if (cong >= 0) 2270 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 2271 2272 if (fl) { 2273 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 2274 2275 len = fl->qsize * EQ_ESIZE; 2276 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 2277 &fl->ba, (void **)&fl->desc); 2278 if 
(rc) 2279 return (rc); 2280 2281 /* Allocate space for one software descriptor per buffer. */ 2282 rc = alloc_fl_sdesc(fl); 2283 if (rc != 0) { 2284 device_printf(sc->dev, 2285 "failed to setup fl software descriptors: %d\n", 2286 rc); 2287 return (rc); 2288 } 2289 2290 if (fl->flags & FL_BUF_PACKING) { 2291 fl->lowat = roundup2(sc->sge.fl_starve_threshold2, 8); 2292 fl->buf_boundary = sc->sge.pack_boundary; 2293 } else { 2294 fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8); 2295 fl->buf_boundary = 16; 2296 } 2297 if (fl_pad && fl->buf_boundary < sc->sge.pad_boundary) 2298 fl->buf_boundary = sc->sge.pad_boundary; 2299 2300 c.iqns_to_fl0congen |= 2301 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 2302 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 2303 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 2304 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 2305 0)); 2306 if (cong >= 0) { 2307 c.iqns_to_fl0congen |= 2308 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 2309 F_FW_IQ_CMD_FL0CONGCIF | 2310 F_FW_IQ_CMD_FL0CONGEN); 2311 } 2312 c.fl0dcaen_to_fl0cidxfthresh = 2313 htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_64B) | 2314 V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 2315 c.fl0size = htobe16(fl->qsize); 2316 c.fl0addr = htobe64(fl->ba); 2317 } 2318 2319 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2320 if (rc != 0) { 2321 device_printf(sc->dev, 2322 "failed to create ingress queue: %d\n", rc); 2323 return (rc); 2324 } 2325 2326 iq->cidx = 0; 2327 iq->gen = F_RSPD_GEN; 2328 iq->intr_next = iq->intr_params; 2329 iq->cntxt_id = be16toh(c.iqid); 2330 iq->abs_id = be16toh(c.physiqid); 2331 iq->flags |= IQ_ALLOCATED; 2332 2333 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 2334 if (cntxt_id >= sc->sge.niq) { 2335 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 2336 cntxt_id, sc->sge.niq - 1); 2337 } 2338 sc->sge.iqmap[cntxt_id] = iq; 2339 2340 if (fl) { 2341 u_int qid; 2342 2343 iq->flags |= IQ_HAS_FL; 2344 fl->cntxt_id = be16toh(c.fl0id); 2345 fl->pidx = fl->cidx = 0; 2346 2347 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 2348 if (cntxt_id >= sc->sge.neq) { 2349 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 2350 __func__, cntxt_id, sc->sge.neq - 1); 2351 } 2352 sc->sge.eqmap[cntxt_id] = (void *)fl; 2353 2354 qid = fl->cntxt_id; 2355 if (isset(&sc->doorbells, DOORBELL_UDB)) { 2356 uint32_t s_qpp = sc->sge.eq_s_qpp; 2357 uint32_t mask = (1 << s_qpp) - 1; 2358 volatile uint8_t *udb; 2359 2360 udb = sc->udbs_base + UDBS_DB_OFFSET; 2361 udb += (qid >> s_qpp) << PAGE_SHIFT; 2362 qid &= mask; 2363 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 2364 udb += qid << UDBS_SEG_SHIFT; 2365 qid = 0; 2366 } 2367 fl->udb = (volatile void *)udb; 2368 } 2369 fl->dbval = F_DBPRIO | V_QID(qid); 2370 if (is_t5(sc)) 2371 fl->dbval |= F_DBTYPE; 2372 2373 FL_LOCK(fl); 2374 /* Enough to make sure the SGE doesn't think it's starved */ 2375 refill_fl(sc, fl, fl->lowat); 2376 FL_UNLOCK(fl); 2377 } 2378 2379 if (is_t5(sc) && cong >= 0) { 2380 uint32_t param, val; 2381 2382 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2383 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 2384 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 2385 if (cong == 0) 2386 val = 1 << 19; 2387 else { 2388 val = 2 << 19; 2389 for (i = 0; i < 4; i++) { 2390 if (cong & (1 << i)) 2391 val |= 1 << (i << 2); 2392 } 2393 } 2394 2395 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val); 2396 if (rc != 0) { 2397 /* report error but carry on */ 2398 device_printf(sc->dev, 2399 "failed to set congestion manager context for " 2400 "ingress 
queue %d: %d\n", iq->cntxt_id, rc); 2401 } 2402 } 2403 2404 /* Enable IQ interrupts */ 2405 atomic_store_rel_int(&iq->state, IQS_IDLE); 2406 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 2407 V_INGRESSQID(iq->cntxt_id)); 2408 2409 return (0); 2410} 2411 2412static int 2413free_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 2414{ 2415 int rc; 2416 struct adapter *sc = iq->adapter; 2417 device_t dev; 2418 2419 if (sc == NULL) 2420 return (0); /* nothing to do */ 2421 2422 dev = pi ? pi->dev : sc->dev; 2423 2424 if (iq->flags & IQ_ALLOCATED) { 2425 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 2426 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 2427 fl ? fl->cntxt_id : 0xffff, 0xffff); 2428 if (rc != 0) { 2429 device_printf(dev, 2430 "failed to free queue %p: %d\n", iq, rc); 2431 return (rc); 2432 } 2433 iq->flags &= ~IQ_ALLOCATED; 2434 } 2435 2436 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 2437 2438 bzero(iq, sizeof(*iq)); 2439 2440 if (fl) { 2441 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 2442 fl->desc); 2443 2444 if (fl->sdesc) 2445 free_fl_sdesc(sc, fl); 2446 2447 if (mtx_initialized(&fl->fl_lock)) 2448 mtx_destroy(&fl->fl_lock); 2449 2450 bzero(fl, sizeof(*fl)); 2451 } 2452 2453 return (0); 2454} 2455 2456static void 2457add_fl_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 2458 struct sge_fl *fl) 2459{ 2460 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2461 2462 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2463 "freelist"); 2464 children = SYSCTL_CHILDREN(oid); 2465 2466 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2467 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 2468 "SGE context id of the freelist"); 2469 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 2470 fl_pad ? 1 : 0, "padding enabled"); 2471 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 2472 fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); 2473 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 2474 0, "consumer index"); 2475 if (fl->flags & FL_BUF_PACKING) { 2476 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 2477 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 2478 } 2479 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 2480 0, "producer index"); 2481 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 2482 CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 2483 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 2484 CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 2485 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 2486 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 2487 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 2488 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 2489 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 2490 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 2491} 2492 2493static int 2494alloc_fwq(struct adapter *sc) 2495{ 2496 int rc, intr_idx; 2497 struct sge_iq *fwq = &sc->sge.fwq; 2498 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2499 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2500 2501 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 2502 fwq->flags |= IQ_INTR; /* always */ 2503 intr_idx = sc->intr_count > 1 ? 
1 : 0; 2504 rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1); 2505 if (rc != 0) { 2506 device_printf(sc->dev, 2507 "failed to create firmware event queue: %d\n", rc); 2508 return (rc); 2509 } 2510 2511 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 2512 NULL, "firmware event queue"); 2513 children = SYSCTL_CHILDREN(oid); 2514 2515 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id", 2516 CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I", 2517 "absolute id of the queue"); 2518 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id", 2519 CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I", 2520 "SGE context id of the queue"); 2521 SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx", 2522 CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I", 2523 "consumer index"); 2524 2525 return (0); 2526} 2527 2528static int 2529free_fwq(struct adapter *sc) 2530{ 2531 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 2532} 2533 2534static int 2535alloc_mgmtq(struct adapter *sc) 2536{ 2537 int rc; 2538 struct sge_wrq *mgmtq = &sc->sge.mgmtq; 2539 char name[16]; 2540 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2541 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2542 2543 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, 2544 NULL, "management queue"); 2545 2546 snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); 2547 init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, 2548 sc->sge.fwq.cntxt_id, name); 2549 rc = alloc_wrq(sc, NULL, mgmtq, oid); 2550 if (rc != 0) { 2551 device_printf(sc->dev, 2552 "failed to create management queue: %d\n", rc); 2553 return (rc); 2554 } 2555 2556 return (0); 2557} 2558 2559static int 2560free_mgmtq(struct adapter *sc) 2561{ 2562 2563 return free_wrq(sc, &sc->sge.mgmtq); 2564} 2565 2566int 2567tnl_cong(struct port_info *pi) 2568{ 2569 2570 if (cong_drop == -1) 2571 return (-1); 2572 else if (cong_drop == 1) 2573 return (0); 2574 else 2575 return (pi->rx_chan_map); 2576} 2577 2578static int 2579alloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx, 2580 struct sysctl_oid *oid) 2581{ 2582 int rc; 2583 struct sysctl_oid_list *children; 2584 char name[16]; 2585 2586 rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi)); 2587 if (rc != 0) 2588 return (rc); 2589 2590 /* 2591 * The freelist is just barely above the starvation threshold right now, 2592 * fill it up a bit more. 
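	 * (alloc_iq_fl only added fl->lowat buffers; up to another 128 here
	 * keeps the freelist comfortably clear of the starvation threshold.)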
2593 */ 2594 FL_LOCK(&rxq->fl); 2595 refill_fl(pi->adapter, &rxq->fl, 128); 2596 FL_UNLOCK(&rxq->fl); 2597 2598#if defined(INET) || defined(INET6) 2599 rc = tcp_lro_init(&rxq->lro); 2600 if (rc != 0) 2601 return (rc); 2602 rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 2603 2604 if (pi->ifp->if_capenable & IFCAP_LRO) 2605 rxq->iq.flags |= IQ_LRO_ENABLED; 2606#endif 2607 rxq->ifp = pi->ifp; 2608 2609 children = SYSCTL_CHILDREN(oid); 2610 2611 snprintf(name, sizeof(name), "%d", idx); 2612 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2613 NULL, "rx queue"); 2614 children = SYSCTL_CHILDREN(oid); 2615 2616 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2617 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I", 2618 "absolute id of the queue"); 2619 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2620 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I", 2621 "SGE context id of the queue"); 2622 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2623 CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I", 2624 "consumer index"); 2625#if defined(INET) || defined(INET6) 2626 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 2627 &rxq->lro.lro_queued, 0, NULL); 2628 SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 2629 &rxq->lro.lro_flushed, 0, NULL); 2630#endif 2631 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 2632 &rxq->rxcsum, "# of times hardware assisted with checksum"); 2633 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 2634 CTLFLAG_RD, &rxq->vlan_extraction, 2635 "# of times hardware extracted 802.1Q tag"); 2636 2637 add_fl_sysctls(&pi->ctx, oid, &rxq->fl); 2638 2639 return (rc); 2640} 2641 2642static int 2643free_rxq(struct port_info *pi, struct sge_rxq *rxq) 2644{ 2645 int rc; 2646 2647#if defined(INET) || defined(INET6) 2648 if (rxq->lro.ifp) { 2649 tcp_lro_free(&rxq->lro); 2650 rxq->lro.ifp = NULL; 2651 } 2652#endif 2653 2654 rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 2655 if (rc == 0) 2656 bzero(rxq, sizeof(*rxq)); 2657 2658 return (rc); 2659} 2660 2661#ifdef TCP_OFFLOAD 2662static int 2663alloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq, 2664 int intr_idx, int idx, struct sysctl_oid *oid) 2665{ 2666 int rc; 2667 struct sysctl_oid_list *children; 2668 char name[16]; 2669 2670 rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 2671 pi->rx_chan_map); 2672 if (rc != 0) 2673 return (rc); 2674 2675 children = SYSCTL_CHILDREN(oid); 2676 2677 snprintf(name, sizeof(name), "%d", idx); 2678 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2679 NULL, "rx queue"); 2680 children = SYSCTL_CHILDREN(oid); 2681 2682 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2683 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16, 2684 "I", "absolute id of the queue"); 2685 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2686 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16, 2687 "I", "SGE context id of the queue"); 2688 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2689 CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I", 2690 "consumer index"); 2691 2692 add_fl_sysctls(&pi->ctx, oid, &ofld_rxq->fl); 2693 2694 return (rc); 2695} 2696 2697static int 2698free_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq) 2699{ 2700 int rc; 2701 2702 rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl); 2703 if (rc == 0) 2704 
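		/* free_iq_fl already wiped the iq and fl themselves; clear the
		 * rest of the ofld_rxq software state only on success. */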
bzero(ofld_rxq, sizeof(*ofld_rxq)); 2705 2706 return (rc); 2707} 2708#endif 2709 2710#ifdef DEV_NETMAP 2711static int 2712alloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx, 2713 int idx, struct sysctl_oid *oid) 2714{ 2715 int rc; 2716 struct sysctl_oid_list *children; 2717 struct sysctl_ctx_list *ctx; 2718 char name[16]; 2719 size_t len; 2720 struct adapter *sc = pi->adapter; 2721 struct netmap_adapter *na = NA(pi->nm_ifp); 2722 2723 MPASS(na != NULL); 2724 2725 len = pi->qsize_rxq * IQ_ESIZE; 2726 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 2727 &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 2728 if (rc != 0) 2729 return (rc); 2730 2731 len = na->num_rx_desc * EQ_ESIZE + spg_len; 2732 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 2733 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 2734 if (rc != 0) 2735 return (rc); 2736 2737 nm_rxq->pi = pi; 2738 nm_rxq->nid = idx; 2739 nm_rxq->iq_cidx = 0; 2740 nm_rxq->iq_sidx = pi->qsize_rxq - spg_len / IQ_ESIZE; 2741 nm_rxq->iq_gen = F_RSPD_GEN; 2742 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 2743 nm_rxq->fl_sidx = na->num_rx_desc; 2744 nm_rxq->intr_idx = intr_idx; 2745 2746 ctx = &pi->ctx; 2747 children = SYSCTL_CHILDREN(oid); 2748 2749 snprintf(name, sizeof(name), "%d", idx); 2750 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 2751 "rx queue"); 2752 children = SYSCTL_CHILDREN(oid); 2753 2754 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 2755 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 2756 "I", "absolute id of the queue"); 2757 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2758 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 2759 "I", "SGE context id of the queue"); 2760 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 2761 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 2762 "consumer index"); 2763 2764 children = SYSCTL_CHILDREN(oid); 2765 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2766 "freelist"); 2767 children = SYSCTL_CHILDREN(oid); 2768 2769 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2770 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 2771 "I", "SGE context id of the freelist"); 2772 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 2773 &nm_rxq->fl_cidx, 0, "consumer index"); 2774 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 2775 &nm_rxq->fl_pidx, 0, "producer index"); 2776 2777 return (rc); 2778} 2779 2780 2781static int 2782free_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq) 2783{ 2784 struct adapter *sc = pi->adapter; 2785 2786 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 2787 nm_rxq->iq_desc); 2788 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 2789 nm_rxq->fl_desc); 2790 2791 return (0); 2792} 2793 2794static int 2795alloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 2796 struct sysctl_oid *oid) 2797{ 2798 int rc; 2799 size_t len; 2800 struct adapter *sc = pi->adapter; 2801 struct netmap_adapter *na = NA(pi->nm_ifp); 2802 char name[16]; 2803 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2804 2805 len = na->num_tx_desc * EQ_ESIZE + spg_len; 2806 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 2807 &nm_txq->ba, (void **)&nm_txq->desc); 2808 if (rc) 2809 return (rc); 2810 2811 nm_txq->pidx = nm_txq->cidx = 0; 2812 nm_txq->sidx = na->num_tx_desc; 2813 nm_txq->nid = idx; 2814 nm_txq->iqidx = 
iqidx; 2815 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 2816 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf)); 2817 2818 snprintf(name, sizeof(name), "%d", idx); 2819 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2820 NULL, "netmap tx queue"); 2821 children = SYSCTL_CHILDREN(oid); 2822 2823 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 2824 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 2825 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2826 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 2827 "consumer index"); 2828 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 2829 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 2830 "producer index"); 2831 2832 return (rc); 2833} 2834 2835static int 2836free_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq) 2837{ 2838 struct adapter *sc = pi->adapter; 2839 2840 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 2841 nm_txq->desc); 2842 2843 return (0); 2844} 2845#endif 2846 2847static int 2848ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 2849{ 2850 int rc, cntxt_id; 2851 struct fw_eq_ctrl_cmd c; 2852 2853 bzero(&c, sizeof(c)); 2854 2855 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 2856 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 2857 V_FW_EQ_CTRL_CMD_VFN(0)); 2858 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 2859 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 2860 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); /* XXX */ 2861 c.physeqid_pkd = htobe32(0); 2862 c.fetchszm_to_iqid = 2863 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2864 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 2865 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 2866 c.dcaen_to_eqsize = 2867 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2868 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2869 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2870 V_FW_EQ_CTRL_CMD_EQSIZE(eq->qsize)); 2871 c.eqaddr = htobe64(eq->ba); 2872 2873 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2874 if (rc != 0) { 2875 device_printf(sc->dev, 2876 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 2877 return (rc); 2878 } 2879 eq->flags |= EQ_ALLOCATED; 2880 2881 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 2882 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2883 if (cntxt_id >= sc->sge.neq) 2884 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2885 cntxt_id, sc->sge.neq - 1); 2886 sc->sge.eqmap[cntxt_id] = eq; 2887 2888 return (rc); 2889} 2890 2891static int 2892eth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2893{ 2894 int rc, cntxt_id; 2895 struct fw_eq_eth_cmd c; 2896 2897 bzero(&c, sizeof(c)); 2898 2899 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 2900 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 2901 V_FW_EQ_ETH_CMD_VFN(0)); 2902 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 2903 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 2904 c.autoequiqe_to_viid = htobe32(V_FW_EQ_ETH_CMD_VIID(pi->viid)); 2905 c.fetchszm_to_iqid = 2906 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2907 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 2908 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 2909 c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2910 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2911 
V_FW_EQ_ETH_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2912 V_FW_EQ_ETH_CMD_EQSIZE(eq->qsize)); 2913 c.eqaddr = htobe64(eq->ba); 2914 2915 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2916 if (rc != 0) { 2917 device_printf(pi->dev, 2918 "failed to create Ethernet egress queue: %d\n", rc); 2919 return (rc); 2920 } 2921 eq->flags |= EQ_ALLOCATED; 2922 2923 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 2924 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2925 if (cntxt_id >= sc->sge.neq) 2926 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2927 cntxt_id, sc->sge.neq - 1); 2928 sc->sge.eqmap[cntxt_id] = eq; 2929 2930 return (rc); 2931} 2932 2933#ifdef TCP_OFFLOAD 2934static int 2935ofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2936{ 2937 int rc, cntxt_id; 2938 struct fw_eq_ofld_cmd c; 2939 2940 bzero(&c, sizeof(c)); 2941 2942 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 2943 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 2944 V_FW_EQ_OFLD_CMD_VFN(0)); 2945 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 2946 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 2947 c.fetchszm_to_iqid = 2948 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 2949 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 2950 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 2951 c.dcaen_to_eqsize = 2952 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 2953 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 2954 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(X_CIDXFLUSHTHRESH_32) | 2955 V_FW_EQ_OFLD_CMD_EQSIZE(eq->qsize)); 2956 c.eqaddr = htobe64(eq->ba); 2957 2958 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2959 if (rc != 0) { 2960 device_printf(pi->dev, 2961 "failed to create egress queue for TCP offload: %d\n", rc); 2962 return (rc); 2963 } 2964 eq->flags |= EQ_ALLOCATED; 2965 2966 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 2967 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 2968 if (cntxt_id >= sc->sge.neq) 2969 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 2970 cntxt_id, sc->sge.neq - 1); 2971 sc->sge.eqmap[cntxt_id] = eq; 2972 2973 return (rc); 2974} 2975#endif 2976 2977static int 2978alloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 2979{ 2980 int rc; 2981 size_t len; 2982 2983 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 2984 2985 len = eq->qsize * EQ_ESIZE; 2986 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 2987 &eq->ba, (void **)&eq->desc); 2988 if (rc) 2989 return (rc); 2990 2991 eq->cap = eq->qsize - spg_len / EQ_ESIZE; 2992 eq->spg = (void *)&eq->desc[eq->cap]; 2993 eq->avail = eq->cap - 1; /* one less to avoid cidx = pidx */ 2994 eq->pidx = eq->cidx = 0; 2995 eq->doorbells = sc->doorbells; 2996 2997 switch (eq->flags & EQ_TYPEMASK) { 2998 case EQ_CTRL: 2999 rc = ctrl_eq_alloc(sc, eq); 3000 break; 3001 3002 case EQ_ETH: 3003 rc = eth_eq_alloc(sc, pi, eq); 3004 break; 3005 3006#ifdef TCP_OFFLOAD 3007 case EQ_OFLD: 3008 rc = ofld_eq_alloc(sc, pi, eq); 3009 break; 3010#endif 3011 3012 default: 3013 panic("%s: invalid eq type %d.", __func__, 3014 eq->flags & EQ_TYPEMASK); 3015 } 3016 if (rc != 0) { 3017 device_printf(sc->dev, 3018 "failed to allocate egress queue(%d): %d\n", 3019 eq->flags & EQ_TYPEMASK, rc); 3020 } 3021 3022 eq->tx_callout.c_cpu = eq->cntxt_id % mp_ncpus; 3023 3024 if (isset(&eq->doorbells, DOORBELL_UDB) || 3025 isset(&eq->doorbells, DOORBELL_UDBWC) || 3026 isset(&eq->doorbells, DOORBELL_WCWR)) { 3027 uint32_t s_qpp = sc->sge.eq_s_qpp; 
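		/*
		 * Illustrative sketch of the user doorbell math below: with
		 * 2^s_qpp egress queues per doorbell page, this queue's page
		 * is at udbs_base + UDBS_DB_OFFSET + (cntxt_id >> s_qpp) *
		 * PAGE_SIZE, and (cntxt_id & mask) picks the segment within
		 * that page.  If the segment lies within the page the offset
		 * is folded into udb and udb_qid becomes 0, which keeps
		 * DOORBELL_WCWR usable; otherwise WCWR is cleared for this EQ.
		 */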
3028 uint32_t mask = (1 << s_qpp) - 1; 3029 volatile uint8_t *udb; 3030 3031 udb = sc->udbs_base + UDBS_DB_OFFSET; 3032 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 3033 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 3034 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 3035 clrbit(&eq->doorbells, DOORBELL_WCWR); 3036 else { 3037 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 3038 eq->udb_qid = 0; 3039 } 3040 eq->udb = (volatile void *)udb; 3041 } 3042 3043 return (rc); 3044} 3045 3046static int 3047free_eq(struct adapter *sc, struct sge_eq *eq) 3048{ 3049 int rc; 3050 3051 if (eq->flags & EQ_ALLOCATED) { 3052 switch (eq->flags & EQ_TYPEMASK) { 3053 case EQ_CTRL: 3054 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 3055 eq->cntxt_id); 3056 break; 3057 3058 case EQ_ETH: 3059 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 3060 eq->cntxt_id); 3061 break; 3062 3063#ifdef TCP_OFFLOAD 3064 case EQ_OFLD: 3065 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 3066 eq->cntxt_id); 3067 break; 3068#endif 3069 3070 default: 3071 panic("%s: invalid eq type %d.", __func__, 3072 eq->flags & EQ_TYPEMASK); 3073 } 3074 if (rc != 0) { 3075 device_printf(sc->dev, 3076 "failed to free egress queue (%d): %d\n", 3077 eq->flags & EQ_TYPEMASK, rc); 3078 return (rc); 3079 } 3080 eq->flags &= ~EQ_ALLOCATED; 3081 } 3082 3083 free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 3084 3085 if (mtx_initialized(&eq->eq_lock)) 3086 mtx_destroy(&eq->eq_lock); 3087 3088 bzero(eq, sizeof(*eq)); 3089 return (0); 3090} 3091 3092static int 3093alloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq, 3094 struct sysctl_oid *oid) 3095{ 3096 int rc; 3097 struct sysctl_ctx_list *ctx = pi ? &pi->ctx : &sc->ctx; 3098 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3099 3100 rc = alloc_eq(sc, pi, &wrq->eq); 3101 if (rc) 3102 return (rc); 3103 3104 wrq->adapter = sc; 3105 STAILQ_INIT(&wrq->wr_list); 3106 3107 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3108 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 3109 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3110 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 3111 "consumer index"); 3112 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 3113 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 3114 "producer index"); 3115 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs", CTLFLAG_RD, 3116 &wrq->tx_wrs, "# of work requests"); 3117 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 3118 &wrq->no_desc, 0, 3119 "# of times queue ran out of hardware descriptors"); 3120 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD, 3121 &wrq->eq.unstalled, 0, "# of times queue recovered after stall"); 3122 3123 return (rc); 3124} 3125 3126static int 3127free_wrq(struct adapter *sc, struct sge_wrq *wrq) 3128{ 3129 int rc; 3130 3131 rc = free_eq(sc, &wrq->eq); 3132 if (rc) 3133 return (rc); 3134 3135 bzero(wrq, sizeof(*wrq)); 3136 return (0); 3137} 3138 3139static int 3140alloc_txq(struct port_info *pi, struct sge_txq *txq, int idx, 3141 struct sysctl_oid *oid) 3142{ 3143 int rc; 3144 struct adapter *sc = pi->adapter; 3145 struct sge_eq *eq = &txq->eq; 3146 char name[16]; 3147 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3148 3149 rc = alloc_eq(sc, pi, eq); 3150 if (rc) 3151 return (rc); 3152 3153 txq->ifp = pi->ifp; 3154 3155 txq->sdesc = malloc(eq->cap * sizeof(struct tx_sdesc), M_CXGBE, 3156 M_ZERO | M_WAITOK); 3157 txq->br = buf_ring_alloc(eq->qsize, 
M_CXGBE, M_WAITOK, &eq->eq_lock); 3158 3159 rc = bus_dma_tag_create(sc->dmat, 1, 0, BUS_SPACE_MAXADDR, 3160 BUS_SPACE_MAXADDR, NULL, NULL, 64 * 1024, TX_SGL_SEGS, 3161 BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL, NULL, &txq->tx_tag); 3162 if (rc != 0) { 3163 device_printf(sc->dev, 3164 "failed to create tx DMA tag: %d\n", rc); 3165 return (rc); 3166 } 3167 3168 /* 3169 * We can stuff ~10 frames in an 8-descriptor txpkts WR (8 is the SGE 3170 * limit for any WR). txq->no_dmamap events shouldn't occur if maps is 3171 * sized for the worst case. 3172 */ 3173 rc = t4_alloc_tx_maps(&txq->txmaps, txq->tx_tag, eq->qsize * 10 / 8, 3174 M_WAITOK); 3175 if (rc != 0) { 3176 device_printf(sc->dev, "failed to setup tx DMA maps: %d\n", rc); 3177 return (rc); 3178 } 3179 3180 snprintf(name, sizeof(name), "%d", idx); 3181 oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3182 NULL, "tx queue"); 3183 children = SYSCTL_CHILDREN(oid); 3184 3185 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3186 &eq->cntxt_id, 0, "SGE context id of the queue"); 3187 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3188 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 3189 "consumer index"); 3190 SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 3191 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 3192 "producer index"); 3193 3194 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 3195 &txq->txcsum, "# of times hardware assisted with checksum"); 3196 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 3197 CTLFLAG_RD, &txq->vlan_insertion, 3198 "# of times hardware inserted 802.1Q tag"); 3199 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 3200 &txq->tso_wrs, "# of TSO work requests"); 3201 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 3202 &txq->imm_wrs, "# of work requests with immediate data"); 3203 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 3204 &txq->sgl_wrs, "# of work requests with direct SGL"); 3205 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 3206 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 3207 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_wrs", CTLFLAG_RD, 3208 &txq->txpkts_wrs, "# of txpkts work requests (multiple pkts/WR)"); 3209 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts_pkts", CTLFLAG_RD, 3210 &txq->txpkts_pkts, "# of frames tx'd using txpkts work requests"); 3211 3212 SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "br_drops", CTLFLAG_RD, 3213 &txq->br->br_drops, "# of drops in the buf_ring for this queue"); 3214 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_dmamap", CTLFLAG_RD, 3215 &txq->no_dmamap, 0, "# of times txq ran out of DMA maps"); 3216 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "no_desc", CTLFLAG_RD, 3217 &txq->no_desc, 0, "# of times txq ran out of hardware descriptors"); 3218 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "egr_update", CTLFLAG_RD, 3219 &eq->egr_update, 0, "egress update notifications from the SGE"); 3220 SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "unstalled", CTLFLAG_RD, 3221 &eq->unstalled, 0, "# of times txq recovered after stall"); 3222 3223 return (rc); 3224} 3225 3226static int 3227free_txq(struct port_info *pi, struct sge_txq *txq) 3228{ 3229 int rc; 3230 struct adapter *sc = pi->adapter; 3231 struct sge_eq *eq = &txq->eq; 3232 3233 rc = free_eq(sc, eq); 3234 if (rc) 3235 return (rc); 3236 3237 free(txq->sdesc, M_CXGBE); 3238 3239 
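	/* The tx DMA maps must be destroyed while txq->tx_tag is still valid;
	 * the tag itself goes last, after the buf_ring is freed. */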
if (txq->txmaps.maps) 3240 t4_free_tx_maps(&txq->txmaps, txq->tx_tag); 3241 3242 buf_ring_free(txq->br, M_CXGBE); 3243 3244 if (txq->tx_tag) 3245 bus_dma_tag_destroy(txq->tx_tag); 3246 3247 bzero(txq, sizeof(*txq)); 3248 return (0); 3249} 3250 3251static void 3252oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3253{ 3254 bus_addr_t *ba = arg; 3255 3256 KASSERT(nseg == 1, 3257 ("%s meant for single segment mappings only.", __func__)); 3258 3259 *ba = error ? 0 : segs->ds_addr; 3260} 3261 3262static inline void 3263ring_fl_db(struct adapter *sc, struct sge_fl *fl) 3264{ 3265 uint32_t n, v; 3266 3267 n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); 3268 MPASS(n > 0); 3269 3270 wmb(); 3271 v = fl->dbval | V_PIDX(n); 3272 if (fl->udb) 3273 *fl->udb = htole32(v); 3274 else 3275 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v); 3276 IDXINCR(fl->dbidx, n, fl->sidx); 3277} 3278 3279/* 3280 * Fills up the freelist by allocating upto 'n' buffers. Buffers that are 3281 * recycled do not count towards this allocation budget. 3282 * 3283 * Returns non-zero to indicate that this freelist should be added to the list 3284 * of starving freelists. 3285 */ 3286static int 3287refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 3288{ 3289 __be64 *d; 3290 struct fl_sdesc *sd; 3291 uintptr_t pa; 3292 caddr_t cl; 3293 struct cluster_layout *cll; 3294 struct sw_zone_info *swz; 3295 struct cluster_metadata *clm; 3296 uint16_t max_pidx; 3297 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 3298 3299 FL_LOCK_ASSERT_OWNED(fl); 3300 3301 /* 3302 * We always stop at the begining of the hardware descriptor that's just 3303 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 3304 * which would mean an empty freelist to the chip. 3305 */ 3306 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; 3307 if (fl->pidx == max_pidx * 8) 3308 return (0); 3309 3310 d = &fl->desc[fl->pidx]; 3311 sd = &fl->sdesc[fl->pidx]; 3312 cll = &fl->cll_def; /* default layout */ 3313 swz = &sc->sge.sw_zone_info[cll->zidx]; 3314 3315 while (n > 0) { 3316 3317 if (sd->cl != NULL) { 3318 3319 if (sd->nmbuf == 0) { 3320 /* 3321 * Fast recycle without involving any atomics on 3322 * the cluster's metadata (if the cluster has 3323 * metadata). This happens when all frames 3324 * received in the cluster were small enough to 3325 * fit within a single mbuf each. 3326 */ 3327 fl->cl_fast_recycled++; 3328#ifdef INVARIANTS 3329 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3330 if (clm != NULL) 3331 MPASS(clm->refcount == 1); 3332#endif 3333 goto recycled_fast; 3334 } 3335 3336 /* 3337 * Cluster is guaranteed to have metadata. Clusters 3338 * without metadata always take the fast recycle path 3339 * when they're recycled. 
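			 * Getting here means mbufs were carved out of this
			 * cluster, so drop our reference: if it was the last
			 * one the cluster is reused in place, otherwise the
			 * slot gives up its pointer and a fresh cluster is
			 * allocated below.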
3340 */ 3341 clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3342 MPASS(clm != NULL); 3343 3344 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3345 fl->cl_recycled++; 3346 counter_u64_add(extfree_rels, 1); 3347 goto recycled; 3348 } 3349 sd->cl = NULL; /* gave up my reference */ 3350 } 3351 MPASS(sd->cl == NULL); 3352alloc: 3353 cl = uma_zalloc(swz->zone, M_NOWAIT); 3354 if (__predict_false(cl == NULL)) { 3355 if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 3356 fl->cll_def.zidx == fl->cll_alt.zidx) 3357 break; 3358 3359 /* fall back to the safe zone */ 3360 cll = &fl->cll_alt; 3361 swz = &sc->sge.sw_zone_info[cll->zidx]; 3362 goto alloc; 3363 } 3364 fl->cl_allocated++; 3365 n--; 3366 3367 pa = pmap_kextract((vm_offset_t)cl); 3368 pa += cll->region1; 3369 sd->cl = cl; 3370 sd->cll = *cll; 3371 *d = htobe64(pa | cll->hwidx); 3372 clm = cl_metadata(sc, fl, cll, cl); 3373 if (clm != NULL) { 3374recycled: 3375#ifdef INVARIANTS 3376 clm->sd = sd; 3377#endif 3378 clm->refcount = 1; 3379 } 3380 sd->nmbuf = 0; 3381recycled_fast: 3382 d++; 3383 sd++; 3384 if (__predict_false(++fl->pidx % 8 == 0)) { 3385 uint16_t pidx = fl->pidx / 8; 3386 3387 if (__predict_false(pidx == fl->sidx)) { 3388 fl->pidx = 0; 3389 pidx = 0; 3390 sd = fl->sdesc; 3391 d = fl->desc; 3392 } 3393 if (pidx == max_pidx) 3394 break; 3395 3396 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 3397 ring_fl_db(sc, fl); 3398 } 3399 } 3400 3401 if (fl->pidx / 8 != fl->dbidx) 3402 ring_fl_db(sc, fl); 3403 3404 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 3405} 3406 3407/* 3408 * Attempt to refill all starving freelists. 3409 */ 3410static void 3411refill_sfl(void *arg) 3412{ 3413 struct adapter *sc = arg; 3414 struct sge_fl *fl, *fl_temp; 3415 3416 mtx_lock(&sc->sfl_lock); 3417 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 3418 FL_LOCK(fl); 3419 refill_fl(sc, fl, 64); 3420 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 3421 TAILQ_REMOVE(&sc->sfl, fl, link); 3422 fl->flags &= ~FL_STARVING; 3423 } 3424 FL_UNLOCK(fl); 3425 } 3426 3427 if (!TAILQ_EMPTY(&sc->sfl)) 3428 callout_schedule(&sc->sfl_callout, hz / 5); 3429 mtx_unlock(&sc->sfl_lock); 3430} 3431 3432static int 3433alloc_fl_sdesc(struct sge_fl *fl) 3434{ 3435 3436 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 3437 M_ZERO | M_WAITOK); 3438 3439 return (0); 3440} 3441 3442static void 3443free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 3444{ 3445 struct fl_sdesc *sd; 3446 struct cluster_metadata *clm; 3447 struct cluster_layout *cll; 3448 int i; 3449 3450 sd = fl->sdesc; 3451 for (i = 0; i < fl->sidx * 8; i++, sd++) { 3452 if (sd->cl == NULL) 3453 continue; 3454 3455 cll = &sd->cll; 3456 clm = cl_metadata(sc, fl, cll, sd->cl); 3457 if (sd->nmbuf == 0) 3458 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3459 else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3460 uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3461 counter_u64_add(extfree_rels, 1); 3462 } 3463 sd->cl = NULL; 3464 } 3465 3466 free(fl->sdesc, M_CXGBE); 3467 fl->sdesc = NULL; 3468} 3469 3470int 3471t4_alloc_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag, int count, 3472 int flags) 3473{ 3474 struct tx_map *txm; 3475 int i, rc; 3476 3477 txmaps->map_total = txmaps->map_avail = count; 3478 txmaps->map_cidx = txmaps->map_pidx = 0; 3479 3480 txmaps->maps = malloc(count * sizeof(struct tx_map), M_CXGBE, 3481 M_ZERO | flags); 3482 3483 txm = txmaps->maps; 3484 for (i = 0; i < count; i++, txm++) { 3485 rc = bus_dmamap_create(tx_tag, 0, 
&txm->map); 3486 if (rc != 0) 3487 goto failed; 3488 } 3489 3490 return (0); 3491failed: 3492 while (--i >= 0) { 3493 txm--; 3494 bus_dmamap_destroy(tx_tag, txm->map); 3495 } 3496 KASSERT(txm == txmaps->maps, ("%s: EDOOFUS", __func__)); 3497 3498 free(txmaps->maps, M_CXGBE); 3499 txmaps->maps = NULL; 3500 3501 return (rc); 3502} 3503 3504void 3505t4_free_tx_maps(struct tx_maps *txmaps, bus_dma_tag_t tx_tag) 3506{ 3507 struct tx_map *txm; 3508 int i; 3509 3510 txm = txmaps->maps; 3511 for (i = 0; i < txmaps->map_total; i++, txm++) { 3512 3513 if (txm->m) { 3514 bus_dmamap_unload(tx_tag, txm->map); 3515 m_freem(txm->m); 3516 txm->m = NULL; 3517 } 3518 3519 bus_dmamap_destroy(tx_tag, txm->map); 3520 } 3521 3522 free(txmaps->maps, M_CXGBE); 3523 txmaps->maps = NULL; 3524} 3525 3526/* 3527 * We'll do immediate data tx for non-TSO, but only when not coalescing. We're 3528 * willing to use upto 2 hardware descriptors which means a maximum of 96 bytes 3529 * of immediate data. 3530 */ 3531#define IMM_LEN ( \ 3532 2 * EQ_ESIZE \ 3533 - sizeof(struct fw_eth_tx_pkt_wr) \ 3534 - sizeof(struct cpl_tx_pkt_core)) 3535 3536/* 3537 * Returns non-zero on failure, no need to cleanup anything in that case. 3538 * 3539 * Note 1: We always try to defrag the mbuf if required and return EFBIG only 3540 * if the resulting chain still won't fit in a tx descriptor. 3541 * 3542 * Note 2: We'll pullup the mbuf chain if TSO is requested and the first mbuf 3543 * does not have the TCP header in it. 3544 */ 3545static int 3546get_pkt_sgl(struct sge_txq *txq, struct mbuf **fp, struct sgl *sgl, 3547 int sgl_only) 3548{ 3549 struct mbuf *m = *fp; 3550 struct tx_maps *txmaps; 3551 struct tx_map *txm; 3552 int rc, defragged = 0, n; 3553 3554 TXQ_LOCK_ASSERT_OWNED(txq); 3555 3556 if (m->m_pkthdr.tso_segsz) 3557 sgl_only = 1; /* Do not allow immediate data with LSO */ 3558 3559start: sgl->nsegs = 0; 3560 3561 if (m->m_pkthdr.len <= IMM_LEN && !sgl_only) 3562 return (0); /* nsegs = 0 tells caller to use imm. tx */ 3563 3564 txmaps = &txq->txmaps; 3565 if (txmaps->map_avail == 0) { 3566 txq->no_dmamap++; 3567 return (ENOMEM); 3568 } 3569 txm = &txmaps->maps[txmaps->map_pidx]; 3570 3571 if (m->m_pkthdr.tso_segsz && m->m_len < 50) { 3572 *fp = m_pullup(m, 50); 3573 m = *fp; 3574 if (m == NULL) 3575 return (ENOBUFS); 3576 } 3577 3578 rc = bus_dmamap_load_mbuf_sg(txq->tx_tag, txm->map, m, sgl->seg, 3579 &sgl->nsegs, BUS_DMA_NOWAIT); 3580 if (rc == EFBIG && defragged == 0) { 3581 m = m_defrag(m, M_NOWAIT); 3582 if (m == NULL) 3583 return (EFBIG); 3584 3585 defragged = 1; 3586 *fp = m; 3587 goto start; 3588 } 3589 if (rc != 0) 3590 return (rc); 3591 3592 txm->m = m; 3593 txmaps->map_avail--; 3594 if (++txmaps->map_pidx == txmaps->map_total) 3595 txmaps->map_pidx = 0; 3596 3597 KASSERT(sgl->nsegs > 0 && sgl->nsegs <= TX_SGL_SEGS, 3598 ("%s: bad DMA mapping (%d segments)", __func__, sgl->nsegs)); 3599 3600 /* 3601 * Store the # of flits required to hold this frame's SGL in nflits. An 3602 * SGL has a (ULPTX header + len0, addr0) tuple optionally followed by 3603 * multiple (len0 + len1, addr0, addr1) tuples. If addr1 is not used 3604 * then len1 must be set to 0. 3605 */ 3606 n = sgl->nsegs - 1; 3607 sgl->nflits = (3 * n) / 2 + (n & 1) + 2; 3608 3609 return (0); 3610} 3611 3612 3613/* 3614 * Releases all the txq resources used up in the specified sgl. 
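 *
 * Each frame that used an SGL consumed exactly one DMA map, so undoing it is
 * just a matter of backing map_pidx up by one and unloading that map.  An sgl
 * with nsegs == 0 means immediate data was used and there is nothing to undo.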
3615 */ 3616static int 3617free_pkt_sgl(struct sge_txq *txq, struct sgl *sgl) 3618{ 3619 struct tx_maps *txmaps; 3620 struct tx_map *txm; 3621 3622 TXQ_LOCK_ASSERT_OWNED(txq); 3623 3624 if (sgl->nsegs == 0) 3625 return (0); /* didn't use any map */ 3626 3627 txmaps = &txq->txmaps; 3628 3629 /* 1 pkt uses exactly 1 map, back it out */ 3630 3631 txmaps->map_avail++; 3632 if (txmaps->map_pidx > 0) 3633 txmaps->map_pidx--; 3634 else 3635 txmaps->map_pidx = txmaps->map_total - 1; 3636 3637 txm = &txmaps->maps[txmaps->map_pidx]; 3638 bus_dmamap_unload(txq->tx_tag, txm->map); 3639 txm->m = NULL; 3640 3641 return (0); 3642} 3643 3644static int 3645write_txpkt_wr(struct port_info *pi, struct sge_txq *txq, struct mbuf *m, 3646 struct sgl *sgl) 3647{ 3648 struct sge_eq *eq = &txq->eq; 3649 struct fw_eth_tx_pkt_wr *wr; 3650 struct cpl_tx_pkt_core *cpl; 3651 uint32_t ctrl; /* used in many unrelated places */ 3652 uint64_t ctrl1; 3653 int nflits, ndesc, pktlen; 3654 struct tx_sdesc *txsd; 3655 caddr_t dst; 3656 3657 TXQ_LOCK_ASSERT_OWNED(txq); 3658 3659 pktlen = m->m_pkthdr.len; 3660 3661 /* 3662 * Do we have enough flits to send this frame out? 3663 */ 3664 ctrl = sizeof(struct cpl_tx_pkt_core); 3665 if (m->m_pkthdr.tso_segsz) { 3666 nflits = TXPKT_LSO_WR_HDR; 3667 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 3668 } else 3669 nflits = TXPKT_WR_HDR; 3670 if (sgl->nsegs > 0) 3671 nflits += sgl->nflits; 3672 else { 3673 nflits += howmany(pktlen, 8); 3674 ctrl += pktlen; 3675 } 3676 ndesc = howmany(nflits, 8); 3677 if (ndesc > eq->avail) 3678 return (ENOMEM); 3679 3680 /* Firmware work request header */ 3681 wr = (void *)&eq->desc[eq->pidx]; 3682 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 3683 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 3684 ctrl = V_FW_WR_LEN16(howmany(nflits, 2)); 3685 if (eq->avail == ndesc) { 3686 if (!(eq->flags & EQ_CRFLUSHED)) { 3687 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 3688 eq->flags |= EQ_CRFLUSHED; 3689 } 3690 eq->flags |= EQ_STALLED; 3691 } 3692 3693 wr->equiq_to_len16 = htobe32(ctrl); 3694 wr->r3 = 0; 3695 3696 if (m->m_pkthdr.tso_segsz) { 3697 struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 3698 struct ether_header *eh; 3699 void *l3hdr; 3700#if defined(INET) || defined(INET6) 3701 struct tcphdr *tcp; 3702#endif 3703 uint16_t eh_type; 3704 3705 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 3706 F_LSO_LAST_SLICE; 3707 3708 eh = mtod(m, struct ether_header *); 3709 eh_type = ntohs(eh->ether_type); 3710 if (eh_type == ETHERTYPE_VLAN) { 3711 struct ether_vlan_header *evh = (void *)eh; 3712 3713 ctrl |= V_LSO_ETHHDR_LEN(1); 3714 l3hdr = evh + 1; 3715 eh_type = ntohs(evh->evl_proto); 3716 } else 3717 l3hdr = eh + 1; 3718 3719 switch (eh_type) { 3720#ifdef INET6 3721 case ETHERTYPE_IPV6: 3722 { 3723 struct ip6_hdr *ip6 = l3hdr; 3724 3725 /* 3726 * XXX-BZ For now we do not pretend to support 3727 * IPv6 extension headers. 
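			 * The header-length fields programmed just below are
			 * in 4-byte units: a bare 40-byte IPv6 header becomes
			 * IPHDR_LEN 10, and th_off is already expressed in
			 * 32-bit words.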
3728 */ 3729 KASSERT(ip6->ip6_nxt == IPPROTO_TCP, ("%s: CSUM_TSO " 3730 "with ip6_nxt != TCP: %u", __func__, ip6->ip6_nxt)); 3731 tcp = (struct tcphdr *)(ip6 + 1); 3732 ctrl |= F_LSO_IPV6; 3733 ctrl |= V_LSO_IPHDR_LEN(sizeof(*ip6) >> 2) | 3734 V_LSO_TCPHDR_LEN(tcp->th_off); 3735 break; 3736 } 3737#endif 3738#ifdef INET 3739 case ETHERTYPE_IP: 3740 { 3741 struct ip *ip = l3hdr; 3742 3743 tcp = (void *)((uintptr_t)ip + ip->ip_hl * 4); 3744 ctrl |= V_LSO_IPHDR_LEN(ip->ip_hl) | 3745 V_LSO_TCPHDR_LEN(tcp->th_off); 3746 break; 3747 } 3748#endif 3749 default: 3750 panic("%s: CSUM_TSO but no supported IP version " 3751 "(0x%04x)", __func__, eh_type); 3752 } 3753 3754 lso->lso_ctrl = htobe32(ctrl); 3755 lso->ipid_ofst = htobe16(0); 3756 lso->mss = htobe16(m->m_pkthdr.tso_segsz); 3757 lso->seqno_offset = htobe32(0); 3758 lso->len = htobe32(pktlen); 3759 3760 cpl = (void *)(lso + 1); 3761 3762 txq->tso_wrs++; 3763 } else 3764 cpl = (void *)(wr + 1); 3765 3766 /* Checksum offload */ 3767 ctrl1 = 0; 3768 if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))) 3769 ctrl1 |= F_TXPKT_IPCSUM_DIS; 3770 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 3771 CSUM_TCP_IPV6 | CSUM_TSO))) 3772 ctrl1 |= F_TXPKT_L4CSUM_DIS; 3773 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3774 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3775 txq->txcsum++; /* some hardware assistance provided */ 3776 3777 /* VLAN tag insertion */ 3778 if (m->m_flags & M_VLANTAG) { 3779 ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 3780 txq->vlan_insertion++; 3781 } 3782 3783 /* CPL header */ 3784 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3785 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 3786 cpl->pack = 0; 3787 cpl->len = htobe16(pktlen); 3788 cpl->ctrl1 = htobe64(ctrl1); 3789 3790 /* Software descriptor */ 3791 txsd = &txq->sdesc[eq->pidx]; 3792 txsd->desc_used = ndesc; 3793 3794 eq->pending += ndesc; 3795 eq->avail -= ndesc; 3796 eq->pidx += ndesc; 3797 if (eq->pidx >= eq->cap) 3798 eq->pidx -= eq->cap; 3799 3800 /* SGL */ 3801 dst = (void *)(cpl + 1); 3802 if (sgl->nsegs > 0) { 3803 txsd->credits = 1; 3804 txq->sgl_wrs++; 3805 write_sgl_to_txd(eq, sgl, &dst); 3806 } else { 3807 txsd->credits = 0; 3808 txq->imm_wrs++; 3809 for (; m; m = m->m_next) { 3810 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 3811#ifdef INVARIANTS 3812 pktlen -= m->m_len; 3813#endif 3814 } 3815#ifdef INVARIANTS 3816 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 3817#endif 3818 3819 } 3820 3821 txq->txpkt_wrs++; 3822 return (0); 3823} 3824 3825/* 3826 * Returns 0 to indicate that m has been accepted into a coalesced tx work 3827 * request. It has either been folded into txpkts or txpkts was flushed and m 3828 * has started a new coalesced work request (as the first frame in a fresh 3829 * txpkts). 3830 * 3831 * Returns non-zero to indicate a failure - caller is responsible for 3832 * transmitting m, if there was anything in txpkts it has been flushed. 
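 *
 * A frame is folded in only if it is not a TSO frame, the combined WR stays
 * within TX_WR_FLITS and within the flits the EQ can still hold (eq->avail *
 * 8), and the running payload total stays under 64KB (plen is carried in a
 * 16-bit field).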
3833 */ 3834static int 3835add_to_txpkts(struct port_info *pi, struct sge_txq *txq, struct txpkts *txpkts, 3836 struct mbuf *m, struct sgl *sgl) 3837{ 3838 struct sge_eq *eq = &txq->eq; 3839 int can_coalesce; 3840 struct tx_sdesc *txsd; 3841 int flits; 3842 3843 TXQ_LOCK_ASSERT_OWNED(txq); 3844 3845 KASSERT(sgl->nsegs, ("%s: can't coalesce imm data", __func__)); 3846 3847 if (txpkts->npkt > 0) { 3848 flits = TXPKTS_PKT_HDR + sgl->nflits; 3849 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 3850 txpkts->nflits + flits <= TX_WR_FLITS && 3851 txpkts->nflits + flits <= eq->avail * 8 && 3852 txpkts->plen + m->m_pkthdr.len < 65536; 3853 3854 if (can_coalesce) { 3855 txpkts->npkt++; 3856 txpkts->nflits += flits; 3857 txpkts->plen += m->m_pkthdr.len; 3858 3859 txsd = &txq->sdesc[eq->pidx]; 3860 txsd->credits++; 3861 3862 return (0); 3863 } 3864 3865 /* 3866 * Couldn't coalesce m into txpkts. The first order of business 3867 * is to send txpkts on its way. Then we'll revisit m. 3868 */ 3869 write_txpkts_wr(txq, txpkts); 3870 } 3871 3872 /* 3873 * Check if we can start a new coalesced tx work request with m as 3874 * the first packet in it. 3875 */ 3876 3877 KASSERT(txpkts->npkt == 0, ("%s: txpkts not empty", __func__)); 3878 3879 flits = TXPKTS_WR_HDR + sgl->nflits; 3880 can_coalesce = m->m_pkthdr.tso_segsz == 0 && 3881 flits <= eq->avail * 8 && flits <= TX_WR_FLITS; 3882 3883 if (can_coalesce == 0) 3884 return (EINVAL); 3885 3886 /* 3887 * Start a fresh coalesced tx WR with m as the first frame in it. 3888 */ 3889 txpkts->npkt = 1; 3890 txpkts->nflits = flits; 3891 txpkts->flitp = &eq->desc[eq->pidx].flit[2]; 3892 txpkts->plen = m->m_pkthdr.len; 3893 3894 txsd = &txq->sdesc[eq->pidx]; 3895 txsd->credits = 1; 3896 3897 return (0); 3898} 3899 3900/* 3901 * Note that write_txpkts_wr can never run out of hardware descriptors (but 3902 * write_txpkt_wr can). add_to_txpkts ensures that a frame is accepted for 3903 * coalescing only if sufficient hardware descriptors are available. 
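 *
 * Illustrative arithmetic: each descriptor is 64 bytes (8 flits of 8
 * bytes), so a txpkts with nflits = 20 takes ndesc = howmany(20, 8) = 3
 * descriptors and advertises howmany(20, 2) = 10 sixteen-byte units in
 * LEN16.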
3904 */ 3905static void 3906write_txpkts_wr(struct sge_txq *txq, struct txpkts *txpkts) 3907{ 3908 struct sge_eq *eq = &txq->eq; 3909 struct fw_eth_tx_pkts_wr *wr; 3910 struct tx_sdesc *txsd; 3911 uint32_t ctrl; 3912 int ndesc; 3913 3914 TXQ_LOCK_ASSERT_OWNED(txq); 3915 3916 ndesc = howmany(txpkts->nflits, 8); 3917 3918 wr = (void *)&eq->desc[eq->pidx]; 3919 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 3920 ctrl = V_FW_WR_LEN16(howmany(txpkts->nflits, 2)); 3921 if (eq->avail == ndesc) { 3922 if (!(eq->flags & EQ_CRFLUSHED)) { 3923 ctrl |= F_FW_WR_EQUEQ | F_FW_WR_EQUIQ; 3924 eq->flags |= EQ_CRFLUSHED; 3925 } 3926 eq->flags |= EQ_STALLED; 3927 } 3928 wr->equiq_to_len16 = htobe32(ctrl); 3929 wr->plen = htobe16(txpkts->plen); 3930 wr->npkt = txpkts->npkt; 3931 wr->r3 = wr->type = 0; 3932 3933 /* Everything else already written */ 3934 3935 txsd = &txq->sdesc[eq->pidx]; 3936 txsd->desc_used = ndesc; 3937 3938 KASSERT(eq->avail >= ndesc, ("%s: out of descriptors", __func__)); 3939 3940 eq->pending += ndesc; 3941 eq->avail -= ndesc; 3942 eq->pidx += ndesc; 3943 if (eq->pidx >= eq->cap) 3944 eq->pidx -= eq->cap; 3945 3946 txq->txpkts_pkts += txpkts->npkt; 3947 txq->txpkts_wrs++; 3948 txpkts->npkt = 0; /* emptied */ 3949} 3950 3951static inline void 3952write_ulp_cpl_sgl(struct port_info *pi, struct sge_txq *txq, 3953 struct txpkts *txpkts, struct mbuf *m, struct sgl *sgl) 3954{ 3955 struct ulp_txpkt *ulpmc; 3956 struct ulptx_idata *ulpsc; 3957 struct cpl_tx_pkt_core *cpl; 3958 struct sge_eq *eq = &txq->eq; 3959 uintptr_t flitp, start, end; 3960 uint64_t ctrl; 3961 caddr_t dst; 3962 3963 KASSERT(txpkts->npkt > 0, ("%s: txpkts is empty", __func__)); 3964 3965 start = (uintptr_t)eq->desc; 3966 end = (uintptr_t)eq->spg; 3967 3968 /* Checksum offload */ 3969 ctrl = 0; 3970 if (!(m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO))) 3971 ctrl |= F_TXPKT_IPCSUM_DIS; 3972 if (!(m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 3973 CSUM_TCP_IPV6 | CSUM_TSO))) 3974 ctrl |= F_TXPKT_L4CSUM_DIS; 3975 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3976 CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3977 txq->txcsum++; /* some hardware assistance provided */ 3978 3979 /* VLAN tag insertion */ 3980 if (m->m_flags & M_VLANTAG) { 3981 ctrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 3982 txq->vlan_insertion++; 3983 } 3984 3985 /* 3986 * The previous packet's SGL must have ended at a 16 byte boundary (this 3987 * is required by the firmware/hardware). It follows that flitp cannot 3988 * wrap around between the ULPTX master command and ULPTX subcommand (8 3989 * bytes each), and that it can not wrap around in the middle of the 3990 * cpl_tx_pkt_core either. 
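 * (ulp_txpkt plus ulptx_idata is 16 bytes and cpl_tx_pkt_core is another
 * 16, so the SGL written after them starts at a 16 byte boundary as well.)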
3991 */ 3992 flitp = (uintptr_t)txpkts->flitp; 3993 KASSERT((flitp & 0xf) == 0, 3994 ("%s: last SGL did not end at 16 byte boundary: %p", 3995 __func__, txpkts->flitp)); 3996 3997 /* ULP master command */ 3998 ulpmc = (void *)flitp; 3999 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0) | 4000 V_ULP_TXPKT_FID(eq->iqid)); 4001 ulpmc->len = htonl(howmany(sizeof(*ulpmc) + sizeof(*ulpsc) + 4002 sizeof(*cpl) + 8 * sgl->nflits, 16)); 4003 4004 /* ULP subcommand */ 4005 ulpsc = (void *)(ulpmc + 1); 4006 ulpsc->cmd_more = htobe32(V_ULPTX_CMD((u32)ULP_TX_SC_IMM) | 4007 F_ULP_TX_SC_MORE); 4008 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 4009 4010 flitp += sizeof(*ulpmc) + sizeof(*ulpsc); 4011 if (flitp == end) 4012 flitp = start; 4013 4014 /* CPL_TX_PKT */ 4015 cpl = (void *)flitp; 4016 cpl->ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 4017 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(pi->adapter->pf)); 4018 cpl->pack = 0; 4019 cpl->len = htobe16(m->m_pkthdr.len); 4020 cpl->ctrl1 = htobe64(ctrl); 4021 4022 flitp += sizeof(*cpl); 4023 if (flitp == end) 4024 flitp = start; 4025 4026 /* SGL for this frame */ 4027 dst = (caddr_t)flitp; 4028 txpkts->nflits += write_sgl_to_txd(eq, sgl, &dst); 4029 txpkts->flitp = (void *)dst; 4030 4031 KASSERT(((uintptr_t)dst & 0xf) == 0, 4032 ("%s: SGL ends at %p (not a 16 byte boundary)", __func__, dst)); 4033} 4034 4035/* 4036 * If the SGL ends on an address that is not 16 byte aligned, this function will 4037 * add a 0 filled flit at the end. It returns 1 in that case. 4038 */ 4039static int 4040write_sgl_to_txd(struct sge_eq *eq, struct sgl *sgl, caddr_t *to) 4041{ 4042 __be64 *flitp, *end; 4043 struct ulptx_sgl *usgl; 4044 bus_dma_segment_t *seg; 4045 int i, padded; 4046 4047 KASSERT(sgl->nsegs > 0 && sgl->nflits > 0, 4048 ("%s: bad SGL - nsegs=%d, nflits=%d", 4049 __func__, sgl->nsegs, sgl->nflits)); 4050 4051 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 4052 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 4053 4054 flitp = (__be64 *)(*to); 4055 end = flitp + sgl->nflits; 4056 seg = &sgl->seg[0]; 4057 usgl = (void *)flitp; 4058 4059 /* 4060 * We start at a 16 byte boundary somewhere inside the tx descriptor 4061 * ring, so we're at least 16 bytes away from the status page. There is 4062 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
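 *
 * Illustrative flit math: the SGL header and first segment take 2 flits,
 * and each further pair of segments takes 3, so a 3 segment SGL needs 5
 * flits.  An odd flit count ends off a 16 byte boundary, in which case a
 * zero pad flit is appended near the end of this function and 1 is
 * returned.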
4063 */ 4064 4065 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 4066 V_ULPTX_NSGE(sgl->nsegs)); 4067 usgl->len0 = htobe32(seg->ds_len); 4068 usgl->addr0 = htobe64(seg->ds_addr); 4069 seg++; 4070 4071 if ((uintptr_t)end <= (uintptr_t)eq->spg) { 4072 4073 /* Won't wrap around at all */ 4074 4075 for (i = 0; i < sgl->nsegs - 1; i++, seg++) { 4076 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ds_len); 4077 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ds_addr); 4078 } 4079 if (i & 1) 4080 usgl->sge[i / 2].len[1] = htobe32(0); 4081 } else { 4082 4083 /* Will wrap somewhere in the rest of the SGL */ 4084 4085 /* 2 flits already written, write the rest flit by flit */ 4086 flitp = (void *)(usgl + 1); 4087 for (i = 0; i < sgl->nflits - 2; i++) { 4088 if ((uintptr_t)flitp == (uintptr_t)eq->spg) 4089 flitp = (void *)eq->desc; 4090 *flitp++ = get_flit(seg, sgl->nsegs - 1, i); 4091 } 4092 end = flitp; 4093 } 4094 4095 if ((uintptr_t)end & 0xf) { 4096 *(uint64_t *)end = 0; 4097 end++; 4098 padded = 1; 4099 } else 4100 padded = 0; 4101 4102 if ((uintptr_t)end == (uintptr_t)eq->spg) 4103 *to = (void *)eq->desc; 4104 else 4105 *to = (void *)end; 4106 4107 return (padded); 4108} 4109 4110static inline void 4111copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 4112{ 4113 if (__predict_true((uintptr_t)(*to) + len <= (uintptr_t)eq->spg)) { 4114 bcopy(from, *to, len); 4115 (*to) += len; 4116 } else { 4117 int portion = (uintptr_t)eq->spg - (uintptr_t)(*to); 4118 4119 bcopy(from, *to, portion); 4120 from += portion; 4121 portion = len - portion; /* remaining */ 4122 bcopy(from, (void *)eq->desc, portion); 4123 (*to) = (caddr_t)eq->desc + portion; 4124 } 4125} 4126 4127static inline void 4128ring_eq_db(struct adapter *sc, struct sge_eq *eq) 4129{ 4130 u_int db, pending; 4131 4132 db = eq->doorbells; 4133 pending = eq->pending; 4134 if (pending > 1) 4135 clrbit(&db, DOORBELL_WCWR); 4136 eq->pending = 0; 4137 wmb(); 4138 4139 switch (ffs(db) - 1) { 4140 case DOORBELL_UDB: 4141 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending)); 4142 return; 4143 4144 case DOORBELL_WCWR: { 4145 volatile uint64_t *dst, *src; 4146 int i; 4147 4148 /* 4149 * Queues whose 128B doorbell segment fits in the page do not 4150 * use relative qid (udb_qid is always 0). Only queues with 4151 * doorbell segments can do WCWR. 4152 */ 4153 KASSERT(eq->udb_qid == 0 && pending == 1, 4154 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 4155 __func__, eq->doorbells, pending, eq->pidx, eq)); 4156 4157 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 4158 UDBS_DB_OFFSET); 4159 i = eq->pidx ? eq->pidx - 1 : eq->cap - 1; 4160 src = (void *)&eq->desc[i]; 4161 while (src != (void *)&eq->desc[i + 1]) 4162 *dst++ = *src++; 4163 wmb(); 4164 return; 4165 } 4166 4167 case DOORBELL_UDBWC: 4168 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(pending)); 4169 wmb(); 4170 return; 4171 4172 case DOORBELL_KDB: 4173 t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 4174 V_QID(eq->cntxt_id) | V_PIDX(pending)); 4175 return; 4176 } 4177} 4178 4179static inline int 4180reclaimable(struct sge_eq *eq) 4181{ 4182 unsigned int cidx; 4183 4184 cidx = eq->spg->cidx; /* stable snapshot */ 4185 cidx = be16toh(cidx); 4186 4187 if (cidx >= eq->cidx) 4188 return (cidx - eq->cidx); 4189 else 4190 return (cidx + eq->cap - eq->cidx); 4191} 4192 4193/* 4194 * There are "can_reclaim" tx descriptors ready to be reclaimed. Reclaim as 4195 * many as possible but stop when there are around "n" mbufs to free. 
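 * ("Around" because descriptors are reclaimed one whole work request at a
 * time, so the number of maps/mbufs freed can overshoot n by the credits
 * of the last software descriptor processed.)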
4196 * 4197 * The actual number reclaimed is provided as the return value. 4198 */ 4199static int 4200reclaim_tx_descs(struct sge_txq *txq, int can_reclaim, int n) 4201{ 4202 struct tx_sdesc *txsd; 4203 struct tx_maps *txmaps; 4204 struct tx_map *txm; 4205 unsigned int reclaimed, maps; 4206 struct sge_eq *eq = &txq->eq; 4207 4208 TXQ_LOCK_ASSERT_OWNED(txq); 4209 4210 if (can_reclaim == 0) 4211 can_reclaim = reclaimable(eq); 4212 4213 maps = reclaimed = 0; 4214 while (can_reclaim && maps < n) { 4215 int ndesc; 4216 4217 txsd = &txq->sdesc[eq->cidx]; 4218 ndesc = txsd->desc_used; 4219 4220 /* Firmware doesn't return "partial" credits. */ 4221 KASSERT(can_reclaim >= ndesc, 4222 ("%s: unexpected number of credits: %d, %d", 4223 __func__, can_reclaim, ndesc)); 4224 4225 maps += txsd->credits; 4226 4227 reclaimed += ndesc; 4228 can_reclaim -= ndesc; 4229 4230 eq->cidx += ndesc; 4231 if (__predict_false(eq->cidx >= eq->cap)) 4232 eq->cidx -= eq->cap; 4233 } 4234 4235 txmaps = &txq->txmaps; 4236 txm = &txmaps->maps[txmaps->map_cidx]; 4237 if (maps) 4238 prefetch(txm->m); 4239 4240 eq->avail += reclaimed; 4241 KASSERT(eq->avail < eq->cap, /* avail tops out at (cap - 1) */ 4242 ("%s: too many descriptors available", __func__)); 4243 4244 txmaps->map_avail += maps; 4245 KASSERT(txmaps->map_avail <= txmaps->map_total, 4246 ("%s: too many maps available", __func__)); 4247 4248 while (maps--) { 4249 struct tx_map *next; 4250 4251 next = txm + 1; 4252 if (__predict_false(txmaps->map_cidx + 1 == txmaps->map_total)) 4253 next = txmaps->maps; 4254 prefetch(next->m); 4255 4256 bus_dmamap_unload(txq->tx_tag, txm->map); 4257 m_freem(txm->m); 4258 txm->m = NULL; 4259 4260 txm = next; 4261 if (__predict_false(++txmaps->map_cidx == txmaps->map_total)) 4262 txmaps->map_cidx = 0; 4263 } 4264 4265 return (reclaimed); 4266} 4267 4268static void 4269write_eqflush_wr(struct sge_eq *eq) 4270{ 4271 struct fw_eq_flush_wr *wr; 4272 4273 EQ_LOCK_ASSERT_OWNED(eq); 4274 KASSERT(eq->avail > 0, ("%s: no descriptors left.", __func__)); 4275 KASSERT(!(eq->flags & EQ_CRFLUSHED), ("%s: flushed already", __func__)); 4276 4277 wr = (void *)&eq->desc[eq->pidx]; 4278 bzero(wr, sizeof(*wr)); 4279 wr->opcode = FW_EQ_FLUSH_WR; 4280 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(sizeof(*wr) / 16) | 4281 F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 4282 4283 eq->flags |= (EQ_CRFLUSHED | EQ_STALLED); 4284 eq->pending++; 4285 eq->avail--; 4286 if (++eq->pidx == eq->cap) 4287 eq->pidx = 0; 4288} 4289 4290static __be64 4291get_flit(bus_dma_segment_t *sgl, int nsegs, int idx) 4292{ 4293 int i = (idx / 3) * 2; 4294 4295 switch (idx % 3) { 4296 case 0: { 4297 __be64 rc; 4298 4299 rc = htobe32(sgl[i].ds_len); 4300 if (i + 1 < nsegs) 4301 rc |= (uint64_t)htobe32(sgl[i + 1].ds_len) << 32; 4302 4303 return (rc); 4304 } 4305 case 1: 4306 return htobe64(sgl[i].ds_addr); 4307 case 2: 4308 return htobe64(sgl[i + 1].ds_addr); 4309 } 4310 4311 return (0); 4312} 4313 4314static void 4315find_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) 4316{ 4317 int8_t zidx, hwidx, idx; 4318 uint16_t region1, region3; 4319 int spare, spare_needed, n; 4320 struct sw_zone_info *swz; 4321 struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; 4322 4323 /* 4324 * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize 4325 * large enough for the max payload and cluster metadata. Otherwise 4326 * settle for the largest bufsize that leaves enough room in the cluster 4327 * for metadata. 
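 * (With packing the search below can stop early: the first zone of at
 * least safest_rx_cluster bytes whose bufsize covers maxp while leaving
 * CL_METADATA_SIZE bytes of spare room is considered good enough.)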
4328 * 4329 * Without buffer packing: Look for the smallest zone which has a 4330 * bufsize large enough for the max payload. Settle for the largest 4331 * bufsize available if there's nothing big enough for max payload. 4332 */ 4333 spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; 4334 swz = &sc->sge.sw_zone_info[0]; 4335 hwidx = -1; 4336 for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { 4337 if (swz->size > largest_rx_cluster) { 4338 if (__predict_true(hwidx != -1)) 4339 break; 4340 4341 /* 4342 * This is a misconfiguration. largest_rx_cluster is 4343 * preventing us from finding a refill source. See 4344 * dev.t5nex.<n>.buffer_sizes to figure out why. 4345 */ 4346 device_printf(sc->dev, "largest_rx_cluster=%u leaves no" 4347 " refill source for fl %p (dma %u). Ignored.\n", 4348 largest_rx_cluster, fl, maxp); 4349 } 4350 for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) { 4351 hwb = &hwb_list[idx]; 4352 spare = swz->size - hwb->size; 4353 if (spare < spare_needed) 4354 continue; 4355 4356 hwidx = idx; /* best option so far */ 4357 if (hwb->size >= maxp) { 4358 4359 if ((fl->flags & FL_BUF_PACKING) == 0) 4360 goto done; /* stop looking (not packing) */ 4361 4362 if (swz->size >= safest_rx_cluster) 4363 goto done; /* stop looking (packing) */ 4364 } 4365 break; /* keep looking, next zone */ 4366 } 4367 } 4368done: 4369 /* A usable hwidx has been located. */ 4370 MPASS(hwidx != -1); 4371 hwb = &hwb_list[hwidx]; 4372 zidx = hwb->zidx; 4373 swz = &sc->sge.sw_zone_info[zidx]; 4374 region1 = 0; 4375 region3 = swz->size - hwb->size; 4376 4377 /* 4378 * Stay within this zone and see if there is a better match when mbuf 4379 * inlining is allowed. Remember that the hwidx's are sorted in 4380 * decreasing order of size (so in increasing order of spare area). 4381 */ 4382 for (idx = hwidx; idx != -1; idx = hwb->next) { 4383 hwb = &hwb_list[idx]; 4384 spare = swz->size - hwb->size; 4385 4386 if (allow_mbufs_in_cluster == 0 || hwb->size < maxp) 4387 break; 4388 4389 /* 4390 * Do not inline mbufs if doing so would violate the pad/pack 4391 * boundary alignment requirement. 4392 */ 4393 if (fl_pad && (MSIZE % sc->sge.pad_boundary) != 0) 4394 continue; 4395 if (fl->flags & FL_BUF_PACKING && 4396 (MSIZE % sc->sge.pack_boundary) != 0) 4397 continue; 4398 4399 if (spare < CL_METADATA_SIZE + MSIZE) 4400 continue; 4401 n = (spare - CL_METADATA_SIZE) / MSIZE; 4402 if (n > howmany(hwb->size, maxp)) 4403 break; 4404 4405 hwidx = idx; 4406 if (fl->flags & FL_BUF_PACKING) { 4407 region1 = n * MSIZE; 4408 region3 = spare - region1; 4409 } else { 4410 region1 = MSIZE; 4411 region3 = spare - region1; 4412 break; 4413 } 4414 } 4415 4416 KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES, 4417 ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp)); 4418 KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES, 4419 ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp)); 4420 KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 == 4421 sc->sge.sw_zone_info[zidx].size, 4422 ("%s: bad buffer layout for fl %p, maxp %d. " 4423 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4424 sc->sge.sw_zone_info[zidx].size, region1, 4425 sc->sge.hw_buf_info[hwidx].size, region3)); 4426 if (fl->flags & FL_BUF_PACKING || region1 > 0) { 4427 KASSERT(region3 >= CL_METADATA_SIZE, 4428 ("%s: no room for metadata. 
fl %p, maxp %d; " 4429 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4430 sc->sge.sw_zone_info[zidx].size, region1, 4431 sc->sge.hw_buf_info[hwidx].size, region3)); 4432 KASSERT(region1 % MSIZE == 0, 4433 ("%s: bad mbuf region for fl %p, maxp %d. " 4434 "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp, 4435 sc->sge.sw_zone_info[zidx].size, region1, 4436 sc->sge.hw_buf_info[hwidx].size, region3)); 4437 } 4438 4439 fl->cll_def.zidx = zidx; 4440 fl->cll_def.hwidx = hwidx; 4441 fl->cll_def.region1 = region1; 4442 fl->cll_def.region3 = region3; 4443} 4444 4445static void 4446find_safe_refill_source(struct adapter *sc, struct sge_fl *fl) 4447{ 4448 struct sge *s = &sc->sge; 4449 struct hw_buf_info *hwb; 4450 struct sw_zone_info *swz; 4451 int spare; 4452 int8_t hwidx; 4453 4454 if (fl->flags & FL_BUF_PACKING) 4455 hwidx = s->safe_hwidx2; /* with room for metadata */ 4456 else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) { 4457 hwidx = s->safe_hwidx2; 4458 hwb = &s->hw_buf_info[hwidx]; 4459 swz = &s->sw_zone_info[hwb->zidx]; 4460 spare = swz->size - hwb->size; 4461 4462 /* no good if there isn't room for an mbuf as well */ 4463 if (spare < CL_METADATA_SIZE + MSIZE) 4464 hwidx = s->safe_hwidx1; 4465 } else 4466 hwidx = s->safe_hwidx1; 4467 4468 if (hwidx == -1) { 4469 /* No fallback source */ 4470 fl->cll_alt.hwidx = -1; 4471 fl->cll_alt.zidx = -1; 4472 4473 return; 4474 } 4475 4476 hwb = &s->hw_buf_info[hwidx]; 4477 swz = &s->sw_zone_info[hwb->zidx]; 4478 spare = swz->size - hwb->size; 4479 fl->cll_alt.hwidx = hwidx; 4480 fl->cll_alt.zidx = hwb->zidx; 4481 if (allow_mbufs_in_cluster && 4482 (fl_pad == 0 || (MSIZE % sc->sge.pad_boundary) == 0)) 4483 fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE; 4484 else 4485 fl->cll_alt.region1 = 0; 4486 fl->cll_alt.region3 = spare - fl->cll_alt.region1; 4487} 4488 4489static void 4490add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 4491{ 4492 mtx_lock(&sc->sfl_lock); 4493 FL_LOCK(fl); 4494 if ((fl->flags & FL_DOOMED) == 0) { 4495 fl->flags |= FL_STARVING; 4496 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 4497 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 4498 } 4499 FL_UNLOCK(fl); 4500 mtx_unlock(&sc->sfl_lock); 4501} 4502 4503static int 4504handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 4505 struct mbuf *m) 4506{ 4507 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 4508 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 4509 struct adapter *sc = iq->adapter; 4510 struct sge *s = &sc->sge; 4511 struct sge_eq *eq; 4512 4513 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__, 4514 rss->opcode)); 4515 4516 eq = s->eqmap[qid - s->eq_start]; 4517 EQ_LOCK(eq); 4518 KASSERT(eq->flags & EQ_CRFLUSHED, 4519 ("%s: unsolicited egress update", __func__)); 4520 eq->flags &= ~EQ_CRFLUSHED; 4521 eq->egr_update++; 4522 4523 if (__predict_false(eq->flags & EQ_DOOMED)) 4524 wakeup_one(eq); 4525 else if (eq->flags & EQ_STALLED && can_resume_tx(eq)) 4526 taskqueue_enqueue(sc->tq[eq->tx_chan], &eq->tx_task); 4527 EQ_UNLOCK(eq); 4528 4529 return (0); 4530} 4531 4532/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */ 4533CTASSERT(offsetof(struct cpl_fw4_msg, data) == \ 4534 offsetof(struct cpl_fw6_msg, data)); 4535 4536static int 4537handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 4538{ 4539 struct adapter *sc = iq->adapter; 4540 const struct cpl_fw6_msg *cpl = (const void *)(rss + 1); 4541 4542 KASSERT(m == NULL, ("%s: payload 
with opcode %02x", __func__, 4543 rss->opcode)); 4544 4545 if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) { 4546 const struct rss_header *rss2; 4547 4548 rss2 = (const struct rss_header *)&cpl->data[0]; 4549 return (sc->cpl_handler[rss2->opcode](iq, rss2, m)); 4550 } 4551 4552 return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0])); 4553} 4554 4555static int 4556sysctl_uint16(SYSCTL_HANDLER_ARGS) 4557{ 4558 uint16_t *id = arg1; 4559 int i = *id; 4560 4561 return sysctl_handle_int(oidp, &i, 0, req); 4562} 4563 4564static int 4565sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 4566{ 4567 struct sge *s = arg1; 4568 struct hw_buf_info *hwb = &s->hw_buf_info[0]; 4569 struct sw_zone_info *swz = &s->sw_zone_info[0]; 4570 int i, rc; 4571 struct sbuf sb; 4572 char c; 4573 4574 sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND); 4575 for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) { 4576 if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster) 4577 c = '*'; 4578 else 4579 c = '\0'; 4580 4581 sbuf_printf(&sb, "%u%c ", hwb->size, c); 4582 } 4583 sbuf_trim(&sb); 4584 sbuf_finish(&sb); 4585 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 4586 sbuf_delete(&sb); 4587 return (rc); 4588} 4589
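/*
 * For reference: sysctl_bufsizes above emits each hardware free list buffer
 * size as "%u%c", where the trailing character is '*' when the size's
 * backing software zone is no larger than largest_rx_cluster, i.e. when
 * that buffer size is actually eligible as a refill source (see
 * find_best_refill_source).
 */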