/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.2/sys/dev/cxgbe/t4_sge.c 284093 2015-06-06 18:46:37Z np $");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/types.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "t4_mp_ring.h"

#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
 */
int fl_pktshift = 2;
TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 *  0: disable padding.
 *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
 */
int fl_pad = -1;
TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);

/*
 * Status page length.
 * -1: driver should figure out a good value.
 *  64 or 128 are the only other valid values.
 */
int spg_len = -1;
TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
TUNABLE_INT("hw.cxgbe.cong_drop", &cong_drop);

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 *  0: disable buffer packing.
 *  1: enable buffer packing.
 */
static int buffer_packing = -1;
TUNABLE_INT("hw.cxgbe.buffer_packing", &buffer_packing);

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
TUNABLE_INT("hw.cxgbe.fl_pack", &fl_pack);

/*
 * Allow the driver to create mbuf(s) in a cluster allocated for rx.
 * 0: never; always allocate mbufs from the zone_mbuf UMA zone.
 * 1: ok to create mbuf(s) within a cluster if there is room.
 */
static int allow_mbufs_in_cluster = 1;
TUNABLE_INT("hw.cxgbe.allow_mbufs_in_cluster", &allow_mbufs_in_cluster);

/*
 * Largest rx cluster size that the driver is allowed to allocate.
 */
static int largest_rx_cluster = MJUM16BYTES;
TUNABLE_INT("hw.cxgbe.largest_rx_cluster", &largest_rx_cluster);

/*
 * Size of cluster allocation that's most likely to succeed.  The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
TUNABLE_INT("hw.cxgbe.safest_rx_cluster", &safest_rx_cluster);
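
/*
 * Illustrative example (added by the editor, not part of the original
 * source): all of the knobs above are loader(8) tunables (TUNABLE_INT), so
 * they would normally be set in /boot/loader.conf before the module loads.
 * The values below only sketch the syntax; they are not recommendations.
 *
 *	hw.cxgbe.fl_pktshift="2"	# DMA offset into the rx buffer
 *	hw.cxgbe.fl_pad="-1"		# let the driver pick the pad boundary
 *	hw.cxgbe.spg_len="64"		# 64B status pages
 *	hw.cxgbe.buffer_packing="1"	# pack multiple frames per fl buffer
 *	hw.cxgbe.largest_rx_cluster="9216"	# cap rx clusters at MJUM9BYTES
 */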

struct txpkts {
	u_int wr_type;		/* type 0 or type 1 */
	u_int npkt;		/* # of packets in this work request */
	u_int plen;		/* total payload (sum of all packets) */
	u_int len16;		/* # of 16B pieces used by this work request */
};

/* A packet's SGL.  This + m_pkthdr has all info needed for tx */
struct sgl {
	struct sglist sg;
	struct sglist_seg seg[TX_SGL_SEGS];
};

static int service_iq(struct sge_iq *, int);
static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
static int t4_eth_rx(struct sge_iq *, const struct rss_header *, struct mbuf *);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
static inline void init_eq(struct sge_eq *, int, int, uint8_t, uint16_t,
    char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *,
    int, int);
static int free_iq_fl(struct port_info *, struct sge_iq *, struct sge_fl *);
static void add_fl_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
    struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_mgmtq(struct adapter *);
static int free_mgmtq(struct adapter *);
static int alloc_rxq(struct port_info *, struct sge_rxq *, int, int,
    struct sysctl_oid *);
static int free_rxq(struct port_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
static int alloc_ofld_rxq(struct port_info *, struct sge_ofld_rxq *, int, int,
    struct sysctl_oid *);
static int free_ofld_rxq(struct port_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
static int alloc_nm_rxq(struct port_info *, struct sge_nm_rxq *, int, int,
    struct sysctl_oid *);
static int free_nm_rxq(struct port_info *, struct sge_nm_rxq *);
static int alloc_nm_txq(struct port_info *, struct sge_nm_txq *, int, int,
    struct sysctl_oid *);
static int free_nm_txq(struct port_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
#ifdef TCP_OFFLOAD
static int ofld_eq_alloc(struct adapter *, struct port_info *, struct sge_eq *);
#endif
static int alloc_eq(struct adapter *, struct port_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct port_info *, struct sge_wrq *,
    struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct port_info *, struct sge_txq *, int,
    struct sysctl_oid *);
static int free_txq(struct port_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
static void refill_sfl(void *);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct adapter *, struct sge_fl *);
static void find_best_refill_source(struct adapter *, struct sge_fl *, int);
static void find_safe_refill_source(struct adapter *, struct sge_fl *);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static inline void get_pkt_gl(struct mbuf *, struct sglist *);
static inline u_int txpkt_len16(u_int, u_int);
static inline u_int txpkts0_len16(u_int);
static inline u_int txpkts1_len16(void);
static u_int write_txpkt_wr(struct sge_txq *, struct fw_eth_tx_pkt_wr *,
    struct mbuf *, u_int);
static int try_txpkts(struct mbuf *, struct mbuf *, struct txpkts *, u_int);
static int add_to_txpkts(struct mbuf *, struct txpkts *, u_int);
static u_int write_txpkts_wr(struct sge_txq *, struct fw_eth_tx_pkts_wr *,
    struct mbuf *, const struct txpkts *, u_int);
static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
static inline uint16_t read_hw_cidx(struct sge_eq *);
static inline u_int reclaimable_tx_desc(struct sge_eq *);
static inline u_int total_available_tx_desc(struct sge_eq *);
static u_int reclaim_tx_descs(struct sge_txq *, u_int);
static void tx_reclaim(void *, int);
static __be64 get_flit(struct sglist_seg *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static void wrq_tx_drain(void *, int);
static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);

static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;

/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 2 instead.\n", fl_pktshift);
		fl_pktshift = 2;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);
}

void
t4_sge_modunload(void)
{

	counter_u64_free(extfree_refs);
	counter_u64_free(extfree_rels);
}

uint64_t
t4_sge_extfree_refs(void)
{
	uint64_t refs, rels;

	rels = counter_u64_fetch(extfree_rels);
	refs = counter_u64_fetch(extfree_refs);

	return (refs - rels);
}

void
t4_init_sge_cpl_handlers(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(sc, CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(sc, CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
	t4_register_cpl_handler(sc, CPL_RX_PKT, t4_eth_rx);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
}

static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack;

	pad = fl_pad;
	if (fl_pad < 32 || fl_pad > 4096 || !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.
		 * Set it to 32 in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 32;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - 5);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}
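
/*
 * Worked example (added for illustration, not in the original source): the
 * SGE encodes both boundaries as ilog2(boundary) - 5, which is what the
 * V_INGPADBOUNDARY()/V_INGPACKBOUNDARY() expressions above produce and what
 * t4_read_chip_settings() undoes with 1 << (field + 5):
 *
 *	pad = 32   -> ilog2(32) - 5   = 0
 *	pad = 64   -> ilog2(64) - 5   = 1
 *	pad = 4096 -> ilog2(4096) - 5 = 7
 *
 * The one exception is the T5 pack boundary of 16, which uses the special
 * encoding 0 in SGE_CONTROL2.
 */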

/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sge_flbuf_sizes[] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
		MJUMPAGESIZE - CL_METADATA_SIZE,
		MJUMPAGESIZE - 2 * MSIZE - CL_METADATA_SIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES,
		MCLBYTES - MSIZE - CL_METADATA_SIZE,
		MJUM9BYTES - CL_METADATA_SIZE,
		MJUM16BYTES - CL_METADATA_SIZE,
	};

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	setup_pad_and_pack_boundaries(sc);

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	KASSERT(nitems(sge_flbuf_sizes) <= SGE_FLBUF_SIZES,
	    ("%s: hw buffer size table too big", __func__));
	for (i = 0; i < min(nitems(sge_flbuf_sizes), SGE_FLBUF_SIZES); i++) {
		t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i),
		    sge_flbuf_sizes[i]);
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (cong_drop == 0) {
		m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
		    F_TUNNELCNGDROP3;
		t4_set_reg_field(sc, A_TP_PARA_REG3, m, 0);
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	m = v = F_TDDPTAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.  If
 * padding is in use the buffer's start and end need to be aligned to the pad
 * boundary as well.  We'll just make sure that the size is a multiple of the
 * boundary here, it is up to the buffer allocation code to make sure the start
 * of the buffer is aligned as well.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	int mask = fl_pad ? sc->sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}
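
/*
 * Example of the check above (illustrative only, not from the original
 * source): with padding in use and a pad boundary of 64, MCLBYTES (2048) and
 * MJUM9BYTES (9216) both pass because they are 64-byte multiples, while a
 * size like 2072 would be rejected.  With padding disabled only the "at
 * least 64 bytes, multiple of 16" rule applies.
 */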

/*
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	int i, j, n, rc = 0;
	uint32_t m, v, r;
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {	/* Sorted by size */
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};
	struct sw_zone_info *swz, *safe_swz;
	struct hw_buf_info *hwb;

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	r = t4_read_reg(sc, A_SGE_CONTROL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
		rc = EINVAL;
	}
	s->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);

	if (is_t4(sc))
		s->pack_boundary = s->pad_boundary;
	else {
		r = t4_read_reg(sc, A_SGE_CONTROL2);
		if (G_INGPACKBOUNDARY(r) == 0)
			s->pack_boundary = 16;
		else
			s->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
	}

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	r = t4_read_reg(sc, A_SGE_HOST_PAGE_SIZE);
	if (r != v) {
		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
		rc = EINVAL;
	}

	/* Filter out unusable hw buffer sizes entirely (mark with -2). */
	hwb = &s->hw_buf_info[0];
	for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) {
		r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i));
		hwb->size = r;
		hwb->zidx = hwsz_ok(sc, r) ? -1 : -2;
		hwb->next = -1;
	}

	/*
	 * Create a sorted list in decreasing order of hw buffer sizes (and so
	 * increasing order of spare area) for each software zone.
	 *
	 * If padding is enabled then the start and end of the buffer must align
	 * to the pad boundary; if packing is enabled then they must align with
	 * the pack boundary as well.  Allocations from the cluster zones are
	 * aligned to min(size, 4K), so the buffer starts at that alignment and
	 * ends at hwb->size alignment.  If mbuf inlining is allowed the
	 * starting alignment will be reduced to MSIZE and the driver will
	 * exercise appropriate caution when deciding on the best buffer layout
	 * to use.
	 */
	n = 0;	/* no usable buffer size to begin with */
	swz = &s->sw_zone_info[0];
	safe_swz = NULL;
	for (i = 0; i < SW_ZONE_SIZES; i++, swz++) {
		int8_t head = -1, tail = -1;

		swz->size = sw_buf_sizes[i];
		swz->zone = m_getzone(swz->size);
		swz->type = m_gettype(swz->size);

		if (swz->size < PAGE_SIZE) {
			MPASS(powerof2(swz->size));
			if (fl_pad && (swz->size % sc->sge.pad_boundary != 0))
				continue;
		}

		if (swz->size == safest_rx_cluster)
			safe_swz = swz;

		hwb = &s->hw_buf_info[0];
		for (j = 0; j < SGE_FLBUF_SIZES; j++, hwb++) {
			if (hwb->zidx != -1 || hwb->size > swz->size)
				continue;
#ifdef INVARIANTS
			if (fl_pad)
				MPASS(hwb->size % sc->sge.pad_boundary == 0);
#endif
			hwb->zidx = i;
			if (head == -1)
				head = tail = j;
			else if (hwb->size < s->hw_buf_info[tail].size) {
				s->hw_buf_info[tail].next = j;
				tail = j;
			} else {
				int8_t *cur;
				struct hw_buf_info *t;

				for (cur = &head; *cur != -1; cur = &t->next) {
					t = &s->hw_buf_info[*cur];
					if (hwb->size == t->size) {
						hwb->zidx = -2;
						break;
					}
					if (hwb->size > t->size) {
						hwb->next = *cur;
						*cur = j;
						break;
					}
				}
			}
		}
		swz->head_hwidx = head;
		swz->tail_hwidx = tail;

		if (tail != -1) {
			n++;
			if (swz->size - s->hw_buf_info[tail].size >=
			    CL_METADATA_SIZE)
				sc->flags |= BUF_PACKING_OK;
		}
	}
	if (n == 0) {
		device_printf(sc->dev, "no usable SGE FL buffer size.\n");
		rc = EINVAL;
	}

	s->safe_hwidx1 = -1;
	s->safe_hwidx2 = -1;
	if (safe_swz != NULL) {
		s->safe_hwidx1 = safe_swz->head_hwidx;
		for (i = safe_swz->head_hwidx; i != -1; i = hwb->next) {
			int spare;

			hwb = &s->hw_buf_info[i];
#ifdef INVARIANTS
			if (fl_pad)
				MPASS(hwb->size % sc->sge.pad_boundary == 0);
#endif
			spare = safe_swz->size - hwb->size;
			if (spare >= CL_METADATA_SIZE) {
				s->safe_hwidx2 = i;
				break;
			}
		}
	}

	r = t4_read_reg(sc, A_SGE_INGRESS_RX_THRESHOLD);
	s->counter_val[0] = G_THRESHOLD_0(r);
	s->counter_val[1] = G_THRESHOLD_1(r);
	s->counter_val[2] = G_THRESHOLD_2(r);
	s->counter_val[3] = G_THRESHOLD_3(r);

	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_0_AND_1);
	s->timer_val[0] = G_TIMERVALUE0(r) / core_ticks_per_usec(sc);
	s->timer_val[1] = G_TIMERVALUE1(r) / core_ticks_per_usec(sc);
	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_2_AND_3);
	s->timer_val[2] = G_TIMERVALUE2(r) / core_ticks_per_usec(sc);
	s->timer_val[3] = G_TIMERVALUE3(r) / core_ticks_per_usec(sc);
	r = t4_read_reg(sc, A_SGE_TIMER_VALUE_4_AND_5);
	s->timer_val[4] = G_TIMERVALUE4(r) / core_ticks_per_usec(sc);
	s->timer_val[5] = G_TIMERVALUE5(r) / core_ticks_per_usec(sc);

	if (cong_drop == 0) {
		m = F_TUNNELCNGDROP0 | F_TUNNELCNGDROP1 | F_TUNNELCNGDROP2 |
		    F_TUNNELCNGDROP3;
		r = t4_read_reg(sc, A_TP_PARA_REG3);
		if (r & m) {
			device_printf(sc->dev,
			    "invalid TP_PARA_REG3(0x%x)\n", r);
			rc = EINVAL;
		}
	}

	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
	if (r != v) {
		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
		rc = EINVAL;
	}

	m = v = F_TDDPTAGTCB;
	r = t4_read_reg(sc, A_ULP_RX_CTL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
		rc = EINVAL;
	}

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	r = t4_read_reg(sc, A_TP_PARA_REG5);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
		rc = EINVAL;
	}

	r = t4_read_reg(sc, A_SGE_CONM_CTRL);
	s->fl_starve_threshold = G_EGRTHRESHOLD(r) * 2 + 1;
	if (is_t4(sc))
		s->fl_starve_threshold2 = s->fl_starve_threshold;
	else
		s->fl_starve_threshold2 = G_EGRTHRESHOLDPACKING(r) * 2 + 1;

	/* egress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
	s->eq_s_qpp = r & M_QUEUESPERPAGEPF0;

	/* ingress queues: log2 of # of doorbells per BAR2 page */
	r = t4_read_reg(sc, A_SGE_INGRESS_QUEUES_PER_PAGE_PF);
	r >>= S_QUEUESPERPAGEPF0 +
	    (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
	s->iq_s_qpp = r & M_QUEUESPERPAGEPF0;

	t4_init_tp_params(sc);

	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	return (rc);
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
	    CTLTYPE_STRING | CTLFLAG_RD, &sc->sge, 0, sysctl_bufsizes, "A",
	    "freelist buffer sizes");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
	    NULL, fl_pktshift, "payload DMA offset in rx buffer (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
	    NULL, sc->sge.pad_boundary, "payload pad boundary (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
	    NULL, spg_len, "status page size (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
	    NULL, cong_drop, "congestion drop setting");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
	    NULL, sc->sge.pack_boundary, "payload pack boundary (bytes)");
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue and the management queue.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	int rc;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * Management queue.  This is just a control queue that uses the fwq as
	 * its associated iq.
	 */
	rc = alloc_mgmtq(sc);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	free_mgmtq(sc);
	free_fwq(sc);

	return (0);
}

static inline int
port_intr_count(struct port_info *pi)
{
	int rc = 0;

	if (pi->flags & INTR_RXQ)
		rc += pi->nrxq;
#ifdef TCP_OFFLOAD
	if (pi->flags & INTR_OFLD_RXQ)
		rc += pi->nofldrxq;
#endif
#ifdef DEV_NETMAP
	if (pi->flags & INTR_NM_RXQ)
		rc += pi->nnmrxq;
#endif
	return (rc);
}

static inline int
first_vector(struct port_info *pi)
{
	struct adapter *sc = pi->adapter;
	int rc = T4_EXTRA_INTR, i;

	if (sc->intr_count == 1)
		return (0);

	for_each_port(sc, i) {
		if (i == pi->port_id)
			break;

		rc += port_intr_count(sc->port[i]);
	}

	return (rc);
}

/*
 * Given an arbitrary "index," come up with an iq that can be used by other
 * queues (of this port) for interrupt forwarding, SGE egress updates, etc.
 * The iq returned is guaranteed to be something that takes direct interrupts.
 */
static struct sge_iq *
port_intr_iq(struct port_info *pi, int idx)
{
	struct adapter *sc = pi->adapter;
	struct sge *s = &sc->sge;
	struct sge_iq *iq = NULL;
	int nintr, i;

	if (sc->intr_count == 1)
		return (&sc->sge.fwq);

	nintr = port_intr_count(pi);
	KASSERT(nintr != 0,
	    ("%s: pi %p has no exclusive interrupts, total interrupts = %d",
	    __func__, pi, sc->intr_count));
#ifdef DEV_NETMAP
	/* Exclude netmap queues as they can't take anyone else's interrupts */
	if (pi->flags & INTR_NM_RXQ)
		nintr -= pi->nnmrxq;
	KASSERT(nintr > 0,
	    ("%s: pi %p has nintr %d after netmap adjustment of %d", __func__,
	    pi, nintr, pi->nnmrxq));
#endif
	i = idx % nintr;

	if (pi->flags & INTR_RXQ) {
		if (i < pi->nrxq) {
			iq = &s->rxq[pi->first_rxq + i].iq;
			goto done;
		}
		i -= pi->nrxq;
	}
#ifdef TCP_OFFLOAD
	if (pi->flags & INTR_OFLD_RXQ) {
		if (i < pi->nofldrxq) {
			iq = &s->ofld_rxq[pi->first_ofld_rxq + i].iq;
			goto done;
		}
		i -= pi->nofldrxq;
	}
#endif
	panic("%s: pi %p, intr_flags 0x%lx, idx %d, total intr %d\n", __func__,
	    pi, pi->flags & INTR_ALL, idx, nintr);
done:
	MPASS(iq != NULL);
	KASSERT(iq->flags & IQ_INTR,
	    ("%s: iq %p (port %p, intr_flags 0x%lx, idx %d)", __func__, iq, pi,
	    pi->flags & INTR_ALL, idx));
	return (iq);
}

/* Maximum payload that can be delivered with a single iq descriptor */
static inline int
mtu_to_max_payload(struct adapter *sc, int mtu, const int toe)
{
	int payload;

#ifdef TCP_OFFLOAD
	if (toe) {
		payload = sc->tt.rx_coalesce ?
		    G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu;
	} else {
#endif
		/* large enough even when hw VLAN extraction is disabled */
		payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN +
		    mtu;
#ifdef TCP_OFFLOAD
	}
#endif

	return (payload);
}
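
/*
 * Quick sanity example (added for illustration, not in the original source):
 * for a plain NIC queue with the default fl_pktshift of 2 and an MTU of
 * 1500, the value computed above is 2 + 14 (ETHER_HDR_LEN) +
 * 4 (ETHER_VLAN_ENCAP_LEN) + 1500 = 1520 bytes, which t4_setup_port_queues()
 * below passes to init_fl() as maxp when sizing the freelist buffers.
 */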
954265425Snp G_RXCOALESCESIZE(t4_read_reg(sc, A_TP_PARA_REG2)) : mtu; 955265425Snp } else { 956265425Snp#endif 957265425Snp /* large enough even when hw VLAN extraction is disabled */ 958265425Snp payload = fl_pktshift + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + 959265425Snp mtu; 960265425Snp#ifdef TCP_OFFLOAD 961265425Snp } 962265425Snp#endif 963252728Snp 964265425Snp return (payload); 965252728Snp} 966252728Snp 967218792Snpint 968228561Snpt4_setup_port_queues(struct port_info *pi) 969218792Snp{ 970228561Snp int rc = 0, i, j, intr_idx, iqid; 971218792Snp struct sge_rxq *rxq; 972218792Snp struct sge_txq *txq; 973228561Snp struct sge_wrq *ctrlq; 974237263Snp#ifdef TCP_OFFLOAD 975228561Snp struct sge_ofld_rxq *ofld_rxq; 976228561Snp struct sge_wrq *ofld_txq; 977228561Snp#endif 978270297Snp#ifdef DEV_NETMAP 979270297Snp struct sge_nm_rxq *nm_rxq; 980270297Snp struct sge_nm_txq *nm_txq; 981270297Snp#endif 982218792Snp char name[16]; 983218792Snp struct adapter *sc = pi->adapter; 984252728Snp struct ifnet *ifp = pi->ifp; 985237263Snp struct sysctl_oid *oid = device_get_sysctl_tree(pi->dev); 986228561Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 987281212Snp int maxp, mtu = ifp->if_mtu; 988218792Snp 989228561Snp /* Interrupt vector to start from (when using multiple vectors) */ 990228561Snp intr_idx = first_vector(pi); 991228561Snp 992228561Snp /* 993270297Snp * First pass over all NIC and TOE rx queues: 994228561Snp * a) initialize iq and fl 995228561Snp * b) allocate queue iff it will take direct interrupts. 996228561Snp */ 997265425Snp maxp = mtu_to_max_payload(sc, mtu, 0); 998270297Snp if (pi->flags & INTR_RXQ) { 999270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 1000270297Snp CTLFLAG_RD, NULL, "rx queues"); 1001270297Snp } 1002218792Snp for_each_rxq(pi, i, rxq) { 1003218792Snp 1004270297Snp init_iq(&rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, pi->qsize_rxq); 1005218792Snp 1006218792Snp snprintf(name, sizeof(name), "%s rxq%d-fl", 1007218792Snp device_get_nameunit(pi->dev), i); 1008281212Snp init_fl(sc, &rxq->fl, pi->qsize_rxq / 8, maxp, name); 1009218792Snp 1010270297Snp if (pi->flags & INTR_RXQ) { 1011228561Snp rxq->iq.flags |= IQ_INTR; 1012228561Snp rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1013228561Snp if (rc != 0) 1014228561Snp goto done; 1015228561Snp intr_idx++; 1016228561Snp } 1017228561Snp } 1018237263Snp#ifdef TCP_OFFLOAD 1019265425Snp maxp = mtu_to_max_payload(sc, mtu, 1); 1020270297Snp if (is_offload(sc) && pi->flags & INTR_OFLD_RXQ) { 1021270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1022270297Snp CTLFLAG_RD, NULL, 1023270297Snp "rx queues for offloaded TCP connections"); 1024270297Snp } 1025228561Snp for_each_ofld_rxq(pi, i, ofld_rxq) { 1026228561Snp 1027228561Snp init_iq(&ofld_rxq->iq, sc, pi->tmr_idx, pi->pktc_idx, 1028270297Snp pi->qsize_rxq); 1029228561Snp 1030228561Snp snprintf(name, sizeof(name), "%s ofld_rxq%d-fl", 1031228561Snp device_get_nameunit(pi->dev), i); 1032281212Snp init_fl(sc, &ofld_rxq->fl, pi->qsize_rxq / 8, maxp, name); 1033228561Snp 1034270297Snp if (pi->flags & INTR_OFLD_RXQ) { 1035228561Snp ofld_rxq->iq.flags |= IQ_INTR; 1036270297Snp rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1037228561Snp if (rc != 0) 1038228561Snp goto done; 1039228561Snp intr_idx++; 1040228561Snp } 1041228561Snp } 1042228561Snp#endif 1043270297Snp#ifdef DEV_NETMAP 1044270297Snp /* 1045270297Snp * We don't have buffers to back the netmap rx queues right now so we 1046270297Snp * create the queues in a way that doesn't 
set off any congestion signal 1047270297Snp * in the chip. 1048270297Snp */ 1049270297Snp if (pi->flags & INTR_NM_RXQ) { 1050270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_rxq", 1051270297Snp CTLFLAG_RD, NULL, "rx queues for netmap"); 1052270297Snp for_each_nm_rxq(pi, i, nm_rxq) { 1053270297Snp rc = alloc_nm_rxq(pi, nm_rxq, intr_idx, i, oid); 1054270297Snp if (rc != 0) 1055270297Snp goto done; 1056270297Snp intr_idx++; 1057270297Snp } 1058270297Snp } 1059270297Snp#endif 1060228561Snp 1061228561Snp /* 1062270297Snp * Second pass over all NIC and TOE rx queues. The queues forwarding 1063228561Snp * their interrupts are allocated now. 1064228561Snp */ 1065228561Snp j = 0; 1066270297Snp if (!(pi->flags & INTR_RXQ)) { 1067270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "rxq", 1068270297Snp CTLFLAG_RD, NULL, "rx queues"); 1069270297Snp for_each_rxq(pi, i, rxq) { 1070270297Snp MPASS(!(rxq->iq.flags & IQ_INTR)); 1071228561Snp 1072270297Snp intr_idx = port_intr_iq(pi, j)->abs_id; 1073228561Snp 1074270297Snp rc = alloc_rxq(pi, rxq, intr_idx, i, oid); 1075270297Snp if (rc != 0) 1076270297Snp goto done; 1077270297Snp j++; 1078270297Snp } 1079218792Snp } 1080237263Snp#ifdef TCP_OFFLOAD 1081270297Snp if (is_offload(sc) && !(pi->flags & INTR_OFLD_RXQ)) { 1082270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_rxq", 1083270297Snp CTLFLAG_RD, NULL, 1084270297Snp "rx queues for offloaded TCP connections"); 1085270297Snp for_each_ofld_rxq(pi, i, ofld_rxq) { 1086270297Snp MPASS(!(ofld_rxq->iq.flags & IQ_INTR)); 1087228561Snp 1088270297Snp intr_idx = port_intr_iq(pi, j)->abs_id; 1089228561Snp 1090270297Snp rc = alloc_ofld_rxq(pi, ofld_rxq, intr_idx, i, oid); 1091270297Snp if (rc != 0) 1092270297Snp goto done; 1093270297Snp j++; 1094270297Snp } 1095228561Snp } 1096228561Snp#endif 1097270297Snp#ifdef DEV_NETMAP 1098270297Snp if (!(pi->flags & INTR_NM_RXQ)) 1099270297Snp CXGBE_UNIMPLEMENTED(__func__); 1100270297Snp#endif 1101228561Snp 1102228561Snp /* 1103228561Snp * Now the tx queues. Only one pass needed. 
1104228561Snp */ 1105228561Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD, 1106228561Snp NULL, "tx queues"); 1107228561Snp j = 0; 1108218792Snp for_each_txq(pi, i, txq) { 1109228561Snp iqid = port_intr_iq(pi, j)->cntxt_id; 1110218792Snp snprintf(name, sizeof(name), "%s txq%d", 1111218792Snp device_get_nameunit(pi->dev), i); 1112228561Snp init_eq(&txq->eq, EQ_ETH, pi->qsize_txq, pi->tx_chan, iqid, 1113228561Snp name); 1114218792Snp 1115228561Snp rc = alloc_txq(pi, txq, i, oid); 1116218792Snp if (rc != 0) 1117218792Snp goto done; 1118228561Snp j++; 1119218792Snp } 1120237263Snp#ifdef TCP_OFFLOAD 1121228561Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ofld_txq", 1122228561Snp CTLFLAG_RD, NULL, "tx queues for offloaded TCP connections"); 1123228561Snp for_each_ofld_txq(pi, i, ofld_txq) { 1124270297Snp struct sysctl_oid *oid2; 1125228561Snp 1126228561Snp iqid = port_intr_iq(pi, j)->cntxt_id; 1127228561Snp snprintf(name, sizeof(name), "%s ofld_txq%d", 1128228561Snp device_get_nameunit(pi->dev), i); 1129228561Snp init_eq(&ofld_txq->eq, EQ_OFLD, pi->qsize_txq, pi->tx_chan, 1130228561Snp iqid, name); 1131228561Snp 1132228561Snp snprintf(name, sizeof(name), "%d", i); 1133228561Snp oid2 = SYSCTL_ADD_NODE(&pi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO, 1134228561Snp name, CTLFLAG_RD, NULL, "offload tx queue"); 1135228561Snp 1136228561Snp rc = alloc_wrq(sc, pi, ofld_txq, oid2); 1137228561Snp if (rc != 0) 1138228561Snp goto done; 1139228561Snp j++; 1140228561Snp } 1141228561Snp#endif 1142270297Snp#ifdef DEV_NETMAP 1143270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "nm_txq", 1144270297Snp CTLFLAG_RD, NULL, "tx queues for netmap use"); 1145270297Snp for_each_nm_txq(pi, i, nm_txq) { 1146270297Snp iqid = pi->first_nm_rxq + (j % pi->nnmrxq); 1147270297Snp rc = alloc_nm_txq(pi, nm_txq, iqid, i, oid); 1148270297Snp if (rc != 0) 1149270297Snp goto done; 1150270297Snp j++; 1151270297Snp } 1152270297Snp#endif 1153228561Snp 1154228561Snp /* 1155228561Snp * Finally, the control queue. 
1156228561Snp */ 1157228561Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, "ctrlq", CTLFLAG_RD, 1158228561Snp NULL, "ctrl queue"); 1159228561Snp ctrlq = &sc->sge.ctrlq[pi->port_id]; 1160228561Snp iqid = port_intr_iq(pi, 0)->cntxt_id; 1161228561Snp snprintf(name, sizeof(name), "%s ctrlq", device_get_nameunit(pi->dev)); 1162228561Snp init_eq(&ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, pi->tx_chan, iqid, name); 1163228561Snp rc = alloc_wrq(sc, pi, ctrlq, oid); 1164228561Snp 1165218792Snpdone: 1166218792Snp if (rc) 1167228561Snp t4_teardown_port_queues(pi); 1168218792Snp 1169218792Snp return (rc); 1170218792Snp} 1171218792Snp 1172218792Snp/* 1173218792Snp * Idempotent 1174218792Snp */ 1175218792Snpint 1176228561Snpt4_teardown_port_queues(struct port_info *pi) 1177218792Snp{ 1178218792Snp int i; 1179228561Snp struct adapter *sc = pi->adapter; 1180218792Snp struct sge_rxq *rxq; 1181218792Snp struct sge_txq *txq; 1182237263Snp#ifdef TCP_OFFLOAD 1183228561Snp struct sge_ofld_rxq *ofld_rxq; 1184228561Snp struct sge_wrq *ofld_txq; 1185228561Snp#endif 1186270297Snp#ifdef DEV_NETMAP 1187270297Snp struct sge_nm_rxq *nm_rxq; 1188270297Snp struct sge_nm_txq *nm_txq; 1189270297Snp#endif 1190218792Snp 1191218792Snp /* Do this before freeing the queues */ 1192228561Snp if (pi->flags & PORT_SYSCTL_CTX) { 1193218792Snp sysctl_ctx_free(&pi->ctx); 1194228561Snp pi->flags &= ~PORT_SYSCTL_CTX; 1195218792Snp } 1196218792Snp 1197228561Snp /* 1198228561Snp * Take down all the tx queues first, as they reference the rx queues 1199228561Snp * (for egress updates, etc.). 1200228561Snp */ 1201228561Snp 1202228561Snp free_wrq(sc, &sc->sge.ctrlq[pi->port_id]); 1203228561Snp 1204218792Snp for_each_txq(pi, i, txq) { 1205218792Snp free_txq(pi, txq); 1206218792Snp } 1207237263Snp#ifdef TCP_OFFLOAD 1208228561Snp for_each_ofld_txq(pi, i, ofld_txq) { 1209228561Snp free_wrq(sc, ofld_txq); 1210228561Snp } 1211228561Snp#endif 1212270297Snp#ifdef DEV_NETMAP 1213270297Snp for_each_nm_txq(pi, i, nm_txq) 1214270297Snp free_nm_txq(pi, nm_txq); 1215270297Snp#endif 1216228561Snp 1217228561Snp /* 1218228561Snp * Then take down the rx queues that forward their interrupts, as they 1219228561Snp * reference other rx queues. 1220228561Snp */ 1221228561Snp 1222218792Snp for_each_rxq(pi, i, rxq) { 1223228561Snp if ((rxq->iq.flags & IQ_INTR) == 0) 1224228561Snp free_rxq(pi, rxq); 1225218792Snp } 1226237263Snp#ifdef TCP_OFFLOAD 1227228561Snp for_each_ofld_rxq(pi, i, ofld_rxq) { 1228228561Snp if ((ofld_rxq->iq.flags & IQ_INTR) == 0) 1229228561Snp free_ofld_rxq(pi, ofld_rxq); 1230228561Snp } 1231228561Snp#endif 1232270297Snp#ifdef DEV_NETMAP 1233270297Snp for_each_nm_rxq(pi, i, nm_rxq) 1234270297Snp free_nm_rxq(pi, nm_rxq); 1235270297Snp#endif 1236228561Snp 1237228561Snp /* 1238228561Snp * Then take down the rx queues that take direct interrupts. 1239228561Snp */ 1240228561Snp 1241228561Snp for_each_rxq(pi, i, rxq) { 1242228561Snp if (rxq->iq.flags & IQ_INTR) 1243228561Snp free_rxq(pi, rxq); 1244228561Snp } 1245237263Snp#ifdef TCP_OFFLOAD 1246228561Snp for_each_ofld_rxq(pi, i, ofld_rxq) { 1247228561Snp if (ofld_rxq->iq.flags & IQ_INTR) 1248228561Snp free_ofld_rxq(pi, ofld_rxq); 1249228561Snp } 1250228561Snp#endif 1251228561Snp 1252218792Snp return (0); 1253218792Snp} 1254218792Snp 1255228561Snp/* 1256228561Snp * Deals with errors and the firmware event queue. All data rx queues forward 1257228561Snp * their interrupt to the firmware event queue. 
1258228561Snp */ 1259218792Snpvoid 1260218792Snpt4_intr_all(void *arg) 1261218792Snp{ 1262218792Snp struct adapter *sc = arg; 1263228561Snp struct sge_iq *fwq = &sc->sge.fwq; 1264218792Snp 1265218792Snp t4_intr_err(arg); 1266228561Snp if (atomic_cmpset_int(&fwq->state, IQS_IDLE, IQS_BUSY)) { 1267228561Snp service_iq(fwq, 0); 1268228561Snp atomic_cmpset_int(&fwq->state, IQS_BUSY, IQS_IDLE); 1269218792Snp } 1270218792Snp} 1271218792Snp 1272218792Snp/* Deals with error interrupts */ 1273218792Snpvoid 1274218792Snpt4_intr_err(void *arg) 1275218792Snp{ 1276218792Snp struct adapter *sc = arg; 1277218792Snp 1278222510Snp t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0); 1279218792Snp t4_slow_intr_handler(sc); 1280218792Snp} 1281218792Snp 1282218792Snpvoid 1283218792Snpt4_intr_evt(void *arg) 1284218792Snp{ 1285218792Snp struct sge_iq *iq = arg; 1286220649Snp 1287228561Snp if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1288228561Snp service_iq(iq, 0); 1289228561Snp atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1290222510Snp } 1291220649Snp} 1292220649Snp 1293228561Snpvoid 1294228561Snpt4_intr(void *arg) 1295220649Snp{ 1296220649Snp struct sge_iq *iq = arg; 1297228561Snp 1298228561Snp if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) { 1299228561Snp service_iq(iq, 0); 1300228561Snp atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE); 1301228561Snp } 1302228561Snp} 1303228561Snp 1304228561Snp/* 1305228561Snp * Deals with anything and everything on the given ingress queue. 1306228561Snp */ 1307228561Snpstatic int 1308228561Snpservice_iq(struct sge_iq *iq, int budget) 1309228561Snp{ 1310228561Snp struct sge_iq *q; 1311237263Snp struct sge_rxq *rxq = iq_to_rxq(iq); /* Use iff iq is part of rxq */ 1312270297Snp struct sge_fl *fl; /* Use iff IQ_HAS_FL */ 1313218792Snp struct adapter *sc = iq->adapter; 1314270297Snp struct iq_desc *d = &iq->desc[iq->cidx]; 1315270297Snp int ndescs = 0, limit; 1316270297Snp int rsp_type, refill; 1317228561Snp uint32_t lq; 1318270297Snp uint16_t fl_hw_cidx; 1319228561Snp struct mbuf *m0; 1320228561Snp STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql); 1321255015Snp#if defined(INET) || defined(INET6) 1322255015Snp const struct timeval lro_timeout = {0, sc->lro_timeout}; 1323255015Snp#endif 1324218792Snp 1325228561Snp KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq)); 1326218792Snp 1327270297Snp limit = budget ? budget : iq->qsize / 16; 1328270297Snp 1329270297Snp if (iq->flags & IQ_HAS_FL) { 1330270297Snp fl = &rxq->fl; 1331270297Snp fl_hw_cidx = fl->hw_cidx; /* stable snapshot */ 1332270297Snp } else { 1333270297Snp fl = NULL; 1334270297Snp fl_hw_cidx = 0; /* to silence gcc warning */ 1335270297Snp } 1336270297Snp 1337228561Snp /* 1338228561Snp * We always come back and check the descriptor ring for new indirect 1339228561Snp * interrupts and other responses after running a single handler. 
1340228561Snp */ 1341228561Snp for (;;) { 1342270297Snp while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) { 1343218792Snp 1344228561Snp rmb(); 1345218792Snp 1346270297Snp refill = 0; 1347228561Snp m0 = NULL; 1348270297Snp rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen); 1349270297Snp lq = be32toh(d->rsp.pldbuflen_qid); 1350218792Snp 1351228561Snp switch (rsp_type) { 1352228561Snp case X_RSPD_TYPE_FLBUF: 1353228561Snp 1354228561Snp KASSERT(iq->flags & IQ_HAS_FL, 1355228561Snp ("%s: data for an iq (%p) with no freelist", 1356228561Snp __func__, iq)); 1357228561Snp 1358270297Snp m0 = get_fl_payload(sc, fl, lq); 1359255050Snp if (__predict_false(m0 == NULL)) 1360255050Snp goto process_iql; 1361270297Snp refill = IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 2; 1362228561Snp#ifdef T4_PKT_TIMESTAMP 1363228561Snp /* 1364228561Snp * 60 bit timestamp for the payload is 1365228561Snp * *(uint64_t *)m0->m_pktdat. Note that it is 1366228561Snp * in the leading free-space in the mbuf. The 1367228561Snp * kernel can clobber it during a pullup, 1368228561Snp * m_copymdata, etc. You need to make sure that 1369228561Snp * the mbuf reaches you unmolested if you care 1370228561Snp * about the timestamp. 1371228561Snp */ 1372228561Snp *(uint64_t *)m0->m_pktdat = 1373228561Snp be64toh(ctrl->u.last_flit) & 1374228561Snp 0xfffffffffffffff; 1375228561Snp#endif 1376228561Snp 1377228561Snp /* fall through */ 1378228561Snp 1379228561Snp case X_RSPD_TYPE_CPL: 1380270297Snp KASSERT(d->rss.opcode < NUM_CPL_CMDS, 1381228561Snp ("%s: bad opcode %02x.", __func__, 1382270297Snp d->rss.opcode)); 1383270297Snp sc->cpl_handler[d->rss.opcode](iq, &d->rss, m0); 1384228561Snp break; 1385228561Snp 1386228561Snp case X_RSPD_TYPE_INTR: 1387228561Snp 1388228561Snp /* 1389228561Snp * Interrupts should be forwarded only to queues 1390228561Snp * that are not forwarding their interrupts. 1391228561Snp * This means service_iq can recurse but only 1 1392228561Snp * level deep. 1393228561Snp */ 1394228561Snp KASSERT(budget == 0, 1395228561Snp ("%s: budget %u, rsp_type %u", __func__, 1396228561Snp budget, rsp_type)); 1397228561Snp 1398255005Snp /* 1399255005Snp * There are 1K interrupt-capable queues (qids 0 1400255005Snp * through 1023). A response type indicating a 1401255005Snp * forwarded interrupt with a qid >= 1K is an 1402255005Snp * iWARP async notification. 
1403255005Snp */ 1404255005Snp if (lq >= 1024) { 1405270297Snp sc->an_handler(iq, &d->rsp); 1406255005Snp break; 1407255005Snp } 1408255005Snp 1409228561Snp q = sc->sge.iqmap[lq - sc->sge.iq_start]; 1410228561Snp if (atomic_cmpset_int(&q->state, IQS_IDLE, 1411228561Snp IQS_BUSY)) { 1412270297Snp if (service_iq(q, q->qsize / 16) == 0) { 1413228561Snp atomic_cmpset_int(&q->state, 1414228561Snp IQS_BUSY, IQS_IDLE); 1415228561Snp } else { 1416228561Snp STAILQ_INSERT_TAIL(&iql, q, 1417228561Snp link); 1418228561Snp } 1419228561Snp } 1420228561Snp break; 1421228561Snp 1422228561Snp default: 1423255005Snp KASSERT(0, 1424255005Snp ("%s: illegal response type %d on iq %p", 1425255005Snp __func__, rsp_type, iq)); 1426255005Snp log(LOG_ERR, 1427255005Snp "%s: illegal response type %d on iq %p", 1428255005Snp device_get_nameunit(sc->dev), rsp_type, iq); 1429237263Snp break; 1430228561Snp } 1431228561Snp 1432270297Snp d++; 1433270297Snp if (__predict_false(++iq->cidx == iq->sidx)) { 1434270297Snp iq->cidx = 0; 1435270297Snp iq->gen ^= F_RSPD_GEN; 1436270297Snp d = &iq->desc[0]; 1437265425Snp } 1438270297Snp if (__predict_false(++ndescs == limit)) { 1439228561Snp t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 1440228561Snp V_CIDXINC(ndescs) | 1441228561Snp V_INGRESSQID(iq->cntxt_id) | 1442228561Snp V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX))); 1443228561Snp ndescs = 0; 1444228561Snp 1445255015Snp#if defined(INET) || defined(INET6) 1446255015Snp if (iq->flags & IQ_LRO_ENABLED && 1447255015Snp sc->lro_timeout != 0) { 1448255015Snp tcp_lro_flush_inactive(&rxq->lro, 1449255015Snp &lro_timeout); 1450255015Snp } 1451255015Snp#endif 1452255015Snp 1453267244Snp if (budget) { 1454270297Snp if (iq->flags & IQ_HAS_FL) { 1455267244Snp FL_LOCK(fl); 1456267244Snp refill_fl(sc, fl, 32); 1457267244Snp FL_UNLOCK(fl); 1458267244Snp } 1459228561Snp return (EINPROGRESS); 1460267244Snp } 1461228561Snp } 1462270297Snp if (refill) { 1463270297Snp FL_LOCK(fl); 1464270297Snp refill_fl(sc, fl, 32); 1465270297Snp FL_UNLOCK(fl); 1466270297Snp fl_hw_cidx = fl->hw_cidx; 1467270297Snp } 1468218792Snp } 1469222510Snp 1470255050Snpprocess_iql: 1471228561Snp if (STAILQ_EMPTY(&iql)) 1472228561Snp break; 1473228561Snp 1474228561Snp /* 1475228561Snp * Process the head only, and send it to the back of the list if 1476228561Snp * it's still not done. 
1477228561Snp */ 1478228561Snp q = STAILQ_FIRST(&iql); 1479228561Snp STAILQ_REMOVE_HEAD(&iql, link); 1480228561Snp if (service_iq(q, q->qsize / 8) == 0) 1481228561Snp atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE); 1482228561Snp else 1483228561Snp STAILQ_INSERT_TAIL(&iql, q, link); 1484218792Snp } 1485218792Snp 1486237819Snp#if defined(INET) || defined(INET6) 1487228561Snp if (iq->flags & IQ_LRO_ENABLED) { 1488228561Snp struct lro_ctrl *lro = &rxq->lro; 1489228561Snp struct lro_entry *l; 1490228561Snp 1491228561Snp while (!SLIST_EMPTY(&lro->lro_active)) { 1492228561Snp l = SLIST_FIRST(&lro->lro_active); 1493228561Snp SLIST_REMOVE_HEAD(&lro->lro_active, next); 1494228561Snp tcp_lro_flush(lro, l); 1495228561Snp } 1496228561Snp } 1497228561Snp#endif 1498228561Snp 1499228561Snp t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) | 1500228561Snp V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params)); 1501228561Snp 1502228561Snp if (iq->flags & IQ_HAS_FL) { 1503228561Snp int starved; 1504228561Snp 1505228561Snp FL_LOCK(fl); 1506265425Snp starved = refill_fl(sc, fl, 64); 1507228561Snp FL_UNLOCK(fl); 1508228561Snp if (__predict_false(starved != 0)) 1509228561Snp add_fl_to_sfl(sc, fl); 1510228561Snp } 1511228561Snp 1512228561Snp return (0); 1513218792Snp} 1514218792Snp 1515265425Snpstatic inline int 1516265425Snpcl_has_metadata(struct sge_fl *fl, struct cluster_layout *cll) 1517255050Snp{ 1518265425Snp int rc = fl->flags & FL_BUF_PACKING || cll->region1 > 0; 1519255050Snp 1520265425Snp if (rc) 1521265425Snp MPASS(cll->region3 >= CL_METADATA_SIZE); 1522255050Snp 1523265425Snp return (rc); 1524255050Snp} 1525255050Snp 1526265425Snpstatic inline struct cluster_metadata * 1527265425Snpcl_metadata(struct adapter *sc, struct sge_fl *fl, struct cluster_layout *cll, 1528265425Snp caddr_t cl) 1529255050Snp{ 1530255050Snp 1531265425Snp if (cl_has_metadata(fl, cll)) { 1532265425Snp struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1533255050Snp 1534265425Snp return ((struct cluster_metadata *)(cl + swz->size) - 1); 1535255050Snp } 1536265425Snp return (NULL); 1537255050Snp} 1538255050Snp 1539255050Snpstatic int 1540255050Snprxb_free(struct mbuf *m, void *arg1, void *arg2) 1541255050Snp{ 1542255050Snp uma_zone_t zone = arg1; 1543255050Snp caddr_t cl = arg2; 1544255050Snp 1545255050Snp uma_zfree(zone, cl); 1546269356Snp counter_u64_add(extfree_rels, 1); 1547255050Snp 1548255050Snp return (EXT_FREE_OK); 1549255050Snp} 1550255050Snp 1551265425Snp/* 1552265425Snp * The mbuf returned by this function could be allocated from zone_mbuf or 1553265425Snp * constructed in spare room in the cluster. 
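 * A segment shorter than RX_COPY_THRESHOLD is simply copied into a
 * freshly allocated mbuf when sc_do_rxcopy is set; anything larger
 * stays in the cluster and is attached to an mbuf by reference.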
1554265425Snp * 1555265425Snp * The mbuf carries the payload in one of these ways 1556265425Snp * a) frame inside the mbuf (mbuf from zone_mbuf) 1557265425Snp * b) m_cljset (for clusters without metadata) zone_mbuf 1558265425Snp * c) m_extaddref (cluster with metadata) inline mbuf 1559265425Snp * d) m_extaddref (cluster with metadata) zone_mbuf 1560265425Snp */ 1561255050Snpstatic struct mbuf * 1562281212Snpget_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset, 1563281212Snp int remaining) 1564218792Snp{ 1565265425Snp struct mbuf *m; 1566228561Snp struct fl_sdesc *sd = &fl->sdesc[fl->cidx]; 1567265425Snp struct cluster_layout *cll = &sd->cll; 1568265425Snp struct sw_zone_info *swz = &sc->sge.sw_zone_info[cll->zidx]; 1569265425Snp struct hw_buf_info *hwb = &sc->sge.hw_buf_info[cll->hwidx]; 1570265425Snp struct cluster_metadata *clm = cl_metadata(sc, fl, cll, sd->cl); 1571281212Snp int len, blen; 1572265425Snp caddr_t payload; 1573218792Snp 1574281212Snp blen = hwb->size - fl->rx_offset; /* max possible in this buf */ 1575281212Snp len = min(remaining, blen); 1576265425Snp payload = sd->cl + cll->region1 + fl->rx_offset; 1577281212Snp if (fl->flags & FL_BUF_PACKING) { 1578281212Snp const u_int l = fr_offset + len; 1579281212Snp const u_int pad = roundup2(l, fl->buf_boundary) - l; 1580219290Snp 1581281212Snp if (fl->rx_offset + len + pad < hwb->size) 1582281212Snp blen = len + pad; 1583281212Snp MPASS(fl->rx_offset + blen <= hwb->size); 1584281212Snp } else { 1585281212Snp MPASS(fl->rx_offset == 0); /* not packing */ 1586281212Snp } 1587281212Snp 1588281212Snp 1589265425Snp if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) { 1590255050Snp 1591265425Snp /* 1592265425Snp * Copy payload into a freshly allocated mbuf. 1593265425Snp */ 1594255050Snp 1595281212Snp m = fr_offset == 0 ? 1596265425Snp m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1597265425Snp if (m == NULL) 1598255050Snp return (NULL); 1599265425Snp fl->mbuf_allocated++; 1600255050Snp#ifdef T4_PKT_TIMESTAMP 1601265425Snp /* Leave room for a timestamp */ 1602265425Snp m->m_data += 8; 1603255050Snp#endif 1604265425Snp /* copy data to mbuf */ 1605265425Snp bcopy(payload, mtod(m, caddr_t), len); 1606255050Snp 1607269356Snp } else if (sd->nmbuf * MSIZE < cll->region1) { 1608255050Snp 1609265425Snp /* 1610265425Snp * There's spare room in the cluster for an mbuf. Create one 1611267694Snp * and associate it with the payload that's in the cluster. 1612265425Snp */ 1613255050Snp 1614265425Snp MPASS(clm != NULL); 1615269356Snp m = (struct mbuf *)(sd->cl + sd->nmbuf * MSIZE); 1616265425Snp /* No bzero required */ 1617281212Snp if (m_init(m, NULL, 0, M_NOWAIT, MT_DATA, 1618281212Snp fr_offset == 0 ? M_PKTHDR | M_NOFREE : M_NOFREE)) 1619265425Snp return (NULL); 1620265425Snp fl->mbuf_inlined++; 1621281212Snp m_extaddref(m, payload, blen, &clm->refcount, rxb_free, 1622265425Snp swz->zone, sd->cl); 1623269356Snp if (sd->nmbuf++ == 0) 1624269356Snp counter_u64_add(extfree_refs, 1); 1625255050Snp 1626265425Snp } else { 1627255050Snp 1628265425Snp /* 1629265425Snp * Grab an mbuf from zone_mbuf and associate it with the 1630265425Snp * payload in the cluster. 1631265425Snp */ 1632255050Snp 1633281212Snp m = fr_offset == 0 ? 
1634265425Snp m_gethdr(M_NOWAIT, MT_DATA) : m_get(M_NOWAIT, MT_DATA); 1635265425Snp if (m == NULL) 1636265425Snp return (NULL); 1637265425Snp fl->mbuf_allocated++; 1638267694Snp if (clm != NULL) { 1639281212Snp m_extaddref(m, payload, blen, &clm->refcount, 1640265425Snp rxb_free, swz->zone, sd->cl); 1641269356Snp if (sd->nmbuf++ == 0) 1642269356Snp counter_u64_add(extfree_refs, 1); 1643267694Snp } else { 1644265425Snp m_cljset(m, sd->cl, swz->type); 1645265425Snp sd->cl = NULL; /* consumed, not a recycle candidate */ 1646255050Snp } 1647255050Snp } 1648281212Snp if (fr_offset == 0) 1649281212Snp m->m_pkthdr.len = remaining; 1650265425Snp m->m_len = len; 1651255050Snp 1652265425Snp if (fl->flags & FL_BUF_PACKING) { 1653281212Snp fl->rx_offset += blen; 1654265425Snp MPASS(fl->rx_offset <= hwb->size); 1655265425Snp if (fl->rx_offset < hwb->size) 1656265425Snp return (m); /* without advancing the cidx */ 1657265425Snp } 1658255050Snp 1659270297Snp if (__predict_false(++fl->cidx % 8 == 0)) { 1660270297Snp uint16_t cidx = fl->cidx / 8; 1661270297Snp 1662270297Snp if (__predict_false(cidx == fl->sidx)) 1663270297Snp fl->cidx = cidx = 0; 1664270297Snp fl->hw_cidx = cidx; 1665270297Snp } 1666265425Snp fl->rx_offset = 0; 1667255050Snp 1668265425Snp return (m); 1669255050Snp} 1670255050Snp 1671255050Snpstatic struct mbuf * 1672270297Snpget_fl_payload(struct adapter *sc, struct sge_fl *fl, uint32_t len_newbuf) 1673255050Snp{ 1674265425Snp struct mbuf *m0, *m, **pnext; 1675281212Snp u_int remaining; 1676281212Snp const u_int total = G_RSPD_LEN(len_newbuf); 1677255050Snp 1678270297Snp if (__predict_false(fl->flags & FL_BUF_RESUME)) { 1679266965Snp M_ASSERTPKTHDR(fl->m0); 1680281212Snp MPASS(fl->m0->m_pkthdr.len == total); 1681281212Snp MPASS(fl->remaining < total); 1682218792Snp 1683265425Snp m0 = fl->m0; 1684265425Snp pnext = fl->pnext; 1685281212Snp remaining = fl->remaining; 1686270297Snp fl->flags &= ~FL_BUF_RESUME; 1687265425Snp goto get_segment; 1688255050Snp } 1689255050Snp 1690265425Snp if (fl->rx_offset > 0 && len_newbuf & F_RSPD_NEWBUF) { 1691265425Snp fl->rx_offset = 0; 1692270297Snp if (__predict_false(++fl->cidx % 8 == 0)) { 1693270297Snp uint16_t cidx = fl->cidx / 8; 1694270297Snp 1695270297Snp if (__predict_false(cidx == fl->sidx)) 1696270297Snp fl->cidx = cidx = 0; 1697270297Snp fl->hw_cidx = cidx; 1698270297Snp } 1699228561Snp } 1700218792Snp 1701265425Snp /* 1702265425Snp * Payload starts at rx_offset in the current hw buffer. Its length is 1703265425Snp * 'len' and it may span multiple hw buffers. 
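 * Each call to get_scatter_segment below produces one mbuf and the
 * mbufs are chained through pnext.  If an allocation fails partway,
 * the partial chain is stashed on the fl and FL_BUF_RESUME is set so
 * that the next call resumes where this one left off.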
1704265425Snp */ 1705218792Snp 1706281212Snp m0 = get_scatter_segment(sc, fl, 0, total); 1707266965Snp if (m0 == NULL) 1708270297Snp return (NULL); 1709281212Snp remaining = total - m0->m_len; 1710265425Snp pnext = &m0->m_next; 1711281212Snp while (remaining > 0) { 1712265425Snpget_segment: 1713265425Snp MPASS(fl->rx_offset == 0); 1714281212Snp m = get_scatter_segment(sc, fl, total - remaining, remaining); 1715270297Snp if (__predict_false(m == NULL)) { 1716265425Snp fl->m0 = m0; 1717265425Snp fl->pnext = pnext; 1718281212Snp fl->remaining = remaining; 1719270297Snp fl->flags |= FL_BUF_RESUME; 1720270297Snp return (NULL); 1721218792Snp } 1722265425Snp *pnext = m; 1723265425Snp pnext = &m->m_next; 1724281212Snp remaining -= m->m_len; 1725265425Snp } 1726265425Snp *pnext = NULL; 1727270297Snp 1728228561Snp return (m0); 1729228561Snp} 1730218792Snp 1731228561Snpstatic int 1732228561Snpt4_eth_rx(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m0) 1733228561Snp{ 1734237463Snp struct sge_rxq *rxq = iq_to_rxq(iq); 1735228561Snp struct ifnet *ifp = rxq->ifp; 1736228561Snp const struct cpl_rx_pkt *cpl = (const void *)(rss + 1); 1737237819Snp#if defined(INET) || defined(INET6) 1738228561Snp struct lro_ctrl *lro = &rxq->lro; 1739228561Snp#endif 1740219290Snp 1741228561Snp KASSERT(m0 != NULL, ("%s: no payload with opcode %02x", __func__, 1742228561Snp rss->opcode)); 1743219290Snp 1744239258Snp m0->m_pkthdr.len -= fl_pktshift; 1745239258Snp m0->m_len -= fl_pktshift; 1746239258Snp m0->m_data += fl_pktshift; 1747219290Snp 1748228561Snp m0->m_pkthdr.rcvif = ifp; 1749281955Shiren M_HASHTYPE_SET(m0, M_HASHTYPE_OPAQUE); 1750259142Snp m0->m_pkthdr.flowid = be32toh(rss->hash_val); 1751219290Snp 1752237799Snp if (cpl->csum_calc && !cpl->err_vec) { 1753237799Snp if (ifp->if_capenable & IFCAP_RXCSUM && 1754237799Snp cpl->l2info & htobe32(F_RXF_IP)) { 1755237831Snp m0->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | 1756237799Snp CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); 1757237799Snp rxq->rxcsum++; 1758237799Snp } else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 && 1759237799Snp cpl->l2info & htobe32(F_RXF_IP6)) { 1760237831Snp m0->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 | 1761237799Snp CSUM_PSEUDO_HDR); 1762237799Snp rxq->rxcsum++; 1763237799Snp } 1764237799Snp 1765237799Snp if (__predict_false(cpl->ip_frag)) 1766228561Snp m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1767228561Snp else 1768228561Snp m0->m_pkthdr.csum_data = 0xffff; 1769228561Snp } 1770219290Snp 1771228561Snp if (cpl->vlan_ex) { 1772228561Snp m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 1773228561Snp m0->m_flags |= M_VLANTAG; 1774228561Snp rxq->vlan_extraction++; 1775228561Snp } 1776219290Snp 1777237819Snp#if defined(INET) || defined(INET6) 1778228561Snp if (cpl->l2info & htobe32(F_RXF_LRO) && 1779228561Snp iq->flags & IQ_LRO_ENABLED && 1780228561Snp tcp_lro_rx(lro, m0, 0) == 0) { 1781228561Snp /* queued for LRO */ 1782228561Snp } else 1783218792Snp#endif 1784228561Snp ifp->if_input(ifp, m0); 1785218792Snp 1786228561Snp return (0); 1787228561Snp} 1788218792Snp 1789228561Snp/* 1790284052Snp * Must drain the wrq or make sure that someone else will. 
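 * This only touches wr_list when no incomplete WR is outstanding;
 * otherwise commit_wrq_wr is responsible for draining the list once
 * the last incomplete WR has been committed.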
1791284052Snp */ 1792284052Snpstatic void 1793284052Snpwrq_tx_drain(void *arg, int n) 1794284052Snp{ 1795284052Snp struct sge_wrq *wrq = arg; 1796284052Snp struct sge_eq *eq = &wrq->eq; 1797284052Snp 1798284052Snp EQ_LOCK(eq); 1799284052Snp if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 1800284052Snp drain_wrq_wr_list(wrq->adapter, wrq); 1801284052Snp EQ_UNLOCK(eq); 1802284052Snp} 1803284052Snp 1804284052Snpstatic void 1805284052Snpdrain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 1806284052Snp{ 1807284052Snp struct sge_eq *eq = &wrq->eq; 1808284052Snp u_int available, dbdiff; /* # of hardware descriptors */ 1809284052Snp u_int n; 1810284052Snp struct wrqe *wr; 1811284052Snp struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 1812284052Snp 1813284052Snp EQ_LOCK_ASSERT_OWNED(eq); 1814284052Snp MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 1815284052Snp wr = STAILQ_FIRST(&wrq->wr_list); 1816284052Snp MPASS(wr != NULL); /* Must be called with something useful to do */ 1817284052Snp dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); 1818284052Snp 1819284052Snp do { 1820284052Snp eq->cidx = read_hw_cidx(eq); 1821284052Snp if (eq->pidx == eq->cidx) 1822284052Snp available = eq->sidx - 1; 1823284052Snp else 1824284052Snp available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 1825284052Snp 1826284052Snp MPASS(wr->wrq == wrq); 1827284052Snp n = howmany(wr->wr_len, EQ_ESIZE); 1828284052Snp if (available < n) 1829284052Snp return; 1830284052Snp 1831284052Snp dst = (void *)&eq->desc[eq->pidx]; 1832284052Snp if (__predict_true(eq->sidx - eq->pidx > n)) { 1833284052Snp /* Won't wrap, won't end exactly at the status page. */ 1834284052Snp bcopy(&wr->wr[0], dst, wr->wr_len); 1835284052Snp eq->pidx += n; 1836284052Snp } else { 1837284052Snp int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 1838284052Snp 1839284052Snp bcopy(&wr->wr[0], dst, first_portion); 1840284052Snp if (wr->wr_len > first_portion) { 1841284052Snp bcopy(&wr->wr[first_portion], &eq->desc[0], 1842284052Snp wr->wr_len - first_portion); 1843284052Snp } 1844284052Snp eq->pidx = n - (eq->sidx - eq->pidx); 1845284052Snp } 1846284052Snp 1847284052Snp if (available < eq->sidx / 4 && 1848284052Snp atomic_cmpset_int(&eq->equiq, 0, 1)) { 1849284052Snp dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 1850284052Snp F_FW_WR_EQUEQ); 1851284052Snp eq->equeqidx = eq->pidx; 1852284052Snp } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 1853284052Snp dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 1854284052Snp eq->equeqidx = eq->pidx; 1855284052Snp } 1856284052Snp 1857284052Snp dbdiff += n; 1858284052Snp if (dbdiff >= 16) { 1859284052Snp ring_eq_db(sc, eq, dbdiff); 1860284052Snp dbdiff = 0; 1861284052Snp } 1862284052Snp 1863284052Snp STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 1864284052Snp free_wrqe(wr); 1865284052Snp MPASS(wrq->nwr_pending > 0); 1866284052Snp wrq->nwr_pending--; 1867284052Snp MPASS(wrq->ndesc_needed >= n); 1868284052Snp wrq->ndesc_needed -= n; 1869284052Snp } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 1870284052Snp 1871284052Snp if (dbdiff) 1872284052Snp ring_eq_db(sc, eq, dbdiff); 1873284052Snp} 1874284052Snp 1875284052Snp/* 1876228561Snp * Doesn't fail. Holds on to work requests it can't send right away. 
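 * The WR is appended to wr_list and accounted for in nwr_pending and
 * ndesc_needed.  The list is drained right away unless an incomplete
 * WR (started with start_wrq_wr) is still open, in which case
 * commit_wrq_wr takes care of it.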
1877228561Snp */ 1878237263Snpvoid 1879237263Snpt4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 1880228561Snp{ 1881284052Snp#ifdef INVARIANTS 1882228561Snp struct sge_eq *eq = &wrq->eq; 1883284052Snp#endif 1884228561Snp 1885284052Snp EQ_LOCK_ASSERT_OWNED(eq); 1886284052Snp MPASS(wr != NULL); 1887284052Snp MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 1888284052Snp MPASS((wr->wr_len & 0x7) == 0); 1889284052Snp 1890284052Snp STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 1891284052Snp wrq->nwr_pending++; 1892284052Snp wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 1893284052Snp 1894284052Snp if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 1895284052Snp return; /* commit_wrq_wr will drain wr_list as well. */ 1896284052Snp 1897284052Snp drain_wrq_wr_list(sc, wrq); 1898284052Snp 1899284052Snp /* Doorbell must have caught up to the pidx. */ 1900284052Snp MPASS(eq->pidx == eq->dbidx); 1901284052Snp} 1902284052Snp 1903284052Snpvoid 1904284052Snpt4_update_fl_bufsize(struct ifnet *ifp) 1905284052Snp{ 1906284052Snp struct port_info *pi = ifp->if_softc; 1907284052Snp struct adapter *sc = pi->adapter; 1908284052Snp struct sge_rxq *rxq; 1909237263Snp#ifdef TCP_OFFLOAD 1910284052Snp struct sge_ofld_rxq *ofld_rxq; 1911237263Snp#endif 1912284052Snp struct sge_fl *fl; 1913284052Snp int i, maxp, mtu = ifp->if_mtu; 1914228561Snp 1915284052Snp maxp = mtu_to_max_payload(sc, mtu, 0); 1916284052Snp for_each_rxq(pi, i, rxq) { 1917284052Snp fl = &rxq->fl; 1918218792Snp 1919284052Snp FL_LOCK(fl); 1920284052Snp find_best_refill_source(sc, fl, maxp); 1921284052Snp FL_UNLOCK(fl); 1922218792Snp } 1923284052Snp#ifdef TCP_OFFLOAD 1924284052Snp maxp = mtu_to_max_payload(sc, mtu, 1); 1925284052Snp for_each_ofld_rxq(pi, i, ofld_rxq) { 1926284052Snp fl = &ofld_rxq->fl; 1927228561Snp 1928284052Snp FL_LOCK(fl); 1929284052Snp find_best_refill_source(sc, fl, maxp); 1930284052Snp FL_UNLOCK(fl); 1931284052Snp } 1932284052Snp#endif 1933284052Snp} 1934228561Snp 1935284052Snpstatic inline int 1936284052Snpmbuf_nsegs(struct mbuf *m) 1937284052Snp{ 1938228561Snp 1939284052Snp M_ASSERTPKTHDR(m); 1940284052Snp KASSERT(m->m_pkthdr.l5hlen > 0, 1941284052Snp ("%s: mbuf %p missing information on # of segments.", __func__, m)); 1942218792Snp 1943284052Snp return (m->m_pkthdr.l5hlen); 1944284052Snp} 1945218792Snp 1946284052Snpstatic inline void 1947284052Snpset_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 1948284052Snp{ 1949218792Snp 1950284052Snp M_ASSERTPKTHDR(m); 1951284052Snp m->m_pkthdr.l5hlen = nsegs; 1952284052Snp} 1953228561Snp 1954284052Snpstatic inline int 1955284052Snpmbuf_len16(struct mbuf *m) 1956284052Snp{ 1957284052Snp int n; 1958228561Snp 1959284052Snp M_ASSERTPKTHDR(m); 1960284052Snp n = m->m_pkthdr.PH_loc.eigth[0]; 1961284052Snp MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 1962228561Snp 1963284052Snp return (n); 1964284052Snp} 1965228561Snp 1966284052Snpstatic inline void 1967284052Snpset_mbuf_len16(struct mbuf *m, uint8_t len16) 1968284052Snp{ 1969228561Snp 1970284052Snp M_ASSERTPKTHDR(m); 1971284052Snp m->m_pkthdr.PH_loc.eigth[0] = len16; 1972220873Snp} 1973220873Snp 1974284052Snpstatic inline int 1975284052Snpneeds_tso(struct mbuf *m) 1976284052Snp{ 1977218792Snp 1978284052Snp M_ASSERTPKTHDR(m); 1979218792Snp 1980284052Snp if (m->m_pkthdr.csum_flags & CSUM_TSO) { 1981284052Snp KASSERT(m->m_pkthdr.tso_segsz > 0, 1982284052Snp ("%s: TSO requested in mbuf %p but MSS not provided", 1983284052Snp __func__, m)); 1984284052Snp return (1); 1985284052Snp } 1986218792Snp 1987284052Snp return (0); 
1988284052Snp} 1989218792Snp 1990284052Snpstatic inline int 1991284052Snpneeds_l3_csum(struct mbuf *m) 1992218792Snp{ 1993218792Snp 1994284052Snp M_ASSERTPKTHDR(m); 1995218792Snp 1996284052Snp if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TSO)) 1997284052Snp return (1); 1998284052Snp return (0); 1999284052Snp} 2000219292Snp 2001284052Snpstatic inline int 2002284052Snpneeds_l4_csum(struct mbuf *m) 2003284052Snp{ 2004218792Snp 2005284052Snp M_ASSERTPKTHDR(m); 2006218792Snp 2007284052Snp if (m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_UDP_IPV6 | 2008284052Snp CSUM_TCP_IPV6 | CSUM_TSO)) 2009284052Snp return (1); 2010284052Snp return (0); 2011284052Snp} 2012284052Snp 2013284052Snpstatic inline int 2014284052Snpneeds_vlan_insertion(struct mbuf *m) 2015284052Snp{ 2016284052Snp 2017284052Snp M_ASSERTPKTHDR(m); 2018284052Snp 2019284052Snp if (m->m_flags & M_VLANTAG) { 2020284052Snp KASSERT(m->m_pkthdr.ether_vtag != 0, 2021284052Snp ("%s: HWVLAN requested in mbuf %p but tag not provided", 2022284052Snp __func__, m)); 2023284052Snp return (1); 2024228561Snp } 2025284052Snp return (0); 2026284052Snp} 2027228561Snp 2028284052Snpstatic void * 2029284052Snpm_advance(struct mbuf **pm, int *poffset, int len) 2030284052Snp{ 2031284052Snp struct mbuf *m = *pm; 2032284052Snp int offset = *poffset; 2033284052Snp uintptr_t p = 0; 2034228561Snp 2035284052Snp MPASS(len > 0); 2036218792Snp 2037284052Snp while (len) { 2038284052Snp if (offset + len < m->m_len) { 2039284052Snp offset += len; 2040284052Snp p = mtod(m, uintptr_t) + offset; 2041218792Snp break; 2042284052Snp } 2043284052Snp len -= m->m_len - offset; 2044284052Snp m = m->m_next; 2045284052Snp offset = 0; 2046284052Snp MPASS(m != NULL); 2047284052Snp } 2048284052Snp *poffset = offset; 2049284052Snp *pm = m; 2050284052Snp return ((void *)p); 2051284052Snp} 2052218792Snp 2053284052Snpstatic inline int 2054284052Snpsame_paddr(char *a, char *b) 2055284052Snp{ 2056218792Snp 2057284052Snp if (a == b) 2058284052Snp return (1); 2059284052Snp else if (a != NULL && b != NULL) { 2060284052Snp vm_offset_t x = (vm_offset_t)a; 2061284052Snp vm_offset_t y = (vm_offset_t)b; 2062218792Snp 2063284052Snp if ((x & PAGE_MASK) == (y & PAGE_MASK) && 2064284052Snp pmap_kextract(x) == pmap_kextract(y)) 2065284052Snp return (1); 2066284052Snp } 2067218792Snp 2068284052Snp return (0); 2069284052Snp} 2070218792Snp 2071284052Snp/* 2072284052Snp * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2073284052Snp * must have at least one mbuf that's not empty. 2074284052Snp */ 2075284052Snpstatic inline int 2076284052Snpcount_mbuf_nsegs(struct mbuf *m) 2077284052Snp{ 2078284052Snp char *prev_end, *start; 2079284052Snp int len, nsegs; 2080218792Snp 2081284052Snp MPASS(m != NULL); 2082218792Snp 2083284052Snp nsegs = 0; 2084284052Snp prev_end = NULL; 2085284052Snp for (; m; m = m->m_next) { 2086284052Snp 2087284052Snp len = m->m_len; 2088284052Snp if (__predict_false(len == 0)) 2089218792Snp continue; 2090284052Snp start = mtod(m, char *); 2091218792Snp 2092284052Snp nsegs += sglist_count(start, len); 2093284052Snp if (same_paddr(prev_end, start)) 2094284052Snp nsegs--; 2095284052Snp prev_end = start + len; 2096284052Snp } 2097218792Snp 2098284052Snp MPASS(nsegs > 0); 2099284052Snp return (nsegs); 2100284052Snp} 2101218792Snp 2102284052Snp/* 2103284052Snp * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: 2104284052Snp * a) caller can assume it's been freed if this function returns with an error. 
2105284052Snp * b) it may get defragged up if the gather list is too long for the hardware. 2106284052Snp */ 2107284052Snpint 2108284052Snpparse_pkt(struct mbuf **mp) 2109284052Snp{ 2110284052Snp struct mbuf *m0 = *mp, *m; 2111284052Snp int rc, nsegs, defragged = 0, offset; 2112284052Snp struct ether_header *eh; 2113284052Snp void *l3hdr; 2114284052Snp#if defined(INET) || defined(INET6) 2115284052Snp struct tcphdr *tcp; 2116284052Snp#endif 2117284052Snp uint16_t eh_type; 2118284052Snp 2119284052Snp M_ASSERTPKTHDR(m0); 2120284052Snp if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2121284052Snp rc = EINVAL; 2122284052Snpfail: 2123284052Snp m_freem(m0); 2124284052Snp *mp = NULL; 2125284052Snp return (rc); 2126284052Snp } 2127284052Snprestart: 2128284052Snp /* 2129284052Snp * First count the number of gather list segments in the payload. 2130284052Snp * Defrag the mbuf if nsegs exceeds the hardware limit. 2131284052Snp */ 2132284052Snp M_ASSERTPKTHDR(m0); 2133284052Snp MPASS(m0->m_pkthdr.len > 0); 2134284052Snp nsegs = count_mbuf_nsegs(m0); 2135284052Snp if (nsegs > (needs_tso(m0) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)) { 2136284052Snp if (defragged++ > 0 || (m = m_defrag(m0, M_NOWAIT)) == NULL) { 2137284052Snp rc = EFBIG; 2138284052Snp goto fail; 2139218792Snp } 2140284052Snp *mp = m0 = m; /* update caller's copy after defrag */ 2141284052Snp goto restart; 2142284052Snp } 2143218792Snp 2144284052Snp if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN)) { 2145284052Snp m0 = m_pullup(m0, m0->m_pkthdr.len); 2146284052Snp if (m0 == NULL) { 2147284052Snp /* Should have left well enough alone. */ 2148284052Snp rc = EFBIG; 2149284052Snp goto fail; 2150284052Snp } 2151284052Snp *mp = m0; /* update caller's copy after pullup */ 2152284052Snp goto restart; 2153284052Snp } 2154284052Snp set_mbuf_nsegs(m0, nsegs); 2155284052Snp set_mbuf_len16(m0, txpkt_len16(nsegs, needs_tso(m0))); 2156218792Snp 2157284052Snp if (!needs_tso(m0)) 2158284052Snp return (0); 2159218792Snp 2160284052Snp m = m0; 2161284052Snp eh = mtod(m, struct ether_header *); 2162284052Snp eh_type = ntohs(eh->ether_type); 2163284052Snp if (eh_type == ETHERTYPE_VLAN) { 2164284052Snp struct ether_vlan_header *evh = (void *)eh; 2165218792Snp 2166284052Snp eh_type = ntohs(evh->evl_proto); 2167284052Snp m0->m_pkthdr.l2hlen = sizeof(*evh); 2168284052Snp } else 2169284052Snp m0->m_pkthdr.l2hlen = sizeof(*eh); 2170218792Snp 2171284052Snp offset = 0; 2172284052Snp l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2173218792Snp 2174284052Snp switch (eh_type) { 2175284052Snp#ifdef INET6 2176284052Snp case ETHERTYPE_IPV6: 2177284052Snp { 2178284052Snp struct ip6_hdr *ip6 = l3hdr; 2179218792Snp 2180284052Snp MPASS(ip6->ip6_nxt == IPPROTO_TCP); 2181219292Snp 2182284052Snp m0->m_pkthdr.l3hlen = sizeof(*ip6); 2183284052Snp break; 2184218792Snp } 2185284052Snp#endif 2186284052Snp#ifdef INET 2187284052Snp case ETHERTYPE_IP: 2188284052Snp { 2189284052Snp struct ip *ip = l3hdr; 2190218792Snp 2191284052Snp m0->m_pkthdr.l3hlen = ip->ip_hl * 4; 2192284052Snp break; 2193284052Snp } 2194284052Snp#endif 2195284052Snp default: 2196284052Snp panic("%s: ethertype 0x%04x unknown. 
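 * start_wrq_wr handed out the spare area wrq->ss because this WR
 * would wrap past the end of the descriptor ring; copy it back into
 * the ring in two pieces, split at the ring boundary.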
if_cxgbe must be compiled" 2197284052Snp " with the same INET/INET6 options as the kernel.", 2198284052Snp __func__, eh_type); 2199284052Snp } 2200218792Snp 2201284052Snp#if defined(INET) || defined(INET6) 2202284052Snp tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2203284052Snp m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2204284052Snp#endif 2205284052Snp MPASS(m0 == *mp); 2206284052Snp return (0); 2207284052Snp} 2208220873Snp 2209284052Snpvoid * 2210284052Snpstart_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2211284052Snp{ 2212284052Snp struct sge_eq *eq = &wrq->eq; 2213284052Snp struct adapter *sc = wrq->adapter; 2214284052Snp int ndesc, available; 2215284052Snp struct wrqe *wr; 2216284052Snp void *w; 2217228561Snp 2218284052Snp MPASS(len16 > 0); 2219284052Snp ndesc = howmany(len16, EQ_ESIZE / 16); 2220284052Snp MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2221284052Snp 2222284052Snp EQ_LOCK(eq); 2223284052Snp 2224284052Snp if (!STAILQ_EMPTY(&wrq->wr_list)) 2225284052Snp drain_wrq_wr_list(sc, wrq); 2226284052Snp 2227284052Snp if (!STAILQ_EMPTY(&wrq->wr_list)) { 2228284052Snpslowpath: 2229284052Snp EQ_UNLOCK(eq); 2230284052Snp wr = alloc_wrqe(len16 * 16, wrq); 2231284052Snp if (__predict_false(wr == NULL)) 2232284052Snp return (NULL); 2233284052Snp cookie->pidx = -1; 2234284052Snp cookie->ndesc = ndesc; 2235284052Snp return (&wr->wr); 2236220873Snp } 2237218792Snp 2238284052Snp eq->cidx = read_hw_cidx(eq); 2239284052Snp if (eq->pidx == eq->cidx) 2240284052Snp available = eq->sidx - 1; 2241284052Snp else 2242284052Snp available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2243284052Snp if (available < ndesc) 2244284052Snp goto slowpath; 2245218792Snp 2246284052Snp cookie->pidx = eq->pidx; 2247284052Snp cookie->ndesc = ndesc; 2248284052Snp TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2249218792Snp 2250284052Snp w = &eq->desc[eq->pidx]; 2251284052Snp IDXINCR(eq->pidx, ndesc, eq->sidx); 2252284052Snp if (__predict_false(eq->pidx < ndesc - 1)) { 2253284052Snp w = &wrq->ss[0]; 2254284052Snp wrq->ss_pidx = cookie->pidx; 2255284052Snp wrq->ss_len = len16 * 16; 2256284052Snp } 2257228561Snp 2258284052Snp EQ_UNLOCK(eq); 2259284052Snp 2260284052Snp return (w); 2261218792Snp} 2262218792Snp 2263218792Snpvoid 2264284052Snpcommit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2265218792Snp{ 2266284052Snp struct sge_eq *eq = &wrq->eq; 2267284052Snp struct adapter *sc = wrq->adapter; 2268284052Snp int ndesc, pidx; 2269284052Snp struct wrq_cookie *prev, *next; 2270218792Snp 2271284052Snp if (cookie->pidx == -1) { 2272284052Snp struct wrqe *wr = __containerof(w, struct wrqe, wr); 2273218792Snp 2274284052Snp t4_wrq_tx(sc, wr); 2275284052Snp return; 2276218792Snp } 2277252728Snp 2278284052Snp ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 2279284052Snp pidx = cookie->pidx; 2280284052Snp MPASS(pidx >= 0 && pidx < eq->sidx); 2281284052Snp if (__predict_false(w == &wrq->ss[0])) { 2282284052Snp int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2283284052Snp 2284284052Snp MPASS(wrq->ss_len > n); /* WR had better wrap around. 
*/ 2285284052Snp bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2286284052Snp bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2287284052Snp wrq->tx_wrs_ss++; 2288284052Snp } else 2289284052Snp wrq->tx_wrs_direct++; 2290284052Snp 2291284052Snp EQ_LOCK(eq); 2292284052Snp prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2293284052Snp next = TAILQ_NEXT(cookie, link); 2294284052Snp if (prev == NULL) { 2295284052Snp MPASS(pidx == eq->dbidx); 2296284052Snp if (next == NULL || ndesc >= 16) 2297284052Snp ring_eq_db(wrq->adapter, eq, ndesc); 2298284052Snp else { 2299284052Snp MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 2300284052Snp next->pidx = pidx; 2301284052Snp next->ndesc += ndesc; 2302284052Snp } 2303284052Snp } else { 2304284052Snp MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 2305284052Snp prev->ndesc += ndesc; 2306252728Snp } 2307284052Snp TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 2308284052Snp 2309284052Snp if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2310284052Snp drain_wrq_wr_list(sc, wrq); 2311284052Snp 2312284052Snp#ifdef INVARIANTS 2313284052Snp if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 2314284052Snp /* Doorbell must have caught up to the pidx. */ 2315284052Snp MPASS(wrq->eq.pidx == wrq->eq.dbidx); 2316284052Snp } 2317252728Snp#endif 2318284052Snp EQ_UNLOCK(eq); 2319218792Snp} 2320218792Snp 2321284052Snpstatic u_int 2322284052Snpcan_resume_eth_tx(struct mp_ring *r) 2323228561Snp{ 2324284052Snp struct sge_eq *eq = r->cookie; 2325267764Snp 2326284052Snp return (total_available_tx_desc(eq) > eq->sidx / 8); 2327228561Snp} 2328228561Snp 2329284052Snpstatic inline int 2330284052Snpcannot_use_txpkts(struct mbuf *m) 2331284052Snp{ 2332284052Snp /* maybe put a GL limit too, to avoid silliness? */ 2333284052Snp 2334284052Snp return (needs_tso(m)); 2335284052Snp} 2336284052Snp 2337284052Snp/* 2338284052Snp * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 2339284052Snp * be consumed. Return the actual number consumed. 0 indicates a stall. 2340284052Snp */ 2341284052Snpstatic u_int 2342284052Snpeth_tx(struct mp_ring *r, u_int cidx, u_int pidx) 2343284052Snp{ 2344284052Snp struct sge_txq *txq = r->cookie; 2345284052Snp struct sge_eq *eq = &txq->eq; 2346284052Snp struct ifnet *ifp = txq->ifp; 2347284052Snp struct port_info *pi = (void *)ifp->if_softc; 2348284052Snp struct adapter *sc = pi->adapter; 2349284052Snp u_int total, remaining; /* # of packets */ 2350284052Snp u_int available, dbdiff; /* # of hardware descriptors */ 2351284052Snp u_int n, next_cidx; 2352284052Snp struct mbuf *m0, *tail; 2353284052Snp struct txpkts txp; 2354284052Snp struct fw_eth_tx_pkts_wr *wr; /* any fw WR struct will do */ 2355284052Snp 2356284052Snp remaining = IDXDIFF(pidx, cidx, r->size); 2357284052Snp MPASS(remaining > 0); /* Must not be called without work to do. */ 2358284052Snp total = 0; 2359284052Snp 2360284052Snp TXQ_LOCK(txq); 2361284052Snp if (__predict_false((eq->flags & EQ_ENABLED) == 0)) { 2362284052Snp while (cidx != pidx) { 2363284052Snp m0 = r->items[cidx]; 2364284052Snp m_freem(m0); 2365284052Snp if (++cidx == r->size) 2366284052Snp cidx = 0; 2367284052Snp } 2368284052Snp reclaim_tx_descs(txq, 2048); 2369284052Snp total = remaining; 2370284052Snp goto done; 2371284052Snp } 2372284052Snp 2373284052Snp /* How many hardware descriptors do we have readily available. 
*/ 2374284052Snp if (eq->pidx == eq->cidx) 2375284052Snp available = eq->sidx - 1; 2376284052Snp else 2377284052Snp available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2378284052Snp dbdiff = IDXDIFF(eq->pidx, eq->dbidx, eq->sidx); 2379284052Snp 2380284052Snp while (remaining > 0) { 2381284052Snp 2382284052Snp m0 = r->items[cidx]; 2383284052Snp M_ASSERTPKTHDR(m0); 2384284052Snp MPASS(m0->m_nextpkt == NULL); 2385284052Snp 2386284052Snp if (available < SGE_MAX_WR_NDESC) { 2387284052Snp available += reclaim_tx_descs(txq, 64); 2388284052Snp if (available < howmany(mbuf_len16(m0), EQ_ESIZE / 16)) 2389284052Snp break; /* out of descriptors */ 2390284052Snp } 2391284052Snp 2392284052Snp next_cidx = cidx + 1; 2393284052Snp if (__predict_false(next_cidx == r->size)) 2394284052Snp next_cidx = 0; 2395284052Snp 2396284052Snp wr = (void *)&eq->desc[eq->pidx]; 2397284052Snp if (remaining > 1 && 2398284052Snp try_txpkts(m0, r->items[next_cidx], &txp, available) == 0) { 2399284052Snp 2400284052Snp /* pkts at cidx, next_cidx should both be in txp. */ 2401284052Snp MPASS(txp.npkt == 2); 2402284052Snp tail = r->items[next_cidx]; 2403284052Snp MPASS(tail->m_nextpkt == NULL); 2404284052Snp ETHER_BPF_MTAP(ifp, m0); 2405284052Snp ETHER_BPF_MTAP(ifp, tail); 2406284052Snp m0->m_nextpkt = tail; 2407284052Snp 2408284052Snp if (__predict_false(++next_cidx == r->size)) 2409284052Snp next_cidx = 0; 2410284052Snp 2411284052Snp while (next_cidx != pidx) { 2412284052Snp if (add_to_txpkts(r->items[next_cidx], &txp, 2413284052Snp available) != 0) 2414284052Snp break; 2415284052Snp tail->m_nextpkt = r->items[next_cidx]; 2416284052Snp tail = tail->m_nextpkt; 2417284052Snp ETHER_BPF_MTAP(ifp, tail); 2418284052Snp if (__predict_false(++next_cidx == r->size)) 2419284052Snp next_cidx = 0; 2420284052Snp } 2421284052Snp 2422284052Snp n = write_txpkts_wr(txq, wr, m0, &txp, available); 2423284052Snp total += txp.npkt; 2424284052Snp remaining -= txp.npkt; 2425284052Snp } else { 2426284052Snp total++; 2427284052Snp remaining--; 2428284052Snp n = write_txpkt_wr(txq, (void *)wr, m0, available); 2429284052Snp ETHER_BPF_MTAP(ifp, m0); 2430284052Snp } 2431284052Snp MPASS(n >= 1 && n <= available && n <= SGE_MAX_WR_NDESC); 2432284052Snp 2433284052Snp available -= n; 2434284052Snp dbdiff += n; 2435284052Snp IDXINCR(eq->pidx, n, eq->sidx); 2436284052Snp 2437284052Snp if (total_available_tx_desc(eq) < eq->sidx / 4 && 2438284052Snp atomic_cmpset_int(&eq->equiq, 0, 1)) { 2439284052Snp wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2440284052Snp F_FW_WR_EQUEQ); 2441284052Snp eq->equeqidx = eq->pidx; 2442284052Snp } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 2443284052Snp wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 2444284052Snp eq->equeqidx = eq->pidx; 2445284052Snp } 2446284052Snp 2447284052Snp if (dbdiff >= 16 && remaining >= 4) { 2448284052Snp ring_eq_db(sc, eq, dbdiff); 2449284052Snp available += reclaim_tx_descs(txq, 4 * dbdiff); 2450284052Snp dbdiff = 0; 2451284052Snp } 2452284052Snp 2453284052Snp cidx = next_cidx; 2454284052Snp } 2455284052Snp if (dbdiff != 0) { 2456284052Snp ring_eq_db(sc, eq, dbdiff); 2457284052Snp reclaim_tx_descs(txq, 32); 2458284052Snp } 2459284052Snpdone: 2460284052Snp TXQ_UNLOCK(txq); 2461284052Snp 2462284052Snp return (total); 2463284052Snp} 2464284052Snp 2465218792Snpstatic inline void 2466218792Snpinit_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 2467270297Snp int qsize) 2468218792Snp{ 2469270297Snp 2470218792Snp KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 
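 * Each sw descriptor tracks the cluster pointer, its layout, and the
 * number of mbufs carved out of it; that is what get_scatter_segment
 * consumes on the rx path.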
2471218792Snp ("%s: bad tmr_idx %d", __func__, tmr_idx)); 2472218792Snp KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 2473218792Snp ("%s: bad pktc_idx %d", __func__, pktc_idx)); 2474218792Snp 2475218792Snp iq->flags = 0; 2476218792Snp iq->adapter = sc; 2477234833Snp iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 2478234833Snp iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 2479234833Snp if (pktc_idx >= 0) { 2480234833Snp iq->intr_params |= F_QINTR_CNT_EN; 2481234833Snp iq->intr_pktc_idx = pktc_idx; 2482234833Snp } 2483248925Snp iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 2484270297Snp iq->sidx = iq->qsize - spg_len / IQ_ESIZE; 2485218792Snp} 2486218792Snp 2487218792Snpstatic inline void 2488281212Snpinit_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 2489218792Snp{ 2490255050Snp 2491218792Snp fl->qsize = qsize; 2492270297Snp fl->sidx = qsize - spg_len / EQ_ESIZE; 2493218792Snp strlcpy(fl->lockname, name, sizeof(fl->lockname)); 2494281212Snp if (sc->flags & BUF_PACKING_OK && 2495281212Snp ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 2496281212Snp (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 2497255050Snp fl->flags |= FL_BUF_PACKING; 2498265425Snp find_best_refill_source(sc, fl, maxp); 2499265425Snp find_safe_refill_source(sc, fl); 2500218792Snp} 2501218792Snp 2502218792Snpstatic inline void 2503228561Snpinit_eq(struct sge_eq *eq, int eqtype, int qsize, uint8_t tx_chan, 2504228561Snp uint16_t iqid, char *name) 2505218792Snp{ 2506228561Snp KASSERT(tx_chan < NCHAN, ("%s: bad tx channel %d", __func__, tx_chan)); 2507228561Snp KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 2508228561Snp 2509228561Snp eq->flags = eqtype & EQ_TYPEMASK; 2510228561Snp eq->tx_chan = tx_chan; 2511228561Snp eq->iqid = iqid; 2512284052Snp eq->sidx = qsize - spg_len / EQ_ESIZE; 2513220873Snp strlcpy(eq->lockname, name, sizeof(eq->lockname)); 2514218792Snp} 2515218792Snp 2516218792Snpstatic int 2517218792Snpalloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 2518218792Snp bus_dmamap_t *map, bus_addr_t *pa, void **va) 2519218792Snp{ 2520218792Snp int rc; 2521218792Snp 2522218792Snp rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 2523218792Snp BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 2524218792Snp if (rc != 0) { 2525218792Snp device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 2526218792Snp goto done; 2527218792Snp } 2528218792Snp 2529218792Snp rc = bus_dmamem_alloc(*tag, va, 2530218792Snp BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 2531218792Snp if (rc != 0) { 2532218792Snp device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 2533218792Snp goto done; 2534218792Snp } 2535218792Snp 2536218792Snp rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 2537218792Snp if (rc != 0) { 2538218792Snp device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 2539218792Snp goto done; 2540218792Snp } 2541218792Snpdone: 2542218792Snp if (rc) 2543218792Snp free_ring(sc, *tag, *map, *pa, *va); 2544218792Snp 2545218792Snp return (rc); 2546218792Snp} 2547218792Snp 2548218792Snpstatic int 2549218792Snpfree_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 2550218792Snp bus_addr_t pa, void *va) 2551218792Snp{ 2552218792Snp if (pa) 2553218792Snp bus_dmamap_unload(tag, map); 2554218792Snp if (va) 2555218792Snp bus_dmamem_free(tag, va, map); 2556218792Snp if (tag) 2557218792Snp bus_dma_tag_destroy(tag); 2558218792Snp 
2559218792Snp return (0); 2560218792Snp} 2561218792Snp 2562218792Snp/* 2563218792Snp * Allocates the ring for an ingress queue and an optional freelist. If the 2564218792Snp * freelist is specified it will be allocated and then associated with the 2565218792Snp * ingress queue. 2566218792Snp * 2567218792Snp * Returns errno on failure. Resources allocated up to that point may still be 2568218792Snp * allocated. Caller is responsible for cleanup in case this function fails. 2569218792Snp * 2570228561Snp * If the ingress queue will take interrupts directly (iq->flags & IQ_INTR) then 2571218792Snp * the intr_idx specifies the vector, starting from 0. Otherwise it specifies 2572228561Snp * the abs_id of the ingress queue to which its interrupts should be forwarded. 2573218792Snp */ 2574218792Snpstatic int 2575218792Snpalloc_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl, 2576222085Snp int intr_idx, int cong) 2577218792Snp{ 2578218792Snp int rc, i, cntxt_id; 2579218792Snp size_t len; 2580218792Snp struct fw_iq_cmd c; 2581218792Snp struct adapter *sc = iq->adapter; 2582218792Snp __be32 v = 0; 2583218792Snp 2584270297Snp len = iq->qsize * IQ_ESIZE; 2585218792Snp rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 2586218792Snp (void **)&iq->desc); 2587218792Snp if (rc != 0) 2588218792Snp return (rc); 2589218792Snp 2590218792Snp bzero(&c, sizeof(c)); 2591218792Snp c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 2592218792Snp F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 2593218792Snp V_FW_IQ_CMD_VFN(0)); 2594218792Snp 2595218792Snp c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 2596218792Snp FW_LEN16(c)); 2597218792Snp 2598218792Snp /* Special handling for firmware event queue */ 2599218792Snp if (iq == &sc->sge.fwq) 2600218792Snp v |= F_FW_IQ_CMD_IQASYNCH; 2601218792Snp 2602228561Snp if (iq->flags & IQ_INTR) { 2603218792Snp KASSERT(intr_idx < sc->intr_count, 2604218792Snp ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 2605228561Snp } else 2606228561Snp v |= F_FW_IQ_CMD_IQANDST; 2607228561Snp v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 2608218792Snp 2609218792Snp c.type_to_iqandstindex = htobe32(v | 2610218792Snp V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 2611218792Snp V_FW_IQ_CMD_VIID(pi->viid) | 2612218792Snp V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 2613218792Snp c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 2614218792Snp F_FW_IQ_CMD_IQGTSMODE | 2615218792Snp V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 2616270297Snp V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 2617218792Snp c.iqsize = htobe16(iq->qsize); 2618218792Snp c.iqaddr = htobe64(iq->ba); 2619222085Snp if (cong >= 0) 2620222085Snp c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 2621218792Snp 2622218792Snp if (fl) { 2623218792Snp mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 2624218792Snp 2625270297Snp len = fl->qsize * EQ_ESIZE; 2626218792Snp rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 2627218792Snp &fl->ba, (void **)&fl->desc); 2628218792Snp if (rc) 2629218792Snp return (rc); 2630218792Snp 2631218792Snp /* Allocate space for one software descriptor per buffer. 
*/ 2632218792Snp rc = alloc_fl_sdesc(fl); 2633218792Snp if (rc != 0) { 2634218792Snp device_printf(sc->dev, 2635218792Snp "failed to setup fl software descriptors: %d\n", 2636218792Snp rc); 2637218792Snp return (rc); 2638218792Snp } 2639218792Snp 2640270297Snp if (fl->flags & FL_BUF_PACKING) { 2641270297Snp fl->lowat = roundup2(sc->sge.fl_starve_threshold2, 8); 2642281212Snp fl->buf_boundary = sc->sge.pack_boundary; 2643270297Snp } else { 2644270297Snp fl->lowat = roundup2(sc->sge.fl_starve_threshold, 8); 2645281212Snp fl->buf_boundary = 16; 2646270297Snp } 2647281212Snp if (fl_pad && fl->buf_boundary < sc->sge.pad_boundary) 2648281212Snp fl->buf_boundary = sc->sge.pad_boundary; 2649270297Snp 2650228491Snp c.iqns_to_fl0congen |= 2651222085Snp htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 2652222085Snp F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 2653255050Snp (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 2654255050Snp (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 2655255050Snp 0)); 2656222085Snp if (cong >= 0) { 2657222085Snp c.iqns_to_fl0congen |= 2658222085Snp htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 2659222085Snp F_FW_IQ_CMD_FL0CONGCIF | 2660222085Snp F_FW_IQ_CMD_FL0CONGEN); 2661222085Snp } 2662218792Snp c.fl0dcaen_to_fl0cidxfthresh = 2663284093Snp htobe16(V_FW_IQ_CMD_FL0FBMIN(X_FETCHBURSTMIN_128B) | 2664218792Snp V_FW_IQ_CMD_FL0FBMAX(X_FETCHBURSTMAX_512B)); 2665218792Snp c.fl0size = htobe16(fl->qsize); 2666218792Snp c.fl0addr = htobe64(fl->ba); 2667218792Snp } 2668218792Snp 2669218792Snp rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 2670218792Snp if (rc != 0) { 2671218792Snp device_printf(sc->dev, 2672218792Snp "failed to create ingress queue: %d\n", rc); 2673218792Snp return (rc); 2674218792Snp } 2675218792Snp 2676218792Snp iq->cidx = 0; 2677270297Snp iq->gen = F_RSPD_GEN; 2678218792Snp iq->intr_next = iq->intr_params; 2679218792Snp iq->cntxt_id = be16toh(c.iqid); 2680218792Snp iq->abs_id = be16toh(c.physiqid); 2681228561Snp iq->flags |= IQ_ALLOCATED; 2682218792Snp 2683218792Snp cntxt_id = iq->cntxt_id - sc->sge.iq_start; 2684228561Snp if (cntxt_id >= sc->sge.niq) { 2685228561Snp panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 2686228561Snp cntxt_id, sc->sge.niq - 1); 2687228561Snp } 2688218792Snp sc->sge.iqmap[cntxt_id] = iq; 2689218792Snp 2690218792Snp if (fl) { 2691270297Snp u_int qid; 2692270297Snp 2693270297Snp iq->flags |= IQ_HAS_FL; 2694218792Snp fl->cntxt_id = be16toh(c.fl0id); 2695218792Snp fl->pidx = fl->cidx = 0; 2696218792Snp 2697219883Snp cntxt_id = fl->cntxt_id - sc->sge.eq_start; 2698228561Snp if (cntxt_id >= sc->sge.neq) { 2699228561Snp panic("%s: fl->cntxt_id (%d) more than the max (%d)", 2700228561Snp __func__, cntxt_id, sc->sge.neq - 1); 2701228561Snp } 2702218792Snp sc->sge.eqmap[cntxt_id] = (void *)fl; 2703218792Snp 2704270297Snp qid = fl->cntxt_id; 2705270297Snp if (isset(&sc->doorbells, DOORBELL_UDB)) { 2706270297Snp uint32_t s_qpp = sc->sge.eq_s_qpp; 2707270297Snp uint32_t mask = (1 << s_qpp) - 1; 2708270297Snp volatile uint8_t *udb; 2709270297Snp 2710270297Snp udb = sc->udbs_base + UDBS_DB_OFFSET; 2711270297Snp udb += (qid >> s_qpp) << PAGE_SHIFT; 2712270297Snp qid &= mask; 2713270297Snp if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 2714270297Snp udb += qid << UDBS_SEG_SHIFT; 2715270297Snp qid = 0; 2716270297Snp } 2717270297Snp fl->udb = (volatile void *)udb; 2718270297Snp } 2719270297Snp fl->dbval = F_DBPRIO | V_QID(qid); 2720270297Snp if (is_t5(sc)) 2721270297Snp fl->dbval |= F_DBTYPE; 2722270297Snp 2723218792Snp FL_LOCK(fl); 
2724228561Snp /* Enough to make sure the SGE doesn't think it's starved */ 2725228561Snp refill_fl(sc, fl, fl->lowat); 2726218792Snp FL_UNLOCK(fl); 2727218792Snp } 2728218792Snp 2729253873Snp if (is_t5(sc) && cong >= 0) { 2730253873Snp uint32_t param, val; 2731253873Snp 2732253873Snp param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 2733253873Snp V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 2734253873Snp V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 2735253889Snp if (cong == 0) 2736253889Snp val = 1 << 19; 2737253889Snp else { 2738253889Snp val = 2 << 19; 2739253889Snp for (i = 0; i < 4; i++) { 2740253889Snp if (cong & (1 << i)) 2741253889Snp val |= 1 << (i << 2); 2742253889Snp } 2743253889Snp } 2744253889Snp 2745253873Snp rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 2746253873Snp if (rc != 0) { 2747253873Snp /* report error but carry on */ 2748253873Snp device_printf(sc->dev, 2749253873Snp "failed to set congestion manager context for " 2750253873Snp "ingress queue %d: %d\n", iq->cntxt_id, rc); 2751253873Snp } 2752253873Snp } 2753253873Snp 2754218792Snp /* Enable IQ interrupts */ 2755228561Snp atomic_store_rel_int(&iq->state, IQS_IDLE); 2756218792Snp t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) | 2757218792Snp V_INGRESSQID(iq->cntxt_id)); 2758218792Snp 2759218792Snp return (0); 2760218792Snp} 2761218792Snp 2762218792Snpstatic int 2763218792Snpfree_iq_fl(struct port_info *pi, struct sge_iq *iq, struct sge_fl *fl) 2764218792Snp{ 2765265425Snp int rc; 2766218792Snp struct adapter *sc = iq->adapter; 2767218792Snp device_t dev; 2768218792Snp 2769218792Snp if (sc == NULL) 2770218792Snp return (0); /* nothing to do */ 2771218792Snp 2772218792Snp dev = pi ? pi->dev : sc->dev; 2773218792Snp 2774218792Snp if (iq->flags & IQ_ALLOCATED) { 2775218792Snp rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 2776218792Snp FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 2777218792Snp fl ? fl->cntxt_id : 0xffff, 0xffff); 2778218792Snp if (rc != 0) { 2779218792Snp device_printf(dev, 2780218792Snp "failed to free queue %p: %d\n", iq, rc); 2781218792Snp return (rc); 2782218792Snp } 2783218792Snp iq->flags &= ~IQ_ALLOCATED; 2784218792Snp } 2785218792Snp 2786218792Snp free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 2787218792Snp 2788218792Snp bzero(iq, sizeof(*iq)); 2789218792Snp 2790218792Snp if (fl) { 2791218792Snp free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 2792218792Snp fl->desc); 2793218792Snp 2794254727Snp if (fl->sdesc) 2795255050Snp free_fl_sdesc(sc, fl); 2796218792Snp 2797218792Snp if (mtx_initialized(&fl->fl_lock)) 2798218792Snp mtx_destroy(&fl->fl_lock); 2799218792Snp 2800218792Snp bzero(fl, sizeof(*fl)); 2801218792Snp } 2802218792Snp 2803218792Snp return (0); 2804218792Snp} 2805218792Snp 2806265425Snpstatic void 2807265425Snpadd_fl_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 2808265425Snp struct sge_fl *fl) 2809265425Snp{ 2810265425Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2811265425Snp 2812265425Snp oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 2813265425Snp "freelist"); 2814265425Snp children = SYSCTL_CHILDREN(oid); 2815265425Snp 2816265425Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 2817265425Snp CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 2818265425Snp "SGE context id of the freelist"); 2819281212Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 2820281212Snp fl_pad ? 
1 : 0, "padding enabled"); 2821281212Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 2822281212Snp fl->flags & FL_BUF_PACKING ? 1 : 0, "packing enabled"); 2823265425Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 2824265425Snp 0, "consumer index"); 2825265425Snp if (fl->flags & FL_BUF_PACKING) { 2826265425Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 2827265425Snp CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 2828265425Snp } 2829265425Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 2830265425Snp 0, "producer index"); 2831265425Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_allocated", 2832265425Snp CTLFLAG_RD, &fl->mbuf_allocated, "# of mbuf allocated"); 2833265425Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "mbuf_inlined", 2834265425Snp CTLFLAG_RD, &fl->mbuf_inlined, "# of mbuf inlined in clusters"); 2835265425Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 2836265425Snp CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 2837265425Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 2838265425Snp CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 2839265425Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 2840265425Snp CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 2841265425Snp} 2842265425Snp 2843218792Snpstatic int 2844228561Snpalloc_fwq(struct adapter *sc) 2845218792Snp{ 2846228561Snp int rc, intr_idx; 2847228561Snp struct sge_iq *fwq = &sc->sge.fwq; 2848228561Snp struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2849228561Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2850222510Snp 2851270297Snp init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 2852228561Snp fwq->flags |= IQ_INTR; /* always */ 2853228561Snp intr_idx = sc->intr_count > 1 ? 
1 : 0; 2854228561Snp rc = alloc_iq_fl(sc->port[0], fwq, NULL, intr_idx, -1); 2855228561Snp if (rc != 0) { 2856228561Snp device_printf(sc->dev, 2857228561Snp "failed to create firmware event queue: %d\n", rc); 2858222510Snp return (rc); 2859228561Snp } 2860222510Snp 2861228561Snp oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 2862228561Snp NULL, "firmware event queue"); 2863222510Snp children = SYSCTL_CHILDREN(oid); 2864222510Snp 2865228561Snp SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "abs_id", 2866228561Snp CTLTYPE_INT | CTLFLAG_RD, &fwq->abs_id, 0, sysctl_uint16, "I", 2867228561Snp "absolute id of the queue"); 2868228561Snp SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cntxt_id", 2869228561Snp CTLTYPE_INT | CTLFLAG_RD, &fwq->cntxt_id, 0, sysctl_uint16, "I", 2870228561Snp "SGE context id of the queue"); 2871222510Snp SYSCTL_ADD_PROC(&sc->ctx, children, OID_AUTO, "cidx", 2872228561Snp CTLTYPE_INT | CTLFLAG_RD, &fwq->cidx, 0, sysctl_uint16, "I", 2873222510Snp "consumer index"); 2874222510Snp 2875228561Snp return (0); 2876218792Snp} 2877218792Snp 2878218792Snpstatic int 2879228561Snpfree_fwq(struct adapter *sc) 2880218792Snp{ 2881228561Snp return free_iq_fl(NULL, &sc->sge.fwq, NULL); 2882218792Snp} 2883218792Snp 2884218792Snpstatic int 2885228561Snpalloc_mgmtq(struct adapter *sc) 2886222510Snp{ 2887222510Snp int rc; 2888228561Snp struct sge_wrq *mgmtq = &sc->sge.mgmtq; 2889228561Snp char name[16]; 2890228561Snp struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 2891228561Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 2892222510Snp 2893228561Snp oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "mgmtq", CTLFLAG_RD, 2894228561Snp NULL, "management queue"); 2895228561Snp 2896228561Snp snprintf(name, sizeof(name), "%s mgmtq", device_get_nameunit(sc->dev)); 2897228561Snp init_eq(&mgmtq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[0]->tx_chan, 2898228561Snp sc->sge.fwq.cntxt_id, name); 2899228561Snp rc = alloc_wrq(sc, NULL, mgmtq, oid); 2900228561Snp if (rc != 0) { 2901228561Snp device_printf(sc->dev, 2902228561Snp "failed to create management queue: %d\n", rc); 2903222510Snp return (rc); 2904228561Snp } 2905222510Snp 2906228561Snp return (0); 2907222510Snp} 2908222510Snp 2909222510Snpstatic int 2910228561Snpfree_mgmtq(struct adapter *sc) 2911222510Snp{ 2912237263Snp 2913228561Snp return free_wrq(sc, &sc->sge.mgmtq); 2914222510Snp} 2915222510Snp 2916281253Snpint 2917239258Snptnl_cong(struct port_info *pi) 2918239258Snp{ 2919239258Snp 2920239258Snp if (cong_drop == -1) 2921239258Snp return (-1); 2922239258Snp else if (cong_drop == 1) 2923239258Snp return (0); 2924239258Snp else 2925265410Snp return (pi->rx_chan_map); 2926239258Snp} 2927239258Snp 2928222510Snpstatic int 2929228561Snpalloc_rxq(struct port_info *pi, struct sge_rxq *rxq, int intr_idx, int idx, 2930228561Snp struct sysctl_oid *oid) 2931218792Snp{ 2932218792Snp int rc; 2933218792Snp struct sysctl_oid_list *children; 2934218792Snp char name[16]; 2935218792Snp 2936239258Snp rc = alloc_iq_fl(pi, &rxq->iq, &rxq->fl, intr_idx, tnl_cong(pi)); 2937218792Snp if (rc != 0) 2938218792Snp return (rc); 2939218792Snp 2940270297Snp /* 2941270297Snp * The freelist is just barely above the starvation threshold right now, 2942270297Snp * fill it up a bit more. 
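 * alloc_iq_fl refilled only up to fl->lowat; top the freelist up with
 * more buffers here before the queue goes into service.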
2943270297Snp */ 2944222701Snp FL_LOCK(&rxq->fl); 2945270297Snp refill_fl(pi->adapter, &rxq->fl, 128); 2946222701Snp FL_UNLOCK(&rxq->fl); 2947222701Snp 2948237819Snp#if defined(INET) || defined(INET6) 2949218792Snp rc = tcp_lro_init(&rxq->lro); 2950218792Snp if (rc != 0) 2951218792Snp return (rc); 2952218792Snp rxq->lro.ifp = pi->ifp; /* also indicates LRO init'ed */ 2953218792Snp 2954218792Snp if (pi->ifp->if_capenable & IFCAP_LRO) 2955228561Snp rxq->iq.flags |= IQ_LRO_ENABLED; 2956218792Snp#endif 2957219289Snp rxq->ifp = pi->ifp; 2958218792Snp 2959228561Snp children = SYSCTL_CHILDREN(oid); 2960218792Snp 2961218792Snp snprintf(name, sizeof(name), "%d", idx); 2962218792Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 2963218792Snp NULL, "rx queue"); 2964218792Snp children = SYSCTL_CHILDREN(oid); 2965218792Snp 2966221911Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 2967222510Snp CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.abs_id, 0, sysctl_uint16, "I", 2968221911Snp "absolute id of the queue"); 2969222973Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 2970222973Snp CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cntxt_id, 0, sysctl_uint16, "I", 2971222973Snp "SGE context id of the queue"); 2972222973Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 2973222973Snp CTLTYPE_INT | CTLFLAG_RD, &rxq->iq.cidx, 0, sysctl_uint16, "I", 2974222973Snp "consumer index"); 2975237819Snp#if defined(INET) || defined(INET6) 2976218792Snp SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 2977218792Snp &rxq->lro.lro_queued, 0, NULL); 2978218792Snp SYSCTL_ADD_INT(&pi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 2979218792Snp &rxq->lro.lro_flushed, 0, NULL); 2980219290Snp#endif 2981218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 2982218792Snp &rxq->rxcsum, "# of times hardware assisted with checksum"); 2983218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_extraction", 2984218792Snp CTLFLAG_RD, &rxq->vlan_extraction, 2985218792Snp "# of times hardware extracted 802.1Q tag"); 2986218792Snp 2987265425Snp add_fl_sysctls(&pi->ctx, oid, &rxq->fl); 2988222973Snp 2989218792Snp return (rc); 2990218792Snp} 2991218792Snp 2992218792Snpstatic int 2993218792Snpfree_rxq(struct port_info *pi, struct sge_rxq *rxq) 2994218792Snp{ 2995218792Snp int rc; 2996218792Snp 2997237819Snp#if defined(INET) || defined(INET6) 2998218792Snp if (rxq->lro.ifp) { 2999218792Snp tcp_lro_free(&rxq->lro); 3000218792Snp rxq->lro.ifp = NULL; 3001218792Snp } 3002218792Snp#endif 3003218792Snp 3004218792Snp rc = free_iq_fl(pi, &rxq->iq, &rxq->fl); 3005218792Snp if (rc == 0) 3006218792Snp bzero(rxq, sizeof(*rxq)); 3007218792Snp 3008218792Snp return (rc); 3009218792Snp} 3010218792Snp 3011237263Snp#ifdef TCP_OFFLOAD 3012218792Snpstatic int 3013228561Snpalloc_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq, 3014228561Snp int intr_idx, int idx, struct sysctl_oid *oid) 3015220873Snp{ 3016228561Snp int rc; 3017228561Snp struct sysctl_oid_list *children; 3018220873Snp char name[16]; 3019220873Snp 3020228561Snp rc = alloc_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 3021265410Snp pi->rx_chan_map); 3022228561Snp if (rc != 0) 3023220873Snp return (rc); 3024220873Snp 3025228561Snp children = SYSCTL_CHILDREN(oid); 3026220873Snp 3027228561Snp snprintf(name, sizeof(name), "%d", idx); 3028228561Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3029228561Snp NULL, "rx queue"); 3030228561Snp children = 
SYSCTL_CHILDREN(oid); 3031228561Snp 3032228561Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "abs_id", 3033228561Snp CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.abs_id, 0, sysctl_uint16, 3034228561Snp "I", "absolute id of the queue"); 3035228561Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cntxt_id", 3036228561Snp CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cntxt_id, 0, sysctl_uint16, 3037228561Snp "I", "SGE context id of the queue"); 3038228561Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3039228561Snp CTLTYPE_INT | CTLFLAG_RD, &ofld_rxq->iq.cidx, 0, sysctl_uint16, "I", 3040228561Snp "consumer index"); 3041228561Snp 3042265425Snp add_fl_sysctls(&pi->ctx, oid, &ofld_rxq->fl); 3043228561Snp 3044228561Snp return (rc); 3045228561Snp} 3046228561Snp 3047228561Snpstatic int 3048228561Snpfree_ofld_rxq(struct port_info *pi, struct sge_ofld_rxq *ofld_rxq) 3049228561Snp{ 3050228561Snp int rc; 3051228561Snp 3052228561Snp rc = free_iq_fl(pi, &ofld_rxq->iq, &ofld_rxq->fl); 3053228561Snp if (rc == 0) 3054228561Snp bzero(ofld_rxq, sizeof(*ofld_rxq)); 3055228561Snp 3056228561Snp return (rc); 3057228561Snp} 3058228561Snp#endif 3059228561Snp 3060270297Snp#ifdef DEV_NETMAP 3061228561Snpstatic int 3062270297Snpalloc_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq, int intr_idx, 3063270297Snp int idx, struct sysctl_oid *oid) 3064270297Snp{ 3065270297Snp int rc; 3066270297Snp struct sysctl_oid_list *children; 3067270297Snp struct sysctl_ctx_list *ctx; 3068270297Snp char name[16]; 3069270297Snp size_t len; 3070270297Snp struct adapter *sc = pi->adapter; 3071270297Snp struct netmap_adapter *na = NA(pi->nm_ifp); 3072270297Snp 3073270297Snp MPASS(na != NULL); 3074270297Snp 3075270297Snp len = pi->qsize_rxq * IQ_ESIZE; 3076270297Snp rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 3077270297Snp &nm_rxq->iq_ba, (void **)&nm_rxq->iq_desc); 3078270297Snp if (rc != 0) 3079270297Snp return (rc); 3080270297Snp 3081270297Snp len = na->num_rx_desc * EQ_ESIZE + spg_len; 3082270297Snp rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 3083270297Snp &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 3084270297Snp if (rc != 0) 3085270297Snp return (rc); 3086270297Snp 3087270297Snp nm_rxq->pi = pi; 3088270297Snp nm_rxq->nid = idx; 3089270297Snp nm_rxq->iq_cidx = 0; 3090270297Snp nm_rxq->iq_sidx = pi->qsize_rxq - spg_len / IQ_ESIZE; 3091270297Snp nm_rxq->iq_gen = F_RSPD_GEN; 3092270297Snp nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 3093270297Snp nm_rxq->fl_sidx = na->num_rx_desc; 3094270297Snp nm_rxq->intr_idx = intr_idx; 3095270297Snp 3096270297Snp ctx = &pi->ctx; 3097270297Snp children = SYSCTL_CHILDREN(oid); 3098270297Snp 3099270297Snp snprintf(name, sizeof(name), "%d", idx); 3100270297Snp oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 3101270297Snp "rx queue"); 3102270297Snp children = SYSCTL_CHILDREN(oid); 3103270297Snp 3104270297Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3105270297Snp CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 3106270297Snp "I", "absolute id of the queue"); 3107270297Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3108270297Snp CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 3109270297Snp "I", "SGE context id of the queue"); 3110270297Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3111270297Snp CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 3112270297Snp "consumer index"); 3113270297Snp 3114270297Snp children = SYSCTL_CHILDREN(oid); 3115270297Snp oid = 
SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3116270297Snp "freelist"); 3117270297Snp children = SYSCTL_CHILDREN(oid); 3118270297Snp 3119270297Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3120270297Snp CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 3121270297Snp "I", "SGE context id of the freelist"); 3122270297Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 3123270297Snp &nm_rxq->fl_cidx, 0, "consumer index"); 3124270297Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 3125270297Snp &nm_rxq->fl_pidx, 0, "producer index"); 3126270297Snp 3127270297Snp return (rc); 3128270297Snp} 3129270297Snp 3130270297Snp 3131270297Snpstatic int 3132270297Snpfree_nm_rxq(struct port_info *pi, struct sge_nm_rxq *nm_rxq) 3133270297Snp{ 3134270297Snp struct adapter *sc = pi->adapter; 3135270297Snp 3136270297Snp free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 3137270297Snp nm_rxq->iq_desc); 3138270297Snp free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 3139270297Snp nm_rxq->fl_desc); 3140270297Snp 3141270297Snp return (0); 3142270297Snp} 3143270297Snp 3144270297Snpstatic int 3145270297Snpalloc_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 3146270297Snp struct sysctl_oid *oid) 3147270297Snp{ 3148270297Snp int rc; 3149270297Snp size_t len; 3150270297Snp struct adapter *sc = pi->adapter; 3151270297Snp struct netmap_adapter *na = NA(pi->nm_ifp); 3152270297Snp char name[16]; 3153270297Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3154270297Snp 3155270297Snp len = na->num_tx_desc * EQ_ESIZE + spg_len; 3156270297Snp rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 3157270297Snp &nm_txq->ba, (void **)&nm_txq->desc); 3158270297Snp if (rc) 3159270297Snp return (rc); 3160270297Snp 3161270297Snp nm_txq->pidx = nm_txq->cidx = 0; 3162270297Snp nm_txq->sidx = na->num_tx_desc; 3163270297Snp nm_txq->nid = idx; 3164270297Snp nm_txq->iqidx = iqidx; 3165270297Snp nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3166270297Snp V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf)); 3167270297Snp 3168270297Snp snprintf(name, sizeof(name), "%d", idx); 3169270297Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3170270297Snp NULL, "netmap tx queue"); 3171270297Snp children = SYSCTL_CHILDREN(oid); 3172270297Snp 3173270297Snp SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3174270297Snp &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 3175270297Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3176270297Snp CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 3177270297Snp "consumer index"); 3178270297Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 3179270297Snp CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 3180270297Snp "producer index"); 3181270297Snp 3182270297Snp return (rc); 3183270297Snp} 3184270297Snp 3185270297Snpstatic int 3186270297Snpfree_nm_txq(struct port_info *pi, struct sge_nm_txq *nm_txq) 3187270297Snp{ 3188270297Snp struct adapter *sc = pi->adapter; 3189270297Snp 3190270297Snp free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 3191270297Snp nm_txq->desc); 3192270297Snp 3193270297Snp return (0); 3194270297Snp} 3195270297Snp#endif 3196270297Snp 3197270297Snpstatic int 3198228561Snpctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 3199228561Snp{ 3200228561Snp int rc, cntxt_id; 3201228561Snp struct fw_eq_ctrl_cmd c; 3202284052Snp 
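/*
 * Note: the queue size programmed into the firmware counts the eq->sidx
 * descriptors of the ring plus the descriptors taken up by the status page
 * (spg_len bytes) that sits at the end of the ring.
 */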
int qsize = eq->sidx + spg_len / EQ_ESIZE; 3203228561Snp 3204220873Snp bzero(&c, sizeof(c)); 3205220873Snp 3206220873Snp c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 3207220873Snp F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 3208220873Snp V_FW_EQ_CTRL_CMD_VFN(0)); 3209220873Snp c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 3210220873Snp F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 3211284052Snp c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 3212220873Snp c.physeqid_pkd = htobe32(0); 3213220873Snp c.fetchszm_to_iqid = 3214284052Snp htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3215228561Snp V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 3216222510Snp F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 3217220873Snp c.dcaen_to_eqsize = 3218220873Snp htobe32(V_FW_EQ_CTRL_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3219220873Snp V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3220284052Snp V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 3221220873Snp c.eqaddr = htobe64(eq->ba); 3222220873Snp 3223220873Snp rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3224220873Snp if (rc != 0) { 3225220873Snp device_printf(sc->dev, 3226228561Snp "failed to create control queue %d: %d\n", eq->tx_chan, rc); 3227220873Snp return (rc); 3228220873Snp } 3229228561Snp eq->flags |= EQ_ALLOCATED; 3230220873Snp 3231220873Snp eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 3232228561Snp cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3233228561Snp if (cntxt_id >= sc->sge.neq) 3234228561Snp panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3235228561Snp cntxt_id, sc->sge.neq - 1); 3236228561Snp sc->sge.eqmap[cntxt_id] = eq; 3237220873Snp 3238228561Snp return (rc); 3239228561Snp} 3240228561Snp 3241228561Snpstatic int 3242228561Snpeth_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 3243228561Snp{ 3244228561Snp int rc, cntxt_id; 3245228561Snp struct fw_eq_eth_cmd c; 3246284052Snp int qsize = eq->sidx + spg_len / EQ_ESIZE; 3247228561Snp 3248228561Snp bzero(&c, sizeof(c)); 3249228561Snp 3250228561Snp c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 3251228561Snp F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 3252228561Snp V_FW_EQ_ETH_CMD_VFN(0)); 3253228561Snp c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 3254228561Snp F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 3255284052Snp c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 3256284052Snp F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid)); 3257228561Snp c.fetchszm_to_iqid = 3258284052Snp htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3259228561Snp V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 3260228561Snp V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 3261228561Snp c.dcaen_to_eqsize = htobe32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3262284052Snp V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3263284052Snp V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 3264228561Snp c.eqaddr = htobe64(eq->ba); 3265228561Snp 3266228561Snp rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3267228561Snp if (rc != 0) { 3268228561Snp device_printf(pi->dev, 3269228561Snp "failed to create Ethernet egress queue: %d\n", rc); 3270228561Snp return (rc); 3271228561Snp } 3272228561Snp eq->flags |= EQ_ALLOCATED; 3273228561Snp 3274228561Snp eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 3275220873Snp cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3276228561Snp if (cntxt_id >= sc->sge.neq) 3277228561Snp panic("%s: eq->cntxt_id (%d) more 
than the max (%d)", __func__, 3278228561Snp cntxt_id, sc->sge.neq - 1); 3279220873Snp sc->sge.eqmap[cntxt_id] = eq; 3280220873Snp 3281228561Snp return (rc); 3282228561Snp} 3283220873Snp 3284237263Snp#ifdef TCP_OFFLOAD 3285228561Snpstatic int 3286228561Snpofld_eq_alloc(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 3287228561Snp{ 3288228561Snp int rc, cntxt_id; 3289228561Snp struct fw_eq_ofld_cmd c; 3290284052Snp int qsize = eq->sidx + spg_len / EQ_ESIZE; 3291220873Snp 3292228561Snp bzero(&c, sizeof(c)); 3293220873Snp 3294228561Snp c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 3295228561Snp F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 3296228561Snp V_FW_EQ_OFLD_CMD_VFN(0)); 3297228561Snp c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 3298228561Snp F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 3299228561Snp c.fetchszm_to_iqid = 3300284052Snp htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 3301228561Snp V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 3302228561Snp F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 3303228561Snp c.dcaen_to_eqsize = 3304228561Snp htobe32(V_FW_EQ_OFLD_CMD_FBMIN(X_FETCHBURSTMIN_64B) | 3305228561Snp V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 3306284052Snp V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 3307228561Snp c.eqaddr = htobe64(eq->ba); 3308228561Snp 3309228561Snp rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3310228561Snp if (rc != 0) { 3311228561Snp device_printf(pi->dev, 3312228561Snp "failed to create egress queue for TCP offload: %d\n", rc); 3313228561Snp return (rc); 3314228561Snp } 3315228561Snp eq->flags |= EQ_ALLOCATED; 3316228561Snp 3317228561Snp eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 3318228561Snp cntxt_id = eq->cntxt_id - sc->sge.eq_start; 3319228561Snp if (cntxt_id >= sc->sge.neq) 3320228561Snp panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 3321228561Snp cntxt_id, sc->sge.neq - 1); 3322228561Snp sc->sge.eqmap[cntxt_id] = eq; 3323228561Snp 3324220873Snp return (rc); 3325220873Snp} 3326228561Snp#endif 3327220873Snp 3328220873Snpstatic int 3329228561Snpalloc_eq(struct adapter *sc, struct port_info *pi, struct sge_eq *eq) 3330220873Snp{ 3331284052Snp int rc, qsize; 3332228561Snp size_t len; 3333220873Snp 3334228561Snp mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 3335228561Snp 3336284052Snp qsize = eq->sidx + spg_len / EQ_ESIZE; 3337284052Snp len = qsize * EQ_ESIZE; 3338228561Snp rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 3339228561Snp &eq->ba, (void **)&eq->desc); 3340228561Snp if (rc) 3341228561Snp return (rc); 3342228561Snp 3343228561Snp eq->pidx = eq->cidx = 0; 3344284052Snp eq->equeqidx = eq->dbidx = 0; 3345248925Snp eq->doorbells = sc->doorbells; 3346228561Snp 3347228561Snp switch (eq->flags & EQ_TYPEMASK) { 3348228561Snp case EQ_CTRL: 3349228561Snp rc = ctrl_eq_alloc(sc, eq); 3350228561Snp break; 3351228561Snp 3352228561Snp case EQ_ETH: 3353228561Snp rc = eth_eq_alloc(sc, pi, eq); 3354228561Snp break; 3355228561Snp 3356237263Snp#ifdef TCP_OFFLOAD 3357228561Snp case EQ_OFLD: 3358228561Snp rc = ofld_eq_alloc(sc, pi, eq); 3359228561Snp break; 3360228561Snp#endif 3361228561Snp 3362228561Snp default: 3363228561Snp panic("%s: invalid eq type %d.", __func__, 3364228561Snp eq->flags & EQ_TYPEMASK); 3365228561Snp } 3366228561Snp if (rc != 0) { 3367228561Snp device_printf(sc->dev, 3368269082Snp "failed to allocate egress queue(%d): %d\n", 3369228561Snp eq->flags & EQ_TYPEMASK, rc); 3370228561Snp } 3371228561Snp 3372248925Snp if 
(isset(&eq->doorbells, DOORBELL_UDB) || 3373248925Snp isset(&eq->doorbells, DOORBELL_UDBWC) || 3374249392Snp isset(&eq->doorbells, DOORBELL_WCWR)) { 3375256794Snp uint32_t s_qpp = sc->sge.eq_s_qpp; 3376248925Snp uint32_t mask = (1 << s_qpp) - 1; 3377248925Snp volatile uint8_t *udb; 3378248925Snp 3379248925Snp udb = sc->udbs_base + UDBS_DB_OFFSET; 3380248925Snp udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 3381248925Snp eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 3382270297Snp if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 3383249392Snp clrbit(&eq->doorbells, DOORBELL_WCWR); 3384248925Snp else { 3385248925Snp udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 3386248925Snp eq->udb_qid = 0; 3387248925Snp } 3388248925Snp eq->udb = (volatile void *)udb; 3389248925Snp } 3390248925Snp 3391228561Snp return (rc); 3392228561Snp} 3393228561Snp 3394228561Snpstatic int 3395228561Snpfree_eq(struct adapter *sc, struct sge_eq *eq) 3396228561Snp{ 3397228561Snp int rc; 3398228561Snp 3399228561Snp if (eq->flags & EQ_ALLOCATED) { 3400228561Snp switch (eq->flags & EQ_TYPEMASK) { 3401228561Snp case EQ_CTRL: 3402228561Snp rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 3403228561Snp eq->cntxt_id); 3404228561Snp break; 3405228561Snp 3406228561Snp case EQ_ETH: 3407228561Snp rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 3408228561Snp eq->cntxt_id); 3409228561Snp break; 3410228561Snp 3411237263Snp#ifdef TCP_OFFLOAD 3412228561Snp case EQ_OFLD: 3413228561Snp rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 3414228561Snp eq->cntxt_id); 3415228561Snp break; 3416228561Snp#endif 3417228561Snp 3418228561Snp default: 3419228561Snp panic("%s: invalid eq type %d.", __func__, 3420228561Snp eq->flags & EQ_TYPEMASK); 3421228561Snp } 3422220873Snp if (rc != 0) { 3423220873Snp device_printf(sc->dev, 3424228561Snp "failed to free egress queue (%d): %d\n", 3425228561Snp eq->flags & EQ_TYPEMASK, rc); 3426220873Snp return (rc); 3427220873Snp } 3428228561Snp eq->flags &= ~EQ_ALLOCATED; 3429220873Snp } 3430220873Snp 3431220873Snp free_ring(sc, eq->desc_tag, eq->desc_map, eq->ba, eq->desc); 3432220873Snp 3433220873Snp if (mtx_initialized(&eq->eq_lock)) 3434220873Snp mtx_destroy(&eq->eq_lock); 3435220873Snp 3436228561Snp bzero(eq, sizeof(*eq)); 3437220873Snp return (0); 3438220873Snp} 3439220873Snp 3440220873Snpstatic int 3441228561Snpalloc_wrq(struct adapter *sc, struct port_info *pi, struct sge_wrq *wrq, 3442228561Snp struct sysctl_oid *oid) 3443218792Snp{ 3444228561Snp int rc; 3445228561Snp struct sysctl_ctx_list *ctx = pi ? 
&pi->ctx : &sc->ctx; 3446228561Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3447228561Snp 3448228561Snp rc = alloc_eq(sc, pi, &wrq->eq); 3449228561Snp if (rc) 3450228561Snp return (rc); 3451228561Snp 3452228561Snp wrq->adapter = sc; 3453284052Snp TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 3454284052Snp TAILQ_INIT(&wrq->incomplete_wrs); 3455237263Snp STAILQ_INIT(&wrq->wr_list); 3456284052Snp wrq->nwr_pending = 0; 3457284052Snp wrq->ndesc_needed = 0; 3458228561Snp 3459228561Snp SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3460228561Snp &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 3461228561Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3462228561Snp CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 3463228561Snp "consumer index"); 3464228561Snp SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 3465228561Snp CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 3466228561Snp "producer index"); 3467284052Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 3468284052Snp &wrq->tx_wrs_direct, "# of work requests (direct)"); 3469284052Snp SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 3470284052Snp &wrq->tx_wrs_copied, "# of work requests (copied)"); 3471228561Snp 3472228561Snp return (rc); 3473228561Snp} 3474228561Snp 3475228561Snpstatic int 3476228561Snpfree_wrq(struct adapter *sc, struct sge_wrq *wrq) 3477228561Snp{ 3478228561Snp int rc; 3479228561Snp 3480228561Snp rc = free_eq(sc, &wrq->eq); 3481228561Snp if (rc) 3482228561Snp return (rc); 3483228561Snp 3484228561Snp bzero(wrq, sizeof(*wrq)); 3485228561Snp return (0); 3486228561Snp} 3487228561Snp 3488228561Snpstatic int 3489228561Snpalloc_txq(struct port_info *pi, struct sge_txq *txq, int idx, 3490228561Snp struct sysctl_oid *oid) 3491228561Snp{ 3492228561Snp int rc; 3493218792Snp struct adapter *sc = pi->adapter; 3494218792Snp struct sge_eq *eq = &txq->eq; 3495218792Snp char name[16]; 3496228561Snp struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3497218792Snp 3498284052Snp rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 3499284052Snp M_CXGBE, M_WAITOK); 3500218792Snp if (rc != 0) { 3501284052Snp device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 3502218792Snp return (rc); 3503218792Snp } 3504218792Snp 3505284052Snp rc = alloc_eq(sc, pi, eq); 3506218792Snp if (rc != 0) { 3507284052Snp mp_ring_free(txq->r); 3508284052Snp txq->r = NULL; 3509218792Snp return (rc); 3510218792Snp } 3511218792Snp 3512284052Snp /* Can't fail after this point. 
*/ 3513284052Snp 3514284052Snp TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 3515284052Snp txq->ifp = pi->ifp; 3516284052Snp txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 3517284052Snp txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3518284052Snp V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf)); 3519284052Snp txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 3520284052Snp M_ZERO | M_WAITOK); 3521284052Snp 3522218792Snp snprintf(name, sizeof(name), "%d", idx); 3523218792Snp oid = SYSCTL_ADD_NODE(&pi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3524218792Snp NULL, "tx queue"); 3525218792Snp children = SYSCTL_CHILDREN(oid); 3526218792Snp 3527222973Snp SYSCTL_ADD_UINT(&pi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3528222973Snp &eq->cntxt_id, 0, "SGE context id of the queue"); 3529222973Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "cidx", 3530222973Snp CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 3531222973Snp "consumer index"); 3532222973Snp SYSCTL_ADD_PROC(&pi->ctx, children, OID_AUTO, "pidx", 3533222973Snp CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 3534222973Snp "producer index"); 3535222973Snp 3536218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 3537218792Snp &txq->txcsum, "# of times hardware assisted with checksum"); 3538218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "vlan_insertion", 3539218792Snp CTLFLAG_RD, &txq->vlan_insertion, 3540218792Snp "# of times hardware inserted 802.1Q tag"); 3541218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 3542237819Snp &txq->tso_wrs, "# of TSO work requests"); 3543218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 3544218792Snp &txq->imm_wrs, "# of work requests with immediate data"); 3545218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 3546218792Snp &txq->sgl_wrs, "# of work requests with direct SGL"); 3547218792Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 3548218792Snp &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 3549284052Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts0_wrs", 3550284052Snp CTLFLAG_RD, &txq->txpkts0_wrs, 3551284052Snp "# of txpkts (type 0) work requests"); 3552284052Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts1_wrs", 3553284052Snp CTLFLAG_RD, &txq->txpkts1_wrs, 3554284052Snp "# of txpkts (type 1) work requests"); 3555284052Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts0_pkts", 3556284052Snp CTLFLAG_RD, &txq->txpkts0_pkts, 3557284052Snp "# of frames tx'd using type0 txpkts work requests"); 3558284052Snp SYSCTL_ADD_UQUAD(&pi->ctx, children, OID_AUTO, "txpkts1_pkts", 3559284052Snp CTLFLAG_RD, &txq->txpkts1_pkts, 3560284052Snp "# of frames tx'd using type1 txpkts work requests"); 3561218792Snp 3562284052Snp SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_enqueues", 3563284052Snp CTLFLAG_RD, &txq->r->enqueues, 3564284052Snp "# of enqueues to the mp_ring for this queue"); 3565284052Snp SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_drops", 3566284052Snp CTLFLAG_RD, &txq->r->drops, 3567284052Snp "# of drops in the mp_ring for this queue"); 3568284052Snp SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_starts", 3569284052Snp CTLFLAG_RD, &txq->r->starts, 3570284052Snp "# of normal consumer starts in the mp_ring for this queue"); 3571284052Snp SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_stalls", 3572284052Snp CTLFLAG_RD, 
&txq->r->stalls, 3573284052Snp "# of consumer stalls in the mp_ring for this queue"); 3574284052Snp SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_restarts", 3575284052Snp CTLFLAG_RD, &txq->r->restarts, 3576284052Snp "# of consumer restarts in the mp_ring for this queue"); 3577284052Snp SYSCTL_ADD_COUNTER_U64(&pi->ctx, children, OID_AUTO, "r_abdications", 3578284052Snp CTLFLAG_RD, &txq->r->abdications, 3579284052Snp "# of consumer abdications in the mp_ring for this queue"); 3580218792Snp 3581284052Snp return (0); 3582218792Snp} 3583218792Snp 3584218792Snpstatic int 3585218792Snpfree_txq(struct port_info *pi, struct sge_txq *txq) 3586218792Snp{ 3587218792Snp int rc; 3588218792Snp struct adapter *sc = pi->adapter; 3589218792Snp struct sge_eq *eq = &txq->eq; 3590218792Snp 3591228561Snp rc = free_eq(sc, eq); 3592228561Snp if (rc) 3593228561Snp return (rc); 3594220649Snp 3595284052Snp sglist_free(txq->gl); 3596220873Snp free(txq->sdesc, M_CXGBE); 3597284052Snp mp_ring_free(txq->r); 3598218792Snp 3599218792Snp bzero(txq, sizeof(*txq)); 3600218792Snp return (0); 3601218792Snp} 3602218792Snp 3603218792Snpstatic void 3604218792Snponeseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 3605218792Snp{ 3606218792Snp bus_addr_t *ba = arg; 3607218792Snp 3608218792Snp KASSERT(nseg == 1, 3609218792Snp ("%s meant for single segment mappings only.", __func__)); 3610218792Snp 3611218792Snp *ba = error ? 0 : segs->ds_addr; 3612218792Snp} 3613218792Snp 3614218792Snpstatic inline void 3615218792Snpring_fl_db(struct adapter *sc, struct sge_fl *fl) 3616218792Snp{ 3617270297Snp uint32_t n, v; 3618218792Snp 3619270297Snp n = IDXDIFF(fl->pidx / 8, fl->dbidx, fl->sidx); 3620270297Snp MPASS(n > 0); 3621218792Snp 3622218792Snp wmb(); 3623270297Snp v = fl->dbval | V_PIDX(n); 3624270297Snp if (fl->udb) 3625270297Snp *fl->udb = htole32(v); 3626270297Snp else 3627270297Snp t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v); 3628270297Snp IDXINCR(fl->dbidx, n, fl->sidx); 3629218792Snp} 3630218792Snp 3631220905Snp/* 3632270297Snp * Fills up the freelist by allocating up to 'n' buffers. Buffers that are 3633270297Snp * recycled do not count towards this allocation budget. 3634228561Snp * 3635270297Snp * Returns non-zero to indicate that this freelist should be added to the list 3636270297Snp * of starving freelists. 3637220905Snp */ 3638228561Snpstatic int 3639270297Snprefill_fl(struct adapter *sc, struct sge_fl *fl, int n) 3640218792Snp{ 3641270297Snp __be64 *d; 3642270297Snp struct fl_sdesc *sd; 3643265425Snp uintptr_t pa; 3644218792Snp caddr_t cl; 3645270297Snp struct cluster_layout *cll; 3646270297Snp struct sw_zone_info *swz; 3647265425Snp struct cluster_metadata *clm; 3648270297Snp uint16_t max_pidx; 3649270297Snp uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 3650218792Snp 3651218792Snp FL_LOCK_ASSERT_OWNED(fl); 3652218792Snp 3653270297Snp /* 3654270297Snp * We always stop at the beginning of the hardware descriptor that's just 3655270297Snp * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 3656270297Snp * which would mean an empty freelist to the chip. 3657270297Snp */ 3658270297Snp max_pidx = __predict_false(hw_cidx == 0) ? 
fl->sidx - 1 : hw_cidx - 1; 3659270297Snp if (fl->pidx == max_pidx * 8) 3660270297Snp return (0); 3661218792Snp 3662270297Snp d = &fl->desc[fl->pidx]; 3663270297Snp sd = &fl->sdesc[fl->pidx]; 3664270297Snp cll = &fl->cll_def; /* default layout */ 3665270297Snp swz = &sc->sge.sw_zone_info[cll->zidx]; 3666218792Snp 3667270297Snp while (n > 0) { 3668270297Snp 3669218792Snp if (sd->cl != NULL) { 3670218792Snp 3671269356Snp if (sd->nmbuf == 0) { 3672255050Snp /* 3673265425Snp * Fast recycle without involving any atomics on 3674265425Snp * the cluster's metadata (if the cluster has 3675265425Snp * metadata). This happens when all frames 3676265425Snp * received in the cluster were small enough to 3677265425Snp * fit within a single mbuf each. 3678255050Snp */ 3679265425Snp fl->cl_fast_recycled++; 3680267694Snp#ifdef INVARIANTS 3681267694Snp clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3682267694Snp if (clm != NULL) 3683267694Snp MPASS(clm->refcount == 1); 3684267694Snp#endif 3685265425Snp goto recycled_fast; 3686255050Snp } 3687218792Snp 3688218792Snp /* 3689265425Snp * Cluster is guaranteed to have metadata. Clusters 3690265425Snp * without metadata always take the fast recycle path 3691265425Snp * when they're recycled. 3692218792Snp */ 3693265425Snp clm = cl_metadata(sc, fl, &sd->cll, sd->cl); 3694265425Snp MPASS(clm != NULL); 3695265425Snp 3696265425Snp if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3697265425Snp fl->cl_recycled++; 3698269356Snp counter_u64_add(extfree_rels, 1); 3699265425Snp goto recycled; 3700218792Snp } 3701265425Snp sd->cl = NULL; /* gave up my reference */ 3702218792Snp } 3703265425Snp MPASS(sd->cl == NULL); 3704265425Snpalloc: 3705265425Snp cl = uma_zalloc(swz->zone, M_NOWAIT); 3706265425Snp if (__predict_false(cl == NULL)) { 3707265425Snp if (cll == &fl->cll_alt || fl->cll_alt.zidx == -1 || 3708265425Snp fl->cll_def.zidx == fl->cll_alt.zidx) 3709265425Snp break; 3710218792Snp 3711265425Snp /* fall back to the safe zone */ 3712265425Snp cll = &fl->cll_alt; 3713265425Snp swz = &sc->sge.sw_zone_info[cll->zidx]; 3714265425Snp goto alloc; 3715255050Snp } 3716265425Snp fl->cl_allocated++; 3717270297Snp n--; 3718218792Snp 3719265425Snp pa = pmap_kextract((vm_offset_t)cl); 3720265425Snp pa += cll->region1; 3721218792Snp sd->cl = cl; 3722265425Snp sd->cll = *cll; 3723265425Snp *d = htobe64(pa | cll->hwidx); 3724265425Snp clm = cl_metadata(sc, fl, cll, cl); 3725265425Snp if (clm != NULL) { 3726265425Snprecycled: 3727218792Snp#ifdef INVARIANTS 3728265425Snp clm->sd = sd; 3729218792Snp#endif 3730265425Snp clm->refcount = 1; 3731265425Snp } 3732269356Snp sd->nmbuf = 0; 3733265425Snprecycled_fast: 3734265425Snp d++; 3735218792Snp sd++; 3736270297Snp if (__predict_false(++fl->pidx % 8 == 0)) { 3737270297Snp uint16_t pidx = fl->pidx / 8; 3738270297Snp 3739270297Snp if (__predict_false(pidx == fl->sidx)) { 3740270297Snp fl->pidx = 0; 3741270297Snp pidx = 0; 3742270297Snp sd = fl->sdesc; 3743270297Snp d = fl->desc; 3744270297Snp } 3745270297Snp if (pidx == max_pidx) 3746270297Snp break; 3747270297Snp 3748270297Snp if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 3749270297Snp ring_fl_db(sc, fl); 3750218792Snp } 3751218792Snp } 3752220905Snp 3753270297Snp if (fl->pidx / 8 != fl->dbidx) 3754220905Snp ring_fl_db(sc, fl); 3755228561Snp 3756228561Snp return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 3757218792Snp} 3758218792Snp 3759228561Snp/* 3760228561Snp * Attempt to refill all starving freelists. 
3761228561Snp */ 3762228561Snpstatic void 3763228561Snprefill_sfl(void *arg) 3764228561Snp{ 3765228561Snp struct adapter *sc = arg; 3766228561Snp struct sge_fl *fl, *fl_temp; 3767228561Snp 3768228561Snp mtx_lock(&sc->sfl_lock); 3769228561Snp TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 3770228561Snp FL_LOCK(fl); 3771228561Snp refill_fl(sc, fl, 64); 3772228561Snp if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 3773228561Snp TAILQ_REMOVE(&sc->sfl, fl, link); 3774228561Snp fl->flags &= ~FL_STARVING; 3775228561Snp } 3776228561Snp FL_UNLOCK(fl); 3777228561Snp } 3778228561Snp 3779228561Snp if (!TAILQ_EMPTY(&sc->sfl)) 3780228561Snp callout_schedule(&sc->sfl_callout, hz / 5); 3781228561Snp mtx_unlock(&sc->sfl_lock); 3782228561Snp} 3783228561Snp 3784218792Snpstatic int 3785218792Snpalloc_fl_sdesc(struct sge_fl *fl) 3786218792Snp{ 3787218792Snp 3788270297Snp fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 3789218792Snp M_ZERO | M_WAITOK); 3790218792Snp 3791218792Snp return (0); 3792218792Snp} 3793218792Snp 3794218792Snpstatic void 3795255050Snpfree_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 3796218792Snp{ 3797218792Snp struct fl_sdesc *sd; 3798265425Snp struct cluster_metadata *clm; 3799265425Snp struct cluster_layout *cll; 3800218792Snp int i; 3801218792Snp 3802218792Snp sd = fl->sdesc; 3803270297Snp for (i = 0; i < fl->sidx * 8; i++, sd++) { 3804265425Snp if (sd->cl == NULL) 3805265425Snp continue; 3806218792Snp 3807265425Snp cll = &sd->cll; 3808265425Snp clm = cl_metadata(sc, fl, cll, sd->cl); 3809269356Snp if (sd->nmbuf == 0) 3810265425Snp uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3811269356Snp else if (clm && atomic_fetchadd_int(&clm->refcount, -1) == 1) { 3812269356Snp uma_zfree(sc->sge.sw_zone_info[cll->zidx].zone, sd->cl); 3813269356Snp counter_u64_add(extfree_rels, 1); 3814218792Snp } 3815265425Snp sd->cl = NULL; 3816218792Snp } 3817218792Snp 3818218792Snp free(fl->sdesc, M_CXGBE); 3819218792Snp fl->sdesc = NULL; 3820218792Snp} 3821218792Snp 3822284052Snpstatic inline void 3823284052Snpget_pkt_gl(struct mbuf *m, struct sglist *gl) 3824218792Snp{ 3825284052Snp int rc; 3826218792Snp 3827284052Snp M_ASSERTPKTHDR(m); 3828218792Snp 3829284052Snp sglist_reset(gl); 3830284052Snp rc = sglist_append_mbuf(gl, m); 3831284052Snp if (__predict_false(rc != 0)) { 3832284052Snp panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 3833284052Snp "with %d.", __func__, m, mbuf_nsegs(m), rc); 3834218792Snp } 3835218792Snp 3836284052Snp KASSERT(gl->sg_nseg == mbuf_nsegs(m), 3837284052Snp ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m, 3838284052Snp mbuf_nsegs(m), gl->sg_nseg)); 3839284052Snp KASSERT(gl->sg_nseg > 0 && 3840284052Snp gl->sg_nseg <= (needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS), 3841284052Snp ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 3842284052Snp gl->sg_nseg, needs_tso(m) ? TX_SGL_SEGS_TSO : TX_SGL_SEGS)); 3843218792Snp} 3844218792Snp 3845284052Snp/* 3846284052Snp * len16 for a txpkt WR with a GL. Includes the firmware work request header. 
3847284052Snp */ 3848284052Snpstatic inline u_int 3849284052Snptxpkt_len16(u_int nsegs, u_int tso) 3850218792Snp{ 3851284052Snp u_int n; 3852218792Snp 3853284052Snp MPASS(nsegs > 0); 3854218792Snp 3855284052Snp nsegs--; /* first segment is part of ulptx_sgl */ 3856284052Snp n = sizeof(struct fw_eth_tx_pkt_wr) + sizeof(struct cpl_tx_pkt_core) + 3857284052Snp sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 3858284052Snp if (tso) 3859284052Snp n += sizeof(struct cpl_tx_pkt_lso_core); 3860218792Snp 3861284052Snp return (howmany(n, 16)); 3862218792Snp} 3863218792Snp 3864218792Snp/* 3865284052Snp * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 3866284052Snp * request header. 3867218792Snp */ 3868284052Snpstatic inline u_int 3869284052Snptxpkts0_len16(u_int nsegs) 3870218792Snp{ 3871284052Snp u_int n; 3872218792Snp 3873284052Snp MPASS(nsegs > 0); 3874218792Snp 3875284052Snp nsegs--; /* first segment is part of ulptx_sgl */ 3876284052Snp n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 3877284052Snp sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 3878284052Snp 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 3879218792Snp 3880284052Snp return (howmany(n, 16)); 3881218792Snp} 3882218792Snp 3883218792Snp/* 3884284052Snp * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 3885284052Snp * request header. 3886218792Snp */ 3887284052Snpstatic inline u_int 3888284052Snptxpkts1_len16(void) 3889218792Snp{ 3890284052Snp u_int n; 3891218792Snp 3892284052Snp n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 3893218792Snp 3894284052Snp return (howmany(n, 16)); 3895284052Snp} 3896218792Snp 3897284052Snpstatic inline u_int 3898284052Snpimm_payload(u_int ndesc) 3899284052Snp{ 3900284052Snp u_int n; 3901228561Snp 3902284052Snp n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 3903284052Snp sizeof(struct cpl_tx_pkt_core); 3904218792Snp 3905284052Snp return (n); 3906218792Snp} 3907218792Snp 3908284052Snp/* 3909284052Snp * Write a txpkt WR for this packet to the hardware descriptors, update the 3910284052Snp * software descriptor, and advance the pidx. It is guaranteed that enough 3911284052Snp * descriptors are available. 3912284052Snp * 3913284052Snp * The return value is the # of hardware descriptors used. 3914284052Snp */ 3915284052Snpstatic u_int 3916284052Snpwrite_txpkt_wr(struct sge_txq *txq, struct fw_eth_tx_pkt_wr *wr, 3917284052Snp struct mbuf *m0, u_int available) 3918218792Snp{ 3919218792Snp struct sge_eq *eq = &txq->eq; 3920284052Snp struct tx_sdesc *txsd; 3921218792Snp struct cpl_tx_pkt_core *cpl; 3922218792Snp uint32_t ctrl; /* used in many unrelated places */ 3923218792Snp uint64_t ctrl1; 3924284052Snp int len16, ndesc, pktlen, nsegs; 3925218792Snp caddr_t dst; 3926218792Snp 3927218792Snp TXQ_LOCK_ASSERT_OWNED(txq); 3928284052Snp M_ASSERTPKTHDR(m0); 3929284052Snp MPASS(available > 0 && available < eq->sidx); 3930218792Snp 3931284052Snp len16 = mbuf_len16(m0); 3932284052Snp nsegs = mbuf_nsegs(m0); 3933284052Snp pktlen = m0->m_pkthdr.len; 3934218792Snp ctrl = sizeof(struct cpl_tx_pkt_core); 3935284052Snp if (needs_tso(m0)) 3936237436Snp ctrl += sizeof(struct cpl_tx_pkt_lso_core); 3937284052Snp else if (pktlen <= imm_payload(2) && available >= 2) { 3938284052Snp /* Immediate data. Recalculate len16 and set nsegs to 0. 
*/ 3939219286Snp ctrl += pktlen; 3940284052Snp len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 3941284052Snp sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 3942284052Snp nsegs = 0; 3943218792Snp } 3944284052Snp ndesc = howmany(len16, EQ_ESIZE / 16); 3945284052Snp MPASS(ndesc <= available); 3946218792Snp 3947218792Snp /* Firmware work request header */ 3948284052Snp MPASS(wr == (void *)&eq->desc[eq->pidx]); 3949218792Snp wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 3950228561Snp V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 3951220643Snp 3952284052Snp ctrl = V_FW_WR_LEN16(len16); 3953218792Snp wr->equiq_to_len16 = htobe32(ctrl); 3954218792Snp wr->r3 = 0; 3955218792Snp 3956284052Snp if (needs_tso(m0)) { 3957237436Snp struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1); 3958218792Snp 3959284052Snp KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 3960284052Snp m0->m_pkthdr.l4hlen > 0, 3961284052Snp ("%s: mbuf %p needs TSO but missing header lengths", 3962284052Snp __func__, m0)); 3963284052Snp 3964218792Snp ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | F_LSO_FIRST_SLICE | 3965284052Snp F_LSO_LAST_SLICE | V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) 3966284052Snp | V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 3967284052Snp if (m0->m_pkthdr.l2hlen == sizeof(struct ether_vlan_header)) 3968218792Snp ctrl |= V_LSO_ETHHDR_LEN(1); 3969284052Snp if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 3970237819Snp ctrl |= F_LSO_IPV6; 3971237819Snp 3972218792Snp lso->lso_ctrl = htobe32(ctrl); 3973218792Snp lso->ipid_ofst = htobe16(0); 3974284052Snp lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 3975218792Snp lso->seqno_offset = htobe32(0); 3976219286Snp lso->len = htobe32(pktlen); 3977218792Snp 3978218792Snp cpl = (void *)(lso + 1); 3979218792Snp 3980218792Snp txq->tso_wrs++; 3981218792Snp } else 3982218792Snp cpl = (void *)(wr + 1); 3983218792Snp 3984218792Snp /* Checksum offload */ 3985218792Snp ctrl1 = 0; 3986284052Snp if (needs_l3_csum(m0) == 0) 3987218792Snp ctrl1 |= F_TXPKT_IPCSUM_DIS; 3988284052Snp if (needs_l4_csum(m0) == 0) 3989218792Snp ctrl1 |= F_TXPKT_L4CSUM_DIS; 3990284052Snp if (m0->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 3991247062Snp CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 3992218792Snp txq->txcsum++; /* some hardware assistance provided */ 3993218792Snp 3994218792Snp /* VLAN tag insertion */ 3995284052Snp if (needs_vlan_insertion(m0)) { 3996284052Snp ctrl1 |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 3997218792Snp txq->vlan_insertion++; 3998218792Snp } 3999218792Snp 4000218792Snp /* CPL header */ 4001284052Snp cpl->ctrl0 = txq->cpl_ctrl0; 4002218792Snp cpl->pack = 0; 4003219286Snp cpl->len = htobe16(pktlen); 4004218792Snp cpl->ctrl1 = htobe64(ctrl1); 4005218792Snp 4006218792Snp /* SGL */ 4007218792Snp dst = (void *)(cpl + 1); 4008284052Snp if (nsegs > 0) { 4009284052Snp 4010284052Snp write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 4011218792Snp txq->sgl_wrs++; 4012218792Snp } else { 4013284052Snp struct mbuf *m; 4014284052Snp 4015284052Snp for (m = m0; m != NULL; m = m->m_next) { 4016218792Snp copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 4017219286Snp#ifdef INVARIANTS 4018219286Snp pktlen -= m->m_len; 4019219286Snp#endif 4020218792Snp } 4021219286Snp#ifdef INVARIANTS 4022219286Snp KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 4023219286Snp#endif 4024284052Snp txq->imm_wrs++; 4025218792Snp } 4026218792Snp 4027218792Snp txq->txpkt_wrs++; 4028284052Snp 4029284052Snp txsd = &txq->sdesc[eq->pidx]; 4030284052Snp txsd->m = m0; 
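/*
 * Record how many hardware descriptors this packet consumed;
 * reclaim_tx_descs() uses desc_used to advance the consumer index and
 * frees the mbuf chain stored in txsd->m once the hardware is done
 * with these descriptors.
 */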
4031284052Snp txsd->desc_used = ndesc; 4032284052Snp 4033284052Snp return (ndesc); 4034218792Snp} 4035218792Snp 4036218792Snpstatic int 4037284052Snptry_txpkts(struct mbuf *m, struct mbuf *n, struct txpkts *txp, u_int available) 4038218792Snp{ 4039284052Snp u_int needed, nsegs1, nsegs2, l1, l2; 4040218792Snp 4041284052Snp if (cannot_use_txpkts(m) || cannot_use_txpkts(n)) 4042284052Snp return (1); 4043218792Snp 4044284052Snp nsegs1 = mbuf_nsegs(m); 4045284052Snp nsegs2 = mbuf_nsegs(n); 4046284052Snp if (nsegs1 + nsegs2 == 2) { 4047284052Snp txp->wr_type = 1; 4048284052Snp l1 = l2 = txpkts1_len16(); 4049284052Snp } else { 4050284052Snp txp->wr_type = 0; 4051284052Snp l1 = txpkts0_len16(nsegs1); 4052284052Snp l2 = txpkts0_len16(nsegs2); 4053284052Snp } 4054284052Snp txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + l1 + l2; 4055284052Snp needed = howmany(txp->len16, EQ_ESIZE / 16); 4056284052Snp if (needed > SGE_MAX_WR_NDESC || needed > available) 4057284052Snp return (1); 4058228561Snp 4059284052Snp txp->plen = m->m_pkthdr.len + n->m_pkthdr.len; 4060284052Snp if (txp->plen > 65535) 4061284052Snp return (1); 4062218792Snp 4063284052Snp txp->npkt = 2; 4064284052Snp set_mbuf_len16(m, l1); 4065284052Snp set_mbuf_len16(n, l2); 4066218792Snp 4067284052Snp return (0); 4068284052Snp} 4069218792Snp 4070284052Snpstatic int 4071284052Snpadd_to_txpkts(struct mbuf *m, struct txpkts *txp, u_int available) 4072284052Snp{ 4073284052Snp u_int plen, len16, needed, nsegs; 4074218792Snp 4075284052Snp MPASS(txp->wr_type == 0 || txp->wr_type == 1); 4076218792Snp 4077284052Snp nsegs = mbuf_nsegs(m); 4078284052Snp if (needs_tso(m) || (txp->wr_type == 1 && nsegs != 1)) 4079284052Snp return (1); 4080218792Snp 4081284052Snp plen = txp->plen + m->m_pkthdr.len; 4082284052Snp if (plen > 65535) 4083284052Snp return (1); 4084218792Snp 4085284052Snp if (txp->wr_type == 0) 4086284052Snp len16 = txpkts0_len16(nsegs); 4087284052Snp else 4088284052Snp len16 = txpkts1_len16(); 4089284052Snp needed = howmany(txp->len16 + len16, EQ_ESIZE / 16); 4090284052Snp if (needed > SGE_MAX_WR_NDESC || needed > available) 4091284052Snp return (1); 4092218792Snp 4093284052Snp txp->npkt++; 4094284052Snp txp->plen = plen; 4095284052Snp txp->len16 += len16; 4096284052Snp set_mbuf_len16(m, len16); 4097218792Snp 4098218792Snp return (0); 4099218792Snp} 4100218792Snp 4101218792Snp/* 4102284052Snp * Write a txpkts WR for the packets in txp to the hardware descriptors, update 4103284052Snp * the software descriptor, and advance the pidx. It is guaranteed that enough 4104284052Snp * descriptors are available. 4105284052Snp * 4106284052Snp * The return value is the # of hardware descriptors used. 
4107218792Snp */ 4108284052Snpstatic u_int 4109284052Snpwrite_txpkts_wr(struct sge_txq *txq, struct fw_eth_tx_pkts_wr *wr, 4110284052Snp struct mbuf *m0, const struct txpkts *txp, u_int available) 4111218792Snp{ 4112218792Snp struct sge_eq *eq = &txq->eq; 4113218792Snp struct tx_sdesc *txsd; 4114284052Snp struct cpl_tx_pkt_core *cpl; 4115218792Snp uint32_t ctrl; 4116284052Snp uint64_t ctrl1; 4117284052Snp int ndesc, checkwrap; 4118284052Snp struct mbuf *m; 4119284052Snp void *flitp; 4120218792Snp 4121218792Snp TXQ_LOCK_ASSERT_OWNED(txq); 4122284052Snp MPASS(txp->npkt > 0); 4123284052Snp MPASS(txp->plen < 65536); 4124284052Snp MPASS(m0 != NULL); 4125284052Snp MPASS(m0->m_nextpkt != NULL); 4126284052Snp MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 4127284052Snp MPASS(available > 0 && available < eq->sidx); 4128218792Snp 4129284052Snp ndesc = howmany(txp->len16, EQ_ESIZE / 16); 4130284052Snp MPASS(ndesc <= available); 4131218792Snp 4132284052Snp MPASS(wr == (void *)&eq->desc[eq->pidx]); 4133228561Snp wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 4134284052Snp ctrl = V_FW_WR_LEN16(txp->len16); 4135218792Snp wr->equiq_to_len16 = htobe32(ctrl); 4136284052Snp wr->plen = htobe16(txp->plen); 4137284052Snp wr->npkt = txp->npkt; 4138284052Snp wr->r3 = 0; 4139284052Snp wr->type = txp->wr_type; 4140284052Snp flitp = wr + 1; 4141218792Snp 4142284052Snp /* 4143284052Snp * At this point we are 16B into a hardware descriptor. If checkwrap is 4144284052Snp * set then we know the WR is going to wrap around somewhere. We'll 4145284052Snp * check for that at appropriate points. 4146284052Snp */ 4147284052Snp checkwrap = eq->sidx - ndesc < eq->pidx; 4148284052Snp for (m = m0; m != NULL; m = m->m_nextpkt) { 4149284052Snp if (txp->wr_type == 0) { 4150284052Snp struct ulp_txpkt *ulpmc; 4151284052Snp struct ulptx_idata *ulpsc; 4152218792Snp 4153284052Snp /* ULP master command */ 4154284052Snp ulpmc = flitp; 4155284052Snp ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 4156284052Snp V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 4157284052Snp ulpmc->len = htobe32(mbuf_len16(m)); 4158218792Snp 4159284052Snp /* ULP subcommand */ 4160284052Snp ulpsc = (void *)(ulpmc + 1); 4161284052Snp ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 4162284052Snp F_ULP_TX_SC_MORE); 4163284052Snp ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 4164218792Snp 4165284052Snp cpl = (void *)(ulpsc + 1); 4166284052Snp if (checkwrap && 4167284052Snp (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 4168284052Snp cpl = (void *)&eq->desc[0]; 4169284052Snp txq->txpkts0_pkts += txp->npkt; 4170284052Snp txq->txpkts0_wrs++; 4171284052Snp } else { 4172284052Snp cpl = flitp; 4173284052Snp txq->txpkts1_pkts += txp->npkt; 4174284052Snp txq->txpkts1_wrs++; 4175284052Snp } 4176218792Snp 4177284052Snp /* Checksum offload */ 4178284052Snp ctrl1 = 0; 4179284052Snp if (needs_l3_csum(m) == 0) 4180284052Snp ctrl1 |= F_TXPKT_IPCSUM_DIS; 4181284052Snp if (needs_l4_csum(m) == 0) 4182284052Snp ctrl1 |= F_TXPKT_L4CSUM_DIS; 4183284052Snp if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | 4184284052Snp CSUM_UDP_IPV6 | CSUM_TCP_IPV6 | CSUM_TSO)) 4185284052Snp txq->txcsum++; /* some hardware assistance provided */ 4186218792Snp 4187284052Snp /* VLAN tag insertion */ 4188284052Snp if (needs_vlan_insertion(m)) { 4189284052Snp ctrl1 |= F_TXPKT_VLAN_VLD | 4190284052Snp V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 4191284052Snp txq->vlan_insertion++; 4192284052Snp } 4193218792Snp 4194284052Snp /* CPL header */ 4195284052Snp cpl->ctrl0 = 
txq->cpl_ctrl0; 4196284052Snp cpl->pack = 0; 4197284052Snp cpl->len = htobe16(m->m_pkthdr.len); 4198284052Snp cpl->ctrl1 = htobe64(ctrl1); 4199218792Snp 4200284052Snp flitp = cpl + 1; 4201284052Snp if (checkwrap && 4202284052Snp (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 4203284052Snp flitp = (void *)&eq->desc[0]; 4204218792Snp 4205284052Snp write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 4206218792Snp 4207218792Snp } 4208218792Snp 4209284052Snp txsd = &txq->sdesc[eq->pidx]; 4210284052Snp txsd->m = m0; 4211284052Snp txsd->desc_used = ndesc; 4212218792Snp 4213284052Snp return (ndesc); 4214218792Snp} 4215218792Snp 4216218792Snp/* 4217218792Snp * If the SGL ends on an address that is not 16 byte aligned, this function will 4218284052Snp * add a 0 filled flit at the end. 4219218792Snp */ 4220284052Snpstatic void 4221284052Snpwrite_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 4222218792Snp{ 4223284052Snp struct sge_eq *eq = &txq->eq; 4224284052Snp struct sglist *gl = txq->gl; 4225284052Snp struct sglist_seg *seg; 4226284052Snp __be64 *flitp, *wrap; 4227218792Snp struct ulptx_sgl *usgl; 4228284052Snp int i, nflits, nsegs; 4229218792Snp 4230218792Snp KASSERT(((uintptr_t)(*to) & 0xf) == 0, 4231218792Snp ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 4232284052Snp MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 4233284052Snp MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 4234218792Snp 4235284052Snp get_pkt_gl(m, gl); 4236284052Snp nsegs = gl->sg_nseg; 4237284052Snp MPASS(nsegs > 0); 4238284052Snp 4239284052Snp nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 4240218792Snp flitp = (__be64 *)(*to); 4241284052Snp wrap = (__be64 *)(&eq->desc[eq->sidx]); 4242284052Snp seg = &gl->sg_segs[0]; 4243218792Snp usgl = (void *)flitp; 4244218792Snp 4245218792Snp /* 4246218792Snp * We start at a 16 byte boundary somewhere inside the tx descriptor 4247218792Snp * ring, so we're at least 16 bytes away from the status page. There is 4248218792Snp * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
4249218792Snp */ 4250218792Snp 4251218792Snp usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 4252284052Snp V_ULPTX_NSGE(nsegs)); 4253284052Snp usgl->len0 = htobe32(seg->ss_len); 4254284052Snp usgl->addr0 = htobe64(seg->ss_paddr); 4255218792Snp seg++; 4256218792Snp 4257284052Snp if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 4258218792Snp 4259218792Snp /* Won't wrap around at all */ 4260218792Snp 4261284052Snp for (i = 0; i < nsegs - 1; i++, seg++) { 4262284052Snp usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 4263284052Snp usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 4264218792Snp } 4265218792Snp if (i & 1) 4266218792Snp usgl->sge[i / 2].len[1] = htobe32(0); 4267284052Snp flitp += nflits; 4268218792Snp } else { 4269218792Snp 4270218792Snp /* Will wrap somewhere in the rest of the SGL */ 4271218792Snp 4272218792Snp /* 2 flits already written, write the rest flit by flit */ 4273218792Snp flitp = (void *)(usgl + 1); 4274284052Snp for (i = 0; i < nflits - 2; i++) { 4275284052Snp if (flitp == wrap) 4276218792Snp flitp = (void *)eq->desc; 4277284052Snp *flitp++ = get_flit(seg, nsegs - 1, i); 4278218792Snp } 4279218792Snp } 4280218792Snp 4281284052Snp if (nflits & 1) { 4282284052Snp MPASS(((uintptr_t)flitp) & 0xf); 4283284052Snp *flitp++ = 0; 4284284052Snp } 4285218792Snp 4286284052Snp MPASS((((uintptr_t)flitp) & 0xf) == 0); 4287284052Snp if (__predict_false(flitp == wrap)) 4288218792Snp *to = (void *)eq->desc; 4289218792Snp else 4290284052Snp *to = (void *)flitp; 4291218792Snp} 4292218792Snp 4293218792Snpstatic inline void 4294218792Snpcopy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 4295218792Snp{ 4296284052Snp 4297284052Snp MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 4298284052Snp MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 4299284052Snp 4300284052Snp if (__predict_true((uintptr_t)(*to) + len <= 4301284052Snp (uintptr_t)&eq->desc[eq->sidx])) { 4302218792Snp bcopy(from, *to, len); 4303218792Snp (*to) += len; 4304218792Snp } else { 4305284052Snp int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 4306218792Snp 4307218792Snp bcopy(from, *to, portion); 4308218792Snp from += portion; 4309218792Snp portion = len - portion; /* remaining */ 4310218792Snp bcopy(from, (void *)eq->desc, portion); 4311218792Snp (*to) = (caddr_t)eq->desc + portion; 4312218792Snp } 4313218792Snp} 4314218792Snp 4315218792Snpstatic inline void 4316284052Snpring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 4317218792Snp{ 4318284052Snp u_int db; 4319248925Snp 4320284052Snp MPASS(n > 0); 4321284052Snp 4322248925Snp db = eq->doorbells; 4323284052Snp if (n > 1) 4324249392Snp clrbit(&db, DOORBELL_WCWR); 4325218792Snp wmb(); 4326248925Snp 4327248925Snp switch (ffs(db) - 1) { 4328248925Snp case DOORBELL_UDB: 4329284052Snp *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 4330284052Snp break; 4331248925Snp 4332249392Snp case DOORBELL_WCWR: { 4333248925Snp volatile uint64_t *dst, *src; 4334248925Snp int i; 4335248925Snp 4336248925Snp /* 4337248925Snp * Queues whose 128B doorbell segment fits in the page do not 4338248925Snp * use relative qid (udb_qid is always 0). Only queues with 4339249392Snp * doorbell segments can do WCWR. 
4340248925Snp */ 4341284052Snp KASSERT(eq->udb_qid == 0 && n == 1, 4342248925Snp ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 4343284052Snp __func__, eq->doorbells, n, eq->dbidx, eq)); 4344248925Snp 4345248925Snp dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 4346248925Snp UDBS_DB_OFFSET); 4347284052Snp i = eq->dbidx; 4348248925Snp src = (void *)&eq->desc[i]; 4349248925Snp while (src != (void *)&eq->desc[i + 1]) 4350248925Snp *dst++ = *src++; 4351248925Snp wmb(); 4352284052Snp break; 4353248925Snp } 4354248925Snp 4355248925Snp case DOORBELL_UDBWC: 4356284052Snp *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 4357248925Snp wmb(); 4358284052Snp break; 4359248925Snp 4360248925Snp case DOORBELL_KDB: 4361248925Snp t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), 4362284052Snp V_QID(eq->cntxt_id) | V_PIDX(n)); 4363284052Snp break; 4364248925Snp } 4365284052Snp 4366284052Snp IDXINCR(eq->dbidx, n, eq->sidx); 4367218792Snp} 4368218792Snp 4369284052Snpstatic inline u_int 4370284052Snpreclaimable_tx_desc(struct sge_eq *eq) 4371218792Snp{ 4372284052Snp uint16_t hw_cidx; 4373218792Snp 4374284052Snp hw_cidx = read_hw_cidx(eq); 4375284052Snp return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 4376284052Snp} 4377218792Snp 4378284052Snpstatic inline u_int 4379284052Snptotal_available_tx_desc(struct sge_eq *eq) 4380284052Snp{ 4381284052Snp uint16_t hw_cidx, pidx; 4382284052Snp 4383284052Snp hw_cidx = read_hw_cidx(eq); 4384284052Snp pidx = eq->pidx; 4385284052Snp 4386284052Snp if (pidx == hw_cidx) 4387284052Snp return (eq->sidx - 1); 4388218792Snp else 4389284052Snp return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 4390219292Snp} 4391218792Snp 4392284052Snpstatic inline uint16_t 4393284052Snpread_hw_cidx(struct sge_eq *eq) 4394284052Snp{ 4395284052Snp struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 4396284052Snp uint16_t cidx = spg->cidx; /* stable snapshot */ 4397284052Snp 4398284052Snp return (be16toh(cidx)); 4399284052Snp} 4400284052Snp 4401219292Snp/* 4402284052Snp * Reclaim 'n' descriptors approximately. 4403219292Snp */ 4404284052Snpstatic u_int 4405284052Snpreclaim_tx_descs(struct sge_txq *txq, u_int n) 4406219292Snp{ 4407219292Snp struct tx_sdesc *txsd; 4408220873Snp struct sge_eq *eq = &txq->eq; 4409284052Snp u_int can_reclaim, reclaimed; 4410218792Snp 4411228561Snp TXQ_LOCK_ASSERT_OWNED(txq); 4412284052Snp MPASS(n > 0); 4413218792Snp 4414284052Snp reclaimed = 0; 4415284052Snp can_reclaim = reclaimable_tx_desc(eq); 4416284052Snp while (can_reclaim && reclaimed < n) { 4417218792Snp int ndesc; 4418284052Snp struct mbuf *m, *nextpkt; 4419218792Snp 4420220873Snp txsd = &txq->sdesc[eq->cidx]; 4421218792Snp ndesc = txsd->desc_used; 4422218792Snp 4423218792Snp /* Firmware doesn't return "partial" credits. 
*/ 4424218792Snp KASSERT(can_reclaim >= ndesc, 4425218792Snp ("%s: unexpected number of credits: %d, %d", 4426218792Snp __func__, can_reclaim, ndesc)); 4427218792Snp 4428284052Snp for (m = txsd->m; m != NULL; m = nextpkt) { 4429284052Snp nextpkt = m->m_nextpkt; 4430284052Snp m->m_nextpkt = NULL; 4431284052Snp m_freem(m); 4432284052Snp } 4433218792Snp reclaimed += ndesc; 4434219292Snp can_reclaim -= ndesc; 4435284052Snp IDXINCR(eq->cidx, ndesc, eq->sidx); 4436219292Snp } 4437218792Snp 4438218792Snp return (reclaimed); 4439218792Snp} 4440218792Snp 4441218792Snpstatic void 4442284052Snptx_reclaim(void *arg, int n) 4443218792Snp{ 4444284052Snp struct sge_txq *txq = arg; 4445284052Snp struct sge_eq *eq = &txq->eq; 4446218792Snp 4447284052Snp do { 4448284052Snp if (TXQ_TRYLOCK(txq) == 0) 4449284052Snp break; 4450284052Snp n = reclaim_tx_descs(txq, 32); 4451284052Snp if (eq->cidx == eq->pidx) 4452284052Snp eq->equeqidx = eq->pidx; 4453284052Snp TXQ_UNLOCK(txq); 4454284052Snp } while (n > 0); 4455218792Snp} 4456218792Snp 4457218792Snpstatic __be64 4458284052Snpget_flit(struct sglist_seg *segs, int nsegs, int idx) 4459218792Snp{ 4460218792Snp int i = (idx / 3) * 2; 4461218792Snp 4462218792Snp switch (idx % 3) { 4463218792Snp case 0: { 4464218792Snp __be64 rc; 4465218792Snp 4466284052Snp rc = htobe32(segs[i].ss_len); 4467218792Snp if (i + 1 < nsegs) 4468284052Snp rc |= (uint64_t)htobe32(segs[i + 1].ss_len) << 32; 4469218792Snp 4470218792Snp return (rc); 4471218792Snp } 4472218792Snp case 1: 4473284052Snp return (htobe64(segs[i].ss_paddr)); 4474218792Snp case 2: 4475284052Snp return (htobe64(segs[i + 1].ss_paddr)); 4476218792Snp } 4477218792Snp 4478218792Snp return (0); 4479218792Snp} 4480218792Snp 4481218792Snpstatic void 4482265425Snpfind_best_refill_source(struct adapter *sc, struct sge_fl *fl, int maxp) 4483218792Snp{ 4484265425Snp int8_t zidx, hwidx, idx; 4485265425Snp uint16_t region1, region3; 4486265425Snp int spare, spare_needed, n; 4487265425Snp struct sw_zone_info *swz; 4488265425Snp struct hw_buf_info *hwb, *hwb_list = &sc->sge.hw_buf_info[0]; 4489218792Snp 4490265425Snp /* 4491265425Snp * Buffer Packing: Look for PAGE_SIZE or larger zone which has a bufsize 4492265425Snp * large enough for the max payload and cluster metadata. Otherwise 4493265425Snp * settle for the largest bufsize that leaves enough room in the cluster 4494265425Snp * for metadata. 4495265425Snp * 4496265425Snp * Without buffer packing: Look for the smallest zone which has a 4497265425Snp * bufsize large enough for the max payload. Settle for the largest 4498265425Snp * bufsize available if there's nothing big enough for max payload. 4499265425Snp */ 4500265425Snp spare_needed = fl->flags & FL_BUF_PACKING ? CL_METADATA_SIZE : 0; 4501265425Snp swz = &sc->sge.sw_zone_info[0]; 4502265425Snp hwidx = -1; 4503265425Snp for (zidx = 0; zidx < SW_ZONE_SIZES; zidx++, swz++) { 4504265425Snp if (swz->size > largest_rx_cluster) { 4505265425Snp if (__predict_true(hwidx != -1)) 4506265425Snp break; 4507218792Snp 4508265425Snp /* 4509265425Snp * This is a misconfiguration. largest_rx_cluster is 4510265425Snp * preventing us from finding a refill source. See 4511265425Snp * dev.t5nex.<n>.buffer_sizes to figure out why. 4512265425Snp */ 4513265425Snp device_printf(sc->dev, "largest_rx_cluster=%u leaves no" 4514265425Snp " refill source for fl %p (dma %u). 
4517265425Snp		for (idx = swz->head_hwidx; idx != -1; idx = hwb->next) {
4518265425Snp			hwb = &hwb_list[idx];
4519265425Snp			spare = swz->size - hwb->size;
4520265425Snp			if (spare < spare_needed)
4521265425Snp				continue;
4522265425Snp
4523265425Snp			hwidx = idx;		/* best option so far */
4524265425Snp			if (hwb->size >= maxp) {
4525265425Snp
4526265425Snp				if ((fl->flags & FL_BUF_PACKING) == 0)
4527265425Snp					goto done; /* stop looking (not packing) */
4528265425Snp
4529265425Snp				if (swz->size >= safest_rx_cluster)
4530265425Snp					goto done; /* stop looking (packing) */
4531265425Snp			}
4532265425Snp			break;	/* keep looking, next zone */
4533265425Snp		}
4534255050Snp	}
4535265425Snpdone:
4536265425Snp	/* A usable hwidx has been located. */
4537265425Snp	MPASS(hwidx != -1);
4538265425Snp	hwb = &hwb_list[hwidx];
4539265425Snp	zidx = hwb->zidx;
4540265425Snp	swz = &sc->sge.sw_zone_info[zidx];
4541265425Snp	region1 = 0;
4542265425Snp	region3 = swz->size - hwb->size;
4543255050Snp
4544265425Snp	/*
4545265425Snp	 * Stay within this zone and see if there is a better match when mbuf
4546265425Snp	 * inlining is allowed.  Remember that the hwidx's are sorted in
4547265425Snp	 * decreasing order of size (so in increasing order of spare area).
4548265425Snp	 */
4549265425Snp	for (idx = hwidx; idx != -1; idx = hwb->next) {
4550265425Snp		hwb = &hwb_list[idx];
4551265425Snp		spare = swz->size - hwb->size;
4552255050Snp
4553265425Snp		if (allow_mbufs_in_cluster == 0 || hwb->size < maxp)
4554265425Snp			break;
4555281212Snp
4556281212Snp		/*
4557281212Snp		 * Do not inline mbufs if doing so would violate the pad/pack
4558281212Snp		 * boundary alignment requirement.
4559281212Snp		 */
4560281212Snp		if (fl_pad && (MSIZE % sc->sge.pad_boundary) != 0)
4561281212Snp			continue;
4562281212Snp		if (fl->flags & FL_BUF_PACKING &&
4563281212Snp		    (MSIZE % sc->sge.pack_boundary) != 0)
4564281212Snp			continue;
4565281212Snp
4566265425Snp		if (spare < CL_METADATA_SIZE + MSIZE)
4567265425Snp			continue;
4568265425Snp		n = (spare - CL_METADATA_SIZE) / MSIZE;
4569265425Snp		if (n > howmany(hwb->size, maxp))
4570265425Snp			break;
4571255050Snp
4572265425Snp		hwidx = idx;
4573265425Snp		if (fl->flags & FL_BUF_PACKING) {
4574265425Snp			region1 = n * MSIZE;
4575265425Snp			region3 = spare - region1;
4576265425Snp		} else {
4577265425Snp			region1 = MSIZE;
4578265425Snp			region3 = spare - region1;
4579265425Snp			break;
4580255050Snp		}
4581255050Snp	}
4582255050Snp
4583265425Snp	KASSERT(zidx >= 0 && zidx < SW_ZONE_SIZES,
4584265425Snp	    ("%s: bad zone %d for fl %p, maxp %d", __func__, zidx, fl, maxp));
4585265425Snp	KASSERT(hwidx >= 0 && hwidx <= SGE_FLBUF_SIZES,
4586265425Snp	    ("%s: bad hwidx %d for fl %p, maxp %d", __func__, hwidx, fl, maxp));
4587265425Snp	KASSERT(region1 + sc->sge.hw_buf_info[hwidx].size + region3 ==
4588265425Snp	    sc->sge.sw_zone_info[zidx].size,
4589265425Snp	    ("%s: bad buffer layout for fl %p, maxp %d. "
4590265425Snp	    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
4591265425Snp	    sc->sge.sw_zone_info[zidx].size, region1,
4592265425Snp	    sc->sge.hw_buf_info[hwidx].size, region3));
4593265425Snp	if (fl->flags & FL_BUF_PACKING || region1 > 0) {
4594265425Snp		KASSERT(region3 >= CL_METADATA_SIZE,
4595265425Snp		    ("%s: no room for metadata.  fl %p, maxp %d; "
4596265425Snp		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
4597265425Snp		    sc->sge.sw_zone_info[zidx].size, region1,
4598265425Snp		    sc->sge.hw_buf_info[hwidx].size, region3));
4599265425Snp		KASSERT(region1 % MSIZE == 0,
4600265425Snp		    ("%s: bad mbuf region for fl %p, maxp %d. "
4601265425Snp		    "cl %d; r1 %d, payload %d, r3 %d", __func__, fl, maxp,
4602265425Snp		    sc->sge.sw_zone_info[zidx].size, region1,
4603265425Snp		    sc->sge.hw_buf_info[hwidx].size, region3));
4604265425Snp	}
4605265425Snp
4606265425Snp	fl->cll_def.zidx = zidx;
4607265425Snp	fl->cll_def.hwidx = hwidx;
4608265425Snp	fl->cll_def.region1 = region1;
4609265425Snp	fl->cll_def.region3 = region3;
4610265425Snp}
4611265425Snp
4612265425Snpstatic void
4613265425Snpfind_safe_refill_source(struct adapter *sc, struct sge_fl *fl)
4614265425Snp{
4615265425Snp	struct sge *s = &sc->sge;
4616265425Snp	struct hw_buf_info *hwb;
4617265425Snp	struct sw_zone_info *swz;
4618265425Snp	int spare;
4619265425Snp	int8_t hwidx;
4620265425Snp
4621265425Snp	if (fl->flags & FL_BUF_PACKING)
4622265425Snp		hwidx = s->safe_hwidx2;	/* with room for metadata */
4623265425Snp	else if (allow_mbufs_in_cluster && s->safe_hwidx2 != -1) {
4624265425Snp		hwidx = s->safe_hwidx2;
4625265425Snp		hwb = &s->hw_buf_info[hwidx];
4626265425Snp		swz = &s->sw_zone_info[hwb->zidx];
4627265425Snp		spare = swz->size - hwb->size;
4628265425Snp
4629265425Snp		/* no good if there isn't room for an mbuf as well */
4630265425Snp		if (spare < CL_METADATA_SIZE + MSIZE)
4631265425Snp			hwidx = s->safe_hwidx1;
4632265425Snp	} else
4633265425Snp		hwidx = s->safe_hwidx1;
4634265425Snp
4635265425Snp	if (hwidx == -1) {
4636265425Snp		/* No fallback source */
4637265425Snp		fl->cll_alt.hwidx = -1;
4638265425Snp		fl->cll_alt.zidx = -1;
4639265425Snp
4640265425Snp		return;
4641265425Snp	}
4642265425Snp
4643265425Snp	hwb = &s->hw_buf_info[hwidx];
4644265425Snp	swz = &s->sw_zone_info[hwb->zidx];
4645265425Snp	spare = swz->size - hwb->size;
4646265425Snp	fl->cll_alt.hwidx = hwidx;
4647265425Snp	fl->cll_alt.zidx = hwb->zidx;
4648281212Snp	if (allow_mbufs_in_cluster &&
4649281212Snp	    (fl_pad == 0 || (MSIZE % sc->sge.pad_boundary) == 0))
4650265425Snp		fl->cll_alt.region1 = ((spare - CL_METADATA_SIZE) / MSIZE) * MSIZE;
4651255050Snp	else
4652265425Snp		fl->cll_alt.region1 = 0;
4653265425Snp	fl->cll_alt.region3 = spare - fl->cll_alt.region1;
4654218792Snp}
4655219286Snp
4656222510Snpstatic void
4657228561Snpadd_fl_to_sfl(struct adapter *sc, struct sge_fl *fl)
4658222510Snp{
4659228561Snp	mtx_lock(&sc->sfl_lock);
4660228561Snp	FL_LOCK(fl);
4661228561Snp	if ((fl->flags & FL_DOOMED) == 0) {
4662228561Snp		fl->flags |= FL_STARVING;
4663228561Snp		TAILQ_INSERT_TAIL(&sc->sfl, fl, link);
4664228561Snp		callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc);
4665222510Snp	}
4666228561Snp	FL_UNLOCK(fl);
4667228561Snp	mtx_unlock(&sc->sfl_lock);
4668222510Snp}
4669222510Snp
4670284052Snpstatic void
4671284052Snphandle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq)
4672284052Snp{
4673284052Snp	struct sge_wrq *wrq = (void *)eq;
4674284052Snp
4675284052Snp	atomic_readandclear_int(&eq->equiq);
4676284052Snp	taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task);
4677284052Snp}
4678284052Snp
4679284052Snpstatic void
4680284052Snphandle_eth_egr_update(struct adapter *sc, struct sge_eq *eq)
4681284052Snp{
4682284052Snp	struct sge_txq *txq = (void *)eq;
4683284052Snp
4684284052Snp	MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH);
4685284052Snp
4686284052Snp	atomic_readandclear_int(&eq->equiq);
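	/*
	 * Descriptors were consumed by the hardware, so give the mp_ring a
	 * chance to transmit any frames that were waiting for room in the
	 * descriptor ring, and schedule the reclaim task to free the mbufs
	 * of the completed work requests.
	 */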
4687284052Snp	mp_ring_check_drainage(txq->r, 0);
4688284052Snp	taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task);
4689284052Snp}
4690284052Snp
4691220873Snpstatic int
4692228561Snphandle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
4693228561Snp    struct mbuf *m)
4694220873Snp{
4695228561Snp	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
4696228561Snp	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
4697228561Snp	struct adapter *sc = iq->adapter;
4698228561Snp	struct sge *s = &sc->sge;
4699228561Snp	struct sge_eq *eq;
4700284052Snp	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
4701284052Snp	    &handle_wrq_egr_update, &handle_eth_egr_update,
4702284052Snp	    &handle_wrq_egr_update};
4703220873Snp
4704228561Snp	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4705228561Snp	    rss->opcode));
4706220873Snp
4707228561Snp	eq = s->eqmap[qid - s->eq_start];
4708284052Snp	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);
4709220873Snp
4710228561Snp	return (0);
4711228561Snp}
4712220873Snp
4713247291Snp/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
4714247291SnpCTASSERT(offsetof(struct cpl_fw4_msg, data) == \
4715247291Snp    offsetof(struct cpl_fw6_msg, data));
4716247291Snp
4717228561Snpstatic int
4718239336Snphandle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
4719228561Snp{
4720239336Snp	struct adapter *sc = iq->adapter;
4721228561Snp	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);
4722220873Snp
4723228561Snp	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
4724228561Snp	    rss->opcode));
4725220873Snp
4726247291Snp	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
4727247291Snp		const struct rss_header *rss2;
4728247291Snp
4729247291Snp		rss2 = (const struct rss_header *)&cpl->data[0];
4730247291Snp		return (sc->cpl_handler[rss2->opcode](iq, rss2, m));
4731247291Snp	}
4732247291Snp
4733239336Snp	return (sc->fw_msg_handler[cpl->type](sc, &cpl->data[0]));
4734220873Snp}
4735221911Snp
4736221911Snpstatic int
4737222510Snpsysctl_uint16(SYSCTL_HANDLER_ARGS)
4738221911Snp{
4739221911Snp	uint16_t *id = arg1;
4740221911Snp	int i = *id;
4741221911Snp
4742221911Snp	return sysctl_handle_int(oidp, &i, 0, req);
4743221911Snp}
4744265425Snp
4745265425Snpstatic int
4746265425Snpsysctl_bufsizes(SYSCTL_HANDLER_ARGS)
4747265425Snp{
4748265425Snp	struct sge *s = arg1;
4749265425Snp	struct hw_buf_info *hwb = &s->hw_buf_info[0];
4750265425Snp	struct sw_zone_info *swz = &s->sw_zone_info[0];
4751265425Snp	int i, rc;
4752265425Snp	struct sbuf sb;
4753265425Snp	char c;
4754265425Snp
4755265425Snp	sbuf_new(&sb, NULL, 32, SBUF_AUTOEXTEND);
4756265425Snp	for (i = 0; i < SGE_FLBUF_SIZES; i++, hwb++) {
4757265425Snp		if (hwb->zidx >= 0 && swz[hwb->zidx].size <= largest_rx_cluster)
4758265425Snp			c = '*';
4759265425Snp		else
4760265425Snp			c = '\0';
4761265425Snp
4762265425Snp		sbuf_printf(&sb, "%u%c ", hwb->size, c);
4763265425Snp	}
4764265425Snp	sbuf_trim(&sb);
4765265425Snp	sbuf_finish(&sb);
4766265425Snp	rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
4767265425Snp	sbuf_delete(&sb);
4768265425Snp	return (rc);
4769265425Snp}
4770