/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_ratelimit.h"

#include <sys/types.h>
#include <sys/eventhandler.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/smp.h>
#include <sys/counter.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>
#include <net/if_vxlan.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <machine/in_cksum.h>
#include <machine/md_var.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#ifdef DEV_NETMAP
#include <machine/bus.h>
#include <sys/selinfo.h>
#include <net/if_var.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#endif

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "common/t4_msg.h"
#include "t4_l2t.h"
#include "t4_mp_ring.h"

#ifdef T4_PKT_TIMESTAMP
#define RX_COPY_THRESHOLD (MINCLSIZE - 8)
#else
#define RX_COPY_THRESHOLD MINCLSIZE
#endif

/* Internal mbuf flags stored in PH_loc.eight[1]. */
#define	MC_NOMAP	0x01
#define	MC_RAW_WR	0x02

/*
 * Ethernet frames are DMA'd at this byte offset into the freelist buffer.
 * 0-7 are valid values.
 */
static int fl_pktshift = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pktshift, CTLFLAG_RDTUN, &fl_pktshift, 0,
    "payload DMA offset in rx buffer (bytes)");

/*
 * Pad ethernet payload up to this boundary.
 * -1: driver should figure out a good value.
 *  0: disable padding.
 *  Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
 */
int fl_pad = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pad, CTLFLAG_RDTUN, &fl_pad, 0,
    "payload pad boundary (bytes)");

/*
 * Status page length.
 * -1: driver should figure out a good value.
 *  64 or 128 are the only other valid values.
 */
static int spg_len = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, spg_len, CTLFLAG_RDTUN, &spg_len, 0,
    "status page size (bytes)");

/*
 * Congestion drops.
 * -1: no congestion feedback (not recommended).
 *  0: backpressure the channel instead of dropping packets right away.
 *  1: no backpressure, drop packets for the congested queue immediately.
 */
static int cong_drop = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, cong_drop, CTLFLAG_RDTUN, &cong_drop, 0,
    "Congestion control for RX queues (0 = backpressure, 1 = drop)");

/*
 * Deliver multiple frames in the same free list buffer if they fit.
 * -1: let the driver decide whether to enable buffer packing or not.
 *  0: disable buffer packing.
 *  1: enable buffer packing.
 */
static int buffer_packing = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, buffer_packing, CTLFLAG_RDTUN, &buffer_packing,
    0, "Enable buffer packing");

/*
 * Start next frame in a packed buffer at this boundary.
 * -1: driver should figure out a good value.
 * T4: driver will ignore this and use the same value as fl_pad above.
 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
 */
static int fl_pack = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, fl_pack, CTLFLAG_RDTUN, &fl_pack, 0,
    "payload pack boundary (bytes)");

/*
 * Largest rx cluster size that the driver is allowed to allocate.
 */
static int largest_rx_cluster = MJUM16BYTES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, largest_rx_cluster, CTLFLAG_RDTUN,
    &largest_rx_cluster, 0, "Largest rx cluster (bytes)");

/*
 * Size of cluster allocation that's most likely to succeed.  The driver will
 * fall back to this size if it fails to allocate clusters larger than this.
 */
static int safest_rx_cluster = PAGE_SIZE;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, safest_rx_cluster, CTLFLAG_RDTUN,
    &safest_rx_cluster, 0, "Safe rx cluster (bytes)");

#ifdef RATELIMIT
/*
 * Knob to control TCP timestamp rewriting, and the granularity of the tick
 * used for rewriting.  -1 and 0-3 are all valid values.
 * -1: hardware should leave the TCP timestamps alone.
 *  0: 1ms
 *  1: 100us
 *  2: 10us
 *  3: 1us
 */
static int tsclk = -1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tsclk, CTLFLAG_RDTUN, &tsclk, 0,
    "Control TCP timestamp rewriting when using pacing");

static int eo_max_backlog = 1024 * 1024;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, eo_max_backlog, CTLFLAG_RDTUN, &eo_max_backlog,
    0, "Maximum backlog of ratelimited data per flow");
#endif

/*
 * The interrupt holdoff timers are multiplied by this value on T6+.
 * 1 and 3-17 (both inclusive) are legal values.
 */
static int tscale = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tscale, CTLFLAG_RDTUN, &tscale, 0,
    "Interrupt holdoff timer scale on T6+");

/*
 * Number of LRO entries in the lro_ctrl structure per rx queue.
 */
static int lro_entries = TCP_LRO_ENTRIES;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_entries, CTLFLAG_RDTUN, &lro_entries, 0,
    "Number of LRO entries per RX queue");
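
/*
 * Illustrative use of the tunables above (not from this file): the
 * CTLFLAG_RDTUN knobs are read-only sysctls and must be set as loader
 * tunables before the driver attaches, e.g. in /boot/loader.conf.  The
 * values below are examples only, chosen from the valid ranges documented
 * above; they are not recommendations.
 *
 *	hw.cxgbe.fl_pktshift="2"	# DMA payload 2 bytes into the rx buffer
 *	hw.cxgbe.fl_pad="64"		# pad rx payload to a 64B boundary
 *	hw.cxgbe.buffer_packing="1"	# force buffer packing on
 *	hw.cxgbe.largest_rx_cluster="4096"	# MJUMPAGESIZE on most platforms
 */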

/*
 * This enables presorting of frames before they're fed into tcp_lro_rx.
 */
static int lro_mbufs = 0;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, lro_mbufs, CTLFLAG_RDTUN, &lro_mbufs, 0,
    "Enable presorting of LRO frames");

static counter_u64_t pullups;
SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, pullups, CTLFLAG_RD, &pullups,
    "Number of mbuf pullups performed");

static counter_u64_t defrags;
SYSCTL_COUNTER_U64(_hw_cxgbe, OID_AUTO, defrags, CTLFLAG_RD, &defrags,
    "Number of mbuf defrags performed");

static int t4_tx_coalesce = 1;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce, CTLFLAG_RWTUN, &t4_tx_coalesce, 0,
    "tx coalescing allowed");

/*
 * The driver will make aggressive attempts at tx coalescing if it sees this
 * many packets eligible for coalescing in quick succession, with no more than
 * the specified gap in between the eth_tx calls that delivered the packets.
 */
static int t4_tx_coalesce_pkts = 32;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_pkts, CTLFLAG_RWTUN,
    &t4_tx_coalesce_pkts, 0,
    "# of consecutive packets (1 - 255) that will trigger tx coalescing");
static int t4_tx_coalesce_gap = 5;
SYSCTL_INT(_hw_cxgbe, OID_AUTO, tx_coalesce_gap, CTLFLAG_RWTUN,
    &t4_tx_coalesce_gap, 0, "tx gap (in microseconds)");
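
/*
 * Illustrative only: unlike the RDTUN knobs earlier in this file, the three
 * tx coalescing knobs above are CTLFLAG_RWTUN and can also be adjusted at
 * runtime with sysctl(8), e.g.:
 *
 *	# sysctl hw.cxgbe.tx_coalesce=0		# disable tx coalescing
 *	# sysctl hw.cxgbe.tx_coalesce_pkts=64	# require a longer run before
 *						#  coalescing aggressively
 *	# sysctl hw.cxgbe.tx_coalesce_gap=10	# allow a 10us gap between calls
 */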

static int service_iq(struct sge_iq *, int);
static int service_iq_fl(struct sge_iq *, int);
static struct mbuf *get_fl_payload(struct adapter *, struct sge_fl *, uint32_t);
static int eth_rx(struct adapter *, struct sge_rxq *, const struct iq_desc *,
    u_int);
static inline void init_iq(struct sge_iq *, struct adapter *, int, int, int);
static inline void init_fl(struct adapter *, struct sge_fl *, int, int, char *);
static inline void init_eq(struct adapter *, struct sge_eq *, int, int, uint8_t,
    uint16_t, char *);
static int alloc_ring(struct adapter *, size_t, bus_dma_tag_t *, bus_dmamap_t *,
    bus_addr_t *, void **);
static int free_ring(struct adapter *, bus_dma_tag_t, bus_dmamap_t, bus_addr_t,
    void *);
static int alloc_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *,
    int, int);
static int free_iq_fl(struct vi_info *, struct sge_iq *, struct sge_fl *);
static void add_iq_sysctls(struct sysctl_ctx_list *, struct sysctl_oid *,
    struct sge_iq *);
static void add_fl_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid *, struct sge_fl *);
static int alloc_fwq(struct adapter *);
static int free_fwq(struct adapter *);
static int alloc_ctrlq(struct adapter *, struct sge_wrq *, int,
    struct sysctl_oid *);
static int alloc_rxq(struct vi_info *, struct sge_rxq *, int, int,
    struct sysctl_oid *);
static int free_rxq(struct vi_info *, struct sge_rxq *);
#ifdef TCP_OFFLOAD
static int alloc_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *, int, int,
    struct sysctl_oid *);
static int free_ofld_rxq(struct vi_info *, struct sge_ofld_rxq *);
#endif
#ifdef DEV_NETMAP
static int alloc_nm_rxq(struct vi_info *, struct sge_nm_rxq *, int, int,
    struct sysctl_oid *);
static int free_nm_rxq(struct vi_info *, struct sge_nm_rxq *);
static int alloc_nm_txq(struct vi_info *, struct sge_nm_txq *, int, int,
    struct sysctl_oid *);
static int free_nm_txq(struct vi_info *, struct sge_nm_txq *);
#endif
static int ctrl_eq_alloc(struct adapter *, struct sge_eq *);
static int eth_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
static int ofld_eq_alloc(struct adapter *, struct vi_info *, struct sge_eq *);
#endif
static int alloc_eq(struct adapter *, struct vi_info *, struct sge_eq *);
static int free_eq(struct adapter *, struct sge_eq *);
static int alloc_wrq(struct adapter *, struct vi_info *, struct sge_wrq *,
    struct sysctl_oid *);
static int free_wrq(struct adapter *, struct sge_wrq *);
static int alloc_txq(struct vi_info *, struct sge_txq *, int,
    struct sysctl_oid *);
static int free_txq(struct vi_info *, struct sge_txq *);
static void oneseg_dma_callback(void *, bus_dma_segment_t *, int, int);
static inline void ring_fl_db(struct adapter *, struct sge_fl *);
static int refill_fl(struct adapter *, struct sge_fl *, int);
static void refill_sfl(void *);
static int alloc_fl_sdesc(struct sge_fl *);
static void free_fl_sdesc(struct adapter *, struct sge_fl *);
static int find_refill_source(struct adapter *, int, bool);
static void add_fl_to_sfl(struct adapter *, struct sge_fl *);

static inline void get_pkt_gl(struct mbuf *, struct sglist *);
static inline u_int txpkt_len16(u_int, const u_int);
static inline u_int txpkt_vm_len16(u_int, const u_int);
static inline void calculate_mbuf_len16(struct mbuf *, bool);
static inline u_int txpkts0_len16(u_int);
static inline u_int txpkts1_len16(void);
static u_int write_raw_wr(struct sge_txq *, void *, struct mbuf *, u_int);
static u_int write_txpkt_wr(struct adapter *, struct sge_txq *, struct mbuf *,
    u_int);
static u_int write_txpkt_vm_wr(struct adapter *, struct sge_txq *,
    struct mbuf *);
static int add_to_txpkts_vf(struct adapter *, struct sge_txq *, struct mbuf *,
    int, bool *);
static int add_to_txpkts_pf(struct adapter *, struct sge_txq *, struct mbuf *,
    int, bool *);
static u_int write_txpkts_wr(struct adapter *, struct sge_txq *);
static u_int write_txpkts_vm_wr(struct adapter *, struct sge_txq *);
static void write_gl_to_txd(struct sge_txq *, struct mbuf *, caddr_t *, int);
static inline void copy_to_txd(struct sge_eq *, caddr_t, caddr_t *, int);
static inline void ring_eq_db(struct adapter *, struct sge_eq *, u_int);
static inline uint16_t read_hw_cidx(struct sge_eq *);
static inline u_int reclaimable_tx_desc(struct sge_eq *);
static inline u_int total_available_tx_desc(struct sge_eq *);
static u_int reclaim_tx_descs(struct sge_txq *, u_int);
static void tx_reclaim(void *, int);
static __be64 get_flit(struct sglist_seg *, int, int);
static int handle_sge_egr_update(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int handle_fw_msg(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
static int t4_handle_wrerr_rpl(struct adapter *, const __be64 *);
static void wrq_tx_drain(void *, int);
static void drain_wrq_wr_list(struct adapter *, struct sge_wrq *);

static int sysctl_uint16(SYSCTL_HANDLER_ARGS);
static int sysctl_bufsizes(SYSCTL_HANDLER_ARGS);
#ifdef RATELIMIT
static inline u_int txpkt_eo_len16(u_int, u_int, u_int);
static int ethofld_fw4_ack(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
#endif

static counter_u64_t extfree_refs;
static counter_u64_t extfree_rels;

an_handler_t t4_an_handler;
fw_msg_handler_t t4_fw_msg_handler[NUM_FW6_TYPES];
cpl_handler_t t4_cpl_handler[NUM_CPL_CMDS];
cpl_handler_t set_tcb_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t l2t_write_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t act_open_rpl_handlers[NUM_CPL_COOKIES];
cpl_handler_t abort_rpl_rss_handlers[NUM_CPL_COOKIES];
cpl_handler_t fw4_ack_handlers[NUM_CPL_COOKIES];

void
t4_register_an_handler(an_handler_t h)
{
	uintptr_t *loc;

	MPASS(h == NULL || t4_an_handler == NULL);

	loc = (uintptr_t *)&t4_an_handler;
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_fw_msg_handler(int type, fw_msg_handler_t h)
{
	uintptr_t *loc;

	MPASS(type < nitems(t4_fw_msg_handler));
	MPASS(h == NULL || t4_fw_msg_handler[type] == NULL);
	/*
	 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
	 * handler dispatch table.  Reject any attempt to install a handler for
	 * this subtype.
	 */
	MPASS(type != FW_TYPE_RSSCPL);
	MPASS(type != FW6_TYPE_RSSCPL);

	loc = (uintptr_t *)&t4_fw_msg_handler[type];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

void
t4_register_cpl_handler(int opcode, cpl_handler_t h)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(h == NULL || t4_cpl_handler[opcode] == NULL);

	loc = (uintptr_t *)&t4_cpl_handler[opcode];
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}

static int
set_tcb_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	u_int tid;
	int cookie;

	MPASS(m == NULL);

	tid = GET_TID(cpl);
	if (is_hpftid(iq->adapter, tid) || is_ftid(iq->adapter, tid)) {
		/*
		 * The return code for filter-write is put in the CPL cookie so
		 * we have to rely on the hardware tid (is_ftid) to determine
		 * that this is a response to a filter.
		 */
		cookie = CPL_COOKIE_FILTER;
	} else {
		cookie = G_COOKIE(cpl->cookie);
	}
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < nitems(set_tcb_rpl_handlers));

	return (set_tcb_rpl_handlers[cookie](iq, rss, m));
}

static int
l2t_write_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_l2t_write_rpl *rpl = (const void *)(rss + 1);
	unsigned int cookie;

	MPASS(m == NULL);

	cookie = GET_TID(rpl) & F_SYNC_WR ? CPL_COOKIE_TOM : CPL_COOKIE_FILTER;
	return (l2t_write_rpl_handlers[cookie](iq, rss, m));
}

static int
act_open_rpl_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));

	MPASS(m == NULL);
	MPASS(cookie != CPL_COOKIE_RESERVED);

	return (act_open_rpl_handlers[cookie](iq, rss, m));
}

static int
abort_rpl_rss_handler(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	u_int cookie;

	MPASS(m == NULL);
	if (is_hashfilter(sc))
		cookie = CPL_COOKIE_HASHFILTER;
	else
		cookie = CPL_COOKIE_TOM;

	return (abort_rpl_rss_handlers[cookie](iq, rss, m));
}

static int
fw4_ack_handler(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	u_int cookie;

	MPASS(m == NULL);
	if (is_etid(sc, tid))
		cookie = CPL_COOKIE_ETHOFLD;
	else
		cookie = CPL_COOKIE_TOM;

	return (fw4_ack_handlers[cookie](iq, rss, m));
}

static void
t4_init_shared_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_SET_TCB_RPL, set_tcb_rpl_handler);
	t4_register_cpl_handler(CPL_L2T_WRITE_RPL, l2t_write_rpl_handler);
	t4_register_cpl_handler(CPL_ACT_OPEN_RPL, act_open_rpl_handler);
	t4_register_cpl_handler(CPL_ABORT_RPL_RSS, abort_rpl_rss_handler);
	t4_register_cpl_handler(CPL_FW4_ACK, fw4_ack_handler);
}

void
t4_register_shared_cpl_handler(int opcode, cpl_handler_t h, int cookie)
{
	uintptr_t *loc;

	MPASS(opcode < nitems(t4_cpl_handler));
	MPASS(cookie > CPL_COOKIE_RESERVED);
	MPASS(cookie < NUM_CPL_COOKIES);
	MPASS(t4_cpl_handler[opcode] != NULL);

	switch (opcode) {
	case CPL_SET_TCB_RPL:
		loc = (uintptr_t *)&set_tcb_rpl_handlers[cookie];
		break;
	case CPL_L2T_WRITE_RPL:
		loc = (uintptr_t *)&l2t_write_rpl_handlers[cookie];
		break;
	case CPL_ACT_OPEN_RPL:
		loc = (uintptr_t *)&act_open_rpl_handlers[cookie];
		break;
	case CPL_ABORT_RPL_RSS:
		loc = (uintptr_t *)&abort_rpl_rss_handlers[cookie];
		break;
	case CPL_FW4_ACK:
		loc = (uintptr_t *)&fw4_ack_handlers[cookie];
		break;
	default:
		MPASS(0);
		return;
	}
	MPASS(h == NULL || *loc == (uintptr_t)NULL);
	atomic_store_rel_ptr(loc, (uintptr_t)h);
}
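
/*
 * Illustrative only (not from this file): a consumer such as a TOE module
 * would hook one of the shared opcodes by passing its own cookie, e.g.
 *
 *	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, my_set_tcb_rpl,
 *	    CPL_COOKIE_TOM);
 *
 * where my_set_tcb_rpl is a hypothetical cpl_handler_t.  The per-opcode
 * dispatchers above (set_tcb_rpl_handler and friends) then route each reply
 * to the handler registered for the cookie recovered from that reply.
 */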

/*
 * Called on MOD_LOAD.  Validates and calculates the SGE tunables.
 */
void
t4_sge_modload(void)
{

	if (fl_pktshift < 0 || fl_pktshift > 7) {
		printf("Invalid hw.cxgbe.fl_pktshift value (%d),"
		    " using 0 instead.\n", fl_pktshift);
		fl_pktshift = 0;
	}

	if (spg_len != 64 && spg_len != 128) {
		int len;

#if defined(__i386__) || defined(__amd64__)
		len = cpu_clflush_line_size > 64 ? 128 : 64;
#else
		len = 64;
#endif
		if (spg_len != -1) {
			printf("Invalid hw.cxgbe.spg_len value (%d),"
			    " using %d instead.\n", spg_len, len);
		}
		spg_len = len;
	}

	if (cong_drop < -1 || cong_drop > 1) {
		printf("Invalid hw.cxgbe.cong_drop value (%d),"
		    " using 0 instead.\n", cong_drop);
		cong_drop = 0;
	}

	if (tscale != 1 && (tscale < 3 || tscale > 17)) {
		printf("Invalid hw.cxgbe.tscale value (%d),"
		    " using 1 instead.\n", tscale);
		tscale = 1;
	}

	if (largest_rx_cluster != MCLBYTES &&
#if MJUMPAGESIZE != MCLBYTES
	    largest_rx_cluster != MJUMPAGESIZE &&
#endif
	    largest_rx_cluster != MJUM9BYTES &&
	    largest_rx_cluster != MJUM16BYTES) {
		printf("Invalid hw.cxgbe.largest_rx_cluster value (%d),"
		    " using %d instead.\n", largest_rx_cluster, MJUM16BYTES);
		largest_rx_cluster = MJUM16BYTES;
	}

	if (safest_rx_cluster != MCLBYTES &&
#if MJUMPAGESIZE != MCLBYTES
	    safest_rx_cluster != MJUMPAGESIZE &&
#endif
	    safest_rx_cluster != MJUM9BYTES &&
	    safest_rx_cluster != MJUM16BYTES) {
		printf("Invalid hw.cxgbe.safest_rx_cluster value (%d),"
		    " using %d instead.\n", safest_rx_cluster, MJUMPAGESIZE);
		safest_rx_cluster = MJUMPAGESIZE;
	}

	extfree_refs = counter_u64_alloc(M_WAITOK);
	extfree_rels = counter_u64_alloc(M_WAITOK);
	pullups = counter_u64_alloc(M_WAITOK);
	defrags = counter_u64_alloc(M_WAITOK);
	counter_u64_zero(extfree_refs);
	counter_u64_zero(extfree_rels);
	counter_u64_zero(pullups);
	counter_u64_zero(defrags);

	t4_init_shared_cpl_handlers();
	t4_register_cpl_handler(CPL_FW4_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_FW6_MSG, handle_fw_msg);
	t4_register_cpl_handler(CPL_SGE_EGR_UPDATE, handle_sge_egr_update);
#ifdef RATELIMIT
	t4_register_shared_cpl_handler(CPL_FW4_ACK, ethofld_fw4_ack,
	    CPL_COOKIE_ETHOFLD);
#endif
	t4_register_fw_msg_handler(FW6_TYPE_CMD_RPL, t4_handle_fw_rpl);
	t4_register_fw_msg_handler(FW6_TYPE_WRERR_RPL, t4_handle_wrerr_rpl);
}

void
t4_sge_modunload(void)
{

	counter_u64_free(extfree_refs);
	counter_u64_free(extfree_rels);
	counter_u64_free(pullups);
	counter_u64_free(defrags);
}

uint64_t
t4_sge_extfree_refs(void)
{
	uint64_t refs, rels;

	rels = counter_u64_fetch(extfree_rels);
	refs = counter_u64_fetch(extfree_refs);

	return (refs - rels);
}

/* max 4096 */
#define MAX_PACK_BOUNDARY 512

static inline void
setup_pad_and_pack_boundaries(struct adapter *sc)
{
	uint32_t v, m;
	int pad, pack, pad_shift;

	pad_shift = chip_id(sc) > CHELSIO_T5 ? X_T6_INGPADBOUNDARY_SHIFT :
	    X_INGPADBOUNDARY_SHIFT;
	pad = fl_pad;
	if (fl_pad < (1 << pad_shift) ||
	    fl_pad > (1 << (pad_shift + M_INGPADBOUNDARY)) ||
	    !powerof2(fl_pad)) {
		/*
		 * If there is any chance that we might use buffer packing and
		 * the chip is a T4, then pick 64 as the pad/pack boundary.  Set
		 * it to the minimum allowed in all other cases.
		 */
		pad = is_t4(sc) && buffer_packing ? 64 : 1 << pad_shift;

		/*
		 * For fl_pad = 0 we'll still write a reasonable value to the
		 * register but all the freelists will opt out of padding.
		 * We'll complain here only if the user tried to set it to a
		 * value greater than 0 that was invalid.
		 */
		if (fl_pad > 0) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pad value"
			    " (%d), using %d instead.\n", fl_pad, pad);
		}
	}
	m = V_INGPADBOUNDARY(M_INGPADBOUNDARY);
	v = V_INGPADBOUNDARY(ilog2(pad) - pad_shift);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	if (is_t4(sc)) {
		if (fl_pack != -1 && fl_pack != pad) {
			/* Complain but carry on. */
			device_printf(sc->dev, "hw.cxgbe.fl_pack (%d) ignored,"
			    " using %d instead.\n", fl_pack, pad);
		}
		return;
	}

	pack = fl_pack;
	if (fl_pack < 16 || fl_pack == 32 || fl_pack > 4096 ||
	    !powerof2(fl_pack)) {
		if (sc->params.pci.mps > MAX_PACK_BOUNDARY)
			pack = MAX_PACK_BOUNDARY;
		else
			pack = max(sc->params.pci.mps, CACHE_LINE_SIZE);
		MPASS(powerof2(pack));
		if (pack < 16)
			pack = 16;
		if (pack == 32)
			pack = 64;
		if (pack > 4096)
			pack = 4096;
		if (fl_pack != -1) {
			device_printf(sc->dev, "Invalid hw.cxgbe.fl_pack value"
			    " (%d), using %d instead.\n", fl_pack, pack);
		}
	}
	m = V_INGPACKBOUNDARY(M_INGPACKBOUNDARY);
	if (pack == 16)
		v = V_INGPACKBOUNDARY(0);
	else
		v = V_INGPACKBOUNDARY(ilog2(pack) - 5);

	MPASS(!is_t4(sc));	/* T4 doesn't have SGE_CONTROL2 */
	t4_set_reg_field(sc, A_SGE_CONTROL2, m, v);
}
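
/*
 * Worked example (illustrative only): on a T5 with hw.cxgbe.fl_pad and
 * hw.cxgbe.fl_pack both left at -1 and a PCIe max payload size of 256 bytes,
 * neither tunable passes the validation above.  The pad boundary therefore
 * falls back to the chip minimum (1 << X_INGPADBOUNDARY_SHIFT), and the pack
 * boundary becomes max(256, CACHE_LINE_SIZE), i.e. 256 on machines with 64B
 * or 128B cache lines, which is a power of 2 under MAX_PACK_BOUNDARY (512).
 */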

/*
 * adap->params.vpd.cclk must be set up before this is called.
 */
void
t4_tweak_chip_settings(struct adapter *sc)
{
	int i, reg;
	uint32_t v, m;
	int intr_timer[SGE_NTIMERS] = {1, 5, 10, 50, 100, 200};
	int timer_max = M_TIMERVALUE0 * 1000 / sc->params.vpd.cclk;
	int intr_pktcount[SGE_NCOUNTERS] = {1, 8, 16, 32};	/* 63 max */
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};

	KASSERT(sc->flags & MASTER_PF,
	    ("%s: trying to change chip settings when not master.", __func__));

	m = V_PKTSHIFT(M_PKTSHIFT) | F_RXPKTCPLMODE | F_EGRSTATUSPAGESIZE;
	v = V_PKTSHIFT(fl_pktshift) | F_RXPKTCPLMODE |
	    V_EGRSTATUSPAGESIZE(spg_len == 128);
	t4_set_reg_field(sc, A_SGE_CONTROL, m, v);

	setup_pad_and_pack_boundaries(sc);

	v = V_HOSTPAGESIZEPF0(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF1(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF2(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF3(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF4(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF5(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF6(PAGE_SHIFT - 10) |
	    V_HOSTPAGESIZEPF7(PAGE_SHIFT - 10);
	t4_write_reg(sc, A_SGE_HOST_PAGE_SIZE, v);

	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE0, 4096);
	t4_write_reg(sc, A_SGE_FL_BUFFER_SIZE1, 65536);
	reg = A_SGE_FL_BUFFER_SIZE2;
	for (i = 0; i < nitems(sw_buf_sizes); i++) {
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i]);
		reg += 4;
		MPASS(reg <= A_SGE_FL_BUFFER_SIZE15);
		t4_write_reg(sc, reg, sw_buf_sizes[i] - CL_METADATA_SIZE);
		reg += 4;
	}

	v = V_THRESHOLD_0(intr_pktcount[0]) | V_THRESHOLD_1(intr_pktcount[1]) |
	    V_THRESHOLD_2(intr_pktcount[2]) | V_THRESHOLD_3(intr_pktcount[3]);
	t4_write_reg(sc, A_SGE_INGRESS_RX_THRESHOLD, v);

	KASSERT(intr_timer[0] <= timer_max,
	    ("%s: not a single usable timer (%d, %d)", __func__, intr_timer[0],
	    timer_max));
	for (i = 1; i < nitems(intr_timer); i++) {
		KASSERT(intr_timer[i] >= intr_timer[i - 1],
		    ("%s: timers not listed in increasing order (%d)",
		    __func__, i));

		while (intr_timer[i] > timer_max) {
			if (i == nitems(intr_timer) - 1) {
				intr_timer[i] = timer_max;
				break;
			}
			intr_timer[i] += intr_timer[i - 1];
			intr_timer[i] /= 2;
		}
	}

	v = V_TIMERVALUE0(us_to_core_ticks(sc, intr_timer[0])) |
	    V_TIMERVALUE1(us_to_core_ticks(sc, intr_timer[1]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_0_AND_1, v);
	v = V_TIMERVALUE2(us_to_core_ticks(sc, intr_timer[2])) |
	    V_TIMERVALUE3(us_to_core_ticks(sc, intr_timer[3]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_2_AND_3, v);
	v = V_TIMERVALUE4(us_to_core_ticks(sc, intr_timer[4])) |
	    V_TIMERVALUE5(us_to_core_ticks(sc, intr_timer[5]));
	t4_write_reg(sc, A_SGE_TIMER_VALUE_4_AND_5, v);

	if (chip_id(sc) >= CHELSIO_T6) {
		m = V_TSCALE(M_TSCALE);
		if (tscale == 1)
			v = 0;
		else
			v = V_TSCALE(tscale - 2);
		t4_set_reg_field(sc, A_SGE_ITP_CONTROL, m, v);

		if (sc->debug_flags & DF_DISABLE_TCB_CACHE) {
			m = V_RDTHRESHOLD(M_RDTHRESHOLD) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(M_WRTHRTHRESH);
			t4_tp_pio_read(sc, &v, 1, A_TP_CMM_CONFIG, 1);
			v &= ~m;
			v |= V_RDTHRESHOLD(1) | F_WRTHRTHRESHEN |
			    V_WRTHRTHRESH(16);
			t4_tp_pio_write(sc, &v, 1, A_TP_CMM_CONFIG, 1);
		}
	}

	/* 4K, 16K, 64K, 256K DDP "page sizes" for TDDP */
	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, v);

	/*
	 * 4K, 8K, 16K, 64K DDP "page sizes" for iSCSI DDP.  These have been
	 * chosen with MAXPHYS = 128K in mind.  The largest DDP buffer that we
	 * may have to deal with is MAXPHYS + 1 page.
	 */
	v = V_HPZ0(0) | V_HPZ1(1) | V_HPZ2(2) | V_HPZ3(4);
	t4_write_reg(sc, A_ULP_RX_ISCSI_PSZ, v);

	/* We use multiple DDP page sizes both in plain-TOE and ISCSI modes. */
	m = v = F_TDDPTAGTCB | F_ISCSITAGTCB;
	t4_set_reg_field(sc, A_ULP_RX_CTL, m, v);

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	t4_set_reg_field(sc, A_TP_PARA_REG5, m, v);
}

/*
 * SGE wants the buffer to be at least 64B and then a multiple of 16.  Its
 * address must be 16B aligned.  If padding is in use the buffer's start and
 * end need to be aligned to the pad boundary as well.  We'll just make sure
 * that the size is a multiple of the pad boundary here, it is up to the buffer
 * allocation code to make sure the start of the buffer is aligned.
 */
static inline int
hwsz_ok(struct adapter *sc, int hwsz)
{
	int mask = fl_pad ? sc->params.sge.pad_boundary - 1 : 16 - 1;

	return (hwsz >= 64 && (hwsz & mask) == 0);
}
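
/*
 * Worked example (illustrative only): with padding enabled and a 64B pad
 * boundary, mask is 63, so a 4096B hardware buffer size is acceptable
 * (>= 64 and a multiple of 64), while 4096 - CL_METADATA_SIZE is acceptable
 * only if that difference still lands on a 64B boundary.  With fl_pad = 0
 * the requirement relaxes to a multiple of 16.
 */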

/*
 * XXX: driver really should be able to deal with unexpected settings.
 */
int
t4_read_chip_settings(struct adapter *sc)
{
	struct sge *s = &sc->sge;
	struct sge_params *sp = &sc->params.sge;
	int i, j, n, rc = 0;
	uint32_t m, v, r;
	uint16_t indsz = min(RX_COPY_THRESHOLD - 1, M_INDICATESIZE);
	static int sw_buf_sizes[] = {	/* Sorted by size */
		MCLBYTES,
#if MJUMPAGESIZE != MCLBYTES
		MJUMPAGESIZE,
#endif
		MJUM9BYTES,
		MJUM16BYTES
	};
	struct rx_buf_info *rxb;

	m = F_RXPKTCPLMODE;
	v = F_RXPKTCPLMODE;
	r = sc->params.sge.sge_control;
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
		rc = EINVAL;
	}

	/*
	 * If this changes then every single use of PAGE_SHIFT in the driver
	 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
	 */
	if (sp->page_shift != PAGE_SHIFT) {
		device_printf(sc->dev, "invalid SGE_HOST_PAGE_SIZE(0x%x)\n", r);
		rc = EINVAL;
	}

	s->safe_zidx = -1;
	rxb = &s->rx_buf_info[0];
	for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) {
		rxb->size1 = sw_buf_sizes[i];
		rxb->zone = m_getzone(rxb->size1);
		rxb->type = m_gettype(rxb->size1);
		rxb->size2 = 0;
		rxb->hwidx1 = -1;
		rxb->hwidx2 = -1;
		for (j = 0; j < SGE_FLBUF_SIZES; j++) {
			int hwsize = sp->sge_fl_buffer_size[j];

			if (!hwsz_ok(sc, hwsize))
				continue;

			/* hwidx for size1 */
			if (rxb->hwidx1 == -1 && rxb->size1 == hwsize)
				rxb->hwidx1 = j;

			/* hwidx for size2 (buffer packing) */
			if (rxb->size1 - CL_METADATA_SIZE < hwsize)
				continue;
			n = rxb->size1 - hwsize - CL_METADATA_SIZE;
			if (n == 0) {
				rxb->hwidx2 = j;
				rxb->size2 = hwsize;
				break;	/* stop looking */
			}
			if (rxb->hwidx2 != -1) {
				if (n < sp->sge_fl_buffer_size[rxb->hwidx2] -
				    hwsize - CL_METADATA_SIZE) {
					rxb->hwidx2 = j;
					rxb->size2 = hwsize;
				}
			} else if (n <= 2 * CL_METADATA_SIZE) {
				rxb->hwidx2 = j;
				rxb->size2 = hwsize;
			}
		}
		if (rxb->hwidx2 != -1)
			sc->flags |= BUF_PACKING_OK;
		if (s->safe_zidx == -1 && rxb->size1 == safest_rx_cluster)
			s->safe_zidx = i;
	}

	if (sc->flags & IS_VF)
		return (0);

	v = V_HPZ0(0) | V_HPZ1(2) | V_HPZ2(4) | V_HPZ3(6);
	r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
	if (r != v) {
		device_printf(sc->dev, "invalid ULP_RX_TDDP_PSZ(0x%x)\n", r);
		rc = EINVAL;
	}

	m = v = F_TDDPTAGTCB;
	r = t4_read_reg(sc, A_ULP_RX_CTL);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid ULP_RX_CTL(0x%x)\n", r);
		rc = EINVAL;
	}

	m = V_INDICATESIZE(M_INDICATESIZE) | F_REARMDDPOFFSET |
	    F_RESETDDPOFFSET;
	v = V_INDICATESIZE(indsz) | F_REARMDDPOFFSET | F_RESETDDPOFFSET;
	r = t4_read_reg(sc, A_TP_PARA_REG5);
	if ((r & m) != v) {
		device_printf(sc->dev, "invalid TP_PARA_REG5(0x%x)\n", r);
		rc = EINVAL;
	}

	t4_init_tp_params(sc, 1);

	t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
	t4_load_mtus(sc, sc->params.mtus, sc->params.a_wnd, sc->params.b_wnd);

	return (rc);
}

int
t4_create_dma_tag(struct adapter *sc)
{
	int rc;

	rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE,
	    BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, BUS_DMA_ALLOCNOW, NULL,
	    NULL, &sc->dmat);
	if (rc != 0) {
		device_printf(sc->dev,
		    "failed to create main DMA tag: %d\n", rc);
	}

	return (rc);
}

void
t4_sge_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx,
    struct sysctl_oid_list *children)
{
	struct sge_params *sp = &sc->params.sge;

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "buffer_sizes",
	    CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_bufsizes, "A",
	    "freelist buffer sizes");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pktshift", CTLFLAG_RD,
	    NULL, sp->fl_pktshift, "payload DMA offset in rx buffer (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pad", CTLFLAG_RD,
	    NULL, sp->pad_boundary, "payload pad boundary (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "spg_len", CTLFLAG_RD,
	    NULL, sp->spg_len, "status page size (bytes)");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "cong_drop", CTLFLAG_RD,
	    NULL, cong_drop, "congestion drop setting");

	SYSCTL_ADD_INT(ctx, children, OID_AUTO, "fl_pack", CTLFLAG_RD,
	    NULL, sp->pack_boundary, "payload pack boundary (bytes)");
}

int
t4_destroy_dma_tag(struct adapter *sc)
{
	if (sc->dmat)
		bus_dma_tag_destroy(sc->dmat);

	return (0);
}

/*
 * Allocate and initialize the firmware event queue, control queues, and special
 * purpose rx queues owned by the adapter.
 *
 * Returns errno on failure.  Resources allocated up to that point may still be
 * allocated.  Caller is responsible for cleanup in case this function fails.
 */
int
t4_setup_adapter_queues(struct adapter *sc)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	int rc, i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	sysctl_ctx_init(&sc->ctx);
	sc->flags |= ADAP_SYSCTL_CTX;

	/*
	 * Firmware event queue
	 */
	rc = alloc_fwq(sc);
	if (rc != 0)
		return (rc);

	/*
	 * That's all for the VF driver.
	 */
	if (sc->flags & IS_VF)
		return (rc);

	oid = device_get_sysctl_tree(sc->dev);
	children = SYSCTL_CHILDREN(oid);

	/*
	 * XXX: General purpose rx queues, one per port.
	 */

	/*
	 * Control queues, one per port.
	 */
	oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "ctrlq",
	    CTLFLAG_RD, NULL, "control queues");
	for_each_port(sc, i) {
		struct sge_wrq *ctrlq = &sc->sge.ctrlq[i];

		rc = alloc_ctrlq(sc, ctrlq, i, oid);
		if (rc != 0)
			return (rc);
	}

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_adapter_queues(struct adapter *sc)
{
	int i;

	ADAPTER_LOCK_ASSERT_NOTOWNED(sc);

	/* Do this before freeing the queue */
	if (sc->flags & ADAP_SYSCTL_CTX) {
		sysctl_ctx_free(&sc->ctx);
		sc->flags &= ~ADAP_SYSCTL_CTX;
	}

	if (!(sc->flags & IS_VF)) {
		for_each_port(sc, i)
			free_wrq(sc, &sc->sge.ctrlq[i]);
	}
	free_fwq(sc);

	return (0);
}

/* Maximum payload that could arrive with a single iq descriptor. */
static inline int
max_rx_payload(struct adapter *sc, struct ifnet *ifp, const bool ofld)
{
	int maxp;

	/* large enough even when hw VLAN extraction is disabled */
	maxp = sc->params.sge.fl_pktshift + ETHER_HDR_LEN +
	    ETHER_VLAN_ENCAP_LEN + ifp->if_mtu;
	if (ofld && sc->tt.tls && sc->cryptocaps & FW_CAPS_CONFIG_TLSKEYS &&
	    maxp < sc->params.tp.max_rx_pdu)
		maxp = sc->params.tp.max_rx_pdu;
	return (maxp);
}
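
/*
 * Worked example (illustrative only): with fl_pktshift = 0 and a 1500-byte
 * MTU, maxp = 0 + ETHER_HDR_LEN (14) + ETHER_VLAN_ENCAP_LEN (4) + 1500 =
 * 1518 bytes for a plain NIC queue.  For an offload queue with TLS rx keys
 * configured, maxp may instead be bumped up to the TP maximum rx PDU size.
 */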

int
t4_setup_vi_queues(struct vi_info *vi)
{
	int rc = 0, i, intr_idx, iqidx;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct sge_wrq *ofld_txq;
#endif
#ifdef DEV_NETMAP
	int saved_idx;
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif
	char name[16];
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct ifnet *ifp = vi->ifp;
	struct sysctl_oid *oid = device_get_sysctl_tree(vi->dev);
	struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid);
	int maxp;

	/* Interrupt vector to start from (when using multiple vectors) */
	intr_idx = vi->first_intr;

#ifdef DEV_NETMAP
	saved_idx = intr_idx;
	if (ifp->if_capabilities & IFCAP_NETMAP) {

		/* netmap is supported with direct interrupts only. */
		MPASS(!forwarding_intr_to_fwq(sc));

		/*
		 * We don't have buffers to back the netmap rx queues
		 * right now so we create the queues in a way that
		 * doesn't set off any congestion signal in the chip.
		 */
		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_rxq",
		    CTLFLAG_RD, NULL, "rx queues");
		for_each_nm_rxq(vi, i, nm_rxq) {
			rc = alloc_nm_rxq(vi, nm_rxq, intr_idx, i, oid);
			if (rc != 0)
				goto done;
			intr_idx++;
		}

		oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "nm_txq",
		    CTLFLAG_RD, NULL, "tx queues");
		for_each_nm_txq(vi, i, nm_txq) {
			iqidx = vi->first_nm_rxq + (i % vi->nnmrxq);
			rc = alloc_nm_txq(vi, nm_txq, iqidx, i, oid);
			if (rc != 0)
				goto done;
		}
	}

	/* Normal rx queues and netmap rx queues share the same interrupts. */
	intr_idx = saved_idx;
#endif

	/*
	 * Allocate rx queues first because a default iqid is required when
	 * creating a tx queue.
	 */
	maxp = max_rx_payload(sc, ifp, false);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "rxq",
	    CTLFLAG_RD, NULL, "rx queues");
	for_each_rxq(vi, i, rxq) {

		init_iq(&rxq->iq, sc, vi->tmr_idx, vi->pktc_idx, vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_rxq(vi, rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#ifdef DEV_NETMAP
	if (ifp->if_capabilities & IFCAP_NETMAP)
		intr_idx = saved_idx + max(vi->nrxq, vi->nnmrxq);
#endif
#ifdef TCP_OFFLOAD
	maxp = max_rx_payload(sc, ifp, true);
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_rxq",
	    CTLFLAG_RD, NULL, "rx queues for offloaded TCP connections");
	for_each_ofld_rxq(vi, i, ofld_rxq) {

		init_iq(&ofld_rxq->iq, sc, vi->ofld_tmr_idx, vi->ofld_pktc_idx,
		    vi->qsize_rxq);

		snprintf(name, sizeof(name), "%s ofld_rxq%d-fl",
		    device_get_nameunit(vi->dev), i);
		init_fl(sc, &ofld_rxq->fl, vi->qsize_rxq / 8, maxp, name);

		rc = alloc_ofld_rxq(vi, ofld_rxq,
		    forwarding_intr_to_fwq(sc) ? -1 : intr_idx, i, oid);
		if (rc != 0)
			goto done;
		intr_idx++;
	}
#endif

	/*
	 * Now the tx queues.
	 */
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "txq", CTLFLAG_RD,
	    NULL, "tx queues");
	for_each_txq(vi, i, txq) {
		iqidx = vi->first_rxq + (i % vi->nrxq);
		snprintf(name, sizeof(name), "%s txq%d",
		    device_get_nameunit(vi->dev), i);
		init_eq(sc, &txq->eq, EQ_ETH, vi->qsize_txq, pi->tx_chan,
		    sc->sge.rxq[iqidx].iq.cntxt_id, name);

		rc = alloc_txq(vi, txq, i, oid);
		if (rc != 0)
			goto done;
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, "ofld_txq",
	    CTLFLAG_RD, NULL, "tx queues for TOE/ETHOFLD");
	for_each_ofld_txq(vi, i, ofld_txq) {
		struct sysctl_oid *oid2;

		snprintf(name, sizeof(name), "%s ofld_txq%d",
		    device_get_nameunit(vi->dev), i);
		if (vi->nofldrxq > 0) {
			iqidx = vi->first_ofld_rxq + (i % vi->nofldrxq);
			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
			    pi->tx_chan, sc->sge.ofld_rxq[iqidx].iq.cntxt_id,
			    name);
		} else {
			iqidx = vi->first_rxq + (i % vi->nrxq);
			init_eq(sc, &ofld_txq->eq, EQ_OFLD, vi->qsize_txq,
			    pi->tx_chan, sc->sge.rxq[iqidx].iq.cntxt_id, name);
		}

		snprintf(name, sizeof(name), "%d", i);
		oid2 = SYSCTL_ADD_NODE(&vi->ctx, SYSCTL_CHILDREN(oid), OID_AUTO,
		    name, CTLFLAG_RD, NULL, "offload tx queue");

		rc = alloc_wrq(sc, vi, ofld_txq, oid2);
		if (rc != 0)
			goto done;
	}
#endif
done:
	if (rc)
		t4_teardown_vi_queues(vi);

	return (rc);
}

/*
 * Idempotent
 */
int
t4_teardown_vi_queues(struct vi_info *vi)
{
	int i;
	struct sge_rxq *rxq;
	struct sge_txq *txq;
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	struct port_info *pi = vi->pi;
	struct adapter *sc = pi->adapter;
	struct sge_wrq *ofld_txq;
#endif
#ifdef TCP_OFFLOAD
	struct sge_ofld_rxq *ofld_rxq;
#endif
#ifdef DEV_NETMAP
	struct sge_nm_rxq *nm_rxq;
	struct sge_nm_txq *nm_txq;
#endif

	/* Do this before freeing the queues */
	if (vi->flags & VI_SYSCTL_CTX) {
		sysctl_ctx_free(&vi->ctx);
		vi->flags &= ~VI_SYSCTL_CTX;
	}

#ifdef DEV_NETMAP
	if (vi->ifp->if_capabilities & IFCAP_NETMAP) {
		for_each_nm_txq(vi, i, nm_txq) {
			free_nm_txq(vi, nm_txq);
		}

		for_each_nm_rxq(vi, i, nm_rxq) {
			free_nm_rxq(vi, nm_rxq);
		}
	}
#endif

	/*
	 * Take down all the tx queues first, as they reference the rx queues
	 * (for egress updates, etc.).
	 */

	for_each_txq(vi, i, txq) {
		free_txq(vi, txq);
	}
#if defined(TCP_OFFLOAD) || defined(RATELIMIT)
	for_each_ofld_txq(vi, i, ofld_txq) {
		free_wrq(sc, ofld_txq);
	}
#endif

	/*
	 * Then take down the rx queues.
	 */

	for_each_rxq(vi, i, rxq) {
		free_rxq(vi, rxq);
	}
#ifdef TCP_OFFLOAD
	for_each_ofld_rxq(vi, i, ofld_rxq) {
		free_ofld_rxq(vi, ofld_rxq);
	}
#endif

	return (0);
}

/*
 * Interrupt handler when the driver is using only 1 interrupt.  This is a very
 * unusual scenario.
 *
 * a) Deals with errors, if any.
 * b) Services firmware event queue, which is taking interrupts for all other
 *    queues.
 */
void
t4_intr_all(void *arg)
{
	struct adapter *sc = arg;
	struct sge_iq *fwq = &sc->sge.fwq;

	MPASS(sc->intr_count == 1);

	if (sc->intr_type == INTR_INTX)
		t4_write_reg(sc, MYPF_REG(A_PCIE_PF_CLI), 0);

	t4_intr_err(arg);
	t4_intr_evt(fwq);
}

/*
 * Interrupt handler for errors (installed directly when multiple interrupts are
 * being used, or called by t4_intr_all).
 */
void
t4_intr_err(void *arg)
{
	struct adapter *sc = arg;
	uint32_t v;
	const bool verbose = (sc->debug_flags & DF_VERBOSE_SLOWINTR) != 0;

	if (sc->flags & ADAP_ERR)
		return;

	v = t4_read_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE));
	if (v & F_PFSW) {
		sc->swintr++;
		t4_write_reg(sc, MYPF_REG(A_PL_PF_INT_CAUSE), v);
	}

	t4_slow_intr_handler(sc, verbose);
}

/*
 * Interrupt handler for iq-only queues.  The firmware event queue is the only
 * such queue right now.
 */
void
t4_intr_evt(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq(iq, 0);
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

/*
 * Interrupt handler for iq+fl queues.
 */
void
t4_intr(void *arg)
{
	struct sge_iq *iq = arg;

	if (atomic_cmpset_int(&iq->state, IQS_IDLE, IQS_BUSY)) {
		service_iq_fl(iq, 0);
		(void) atomic_cmpset_int(&iq->state, IQS_BUSY, IQS_IDLE);
	}
}

#ifdef DEV_NETMAP
/*
 * Interrupt handler for netmap rx queues.
 */
void
t4_nm_intr(void *arg)
{
	struct sge_nm_rxq *nm_rxq = arg;

	if (atomic_cmpset_int(&nm_rxq->nm_state, NM_ON, NM_BUSY)) {
		service_nm_rxq(nm_rxq);
		(void) atomic_cmpset_int(&nm_rxq->nm_state, NM_BUSY, NM_ON);
	}
}

/*
 * Interrupt handler for vectors shared between NIC and netmap rx queues.
 */
void
t4_vi_intr(void *arg)
{
	struct irq *irq = arg;

	MPASS(irq->nm_rxq != NULL);
	t4_nm_intr(irq->nm_rxq);

	MPASS(irq->rxq != NULL);
	t4_intr(irq->rxq);
}
#endif

/*
 * Deals with interrupts on an iq-only (no freelist) queue.
 */
static int
service_iq(struct sge_iq *iq, int budget)
{
	struct sge_iq *q;
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs = 0, limit;
	int rsp_type;
	uint32_t lq;
	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
	KASSERT((iq->flags & IQ_HAS_FL) == 0,
	    ("%s: called for iq %p with fl (iq->flags 0x%x)", __func__, iq,
	    iq->flags));
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
	MPASS((iq->flags & IQ_LRO_ENABLED) == 0);

	limit = budget ? budget : iq->qsize / 16;

	/*
	 * We always come back and check the descriptor ring for new indirect
	 * interrupts and other responses after running a single handler.
	 */
	for (;;) {
		while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

			rmb();

			rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
			lq = be32toh(d->rsp.pldbuflen_qid);

			switch (rsp_type) {
			case X_RSPD_TYPE_FLBUF:
				panic("%s: data for an iq (%p) with no freelist",
				    __func__, iq);

				/* NOTREACHED */

			case X_RSPD_TYPE_CPL:
				KASSERT(d->rss.opcode < NUM_CPL_CMDS,
				    ("%s: bad opcode %02x.", __func__,
				    d->rss.opcode));
				t4_cpl_handler[d->rss.opcode](iq, &d->rss, NULL);
				break;

			case X_RSPD_TYPE_INTR:
				/*
				 * There are 1K interrupt-capable queues (qids 0
				 * through 1023).  A response type indicating a
				 * forwarded interrupt with a qid >= 1K is an
				 * iWARP async notification.
				 */
				if (__predict_true(lq >= 1024)) {
					t4_an_handler(iq, &d->rsp);
					break;
				}

				q = sc->sge.iqmap[lq - sc->sge.iq_start -
				    sc->sge.iq_base];
				if (atomic_cmpset_int(&q->state, IQS_IDLE,
				    IQS_BUSY)) {
					if (service_iq_fl(q, q->qsize / 16) == 0) {
						(void) atomic_cmpset_int(&q->state,
						    IQS_BUSY, IQS_IDLE);
					} else {
						STAILQ_INSERT_TAIL(&iql, q,
						    link);
					}
				}
				break;

			default:
				KASSERT(0,
				    ("%s: illegal response type %d on iq %p",
				    __func__, rsp_type, iq));
				log(LOG_ERR,
				    "%s: illegal response type %d on iq %p",
				    device_get_nameunit(sc->dev), rsp_type, iq);
				break;
			}

			d++;
			if (__predict_false(++iq->cidx == iq->sidx)) {
				iq->cidx = 0;
				iq->gen ^= F_RSPD_GEN;
				d = &iq->desc[0];
			}
			if (__predict_false(++ndescs == limit)) {
				t4_write_reg(sc, sc->sge_gts_reg,
				    V_CIDXINC(ndescs) |
				    V_INGRESSQID(iq->cntxt_id) |
				    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
				ndescs = 0;

				if (budget) {
					return (EINPROGRESS);
				}
			}
		}

		if (STAILQ_EMPTY(&iql))
			break;

		/*
		 * Process the head only, and send it to the back of the list if
		 * it's still not done.
		 */
		q = STAILQ_FIRST(&iql);
		STAILQ_REMOVE_HEAD(&iql, link);
		if (service_iq_fl(q, q->qsize / 8) == 0)
			(void) atomic_cmpset_int(&q->state, IQS_BUSY, IQS_IDLE);
		else
			STAILQ_INSERT_TAIL(&iql, q, link);
	}

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	return (0);
}

static inline int
sort_before_lro(struct lro_ctrl *lro)
{

	return (lro->lro_mbuf_max != 0);
}

static inline uint64_t
last_flit_to_ns(struct adapter *sc, uint64_t lf)
{
	uint64_t n = be64toh(lf) & 0xfffffffffffffff;	/* 60b, not 64b. */

	if (n > UINT64_MAX / 1000000)
		return (n / sc->params.vpd.cclk * 1000000);
	else
		return (n * 1000000 / sc->params.vpd.cclk);
}

static inline void
move_to_next_rxbuf(struct sge_fl *fl)
{

	fl->rx_offset = 0;
	if (__predict_false((++fl->cidx & 7) == 0)) {
		uint16_t cidx = fl->cidx >> 3;

		if (__predict_false(cidx == fl->sidx))
			fl->cidx = cidx = 0;
		fl->hw_cidx = cidx;
	}
}

/*
 * Deals with interrupts on an iq+fl queue.
 */
static int
service_iq_fl(struct sge_iq *iq, int budget)
{
	struct sge_rxq *rxq = iq_to_rxq(iq);
	struct sge_fl *fl;
	struct adapter *sc = iq->adapter;
	struct iq_desc *d = &iq->desc[iq->cidx];
	int ndescs, limit;
	int rsp_type, starved;
	uint32_t lq;
	uint16_t fl_hw_cidx;
	struct mbuf *m0;
#if defined(INET) || defined(INET6)
	const struct timeval lro_timeout = {0, sc->lro_timeout};
	struct lro_ctrl *lro = &rxq->lro;
#endif

	KASSERT(iq->state == IQS_BUSY, ("%s: iq %p not BUSY", __func__, iq));
	MPASS(iq->flags & IQ_HAS_FL);

	ndescs = 0;
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_ADJ_CREDIT) {
		MPASS(sort_before_lro(lro));
		iq->flags &= ~IQ_ADJ_CREDIT;
		if ((d->rsp.u.type_gen & F_RSPD_GEN) != iq->gen) {
			tcp_lro_flush_all(lro);
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(1) |
			    V_INGRESSQID((u32)iq->cntxt_id) |
			    V_SEINTARM(iq->intr_params));
			return (0);
		}
		ndescs = 1;
	}
#else
	MPASS((iq->flags & IQ_ADJ_CREDIT) == 0);
#endif

	limit = budget ? budget : iq->qsize / 16;
	fl = &rxq->fl;
	fl_hw_cidx = fl->hw_cidx;	/* stable snapshot */
	while ((d->rsp.u.type_gen & F_RSPD_GEN) == iq->gen) {

		rmb();

		m0 = NULL;
		rsp_type = G_RSPD_TYPE(d->rsp.u.type_gen);
		lq = be32toh(d->rsp.pldbuflen_qid);

		switch (rsp_type) {
		case X_RSPD_TYPE_FLBUF:
			if (lq & F_RSPD_NEWBUF) {
				if (fl->rx_offset > 0)
					move_to_next_rxbuf(fl);
				lq = G_RSPD_LEN(lq);
			}
			if (IDXDIFF(fl->hw_cidx, fl_hw_cidx, fl->sidx) > 4) {
				FL_LOCK(fl);
				refill_fl(sc, fl, 64);
				FL_UNLOCK(fl);
				fl_hw_cidx = fl->hw_cidx;
			}

			if (d->rss.opcode == CPL_RX_PKT) {
				if (__predict_true(eth_rx(sc, rxq, d, lq) == 0))
					break;
				goto out;
			}
			m0 = get_fl_payload(sc, fl, lq);
			if (__predict_false(m0 == NULL))
				goto out;

			/* fall through */

		case X_RSPD_TYPE_CPL:
			KASSERT(d->rss.opcode < NUM_CPL_CMDS,
			    ("%s: bad opcode %02x.", __func__, d->rss.opcode));
			t4_cpl_handler[d->rss.opcode](iq, &d->rss, m0);
			break;

		case X_RSPD_TYPE_INTR:

			/*
			 * There are 1K interrupt-capable queues (qids 0
			 * through 1023).  A response type indicating a
			 * forwarded interrupt with a qid >= 1K is an
			 * iWARP async notification.  That is the only
			 * acceptable indirect interrupt on this queue.
			 */
			if (__predict_false(lq < 1024)) {
				panic("%s: indirect interrupt on iq_fl %p "
				    "with qid %u", __func__, iq, lq);
			}

			t4_an_handler(iq, &d->rsp);
			break;

		default:
			KASSERT(0, ("%s: illegal response type %d on iq %p",
			    __func__, rsp_type, iq));
			log(LOG_ERR, "%s: illegal response type %d on iq %p",
			    device_get_nameunit(sc->dev), rsp_type, iq);
			break;
		}

		d++;
		if (__predict_false(++iq->cidx == iq->sidx)) {
			iq->cidx = 0;
			iq->gen ^= F_RSPD_GEN;
			d = &iq->desc[0];
		}
		if (__predict_false(++ndescs == limit)) {
			t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
			    V_INGRESSQID(iq->cntxt_id) |
			    V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));

#if defined(INET) || defined(INET6)
			if (iq->flags & IQ_LRO_ENABLED &&
			    !sort_before_lro(lro) &&
			    sc->lro_timeout != 0) {
				tcp_lro_flush_inactive(lro, &lro_timeout);
			}
#endif
			if (budget)
				return (EINPROGRESS);
			ndescs = 0;
		}
	}
out:
#if defined(INET) || defined(INET6)
	if (iq->flags & IQ_LRO_ENABLED) {
		if (ndescs > 0 && lro->lro_mbuf_count > 8) {
			MPASS(sort_before_lro(lro));
			/* hold back one credit and don't flush LRO state */
			iq->flags |= IQ_ADJ_CREDIT;
			ndescs--;
		} else {
			tcp_lro_flush_all(lro);
		}
	}
#endif

	t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));

	FL_LOCK(fl);
	starved = refill_fl(sc, fl, 64);
	FL_UNLOCK(fl);
	if (__predict_false(starved != 0))
		add_fl_to_sfl(sc, fl);

	return (0);
}

static inline struct cluster_metadata *
cl_metadata(struct fl_sdesc *sd)
{

	return ((void *)(sd->cl + sd->moff));
}

static void
rxb_free(struct mbuf *m)
{
	struct cluster_metadata *clm = m->m_ext.ext_arg1;

	uma_zfree(clm->zone, clm->cl);
	counter_u64_add(extfree_rels, 1);
}

/*
 * The mbuf returned comes from zone_mbuf and carries the payload in one of
 * these ways:
 * a) complete frame inside the mbuf
 * b) m_cljset (for clusters without metadata)
 * c) m_extaddref (cluster with metadata)
 */
static struct mbuf *
get_scatter_segment(struct adapter *sc, struct sge_fl *fl, int fr_offset,
    int remaining)
{
	struct mbuf *m;
	struct fl_sdesc *sd = &fl->sdesc[fl->cidx];
	struct rx_buf_info *rxb = &sc->sge.rx_buf_info[sd->zidx];
	struct cluster_metadata *clm;
	int len, blen;
	caddr_t payload;

	if (fl->flags & FL_BUF_PACKING) {
		u_int l, pad;

		blen = rxb->size2 - fl->rx_offset;	/* max possible in this buf */
		len = min(remaining, blen);
		payload = sd->cl + fl->rx_offset;

		l = fr_offset + len;
		pad = roundup2(l, fl->buf_boundary) - l;
		if (fl->rx_offset + len + pad < rxb->size2)
			blen = len + pad;
		MPASS(fl->rx_offset + blen <= rxb->size2);
	} else {
		MPASS(fl->rx_offset == 0);	/* not packing */
		blen = rxb->size1;
		len = min(remaining, blen);
		payload = sd->cl;
	}

	if (fr_offset == 0) {
		m = m_gethdr(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
		m->m_pkthdr.len = remaining;
	} else {
		m = m_get(M_NOWAIT, MT_DATA);
		if (__predict_false(m == NULL))
			return (NULL);
	}
	m->m_len = len;

	if (sc->sc_do_rxcopy && len < RX_COPY_THRESHOLD) {
		/* copy data to mbuf */
		bcopy(payload, mtod(m, caddr_t), len);
		if (fl->flags & FL_BUF_PACKING) {
			fl->rx_offset += blen;
			MPASS(fl->rx_offset <= rxb->size2);
			if (fl->rx_offset < rxb->size2)
				return (m);	/* without advancing the cidx */
		}
	} else if (fl->flags & FL_BUF_PACKING) {
		clm = cl_metadata(sd);
		if (sd->nmbuf++ == 0) {
			clm->refcount = 1;
			clm->zone = rxb->zone;
			clm->cl = sd->cl;
			counter_u64_add(extfree_refs, 1);
		}
		m_extaddref(m, payload, blen, &clm->refcount, rxb_free, clm,
		    NULL);

		fl->rx_offset += blen;
		MPASS(fl->rx_offset <= rxb->size2);
		if (fl->rx_offset < rxb->size2)
			return (m);	/* without advancing the cidx */
	} else {
		m_cljset(m, sd->cl, rxb->type);
		sd->cl = NULL;	/* consumed, not a recycle candidate */
	}

	move_to_next_rxbuf(fl);

	return (m);
}

static struct mbuf *
get_fl_payload(struct adapter *sc, struct sge_fl *fl, const u_int plen)
{
	struct mbuf *m0, *m, **pnext;
	u_int remaining;

	if (__predict_false(fl->flags & FL_BUF_RESUME)) {
		M_ASSERTPKTHDR(fl->m0);
		MPASS(fl->m0->m_pkthdr.len == plen);
		MPASS(fl->remaining < plen);

		m0 = fl->m0;
		pnext = fl->pnext;
		remaining = fl->remaining;
		fl->flags &= ~FL_BUF_RESUME;
		goto get_segment;
	}

	/*
	 * Payload starts at rx_offset in the current hw buffer.  Its length is
	 * 'len' and it may span multiple hw buffers.
	 */

	m0 = get_scatter_segment(sc, fl, 0, plen);
	if (m0 == NULL)
		return (NULL);
	remaining = plen - m0->m_len;
	pnext = &m0->m_next;
	while (remaining > 0) {
get_segment:
		MPASS(fl->rx_offset == 0);
		m = get_scatter_segment(sc, fl, plen - remaining, remaining);
		if (__predict_false(m == NULL)) {
			fl->m0 = m0;
			fl->pnext = pnext;
			fl->remaining = remaining;
			fl->flags |= FL_BUF_RESUME;
			return (NULL);
		}
		*pnext = m;
		pnext = &m->m_next;
		remaining -= m->m_len;
	}
	*pnext = NULL;

	M_ASSERTPKTHDR(m0);
	return (m0);
}
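
/*
 * Worked example (illustrative only): with buffer packing disabled and 4KB
 * freelist clusters, a 9000-byte payload spans three hw buffers and comes
 * back as a chain of three mbufs with m_len 4096, 4096 and 808
 * (4096 + 4096 + 808 = 9000), with m0->m_pkthdr.len set to the full 9000 by
 * the first call to get_scatter_segment().
 */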
CSUM_INNER_L3_CALC | CSUM_INNER_L3_VALID | 1937 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID, 1938 1939 /* IP6, inner IP6 */ 1940 CSUM_ENCAP_VXLAN | 1941 CSUM_L4_CALC | CSUM_L4_VALID | 1942 CSUM_INNER_L4_CALC | CSUM_INNER_L4_VALID, 1943 }, 1944 }; 1945 1946 MPASS(plen > sc->params.sge.fl_pktshift); 1947 m0 = get_fl_payload(sc, fl, plen); 1948 if (__predict_false(m0 == NULL)) 1949 return (ENOMEM); 1950 1951 m0->m_pkthdr.len -= sc->params.sge.fl_pktshift; 1952 m0->m_len -= sc->params.sge.fl_pktshift; 1953 m0->m_data += sc->params.sge.fl_pktshift; 1954 1955 m0->m_pkthdr.rcvif = ifp; 1956 M_HASHTYPE_SET(m0, sw_hashtype[d->rss.hash_type][d->rss.ipv6]); 1957 m0->m_pkthdr.flowid = be32toh(d->rss.hash_val); 1958 1959 cpl = (const void *)(&d->rss + 1); 1960 if (sc->params.tp.rx_pkt_encap) { 1961 const uint16_t ev = be16toh(cpl->err_vec); 1962 1963 err_vec = G_T6_COMPR_RXERR_VEC(ev); 1964 tnl_type = G_T6_RX_TNL_TYPE(ev); 1965 tnlhdr_len = G_T6_RX_TNLHDR_LEN(ev); 1966 } else { 1967 err_vec = be16toh(cpl->err_vec); 1968 tnl_type = 0; 1969 tnlhdr_len = 0; 1970 } 1971 if (cpl->csum_calc && err_vec == 0) { 1972 int ipv6 = !!(cpl->l2info & htobe32(F_RXF_IP6)); 1973 1974 /* checksum(s) calculated and found to be correct. */ 1975 1976 MPASS((cpl->l2info & htobe32(F_RXF_IP)) ^ 1977 (cpl->l2info & htobe32(F_RXF_IP6))); 1978 m0->m_pkthdr.csum_data = be16toh(cpl->csum); 1979 if (tnl_type == 0) { 1980 if (!ipv6 && ifp->if_capenable & IFCAP_RXCSUM) { 1981 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | 1982 CSUM_L3_VALID | CSUM_L4_CALC | 1983 CSUM_L4_VALID; 1984 } else if (ipv6 && ifp->if_capenable & IFCAP_RXCSUM_IPV6) { 1985 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | 1986 CSUM_L4_VALID; 1987 } 1988 rxq->rxcsum++; 1989 } else { 1990 MPASS(tnl_type == RX_PKT_TNL_TYPE_VXLAN); 1991 if (__predict_false(cpl->ip_frag)) { 1992 /* 1993 * csum_data is for the inner frame (which is an 1994 * IP fragment) and is not 0xffff. There is no 1995 * way to pass the inner csum_data to the stack. 1996 * We don't want the stack to use the inner 1997 * csum_data to validate the outer frame or it 1998 * will get rejected. So we fix csum_data here 1999 * and let sw do the checksum of inner IP 2000 * fragments. 2001 * 2002 * XXX: Need 32b for csum_data2 in an rx mbuf. 2003 * Maybe stuff it into rcv_tstmp? 2004 */ 2005 m0->m_pkthdr.csum_data = 0xffff; 2006 if (ipv6) { 2007 m0->m_pkthdr.csum_flags = CSUM_L4_CALC | 2008 CSUM_L4_VALID; 2009 } else { 2010 m0->m_pkthdr.csum_flags = CSUM_L3_CALC | 2011 CSUM_L3_VALID | CSUM_L4_CALC | 2012 CSUM_L4_VALID; 2013 } 2014 } else { 2015 int outer_ipv6; 2016 2017 MPASS(m0->m_pkthdr.csum_data == 0xffff); 2018 2019 outer_ipv6 = tnlhdr_len >= 2020 sizeof(struct ether_header) + 2021 sizeof(struct ip6_hdr); 2022 m0->m_pkthdr.csum_flags = 2023 sw_csum_flags[outer_ipv6][ipv6]; 2024 } 2025 rxq->vxlan_rxcsum++; 2026 } 2027 } 2028 2029 if (cpl->vlan_ex) { 2030 m0->m_pkthdr.ether_vtag = be16toh(cpl->vlan); 2031 m0->m_flags |= M_VLANTAG; 2032 rxq->vlan_extraction++; 2033 } 2034 2035 if (rxq->iq.flags & IQ_RX_TIMESTAMP) { 2036 /* 2037 * Fill up rcv_tstmp but do not set M_TSTMP. 2038 * rcv_tstmp is not in the format that the 2039 * kernel expects and we don't want to mislead 2040 * it. For now this is only for custom code 2041 * that knows how to interpret cxgbe's stamp. 
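		 * The value stored here is the hardware timestamp of the
		 * frame's last flit converted to nanoseconds by
		 * last_flit_to_ns().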
2042 */ 2043 m0->m_pkthdr.rcv_tstmp = 2044 last_flit_to_ns(sc, d->rsp.u.last_flit); 2045#ifdef notyet 2046 m0->m_flags |= M_TSTMP; 2047#endif 2048 } 2049 2050#if defined(INET) || defined(INET6) 2051 if (rxq->iq.flags & IQ_LRO_ENABLED && tnl_type == 0 && 2052 (M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV4 || 2053 M_HASHTYPE_GET(m0) == M_HASHTYPE_RSS_TCP_IPV6)) { 2054 if (sort_before_lro(lro)) { 2055 tcp_lro_queue_mbuf(lro, m0); 2056 return (0); /* queued for sort, then LRO */ 2057 } 2058 if (tcp_lro_rx(lro, m0, 0) == 0) 2059 return (0); /* queued for LRO */ 2060 } 2061#endif 2062 ifp->if_input(ifp, m0); 2063 2064 return (0); 2065} 2066 2067/* 2068 * Must drain the wrq or make sure that someone else will. 2069 */ 2070static void 2071wrq_tx_drain(void *arg, int n) 2072{ 2073 struct sge_wrq *wrq = arg; 2074 struct sge_eq *eq = &wrq->eq; 2075 2076 EQ_LOCK(eq); 2077 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2078 drain_wrq_wr_list(wrq->adapter, wrq); 2079 EQ_UNLOCK(eq); 2080} 2081 2082static void 2083drain_wrq_wr_list(struct adapter *sc, struct sge_wrq *wrq) 2084{ 2085 struct sge_eq *eq = &wrq->eq; 2086 u_int available, dbdiff; /* # of hardware descriptors */ 2087 u_int n; 2088 struct wrqe *wr; 2089 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2090 2091 EQ_LOCK_ASSERT_OWNED(eq); 2092 MPASS(TAILQ_EMPTY(&wrq->incomplete_wrs)); 2093 wr = STAILQ_FIRST(&wrq->wr_list); 2094 MPASS(wr != NULL); /* Must be called with something useful to do */ 2095 MPASS(eq->pidx == eq->dbidx); 2096 dbdiff = 0; 2097 2098 do { 2099 eq->cidx = read_hw_cidx(eq); 2100 if (eq->pidx == eq->cidx) 2101 available = eq->sidx - 1; 2102 else 2103 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2104 2105 MPASS(wr->wrq == wrq); 2106 n = howmany(wr->wr_len, EQ_ESIZE); 2107 if (available < n) 2108 break; 2109 2110 dst = (void *)&eq->desc[eq->pidx]; 2111 if (__predict_true(eq->sidx - eq->pidx > n)) { 2112 /* Won't wrap, won't end exactly at the status page. */ 2113 bcopy(&wr->wr[0], dst, wr->wr_len); 2114 eq->pidx += n; 2115 } else { 2116 int first_portion = (eq->sidx - eq->pidx) * EQ_ESIZE; 2117 2118 bcopy(&wr->wr[0], dst, first_portion); 2119 if (wr->wr_len > first_portion) { 2120 bcopy(&wr->wr[first_portion], &eq->desc[0], 2121 wr->wr_len - first_portion); 2122 } 2123 eq->pidx = n - (eq->sidx - eq->pidx); 2124 } 2125 wrq->tx_wrs_copied++; 2126 2127 if (available < eq->sidx / 4 && 2128 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2129 /* 2130 * XXX: This is not 100% reliable with some 2131 * types of WRs. But this is a very unusual 2132 * situation for an ofld/ctrl queue anyway. 2133 */ 2134 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2135 F_FW_WR_EQUEQ); 2136 } 2137 2138 dbdiff += n; 2139 if (dbdiff >= 16) { 2140 ring_eq_db(sc, eq, dbdiff); 2141 dbdiff = 0; 2142 } 2143 2144 STAILQ_REMOVE_HEAD(&wrq->wr_list, link); 2145 free_wrqe(wr); 2146 MPASS(wrq->nwr_pending > 0); 2147 wrq->nwr_pending--; 2148 MPASS(wrq->ndesc_needed >= n); 2149 wrq->ndesc_needed -= n; 2150 } while ((wr = STAILQ_FIRST(&wrq->wr_list)) != NULL); 2151 2152 if (dbdiff) 2153 ring_eq_db(sc, eq, dbdiff); 2154} 2155 2156/* 2157 * Doesn't fail. Holds on to work requests it can't send right away. 
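 * The WR is appended to wr_list.  If a partially written WR is outstanding
 * (incomplete_wrs is not empty) the list is left for commit_wrq_wr to drain;
 * otherwise it is drained here while the EQ lock is held.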
2158 */ 2159void 2160t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, struct wrqe *wr) 2161{ 2162#ifdef INVARIANTS 2163 struct sge_eq *eq = &wrq->eq; 2164#endif 2165 2166 EQ_LOCK_ASSERT_OWNED(eq); 2167 MPASS(wr != NULL); 2168 MPASS(wr->wr_len > 0 && wr->wr_len <= SGE_MAX_WR_LEN); 2169 MPASS((wr->wr_len & 0x7) == 0); 2170 2171 STAILQ_INSERT_TAIL(&wrq->wr_list, wr, link); 2172 wrq->nwr_pending++; 2173 wrq->ndesc_needed += howmany(wr->wr_len, EQ_ESIZE); 2174 2175 if (!TAILQ_EMPTY(&wrq->incomplete_wrs)) 2176 return; /* commit_wrq_wr will drain wr_list as well. */ 2177 2178 drain_wrq_wr_list(sc, wrq); 2179 2180 /* Doorbell must have caught up to the pidx. */ 2181 MPASS(eq->pidx == eq->dbidx); 2182} 2183 2184void 2185t4_update_fl_bufsize(struct ifnet *ifp) 2186{ 2187 struct vi_info *vi = ifp->if_softc; 2188 struct adapter *sc = vi->adapter; 2189 struct sge_rxq *rxq; 2190#ifdef TCP_OFFLOAD 2191 struct sge_ofld_rxq *ofld_rxq; 2192#endif 2193 struct sge_fl *fl; 2194 int i, maxp; 2195 2196 maxp = max_rx_payload(sc, ifp, false); 2197 for_each_rxq(vi, i, rxq) { 2198 fl = &rxq->fl; 2199 2200 FL_LOCK(fl); 2201 fl->zidx = find_refill_source(sc, maxp, 2202 fl->flags & FL_BUF_PACKING); 2203 FL_UNLOCK(fl); 2204 } 2205#ifdef TCP_OFFLOAD 2206 maxp = max_rx_payload(sc, ifp, true); 2207 for_each_ofld_rxq(vi, i, ofld_rxq) { 2208 fl = &ofld_rxq->fl; 2209 2210 FL_LOCK(fl); 2211 fl->zidx = find_refill_source(sc, maxp, 2212 fl->flags & FL_BUF_PACKING); 2213 FL_UNLOCK(fl); 2214 } 2215#endif 2216} 2217 2218static inline int 2219mbuf_nsegs(struct mbuf *m) 2220{ 2221 2222 M_ASSERTPKTHDR(m); 2223 KASSERT(m->m_pkthdr.inner_l5hlen > 0, 2224 ("%s: mbuf %p missing information on # of segments.", __func__, m)); 2225 2226 return (m->m_pkthdr.inner_l5hlen); 2227} 2228 2229static inline void 2230set_mbuf_nsegs(struct mbuf *m, uint8_t nsegs) 2231{ 2232 2233 M_ASSERTPKTHDR(m); 2234 m->m_pkthdr.inner_l5hlen = nsegs; 2235} 2236 2237static inline int 2238mbuf_cflags(struct mbuf *m) 2239{ 2240 2241 M_ASSERTPKTHDR(m); 2242 return (m->m_pkthdr.PH_loc.eight[4]); 2243} 2244 2245static inline void 2246set_mbuf_cflags(struct mbuf *m, uint8_t flags) 2247{ 2248 2249 M_ASSERTPKTHDR(m); 2250 m->m_pkthdr.PH_loc.eight[4] = flags; 2251} 2252 2253static inline int 2254mbuf_len16(struct mbuf *m) 2255{ 2256 int n; 2257 2258 M_ASSERTPKTHDR(m); 2259 n = m->m_pkthdr.PH_loc.eight[0]; 2260 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2261 2262 return (n); 2263} 2264 2265static inline void 2266set_mbuf_len16(struct mbuf *m, uint8_t len16) 2267{ 2268 2269 M_ASSERTPKTHDR(m); 2270 MPASS(len16 > 0 && len16 <= SGE_MAX_WR_LEN / 16); 2271 m->m_pkthdr.PH_loc.eight[0] = len16; 2272} 2273 2274#ifdef RATELIMIT 2275static inline int 2276mbuf_eo_nsegs(struct mbuf *m) 2277{ 2278 2279 M_ASSERTPKTHDR(m); 2280 return (m->m_pkthdr.PH_loc.eight[1]); 2281} 2282 2283static inline void 2284set_mbuf_eo_nsegs(struct mbuf *m, uint8_t nsegs) 2285{ 2286 2287 M_ASSERTPKTHDR(m); 2288 m->m_pkthdr.PH_loc.eight[1] = nsegs; 2289} 2290 2291static inline int 2292mbuf_eo_len16(struct mbuf *m) 2293{ 2294 int n; 2295 2296 M_ASSERTPKTHDR(m); 2297 n = m->m_pkthdr.PH_loc.eight[2]; 2298 MPASS(n > 0 && n <= SGE_MAX_WR_LEN / 16); 2299 2300 return (n); 2301} 2302 2303static inline void 2304set_mbuf_eo_len16(struct mbuf *m, uint8_t len16) 2305{ 2306 2307 M_ASSERTPKTHDR(m); 2308 m->m_pkthdr.PH_loc.eight[2] = len16; 2309} 2310 2311static inline int 2312mbuf_eo_tsclk_tsoff(struct mbuf *m) 2313{ 2314 2315 M_ASSERTPKTHDR(m); 2316 return (m->m_pkthdr.PH_loc.eight[3]); 2317} 2318 2319static inline void 
2320set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_tsoff) 2321{ 2322 2323 M_ASSERTPKTHDR(m); 2324 m->m_pkthdr.PH_loc.eight[3] = tsclk_tsoff; 2325} 2326 2327static inline int 2328needs_eo(struct mbuf *m) 2329{ 2330 2331 return (m->m_pkthdr.snd_tag != NULL); 2332} 2333#endif 2334 2335/* 2336 * Try to allocate an mbuf to contain a raw work request. To make it 2337 * easy to construct the work request, don't allocate a chain but a 2338 * single mbuf. 2339 */ 2340struct mbuf * 2341alloc_wr_mbuf(int len, int how) 2342{ 2343 struct mbuf *m; 2344 2345 if (len <= MHLEN) 2346 m = m_gethdr(how, MT_DATA); 2347 else if (len <= MCLBYTES) 2348 m = m_getcl(how, MT_DATA, M_PKTHDR); 2349 else 2350 m = NULL; 2351 if (m == NULL) 2352 return (NULL); 2353 m->m_pkthdr.len = len; 2354 m->m_len = len; 2355 set_mbuf_cflags(m, MC_RAW_WR); 2356 set_mbuf_len16(m, howmany(len, 16)); 2357 return (m); 2358} 2359 2360static inline bool 2361needs_hwcsum(struct mbuf *m) 2362{ 2363 const uint32_t csum_flags = CSUM_IP | CSUM_IP_UDP | CSUM_IP_TCP | 2364 CSUM_IP_TSO | CSUM_INNER_IP | CSUM_INNER_IP_UDP | 2365 CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | CSUM_IP6_UDP | 2366 CSUM_IP6_TCP | CSUM_IP6_TSO | CSUM_INNER_IP6_UDP | 2367 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO; 2368 2369 M_ASSERTPKTHDR(m); 2370 2371 return (m->m_pkthdr.csum_flags & csum_flags); 2372} 2373 2374static inline bool 2375needs_tso(struct mbuf *m) 2376{ 2377 const uint32_t csum_flags = CSUM_IP_TSO | CSUM_IP6_TSO | 2378 CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO; 2379 2380 M_ASSERTPKTHDR(m); 2381 2382 return (m->m_pkthdr.csum_flags & csum_flags); 2383} 2384 2385static inline bool 2386needs_vxlan_csum(struct mbuf *m) 2387{ 2388 2389 M_ASSERTPKTHDR(m); 2390 2391 return (m->m_pkthdr.csum_flags & CSUM_ENCAP_VXLAN); 2392} 2393 2394static inline bool 2395needs_vxlan_tso(struct mbuf *m) 2396{ 2397 const uint32_t csum_flags = CSUM_ENCAP_VXLAN | CSUM_INNER_IP_TSO | 2398 CSUM_INNER_IP6_TSO; 2399 2400 M_ASSERTPKTHDR(m); 2401 2402 return ((m->m_pkthdr.csum_flags & csum_flags) != 0 && 2403 (m->m_pkthdr.csum_flags & csum_flags) != CSUM_ENCAP_VXLAN); 2404} 2405 2406static inline bool 2407needs_inner_tcp_csum(struct mbuf *m) 2408{ 2409 const uint32_t csum_flags = CSUM_INNER_IP_TSO | CSUM_INNER_IP6_TSO; 2410 2411 M_ASSERTPKTHDR(m); 2412 2413 return (m->m_pkthdr.csum_flags & csum_flags); 2414} 2415 2416static inline bool 2417needs_l3_csum(struct mbuf *m) 2418{ 2419 const uint32_t csum_flags = CSUM_IP | CSUM_IP_TSO | CSUM_INNER_IP | 2420 CSUM_INNER_IP_TSO; 2421 2422 M_ASSERTPKTHDR(m); 2423 2424 return (m->m_pkthdr.csum_flags & csum_flags); 2425} 2426 2427static inline bool 2428needs_outer_tcp_csum(struct mbuf *m) 2429{ 2430 const uint32_t csum_flags = CSUM_IP_TCP | CSUM_IP_TSO | CSUM_IP6_TCP | 2431 CSUM_IP6_TSO; 2432 2433 M_ASSERTPKTHDR(m); 2434 2435 return (m->m_pkthdr.csum_flags & csum_flags); 2436} 2437 2438#ifdef RATELIMIT 2439static inline bool 2440needs_outer_l4_csum(struct mbuf *m) 2441{ 2442 const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP_TCP | CSUM_IP_TSO | 2443 CSUM_IP6_UDP | CSUM_IP6_TCP | CSUM_IP6_TSO; 2444 2445 M_ASSERTPKTHDR(m); 2446 2447 return (m->m_pkthdr.csum_flags & csum_flags); 2448} 2449 2450static inline bool 2451needs_outer_udp_csum(struct mbuf *m) 2452{ 2453 const uint32_t csum_flags = CSUM_IP_UDP | CSUM_IP6_UDP; 2454 2455 M_ASSERTPKTHDR(m); 2456 2457 return (m->m_pkthdr.csum_flags & csum_flags); 2458} 2459#endif 2460 2461static inline bool 2462needs_vlan_insertion(struct mbuf *m) 2463{ 2464 2465 M_ASSERTPKTHDR(m); 2466 2467 return (m->m_flags & M_VLANTAG); 
2468} 2469 2470static void * 2471m_advance(struct mbuf **pm, int *poffset, int len) 2472{ 2473 struct mbuf *m = *pm; 2474 int offset = *poffset; 2475 uintptr_t p = 0; 2476 2477 MPASS(len > 0); 2478 2479 for (;;) { 2480 if (offset + len < m->m_len) { 2481 offset += len; 2482 p = mtod(m, uintptr_t) + offset; 2483 break; 2484 } 2485 len -= m->m_len - offset; 2486 m = m->m_next; 2487 offset = 0; 2488 MPASS(m != NULL); 2489 } 2490 *poffset = offset; 2491 *pm = m; 2492 return ((void *)p); 2493} 2494 2495#if IFCAP_NOMAP != 0 2496static inline int 2497count_mbuf_ext_pgs(struct mbuf *m, int skip, vm_paddr_t *nextaddr) 2498{ 2499 struct mbuf_ext_pgs *ext_pgs; 2500 vm_paddr_t paddr; 2501 int i, len, off, pglen, pgoff, seglen, segoff; 2502 int nsegs = 0; 2503 2504 MBUF_EXT_PGS_ASSERT(m); 2505 ext_pgs = m->m_ext.ext_pgs; 2506 off = mtod(m, vm_offset_t); 2507 len = m->m_len; 2508 off += skip; 2509 len -= skip; 2510 2511 if (ext_pgs->hdr_len != 0) { 2512 if (off >= ext_pgs->hdr_len) { 2513 off -= ext_pgs->hdr_len; 2514 } else { 2515 seglen = ext_pgs->hdr_len - off; 2516 segoff = off; 2517 seglen = min(seglen, len); 2518 off = 0; 2519 len -= seglen; 2520 paddr = pmap_kextract( 2521 (vm_offset_t)&ext_pgs->hdr[segoff]); 2522 if (*nextaddr != paddr) 2523 nsegs++; 2524 *nextaddr = paddr + seglen; 2525 } 2526 } 2527 pgoff = ext_pgs->first_pg_off; 2528 for (i = 0; i < ext_pgs->npgs && len > 0; i++) { 2529 pglen = mbuf_ext_pg_len(ext_pgs, i, pgoff); 2530 if (off >= pglen) { 2531 off -= pglen; 2532 pgoff = 0; 2533 continue; 2534 } 2535 seglen = pglen - off; 2536 segoff = pgoff + off; 2537 off = 0; 2538 seglen = min(seglen, len); 2539 len -= seglen; 2540 paddr = ext_pgs->pa[i] + segoff; 2541 if (*nextaddr != paddr) 2542 nsegs++; 2543 *nextaddr = paddr + seglen; 2544 pgoff = 0; 2545 }; 2546 if (len != 0) { 2547 seglen = min(len, ext_pgs->trail_len - off); 2548 len -= seglen; 2549 paddr = pmap_kextract((vm_offset_t)&ext_pgs->trail[off]); 2550 if (*nextaddr != paddr) 2551 nsegs++; 2552 *nextaddr = paddr + seglen; 2553 } 2554 2555 return (nsegs); 2556} 2557#endif 2558 2559 2560/* 2561 * Can deal with empty mbufs in the chain that have m_len = 0, but the chain 2562 * must have at least one mbuf that's not empty. It is possible for this 2563 * routine to return 0 if skip accounts for all the contents of the mbuf chain. 2564 */ 2565static inline int 2566count_mbuf_nsegs(struct mbuf *m, int skip, uint8_t *cflags) 2567{ 2568 vm_paddr_t nextaddr, paddr; 2569 vm_offset_t va; 2570 int len, nsegs; 2571 2572 M_ASSERTPKTHDR(m); 2573 MPASS(m->m_pkthdr.len > 0); 2574 MPASS(m->m_pkthdr.len >= skip); 2575 2576 nsegs = 0; 2577 nextaddr = 0; 2578 for (; m; m = m->m_next) { 2579 len = m->m_len; 2580 if (__predict_false(len == 0)) 2581 continue; 2582 if (skip >= len) { 2583 skip -= len; 2584 continue; 2585 } 2586#if IFCAP_NOMAP != 0 2587 if ((m->m_flags & M_NOMAP) != 0) { 2588 *cflags |= MC_NOMAP; 2589 nsegs += count_mbuf_ext_pgs(m, skip, &nextaddr); 2590 skip = 0; 2591 continue; 2592 } 2593#endif 2594 va = mtod(m, vm_offset_t) + skip; 2595 len -= skip; 2596 skip = 0; 2597 paddr = pmap_kextract(va); 2598 nsegs += sglist_count((void *)(uintptr_t)va, len); 2599 if (paddr == nextaddr) 2600 nsegs--; 2601 nextaddr = pmap_kextract(va + len - 1) + 1; 2602 } 2603 2604 return (nsegs); 2605} 2606 2607/* 2608 * The maximum number of segments that can fit in a WR. 
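 * The limit depends on the WR type: VM work requests and TSO (including
 * VXLAN TSO) each have their own SGL segment budget, selected below.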
2609 */ 2610static int 2611max_nsegs_allowed(struct mbuf *m, bool vm_wr) 2612{ 2613 2614 if (vm_wr) { 2615 if (needs_tso(m)) 2616 return (TX_SGL_SEGS_VM_TSO); 2617 return (TX_SGL_SEGS_VM); 2618 } 2619 2620 if (needs_tso(m)) { 2621 if (needs_vxlan_tso(m)) 2622 return (TX_SGL_SEGS_VXLAN_TSO); 2623 else 2624 return (TX_SGL_SEGS_TSO); 2625 } 2626 2627 return (TX_SGL_SEGS); 2628} 2629 2630/* 2631 * Analyze the mbuf to determine its tx needs. The mbuf passed in may change: 2632 * a) caller can assume it's been freed if this function returns with an error. 2633 * b) it may get defragged up if the gather list is too long for the hardware. 2634 */ 2635int 2636parse_pkt(struct mbuf **mp, bool vm_wr) 2637{ 2638 struct mbuf *m0 = *mp, *m; 2639 int rc, nsegs, defragged = 0, offset; 2640 struct ether_header *eh; 2641 void *l3hdr; 2642#if defined(INET) || defined(INET6) 2643 struct tcphdr *tcp; 2644#endif 2645 uint16_t eh_type; 2646 uint8_t cflags; 2647 2648 cflags = 0; 2649 M_ASSERTPKTHDR(m0); 2650 if (__predict_false(m0->m_pkthdr.len < ETHER_HDR_LEN)) { 2651 rc = EINVAL; 2652fail: 2653 m_freem(m0); 2654 *mp = NULL; 2655 return (rc); 2656 } 2657restart: 2658 /* 2659 * First count the number of gather list segments in the payload. 2660 * Defrag the mbuf if nsegs exceeds the hardware limit. 2661 */ 2662 M_ASSERTPKTHDR(m0); 2663 MPASS(m0->m_pkthdr.len > 0); 2664 nsegs = count_mbuf_nsegs(m0, 0, &cflags); 2665 if (nsegs > max_nsegs_allowed(m0, vm_wr)) { 2666 if (defragged++ > 0) { 2667 rc = EFBIG; 2668 goto fail; 2669 } 2670 counter_u64_add(defrags, 1); 2671 if ((m = m_defrag(m0, M_NOWAIT)) == NULL) { 2672 rc = ENOMEM; 2673 goto fail; 2674 } 2675 *mp = m0 = m; /* update caller's copy after defrag */ 2676 goto restart; 2677 } 2678 2679 if (__predict_false(nsegs > 2 && m0->m_pkthdr.len <= MHLEN && 2680 !(cflags & MC_NOMAP))) { 2681 counter_u64_add(pullups, 1); 2682 m0 = m_pullup(m0, m0->m_pkthdr.len); 2683 if (m0 == NULL) { 2684 /* Should have left well enough alone. */ 2685 rc = EFBIG; 2686 goto fail; 2687 } 2688 *mp = m0; /* update caller's copy after pullup */ 2689 goto restart; 2690 } 2691 set_mbuf_nsegs(m0, nsegs); 2692 set_mbuf_cflags(m0, cflags); 2693 calculate_mbuf_len16(m0, vm_wr); 2694 2695#ifdef RATELIMIT 2696 /* 2697 * Ethofld is limited to TCP and UDP for now, and only when L4 hw 2698 * checksumming is enabled. needs_outer_l4_csum happens to check for 2699 * all the right things. 2700 */ 2701 if (__predict_false(needs_eo(m0) && !needs_outer_l4_csum(m0))) 2702 m0->m_pkthdr.snd_tag = NULL; 2703#endif 2704 2705 if (!needs_hwcsum(m0) 2706#ifdef RATELIMIT 2707 && !needs_eo(m0) 2708#endif 2709 ) 2710 return (0); 2711 2712 m = m0; 2713 eh = mtod(m, struct ether_header *); 2714 eh_type = ntohs(eh->ether_type); 2715 if (eh_type == ETHERTYPE_VLAN) { 2716 struct ether_vlan_header *evh = (void *)eh; 2717 2718 eh_type = ntohs(evh->evl_proto); 2719 m0->m_pkthdr.l2hlen = sizeof(*evh); 2720 } else 2721 m0->m_pkthdr.l2hlen = sizeof(*eh); 2722 2723 offset = 0; 2724 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.l2hlen); 2725 2726 switch (eh_type) { 2727#ifdef INET6 2728 case ETHERTYPE_IPV6: 2729 m0->m_pkthdr.l3hlen = sizeof(struct ip6_hdr); 2730 break; 2731#endif 2732#ifdef INET 2733 case ETHERTYPE_IP: 2734 { 2735 struct ip *ip = l3hdr; 2736 2737 if (needs_vxlan_csum(m0)) { 2738 /* Driver will do the outer IP hdr checksum. 
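			 * For plain VXLAN tx the completed header checksum is
			 * stored here; for VXLAN TSO only a partial checksum,
			 * computed with ip_len zeroed, is stored and ip_len is
			 * then restored.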
*/ 2739 ip->ip_sum = 0; 2740 if (needs_vxlan_tso(m0)) { 2741 const uint16_t ipl = ip->ip_len; 2742 2743 ip->ip_len = 0; 2744 ip->ip_sum = ~in_cksum_hdr(ip); 2745 ip->ip_len = ipl; 2746 } else 2747 ip->ip_sum = in_cksum_hdr(ip); 2748 } 2749 m0->m_pkthdr.l3hlen = ip->ip_hl << 2; 2750 break; 2751 } 2752#endif 2753 default: 2754 panic("%s: ethertype 0x%04x unknown. if_cxgbe must be compiled" 2755 " with the same INET/INET6 options as the kernel.", 2756 __func__, eh_type); 2757 } 2758 2759 if (needs_vxlan_csum(m0)) { 2760 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2761 m0->m_pkthdr.l5hlen = sizeof(struct vxlan_header); 2762 2763 /* Inner headers. */ 2764 eh = m_advance(&m, &offset, m0->m_pkthdr.l3hlen + 2765 sizeof(struct udphdr) + sizeof(struct vxlan_header)); 2766 eh_type = ntohs(eh->ether_type); 2767 if (eh_type == ETHERTYPE_VLAN) { 2768 struct ether_vlan_header *evh = (void *)eh; 2769 2770 eh_type = ntohs(evh->evl_proto); 2771 m0->m_pkthdr.inner_l2hlen = sizeof(*evh); 2772 } else 2773 m0->m_pkthdr.inner_l2hlen = sizeof(*eh); 2774 l3hdr = m_advance(&m, &offset, m0->m_pkthdr.inner_l2hlen); 2775 2776 switch (eh_type) { 2777#ifdef INET6 2778 case ETHERTYPE_IPV6: 2779 m0->m_pkthdr.inner_l3hlen = sizeof(struct ip6_hdr); 2780 break; 2781#endif 2782#ifdef INET 2783 case ETHERTYPE_IP: 2784 { 2785 struct ip *ip = l3hdr; 2786 2787 m0->m_pkthdr.inner_l3hlen = ip->ip_hl << 2; 2788 break; 2789 } 2790#endif 2791 default: 2792 panic("%s: VXLAN hw offload requested with unknown " 2793 "ethertype 0x%04x. if_cxgbe must be compiled" 2794 " with the same INET/INET6 options as the kernel.", 2795 __func__, eh_type); 2796 } 2797#if defined(INET) || defined(INET6) 2798 if (needs_inner_tcp_csum(m0)) { 2799 tcp = m_advance(&m, &offset, m0->m_pkthdr.inner_l3hlen); 2800 m0->m_pkthdr.inner_l4hlen = tcp->th_off * 4; 2801 } 2802#endif 2803 MPASS((m0->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0); 2804 m0->m_pkthdr.csum_flags &= CSUM_INNER_IP6_UDP | 2805 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_TSO | CSUM_INNER_IP | 2806 CSUM_INNER_IP_UDP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_TSO | 2807 CSUM_ENCAP_VXLAN; 2808 } 2809 2810#if defined(INET) || defined(INET6) 2811 if (needs_outer_tcp_csum(m0)) { 2812 tcp = m_advance(&m, &offset, m0->m_pkthdr.l3hlen); 2813 m0->m_pkthdr.l4hlen = tcp->th_off * 4; 2814#ifdef RATELIMIT 2815 if (tsclk >= 0 && *(uint32_t *)(tcp + 1) == ntohl(0x0101080a)) { 2816 set_mbuf_eo_tsclk_tsoff(m0, 2817 V_FW_ETH_TX_EO_WR_TSCLK(tsclk) | 2818 V_FW_ETH_TX_EO_WR_TSOFF(sizeof(*tcp) / 2 + 1)); 2819 } else 2820 set_mbuf_eo_tsclk_tsoff(m0, 0); 2821 } else if (needs_outer_udp_csum(m0)) { 2822 m0->m_pkthdr.l4hlen = sizeof(struct udphdr); 2823#endif 2824 } 2825#ifdef RATELIMIT 2826 if (needs_eo(m0)) { 2827 u_int immhdrs; 2828 2829 /* EO WRs have the headers in the WR and not the GL. 
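		/*
		 * The L2/L3/L4 headers (immhdrs bytes) travel as immediate
		 * data in the WR itself, so they are excluded from the
		 * gather list segment and len16 accounting below.
		 */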
*/ 2830 immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen + 2831 m0->m_pkthdr.l4hlen; 2832 cflags = 0; 2833 nsegs = count_mbuf_nsegs(m0, immhdrs, &cflags); 2834 MPASS(cflags == mbuf_cflags(m0)); 2835 set_mbuf_eo_nsegs(m0, nsegs); 2836 set_mbuf_eo_len16(m0, 2837 txpkt_eo_len16(nsegs, immhdrs, needs_tso(m0))); 2838 } 2839#endif 2840#endif 2841 MPASS(m0 == *mp); 2842 return (0); 2843} 2844 2845void * 2846start_wrq_wr(struct sge_wrq *wrq, int len16, struct wrq_cookie *cookie) 2847{ 2848 struct sge_eq *eq = &wrq->eq; 2849 struct adapter *sc = wrq->adapter; 2850 int ndesc, available; 2851 struct wrqe *wr; 2852 void *w; 2853 2854 MPASS(len16 > 0); 2855 ndesc = tx_len16_to_desc(len16); 2856 MPASS(ndesc > 0 && ndesc <= SGE_MAX_WR_NDESC); 2857 2858 EQ_LOCK(eq); 2859 2860 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2861 drain_wrq_wr_list(sc, wrq); 2862 2863 if (!STAILQ_EMPTY(&wrq->wr_list)) { 2864slowpath: 2865 EQ_UNLOCK(eq); 2866 wr = alloc_wrqe(len16 * 16, wrq); 2867 if (__predict_false(wr == NULL)) 2868 return (NULL); 2869 cookie->pidx = -1; 2870 cookie->ndesc = ndesc; 2871 return (&wr->wr); 2872 } 2873 2874 eq->cidx = read_hw_cidx(eq); 2875 if (eq->pidx == eq->cidx) 2876 available = eq->sidx - 1; 2877 else 2878 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2879 if (available < ndesc) 2880 goto slowpath; 2881 2882 cookie->pidx = eq->pidx; 2883 cookie->ndesc = ndesc; 2884 TAILQ_INSERT_TAIL(&wrq->incomplete_wrs, cookie, link); 2885 2886 w = &eq->desc[eq->pidx]; 2887 IDXINCR(eq->pidx, ndesc, eq->sidx); 2888 if (__predict_false(cookie->pidx + ndesc > eq->sidx)) { 2889 w = &wrq->ss[0]; 2890 wrq->ss_pidx = cookie->pidx; 2891 wrq->ss_len = len16 * 16; 2892 } 2893 2894 EQ_UNLOCK(eq); 2895 2896 return (w); 2897} 2898 2899void 2900commit_wrq_wr(struct sge_wrq *wrq, void *w, struct wrq_cookie *cookie) 2901{ 2902 struct sge_eq *eq = &wrq->eq; 2903 struct adapter *sc = wrq->adapter; 2904 int ndesc, pidx; 2905 struct wrq_cookie *prev, *next; 2906 2907 if (cookie->pidx == -1) { 2908 struct wrqe *wr = __containerof(w, struct wrqe, wr); 2909 2910 t4_wrq_tx(sc, wr); 2911 return; 2912 } 2913 2914 if (__predict_false(w == &wrq->ss[0])) { 2915 int n = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE; 2916 2917 MPASS(wrq->ss_len > n); /* WR had better wrap around. */ 2918 bcopy(&wrq->ss[0], &eq->desc[wrq->ss_pidx], n); 2919 bcopy(&wrq->ss[n], &eq->desc[0], wrq->ss_len - n); 2920 wrq->tx_wrs_ss++; 2921 } else 2922 wrq->tx_wrs_direct++; 2923 2924 EQ_LOCK(eq); 2925 ndesc = cookie->ndesc; /* Can be more than SGE_MAX_WR_NDESC here. */ 2926 pidx = cookie->pidx; 2927 MPASS(pidx >= 0 && pidx < eq->sidx); 2928 prev = TAILQ_PREV(cookie, wrq_incomplete_wrs, link); 2929 next = TAILQ_NEXT(cookie, link); 2930 if (prev == NULL) { 2931 MPASS(pidx == eq->dbidx); 2932 if (next == NULL || ndesc >= 16) { 2933 int available; 2934 struct fw_eth_tx_pkt_wr *dst; /* any fw WR struct will do */ 2935 2936 /* 2937 * Note that the WR via which we'll request tx updates 2938 * is at pidx and not eq->pidx, which has moved on 2939 * already. 2940 */ 2941 dst = (void *)&eq->desc[pidx]; 2942 available = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 2943 if (available < eq->sidx / 4 && 2944 atomic_cmpset_int(&eq->equiq, 0, 1)) { 2945 /* 2946 * XXX: This is not 100% reliable with some 2947 * types of WRs. But this is a very unusual 2948 * situation for an ofld/ctrl queue anyway. 
2949 */ 2950 dst->equiq_to_len16 |= htobe32(F_FW_WR_EQUIQ | 2951 F_FW_WR_EQUEQ); 2952 } 2953 2954 ring_eq_db(wrq->adapter, eq, ndesc); 2955 } else { 2956 MPASS(IDXDIFF(next->pidx, pidx, eq->sidx) == ndesc); 2957 next->pidx = pidx; 2958 next->ndesc += ndesc; 2959 } 2960 } else { 2961 MPASS(IDXDIFF(pidx, prev->pidx, eq->sidx) == prev->ndesc); 2962 prev->ndesc += ndesc; 2963 } 2964 TAILQ_REMOVE(&wrq->incomplete_wrs, cookie, link); 2965 2966 if (TAILQ_EMPTY(&wrq->incomplete_wrs) && !STAILQ_EMPTY(&wrq->wr_list)) 2967 drain_wrq_wr_list(sc, wrq); 2968 2969#ifdef INVARIANTS 2970 if (TAILQ_EMPTY(&wrq->incomplete_wrs)) { 2971 /* Doorbell must have caught up to the pidx. */ 2972 MPASS(wrq->eq.pidx == wrq->eq.dbidx); 2973 } 2974#endif 2975 EQ_UNLOCK(eq); 2976} 2977 2978static u_int 2979can_resume_eth_tx(struct mp_ring *r) 2980{ 2981 struct sge_eq *eq = r->cookie; 2982 2983 return (total_available_tx_desc(eq) > eq->sidx / 8); 2984} 2985 2986static inline bool 2987cannot_use_txpkts(struct mbuf *m) 2988{ 2989 /* maybe put a GL limit too, to avoid silliness? */ 2990 2991 return (needs_tso(m) || (mbuf_cflags(m) & MC_RAW_WR) != 0); 2992} 2993 2994static inline int 2995discard_tx(struct sge_eq *eq) 2996{ 2997 2998 return ((eq->flags & (EQ_ENABLED | EQ_QFLUSH)) != EQ_ENABLED); 2999} 3000 3001static inline int 3002wr_can_update_eq(void *p) 3003{ 3004 struct fw_eth_tx_pkts_wr *wr = p; 3005 3006 switch (G_FW_WR_OP(be32toh(wr->op_pkd))) { 3007 case FW_ULPTX_WR: 3008 case FW_ETH_TX_PKT_WR: 3009 case FW_ETH_TX_PKTS_WR: 3010 case FW_ETH_TX_PKTS2_WR: 3011 case FW_ETH_TX_PKT_VM_WR: 3012 case FW_ETH_TX_PKTS_VM_WR: 3013 return (1); 3014 default: 3015 return (0); 3016 } 3017} 3018 3019static inline void 3020set_txupdate_flags(struct sge_txq *txq, u_int avail, 3021 struct fw_eth_tx_pkt_wr *wr) 3022{ 3023 struct sge_eq *eq = &txq->eq; 3024 struct txpkts *txp = &txq->txp; 3025 3026 if ((txp->npkt > 0 || avail < eq->sidx / 2) && 3027 atomic_cmpset_int(&eq->equiq, 0, 1)) { 3028 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ | F_FW_WR_EQUIQ); 3029 eq->equeqidx = eq->pidx; 3030 } else if (IDXDIFF(eq->pidx, eq->equeqidx, eq->sidx) >= 32) { 3031 wr->equiq_to_len16 |= htobe32(F_FW_WR_EQUEQ); 3032 eq->equeqidx = eq->pidx; 3033 } 3034} 3035 3036#if defined(__i386__) || defined(__amd64__) 3037extern uint64_t tsc_freq; 3038#endif 3039 3040static inline bool 3041record_eth_tx_time(struct sge_txq *txq) 3042{ 3043 const uint64_t cycles = get_cyclecount(); 3044 const uint64_t last_tx = txq->last_tx; 3045#if defined(__i386__) || defined(__amd64__) 3046 const uint64_t itg = tsc_freq * t4_tx_coalesce_gap / 1000000; 3047#else 3048 const uint64_t itg = 0; 3049#endif 3050 3051 MPASS(cycles >= last_tx); 3052 txq->last_tx = cycles; 3053 return (cycles - last_tx < itg); 3054} 3055 3056/* 3057 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to 3058 * be consumed. Return the actual number consumed. 0 indicates a stall. 
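 * Frames are coalesced into txpkts work requests when possible; anything
 * still coalesced but not yet written to the ring on return is reported to
 * the caller via *coalescing.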
3059 */ 3060static u_int 3061eth_tx(struct mp_ring *r, u_int cidx, u_int pidx, bool *coalescing) 3062{ 3063 struct sge_txq *txq = r->cookie; 3064 struct ifnet *ifp = txq->ifp; 3065 struct sge_eq *eq = &txq->eq; 3066 struct txpkts *txp = &txq->txp; 3067 struct vi_info *vi = ifp->if_softc; 3068 struct adapter *sc = vi->adapter; 3069 u_int total, remaining; /* # of packets */ 3070 u_int n, avail, dbdiff; /* # of hardware descriptors */ 3071 int i, rc; 3072 struct mbuf *m0; 3073 bool snd, recent_tx; 3074 void *wr; /* start of the last WR written to the ring */ 3075 3076 TXQ_LOCK_ASSERT_OWNED(txq); 3077 recent_tx = record_eth_tx_time(txq); 3078 3079 remaining = IDXDIFF(pidx, cidx, r->size); 3080 if (__predict_false(discard_tx(eq))) { 3081 for (i = 0; i < txp->npkt; i++) 3082 m_freem(txp->mb[i]); 3083 txp->npkt = 0; 3084 while (cidx != pidx) { 3085 m0 = r->items[cidx]; 3086 m_freem(m0); 3087 if (++cidx == r->size) 3088 cidx = 0; 3089 } 3090 reclaim_tx_descs(txq, eq->sidx); 3091 *coalescing = false; 3092 return (remaining); /* emptied */ 3093 } 3094 3095 /* How many hardware descriptors do we have readily available. */ 3096 if (eq->pidx == eq->cidx) 3097 avail = eq->sidx - 1; 3098 else 3099 avail = IDXDIFF(eq->cidx, eq->pidx, eq->sidx) - 1; 3100 3101 total = 0; 3102 if (remaining == 0) { 3103 txp->score = 0; 3104 txq->txpkts_flush++; 3105 goto send_txpkts; 3106 } 3107 3108 dbdiff = 0; 3109 MPASS(remaining > 0); 3110 while (remaining > 0) { 3111 m0 = r->items[cidx]; 3112 M_ASSERTPKTHDR(m0); 3113 MPASS(m0->m_nextpkt == NULL); 3114 3115 if (avail < 2 * SGE_MAX_WR_NDESC) 3116 avail += reclaim_tx_descs(txq, 64); 3117 3118 if (t4_tx_coalesce == 0 && txp->npkt == 0) 3119 goto skip_coalescing; 3120 if (cannot_use_txpkts(m0)) 3121 txp->score = 0; 3122 else if (recent_tx) { 3123 if (++txp->score == 0) 3124 txp->score = UINT8_MAX; 3125 } else 3126 txp->score = 1; 3127 if (txp->npkt > 0 || remaining > 1 || 3128 txp->score >= t4_tx_coalesce_pkts || 3129 atomic_load_int(&txq->eq.equiq) != 0) { 3130 if (vi->flags & TX_USES_VM_WR) 3131 rc = add_to_txpkts_vf(sc, txq, m0, avail, &snd); 3132 else 3133 rc = add_to_txpkts_pf(sc, txq, m0, avail, &snd); 3134 } else { 3135 snd = false; 3136 rc = EINVAL; 3137 } 3138 if (snd) { 3139 MPASS(txp->npkt > 0); 3140 for (i = 0; i < txp->npkt; i++) 3141 ETHER_BPF_MTAP(ifp, txp->mb[i]); 3142 if (txp->npkt > 1) { 3143 MPASS(avail >= tx_len16_to_desc(txp->len16)); 3144 if (vi->flags & TX_USES_VM_WR) 3145 n = write_txpkts_vm_wr(sc, txq); 3146 else 3147 n = write_txpkts_wr(sc, txq); 3148 } else { 3149 MPASS(avail >= 3150 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 3151 if (vi->flags & TX_USES_VM_WR) 3152 n = write_txpkt_vm_wr(sc, txq, 3153 txp->mb[0]); 3154 else 3155 n = write_txpkt_wr(sc, txq, txp->mb[0], 3156 avail); 3157 } 3158 MPASS(n <= SGE_MAX_WR_NDESC); 3159 avail -= n; 3160 dbdiff += n; 3161 wr = &eq->desc[eq->pidx]; 3162 IDXINCR(eq->pidx, n, eq->sidx); 3163 txp->npkt = 0; /* emptied */ 3164 } 3165 if (rc == 0) { 3166 /* m0 was coalesced into txq->txpkts. */ 3167 goto next_mbuf; 3168 } 3169 if (rc == EAGAIN) { 3170 /* 3171 * m0 is suitable for tx coalescing but could not be 3172 * combined with the existing txq->txpkts, which has now 3173 * been transmitted. Start a new txpkts with m0. 
3174 */ 3175 MPASS(snd); 3176 MPASS(txp->npkt == 0); 3177 continue; 3178 } 3179 3180 MPASS(rc != 0 && rc != EAGAIN); 3181 MPASS(txp->npkt == 0); 3182skip_coalescing: 3183 n = tx_len16_to_desc(mbuf_len16(m0)); 3184 if (__predict_false(avail < n)) { 3185 avail += reclaim_tx_descs(txq, min(n, 32)); 3186 if (avail < n) 3187 break; /* out of descriptors */ 3188 } 3189 3190 wr = &eq->desc[eq->pidx]; 3191 if (mbuf_cflags(m0) & MC_RAW_WR) { 3192 n = write_raw_wr(txq, wr, m0, avail); 3193 } else { 3194 ETHER_BPF_MTAP(ifp, m0); 3195 if (vi->flags & TX_USES_VM_WR) 3196 n = write_txpkt_vm_wr(sc, txq, m0); 3197 else 3198 n = write_txpkt_wr(sc, txq, m0, avail); 3199 } 3200 MPASS(n >= 1 && n <= avail); 3201 MPASS(n <= SGE_MAX_WR_NDESC); 3202 3203 avail -= n; 3204 dbdiff += n; 3205 IDXINCR(eq->pidx, n, eq->sidx); 3206 3207 if (dbdiff >= 512 / EQ_ESIZE) { /* X_FETCHBURSTMAX_512B */ 3208 if (wr_can_update_eq(wr)) 3209 set_txupdate_flags(txq, avail, wr); 3210 ring_eq_db(sc, eq, dbdiff); 3211 avail += reclaim_tx_descs(txq, 32); 3212 dbdiff = 0; 3213 } 3214next_mbuf: 3215 total++; 3216 remaining--; 3217 if (__predict_false(++cidx == r->size)) 3218 cidx = 0; 3219 } 3220 if (dbdiff != 0) { 3221 if (wr_can_update_eq(wr)) 3222 set_txupdate_flags(txq, avail, wr); 3223 ring_eq_db(sc, eq, dbdiff); 3224 reclaim_tx_descs(txq, 32); 3225 } else if (eq->pidx == eq->cidx && txp->npkt > 0 && 3226 atomic_load_int(&txq->eq.equiq) == 0) { 3227 /* 3228 * If nothing was submitted to the chip for tx (it was coalesced 3229 * into txpkts instead) and there is no tx update outstanding 3230 * then we need to send txpkts now. 3231 */ 3232send_txpkts: 3233 MPASS(txp->npkt > 0); 3234 for (i = 0; i < txp->npkt; i++) 3235 ETHER_BPF_MTAP(ifp, txp->mb[i]); 3236 if (txp->npkt > 1) { 3237 MPASS(avail >= tx_len16_to_desc(txp->len16)); 3238 if (vi->flags & TX_USES_VM_WR) 3239 n = write_txpkts_vm_wr(sc, txq); 3240 else 3241 n = write_txpkts_wr(sc, txq); 3242 } else { 3243 MPASS(avail >= 3244 tx_len16_to_desc(mbuf_len16(txp->mb[0]))); 3245 if (vi->flags & TX_USES_VM_WR) 3246 n = write_txpkt_vm_wr(sc, txq, txp->mb[0]); 3247 else 3248 n = write_txpkt_wr(sc, txq, txp->mb[0], avail); 3249 } 3250 MPASS(n <= SGE_MAX_WR_NDESC); 3251 wr = &eq->desc[eq->pidx]; 3252 IDXINCR(eq->pidx, n, eq->sidx); 3253 txp->npkt = 0; /* emptied */ 3254 3255 MPASS(wr_can_update_eq(wr)); 3256 set_txupdate_flags(txq, avail - n, wr); 3257 ring_eq_db(sc, eq, n); 3258 reclaim_tx_descs(txq, 32); 3259 } 3260 *coalescing = txp->npkt > 0; 3261 3262 return (total); 3263} 3264 3265static inline void 3266init_iq(struct sge_iq *iq, struct adapter *sc, int tmr_idx, int pktc_idx, 3267 int qsize) 3268{ 3269 3270 KASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS, 3271 ("%s: bad tmr_idx %d", __func__, tmr_idx)); 3272 KASSERT(pktc_idx < SGE_NCOUNTERS, /* -ve is ok, means don't use */ 3273 ("%s: bad pktc_idx %d", __func__, pktc_idx)); 3274 3275 iq->flags = 0; 3276 iq->adapter = sc; 3277 iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx); 3278 iq->intr_pktc_idx = SGE_NCOUNTERS - 1; 3279 if (pktc_idx >= 0) { 3280 iq->intr_params |= F_QINTR_CNT_EN; 3281 iq->intr_pktc_idx = pktc_idx; 3282 } 3283 iq->qsize = roundup2(qsize, 16); /* See FW_IQ_CMD/iqsize */ 3284 iq->sidx = iq->qsize - sc->params.sge.spg_len / IQ_ESIZE; 3285} 3286 3287static inline void 3288init_fl(struct adapter *sc, struct sge_fl *fl, int qsize, int maxp, char *name) 3289{ 3290 3291 fl->qsize = qsize; 3292 fl->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3293 strlcpy(fl->lockname, name, sizeof(fl->lockname)); 3294 if (sc->flags & 
BUF_PACKING_OK && 3295 ((!is_t4(sc) && buffer_packing) || /* T5+: enabled unless 0 */ 3296 (is_t4(sc) && buffer_packing == 1)))/* T4: disabled unless 1 */ 3297 fl->flags |= FL_BUF_PACKING; 3298 fl->zidx = find_refill_source(sc, maxp, fl->flags & FL_BUF_PACKING); 3299 fl->safe_zidx = sc->sge.safe_zidx; 3300} 3301 3302static inline void 3303init_eq(struct adapter *sc, struct sge_eq *eq, int eqtype, int qsize, 3304 uint8_t tx_chan, uint16_t iqid, char *name) 3305{ 3306 KASSERT(eqtype <= EQ_TYPEMASK, ("%s: bad qtype %d", __func__, eqtype)); 3307 3308 eq->flags = eqtype & EQ_TYPEMASK; 3309 eq->tx_chan = tx_chan; 3310 eq->iqid = iqid; 3311 eq->sidx = qsize - sc->params.sge.spg_len / EQ_ESIZE; 3312 strlcpy(eq->lockname, name, sizeof(eq->lockname)); 3313} 3314 3315static int 3316alloc_ring(struct adapter *sc, size_t len, bus_dma_tag_t *tag, 3317 bus_dmamap_t *map, bus_addr_t *pa, void **va) 3318{ 3319 int rc; 3320 3321 rc = bus_dma_tag_create(sc->dmat, 512, 0, BUS_SPACE_MAXADDR, 3322 BUS_SPACE_MAXADDR, NULL, NULL, len, 1, len, 0, NULL, NULL, tag); 3323 if (rc != 0) { 3324 device_printf(sc->dev, "cannot allocate DMA tag: %d\n", rc); 3325 goto done; 3326 } 3327 3328 rc = bus_dmamem_alloc(*tag, va, 3329 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, map); 3330 if (rc != 0) { 3331 device_printf(sc->dev, "cannot allocate DMA memory: %d\n", rc); 3332 goto done; 3333 } 3334 3335 rc = bus_dmamap_load(*tag, *map, *va, len, oneseg_dma_callback, pa, 0); 3336 if (rc != 0) { 3337 device_printf(sc->dev, "cannot load DMA map: %d\n", rc); 3338 goto done; 3339 } 3340done: 3341 if (rc) 3342 free_ring(sc, *tag, *map, *pa, *va); 3343 3344 return (rc); 3345} 3346 3347static int 3348free_ring(struct adapter *sc, bus_dma_tag_t tag, bus_dmamap_t map, 3349 bus_addr_t pa, void *va) 3350{ 3351 if (pa) 3352 bus_dmamap_unload(tag, map); 3353 if (va) 3354 bus_dmamem_free(tag, va, map); 3355 if (tag) 3356 bus_dma_tag_destroy(tag); 3357 3358 return (0); 3359} 3360 3361/* 3362 * Allocates the ring for an ingress queue and an optional freelist. If the 3363 * freelist is specified it will be allocated and then associated with the 3364 * ingress queue. 3365 * 3366 * Returns errno on failure. Resources allocated up to that point may still be 3367 * allocated. Caller is responsible for cleanup in case this function fails. 3368 * 3369 * If the ingress queue will take interrupts directly then the intr_idx 3370 * specifies the vector, starting from 0. -1 means the interrupts for this 3371 * queue should be forwarded to the fwq. 
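 *
 * A cong value >= 0 requests congestion feedback: it sets the FL congestion
 * flags in the IQ command and, on T5 and later (PF only), is also programmed
 * into the queue's congestion manager context further down.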
3372 */ 3373static int 3374alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl, 3375 int intr_idx, int cong) 3376{ 3377 int rc, i, cntxt_id; 3378 size_t len; 3379 struct fw_iq_cmd c; 3380 struct port_info *pi = vi->pi; 3381 struct adapter *sc = iq->adapter; 3382 struct sge_params *sp = &sc->params.sge; 3383 __be32 v = 0; 3384 3385 len = iq->qsize * IQ_ESIZE; 3386 rc = alloc_ring(sc, len, &iq->desc_tag, &iq->desc_map, &iq->ba, 3387 (void **)&iq->desc); 3388 if (rc != 0) 3389 return (rc); 3390 3391 bzero(&c, sizeof(c)); 3392 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_IQ_CMD) | F_FW_CMD_REQUEST | 3393 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_IQ_CMD_PFN(sc->pf) | 3394 V_FW_IQ_CMD_VFN(0)); 3395 3396 c.alloc_to_len16 = htobe32(F_FW_IQ_CMD_ALLOC | F_FW_IQ_CMD_IQSTART | 3397 FW_LEN16(c)); 3398 3399 /* Special handling for firmware event queue */ 3400 if (iq == &sc->sge.fwq) 3401 v |= F_FW_IQ_CMD_IQASYNCH; 3402 3403 if (intr_idx < 0) { 3404 /* Forwarded interrupts, all headed to fwq */ 3405 v |= F_FW_IQ_CMD_IQANDST; 3406 v |= V_FW_IQ_CMD_IQANDSTINDEX(sc->sge.fwq.cntxt_id); 3407 } else { 3408 KASSERT(intr_idx < sc->intr_count, 3409 ("%s: invalid direct intr_idx %d", __func__, intr_idx)); 3410 v |= V_FW_IQ_CMD_IQANDSTINDEX(intr_idx); 3411 } 3412 3413 c.type_to_iqandstindex = htobe32(v | 3414 V_FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) | 3415 V_FW_IQ_CMD_VIID(vi->viid) | 3416 V_FW_IQ_CMD_IQANUD(X_UPDATEDELIVERY_INTERRUPT)); 3417 c.iqdroprss_to_iqesize = htobe16(V_FW_IQ_CMD_IQPCIECH(pi->tx_chan) | 3418 F_FW_IQ_CMD_IQGTSMODE | 3419 V_FW_IQ_CMD_IQINTCNTTHRESH(iq->intr_pktc_idx) | 3420 V_FW_IQ_CMD_IQESIZE(ilog2(IQ_ESIZE) - 4)); 3421 c.iqsize = htobe16(iq->qsize); 3422 c.iqaddr = htobe64(iq->ba); 3423 if (cong >= 0) 3424 c.iqns_to_fl0congen = htobe32(F_FW_IQ_CMD_IQFLINTCONGEN); 3425 3426 if (fl) { 3427 mtx_init(&fl->fl_lock, fl->lockname, NULL, MTX_DEF); 3428 3429 len = fl->qsize * EQ_ESIZE; 3430 rc = alloc_ring(sc, len, &fl->desc_tag, &fl->desc_map, 3431 &fl->ba, (void **)&fl->desc); 3432 if (rc) 3433 return (rc); 3434 3435 /* Allocate space for one software descriptor per buffer. */ 3436 rc = alloc_fl_sdesc(fl); 3437 if (rc != 0) { 3438 device_printf(sc->dev, 3439 "failed to setup fl software descriptors: %d\n", 3440 rc); 3441 return (rc); 3442 } 3443 3444 if (fl->flags & FL_BUF_PACKING) { 3445 fl->lowat = roundup2(sp->fl_starve_threshold2, 8); 3446 fl->buf_boundary = sp->pack_boundary; 3447 } else { 3448 fl->lowat = roundup2(sp->fl_starve_threshold, 8); 3449 fl->buf_boundary = 16; 3450 } 3451 if (fl_pad && fl->buf_boundary < sp->pad_boundary) 3452 fl->buf_boundary = sp->pad_boundary; 3453 3454 c.iqns_to_fl0congen |= 3455 htobe32(V_FW_IQ_CMD_FL0HOSTFCMODE(X_HOSTFCMODE_NONE) | 3456 F_FW_IQ_CMD_FL0FETCHRO | F_FW_IQ_CMD_FL0DATARO | 3457 (fl_pad ? F_FW_IQ_CMD_FL0PADEN : 0) | 3458 (fl->flags & FL_BUF_PACKING ? F_FW_IQ_CMD_FL0PACKEN : 3459 0)); 3460 if (cong >= 0) { 3461 c.iqns_to_fl0congen |= 3462 htobe32(V_FW_IQ_CMD_FL0CNGCHMAP(cong) | 3463 F_FW_IQ_CMD_FL0CONGCIF | 3464 F_FW_IQ_CMD_FL0CONGEN); 3465 } 3466 c.fl0dcaen_to_fl0cidxfthresh = 3467 htobe16(V_FW_IQ_CMD_FL0FBMIN(chip_id(sc) <= CHELSIO_T5 ? 3468 X_FETCHBURSTMIN_128B : X_FETCHBURSTMIN_64B_T6) | 3469 V_FW_IQ_CMD_FL0FBMAX(chip_id(sc) <= CHELSIO_T5 ? 
3470 X_FETCHBURSTMAX_512B : X_FETCHBURSTMAX_256B)); 3471 c.fl0size = htobe16(fl->qsize); 3472 c.fl0addr = htobe64(fl->ba); 3473 } 3474 3475 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 3476 if (rc != 0) { 3477 device_printf(sc->dev, 3478 "failed to create ingress queue: %d\n", rc); 3479 return (rc); 3480 } 3481 3482 iq->cidx = 0; 3483 iq->gen = F_RSPD_GEN; 3484 iq->intr_next = iq->intr_params; 3485 iq->cntxt_id = be16toh(c.iqid); 3486 iq->abs_id = be16toh(c.physiqid); 3487 iq->flags |= IQ_ALLOCATED; 3488 3489 cntxt_id = iq->cntxt_id - sc->sge.iq_start; 3490 if (cntxt_id >= sc->sge.iqmap_sz) { 3491 panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__, 3492 cntxt_id, sc->sge.iqmap_sz - 1); 3493 } 3494 sc->sge.iqmap[cntxt_id] = iq; 3495 3496 if (fl) { 3497 u_int qid; 3498 3499 iq->flags |= IQ_HAS_FL; 3500 fl->cntxt_id = be16toh(c.fl0id); 3501 fl->pidx = fl->cidx = 0; 3502 3503 cntxt_id = fl->cntxt_id - sc->sge.eq_start; 3504 if (cntxt_id >= sc->sge.eqmap_sz) { 3505 panic("%s: fl->cntxt_id (%d) more than the max (%d)", 3506 __func__, cntxt_id, sc->sge.eqmap_sz - 1); 3507 } 3508 sc->sge.eqmap[cntxt_id] = (void *)fl; 3509 3510 qid = fl->cntxt_id; 3511 if (isset(&sc->doorbells, DOORBELL_UDB)) { 3512 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 3513 uint32_t mask = (1 << s_qpp) - 1; 3514 volatile uint8_t *udb; 3515 3516 udb = sc->udbs_base + UDBS_DB_OFFSET; 3517 udb += (qid >> s_qpp) << PAGE_SHIFT; 3518 qid &= mask; 3519 if (qid < PAGE_SIZE / UDBS_SEG_SIZE) { 3520 udb += qid << UDBS_SEG_SHIFT; 3521 qid = 0; 3522 } 3523 fl->udb = (volatile void *)udb; 3524 } 3525 fl->dbval = V_QID(qid) | sc->chip_params->sge_fl_db; 3526 3527 FL_LOCK(fl); 3528 /* Enough to make sure the SGE doesn't think it's starved */ 3529 refill_fl(sc, fl, fl->lowat); 3530 FL_UNLOCK(fl); 3531 } 3532 3533 if (chip_id(sc) >= CHELSIO_T5 && !(sc->flags & IS_VF) && cong >= 0) { 3534 uint32_t param, val; 3535 3536 param = V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) | 3537 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_CONM_CTXT) | 3538 V_FW_PARAMS_PARAM_YZ(iq->cntxt_id); 3539 if (cong == 0) 3540 val = 1 << 19; 3541 else { 3542 val = 2 << 19; 3543 for (i = 0; i < 4; i++) { 3544 if (cong & (1 << i)) 3545 val |= 1 << (i << 2); 3546 } 3547 } 3548 3549 rc = -t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 3550 if (rc != 0) { 3551 /* report error but carry on */ 3552 device_printf(sc->dev, 3553 "failed to set congestion manager context for " 3554 "ingress queue %d: %d\n", iq->cntxt_id, rc); 3555 } 3556 } 3557 3558 /* Enable IQ interrupts */ 3559 atomic_store_rel_int(&iq->state, IQS_IDLE); 3560 t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) | 3561 V_INGRESSQID(iq->cntxt_id)); 3562 3563 return (0); 3564} 3565 3566static int 3567free_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl) 3568{ 3569 int rc; 3570 struct adapter *sc = iq->adapter; 3571 device_t dev; 3572 3573 if (sc == NULL) 3574 return (0); /* nothing to do */ 3575 3576 dev = vi ? vi->dev : sc->dev; 3577 3578 if (iq->flags & IQ_ALLOCATED) { 3579 rc = -t4_iq_free(sc, sc->mbox, sc->pf, 0, 3580 FW_IQ_TYPE_FL_INT_CAP, iq->cntxt_id, 3581 fl ? 
fl->cntxt_id : 0xffff, 0xffff); 3582 if (rc != 0) { 3583 device_printf(dev, 3584 "failed to free queue %p: %d\n", iq, rc); 3585 return (rc); 3586 } 3587 iq->flags &= ~IQ_ALLOCATED; 3588 } 3589 3590 free_ring(sc, iq->desc_tag, iq->desc_map, iq->ba, iq->desc); 3591 3592 bzero(iq, sizeof(*iq)); 3593 3594 if (fl) { 3595 free_ring(sc, fl->desc_tag, fl->desc_map, fl->ba, 3596 fl->desc); 3597 3598 if (fl->sdesc) 3599 free_fl_sdesc(sc, fl); 3600 3601 if (mtx_initialized(&fl->fl_lock)) 3602 mtx_destroy(&fl->fl_lock); 3603 3604 bzero(fl, sizeof(*fl)); 3605 } 3606 3607 return (0); 3608} 3609 3610static void 3611add_iq_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *oid, 3612 struct sge_iq *iq) 3613{ 3614 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3615 3616 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, &iq->ba, 3617 "bus address of descriptor ring"); 3618 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3619 iq->qsize * IQ_ESIZE, "descriptor ring size in bytes"); 3620 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3621 CTLTYPE_INT | CTLFLAG_RD, &iq->abs_id, 0, sysctl_uint16, "I", 3622 "absolute id of the queue"); 3623 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3624 CTLTYPE_INT | CTLFLAG_RD, &iq->cntxt_id, 0, sysctl_uint16, "I", 3625 "SGE context id of the queue"); 3626 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3627 CTLTYPE_INT | CTLFLAG_RD, &iq->cidx, 0, sysctl_uint16, "I", 3628 "consumer index"); 3629} 3630 3631static void 3632add_fl_sysctls(struct adapter *sc, struct sysctl_ctx_list *ctx, 3633 struct sysctl_oid *oid, struct sge_fl *fl) 3634{ 3635 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3636 3637 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3638 "freelist"); 3639 children = SYSCTL_CHILDREN(oid); 3640 3641 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 3642 &fl->ba, "bus address of descriptor ring"); 3643 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 3644 fl->sidx * EQ_ESIZE + sc->params.sge.spg_len, 3645 "desc ring size in bytes"); 3646 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3647 CTLTYPE_INT | CTLFLAG_RD, &fl->cntxt_id, 0, sysctl_uint16, "I", 3648 "SGE context id of the freelist"); 3649 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "padding", CTLFLAG_RD, NULL, 3650 fl_pad ? 1 : 0, "padding enabled"); 3651 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "packing", CTLFLAG_RD, NULL, 3652 fl->flags & FL_BUF_PACKING ? 
1 : 0, "packing enabled"); 3653 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, &fl->cidx, 3654 0, "consumer index"); 3655 if (fl->flags & FL_BUF_PACKING) { 3656 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_offset", 3657 CTLFLAG_RD, &fl->rx_offset, 0, "packing rx offset"); 3658 } 3659 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, &fl->pidx, 3660 0, "producer index"); 3661 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_allocated", 3662 CTLFLAG_RD, &fl->cl_allocated, "# of clusters allocated"); 3663 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_recycled", 3664 CTLFLAG_RD, &fl->cl_recycled, "# of clusters recycled"); 3665 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "cluster_fast_recycled", 3666 CTLFLAG_RD, &fl->cl_fast_recycled, "# of clusters recycled (fast)"); 3667} 3668 3669static int 3670alloc_fwq(struct adapter *sc) 3671{ 3672 int rc, intr_idx; 3673 struct sge_iq *fwq = &sc->sge.fwq; 3674 struct sysctl_oid *oid = device_get_sysctl_tree(sc->dev); 3675 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3676 3677 init_iq(fwq, sc, 0, 0, FW_IQ_QSIZE); 3678 if (sc->flags & IS_VF) 3679 intr_idx = 0; 3680 else 3681 intr_idx = sc->intr_count > 1 ? 1 : 0; 3682 rc = alloc_iq_fl(&sc->port[0]->vi[0], fwq, NULL, intr_idx, -1); 3683 if (rc != 0) { 3684 device_printf(sc->dev, 3685 "failed to create firmware event queue: %d\n", rc); 3686 return (rc); 3687 } 3688 3689 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, "fwq", CTLFLAG_RD, 3690 NULL, "firmware event queue"); 3691 add_iq_sysctls(&sc->ctx, oid, fwq); 3692 3693 return (0); 3694} 3695 3696static int 3697free_fwq(struct adapter *sc) 3698{ 3699 return free_iq_fl(NULL, &sc->sge.fwq, NULL); 3700} 3701 3702static int 3703alloc_ctrlq(struct adapter *sc, struct sge_wrq *ctrlq, int idx, 3704 struct sysctl_oid *oid) 3705{ 3706 int rc; 3707 char name[16]; 3708 struct sysctl_oid_list *children; 3709 3710 snprintf(name, sizeof(name), "%s ctrlq%d", device_get_nameunit(sc->dev), 3711 idx); 3712 init_eq(sc, &ctrlq->eq, EQ_CTRL, CTRL_EQ_QSIZE, sc->port[idx]->tx_chan, 3713 sc->sge.fwq.cntxt_id, name); 3714 3715 children = SYSCTL_CHILDREN(oid); 3716 snprintf(name, sizeof(name), "%d", idx); 3717 oid = SYSCTL_ADD_NODE(&sc->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3718 NULL, "ctrl queue"); 3719 rc = alloc_wrq(sc, NULL, ctrlq, oid); 3720 3721 return (rc); 3722} 3723 3724int 3725tnl_cong(struct port_info *pi, int drop) 3726{ 3727 3728 if (drop == -1) 3729 return (-1); 3730 else if (drop == 1) 3731 return (0); 3732 else 3733 return (pi->rx_e_chan_map); 3734} 3735 3736static int 3737alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx, 3738 struct sysctl_oid *oid) 3739{ 3740 int rc; 3741 struct adapter *sc = vi->adapter; 3742 struct sysctl_oid_list *children; 3743 char name[16]; 3744 3745 rc = alloc_iq_fl(vi, &rxq->iq, &rxq->fl, intr_idx, 3746 tnl_cong(vi->pi, cong_drop)); 3747 if (rc != 0) 3748 return (rc); 3749 3750 if (idx == 0) 3751 sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id; 3752 else 3753 KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id, 3754 ("iq_base mismatch")); 3755 KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF, 3756 ("PF with non-zero iq_base")); 3757 3758 /* 3759 * The freelist is just barely above the starvation threshold right now, 3760 * fill it up a bit more. 
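	 * (alloc_iq_fl added only enough buffers to clear the starvation
	 * threshold.)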
3761 */ 3762 FL_LOCK(&rxq->fl); 3763 refill_fl(sc, &rxq->fl, 128); 3764 FL_UNLOCK(&rxq->fl); 3765 3766#if defined(INET) || defined(INET6) 3767 rc = tcp_lro_init_args(&rxq->lro, vi->ifp, lro_entries, lro_mbufs); 3768 if (rc != 0) 3769 return (rc); 3770 MPASS(rxq->lro.ifp == vi->ifp); /* also indicates LRO init'ed */ 3771 3772 if (vi->ifp->if_capenable & IFCAP_LRO) 3773 rxq->iq.flags |= IQ_LRO_ENABLED; 3774#endif 3775 if (vi->ifp->if_capenable & IFCAP_HWRXTSTMP) 3776 rxq->iq.flags |= IQ_RX_TIMESTAMP; 3777 rxq->ifp = vi->ifp; 3778 3779 children = SYSCTL_CHILDREN(oid); 3780 3781 snprintf(name, sizeof(name), "%d", idx); 3782 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3783 NULL, "rx queue"); 3784 children = SYSCTL_CHILDREN(oid); 3785 3786 add_iq_sysctls(&vi->ctx, oid, &rxq->iq); 3787#if defined(INET) || defined(INET6) 3788 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, 3789 &rxq->lro.lro_queued, 0, NULL); 3790 SYSCTL_ADD_U64(&vi->ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, 3791 &rxq->lro.lro_flushed, 0, NULL); 3792#endif 3793 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "rxcsum", CTLFLAG_RD, 3794 &rxq->rxcsum, "# of times hardware assisted with checksum"); 3795 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_extraction", 3796 CTLFLAG_RD, &rxq->vlan_extraction, 3797 "# of times hardware extracted 802.1Q tag"); 3798 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_rxcsum", 3799 CTLFLAG_RD, &rxq->vxlan_rxcsum, 3800 "# of times hardware assisted with inner checksum (VXLAN) "); 3801 3802 add_fl_sysctls(sc, &vi->ctx, oid, &rxq->fl); 3803 3804 return (rc); 3805} 3806 3807static int 3808free_rxq(struct vi_info *vi, struct sge_rxq *rxq) 3809{ 3810 int rc; 3811 3812#if defined(INET) || defined(INET6) 3813 if (rxq->lro.ifp) { 3814 tcp_lro_free(&rxq->lro); 3815 rxq->lro.ifp = NULL; 3816 } 3817#endif 3818 3819 rc = free_iq_fl(vi, &rxq->iq, &rxq->fl); 3820 if (rc == 0) 3821 bzero(rxq, sizeof(*rxq)); 3822 3823 return (rc); 3824} 3825 3826#ifdef TCP_OFFLOAD 3827static int 3828alloc_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq, 3829 int intr_idx, int idx, struct sysctl_oid *oid) 3830{ 3831 struct port_info *pi = vi->pi; 3832 int rc; 3833 struct sysctl_oid_list *children; 3834 char name[16]; 3835 3836 rc = alloc_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl, intr_idx, 0); 3837 if (rc != 0) 3838 return (rc); 3839 3840 children = SYSCTL_CHILDREN(oid); 3841 3842 snprintf(name, sizeof(name), "%d", idx); 3843 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3844 NULL, "rx queue"); 3845 add_iq_sysctls(&vi->ctx, oid, &ofld_rxq->iq); 3846 add_fl_sysctls(pi->adapter, &vi->ctx, oid, &ofld_rxq->fl); 3847 3848 return (rc); 3849} 3850 3851static int 3852free_ofld_rxq(struct vi_info *vi, struct sge_ofld_rxq *ofld_rxq) 3853{ 3854 int rc; 3855 3856 rc = free_iq_fl(vi, &ofld_rxq->iq, &ofld_rxq->fl); 3857 if (rc == 0) 3858 bzero(ofld_rxq, sizeof(*ofld_rxq)); 3859 3860 return (rc); 3861} 3862#endif 3863 3864#ifdef DEV_NETMAP 3865static int 3866alloc_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int intr_idx, 3867 int idx, struct sysctl_oid *oid) 3868{ 3869 int rc; 3870 struct sysctl_oid_list *children; 3871 struct sysctl_ctx_list *ctx; 3872 char name[16]; 3873 size_t len; 3874 struct adapter *sc = vi->adapter; 3875 struct netmap_adapter *na = NA(vi->ifp); 3876 3877 MPASS(na != NULL); 3878 3879 len = vi->qsize_rxq * IQ_ESIZE; 3880 rc = alloc_ring(sc, len, &nm_rxq->iq_desc_tag, &nm_rxq->iq_desc_map, 3881 &nm_rxq->iq_ba, (void 
**)&nm_rxq->iq_desc); 3882 if (rc != 0) 3883 return (rc); 3884 3885 len = na->num_rx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3886 rc = alloc_ring(sc, len, &nm_rxq->fl_desc_tag, &nm_rxq->fl_desc_map, 3887 &nm_rxq->fl_ba, (void **)&nm_rxq->fl_desc); 3888 if (rc != 0) 3889 return (rc); 3890 3891 nm_rxq->vi = vi; 3892 nm_rxq->nid = idx; 3893 nm_rxq->iq_cidx = 0; 3894 nm_rxq->iq_sidx = vi->qsize_rxq - sc->params.sge.spg_len / IQ_ESIZE; 3895 nm_rxq->iq_gen = F_RSPD_GEN; 3896 nm_rxq->fl_pidx = nm_rxq->fl_cidx = 0; 3897 nm_rxq->fl_sidx = na->num_rx_desc; 3898 nm_rxq->fl_sidx2 = nm_rxq->fl_sidx; /* copy for rxsync cacheline */ 3899 nm_rxq->intr_idx = intr_idx; 3900 nm_rxq->iq_cntxt_id = INVALID_NM_RXQ_CNTXT_ID; 3901 3902 ctx = &vi->ctx; 3903 children = SYSCTL_CHILDREN(oid); 3904 3905 snprintf(name, sizeof(name), "%d", idx); 3906 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, name, CTLFLAG_RD, NULL, 3907 "rx queue"); 3908 children = SYSCTL_CHILDREN(oid); 3909 3910 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "abs_id", 3911 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_abs_id, 0, sysctl_uint16, 3912 "I", "absolute id of the queue"); 3913 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3914 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cntxt_id, 0, sysctl_uint16, 3915 "I", "SGE context id of the queue"); 3916 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 3917 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->iq_cidx, 0, sysctl_uint16, "I", 3918 "consumer index"); 3919 3920 children = SYSCTL_CHILDREN(oid); 3921 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "fl", CTLFLAG_RD, NULL, 3922 "freelist"); 3923 children = SYSCTL_CHILDREN(oid); 3924 3925 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cntxt_id", 3926 CTLTYPE_INT | CTLFLAG_RD, &nm_rxq->fl_cntxt_id, 0, sysctl_uint16, 3927 "I", "SGE context id of the freelist"); 3928 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cidx", CTLFLAG_RD, 3929 &nm_rxq->fl_cidx, 0, "consumer index"); 3930 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "pidx", CTLFLAG_RD, 3931 &nm_rxq->fl_pidx, 0, "producer index"); 3932 3933 return (rc); 3934} 3935 3936 3937static int 3938free_nm_rxq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq) 3939{ 3940 struct adapter *sc = vi->adapter; 3941 3942 if (vi->flags & VI_INIT_DONE) 3943 MPASS(nm_rxq->iq_cntxt_id == INVALID_NM_RXQ_CNTXT_ID); 3944 else 3945 MPASS(nm_rxq->iq_cntxt_id == 0); 3946 3947 free_ring(sc, nm_rxq->iq_desc_tag, nm_rxq->iq_desc_map, nm_rxq->iq_ba, 3948 nm_rxq->iq_desc); 3949 free_ring(sc, nm_rxq->fl_desc_tag, nm_rxq->fl_desc_map, nm_rxq->fl_ba, 3950 nm_rxq->fl_desc); 3951 3952 return (0); 3953} 3954 3955static int 3956alloc_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq, int iqidx, int idx, 3957 struct sysctl_oid *oid) 3958{ 3959 int rc; 3960 size_t len; 3961 struct port_info *pi = vi->pi; 3962 struct adapter *sc = pi->adapter; 3963 struct netmap_adapter *na = NA(vi->ifp); 3964 char name[16]; 3965 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 3966 3967 len = na->num_tx_desc * EQ_ESIZE + sc->params.sge.spg_len; 3968 rc = alloc_ring(sc, len, &nm_txq->desc_tag, &nm_txq->desc_map, 3969 &nm_txq->ba, (void **)&nm_txq->desc); 3970 if (rc) 3971 return (rc); 3972 3973 nm_txq->pidx = nm_txq->cidx = 0; 3974 nm_txq->sidx = na->num_tx_desc; 3975 nm_txq->nid = idx; 3976 nm_txq->iqidx = iqidx; 3977 nm_txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT) | 3978 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 3979 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 3980 if (sc->params.fw_vers >= FW_VERSION32(1, 24, 11, 0)) 3981 nm_txq->op_pkd = 
htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS2_WR)); 3982 else 3983 nm_txq->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 3984 nm_txq->cntxt_id = INVALID_NM_TXQ_CNTXT_ID; 3985 3986 snprintf(name, sizeof(name), "%d", idx); 3987 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 3988 NULL, "netmap tx queue"); 3989 children = SYSCTL_CHILDREN(oid); 3990 3991 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 3992 &nm_txq->cntxt_id, 0, "SGE context id of the queue"); 3993 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 3994 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->cidx, 0, sysctl_uint16, "I", 3995 "consumer index"); 3996 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 3997 CTLTYPE_INT | CTLFLAG_RD, &nm_txq->pidx, 0, sysctl_uint16, "I", 3998 "producer index"); 3999 4000 return (rc); 4001} 4002 4003static int 4004free_nm_txq(struct vi_info *vi, struct sge_nm_txq *nm_txq) 4005{ 4006 struct adapter *sc = vi->adapter; 4007 4008 if (vi->flags & VI_INIT_DONE) 4009 MPASS(nm_txq->cntxt_id == INVALID_NM_TXQ_CNTXT_ID); 4010 else 4011 MPASS(nm_txq->cntxt_id == 0); 4012 4013 free_ring(sc, nm_txq->desc_tag, nm_txq->desc_map, nm_txq->ba, 4014 nm_txq->desc); 4015 4016 return (0); 4017} 4018#endif 4019 4020/* 4021 * Returns a reasonable automatic cidx flush threshold for a given queue size. 4022 */ 4023static u_int 4024qsize_to_fthresh(int qsize) 4025{ 4026 u_int fthresh; 4027 4028 while (!powerof2(qsize)) 4029 qsize++; 4030 fthresh = ilog2(qsize); 4031 if (fthresh > X_CIDXFLUSHTHRESH_128) 4032 fthresh = X_CIDXFLUSHTHRESH_128; 4033 4034 return (fthresh); 4035} 4036 4037static int 4038ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq) 4039{ 4040 int rc, cntxt_id; 4041 struct fw_eq_ctrl_cmd c; 4042 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4043 4044 bzero(&c, sizeof(c)); 4045 4046 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_CTRL_CMD) | F_FW_CMD_REQUEST | 4047 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_CTRL_CMD_PFN(sc->pf) | 4048 V_FW_EQ_CTRL_CMD_VFN(0)); 4049 c.alloc_to_len16 = htobe32(F_FW_EQ_CTRL_CMD_ALLOC | 4050 F_FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c)); 4051 c.cmpliqid_eqid = htonl(V_FW_EQ_CTRL_CMD_CMPLIQID(eq->iqid)); 4052 c.physeqid_pkd = htobe32(0); 4053 c.fetchszm_to_iqid = 4054 htobe32(V_FW_EQ_CTRL_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 4055 V_FW_EQ_CTRL_CMD_PCIECHN(eq->tx_chan) | 4056 F_FW_EQ_CTRL_CMD_FETCHRO | V_FW_EQ_CTRL_CMD_IQID(eq->iqid)); 4057 c.dcaen_to_eqsize = 4058 htobe32(V_FW_EQ_CTRL_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
4059 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4060 V_FW_EQ_CTRL_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4061 V_FW_EQ_CTRL_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 4062 V_FW_EQ_CTRL_CMD_EQSIZE(qsize)); 4063 c.eqaddr = htobe64(eq->ba); 4064 4065 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4066 if (rc != 0) { 4067 device_printf(sc->dev, 4068 "failed to create control queue %d: %d\n", eq->tx_chan, rc); 4069 return (rc); 4070 } 4071 eq->flags |= EQ_ALLOCATED; 4072 4073 eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid)); 4074 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4075 if (cntxt_id >= sc->sge.eqmap_sz) 4076 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4077 cntxt_id, sc->sge.eqmap_sz - 1); 4078 sc->sge.eqmap[cntxt_id] = eq; 4079 4080 return (rc); 4081} 4082 4083static int 4084eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4085{ 4086 int rc, cntxt_id; 4087 struct fw_eq_eth_cmd c; 4088 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4089 4090 bzero(&c, sizeof(c)); 4091 4092 c.op_to_vfn = htobe32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST | 4093 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) | 4094 V_FW_EQ_ETH_CMD_VFN(0)); 4095 c.alloc_to_len16 = htobe32(F_FW_EQ_ETH_CMD_ALLOC | 4096 F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c)); 4097 c.autoequiqe_to_viid = htobe32(F_FW_EQ_ETH_CMD_AUTOEQUIQE | 4098 F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(vi->viid)); 4099 c.fetchszm_to_iqid = 4100 htobe32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_NONE) | 4101 V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO | 4102 V_FW_EQ_ETH_CMD_IQID(eq->iqid)); 4103 c.dcaen_to_eqsize = 4104 htobe32(V_FW_EQ_ETH_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 4105 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4106 V_FW_EQ_ETH_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4107 V_FW_EQ_ETH_CMD_EQSIZE(qsize)); 4108 c.eqaddr = htobe64(eq->ba); 4109 4110 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4111 if (rc != 0) { 4112 device_printf(vi->dev, 4113 "failed to create Ethernet egress queue: %d\n", rc); 4114 return (rc); 4115 } 4116 eq->flags |= EQ_ALLOCATED; 4117 4118 eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd)); 4119 eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd)); 4120 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4121 if (cntxt_id >= sc->sge.eqmap_sz) 4122 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4123 cntxt_id, sc->sge.eqmap_sz - 1); 4124 sc->sge.eqmap[cntxt_id] = eq; 4125 4126 return (rc); 4127} 4128 4129#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4130static int 4131ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4132{ 4133 int rc, cntxt_id; 4134 struct fw_eq_ofld_cmd c; 4135 int qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4136 4137 bzero(&c, sizeof(c)); 4138 4139 c.op_to_vfn = htonl(V_FW_CMD_OP(FW_EQ_OFLD_CMD) | F_FW_CMD_REQUEST | 4140 F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_OFLD_CMD_PFN(sc->pf) | 4141 V_FW_EQ_OFLD_CMD_VFN(0)); 4142 c.alloc_to_len16 = htonl(F_FW_EQ_OFLD_CMD_ALLOC | 4143 F_FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 4144 c.fetchszm_to_iqid = 4145 htonl(V_FW_EQ_OFLD_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) | 4146 V_FW_EQ_OFLD_CMD_PCIECHN(eq->tx_chan) | 4147 F_FW_EQ_OFLD_CMD_FETCHRO | V_FW_EQ_OFLD_CMD_IQID(eq->iqid)); 4148 c.dcaen_to_eqsize = 4149 htobe32(V_FW_EQ_OFLD_CMD_FBMIN(chip_id(sc) <= CHELSIO_T5 ? 
4150 X_FETCHBURSTMIN_64B : X_FETCHBURSTMIN_64B_T6) | 4151 V_FW_EQ_OFLD_CMD_FBMAX(X_FETCHBURSTMAX_512B) | 4152 V_FW_EQ_OFLD_CMD_CIDXFTHRESH(qsize_to_fthresh(qsize)) | 4153 V_FW_EQ_OFLD_CMD_EQSIZE(qsize)); 4154 c.eqaddr = htobe64(eq->ba); 4155 4156 rc = -t4_wr_mbox(sc, sc->mbox, &c, sizeof(c), &c); 4157 if (rc != 0) { 4158 device_printf(vi->dev, 4159 "failed to create egress queue for TCP offload: %d\n", rc); 4160 return (rc); 4161 } 4162 eq->flags |= EQ_ALLOCATED; 4163 4164 eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd)); 4165 cntxt_id = eq->cntxt_id - sc->sge.eq_start; 4166 if (cntxt_id >= sc->sge.eqmap_sz) 4167 panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__, 4168 cntxt_id, sc->sge.eqmap_sz - 1); 4169 sc->sge.eqmap[cntxt_id] = eq; 4170 4171 return (rc); 4172} 4173#endif 4174 4175static int 4176alloc_eq(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq) 4177{ 4178 int rc, qsize; 4179 size_t len; 4180 4181 mtx_init(&eq->eq_lock, eq->lockname, NULL, MTX_DEF); 4182 4183 qsize = eq->sidx + sc->params.sge.spg_len / EQ_ESIZE; 4184 len = qsize * EQ_ESIZE; 4185 rc = alloc_ring(sc, len, &eq->desc_tag, &eq->desc_map, 4186 &eq->ba, (void **)&eq->desc); 4187 if (rc) 4188 return (rc); 4189 4190 eq->pidx = eq->cidx = eq->dbidx = 0; 4191 /* Note that equeqidx is not used with sge_wrq (OFLD/CTRL) queues. */ 4192 eq->equeqidx = 0; 4193 eq->doorbells = sc->doorbells; 4194 4195 switch (eq->flags & EQ_TYPEMASK) { 4196 case EQ_CTRL: 4197 rc = ctrl_eq_alloc(sc, eq); 4198 break; 4199 4200 case EQ_ETH: 4201 rc = eth_eq_alloc(sc, vi, eq); 4202 break; 4203 4204#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4205 case EQ_OFLD: 4206 rc = ofld_eq_alloc(sc, vi, eq); 4207 break; 4208#endif 4209 4210 default: 4211 panic("%s: invalid eq type %d.", __func__, 4212 eq->flags & EQ_TYPEMASK); 4213 } 4214 if (rc != 0) { 4215 device_printf(sc->dev, 4216 "failed to allocate egress queue(%d): %d\n", 4217 eq->flags & EQ_TYPEMASK, rc); 4218 } 4219 4220 if (isset(&eq->doorbells, DOORBELL_UDB) || 4221 isset(&eq->doorbells, DOORBELL_UDBWC) || 4222 isset(&eq->doorbells, DOORBELL_WCWR)) { 4223 uint32_t s_qpp = sc->params.sge.eq_s_qpp; 4224 uint32_t mask = (1 << s_qpp) - 1; 4225 volatile uint8_t *udb; 4226 4227 udb = sc->udbs_base + UDBS_DB_OFFSET; 4228 udb += (eq->cntxt_id >> s_qpp) << PAGE_SHIFT; /* pg offset */ 4229 eq->udb_qid = eq->cntxt_id & mask; /* id in page */ 4230 if (eq->udb_qid >= PAGE_SIZE / UDBS_SEG_SIZE) 4231 clrbit(&eq->doorbells, DOORBELL_WCWR); 4232 else { 4233 udb += eq->udb_qid << UDBS_SEG_SHIFT; /* seg offset */ 4234 eq->udb_qid = 0; 4235 } 4236 eq->udb = (volatile void *)udb; 4237 } 4238 4239 return (rc); 4240} 4241 4242static int 4243free_eq(struct adapter *sc, struct sge_eq *eq) 4244{ 4245 int rc; 4246 4247 if (eq->flags & EQ_ALLOCATED) { 4248 switch (eq->flags & EQ_TYPEMASK) { 4249 case EQ_CTRL: 4250 rc = -t4_ctrl_eq_free(sc, sc->mbox, sc->pf, 0, 4251 eq->cntxt_id); 4252 break; 4253 4254 case EQ_ETH: 4255 rc = -t4_eth_eq_free(sc, sc->mbox, sc->pf, 0, 4256 eq->cntxt_id); 4257 break; 4258 4259#if defined(TCP_OFFLOAD) || defined(RATELIMIT) 4260 case EQ_OFLD: 4261 rc = -t4_ofld_eq_free(sc, sc->mbox, sc->pf, 0, 4262 eq->cntxt_id); 4263 break; 4264#endif 4265 4266 default: 4267 panic("%s: invalid eq type %d.", __func__, 4268 eq->flags & EQ_TYPEMASK); 4269 } 4270 if (rc != 0) { 4271 device_printf(sc->dev, 4272 "failed to free egress queue (%d): %d\n", 4273 eq->flags & EQ_TYPEMASK, rc); 4274 return (rc); 4275 } 4276 eq->flags &= ~EQ_ALLOCATED; 4277 } 4278 4279 free_ring(sc, eq->desc_tag, 
eq->desc_map, eq->ba, eq->desc); 4280 4281 if (mtx_initialized(&eq->eq_lock)) 4282 mtx_destroy(&eq->eq_lock); 4283 4284 bzero(eq, sizeof(*eq)); 4285 return (0); 4286} 4287 4288static int 4289alloc_wrq(struct adapter *sc, struct vi_info *vi, struct sge_wrq *wrq, 4290 struct sysctl_oid *oid) 4291{ 4292 int rc; 4293 struct sysctl_ctx_list *ctx = vi ? &vi->ctx : &sc->ctx; 4294 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4295 4296 rc = alloc_eq(sc, vi, &wrq->eq); 4297 if (rc) 4298 return (rc); 4299 4300 wrq->adapter = sc; 4301 TASK_INIT(&wrq->wrq_tx_task, 0, wrq_tx_drain, wrq); 4302 TAILQ_INIT(&wrq->incomplete_wrs); 4303 STAILQ_INIT(&wrq->wr_list); 4304 wrq->nwr_pending = 0; 4305 wrq->ndesc_needed = 0; 4306 4307 SYSCTL_ADD_UAUTO(ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4308 &wrq->eq.ba, "bus address of descriptor ring"); 4309 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4310 wrq->eq.sidx * EQ_ESIZE + sc->params.sge.spg_len, 4311 "desc ring size in bytes"); 4312 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4313 &wrq->eq.cntxt_id, 0, "SGE context id of the queue"); 4314 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "cidx", 4315 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.cidx, 0, sysctl_uint16, "I", 4316 "consumer index"); 4317 SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "pidx", 4318 CTLTYPE_INT | CTLFLAG_RD, &wrq->eq.pidx, 0, sysctl_uint16, "I", 4319 "producer index"); 4320 SYSCTL_ADD_INT(ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4321 wrq->eq.sidx, "status page index"); 4322 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_direct", CTLFLAG_RD, 4323 &wrq->tx_wrs_direct, "# of work requests (direct)"); 4324 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_copied", CTLFLAG_RD, 4325 &wrq->tx_wrs_copied, "# of work requests (copied)"); 4326 SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tx_wrs_sspace", CTLFLAG_RD, 4327 &wrq->tx_wrs_ss, "# of work requests (copied from scratch space)"); 4328 4329 return (rc); 4330} 4331 4332static int 4333free_wrq(struct adapter *sc, struct sge_wrq *wrq) 4334{ 4335 int rc; 4336 4337 rc = free_eq(sc, &wrq->eq); 4338 if (rc) 4339 return (rc); 4340 4341 bzero(wrq, sizeof(*wrq)); 4342 return (0); 4343} 4344 4345static int 4346alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx, 4347 struct sysctl_oid *oid) 4348{ 4349 int rc; 4350 struct port_info *pi = vi->pi; 4351 struct adapter *sc = pi->adapter; 4352 struct sge_eq *eq = &txq->eq; 4353 struct txpkts *txp; 4354 char name[16]; 4355 struct sysctl_oid_list *children = SYSCTL_CHILDREN(oid); 4356 4357 rc = mp_ring_alloc(&txq->r, eq->sidx, txq, eth_tx, can_resume_eth_tx, 4358 M_CXGBE, &eq->eq_lock, M_WAITOK); 4359 if (rc != 0) { 4360 device_printf(sc->dev, "failed to allocate mp_ring: %d\n", rc); 4361 return (rc); 4362 } 4363 4364 rc = alloc_eq(sc, vi, eq); 4365 if (rc != 0) { 4366 mp_ring_free(txq->r); 4367 txq->r = NULL; 4368 return (rc); 4369 } 4370 4371 /* Can't fail after this point. 
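 * Everything that follows either cannot fail or blocks until it
 * succeeds: sglist_alloc() and the sdesc malloc() below are called
 * with M_WAITOK, and the SYSCTL_ADD_* calls are not error checked,
 * so no unwind path is needed once alloc_eq() has returned 0.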
*/ 4372 4373 if (idx == 0) 4374 sc->sge.eq_base = eq->abs_id - eq->cntxt_id; 4375 else 4376 KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id, 4377 ("eq_base mismatch")); 4378 KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF, 4379 ("PF with non-zero eq_base")); 4380 4381 TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq); 4382 txq->ifp = vi->ifp; 4383 txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK); 4384 if (vi->flags & TX_USES_VM_WR) 4385 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4386 V_TXPKT_INTF(pi->tx_chan)); 4387 else 4388 txq->cpl_ctrl0 = htobe32(V_TXPKT_OPCODE(CPL_TX_PKT_XT) | 4389 V_TXPKT_INTF(pi->tx_chan) | V_TXPKT_PF(sc->pf) | 4390 V_TXPKT_VF(vi->vin) | V_TXPKT_VF_VLD(vi->vfvld)); 4391 txq->tc_idx = -1; 4392 txq->sdesc = malloc(eq->sidx * sizeof(struct tx_sdesc), M_CXGBE, 4393 M_ZERO | M_WAITOK); 4394 4395 txp = &txq->txp; 4396 MPASS(nitems(txp->mb) >= sc->params.max_pkts_per_eth_tx_pkts_wr); 4397 txq->txp.max_npkt = min(nitems(txp->mb), 4398 sc->params.max_pkts_per_eth_tx_pkts_wr); 4399 if (vi->flags & TX_USES_VM_WR && !(sc->flags & IS_VF)) 4400 txq->txp.max_npkt--; 4401 4402 snprintf(name, sizeof(name), "%d", idx); 4403 oid = SYSCTL_ADD_NODE(&vi->ctx, children, OID_AUTO, name, CTLFLAG_RD, 4404 NULL, "tx queue"); 4405 children = SYSCTL_CHILDREN(oid); 4406 4407 SYSCTL_ADD_UAUTO(&vi->ctx, children, OID_AUTO, "ba", CTLFLAG_RD, 4408 &eq->ba, "bus address of descriptor ring"); 4409 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "dmalen", CTLFLAG_RD, NULL, 4410 eq->sidx * EQ_ESIZE + sc->params.sge.spg_len, 4411 "desc ring size in bytes"); 4412 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD, 4413 &eq->abs_id, 0, "absolute id of the queue"); 4414 SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD, 4415 &eq->cntxt_id, 0, "SGE context id of the queue"); 4416 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx", 4417 CTLTYPE_INT | CTLFLAG_RD, &eq->cidx, 0, sysctl_uint16, "I", 4418 "consumer index"); 4419 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "pidx", 4420 CTLTYPE_INT | CTLFLAG_RD, &eq->pidx, 0, sysctl_uint16, "I", 4421 "producer index"); 4422 SYSCTL_ADD_INT(&vi->ctx, children, OID_AUTO, "sidx", CTLFLAG_RD, NULL, 4423 eq->sidx, "status page index"); 4424 4425 SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "tc", 4426 CTLTYPE_INT | CTLFLAG_RW, vi, idx, sysctl_tc, "I", 4427 "traffic class (-1 means none)"); 4428 4429 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txcsum", CTLFLAG_RD, 4430 &txq->txcsum, "# of times hardware assisted with checksum"); 4431 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vlan_insertion", 4432 CTLFLAG_RD, &txq->vlan_insertion, 4433 "# of times hardware inserted 802.1Q tag"); 4434 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "tso_wrs", CTLFLAG_RD, 4435 &txq->tso_wrs, "# of TSO work requests"); 4436 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "imm_wrs", CTLFLAG_RD, 4437 &txq->imm_wrs, "# of work requests with immediate data"); 4438 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "sgl_wrs", CTLFLAG_RD, 4439 &txq->sgl_wrs, "# of work requests with direct SGL"); 4440 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkt_wrs", CTLFLAG_RD, 4441 &txq->txpkt_wrs, "# of txpkt work requests (one pkt/WR)"); 4442 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_wrs", 4443 CTLFLAG_RD, &txq->txpkts0_wrs, 4444 "# of txpkts (type 0) work requests"); 4445 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_wrs", 4446 CTLFLAG_RD, &txq->txpkts1_wrs, 4447 "# of txpkts (type 1) work requests"); 4448 
SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts0_pkts", 4449 CTLFLAG_RD, &txq->txpkts0_pkts, 4450 "# of frames tx'd using type0 txpkts work requests"); 4451 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts1_pkts", 4452 CTLFLAG_RD, &txq->txpkts1_pkts, 4453 "# of frames tx'd using type1 txpkts work requests"); 4454 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "txpkts_flush", 4455 CTLFLAG_RD, &txq->txpkts_flush, 4456 "# of times txpkts had to be flushed out by an egress-update"); 4457 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "raw_wrs", CTLFLAG_RD, 4458 &txq->raw_wrs, "# of raw work requests (non-packets)"); 4459 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_tso_wrs", 4460 CTLFLAG_RD, &txq->vxlan_tso_wrs, "# of VXLAN TSO work requests"); 4461 SYSCTL_ADD_UQUAD(&vi->ctx, children, OID_AUTO, "vxlan_txcsum", 4462 CTLFLAG_RD, &txq->vxlan_txcsum, 4463 "# of times hardware assisted with inner checksums (VXLAN)"); 4464 4465 mp_ring_sysctls(txq->r, &vi->ctx, children); 4466 4467 return (0); 4468} 4469 4470static int 4471free_txq(struct vi_info *vi, struct sge_txq *txq) 4472{ 4473 int rc; 4474 struct adapter *sc = vi->adapter; 4475 struct sge_eq *eq = &txq->eq; 4476 4477 rc = free_eq(sc, eq); 4478 if (rc) 4479 return (rc); 4480 4481 sglist_free(txq->gl); 4482 free(txq->sdesc, M_CXGBE); 4483 mp_ring_free(txq->r); 4484 4485 bzero(txq, sizeof(*txq)); 4486 return (0); 4487} 4488 4489static void 4490oneseg_dma_callback(void *arg, bus_dma_segment_t *segs, int nseg, int error) 4491{ 4492 bus_addr_t *ba = arg; 4493 4494 KASSERT(nseg == 1, 4495 ("%s meant for single segment mappings only.", __func__)); 4496 4497 *ba = error ? 0 : segs->ds_addr; 4498} 4499 4500static inline void 4501ring_fl_db(struct adapter *sc, struct sge_fl *fl) 4502{ 4503 uint32_t n, v; 4504 4505 n = IDXDIFF(fl->pidx >> 3, fl->dbidx, fl->sidx); 4506 MPASS(n > 0); 4507 4508 wmb(); 4509 v = fl->dbval | V_PIDX(n); 4510 if (fl->udb) 4511 *fl->udb = htole32(v); 4512 else 4513 t4_write_reg(sc, sc->sge_kdoorbell_reg, v); 4514 IDXINCR(fl->dbidx, n, fl->sidx); 4515} 4516 4517/* 4518 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are 4519 * recycled do not count towards this allocation budget. 4520 * 4521 * Returns non-zero to indicate that this freelist should be added to the list 4522 * of starving freelists. 4523 */ 4524static int 4525refill_fl(struct adapter *sc, struct sge_fl *fl, int n) 4526{ 4527 __be64 *d; 4528 struct fl_sdesc *sd; 4529 uintptr_t pa; 4530 caddr_t cl; 4531 struct rx_buf_info *rxb; 4532 struct cluster_metadata *clm; 4533 uint16_t max_pidx, zidx = fl->zidx; 4534 uint16_t hw_cidx = fl->hw_cidx; /* stable snapshot */ 4535 4536 FL_LOCK_ASSERT_OWNED(fl); 4537 4538 /* 4539 * We always stop at the beginning of the hardware descriptor that's just 4540 * before the one with the hw cidx. This is to avoid hw pidx = hw cidx, 4541 * which would mean an empty freelist to the chip. 4542 */ 4543 max_pidx = __predict_false(hw_cidx == 0) ? fl->sidx - 1 : hw_cidx - 1; 4544 if (fl->pidx == max_pidx * 8) 4545 return (0); 4546 4547 d = &fl->desc[fl->pidx]; 4548 sd = &fl->sdesc[fl->pidx]; 4549 rxb = &sc->sge.rx_buf_info[zidx]; 4550 4551 while (n > 0) { 4552 4553 if (sd->cl != NULL) { 4554 4555 if (sd->nmbuf == 0) { 4556 /* 4557 * Fast recycle without involving any atomics on 4558 * the cluster's metadata (if the cluster has 4559 * metadata). This happens when all frames 4560 * received in the cluster were small enough to 4561 * fit within a single mbuf each. 
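 * When nmbuf is non-zero the cluster still has mbufs referencing it
 * and the slower path further down is taken instead: our reference
 * is dropped with atomic_fetchadd_int(), and only if it was the last
 * one (old value 1) is the cluster reused in place; otherwise sd->cl
 * is cleared and a fresh cluster is allocated for this descriptor.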
4562 */ 4563 fl->cl_fast_recycled++; 4564 goto recycled; 4565 } 4566 4567 /* 4568 * Cluster is guaranteed to have metadata. Clusters 4569 * without metadata always take the fast recycle path 4570 * when they're recycled. 4571 */ 4572 clm = cl_metadata(sd); 4573 MPASS(clm != NULL); 4574 4575 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 4576 fl->cl_recycled++; 4577 counter_u64_add(extfree_rels, 1); 4578 goto recycled; 4579 } 4580 sd->cl = NULL; /* gave up my reference */ 4581 } 4582 MPASS(sd->cl == NULL); 4583 cl = uma_zalloc(rxb->zone, M_NOWAIT); 4584 if (__predict_false(cl == NULL)) { 4585 if (zidx != fl->safe_zidx) { 4586 zidx = fl->safe_zidx; 4587 rxb = &sc->sge.rx_buf_info[zidx]; 4588 cl = uma_zalloc(rxb->zone, M_NOWAIT); 4589 } 4590 if (cl == NULL) 4591 break; 4592 } 4593 fl->cl_allocated++; 4594 n--; 4595 4596 pa = pmap_kextract((vm_offset_t)cl); 4597 sd->cl = cl; 4598 sd->zidx = zidx; 4599 4600 if (fl->flags & FL_BUF_PACKING) { 4601 *d = htobe64(pa | rxb->hwidx2); 4602 sd->moff = rxb->size2; 4603 } else { 4604 *d = htobe64(pa | rxb->hwidx1); 4605 sd->moff = 0; 4606 } 4607recycled: 4608 sd->nmbuf = 0; 4609 d++; 4610 sd++; 4611 if (__predict_false((++fl->pidx & 7) == 0)) { 4612 uint16_t pidx = fl->pidx >> 3; 4613 4614 if (__predict_false(pidx == fl->sidx)) { 4615 fl->pidx = 0; 4616 pidx = 0; 4617 sd = fl->sdesc; 4618 d = fl->desc; 4619 } 4620 if (n < 8 || pidx == max_pidx) 4621 break; 4622 4623 if (IDXDIFF(pidx, fl->dbidx, fl->sidx) >= 4) 4624 ring_fl_db(sc, fl); 4625 } 4626 } 4627 4628 if ((fl->pidx >> 3) != fl->dbidx) 4629 ring_fl_db(sc, fl); 4630 4631 return (FL_RUNNING_LOW(fl) && !(fl->flags & FL_STARVING)); 4632} 4633 4634/* 4635 * Attempt to refill all starving freelists. 4636 */ 4637static void 4638refill_sfl(void *arg) 4639{ 4640 struct adapter *sc = arg; 4641 struct sge_fl *fl, *fl_temp; 4642 4643 mtx_assert(&sc->sfl_lock, MA_OWNED); 4644 TAILQ_FOREACH_SAFE(fl, &sc->sfl, link, fl_temp) { 4645 FL_LOCK(fl); 4646 refill_fl(sc, fl, 64); 4647 if (FL_NOT_RUNNING_LOW(fl) || fl->flags & FL_DOOMED) { 4648 TAILQ_REMOVE(&sc->sfl, fl, link); 4649 fl->flags &= ~FL_STARVING; 4650 } 4651 FL_UNLOCK(fl); 4652 } 4653 4654 if (!TAILQ_EMPTY(&sc->sfl)) 4655 callout_schedule(&sc->sfl_callout, hz / 5); 4656} 4657 4658static int 4659alloc_fl_sdesc(struct sge_fl *fl) 4660{ 4661 4662 fl->sdesc = malloc(fl->sidx * 8 * sizeof(struct fl_sdesc), M_CXGBE, 4663 M_ZERO | M_WAITOK); 4664 4665 return (0); 4666} 4667 4668static void 4669free_fl_sdesc(struct adapter *sc, struct sge_fl *fl) 4670{ 4671 struct fl_sdesc *sd; 4672 struct cluster_metadata *clm; 4673 int i; 4674 4675 sd = fl->sdesc; 4676 for (i = 0; i < fl->sidx * 8; i++, sd++) { 4677 if (sd->cl == NULL) 4678 continue; 4679 4680 if (sd->nmbuf == 0) 4681 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, sd->cl); 4682 else if (fl->flags & FL_BUF_PACKING) { 4683 clm = cl_metadata(sd); 4684 if (atomic_fetchadd_int(&clm->refcount, -1) == 1) { 4685 uma_zfree(sc->sge.rx_buf_info[sd->zidx].zone, 4686 sd->cl); 4687 counter_u64_add(extfree_rels, 1); 4688 } 4689 } 4690 sd->cl = NULL; 4691 } 4692 4693 free(fl->sdesc, M_CXGBE); 4694 fl->sdesc = NULL; 4695} 4696 4697static inline void 4698get_pkt_gl(struct mbuf *m, struct sglist *gl) 4699{ 4700 int rc; 4701 4702 M_ASSERTPKTHDR(m); 4703 4704 sglist_reset(gl); 4705 rc = sglist_append_mbuf(gl, m); 4706 if (__predict_false(rc != 0)) { 4707 panic("%s: mbuf %p (%d segs) was vetted earlier but now fails " 4708 "with %d.", __func__, m, mbuf_nsegs(m), rc); 4709 } 4710 4711 KASSERT(gl->sg_nseg == mbuf_nsegs(m), 4712 ("%s: 
nsegs changed for mbuf %p from %d to %d", __func__, m, 4713 mbuf_nsegs(m), gl->sg_nseg)); 4714#if 0 /* vm_wr not readily available here. */ 4715 KASSERT(gl->sg_nseg > 0 && gl->sg_nseg <= max_nsegs_allowed(m, vm_wr), 4716 ("%s: %d segments, should have been 1 <= nsegs <= %d", __func__, 4717 gl->sg_nseg, max_nsegs_allowed(m, vm_wr))); 4718#endif 4719} 4720 4721/* 4722 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 4723 */ 4724static inline u_int 4725txpkt_len16(u_int nsegs, const u_int extra) 4726{ 4727 u_int n; 4728 4729 MPASS(nsegs > 0); 4730 4731 nsegs--; /* first segment is part of ulptx_sgl */ 4732 n = extra + sizeof(struct fw_eth_tx_pkt_wr) + 4733 sizeof(struct cpl_tx_pkt_core) + 4734 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4735 4736 return (howmany(n, 16)); 4737} 4738 4739/* 4740 * len16 for a txpkt_vm WR with a GL. Includes the firmware work 4741 * request header. 4742 */ 4743static inline u_int 4744txpkt_vm_len16(u_int nsegs, const u_int extra) 4745{ 4746 u_int n; 4747 4748 MPASS(nsegs > 0); 4749 4750 nsegs--; /* first segment is part of ulptx_sgl */ 4751 n = extra + sizeof(struct fw_eth_tx_pkt_vm_wr) + 4752 sizeof(struct cpl_tx_pkt_core) + 4753 sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4754 4755 return (howmany(n, 16)); 4756} 4757 4758static inline void 4759calculate_mbuf_len16(struct mbuf *m, bool vm_wr) 4760{ 4761 const int lso = sizeof(struct cpl_tx_pkt_lso_core); 4762 const int tnl_lso = sizeof(struct cpl_tx_tnl_lso); 4763 4764 if (vm_wr) { 4765 if (needs_tso(m)) 4766 set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), lso)); 4767 else 4768 set_mbuf_len16(m, txpkt_vm_len16(mbuf_nsegs(m), 0)); 4769 return; 4770 } 4771 4772 if (needs_tso(m)) { 4773 if (needs_vxlan_tso(m)) 4774 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), tnl_lso)); 4775 else 4776 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), lso)); 4777 } else 4778 set_mbuf_len16(m, txpkt_len16(mbuf_nsegs(m), 0)); 4779} 4780 4781/* 4782 * len16 for a txpkts type 0 WR with a GL. Does not include the firmware work 4783 * request header. 4784 */ 4785static inline u_int 4786txpkts0_len16(u_int nsegs) 4787{ 4788 u_int n; 4789 4790 MPASS(nsegs > 0); 4791 4792 nsegs--; /* first segment is part of ulptx_sgl */ 4793 n = sizeof(struct ulp_txpkt) + sizeof(struct ulptx_idata) + 4794 sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl) + 4795 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 4796 4797 return (howmany(n, 16)); 4798} 4799 4800/* 4801 * len16 for a txpkts type 1 WR with a GL. Does not include the firmware work 4802 * request header. 
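 * With the usual 16-byte cpl_tx_pkt_core and 16-byte ulptx_sgl this
 * comes to howmany(32, 16) == 2, i.e. 32 bytes of WR space per
 * coalesced frame, which matches the 32B-per-mbuf accounting in
 * write_txpkts_vm_wr().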
4803 */ 4804static inline u_int 4805txpkts1_len16(void) 4806{ 4807 u_int n; 4808 4809 n = sizeof(struct cpl_tx_pkt_core) + sizeof(struct ulptx_sgl); 4810 4811 return (howmany(n, 16)); 4812} 4813 4814static inline u_int 4815imm_payload(u_int ndesc) 4816{ 4817 u_int n; 4818 4819 n = ndesc * EQ_ESIZE - sizeof(struct fw_eth_tx_pkt_wr) - 4820 sizeof(struct cpl_tx_pkt_core); 4821 4822 return (n); 4823} 4824 4825static inline uint64_t 4826csum_to_ctrl(struct adapter *sc, struct mbuf *m) 4827{ 4828 uint64_t ctrl; 4829 int csum_type, l2hlen, l3hlen; 4830 int x, y; 4831 static const int csum_types[3][2] = { 4832 {TX_CSUM_TCPIP, TX_CSUM_TCPIP6}, 4833 {TX_CSUM_UDPIP, TX_CSUM_UDPIP6}, 4834 {TX_CSUM_IP, 0} 4835 }; 4836 4837 M_ASSERTPKTHDR(m); 4838 4839 if (!needs_hwcsum(m)) 4840 return (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS); 4841 4842 MPASS(m->m_pkthdr.l2hlen >= ETHER_HDR_LEN); 4843 MPASS(m->m_pkthdr.l3hlen >= sizeof(struct ip)); 4844 4845 if (needs_vxlan_csum(m)) { 4846 MPASS(m->m_pkthdr.l4hlen > 0); 4847 MPASS(m->m_pkthdr.l5hlen > 0); 4848 MPASS(m->m_pkthdr.inner_l2hlen >= ETHER_HDR_LEN); 4849 MPASS(m->m_pkthdr.inner_l3hlen >= sizeof(struct ip)); 4850 4851 l2hlen = m->m_pkthdr.l2hlen + m->m_pkthdr.l3hlen + 4852 m->m_pkthdr.l4hlen + m->m_pkthdr.l5hlen + 4853 m->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN; 4854 l3hlen = m->m_pkthdr.inner_l3hlen; 4855 } else { 4856 l2hlen = m->m_pkthdr.l2hlen - ETHER_HDR_LEN; 4857 l3hlen = m->m_pkthdr.l3hlen; 4858 } 4859 4860 ctrl = 0; 4861 if (!needs_l3_csum(m)) 4862 ctrl |= F_TXPKT_IPCSUM_DIS; 4863 4864 if (m->m_pkthdr.csum_flags & (CSUM_IP_TCP | CSUM_INNER_IP_TCP | 4865 CSUM_IP6_TCP | CSUM_INNER_IP6_TCP)) 4866 x = 0; /* TCP */ 4867 else if (m->m_pkthdr.csum_flags & (CSUM_IP_UDP | CSUM_INNER_IP_UDP | 4868 CSUM_IP6_UDP | CSUM_INNER_IP6_UDP)) 4869 x = 1; /* UDP */ 4870 else 4871 x = 2; 4872 4873 if (m->m_pkthdr.csum_flags & (CSUM_IP | CSUM_IP_TCP | CSUM_IP_UDP | 4874 CSUM_INNER_IP | CSUM_INNER_IP_TCP | CSUM_INNER_IP_UDP)) 4875 y = 0; /* IPv4 */ 4876 else { 4877 MPASS(m->m_pkthdr.csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | 4878 CSUM_INNER_IP6_TCP | CSUM_INNER_IP6_UDP)); 4879 y = 1; /* IPv6 */ 4880 } 4881 /* 4882 * needs_hwcsum returned true earlier so there must be some kind of 4883 * checksum to calculate. 
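 * The lookup below uses x as the L4 row (0 = TCP, 1 = UDP, 2 = no L4
 * checksum) and y as the IP version column (0 = IPv4, 1 = IPv6); for
 * example CSUM_IP6_UDP selects csum_types[1][1] == TX_CSUM_UDPIP6.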
4884 */ 4885 csum_type = csum_types[x][y]; 4886 MPASS(csum_type != 0); 4887 if (csum_type == TX_CSUM_IP) 4888 ctrl |= F_TXPKT_L4CSUM_DIS; 4889 ctrl |= V_TXPKT_CSUM_TYPE(csum_type) | V_TXPKT_IPHDR_LEN(l3hlen); 4890 if (chip_id(sc) <= CHELSIO_T5) 4891 ctrl |= V_TXPKT_ETHHDR_LEN(l2hlen); 4892 else 4893 ctrl |= V_T6_TXPKT_ETHHDR_LEN(l2hlen); 4894 4895 return (ctrl); 4896} 4897 4898static inline void * 4899write_lso_cpl(void *cpl, struct mbuf *m0) 4900{ 4901 struct cpl_tx_pkt_lso_core *lso; 4902 uint32_t ctrl; 4903 4904 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4905 m0->m_pkthdr.l4hlen > 0, 4906 ("%s: mbuf %p needs TSO but missing header lengths", 4907 __func__, m0)); 4908 4909 ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) | 4910 F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE | 4911 V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | 4912 V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) | 4913 V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2); 4914 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4915 ctrl |= F_LSO_IPV6; 4916 4917 lso = cpl; 4918 lso->lso_ctrl = htobe32(ctrl); 4919 lso->ipid_ofst = htobe16(0); 4920 lso->mss = htobe16(m0->m_pkthdr.tso_segsz); 4921 lso->seqno_offset = htobe32(0); 4922 lso->len = htobe32(m0->m_pkthdr.len); 4923 4924 return (lso + 1); 4925} 4926 4927static void * 4928write_tnl_lso_cpl(void *cpl, struct mbuf *m0) 4929{ 4930 struct cpl_tx_tnl_lso *tnl_lso = cpl; 4931 uint32_t ctrl; 4932 4933 KASSERT(m0->m_pkthdr.inner_l2hlen > 0 && 4934 m0->m_pkthdr.inner_l3hlen > 0 && m0->m_pkthdr.inner_l4hlen > 0 && 4935 m0->m_pkthdr.inner_l5hlen > 0, 4936 ("%s: mbuf %p needs VXLAN_TSO but missing inner header lengths", 4937 __func__, m0)); 4938 KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 && 4939 m0->m_pkthdr.l4hlen > 0 && m0->m_pkthdr.l5hlen > 0, 4940 ("%s: mbuf %p needs VXLAN_TSO but missing outer header lengths", 4941 __func__, m0)); 4942 4943 /* Outer headers. */ 4944 ctrl = V_CPL_TX_TNL_LSO_OPCODE(CPL_TX_TNL_LSO) | 4945 F_CPL_TX_TNL_LSO_FIRST | F_CPL_TX_TNL_LSO_LAST | 4946 V_CPL_TX_TNL_LSO_ETHHDRLENOUT( 4947 (m0->m_pkthdr.l2hlen - ETHER_HDR_LEN) >> 2) | 4948 V_CPL_TX_TNL_LSO_IPHDRLENOUT(m0->m_pkthdr.l3hlen >> 2) | 4949 F_CPL_TX_TNL_LSO_IPLENSETOUT; 4950 if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr)) 4951 ctrl |= F_CPL_TX_TNL_LSO_IPV6OUT; 4952 else { 4953 ctrl |= F_CPL_TX_TNL_LSO_IPHDRCHKOUT | 4954 F_CPL_TX_TNL_LSO_IPIDINCOUT; 4955 } 4956 tnl_lso->op_to_IpIdSplitOut = htobe32(ctrl); 4957 tnl_lso->IpIdOffsetOut = 0; 4958 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 4959 htobe16(F_CPL_TX_TNL_LSO_UDPCHKCLROUT | 4960 F_CPL_TX_TNL_LSO_UDPLENSETOUT | 4961 V_CPL_TX_TNL_LSO_TNLHDRLEN(m0->m_pkthdr.l2hlen + 4962 m0->m_pkthdr.l3hlen + m0->m_pkthdr.l4hlen + 4963 m0->m_pkthdr.l5hlen) | 4964 V_CPL_TX_TNL_LSO_TNLTYPE(TX_TNL_TYPE_VXLAN)); 4965 tnl_lso->r1 = 0; 4966 4967 /* Inner headers. 
*/ 4968 ctrl = V_CPL_TX_TNL_LSO_ETHHDRLEN( 4969 (m0->m_pkthdr.inner_l2hlen - ETHER_HDR_LEN) >> 2) | 4970 V_CPL_TX_TNL_LSO_IPHDRLEN(m0->m_pkthdr.inner_l3hlen >> 2) | 4971 V_CPL_TX_TNL_LSO_TCPHDRLEN(m0->m_pkthdr.inner_l4hlen >> 2); 4972 if (m0->m_pkthdr.inner_l3hlen == sizeof(struct ip6_hdr)) 4973 ctrl |= F_CPL_TX_TNL_LSO_IPV6; 4974 tnl_lso->Flow_to_TcpHdrLen = htobe32(ctrl); 4975 tnl_lso->IpIdOffset = 0; 4976 tnl_lso->IpIdSplit_to_Mss = 4977 htobe16(V_CPL_TX_TNL_LSO_MSS(m0->m_pkthdr.tso_segsz)); 4978 tnl_lso->TCPSeqOffset = 0; 4979 tnl_lso->EthLenOffset_Size = 4980 htobe32(V_CPL_TX_TNL_LSO_SIZE(m0->m_pkthdr.len)); 4981 4982 return (tnl_lso + 1); 4983} 4984 4985#define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */ 4986 4987/* 4988 * Write a VM txpkt WR for this packet to the hardware descriptors, update the 4989 * software descriptor, and advance the pidx. It is guaranteed that enough 4990 * descriptors are available. 4991 * 4992 * The return value is the # of hardware descriptors used. 4993 */ 4994static u_int 4995write_txpkt_vm_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0) 4996{ 4997 struct sge_eq *eq; 4998 struct fw_eth_tx_pkt_vm_wr *wr; 4999 struct tx_sdesc *txsd; 5000 struct cpl_tx_pkt_core *cpl; 5001 uint32_t ctrl; /* used in many unrelated places */ 5002 uint64_t ctrl1; 5003 int len16, ndesc, pktlen, nsegs; 5004 caddr_t dst; 5005 5006 TXQ_LOCK_ASSERT_OWNED(txq); 5007 M_ASSERTPKTHDR(m0); 5008 5009 len16 = mbuf_len16(m0); 5010 nsegs = mbuf_nsegs(m0); 5011 pktlen = m0->m_pkthdr.len; 5012 ctrl = sizeof(struct cpl_tx_pkt_core); 5013 if (needs_tso(m0)) 5014 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 5015 ndesc = tx_len16_to_desc(len16); 5016 5017 /* Firmware work request header */ 5018 eq = &txq->eq; 5019 wr = (void *)&eq->desc[eq->pidx]; 5020 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_VM_WR) | 5021 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 5022 5023 ctrl = V_FW_WR_LEN16(len16); 5024 wr->equiq_to_len16 = htobe32(ctrl); 5025 wr->r3[0] = 0; 5026 wr->r3[1] = 0; 5027 5028 /* 5029 * Copy over ethmacdst, ethmacsrc, ethtype, and vlantci. 5030 * vlantci is ignored unless the ethtype is 0x8100, so it's 5031 * simpler to always copy it rather than making it 5032 * conditional. Also, it seems that we do not have to set 5033 * vlantci or fake the ethtype when doing VLAN tag insertion. 5034 */ 5035 m_copydata(m0, 0, VM_TX_L2HDR_LEN, wr->ethmacdst); 5036 5037 if (needs_tso(m0)) { 5038 cpl = write_lso_cpl(wr + 1, m0); 5039 txq->tso_wrs++; 5040 } else 5041 cpl = (void *)(wr + 1); 5042 5043 /* Checksum offload */ 5044 ctrl1 = csum_to_ctrl(sc, m0); 5045 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5046 txq->txcsum++; /* some hardware assistance provided */ 5047 5048 /* VLAN tag insertion */ 5049 if (needs_vlan_insertion(m0)) { 5050 ctrl1 |= F_TXPKT_VLAN_VLD | 5051 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 5052 txq->vlan_insertion++; 5053 } 5054 5055 /* CPL header */ 5056 cpl->ctrl0 = txq->cpl_ctrl0; 5057 cpl->pack = 0; 5058 cpl->len = htobe16(pktlen); 5059 cpl->ctrl1 = htobe64(ctrl1); 5060 5061 /* SGL */ 5062 dst = (void *)(cpl + 1); 5063 5064 /* 5065 * A packet using TSO will use up an entire descriptor for the 5066 * firmware work request header, LSO CPL, and TX_PKT_XT CPL. 5067 * If this descriptor is the last descriptor in the ring, wrap 5068 * around to the front of the ring explicitly for the start of 5069 * the sgl. 
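 * (Roughly: a 32-byte fw_eth_tx_pkt_vm_wr, a 16-byte LSO CPL, and a
 * 16-byte cpl_tx_pkt_core add up to 64 bytes, one full EQ_ESIZE
 * descriptor, assuming the usual firmware header layouts.)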
5070 */ 5071 if (dst == (void *)&eq->desc[eq->sidx]) { 5072 dst = (void *)&eq->desc[0]; 5073 write_gl_to_txd(txq, m0, &dst, 0); 5074 } else 5075 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 5076 txq->sgl_wrs++; 5077 txq->txpkt_wrs++; 5078 5079 txsd = &txq->sdesc[eq->pidx]; 5080 txsd->m = m0; 5081 txsd->desc_used = ndesc; 5082 5083 return (ndesc); 5084} 5085 5086/* 5087 * Write a raw WR to the hardware descriptors, update the software 5088 * descriptor, and advance the pidx. It is guaranteed that enough 5089 * descriptors are available. 5090 * 5091 * The return value is the # of hardware descriptors used. 5092 */ 5093static u_int 5094write_raw_wr(struct sge_txq *txq, void *wr, struct mbuf *m0, u_int available) 5095{ 5096 struct sge_eq *eq = &txq->eq; 5097 struct tx_sdesc *txsd; 5098 struct mbuf *m; 5099 caddr_t dst; 5100 int len16, ndesc; 5101 5102 len16 = mbuf_len16(m0); 5103 ndesc = tx_len16_to_desc(len16); 5104 MPASS(ndesc <= available); 5105 5106 dst = wr; 5107 for (m = m0; m != NULL; m = m->m_next) 5108 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 5109 5110 txq->raw_wrs++; 5111 5112 txsd = &txq->sdesc[eq->pidx]; 5113 txsd->m = m0; 5114 txsd->desc_used = ndesc; 5115 5116 return (ndesc); 5117} 5118 5119/* 5120 * Write a txpkt WR for this packet to the hardware descriptors, update the 5121 * software descriptor, and advance the pidx. It is guaranteed that enough 5122 * descriptors are available. 5123 * 5124 * The return value is the # of hardware descriptors used. 5125 */ 5126static u_int 5127write_txpkt_wr(struct adapter *sc, struct sge_txq *txq, struct mbuf *m0, 5128 u_int available) 5129{ 5130 struct sge_eq *eq; 5131 struct fw_eth_tx_pkt_wr *wr; 5132 struct tx_sdesc *txsd; 5133 struct cpl_tx_pkt_core *cpl; 5134 uint32_t ctrl; /* used in many unrelated places */ 5135 uint64_t ctrl1; 5136 int len16, ndesc, pktlen, nsegs; 5137 caddr_t dst; 5138 5139 TXQ_LOCK_ASSERT_OWNED(txq); 5140 M_ASSERTPKTHDR(m0); 5141 5142 len16 = mbuf_len16(m0); 5143 nsegs = mbuf_nsegs(m0); 5144 pktlen = m0->m_pkthdr.len; 5145 ctrl = sizeof(struct cpl_tx_pkt_core); 5146 if (needs_tso(m0)) { 5147 if (needs_vxlan_tso(m0)) 5148 ctrl += sizeof(struct cpl_tx_tnl_lso); 5149 else 5150 ctrl += sizeof(struct cpl_tx_pkt_lso_core); 5151 } else if (!(mbuf_cflags(m0) & MC_NOMAP) && pktlen <= imm_payload(2) && 5152 available >= 2) { 5153 /* Immediate data. Recalculate len16 and set nsegs to 0. 
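 * Assuming the usual 16-byte fw_eth_tx_pkt_wr and 16-byte
 * cpl_tx_pkt_core, imm_payload(2) works out to 2 * EQ_ESIZE - 32 =
 * 96 bytes, so only small frames are inlined this way and everything
 * larger is sent with an SGL.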
*/ 5154 ctrl += pktlen; 5155 len16 = howmany(sizeof(struct fw_eth_tx_pkt_wr) + 5156 sizeof(struct cpl_tx_pkt_core) + pktlen, 16); 5157 nsegs = 0; 5158 } 5159 ndesc = tx_len16_to_desc(len16); 5160 MPASS(ndesc <= available); 5161 5162 /* Firmware work request header */ 5163 eq = &txq->eq; 5164 wr = (void *)&eq->desc[eq->pidx]; 5165 wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_PKT_WR) | 5166 V_FW_ETH_TX_PKT_WR_IMMDLEN(ctrl)); 5167 5168 ctrl = V_FW_WR_LEN16(len16); 5169 wr->equiq_to_len16 = htobe32(ctrl); 5170 wr->r3 = 0; 5171 5172 if (needs_tso(m0)) { 5173 if (needs_vxlan_tso(m0)) { 5174 cpl = write_tnl_lso_cpl(wr + 1, m0); 5175 txq->vxlan_tso_wrs++; 5176 } else { 5177 cpl = write_lso_cpl(wr + 1, m0); 5178 txq->tso_wrs++; 5179 } 5180 } else 5181 cpl = (void *)(wr + 1); 5182 5183 /* Checksum offload */ 5184 ctrl1 = csum_to_ctrl(sc, m0); 5185 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) { 5186 /* some hardware assistance provided */ 5187 if (needs_vxlan_csum(m0)) 5188 txq->vxlan_txcsum++; 5189 else 5190 txq->txcsum++; 5191 } 5192 5193 /* VLAN tag insertion */ 5194 if (needs_vlan_insertion(m0)) { 5195 ctrl1 |= F_TXPKT_VLAN_VLD | 5196 V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag); 5197 txq->vlan_insertion++; 5198 } 5199 5200 /* CPL header */ 5201 cpl->ctrl0 = txq->cpl_ctrl0; 5202 cpl->pack = 0; 5203 cpl->len = htobe16(pktlen); 5204 cpl->ctrl1 = htobe64(ctrl1); 5205 5206 /* SGL */ 5207 dst = (void *)(cpl + 1); 5208 if (__predict_false((uintptr_t)dst == (uintptr_t)&eq->desc[eq->sidx])) 5209 dst = (caddr_t)&eq->desc[0]; 5210 if (nsegs > 0) { 5211 5212 write_gl_to_txd(txq, m0, &dst, eq->sidx - ndesc < eq->pidx); 5213 txq->sgl_wrs++; 5214 } else { 5215 struct mbuf *m; 5216 5217 for (m = m0; m != NULL; m = m->m_next) { 5218 copy_to_txd(eq, mtod(m, caddr_t), &dst, m->m_len); 5219#ifdef INVARIANTS 5220 pktlen -= m->m_len; 5221#endif 5222 } 5223#ifdef INVARIANTS 5224 KASSERT(pktlen == 0, ("%s: %d bytes left.", __func__, pktlen)); 5225#endif 5226 txq->imm_wrs++; 5227 } 5228 5229 txq->txpkt_wrs++; 5230 5231 txsd = &txq->sdesc[eq->pidx]; 5232 txsd->m = m0; 5233 txsd->desc_used = ndesc; 5234 5235 return (ndesc); 5236} 5237 5238static inline bool 5239cmp_l2hdr(struct txpkts *txp, struct mbuf *m) 5240{ 5241 int len; 5242 5243 MPASS(txp->npkt > 0); 5244 MPASS(m->m_len >= VM_TX_L2HDR_LEN); 5245 5246 if (txp->ethtype == be16toh(ETHERTYPE_VLAN)) 5247 len = VM_TX_L2HDR_LEN; 5248 else 5249 len = sizeof(struct ether_header); 5250 5251 return (memcmp(m->m_data, &txp->ethmacdst[0], len) != 0); 5252} 5253 5254static inline void 5255save_l2hdr(struct txpkts *txp, struct mbuf *m) 5256{ 5257 MPASS(m->m_len >= VM_TX_L2HDR_LEN); 5258 5259 memcpy(&txp->ethmacdst[0], mtod(m, const void *), VM_TX_L2HDR_LEN); 5260} 5261 5262static int 5263add_to_txpkts_vf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5264 int avail, bool *send) 5265{ 5266 struct txpkts *txp = &txq->txp; 5267 5268 /* Cannot have TSO and coalesce at the same time. 
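 * Return protocol used below: EINVAL means this frame cannot be part
 * of a txpkts WR at all, with *send indicating whether a pending
 * batch should be flushed first; EAGAIN means the frame is eligible
 * but does not fit the current batch, so the caller should send what
 * has accumulated and then retry with the same frame.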
*/ 5269 if (cannot_use_txpkts(m)) { 5270cannot_coalesce: 5271 *send = txp->npkt > 0; 5272 return (EINVAL); 5273 } 5274 5275 /* VF allows coalescing of type 1 (1 GL) only */ 5276 if (mbuf_nsegs(m) > 1) 5277 goto cannot_coalesce; 5278 5279 *send = false; 5280 if (txp->npkt > 0) { 5281 MPASS(tx_len16_to_desc(txp->len16) <= avail); 5282 MPASS(txp->npkt < txp->max_npkt); 5283 MPASS(txp->wr_type == 1); /* VF supports type 1 only */ 5284 5285 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > avail) { 5286retry_after_send: 5287 *send = true; 5288 return (EAGAIN); 5289 } 5290 if (m->m_pkthdr.len + txp->plen > 65535) 5291 goto retry_after_send; 5292 if (cmp_l2hdr(txp, m)) 5293 goto retry_after_send; 5294 5295 txp->len16 += txpkts1_len16(); 5296 txp->plen += m->m_pkthdr.len; 5297 txp->mb[txp->npkt++] = m; 5298 if (txp->npkt == txp->max_npkt) 5299 *send = true; 5300 } else { 5301 txp->len16 = howmany(sizeof(struct fw_eth_tx_pkts_vm_wr), 16) + 5302 txpkts1_len16(); 5303 if (tx_len16_to_desc(txp->len16) > avail) 5304 goto cannot_coalesce; 5305 txp->npkt = 1; 5306 txp->wr_type = 1; 5307 txp->plen = m->m_pkthdr.len; 5308 txp->mb[0] = m; 5309 save_l2hdr(txp, m); 5310 } 5311 return (0); 5312} 5313 5314static int 5315add_to_txpkts_pf(struct adapter *sc, struct sge_txq *txq, struct mbuf *m, 5316 int avail, bool *send) 5317{ 5318 struct txpkts *txp = &txq->txp; 5319 int nsegs; 5320 5321 MPASS(!(sc->flags & IS_VF)); 5322 5323 /* Cannot have TSO and coalesce at the same time. */ 5324 if (cannot_use_txpkts(m)) { 5325cannot_coalesce: 5326 *send = txp->npkt > 0; 5327 return (EINVAL); 5328 } 5329 5330 *send = false; 5331 nsegs = mbuf_nsegs(m); 5332 if (txp->npkt == 0) { 5333 if (m->m_pkthdr.len > 65535) 5334 goto cannot_coalesce; 5335 if (nsegs > 1) { 5336 txp->wr_type = 0; 5337 txp->len16 = 5338 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5339 txpkts0_len16(nsegs); 5340 } else { 5341 txp->wr_type = 1; 5342 txp->len16 = 5343 howmany(sizeof(struct fw_eth_tx_pkts_wr), 16) + 5344 txpkts1_len16(); 5345 } 5346 if (tx_len16_to_desc(txp->len16) > avail) 5347 goto cannot_coalesce; 5348 txp->npkt = 1; 5349 txp->plen = m->m_pkthdr.len; 5350 txp->mb[0] = m; 5351 } else { 5352 MPASS(tx_len16_to_desc(txp->len16) <= avail); 5353 MPASS(txp->npkt < txp->max_npkt); 5354 5355 if (m->m_pkthdr.len + txp->plen > 65535) { 5356retry_after_send: 5357 *send = true; 5358 return (EAGAIN); 5359 } 5360 5361 MPASS(txp->wr_type == 0 || txp->wr_type == 1); 5362 if (txp->wr_type == 0) { 5363 if (tx_len16_to_desc(txp->len16 + 5364 txpkts0_len16(nsegs)) > min(avail, SGE_MAX_WR_NDESC)) 5365 goto retry_after_send; 5366 txp->len16 += txpkts0_len16(nsegs); 5367 } else { 5368 if (nsegs != 1) 5369 goto retry_after_send; 5370 if (tx_len16_to_desc(txp->len16 + txpkts1_len16()) > 5371 avail) 5372 goto retry_after_send; 5373 txp->len16 += txpkts1_len16(); 5374 } 5375 5376 txp->plen += m->m_pkthdr.len; 5377 txp->mb[txp->npkt++] = m; 5378 if (txp->npkt == txp->max_npkt) 5379 *send = true; 5380 } 5381 return (0); 5382} 5383 5384/* 5385 * Write a txpkts WR for the packets in txp to the hardware descriptors, update 5386 * the software descriptor, and advance the pidx. It is guaranteed that enough 5387 * descriptors are available. 5388 * 5389 * The return value is the # of hardware descriptors used. 
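 * A type 0 WR wraps each frame in a ulp_txpkt + ulptx_idata
 * subcommand and may carry multi-segment frames; a type 1 WR places
 * the cpl_tx_pkt_core headers back to back and is limited to frames
 * with a single SGL segment (see add_to_txpkts_pf/_vf).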
5390 */ 5391static u_int 5392write_txpkts_wr(struct adapter *sc, struct sge_txq *txq) 5393{ 5394 const struct txpkts *txp = &txq->txp; 5395 struct sge_eq *eq = &txq->eq; 5396 struct fw_eth_tx_pkts_wr *wr; 5397 struct tx_sdesc *txsd; 5398 struct cpl_tx_pkt_core *cpl; 5399 uint64_t ctrl1; 5400 int ndesc, i, checkwrap; 5401 struct mbuf *m, *last; 5402 void *flitp; 5403 5404 TXQ_LOCK_ASSERT_OWNED(txq); 5405 MPASS(txp->npkt > 0); 5406 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5407 5408 wr = (void *)&eq->desc[eq->pidx]; 5409 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_WR)); 5410 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5411 wr->plen = htobe16(txp->plen); 5412 wr->npkt = txp->npkt; 5413 wr->r3 = 0; 5414 wr->type = txp->wr_type; 5415 flitp = wr + 1; 5416 5417 /* 5418 * At this point we are 16B into a hardware descriptor. If checkwrap is 5419 * set then we know the WR is going to wrap around somewhere. We'll 5420 * check for that at appropriate points. 5421 */ 5422 ndesc = tx_len16_to_desc(txp->len16); 5423 last = NULL; 5424 checkwrap = eq->sidx - ndesc < eq->pidx; 5425 for (i = 0; i < txp->npkt; i++) { 5426 m = txp->mb[i]; 5427 if (txp->wr_type == 0) { 5428 struct ulp_txpkt *ulpmc; 5429 struct ulptx_idata *ulpsc; 5430 5431 /* ULP master command */ 5432 ulpmc = flitp; 5433 ulpmc->cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | 5434 V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(eq->iqid)); 5435 ulpmc->len = htobe32(txpkts0_len16(mbuf_nsegs(m))); 5436 5437 /* ULP subcommand */ 5438 ulpsc = (void *)(ulpmc + 1); 5439 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | 5440 F_ULP_TX_SC_MORE); 5441 ulpsc->len = htobe32(sizeof(struct cpl_tx_pkt_core)); 5442 5443 cpl = (void *)(ulpsc + 1); 5444 if (checkwrap && 5445 (uintptr_t)cpl == (uintptr_t)&eq->desc[eq->sidx]) 5446 cpl = (void *)&eq->desc[0]; 5447 } else { 5448 cpl = flitp; 5449 } 5450 5451 /* Checksum offload */ 5452 ctrl1 = csum_to_ctrl(sc, m); 5453 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) { 5454 /* some hardware assistance provided */ 5455 if (needs_vxlan_csum(m)) 5456 txq->vxlan_txcsum++; 5457 else 5458 txq->txcsum++; 5459 } 5460 5461 /* VLAN tag insertion */ 5462 if (needs_vlan_insertion(m)) { 5463 ctrl1 |= F_TXPKT_VLAN_VLD | 5464 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5465 txq->vlan_insertion++; 5466 } 5467 5468 /* CPL header */ 5469 cpl->ctrl0 = txq->cpl_ctrl0; 5470 cpl->pack = 0; 5471 cpl->len = htobe16(m->m_pkthdr.len); 5472 cpl->ctrl1 = htobe64(ctrl1); 5473 5474 flitp = cpl + 1; 5475 if (checkwrap && 5476 (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5477 flitp = (void *)&eq->desc[0]; 5478 5479 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), checkwrap); 5480 5481 if (last != NULL) 5482 last->m_nextpkt = m; 5483 last = m; 5484 } 5485 5486 txq->sgl_wrs++; 5487 if (txp->wr_type == 0) { 5488 txq->txpkts0_pkts += txp->npkt; 5489 txq->txpkts0_wrs++; 5490 } else { 5491 txq->txpkts1_pkts += txp->npkt; 5492 txq->txpkts1_wrs++; 5493 } 5494 5495 txsd = &txq->sdesc[eq->pidx]; 5496 txsd->m = txp->mb[0]; 5497 txsd->desc_used = ndesc; 5498 5499 return (ndesc); 5500} 5501 5502static u_int 5503write_txpkts_vm_wr(struct adapter *sc, struct sge_txq *txq) 5504{ 5505 const struct txpkts *txp = &txq->txp; 5506 struct sge_eq *eq = &txq->eq; 5507 struct fw_eth_tx_pkts_vm_wr *wr; 5508 struct tx_sdesc *txsd; 5509 struct cpl_tx_pkt_core *cpl; 5510 uint64_t ctrl1; 5511 int ndesc, i; 5512 struct mbuf *m, *last; 5513 void *flitp; 5514 5515 TXQ_LOCK_ASSERT_OWNED(txq); 5516 MPASS(txp->npkt > 0); 5517 MPASS(txp->wr_type == 1); /* VF 
supports type 1 only */ 5518 MPASS(txp->mb[0] != NULL); 5519 MPASS(txp->len16 <= howmany(SGE_MAX_WR_LEN, 16)); 5520 5521 wr = (void *)&eq->desc[eq->pidx]; 5522 wr->op_pkd = htobe32(V_FW_WR_OP(FW_ETH_TX_PKTS_VM_WR)); 5523 wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(txp->len16)); 5524 wr->r3 = 0; 5525 wr->plen = htobe16(txp->plen); 5526 wr->npkt = txp->npkt; 5527 wr->r4 = 0; 5528 memcpy(&wr->ethmacdst[0], &txp->ethmacdst[0], 16); 5529 flitp = wr + 1; 5530 5531 /* 5532 * At this point we are 32B into a hardware descriptor. Each mbuf in 5533 * the WR will take 32B so we check for the end of the descriptor ring 5534 * before writing odd mbufs (mb[1], 3, 5, ..) 5535 */ 5536 ndesc = tx_len16_to_desc(txp->len16); 5537 last = NULL; 5538 for (i = 0; i < txp->npkt; i++) { 5539 m = txp->mb[i]; 5540 if (i & 1 && (uintptr_t)flitp == (uintptr_t)&eq->desc[eq->sidx]) 5541 flitp = &eq->desc[0]; 5542 cpl = flitp; 5543 5544 /* Checksum offload */ 5545 ctrl1 = csum_to_ctrl(sc, m); 5546 if (ctrl1 != (F_TXPKT_IPCSUM_DIS | F_TXPKT_L4CSUM_DIS)) 5547 txq->txcsum++; /* some hardware assistance provided */ 5548 5549 /* VLAN tag insertion */ 5550 if (needs_vlan_insertion(m)) { 5551 ctrl1 |= F_TXPKT_VLAN_VLD | 5552 V_TXPKT_VLAN(m->m_pkthdr.ether_vtag); 5553 txq->vlan_insertion++; 5554 } 5555 5556 /* CPL header */ 5557 cpl->ctrl0 = txq->cpl_ctrl0; 5558 cpl->pack = 0; 5559 cpl->len = htobe16(m->m_pkthdr.len); 5560 cpl->ctrl1 = htobe64(ctrl1); 5561 5562 flitp = cpl + 1; 5563 MPASS(mbuf_nsegs(m) == 1); 5564 write_gl_to_txd(txq, m, (caddr_t *)(&flitp), 0); 5565 5566 if (last != NULL) 5567 last->m_nextpkt = m; 5568 last = m; 5569 } 5570 5571 txq->sgl_wrs++; 5572 txq->txpkts1_pkts += txp->npkt; 5573 txq->txpkts1_wrs++; 5574 5575 txsd = &txq->sdesc[eq->pidx]; 5576 txsd->m = txp->mb[0]; 5577 txsd->desc_used = ndesc; 5578 5579 return (ndesc); 5580} 5581 5582/* 5583 * If the SGL ends on an address that is not 16 byte aligned, this function will 5584 * add a 0 filled flit at the end. 5585 */ 5586static void 5587write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap) 5588{ 5589 struct sge_eq *eq = &txq->eq; 5590 struct sglist *gl = txq->gl; 5591 struct sglist_seg *seg; 5592 __be64 *flitp, *wrap; 5593 struct ulptx_sgl *usgl; 5594 int i, nflits, nsegs; 5595 5596 KASSERT(((uintptr_t)(*to) & 0xf) == 0, 5597 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to)); 5598 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 5599 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 5600 5601 get_pkt_gl(m, gl); 5602 nsegs = gl->sg_nseg; 5603 MPASS(nsegs > 0); 5604 5605 nflits = (3 * (nsegs - 1)) / 2 + ((nsegs - 1) & 1) + 2; 5606 flitp = (__be64 *)(*to); 5607 wrap = (__be64 *)(&eq->desc[eq->sidx]); 5608 seg = &gl->sg_segs[0]; 5609 usgl = (void *)flitp; 5610 5611 /* 5612 * We start at a 16 byte boundary somewhere inside the tx descriptor 5613 * ring, so we're at least 16 bytes away from the status page. There is 5614 * no chance of a wrap around in the middle of usgl (which is 16 bytes). 
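 * nflits above is 2 flits for the ulptx_sgl (which already covers the
 * first segment) plus 3 flits for every additional pair of segments
 * (one flit for the two lengths, two for the addresses); e.g.
 * nsegs = 3 gives 3 * 2 / 2 + 0 + 2 = 5 flits, or 40 bytes.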
5615 */ 5616 5617 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | 5618 V_ULPTX_NSGE(nsegs)); 5619 usgl->len0 = htobe32(seg->ss_len); 5620 usgl->addr0 = htobe64(seg->ss_paddr); 5621 seg++; 5622 5623 if (checkwrap == 0 || (uintptr_t)(flitp + nflits) <= (uintptr_t)wrap) { 5624 5625 /* Won't wrap around at all */ 5626 5627 for (i = 0; i < nsegs - 1; i++, seg++) { 5628 usgl->sge[i / 2].len[i & 1] = htobe32(seg->ss_len); 5629 usgl->sge[i / 2].addr[i & 1] = htobe64(seg->ss_paddr); 5630 } 5631 if (i & 1) 5632 usgl->sge[i / 2].len[1] = htobe32(0); 5633 flitp += nflits; 5634 } else { 5635 5636 /* Will wrap somewhere in the rest of the SGL */ 5637 5638 /* 2 flits already written, write the rest flit by flit */ 5639 flitp = (void *)(usgl + 1); 5640 for (i = 0; i < nflits - 2; i++) { 5641 if (flitp == wrap) 5642 flitp = (void *)eq->desc; 5643 *flitp++ = get_flit(seg, nsegs - 1, i); 5644 } 5645 } 5646 5647 if (nflits & 1) { 5648 MPASS(((uintptr_t)flitp) & 0xf); 5649 *flitp++ = 0; 5650 } 5651 5652 MPASS((((uintptr_t)flitp) & 0xf) == 0); 5653 if (__predict_false(flitp == wrap)) 5654 *to = (void *)eq->desc; 5655 else 5656 *to = (void *)flitp; 5657} 5658 5659static inline void 5660copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len) 5661{ 5662 5663 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]); 5664 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]); 5665 5666 if (__predict_true((uintptr_t)(*to) + len <= 5667 (uintptr_t)&eq->desc[eq->sidx])) { 5668 bcopy(from, *to, len); 5669 (*to) += len; 5670 } else { 5671 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to); 5672 5673 bcopy(from, *to, portion); 5674 from += portion; 5675 portion = len - portion; /* remaining */ 5676 bcopy(from, (void *)eq->desc, portion); 5677 (*to) = (caddr_t)eq->desc + portion; 5678 } 5679} 5680 5681static inline void 5682ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n) 5683{ 5684 u_int db; 5685 5686 MPASS(n > 0); 5687 5688 db = eq->doorbells; 5689 if (n > 1) 5690 clrbit(&db, DOORBELL_WCWR); 5691 wmb(); 5692 5693 switch (ffs(db) - 1) { 5694 case DOORBELL_UDB: 5695 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5696 break; 5697 5698 case DOORBELL_WCWR: { 5699 volatile uint64_t *dst, *src; 5700 int i; 5701 5702 /* 5703 * Queues whose 128B doorbell segment fits in the page do not 5704 * use relative qid (udb_qid is always 0). Only queues with 5705 * doorbell segments can do WCWR. 
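 * The WCWR case below copies the entire 64-byte descriptor into the
 * write-combined doorbell region instead of writing just a PIDX
 * increment, which is why it is limited to a single descriptor
 * (n == 1) and to queues whose doorbell segment lies within the page.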
5706 */ 5707 KASSERT(eq->udb_qid == 0 && n == 1, 5708 ("%s: inappropriate doorbell (0x%x, %d, %d) for eq %p", 5709 __func__, eq->doorbells, n, eq->dbidx, eq)); 5710 5711 dst = (volatile void *)((uintptr_t)eq->udb + UDBS_WR_OFFSET - 5712 UDBS_DB_OFFSET); 5713 i = eq->dbidx; 5714 src = (void *)&eq->desc[i]; 5715 while (src != (void *)&eq->desc[i + 1]) 5716 *dst++ = *src++; 5717 wmb(); 5718 break; 5719 } 5720 5721 case DOORBELL_UDBWC: 5722 *eq->udb = htole32(V_QID(eq->udb_qid) | V_PIDX(n)); 5723 wmb(); 5724 break; 5725 5726 case DOORBELL_KDB: 5727 t4_write_reg(sc, sc->sge_kdoorbell_reg, 5728 V_QID(eq->cntxt_id) | V_PIDX(n)); 5729 break; 5730 } 5731 5732 IDXINCR(eq->dbidx, n, eq->sidx); 5733} 5734 5735static inline u_int 5736reclaimable_tx_desc(struct sge_eq *eq) 5737{ 5738 uint16_t hw_cidx; 5739 5740 hw_cidx = read_hw_cidx(eq); 5741 return (IDXDIFF(hw_cidx, eq->cidx, eq->sidx)); 5742} 5743 5744static inline u_int 5745total_available_tx_desc(struct sge_eq *eq) 5746{ 5747 uint16_t hw_cidx, pidx; 5748 5749 hw_cidx = read_hw_cidx(eq); 5750 pidx = eq->pidx; 5751 5752 if (pidx == hw_cidx) 5753 return (eq->sidx - 1); 5754 else 5755 return (IDXDIFF(hw_cidx, pidx, eq->sidx) - 1); 5756} 5757 5758static inline uint16_t 5759read_hw_cidx(struct sge_eq *eq) 5760{ 5761 struct sge_qstat *spg = (void *)&eq->desc[eq->sidx]; 5762 uint16_t cidx = spg->cidx; /* stable snapshot */ 5763 5764 return (be16toh(cidx)); 5765} 5766 5767/* 5768 * Reclaim 'n' descriptors approximately. 5769 */ 5770static u_int 5771reclaim_tx_descs(struct sge_txq *txq, u_int n) 5772{ 5773 struct tx_sdesc *txsd; 5774 struct sge_eq *eq = &txq->eq; 5775 u_int can_reclaim, reclaimed; 5776 5777 TXQ_LOCK_ASSERT_OWNED(txq); 5778 MPASS(n > 0); 5779 5780 reclaimed = 0; 5781 can_reclaim = reclaimable_tx_desc(eq); 5782 while (can_reclaim && reclaimed < n) { 5783 int ndesc; 5784 struct mbuf *m, *nextpkt; 5785 5786 txsd = &txq->sdesc[eq->cidx]; 5787 ndesc = txsd->desc_used; 5788 5789 /* Firmware doesn't return "partial" credits. 
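 * In other words the hardware cidx only advances in whole-WR units,
 * so if any of a work request's descriptors are reclaimable then all
 * desc_used of them are; the assertion below relies on this.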
*/ 5790 KASSERT(can_reclaim >= ndesc, 5791 ("%s: unexpected number of credits: %d, %d", 5792 __func__, can_reclaim, ndesc)); 5793 KASSERT(ndesc != 0, 5794 ("%s: descriptor with no credits: cidx %d", 5795 __func__, eq->cidx)); 5796 5797 for (m = txsd->m; m != NULL; m = nextpkt) { 5798 nextpkt = m->m_nextpkt; 5799 m->m_nextpkt = NULL; 5800 m_freem(m); 5801 } 5802 reclaimed += ndesc; 5803 can_reclaim -= ndesc; 5804 IDXINCR(eq->cidx, ndesc, eq->sidx); 5805 } 5806 5807 return (reclaimed); 5808} 5809 5810static void 5811tx_reclaim(void *arg, int n) 5812{ 5813 struct sge_txq *txq = arg; 5814 struct sge_eq *eq = &txq->eq; 5815 5816 do { 5817 if (TXQ_TRYLOCK(txq) == 0) 5818 break; 5819 n = reclaim_tx_descs(txq, 32); 5820 if (eq->cidx == eq->pidx) 5821 eq->equeqidx = eq->pidx; 5822 TXQ_UNLOCK(txq); 5823 } while (n > 0); 5824} 5825 5826static __be64 5827get_flit(struct sglist_seg *segs, int nsegs, int idx) 5828{ 5829 int i = (idx / 3) * 2; 5830 5831 switch (idx % 3) { 5832 case 0: { 5833 uint64_t rc; 5834 5835 rc = (uint64_t)segs[i].ss_len << 32; 5836 if (i + 1 < nsegs) 5837 rc |= (uint64_t)(segs[i + 1].ss_len); 5838 5839 return (htobe64(rc)); 5840 } 5841 case 1: 5842 return (htobe64(segs[i].ss_paddr)); 5843 case 2: 5844 return (htobe64(segs[i + 1].ss_paddr)); 5845 } 5846 5847 return (0); 5848} 5849 5850static int 5851find_refill_source(struct adapter *sc, int maxp, bool packing) 5852{ 5853 int i, zidx = -1; 5854 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 5855 5856 if (packing) { 5857 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 5858 if (rxb->hwidx2 == -1) 5859 continue; 5860 if (rxb->size1 < PAGE_SIZE && 5861 rxb->size1 < largest_rx_cluster) 5862 continue; 5863 if (rxb->size1 > largest_rx_cluster) 5864 break; 5865 MPASS(rxb->size1 - rxb->size2 >= CL_METADATA_SIZE); 5866 if (rxb->size2 >= maxp) 5867 return (i); 5868 zidx = i; 5869 } 5870 } else { 5871 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 5872 if (rxb->hwidx1 == -1) 5873 continue; 5874 if (rxb->size1 > largest_rx_cluster) 5875 break; 5876 if (rxb->size1 >= maxp) 5877 return (i); 5878 zidx = i; 5879 } 5880 } 5881 5882 return (zidx); 5883} 5884 5885static void 5886add_fl_to_sfl(struct adapter *sc, struct sge_fl *fl) 5887{ 5888 mtx_lock(&sc->sfl_lock); 5889 FL_LOCK(fl); 5890 if ((fl->flags & FL_DOOMED) == 0) { 5891 fl->flags |= FL_STARVING; 5892 TAILQ_INSERT_TAIL(&sc->sfl, fl, link); 5893 callout_reset(&sc->sfl_callout, hz / 5, refill_sfl, sc); 5894 } 5895 FL_UNLOCK(fl); 5896 mtx_unlock(&sc->sfl_lock); 5897} 5898 5899static void 5900handle_wrq_egr_update(struct adapter *sc, struct sge_eq *eq) 5901{ 5902 struct sge_wrq *wrq = (void *)eq; 5903 5904 atomic_readandclear_int(&eq->equiq); 5905 taskqueue_enqueue(sc->tq[eq->tx_chan], &wrq->wrq_tx_task); 5906} 5907 5908static void 5909handle_eth_egr_update(struct adapter *sc, struct sge_eq *eq) 5910{ 5911 struct sge_txq *txq = (void *)eq; 5912 5913 MPASS((eq->flags & EQ_TYPEMASK) == EQ_ETH); 5914 5915 atomic_readandclear_int(&eq->equiq); 5916 if (mp_ring_is_idle(txq->r)) 5917 taskqueue_enqueue(sc->tq[eq->tx_chan], &txq->tx_reclaim_task); 5918 else 5919 mp_ring_check_drainage(txq->r, 64); 5920} 5921 5922static int 5923handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss, 5924 struct mbuf *m) 5925{ 5926 const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1); 5927 unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid)); 5928 struct adapter *sc = iq->adapter; 5929 struct sge *s = &sc->sge; 5930 struct sge_eq *eq; 5931 static void (*h[])(struct adapter *, struct sge_eq *) = {NULL, 
static int
handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
	struct adapter *sc = iq->adapter;
	struct sge *s = &sc->sge;
	struct sge_eq *eq;
	static void (*h[])(struct adapter *, struct sge_eq *) = {NULL,
		&handle_wrq_egr_update, &handle_eth_egr_update,
		&handle_wrq_egr_update};

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	eq = s->eqmap[qid - s->eq_start - s->eq_base];
	(*h[eq->flags & EQ_TYPEMASK])(sc, eq);

	return (0);
}

/* handle_fw_msg works for both fw4_msg and fw6_msg because this is valid */
CTASSERT(offsetof(struct cpl_fw4_msg, data) == \
    offsetof(struct cpl_fw6_msg, data));

static int
handle_fw_msg(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw6_msg *cpl = (const void *)(rss + 1);

	KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
	    rss->opcode));

	if (cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL) {
		const struct rss_header *rss2;

		rss2 = (const struct rss_header *)&cpl->data[0];
		return (t4_cpl_handler[rss2->opcode](iq, rss2, m));
	}

	return (t4_fw_msg_handler[cpl->type](sc, &cpl->data[0]));
}

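/*
 * handle_fw_msg() unwraps firmware messages.  If the payload is itself a CPL
 * (FW_TYPE_RSSCPL or FW6_TYPE_RSSCPL) it is re-dispatched through
 * t4_cpl_handler[] using the embedded rss_header; everything else goes to
 * the per-type handler in t4_fw_msg_handler[].  t4_handle_wrerr_rpl() below
 * is one such per-type handler and only decodes and logs the error reported
 * by the firmware.
 */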
"vlanid" : 6019 "MAC"); 6020 for (i = 0; i < nitems(e->u.acl.val); i++) 6021 log(LOG_ERR, " %02x", e->u.acl.val[i]); 6022 log(LOG_ERR, "\n"); 6023 break; 6024 default: 6025 log(LOG_ERR, "type %#x\n", 6026 G_FW_ERROR_CMD_TYPE(be32toh(e->op_to_type))); 6027 return (EINVAL); 6028 } 6029 return (0); 6030} 6031 6032static int 6033sysctl_uint16(SYSCTL_HANDLER_ARGS) 6034{ 6035 uint16_t *id = arg1; 6036 int i = *id; 6037 6038 return sysctl_handle_int(oidp, &i, 0, req); 6039} 6040 6041static inline bool 6042bufidx_used(struct adapter *sc, int idx) 6043{ 6044 struct rx_buf_info *rxb = &sc->sge.rx_buf_info[0]; 6045 int i; 6046 6047 for (i = 0; i < SW_ZONE_SIZES; i++, rxb++) { 6048 if (rxb->size1 > largest_rx_cluster) 6049 continue; 6050 if (rxb->hwidx1 == idx || rxb->hwidx2 == idx) 6051 return (true); 6052 } 6053 6054 return (false); 6055} 6056 6057static int 6058sysctl_bufsizes(SYSCTL_HANDLER_ARGS) 6059{ 6060 struct adapter *sc = arg1; 6061 struct sge_params *sp = &sc->params.sge; 6062 int i, rc; 6063 struct sbuf sb; 6064 char c; 6065 6066 sbuf_new(&sb, NULL, 128, SBUF_AUTOEXTEND); 6067 for (i = 0; i < SGE_FLBUF_SIZES; i++) { 6068 if (bufidx_used(sc, i)) 6069 c = '*'; 6070 else 6071 c = '\0'; 6072 6073 sbuf_printf(&sb, "%u%c ", sp->sge_fl_buffer_size[i], c); 6074 } 6075 sbuf_trim(&sb); 6076 sbuf_finish(&sb); 6077 rc = sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 6078 sbuf_delete(&sb); 6079 return (rc); 6080} 6081 6082#ifdef RATELIMIT 6083/* 6084 * len16 for a txpkt WR with a GL. Includes the firmware work request header. 6085 */ 6086static inline u_int 6087txpkt_eo_len16(u_int nsegs, u_int immhdrs, u_int tso) 6088{ 6089 u_int n; 6090 6091 MPASS(immhdrs > 0); 6092 6093 n = roundup2(sizeof(struct fw_eth_tx_eo_wr) + 6094 sizeof(struct cpl_tx_pkt_core) + immhdrs, 16); 6095 if (__predict_false(nsegs == 0)) 6096 goto done; 6097 6098 nsegs--; /* first segment is part of ulptx_sgl */ 6099 n += sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); 6100 if (tso) 6101 n += sizeof(struct cpl_tx_pkt_lso_core); 6102 6103done: 6104 return (howmany(n, 16)); 6105} 6106 6107#define ETID_FLOWC_NPARAMS 6 6108#define ETID_FLOWC_LEN (roundup2((sizeof(struct fw_flowc_wr) + \ 6109 ETID_FLOWC_NPARAMS * sizeof(struct fw_flowc_mnemval)), 16)) 6110#define ETID_FLOWC_LEN16 (howmany(ETID_FLOWC_LEN, 16)) 6111 6112static int 6113send_etid_flowc_wr(struct cxgbe_snd_tag *cst, struct port_info *pi, 6114 struct vi_info *vi) 6115{ 6116 struct wrq_cookie cookie; 6117 u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN; 6118 struct fw_flowc_wr *flowc; 6119 6120 mtx_assert(&cst->lock, MA_OWNED); 6121 MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) == 6122 EO_FLOWC_PENDING); 6123 6124 flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie); 6125 if (__predict_false(flowc == NULL)) 6126 return (ENOMEM); 6127 6128 bzero(flowc, ETID_FLOWC_LEN); 6129 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | 6130 V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0)); 6131 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) | 6132 V_FW_WR_FLOWID(cst->etid)); 6133 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 6134 flowc->mnemval[0].val = htobe32(pfvf); 6135 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 6136 flowc->mnemval[1].val = htobe32(pi->tx_chan); 6137 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 6138 flowc->mnemval[2].val = htobe32(pi->tx_chan); 6139 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 6140 flowc->mnemval[3].val = htobe32(cst->iqid); 6141 
static int
send_etid_flowc_wr(struct cxgbe_snd_tag *cst, struct port_info *pi,
    struct vi_info *vi)
{
	struct wrq_cookie cookie;
	u_int pfvf = pi->adapter->pf << S_FW_VIID_PFN;
	struct fw_flowc_wr *flowc;

	mtx_assert(&cst->lock, MA_OWNED);
	MPASS((cst->flags & (EO_FLOWC_PENDING | EO_FLOWC_RPL_PENDING)) ==
	    EO_FLOWC_PENDING);

	flowc = start_wrq_wr(cst->eo_txq, ETID_FLOWC_LEN16, &cookie);
	if (__predict_false(flowc == NULL))
		return (ENOMEM);

	bzero(flowc, ETID_FLOWC_LEN);
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(ETID_FLOWC_NPARAMS) | V_FW_WR_COMPL(0));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(ETID_FLOWC_LEN16) |
	    V_FW_WR_FLOWID(cst->etid));
	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = htobe32(pfvf);
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = htobe32(pi->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = htobe32(pi->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = htobe32(cst->iqid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_EOSTATE;
	flowc->mnemval[4].val = htobe32(FW_FLOWC_MNEM_EOSTATE_ESTABLISHED);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
	flowc->mnemval[5].val = htobe32(cst->schedcl);

	commit_wrq_wr(cst->eo_txq, flowc, &cookie);

	cst->flags &= ~EO_FLOWC_PENDING;
	cst->flags |= EO_FLOWC_RPL_PENDING;
	MPASS(cst->tx_credits >= ETID_FLOWC_LEN16);	/* flowc is first WR. */
	cst->tx_credits -= ETID_FLOWC_LEN16;

	return (0);
}

#define ETID_FLUSH_LEN16 (howmany(sizeof (struct fw_flowc_wr), 16))

void
send_etid_flush_wr(struct cxgbe_snd_tag *cst)
{
	struct fw_flowc_wr *flowc;
	struct wrq_cookie cookie;

	mtx_assert(&cst->lock, MA_OWNED);

	flowc = start_wrq_wr(cst->eo_txq, ETID_FLUSH_LEN16, &cookie);
	if (__predict_false(flowc == NULL))
		CXGBE_UNIMPLEMENTED(__func__);

	bzero(flowc, ETID_FLUSH_LEN16 * 16);
	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(0) | F_FW_WR_COMPL);
	flowc->flowid_len16 = htobe32(V_FW_WR_LEN16(ETID_FLUSH_LEN16) |
	    V_FW_WR_FLOWID(cst->etid));

	commit_wrq_wr(cst->eo_txq, flowc, &cookie);

	cst->flags |= EO_FLUSH_RPL_PENDING;
	MPASS(cst->tx_credits >= ETID_FLUSH_LEN16);
	cst->tx_credits -= ETID_FLUSH_LEN16;
	cst->ncompl++;
}

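/*
 * write_ethofld_wr() below turns one pending mbuf into a FW_ETH_TX_EO_WR.
 * The work request carries either a udpseg or a tcpseg sub-header (picked
 * from the mbuf's checksum requirements), an optional LSO CPL for TSO, a
 * cpl_tx_pkt_core, the Ethernet/IP/TCP|UDP headers copied inline as
 * immediate data, and finally a ULP_TX scatter-gather list that describes
 * the rest of the payload.
 */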
static void
write_ethofld_wr(struct cxgbe_snd_tag *cst, struct fw_eth_tx_eo_wr *wr,
    struct mbuf *m0, int compl)
{
	struct cpl_tx_pkt_core *cpl;
	uint64_t ctrl1;
	uint32_t ctrl;	/* used in many unrelated places */
	int len16, pktlen, nsegs, immhdrs;
	caddr_t dst;
	uintptr_t p;
	struct ulptx_sgl *usgl;
	struct sglist sg;
	struct sglist_seg segs[38];	/* XXX: find real limit.  XXX: get off the stack */

	mtx_assert(&cst->lock, MA_OWNED);
	M_ASSERTPKTHDR(m0);
	KASSERT(m0->m_pkthdr.l2hlen > 0 && m0->m_pkthdr.l3hlen > 0 &&
	    m0->m_pkthdr.l4hlen > 0,
	    ("%s: ethofld mbuf %p is missing header lengths", __func__, m0));

	len16 = mbuf_eo_len16(m0);
	nsegs = mbuf_eo_nsegs(m0);
	pktlen = m0->m_pkthdr.len;
	ctrl = sizeof(struct cpl_tx_pkt_core);
	if (needs_tso(m0))
		ctrl += sizeof(struct cpl_tx_pkt_lso_core);
	immhdrs = m0->m_pkthdr.l2hlen + m0->m_pkthdr.l3hlen +
	    m0->m_pkthdr.l4hlen;
	ctrl += immhdrs;

	wr->op_immdlen = htobe32(V_FW_WR_OP(FW_ETH_TX_EO_WR) |
	    V_FW_ETH_TX_EO_WR_IMMDLEN(ctrl) | V_FW_WR_COMPL(!!compl));
	wr->equiq_to_len16 = htobe32(V_FW_WR_LEN16(len16) |
	    V_FW_WR_FLOWID(cst->etid));
	wr->r3 = 0;
	if (needs_outer_udp_csum(m0)) {
		wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG;
		wr->u.udpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.udpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.udpseg.udplen = m0->m_pkthdr.l4hlen;
		wr->u.udpseg.rtplen = 0;
		wr->u.udpseg.r4 = 0;
		wr->u.udpseg.mss = htobe16(pktlen - immhdrs);
		wr->u.udpseg.schedpktsize = wr->u.udpseg.mss;
		wr->u.udpseg.plen = htobe32(pktlen - immhdrs);
		cpl = (void *)(wr + 1);
	} else {
		MPASS(needs_outer_tcp_csum(m0));
		wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG;
		wr->u.tcpseg.ethlen = m0->m_pkthdr.l2hlen;
		wr->u.tcpseg.iplen = htobe16(m0->m_pkthdr.l3hlen);
		wr->u.tcpseg.tcplen = m0->m_pkthdr.l4hlen;
		wr->u.tcpseg.tsclk_tsoff = mbuf_eo_tsclk_tsoff(m0);
		wr->u.tcpseg.r4 = 0;
		wr->u.tcpseg.r5 = 0;
		wr->u.tcpseg.plen = htobe32(pktlen - immhdrs);

		if (needs_tso(m0)) {
			struct cpl_tx_pkt_lso_core *lso = (void *)(wr + 1);

			wr->u.tcpseg.mss = htobe16(m0->m_pkthdr.tso_segsz);

			ctrl = V_LSO_OPCODE(CPL_TX_PKT_LSO) |
			    F_LSO_FIRST_SLICE | F_LSO_LAST_SLICE |
			    V_LSO_ETHHDR_LEN((m0->m_pkthdr.l2hlen -
			    ETHER_HDR_LEN) >> 2) |
			    V_LSO_IPHDR_LEN(m0->m_pkthdr.l3hlen >> 2) |
			    V_LSO_TCPHDR_LEN(m0->m_pkthdr.l4hlen >> 2);
			if (m0->m_pkthdr.l3hlen == sizeof(struct ip6_hdr))
				ctrl |= F_LSO_IPV6;
			lso->lso_ctrl = htobe32(ctrl);
			lso->ipid_ofst = htobe16(0);
			lso->mss = htobe16(m0->m_pkthdr.tso_segsz);
			lso->seqno_offset = htobe32(0);
			lso->len = htobe32(pktlen);

			cpl = (void *)(lso + 1);
		} else {
			wr->u.tcpseg.mss = htobe16(0xffff);
			cpl = (void *)(wr + 1);
		}
	}

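	/*
	 * The work request header (and LSO CPL, if any) is complete at this
	 * point.  What follows is the cpl_tx_pkt_core, the packet headers
	 * copied in as immediate data, and the SGL for the remaining payload.
	 */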
	/* Checksum offload must be requested for ethofld. */
	MPASS(needs_outer_l4_csum(m0));
	ctrl1 = csum_to_ctrl(cst->adapter, m0);

	/* VLAN tag insertion */
	if (needs_vlan_insertion(m0)) {
		ctrl1 |= F_TXPKT_VLAN_VLD |
		    V_TXPKT_VLAN(m0->m_pkthdr.ether_vtag);
	}

	/* CPL header */
	cpl->ctrl0 = cst->ctrl0;
	cpl->pack = 0;
	cpl->len = htobe16(pktlen);
	cpl->ctrl1 = htobe64(ctrl1);

	/* Copy Ethernet, IP & TCP/UDP hdrs as immediate data */
	p = (uintptr_t)(cpl + 1);
	m_copydata(m0, 0, immhdrs, (void *)p);

	/* SGL */
	dst = (void *)(cpl + 1);
	if (nsegs > 0) {
		int i, pad;

		/* zero-pad up to the next 16-byte boundary, if not aligned */
		p += immhdrs;
		pad = 16 - (immhdrs & 0xf);
		bzero((void *)p, pad);

		usgl = (void *)(p + pad);
		usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
		    V_ULPTX_NSGE(nsegs));

		sglist_init(&sg, nitems(segs), segs);
		for (; m0 != NULL; m0 = m0->m_next) {
			if (__predict_false(m0->m_len == 0))
				continue;
			if (immhdrs >= m0->m_len) {
				immhdrs -= m0->m_len;
				continue;
			}

			sglist_append(&sg, mtod(m0, char *) + immhdrs,
			    m0->m_len - immhdrs);
			immhdrs = 0;
		}
		MPASS(sg.sg_nseg == nsegs);

		/*
		 * Zero pad last 8B in case the WR doesn't end on a 16B
		 * boundary.
		 */
		*(uint64_t *)((char *)wr + len16 * 16 - 8) = 0;

		usgl->len0 = htobe32(segs[0].ss_len);
		usgl->addr0 = htobe64(segs[0].ss_paddr);
		for (i = 0; i < nsegs - 1; i++) {
			usgl->sge[i / 2].len[i & 1] =
			    htobe32(segs[i + 1].ss_len);
			usgl->sge[i / 2].addr[i & 1] =
			    htobe64(segs[i + 1].ss_paddr);
		}
		if (i & 1)
			usgl->sge[i / 2].len[1] = htobe32(0);
	}

}

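/*
 * ethofld_tx() drains the tag's pending_tx queue for as long as tx credits
 * are available.  A completion (FW_WR_COMPL) is requested when there is no
 * completion outstanding or once roughly half of the tag's total credits
 * have been consumed without one, so that fw4_ack always has a chance to
 * return credits and restart transmission.  For illustration: if tx_total
 * were 1024 len16 credits, a completion would be requested at least once
 * every 512 credits' worth of work requests.
 */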
static void
ethofld_tx(struct cxgbe_snd_tag *cst)
{
	struct mbuf *m;
	struct wrq_cookie cookie;
	int next_credits, compl;
	struct fw_eth_tx_eo_wr *wr;

	mtx_assert(&cst->lock, MA_OWNED);

	while ((m = mbufq_first(&cst->pending_tx)) != NULL) {
		M_ASSERTPKTHDR(m);

		/* How many len16 credits do we need to send this mbuf. */
		next_credits = mbuf_eo_len16(m);
		MPASS(next_credits > 0);
		if (next_credits > cst->tx_credits) {
			/*
			 * Tx will make progress eventually because there is at
			 * least one outstanding fw4_ack that will return
			 * credits and kick the tx.
			 */
			MPASS(cst->ncompl > 0);
			return;
		}
		wr = start_wrq_wr(cst->eo_txq, next_credits, &cookie);
		if (__predict_false(wr == NULL)) {
			/* XXX: wishful thinking, not a real assertion. */
			MPASS(cst->ncompl > 0);
			return;
		}
		cst->tx_credits -= next_credits;
		cst->tx_nocompl += next_credits;
		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
		ETHER_BPF_MTAP(cst->com.ifp, m);
		write_ethofld_wr(cst, wr, m, compl);
		commit_wrq_wr(cst->eo_txq, wr, &cookie);
		if (compl) {
			cst->ncompl++;
			cst->tx_nocompl = 0;
		}
		(void) mbufq_dequeue(&cst->pending_tx);
		mbufq_enqueue(&cst->pending_fwack, m);
	}
}

int
ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
{
	struct cxgbe_snd_tag *cst;
	int rc;

	MPASS(m0->m_nextpkt == NULL);
	MPASS(m0->m_pkthdr.snd_tag != NULL);
	cst = mst_to_cst(m0->m_pkthdr.snd_tag);

	mtx_lock(&cst->lock);
	MPASS(cst->flags & EO_SND_TAG_REF);

	if (__predict_false(cst->flags & EO_FLOWC_PENDING)) {
		struct vi_info *vi = ifp->if_softc;
		struct port_info *pi = vi->pi;
		struct adapter *sc = pi->adapter;
		const uint32_t rss_mask = vi->rss_size - 1;
		uint32_t rss_hash;

		cst->eo_txq = &sc->sge.ofld_txq[vi->first_ofld_txq];
		if (M_HASHTYPE_ISHASH(m0))
			rss_hash = m0->m_pkthdr.flowid;
		else
			rss_hash = arc4random();
		/* We assume RSS hashing */
		cst->iqid = vi->rss[rss_hash & rss_mask];
		cst->eo_txq += rss_hash % vi->nofldtxq;
		rc = send_etid_flowc_wr(cst, pi, vi);
		if (rc != 0)
			goto done;
	}

	if (__predict_false(cst->plen + m0->m_pkthdr.len > eo_max_backlog)) {
		rc = ENOBUFS;
		goto done;
	}

	mbufq_enqueue(&cst->pending_tx, m0);
	cst->plen += m0->m_pkthdr.len;

	ethofld_tx(cst);
	rc = 0;
done:
	mtx_unlock(&cst->lock);
	if (__predict_false(rc != 0))
		m_freem(m0);
	return (rc);
}

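/*
 * ethofld_fw4_ack() reconciles credits returned by the firmware.  The first
 * ack for a flow also covers the FLOWC that opened it.  Each returned credit
 * batch releases mbufs parked on pending_fwack; once the send tag has been
 * dropped by the stack and every credit is back, the tag is freed, otherwise
 * a flush WR is issued to recover whatever is still outstanding.
 */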
static int
ethofld_fw4_ack(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m0)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
	struct mbuf *m;
	u_int etid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
	struct cxgbe_snd_tag *cst;
	uint8_t credits = cpl->credits;

	cst = lookup_etid(sc, etid);
	mtx_lock(&cst->lock);
	if (__predict_false(cst->flags & EO_FLOWC_RPL_PENDING)) {
		MPASS(credits >= ETID_FLOWC_LEN16);
		credits -= ETID_FLOWC_LEN16;
		cst->flags &= ~EO_FLOWC_RPL_PENDING;
	}

	KASSERT(cst->ncompl > 0,
	    ("%s: etid %u (%p) wasn't expecting completion.",
	    __func__, etid, cst));
	cst->ncompl--;

	while (credits > 0) {
		m = mbufq_dequeue(&cst->pending_fwack);
		if (__predict_false(m == NULL)) {
			/*
			 * The remaining credits are for the final flush that
			 * was issued when the tag was freed by the kernel.
			 */
			MPASS((cst->flags &
			    (EO_FLUSH_RPL_PENDING | EO_SND_TAG_REF)) ==
			    EO_FLUSH_RPL_PENDING);
			MPASS(credits == ETID_FLUSH_LEN16);
			MPASS(cst->tx_credits + cpl->credits == cst->tx_total);
			MPASS(cst->ncompl == 0);

			cst->flags &= ~EO_FLUSH_RPL_PENDING;
			cst->tx_credits += cpl->credits;
freetag:
			cxgbe_snd_tag_free_locked(cst);
			return (0);	/* cst is gone. */
		}
		KASSERT(m != NULL,
		    ("%s: too many credits (%u, %u)", __func__, cpl->credits,
		    credits));
		KASSERT(credits >= mbuf_eo_len16(m),
		    ("%s: too few credits (%u, %u, %u)", __func__,
		    cpl->credits, credits, mbuf_eo_len16(m)));
		credits -= mbuf_eo_len16(m);
		cst->plen -= m->m_pkthdr.len;
		m_freem(m);
	}

	cst->tx_credits += cpl->credits;
	MPASS(cst->tx_credits <= cst->tx_total);

	m = mbufq_first(&cst->pending_tx);
	if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
		ethofld_tx(cst);

	if (__predict_false((cst->flags & EO_SND_TAG_REF) == 0) &&
	    cst->ncompl == 0) {
		if (cst->tx_credits == cst->tx_total)
			goto freetag;
		else {
			MPASS((cst->flags & EO_FLUSH_RPL_PENDING) == 0);
			send_etid_flush_wr(cst);
		}
	}

	mtx_unlock(&cst->lock);

	return (0);
}
#endif