iflib.c revision 347212
/*-
 * Copyright (c) 2014-2018, Matthew Macy <mmacy@mattmacy.io>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  1. Redistributions of source code must retain the above copyright notice,
 *     this list of conditions and the following disclaimer.
 *
 *  2. Neither the name of Matthew Macy nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/net/iflib.c 347212 2019-05-06 21:21:15Z erj $");

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_acpi.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/types.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>
#include <sys/sockio.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/kobj.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>
#include <sys/limits.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/mp_ring.h>
#include <net/vnet.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp_lro.h>
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/ip_var.h>
#include <netinet6/ip6_var.h>

#include <machine/bus.h>
#include <machine/in_cksum.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/led/led.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>

#include <net/iflib.h>

#include "ifdi_if.h"

#if defined(__i386__) || defined(__amd64__)
#include <sys/memdesc.h>
#include <machine/bus.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/include/busdma_impl.h>
#include <x86/iommu/busdma_dmar.h>
#endif

#ifdef PCI_IOV
#include <dev/pci/pci_iov.h>
#endif

#include <sys/bitstring.h>
/*
 * enable accounting of every mbuf as it comes in to and goes out of
 * iflib's software descriptor references
 */
#define MEMORY_LOGGING 0
/*
 * Enable mbuf vectors for compressing long mbuf chains
 */

/*
 * NB:
 * - Prefetching in tx cleaning should perhaps be a tunable.
 *   The distance ahead we prefetch needs to be determined by the time spent
 *   in m_free vis-a-vis the cost of a prefetch. This will of course vary
 *   based on the workload:
 *      - NFLX's m_free path is dominated by vm-based M_EXT manipulation which
 *        is quite expensive, thus suggesting very little prefetch.
 *      - small packet forwarding which is just returning a single mbuf to
 *        UMA will typically be very fast vis-a-vis the cost of a memory
 *        access.
 */

/*
 * File organization:
 *  - private structures
 *  - iflib private utility functions
 *  - ifnet functions
 *  - vlan registry and other exported functions
 *  - iflib public core functions
 *
 */
static MALLOC_DEFINE(M_IFLIB, "iflib", "ifnet library");

struct iflib_txq;
typedef struct iflib_txq *iflib_txq_t;
struct iflib_rxq;
typedef struct iflib_rxq *iflib_rxq_t;
struct iflib_fl;
typedef struct iflib_fl *iflib_fl_t;

static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);

typedef struct iflib_filter_info {
	driver_filter_t *ifi_filter;
	void *ifi_filter_arg;
	struct grouptask *ifi_task;
	void *ifi_ctx;
} *iflib_filter_info_t;

struct iflib_ctx {
	KOBJ_FIELDS;
	/*
	 * Pointer to hardware driver's softc
	 */
	void *ifc_softc;
	device_t ifc_dev;
	if_t ifc_ifp;

	cpuset_t ifc_cpus;
	if_shared_ctx_t ifc_sctx;
	struct if_softc_ctx ifc_softc_ctx;

	struct mtx ifc_ctx_mtx;
	struct mtx ifc_state_mtx;

	uint16_t ifc_nhwtxqs;

	iflib_txq_t ifc_txqs;
	iflib_rxq_t ifc_rxqs;
	uint32_t ifc_if_flags;
	uint32_t ifc_flags;
	uint32_t ifc_max_fl_buf_size;
	uint32_t ifc_rx_mbuf_sz;

	int ifc_link_state;
	int ifc_link_irq;
	int ifc_watchdog_events;
	struct cdev *ifc_led_dev;
	struct resource *ifc_msix_mem;

	struct if_irq ifc_legacy_irq;
	struct grouptask ifc_admin_task;
	struct grouptask ifc_vflr_task;
	struct iflib_filter_info ifc_filter_info;
	struct ifmedia	ifc_media;

	struct sysctl_oid *ifc_sysctl_node;
	uint16_t ifc_sysctl_ntxqs;
	uint16_t ifc_sysctl_nrxqs;
	uint16_t ifc_sysctl_qs_eq_override;
	uint16_t ifc_sysctl_rx_budget;

	qidx_t ifc_sysctl_ntxds[8];
	qidx_t ifc_sysctl_nrxds[8];
	struct if_txrx ifc_txrx;
#define isc_txd_encap  ifc_txrx.ift_txd_encap
#define isc_txd_flush  ifc_txrx.ift_txd_flush
#define isc_txd_credits_update  ifc_txrx.ift_txd_credits_update
#define isc_rxd_available ifc_txrx.ift_rxd_available
#define isc_rxd_pkt_get ifc_txrx.ift_rxd_pkt_get
#define isc_rxd_refill ifc_txrx.ift_rxd_refill
#define isc_rxd_flush ifc_txrx.ift_rxd_flush
#define isc_legacy_intr ifc_txrx.ift_legacy_intr
	eventhandler_tag ifc_vlan_attach_event;
	eventhandler_tag ifc_vlan_detach_event;
	uint8_t ifc_mac[ETHER_ADDR_LEN];
	char ifc_mtx_name[16];
};

void *
iflib_get_softc(if_ctx_t ctx)
{

	return (ctx->ifc_softc);
}

device_t
iflib_get_dev(if_ctx_t ctx)
{

	return (ctx->ifc_dev);
}

if_t
iflib_get_ifp(if_ctx_t ctx)
{

	return (ctx->ifc_ifp);
}

struct ifmedia *
iflib_get_media(if_ctx_t ctx)
{

	return (&ctx->ifc_media);
}

void
iflib_set_mac(if_ctx_t ctx, uint8_t mac[ETHER_ADDR_LEN])
{

	bcopy(mac, ctx->ifc_mac, ETHER_ADDR_LEN);
}
if_softc_ctx_t
iflib_get_softc_ctx(if_ctx_t ctx)
{

	return (&ctx->ifc_softc_ctx);
}

if_shared_ctx_t
iflib_get_sctx(if_ctx_t ctx)
{

	return (ctx->ifc_sctx);
}

#define IP_ALIGNED(m) ((((uintptr_t)(m)->m_data) & 0x3) == 0x2)
#define CACHE_PTR_INCREMENT (CACHE_LINE_SIZE/sizeof(void*))
/* round a pointer up to the next cache-line boundary */
#define CACHE_PTR_NEXT(ptr) ((void *)(((uintptr_t)(ptr)+CACHE_LINE_SIZE-1) & ~((uintptr_t)CACHE_LINE_SIZE-1)))

#define LINK_ACTIVE(ctx) ((ctx)->ifc_link_state == LINK_STATE_UP)
#define CTX_IS_VF(ctx) ((ctx)->ifc_sctx->isc_flags & IFLIB_IS_VF)

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE	(1 << 3)
#define TX_SW_DESC_MAPPED	(1 << 4)

#define	M_TOOBIG	M_PROTO1

typedef struct iflib_sw_rx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	caddr_t		*ifsd_cl;	/* direct cluster pointer for rx */
	uint8_t		*ifsd_flags;
} iflib_rxsd_array_t;

typedef struct iflib_sw_tx_desc_array {
	bus_dmamap_t	*ifsd_map;	/* bus_dma maps for packet */
	struct mbuf	**ifsd_m;	/* pkthdr mbufs */
	uint8_t		*ifsd_flags;
} if_txsd_vec_t;

/* magic number that should be high enough for any hardware */
#define IFLIB_MAX_TX_SEGS		128
/* bnxt supports 64 with hardware LRO enabled */
#define IFLIB_MAX_RX_SEGS		64
#define IFLIB_RX_COPY_THRESH		128
#define IFLIB_MAX_RX_REFRESH		32
/* The minimum descriptors per second before we start coalescing */
#define IFLIB_MIN_DESC_SEC		16384
#define IFLIB_DEFAULT_TX_UPDATE_FREQ	16
#define IFLIB_QUEUE_IDLE		0
#define IFLIB_QUEUE_HUNG		1
#define IFLIB_QUEUE_WORKING		2
/* maximum number of txqs that can share an rx interrupt */
#define IFLIB_MAX_TX_SHARED_INTR	4

/* this should really scale with ring size - this is a fairly arbitrary value */
#define TX_BATCH_SIZE			32

#define IFLIB_RESTART_BUDGET		8

#define	IFC_LEGACY		0x001
#define	IFC_QFLUSH		0x002
#define	IFC_MULTISEG		0x004
#define	IFC_DMAR		0x008
#define	IFC_SC_ALLOCATED	0x010
#define	IFC_INIT_DONE		0x020
#define	IFC_PREFETCH		0x040
#define	IFC_DO_RESET		0x080
#define	IFC_DO_WATCHDOG		0x100
#define	IFC_CHECK_HUNG		0x200
#define	IFC_IN_DETACH		0x800

#define CSUM_OFFLOAD		(CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \
				 CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
				 CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
struct iflib_txq {
	qidx_t		ift_in_use;
	qidx_t		ift_cidx;
	qidx_t		ift_cidx_processed;
	qidx_t		ift_pidx;
	uint8_t		ift_gen;
	uint8_t		ift_br_offset;
	uint16_t	ift_npending;
	uint16_t	ift_db_pending;
	uint16_t	ift_rs_pending;
	/* implicit pad */
	uint8_t		ift_txd_size[8];
	uint64_t	ift_processed;
	uint64_t	ift_cleaned;
	uint64_t	ift_cleaned_prev;
#if MEMORY_LOGGING
	uint64_t	ift_enqueued;
	uint64_t	ift_dequeued;
#endif
	uint64_t	ift_no_tx_dma_setup;
	uint64_t	ift_no_desc_avail;
	uint64_t	ift_mbuf_defrag_failed;
	uint64_t	ift_mbuf_defrag;
	uint64_t	ift_map_failed;
	uint64_t	ift_txd_encap_efbig;
	uint64_t	ift_pullups;

	struct mtx	ift_mtx;
	struct mtx	ift_db_mtx;

	/* constant values */
	if_ctx_t	ift_ctx;
	struct ifmp_ring	*ift_br;
	struct grouptask	ift_task;
	qidx_t		ift_size;
	uint16_t	ift_id;
	struct callout	ift_timer;

	if_txsd_vec_t	ift_sds;
	uint8_t		ift_qstatus;
	uint8_t		ift_closed;
	uint8_t		ift_update_freq;
	struct iflib_filter_info ift_filter_info;
	bus_dma_tag_t	ift_desc_tag;
	bus_dma_tag_t	ift_tso_desc_tag;
	iflib_dma_info_t	ift_ifdi;
#define MTX_NAME_LEN 16
	char	ift_mtx_name[MTX_NAME_LEN];
	char	ift_db_mtx_name[MTX_NAME_LEN];
	bus_dma_segment_t	ift_segs[IFLIB_MAX_TX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ift_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

struct iflib_fl {
	qidx_t		ifl_cidx;
	qidx_t		ifl_pidx;
	qidx_t		ifl_credits;
	uint8_t		ifl_gen;
	uint8_t		ifl_rxd_size;
#if MEMORY_LOGGING
	uint64_t	ifl_m_enqueued;
	uint64_t	ifl_m_dequeued;
	uint64_t	ifl_cl_enqueued;
	uint64_t	ifl_cl_dequeued;
#endif
	/* implicit pad */

	bitstr_t	*ifl_rx_bitmap;
	qidx_t		ifl_fragidx;
	/* constant */
	qidx_t		ifl_size;
	uint16_t	ifl_buf_size;
	uint16_t	ifl_cltype;
	uma_zone_t	ifl_zone;
	iflib_rxsd_array_t	ifl_sds;
	iflib_rxq_t	ifl_rxq;
	uint8_t		ifl_id;
	bus_dma_tag_t	ifl_desc_tag;
	iflib_dma_info_t	ifl_ifdi;
	uint64_t	ifl_bus_addrs[IFLIB_MAX_RX_REFRESH] __aligned(CACHE_LINE_SIZE);
	caddr_t		ifl_vm_addrs[IFLIB_MAX_RX_REFRESH];
	qidx_t		ifl_rxd_idxs[IFLIB_MAX_RX_REFRESH];
} __aligned(CACHE_LINE_SIZE);

static inline qidx_t
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	qidx_t used;

	if (pidx > cidx)
		used = pidx - cidx;
	else if (pidx < cidx)
		used = size - cidx + pidx;
	else if (gen == 0 && pidx == cidx)
		used = 0;
	else if (gen == 1 && pidx == cidx)
		used = size;
	else
		panic("bad state");

	return (used);
}

#define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))

#define IDXDIFF(head, tail, wrap) \
	((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))
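/*
 * Worked example (illustrative, not part of the driver): with size = 1024,
 * cidx = 1000 and pidx = 8, the producer has wrapped past the end of the
 * ring, so get_inuse() returns 1024 - 1000 + 8 = 32 in-flight descriptors
 * and TXQ_AVAIL() reports 1024 - 32 = 992 free slots.  The gen bit only
 * matters when pidx == cidx, disambiguating "empty" (gen == 0) from "full"
 * (gen == 1).  IDXDIFF() is the same wraparound distance computed against an
 * explicit ring size: IDXDIFF(8, 1000, 1024) == 32.
 */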
struct iflib_rxq {
	/* If there is a separate completion queue -
	 * these are the cq cidx and pidx. Otherwise
	 * these are unused.
	 */
	qidx_t		ifr_size;
	qidx_t		ifr_cq_cidx;
	qidx_t		ifr_cq_pidx;
	uint8_t		ifr_cq_gen;
	uint8_t		ifr_fl_offset;

	if_ctx_t	ifr_ctx;
	iflib_fl_t	ifr_fl;
	uint64_t	ifr_rx_irq;
	uint16_t	ifr_id;
	uint8_t		ifr_lro_enabled;
	uint8_t		ifr_nfl;
	uint8_t		ifr_ntxqirq;
	uint8_t		ifr_txqid[IFLIB_MAX_TX_SHARED_INTR];
	struct lro_ctrl		ifr_lc;
	struct grouptask	ifr_task;
	struct iflib_filter_info ifr_filter_info;
	iflib_dma_info_t	ifr_ifdi;

	/* dynamically allocate if any drivers need a value substantially larger than this */
	struct if_rxd_frag	ifr_frags[IFLIB_MAX_RX_SEGS] __aligned(CACHE_LINE_SIZE);
#ifdef IFLIB_DIAGNOSTICS
	uint64_t ifr_cpu_exec_count[256];
#endif
} __aligned(CACHE_LINE_SIZE);

typedef struct if_rxsd {
	caddr_t *ifsd_cl;
	struct mbuf **ifsd_m;
	iflib_fl_t ifsd_fl;
	qidx_t ifsd_cidx;
} *if_rxsd_t;

/* multiple of word size */
#ifdef __LP64__
#define PKT_INFO_SIZE	6
#define RXD_INFO_SIZE	5
#define PKT_TYPE uint64_t
#else
#define PKT_INFO_SIZE	11
#define RXD_INFO_SIZE	8
#define PKT_TYPE uint32_t
#endif
#define PKT_LOOP_BOUND  ((PKT_INFO_SIZE/3)*3)
#define RXD_LOOP_BOUND  ((RXD_INFO_SIZE/4)*4)

typedef struct if_pkt_info_pad {
	PKT_TYPE pkt_val[PKT_INFO_SIZE];
} *if_pkt_info_pad_t;
typedef struct if_rxd_info_pad {
	PKT_TYPE rxd_val[RXD_INFO_SIZE];
} *if_rxd_info_pad_t;

CTASSERT(sizeof(struct if_pkt_info_pad) == sizeof(struct if_pkt_info));
CTASSERT(sizeof(struct if_rxd_info_pad) == sizeof(struct if_rxd_info));

static inline void
pkt_info_zero(if_pkt_info_t pi)
{
	if_pkt_info_pad_t pi_pad;

	pi_pad = (if_pkt_info_pad_t)pi;
	pi_pad->pkt_val[0] = 0; pi_pad->pkt_val[1] = 0; pi_pad->pkt_val[2] = 0;
	pi_pad->pkt_val[3] = 0; pi_pad->pkt_val[4] = 0; pi_pad->pkt_val[5] = 0;
#ifndef __LP64__
	pi_pad->pkt_val[6] = 0; pi_pad->pkt_val[7] = 0; pi_pad->pkt_val[8] = 0;
	pi_pad->pkt_val[9] = 0; pi_pad->pkt_val[10] = 0;
#endif
}

static inline void
rxd_info_zero(if_rxd_info_t ri)
{
	if_rxd_info_pad_t ri_pad;
	int i;

	ri_pad = (if_rxd_info_pad_t)ri;
	for (i = 0; i < RXD_LOOP_BOUND; i += 4) {
		ri_pad->rxd_val[i] = 0;
		ri_pad->rxd_val[i+1] = 0;
		ri_pad->rxd_val[i+2] = 0;
		ri_pad->rxd_val[i+3] = 0;
	}
#ifdef __LP64__
	ri_pad->rxd_val[RXD_INFO_SIZE-1] = 0;
#endif
}
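/*
 * Illustrative sketch (not compiled): the *_pad overlays above exist so the
 * hot-path if_pkt_info/if_rxd_info structures can be cleared with a handful
 * of word-sized stores instead of a bzero() call.  The CTASSERTs keep the
 * overlay honest: if a field is ever added to struct if_pkt_info without
 * bumping PKT_INFO_SIZE, the build fails rather than silently leaving the
 * tail of the structure stale.  The same pattern for a hypothetical
 * two-word struct foo would look like:
 *
 *	typedef struct foo_pad { uint64_t val[2]; } *foo_pad_t;
 *	CTASSERT(sizeof(struct foo_pad) == sizeof(struct foo));
 *	((foo_pad_t)f)->val[0] = 0; ((foo_pad_t)f)->val[1] = 0;
 */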
/*
 * Only allow a single packet to take up at most 1/nth of the tx ring
 */
#define MAX_SINGLE_PACKET_FRACTION 12
#define IF_BAD_DMA (bus_addr_t)-1

#define CTX_ACTIVE(ctx) ((if_getdrvflags((ctx)->ifc_ifp) & IFF_DRV_RUNNING))

#define CTX_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_ctx_mtx, _name, "iflib ctx lock", MTX_DEF)
#define CTX_LOCK(ctx) mtx_lock(&(ctx)->ifc_ctx_mtx)
#define CTX_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_ctx_mtx)
#define CTX_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_ctx_mtx)

#define STATE_LOCK_INIT(_sc, _name) mtx_init(&(_sc)->ifc_state_mtx, _name, "iflib state lock", MTX_DEF)
#define STATE_LOCK(ctx) mtx_lock(&(ctx)->ifc_state_mtx)
#define STATE_UNLOCK(ctx) mtx_unlock(&(ctx)->ifc_state_mtx)
#define STATE_LOCK_DESTROY(ctx) mtx_destroy(&(ctx)->ifc_state_mtx)

#define CALLOUT_LOCK(txq)	mtx_lock(&txq->ift_mtx)
#define CALLOUT_UNLOCK(txq)	mtx_unlock(&txq->ift_mtx)

/* Our boot-time initialization hook */
static int	iflib_module_event_handler(module_t, int, void *);

static moduledata_t iflib_moduledata = {
	"iflib",
	iflib_module_event_handler,
	NULL
};

DECLARE_MODULE(iflib, iflib_moduledata, SI_SUB_INIT_IF, SI_ORDER_ANY);
MODULE_VERSION(iflib, 1);

MODULE_DEPEND(iflib, pci, 1, 1, 1);
MODULE_DEPEND(iflib, ether, 1, 1, 1);

TASKQGROUP_DEFINE(if_io_tqg, mp_ncpus, 1);
TASKQGROUP_DEFINE(if_config_tqg, 1, 1);

#ifndef IFLIB_DEBUG_COUNTERS
#ifdef INVARIANTS
#define IFLIB_DEBUG_COUNTERS 1
#else
#define IFLIB_DEBUG_COUNTERS 0
#endif /* !INVARIANTS */
#endif

static SYSCTL_NODE(_net, OID_AUTO, iflib, CTLFLAG_RD, 0,
    "iflib driver parameters");

/*
 * XXX need to ensure that this can't accidentally cause the head to be moved backwards
 */
static int iflib_min_tx_latency = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, min_tx_latency, CTLFLAG_RW,
    &iflib_min_tx_latency, 0, "minimize transmit latency at the possible expense of throughput");
static int iflib_no_tx_batch = 0;
SYSCTL_INT(_net_iflib, OID_AUTO, no_tx_batch, CTLFLAG_RW,
    &iflib_no_tx_batch, 0, "do not batch transmit packets: lower latency at the possible expense of throughput");


#if IFLIB_DEBUG_COUNTERS

static int iflib_tx_seen;
static int iflib_tx_sent;
static int iflib_tx_encap;
static int iflib_rx_allocs;
static int iflib_fl_refills;
static int iflib_fl_refills_large;
static int iflib_tx_frees;

SYSCTL_INT(_net_iflib, OID_AUTO, tx_seen, CTLFLAG_RD,
    &iflib_tx_seen, 0, "# tx mbufs seen");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_sent, CTLFLAG_RD,
    &iflib_tx_sent, 0, "# tx mbufs sent");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_encap, CTLFLAG_RD,
    &iflib_tx_encap, 0, "# tx mbufs encapped");
SYSCTL_INT(_net_iflib, OID_AUTO, tx_frees, CTLFLAG_RD,
    &iflib_tx_frees, 0, "# tx frees");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_allocs, CTLFLAG_RD,
    &iflib_rx_allocs, 0, "# rx allocations");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills, CTLFLAG_RD,
    &iflib_fl_refills, 0, "# refills");
SYSCTL_INT(_net_iflib, OID_AUTO, fl_refills_large, CTLFLAG_RD,
    &iflib_fl_refills_large, 0, "# large refills");


static int iflib_txq_drain_flushing;
static int iflib_txq_drain_oactive;
static int iflib_txq_drain_notready;
static int iflib_txq_drain_encapfail;

SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_flushing, CTLFLAG_RD,
    &iflib_txq_drain_flushing, 0, "# drain flushes");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_oactive, CTLFLAG_RD,
    &iflib_txq_drain_oactive, 0, "# drain oactives");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_notready, CTLFLAG_RD,
    &iflib_txq_drain_notready, 0, "# drain notready");
SYSCTL_INT(_net_iflib, OID_AUTO, txq_drain_encapfail, CTLFLAG_RD,
    &iflib_txq_drain_encapfail, 0, "# drain encap fails");


static int iflib_encap_load_mbuf_fail;
static int iflib_encap_pad_mbuf_fail;
static int iflib_encap_txq_avail_fail;
static int iflib_encap_txd_encap_fail;

SYSCTL_INT(_net_iflib, OID_AUTO, encap_load_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_load_mbuf_fail, 0, "# busdma load failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_pad_mbuf_fail, CTLFLAG_RD,
    &iflib_encap_pad_mbuf_fail, 0, "# runt frame pad failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txq_avail_fail, CTLFLAG_RD,
    &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
SYSCTL_INT(_net_iflib, OID_AUTO, encap_txd_encap_fail, CTLFLAG_RD,
    &iflib_encap_txd_encap_fail, 0, "# driver encap failures");

static int iflib_task_fn_rxs;
static int iflib_rx_intr_enables;
static int iflib_fast_intrs;
static int iflib_intr_link;
static int iflib_intr_msix;
static int iflib_rx_unavail;
static int iflib_rx_ctx_inactive;
static int iflib_rx_zero_len;
static int iflib_rx_if_input;
static int iflib_rx_mbuf_null;
static int iflib_rxd_flush;

static int iflib_verbose_debug;

SYSCTL_INT(_net_iflib, OID_AUTO, intr_link, CTLFLAG_RD,
    &iflib_intr_link, 0, "# intr link calls");
SYSCTL_INT(_net_iflib, OID_AUTO, intr_msix, CTLFLAG_RD,
    &iflib_intr_msix, 0, "# intr msix calls");
SYSCTL_INT(_net_iflib, OID_AUTO, task_fn_rx, CTLFLAG_RD,
    &iflib_task_fn_rxs, 0, "# task_fn_rx calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_intr_enables, CTLFLAG_RD,
    &iflib_rx_intr_enables, 0, "# rx intr enables");
SYSCTL_INT(_net_iflib, OID_AUTO, fast_intrs, CTLFLAG_RD,
    &iflib_fast_intrs, 0, "# fast_intr calls");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_unavail, CTLFLAG_RD,
    &iflib_rx_unavail, 0, "# times rxeof called with no available data");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_ctx_inactive, CTLFLAG_RD,
    &iflib_rx_ctx_inactive, 0, "# times rxeof called with inactive context");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_zero_len, CTLFLAG_RD,
    &iflib_rx_zero_len, 0, "# times rxeof saw zero len mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_if_input, CTLFLAG_RD,
    &iflib_rx_if_input, 0, "# times rxeof called if_input");
SYSCTL_INT(_net_iflib, OID_AUTO, rx_mbuf_null, CTLFLAG_RD,
    &iflib_rx_mbuf_null, 0, "# times rxeof got null mbuf");
SYSCTL_INT(_net_iflib, OID_AUTO, rxd_flush, CTLFLAG_RD,
    &iflib_rxd_flush, 0, "# times rxd_flush called");
SYSCTL_INT(_net_iflib, OID_AUTO, verbose_debug, CTLFLAG_RW,
    &iflib_verbose_debug, 0, "enable verbose debugging");

#define DBG_COUNTER_INC(name) atomic_add_int(&(iflib_ ## name), 1)
static void
iflib_debug_reset(void)
{
	iflib_tx_seen = iflib_tx_sent = iflib_tx_encap = iflib_rx_allocs =
		iflib_fl_refills = iflib_fl_refills_large = iflib_tx_frees =
		iflib_txq_drain_flushing = iflib_txq_drain_oactive =
		iflib_txq_drain_notready = iflib_txq_drain_encapfail =
		iflib_encap_load_mbuf_fail = iflib_encap_pad_mbuf_fail =
		iflib_encap_txq_avail_fail = iflib_encap_txd_encap_fail =
		iflib_task_fn_rxs = iflib_rx_intr_enables = iflib_fast_intrs =
		iflib_intr_link = iflib_intr_msix = iflib_rx_unavail =
		iflib_rx_ctx_inactive = iflib_rx_zero_len = iflib_rx_if_input =
		iflib_rx_mbuf_null = iflib_rxd_flush = 0;
}

#else
#define DBG_COUNTER_INC(name)
static void iflib_debug_reset(void) {}
#endif


#define IFLIB_DEBUG 0

static void iflib_tx_structures_free(if_ctx_t ctx);
static void iflib_rx_structures_free(if_ctx_t ctx);
static int iflib_queues_alloc(if_ctx_t ctx);
static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
static int iflib_qset_structures_setup(if_ctx_t ctx);
static int iflib_msix_init(if_ctx_t ctx);
static int iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filterarg, int *rid, char *str);
static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
static uint32_t iflib_txq_can_drain(struct ifmp_ring *);
static int iflib_register(if_ctx_t);
static void iflib_init_locked(if_ctx_t ctx);
static void iflib_add_device_sysctl_pre(if_ctx_t ctx);
static void iflib_add_device_sysctl_post(if_ctx_t ctx);
static void iflib_ifmp_purge(iflib_txq_t txq);
static void _iflib_pre_assert(if_softc_ctx_t scctx);
static void iflib_stop(if_ctx_t ctx);
static void iflib_if_init_locked(if_ctx_t ctx);
static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf * iflib_fixup_rx(struct mbuf *m);
#endif

#ifdef DEV_NETMAP
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>

MODULE_DEPEND(iflib, netmap, 1, 1, 1);

static int netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init);

/*
 * device-specific sysctl variables:
 *
 * iflib_crcstrip: 0: keep CRC in rx frames (default), 1: strip it.
 *	During regular operations the CRC is stripped, but on some
 *	hardware reception of frames not multiple of 64 is slower,
 *	so using crcstrip=0 helps in benchmarks.
 *
 * iflib_rx_miss, iflib_rx_miss_bufs:
 *	count packets that might be missed due to lost interrupts.
 */
SYSCTL_DECL(_dev_netmap);
/*
 * The xl driver by default strips CRCs and we do not override it.
 */

int iflib_crcstrip = 1;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_crcstrip,
    CTLFLAG_RW, &iflib_crcstrip, 1, "strip CRC on rx frames");

int iflib_rx_miss, iflib_rx_miss_bufs;
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss,
    CTLFLAG_RW, &iflib_rx_miss, 0, "potentially missed rx intr");
SYSCTL_INT(_dev_netmap, OID_AUTO, iflib_rx_miss_bufs,
    CTLFLAG_RW, &iflib_rx_miss_bufs, 0, "potentially missed rx intr bufs");

/*
 * Register/unregister. We are already under netmap lock.
 * Only called on the first register or the last unregister.
 */
static int
iflib_netmap_register(struct netmap_adapter *na, int onoff)
{
	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	int status;

	CTX_LOCK(ctx);
	IFDI_INTR_DISABLE(ctx);

	/* Tell the stack that the interface is no longer active */
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);

	if (!CTX_IS_VF(ctx))
		IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip);

	/* enable or disable flags and callbacks in na and ifp */
	if (onoff) {
		nm_set_native_flags(na);
	} else {
		nm_clear_native_flags(na);
	}
	iflib_stop(ctx);
	iflib_init_locked(ctx);
	IFDI_CRCSTRIP_SET(ctx, onoff, iflib_crcstrip); // XXX why twice ?
	status = ifp->if_drv_flags & IFF_DRV_RUNNING ? 0 : 1;
	if (status)
		nm_clear_native_flags(na);
	CTX_UNLOCK(ctx);
	return (status);
}
static int
netmap_fl_refill(iflib_rxq_t rxq, struct netmap_kring *kring, uint32_t nm_i, bool init)
{
	struct netmap_adapter *na = kring->na;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int head = kring->rhead;
	struct netmap_ring *ring = kring->ring;
	bus_dmamap_t *map;
	struct if_rxd_update iru;
	if_ctx_t ctx = rxq->ifr_ctx;
	iflib_fl_t fl = &rxq->ifr_fl[0];
	uint32_t refill_pidx, nic_i;

	if (nm_i == head && __predict_true(!init))
		return 0;
	iru_init(&iru, rxq, 0 /* flid */);
	map = fl->ifl_sds.ifsd_map;
	refill_pidx = netmap_idx_k2n(kring, nm_i);
	/*
	 * IMPORTANT: we must leave one free slot in the ring,
	 * so move head back by one unit
	 */
	head = nm_prev(head, lim);
	while (nm_i != head) {
		for (int tmp_pidx = 0; tmp_pidx < IFLIB_MAX_RX_REFRESH && nm_i != head; tmp_pidx++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			void *addr = PNMB(na, slot, &fl->ifl_bus_addrs[tmp_pidx]);
			uint32_t nic_i_dma = refill_pidx;
			nic_i = netmap_idx_k2n(kring, nm_i);

			MPASS(tmp_pidx < IFLIB_MAX_RX_REFRESH);

			if (addr == NETMAP_BUF_BASE(na)) /* bad buf */
				return netmap_ring_reinit(kring);

			fl->ifl_vm_addrs[tmp_pidx] = addr;
			if (__predict_false(init) && map) {
				netmap_load_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			} else if (map && (slot->flags & NS_BUF_CHANGED)) {
				/* buffer has changed, reload map */
				netmap_reload_map(na, fl->ifl_ifdi->idi_tag, map[nic_i], addr);
			}
			slot->flags &= ~NS_BUF_CHANGED;

			nm_i = nm_next(nm_i, lim);
			fl->ifl_rxd_idxs[tmp_pidx] = nic_i = nm_next(nic_i, lim);
			if (nm_i != head && tmp_pidx < IFLIB_MAX_RX_REFRESH-1)
				continue;

			iru.iru_pidx = refill_pidx;
			iru.iru_count = tmp_pidx+1;
			ctx->isc_rxd_refill(ctx->ifc_softc, &iru);

			refill_pidx = nic_i;
			if (map == NULL)
				continue;

			for (int n = 0; n < iru.iru_count; n++) {
				bus_dmamap_sync(fl->ifl_ifdi->idi_tag, map[nic_i_dma],
				    BUS_DMASYNC_PREREAD);
				/* XXX - change this to not use the netmap func*/
				nic_i_dma = nm_next(nic_i_dma, lim);
			}
		}
	}
	kring->nr_hwcur = head;

	if (map)
		bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	ctx->isc_rxd_flush(ctx->ifc_softc, rxq->ifr_id, fl->ifl_id, nic_i);
	return (0);
}
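/*
 * Worked example (illustrative): with nkr_num_slots = 512 (lim = 511) and
 * rhead = 0, the "head = nm_prev(head, lim)" above yields 511, so the loop
 * refills at most 511 of the 512 slots.  Sacrificing one slot is the usual
 * ring-protocol trick that keeps a completely full ring distinguishable
 * from an empty one (producer index == consumer index would otherwise be
 * ambiguous).
 */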
/*
 * Reconcile kernel and user view of the transmit ring.
 *
 * All information is in the kring.
 * Userspace wants to send packets up to the one before kring->rhead,
 * kernel knows kring->nr_hwcur is the first unsent packet.
 *
 * Here we push packets out (as many as possible), and possibly
 * reclaim buffers from previously completed transmission.
 *
 * The caller (netmap) guarantees that there is only one instance
 * running at any time. Any interference with other driver
 * methods should be handled by the individual drivers.
 */
static int
iflib_netmap_txsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct ifnet *ifp = na->ifp;
	struct netmap_ring *ring = kring->ring;
	u_int nm_i;	/* index into the netmap ring */
	u_int nic_i;	/* index into the NIC ring */
	u_int n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = kring->rhead;
	struct if_pkt_info pi;

	/*
	 * interrupts on every tx packet are expensive so request
	 * them every half ring, or where NS_REPORT is set
	 */
	u_int report_frequency = kring->nkr_num_slots >> 1;
	/* device-specific */
	if_ctx_t ctx = ifp->if_softc;
	iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];

	if (txq->ift_sds.ifsd_map)
		bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);


	/*
	 * First part: process new packets to send.
	 * nm_i is the current index in the netmap ring,
	 * nic_i is the corresponding index in the NIC ring.
	 *
	 * If we have packets to send (nm_i != head)
	 * iterate over the netmap ring, fetch length and update
	 * the corresponding slot in the NIC ring. Some drivers also
	 * need to update the buffer's physical address in the NIC slot
	 * even if NS_BUF_CHANGED is not set (PNMB computes the addresses).
	 *
	 * The netmap_reload_map() call is especially expensive,
	 * even when (as in this case) the tag is 0, so only do it
	 * when the buffer has actually changed.
	 *
	 * If possible do not set the report/intr bit on all slots,
	 * but only a few times per ring or when NS_REPORT is set.
	 *
	 * Finally, on 10G and faster drivers, it might be useful
	 * to prefetch the next slot and txr entry.
	 */

	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);
	pkt_info_zero(&pi);
	pi.ipi_segs = txq->ift_segs;
	pi.ipi_qsidx = kring->ring_id;
	if (nm_i != head) {	/* we have new packets to send */
		nic_i = netmap_idx_k2n(kring, nm_i);

		__builtin_prefetch(&ring->slot[nm_i]);
		__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
		if (txq->ift_sds.ifsd_map)
			__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);

		for (n = 0; nm_i != head; n++) {
			struct netmap_slot *slot = &ring->slot[nm_i];
			u_int len = slot->len;
			uint64_t paddr;
			void *addr = PNMB(na, slot, &paddr);
			int flags = (slot->flags & NS_REPORT ||
				nic_i == 0 || nic_i == report_frequency) ?
				IPI_TX_INTR : 0;

			/* device-specific */
			pi.ipi_len = len;
			pi.ipi_segs[0].ds_addr = paddr;
			pi.ipi_segs[0].ds_len = len;
			pi.ipi_nsegs = 1;
			pi.ipi_ndescs = 0;
			pi.ipi_pidx = nic_i;
			pi.ipi_flags = flags;

			/* Fill the slot in the NIC ring. */
			ctx->isc_txd_encap(ctx->ifc_softc, &pi);

			/* prefetch for next round */
			__builtin_prefetch(&ring->slot[nm_i + 1]);
			__builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
			if (txq->ift_sds.ifsd_map) {
				__builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);

				NM_CHECK_ADDR_LEN(na, addr, len);

				if (slot->flags & NS_BUF_CHANGED) {
					/* buffer has changed, reload map */
					netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
				}
				/* make sure changes to the buffer are synced */
				bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
				    BUS_DMASYNC_PREWRITE);
			}
			slot->flags &= ~(NS_REPORT | NS_BUF_CHANGED);
			nm_i = nm_next(nm_i, lim);
			nic_i = nm_next(nic_i, lim);
		}
		kring->nr_hwcur = head;

		/* synchronize the NIC ring */
		if (txq->ift_sds.ifsd_map)
			bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* (re)start the tx unit up to slot nic_i (excluded) */
		ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (iflib_tx_credits_update(ctx, txq)) {
		/* some tx completed, increment avail */
		nic_i = txq->ift_cidx_processed;
		kring->nr_hwtail = nm_prev(netmap_idx_n2k(kring, nic_i), lim);
	}
	return (0);
}
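/*
 * Worked example (illustrative): for a 1024-slot kring, report_frequency is
 * 1024 >> 1 = 512, so in the encap loop above IPI_TX_INTR is requested only
 * for nic_i == 0, nic_i == 512, or slots with NS_REPORT set -- roughly two
 * tx completion interrupts per traversal of the ring instead of one per
 * packet.
 */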
/*
 * Reconcile kernel and user view of the receive ring.
 * Same as for the txsync, this routine must be efficient.
 * The caller guarantees a single invocation, but races against
 * the rest of the driver should be handled here.
 *
 * On call, kring->rhead is the first packet that userspace wants
 * to keep, and kring->rcur is the wakeup point.
 * The kernel has previously reported packets up to kring->rtail.
 *
 * If (flags & NAF_FORCE_READ) also check for incoming packets irrespective
 * of whether or not we received an interrupt.
 */
static int
iflib_netmap_rxsync(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_ring *ring = kring->ring;
	uint32_t nm_i;	/* index into the netmap ring */
	uint32_t nic_i;	/* index into the NIC ring */
	u_int i, n;
	u_int const lim = kring->nkr_num_slots - 1;
	u_int const head = netmap_idx_n2k(kring, kring->rhead);
	int force_update = (flags & NAF_FORCE_READ) || kring->nr_kflags & NKR_PENDINTR;
	struct if_rxd_info ri;

	struct ifnet *ifp = na->ifp;
	if_ctx_t ctx = ifp->if_softc;
	iflib_rxq_t rxq = &ctx->ifc_rxqs[kring->ring_id];
	iflib_fl_t fl = rxq->ifr_fl;
	if (head > lim)
		return netmap_ring_reinit(kring);

	/* XXX check sync modes */
	for (i = 0, fl = rxq->ifr_fl; i < rxq->ifr_nfl; i++, fl++) {
		if (fl->ifl_sds.ifsd_map == NULL)
			continue;
		bus_dmamap_sync(rxq->ifr_fl[i].ifl_desc_tag, fl->ifl_ifdi->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	}
	/*
	 * First part: import newly received packets.
	 *
	 * nm_i is the index of the next free slot in the netmap ring,
	 * nic_i is the index of the next received packet in the NIC ring,
	 * and they may differ in case if_init() has been called while
	 * in netmap mode. For the receive ring we have
	 *
	 *	nic_i = rxr->next_check;
	 *	nm_i = kring->nr_hwtail (previous)
	 * and
	 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 *
	 * rxr->next_check is set to 0 on a ring reinit
	 */
	if (netmap_no_pendintr || force_update) {
		int crclen = iflib_crcstrip ? 0 : 4;
		int error, avail;

		for (i = 0; i < rxq->ifr_nfl; i++) {
			fl = &rxq->ifr_fl[i];
			nic_i = fl->ifl_cidx;
			nm_i = netmap_idx_n2k(kring, nic_i);
			avail = iflib_rxd_avail(ctx, rxq, nic_i, USHRT_MAX);
			for (n = 0; avail > 0; n++, avail--) {
				rxd_info_zero(&ri);
				ri.iri_frags = rxq->ifr_frags;
				ri.iri_qsidx = kring->ring_id;
				ri.iri_ifp = ctx->ifc_ifp;
				ri.iri_cidx = nic_i;

				error = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri);
				ring->slot[nm_i].len = error ? 0 : ri.iri_len - crclen;
				ring->slot[nm_i].flags = 0;
				if (fl->ifl_sds.ifsd_map)
					bus_dmamap_sync(fl->ifl_ifdi->idi_tag,
					    fl->ifl_sds.ifsd_map[nic_i], BUS_DMASYNC_POSTREAD);
				nm_i = nm_next(nm_i, lim);
				nic_i = nm_next(nic_i, lim);
			}
			if (n) { /* update the state variables */
				if (netmap_no_pendintr && !force_update) {
					/* diagnostics */
					iflib_rx_miss++;
					iflib_rx_miss_bufs += n;
				}
				fl->ifl_cidx = nic_i;
				kring->nr_hwtail = netmap_idx_k2n(kring, nm_i);
			}
			kring->nr_kflags &= ~NKR_PENDINTR;
		}
	}
	/*
	 * Second part: skip past packets that userspace has released.
	 * (kring->nr_hwcur to head excluded),
	 * and make the buffers available for reception.
	 * As usual nm_i is the index in the netmap ring,
	 * nic_i is the index in the NIC ring, and
	 * nm_i == (nic_i + kring->nkr_hwofs) % ring_size
	 */
	/* XXX not sure how this will work with multiple free lists */
	nm_i = netmap_idx_n2k(kring, kring->nr_hwcur);

	return (netmap_fl_refill(rxq, kring, nm_i, false));
}
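/*
 * Worked example (illustrative): the identity
 *	nm_i == (nic_i + kring->nkr_hwofs) % ring_size
 * in the comments above means that with ring_size = 512 and nkr_hwofs = 2,
 * NIC index 510 corresponds to netmap slot (510 + 2) % 512 = 0.
 * netmap_idx_n2k() and netmap_idx_k2n() apply this offset in the two
 * directions, so the NIC and netmap views can be rotated relative to each
 * other after a reinit without copying any buffers.
 */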
static int
iflib_netmap_attach(if_ctx_t ctx)
{
	struct netmap_adapter na;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;

	bzero(&na, sizeof(na));

	na.ifp = ctx->ifc_ifp;
	na.na_flags = NAF_BDG_MAYSLEEP;
	MPASS(ctx->ifc_softc_ctx.isc_ntxqsets);
	MPASS(ctx->ifc_softc_ctx.isc_nrxqsets);

	na.num_tx_desc = scctx->isc_ntxd[0];
	na.num_rx_desc = scctx->isc_nrxd[0];
	na.nm_txsync = iflib_netmap_txsync;
	na.nm_rxsync = iflib_netmap_rxsync;
	na.nm_register = iflib_netmap_register;
	na.num_tx_rings = ctx->ifc_softc_ctx.isc_ntxqsets;
	na.num_rx_rings = ctx->ifc_softc_ctx.isc_nrxqsets;
	return (netmap_attach(&na));
}

static void
iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_slot *slot;

	slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
	if (slot == NULL)
		return;
	if (txq->ift_sds.ifsd_map == NULL)
		return;

	for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxd[0]; i++) {

		/*
		 * In netmap mode, set the map for the packet buffer.
		 * NOTE: Some drivers (not this one) also need to set
		 * the physical buffer address in the NIC ring.
		 * netmap_idx_n2k() maps a nic index, i, into the corresponding
		 * netmap slot index, si
		 */
		int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
		netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
	}
}

static void
iflib_netmap_rxq_init(if_ctx_t ctx, iflib_rxq_t rxq)
{
	struct netmap_adapter *na = NA(ctx->ifc_ifp);
	struct netmap_kring *kring = na->rx_rings[rxq->ifr_id];
	struct netmap_slot *slot;
	uint32_t nm_i;

	slot = netmap_reset(na, NR_RX, rxq->ifr_id, 0);
	if (slot == NULL)
		return;
	nm_i = netmap_idx_n2k(kring, 0);
	netmap_fl_refill(rxq, kring, nm_i, true);
}

#define iflib_netmap_detach(ifp) netmap_detach(ifp)

#else
#define iflib_netmap_txq_init(ctx, txq)
#define iflib_netmap_rxq_init(ctx, rxq)
#define iflib_netmap_detach(ifp)

#define iflib_netmap_attach(ctx) (0)
#define netmap_rx_irq(ifp, qid, budget) (0)
#define netmap_tx_irq(ifp, qid) do {} while (0)

#endif

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
static __inline void
prefetch2cachelines(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
#if (CACHE_LINE_SIZE < 128)
	__asm volatile("prefetcht0 %0" :: "m" (*(((unsigned long *)x)+CACHE_LINE_SIZE/(sizeof(unsigned long)))));
#endif
}
#else
#define prefetch(x)
#define prefetch2cachelines(x)
#endif

static void
iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid)
{
	iflib_fl_t fl;

	fl = &rxq->ifr_fl[flid];
	iru->iru_paddrs = fl->ifl_bus_addrs;
	iru->iru_vaddrs = &fl->ifl_vm_addrs[0];
	iru->iru_idxs = fl->ifl_rxd_idxs;
	iru->iru_qsidx = rxq->ifr_id;
	iru->iru_buf_size = fl->ifl_buf_size;
	iru->iru_flidx = fl->ifl_id;
}

static void
_iflib_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	if (err)
		return;
	*(bus_addr_t *) arg = segs[0].ds_addr;
}
int
iflib_dma_alloc(if_ctx_t ctx, int size, iflib_dma_info_t dma, int mapflags)
{
	int err;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	device_t dev = ctx->ifc_dev;

	KASSERT(sctx->isc_q_align != 0, ("alignment value not initialized"));

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
				sctx->isc_q_align, 0,	/* alignment, bounds */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				size,			/* maxsize */
				1,			/* nsegments */
				size,			/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				NULL,			/* lockfunc */
				NULL,			/* lockarg */
				&dma->idi_tag);
	if (err) {
		device_printf(dev,
		    "%s: bus_dma_tag_create failed: %d\n",
		    __func__, err);
		goto fail_0;
	}

	err = bus_dmamem_alloc(dma->idi_tag, (void**) &dma->idi_vaddr,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT | BUS_DMA_ZERO, &dma->idi_map);
	if (err) {
		device_printf(dev,
		    "%s: bus_dmamem_alloc(%ju) failed: %d\n",
		    __func__, (uintmax_t)size, err);
		goto fail_1;
	}

	dma->idi_paddr = IF_BAD_DMA;
	err = bus_dmamap_load(dma->idi_tag, dma->idi_map, dma->idi_vaddr,
	    size, _iflib_dmamap_cb, &dma->idi_paddr, mapflags | BUS_DMA_NOWAIT);
	if (err || dma->idi_paddr == IF_BAD_DMA) {
		device_printf(dev,
		    "%s: bus_dmamap_load failed: %d\n",
		    __func__, err);
		goto fail_2;
	}

	dma->idi_size = size;
	return (0);

fail_2:
	bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
fail_1:
	bus_dma_tag_destroy(dma->idi_tag);
fail_0:
	dma->idi_tag = NULL;

	return (err);
}

int
iflib_dma_alloc_multi(if_ctx_t ctx, int *sizes, iflib_dma_info_t *dmalist, int mapflags, int count)
{
	int i, err = 0;
	iflib_dma_info_t *dmaiter;

	dmaiter = dmalist;
	for (i = 0; i < count; i++, dmaiter++) {
		if ((err = iflib_dma_alloc(ctx, sizes[i], *dmaiter, mapflags)) != 0)
			break;
	}
	if (err)
		iflib_dma_free_multi(dmalist, i);
	return (err);
}

void
iflib_dma_free(iflib_dma_info_t dma)
{
	if (dma->idi_tag == NULL)
		return;
	if (dma->idi_paddr != IF_BAD_DMA) {
		bus_dmamap_sync(dma->idi_tag, dma->idi_map,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(dma->idi_tag, dma->idi_map);
		dma->idi_paddr = IF_BAD_DMA;
	}
	if (dma->idi_vaddr != NULL) {
		bus_dmamem_free(dma->idi_tag, dma->idi_vaddr, dma->idi_map);
		dma->idi_vaddr = NULL;
	}
	bus_dma_tag_destroy(dma->idi_tag);
	dma->idi_tag = NULL;
}

void
iflib_dma_free_multi(iflib_dma_info_t *dmalist, int count)
{
	int i;
	iflib_dma_info_t *dmaiter = dmalist;

	for (i = 0; i < count; i++, dmaiter++)
		iflib_dma_free(*dmaiter);
}
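/*
 * Usage sketch (illustrative only, not compiled): a driver typically calls
 * iflib_dma_alloc() once per hardware ring at attach time and pairs it with
 * iflib_dma_free() at detach.  The names below are hypothetical.
 */
#if 0
static int
example_alloc_ring(if_ctx_t ctx, iflib_dma_info_t di, int ndesc, int desc_sz)
{
	/* on success idi_vaddr/idi_paddr are valid and the map is loaded */
	if (iflib_dma_alloc(ctx, ndesc * desc_sz, di, BUS_DMA_NOWAIT))
		return (ENOMEM);
	/* ... program di->idi_paddr into the NIC's ring-base register ... */
	return (0);
}
#endif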
#ifdef EARLY_AP_STARTUP
static const int iflib_started = 1;
#else
/*
 * We used to abuse the smp_started flag to decide if the queues have been
 * fully initialized (by late taskqgroup_adjust() calls in a SYSINIT()).
 * That gave bad races, since the SYSINIT() runs strictly after smp_started
 * is set.  Run a SYSINIT() strictly after that to just set a usable
 * completion flag.
 */

static int iflib_started;

static void
iflib_record_started(void *arg)
{
	iflib_started = 1;
}

SYSINIT(iflib_record_started, SI_SUB_SMP + 1, SI_ORDER_FIRST,
	iflib_record_started, NULL);
#endif

static int
iflib_fast_intr(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
iflib_fast_intr_rxtx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;
	iflib_rxq_t rxq = (iflib_rxq_t)info->ifi_ctx;
	if_ctx_t ctx = rxq->ifr_ctx;
	int i, cidx;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	for (i = 0; i < rxq->ifr_ntxqirq; i++) {
		qidx_t txqid = rxq->ifr_txqid[i];

		if (!ctx->isc_txd_credits_update(ctx->ifc_softc, txqid, false)) {
			IFDI_TX_QUEUE_INTR_ENABLE(ctx, txqid);
			continue;
		}
		GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task);
	}
	if (ctx->ifc_sctx->isc_flags & IFLIB_HAS_RXCQ)
		cidx = rxq->ifr_cq_cidx;
	else
		cidx = rxq->ifr_fl[0].ifl_cidx;
	if (iflib_rxd_avail(ctx, rxq, cidx, 1))
		GROUPTASK_ENQUEUE(gtask);
	else
		IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id);
	return (FILTER_HANDLED);
}


static int
iflib_fast_intr_ctx(void *arg)
{
	iflib_filter_info_t info = arg;
	struct grouptask *gtask = info->ifi_task;

	if (!iflib_started)
		return (FILTER_HANDLED);

	DBG_COUNTER_INC(fast_intrs);
	if (info->ifi_filter != NULL && info->ifi_filter(info->ifi_filter_arg) == FILTER_HANDLED)
		return (FILTER_HANDLED);

	GROUPTASK_ENQUEUE(gtask);
	return (FILTER_HANDLED);
}

static int
_iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid,
		 driver_filter_t filter, driver_intr_t handler, void *arg,
		 char *name)
{
	int rc, flags;
	struct resource *res;
	void *tag = NULL;
	device_t dev = ctx->ifc_dev;

	flags = RF_ACTIVE;
	if (ctx->ifc_flags & IFC_LEGACY)
		flags |= RF_SHAREABLE;
	MPASS(rid < 512);
	irq->ii_rid = rid;
	res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq->ii_rid, flags);
	if (res == NULL) {
		device_printf(dev,
		    "failed to allocate IRQ for rid %d, name %s.\n", rid, name);
		return (ENOMEM);
	}
	irq->ii_res = res;
	KASSERT(filter == NULL || handler == NULL, ("filter and handler can't both be non-NULL"));
	rc = bus_setup_intr(dev, res, INTR_MPSAFE | INTR_TYPE_NET,
	    filter, handler, arg, &tag);
	if (rc != 0) {
		device_printf(dev,
		    "failed to setup interrupt for rid %d, name %s: %d\n",
		    rid, name ? name : "unknown", rc);
		return (rc);
	} else if (name)
		bus_describe_intr(dev, res, tag, "%s", name);

	irq->ii_tag = tag;
	return (0);
}
/*********************************************************************
 *
 *  Allocate memory for tx_buffer structures. The tx_buffer stores all
 *  the information needed to transmit a packet on the wire. This is
 *  called only once at attach, setup is done every reset.
 *
 **********************************************************************/

static int
iflib_txsd_alloc(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	int err, nsegments, ntsosegments;

	nsegments = scctx->isc_tx_nsegments;
	ntsosegments = scctx->isc_tx_tso_segments_max;
	MPASS(scctx->isc_ntxd[0] > 0);
	MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
	MPASS(nsegments > 0);
	MPASS(ntsosegments > 0);
	/*
	 * Setup DMA descriptor areas.
	 */
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       sctx->isc_tx_maxsize,	/* maxsize */
			       nsegments,		/* nsegments */
			       sctx->isc_tx_maxsegsize,	/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_desc_tag))) {
		device_printf(dev,"Unable to allocate TX DMA tag: %d\n", err);
		device_printf(dev,"maxsize: %ju nsegments: %d maxsegsize: %ju\n",
		    (uintmax_t)sctx->isc_tx_maxsize, nsegments, (uintmax_t)sctx->isc_tx_maxsegsize);
		goto fail;
	}
	if ((err = bus_dma_tag_create(bus_get_dma_tag(dev),
			       1, 0,			/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       scctx->isc_tx_tso_size_max,	/* maxsize */
			       ntsosegments,		/* nsegments */
			       scctx->isc_tx_tso_segsize_max,	/* maxsegsize */
			       0,			/* flags */
			       NULL,			/* lockfunc */
			       NULL,			/* lockfuncarg */
			       &txq->ift_tso_desc_tag))) {
		device_printf(dev,"Unable to allocate TX TSO DMA tag: %d\n", err);

		goto fail;
	}
	if (!(txq->ift_sds.ifsd_flags =
	    (uint8_t *) malloc(sizeof(uint8_t) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}
	if (!(txq->ift_sds.ifsd_m =
	    (struct mbuf **) malloc(sizeof(struct mbuf *) *
	    scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer memory\n");
		err = ENOMEM;
		goto fail;
	}

	/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
	if ((ctx->ifc_flags & IFC_DMAR) == 0)
		return (0);

	if (!(txq->ift_sds.ifsd_map =
	    (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate tx_buffer map memory\n");
		err = ENOMEM;
		goto fail;
	}

	for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
		err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
		if (err != 0) {
			device_printf(dev, "Unable to create TX DMA map\n");
			goto fail;
		}
	}
#endif
	return (0);
fail:
	/* We free all, it handles case where we are in the middle */
	iflib_tx_structures_free(ctx);
	return (err);
}

static void
iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	bus_dmamap_t map;

	map = NULL;
	if (txq->ift_sds.ifsd_map != NULL)
		map = txq->ift_sds.ifsd_map[i];
	if (map != NULL) {
		bus_dmamap_unload(txq->ift_desc_tag, map);
		bus_dmamap_destroy(txq->ift_desc_tag, map);
		txq->ift_sds.ifsd_map[i] = NULL;
	}
}

static void
iflib_txq_destroy(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;

	for (int i = 0; i < txq->ift_size; i++)
		iflib_txsd_destroy(ctx, txq, i);
	if (txq->ift_sds.ifsd_map != NULL) {
		free(txq->ift_sds.ifsd_map, M_IFLIB);
		txq->ift_sds.ifsd_map = NULL;
	}
	if (txq->ift_sds.ifsd_m != NULL) {
		free(txq->ift_sds.ifsd_m, M_IFLIB);
		txq->ift_sds.ifsd_m = NULL;
	}
	if (txq->ift_sds.ifsd_flags != NULL) {
		free(txq->ift_sds.ifsd_flags, M_IFLIB);
		txq->ift_sds.ifsd_flags = NULL;
	}
	if (txq->ift_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_desc_tag);
		txq->ift_desc_tag = NULL;
	}
	if (txq->ift_tso_desc_tag != NULL) {
		bus_dma_tag_destroy(txq->ift_tso_desc_tag);
		txq->ift_tso_desc_tag = NULL;
	}
}

static void
iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
{
	struct mbuf **mp;

	mp = &txq->ift_sds.ifsd_m[i];
	if (*mp == NULL)
		return;

	if (txq->ift_sds.ifsd_map != NULL) {
		bus_dmamap_sync(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i],
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txq->ift_desc_tag,
		    txq->ift_sds.ifsd_map[i]);
	}
	m_free(*mp);
	DBG_COUNTER_INC(tx_frees);
	*mp = NULL;
}

static int
iflib_txq_setup(iflib_txq_t txq)
{
	if_ctx_t ctx = txq->ift_ctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	iflib_dma_info_t di;
	int i;

	/* Set number of descriptors available */
	txq->ift_qstatus = IFLIB_QUEUE_IDLE;
	/* XXX make configurable */
	txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;

	/* Reset indices */
	txq->ift_cidx_processed = 0;
	txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
	txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];

	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bzero((void *)di->idi_vaddr, di->idi_size);

	IFDI_TXQ_SETUP(ctx, txq->ift_id);
	for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
		bus_dmamap_sync(di->idi_tag, di->idi_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	return (0);
}
/*********************************************************************
 *
 *  Allocate memory for rx_buffer structures. Since we use one
 *  rx_buffer per received packet, the maximum number of rx_buffer's
 *  that we'll need is equal to the number of receive descriptors
 *  that we've allocated.
 *
 **********************************************************************/
static int
iflib_rxsd_alloc(iflib_rxq_t rxq)
{
	if_ctx_t ctx = rxq->ifr_ctx;
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = ctx->ifc_dev;
	iflib_fl_t fl;
	int err;

	MPASS(scctx->isc_nrxd[0] > 0);
	MPASS(scctx->isc_nrxd[rxq->ifr_fl_offset] > 0);

	fl = rxq->ifr_fl;
	for (int i = 0; i < rxq->ifr_nfl; i++, fl++) {
		fl->ifl_size = scctx->isc_nrxd[rxq->ifr_fl_offset]; /* this isn't necessarily the same */
		err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
					 1, 0,			/* alignment, bounds */
					 BUS_SPACE_MAXADDR,	/* lowaddr */
					 BUS_SPACE_MAXADDR,	/* highaddr */
					 NULL, NULL,		/* filter, filterarg */
					 sctx->isc_rx_maxsize,	/* maxsize */
					 sctx->isc_rx_nsegments,	/* nsegments */
					 sctx->isc_rx_maxsegsize,	/* maxsegsize */
					 0,			/* flags */
					 NULL,			/* lockfunc */
					 NULL,			/* lockarg */
					 &fl->ifl_desc_tag);
		if (err) {
			device_printf(dev, "%s: bus_dma_tag_create failed %d\n",
			    __func__, err);
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_flags =
		      (uint8_t *) malloc(sizeof(uint8_t) *
			scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_m =
		      (struct mbuf **) malloc(sizeof(struct mbuf *) *
			scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}
		if (!(fl->ifl_sds.ifsd_cl =
		      (caddr_t *) malloc(sizeof(caddr_t) *
			scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer memory\n");
			err = ENOMEM;
			goto fail;
		}

		/* Create the descriptor buffer dma maps */
#if defined(ACPI_DMAR) || (!(defined(__i386__) || defined(__amd64__)))
		if ((ctx->ifc_flags & IFC_DMAR) == 0)
			continue;

		if (!(fl->ifl_sds.ifsd_map =
		      (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_nrxd[rxq->ifr_fl_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
			device_printf(dev, "Unable to allocate rx_buffer map memory\n");
			err = ENOMEM;
			goto fail;
		}

		for (int i = 0; i < scctx->isc_nrxd[rxq->ifr_fl_offset]; i++) {
			err = bus_dmamap_create(fl->ifl_desc_tag, 0, &fl->ifl_sds.ifsd_map[i]);
			if (err != 0) {
				device_printf(dev, "Unable to create RX buffer DMA map\n");
				goto fail;
			}
		}
#endif
	}
	return (0);

fail:
	iflib_rx_structures_free(ctx);
	return (err);
}


/*
 * Internal service routines
 */

struct rxq_refill_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};

static void
_rxq_refill_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct rxq_refill_cb_arg *cb_arg = arg;

	cb_arg->error = error;
	cb_arg->seg = segs[0];
	cb_arg->nseg = nseg;
}


#ifdef ACPI_DMAR
#define IS_DMAR(ctx) (ctx->ifc_flags & IFC_DMAR)
#else
#define IS_DMAR(ctx) (0)
#endif

/**
 *	_iflib_fl_refill - refill a free-buffer list
 *	@ctx: the iflib context
 *	@fl: the free list to refill
 *	@count: the number of new buffers to allocate
 *
 *	(Re)populate a free-buffer list with up to @count new packet buffers.
 *	The caller must assure that @count does not exceed the queue's capacity.
 */
static void
_iflib_fl_refill(if_ctx_t ctx, iflib_fl_t fl, int count)
{
	struct mbuf *m;
	int idx, frag_idx = fl->ifl_fragidx;
	int pidx = fl->ifl_pidx;
	caddr_t cl, *sd_cl;
	struct mbuf **sd_m;
	uint8_t *sd_flags;
	struct if_rxd_update iru;
	bus_dmamap_t *sd_map;
	int n, i = 0;
	uint64_t bus_addr;
	int err;
	qidx_t credits;

	sd_m = fl->ifl_sds.ifsd_m;
	sd_map = fl->ifl_sds.ifsd_map;
	sd_cl = fl->ifl_sds.ifsd_cl;
	sd_flags = fl->ifl_sds.ifsd_flags;
	idx = pidx;
	credits = fl->ifl_credits;

	n = count;
	MPASS(n > 0);
	MPASS(credits + n <= fl->ifl_size);

	if (pidx < fl->ifl_cidx)
		MPASS(pidx + n <= fl->ifl_cidx);
	if (pidx == fl->ifl_cidx && (credits < fl->ifl_size))
		MPASS(fl->ifl_gen == 0);
	if (pidx > fl->ifl_cidx)
		MPASS(n <= fl->ifl_size - pidx + fl->ifl_cidx);

	DBG_COUNTER_INC(fl_refills);
	if (n > 8)
		DBG_COUNTER_INC(fl_refills_large);
	iru_init(&iru, fl->ifl_rxq, fl->ifl_id);
	while (n--) {
1851 * 1852 * If the cluster is still set then we know a minimum sized packet was received 1853 */ 1854 bit_ffc_at(fl->ifl_rx_bitmap, frag_idx, fl->ifl_size, &frag_idx); 1855 if ((frag_idx < 0) || (frag_idx >= fl->ifl_size)) 1856 bit_ffc(fl->ifl_rx_bitmap, fl->ifl_size, &frag_idx); 1857 if ((cl = sd_cl[frag_idx]) == NULL) { 1858 if ((cl = sd_cl[frag_idx] = m_cljget(NULL, M_NOWAIT, fl->ifl_buf_size)) == NULL) 1859 break; 1860#if MEMORY_LOGGING 1861 fl->ifl_cl_enqueued++; 1862#endif 1863 } 1864 if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) { 1865 break; 1866 } 1867#if MEMORY_LOGGING 1868 fl->ifl_m_enqueued++; 1869#endif 1870 1871 DBG_COUNTER_INC(rx_allocs); 1872#if defined(__i386__) || defined(__amd64__) 1873 if (!IS_DMAR(ctx)) { 1874 bus_addr = pmap_kextract((vm_offset_t)cl); 1875 } else 1876#endif 1877 { 1878 struct rxq_refill_cb_arg cb_arg; 1879 iflib_rxq_t q; 1880 1881 cb_arg.error = 0; 1882 q = fl->ifl_rxq; 1883 MPASS(sd_map != NULL); 1884 MPASS(sd_map[frag_idx] != NULL); 1885 err = bus_dmamap_load(fl->ifl_desc_tag, sd_map[frag_idx], 1886 cl, fl->ifl_buf_size, _rxq_refill_cb, &cb_arg, 0); 1887 bus_dmamap_sync(fl->ifl_desc_tag, sd_map[frag_idx], 1888 BUS_DMASYNC_PREREAD); 1889 1890 if (err != 0 || cb_arg.error) { 1891 /* 1892 * !zone_pack ? 1893 */ 1894 if (fl->ifl_zone == zone_pack) 1895 uma_zfree(fl->ifl_zone, cl); 1896 m_free(m); 1897 n = 0; 1898 goto done; 1899 } 1900 bus_addr = cb_arg.seg.ds_addr; 1901 } 1902 bit_set(fl->ifl_rx_bitmap, frag_idx); 1903 sd_flags[frag_idx] |= RX_SW_DESC_INUSE; 1904 1905 MPASS(sd_m[frag_idx] == NULL); 1906 sd_cl[frag_idx] = cl; 1907 sd_m[frag_idx] = m; 1908 fl->ifl_rxd_idxs[i] = frag_idx; 1909 fl->ifl_bus_addrs[i] = bus_addr; 1910 fl->ifl_vm_addrs[i] = cl; 1911 credits++; 1912 i++; 1913 MPASS(credits <= fl->ifl_size); 1914 if (++idx == fl->ifl_size) { 1915 fl->ifl_gen = 1; 1916 idx = 0; 1917 } 1918 if (n == 0 || i == IFLIB_MAX_RX_REFRESH) { 1919 iru.iru_pidx = pidx; 1920 iru.iru_count = i; 1921 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 1922 i = 0; 1923 pidx = idx; 1924 fl->ifl_pidx = idx; 1925 fl->ifl_credits = credits; 1926 } 1927 1928 } 1929done: 1930 if (i) { 1931 iru.iru_pidx = pidx; 1932 iru.iru_count = i; 1933 ctx->isc_rxd_refill(ctx->ifc_softc, &iru); 1934 fl->ifl_pidx = idx; 1935 fl->ifl_credits = credits; 1936 } 1937 DBG_COUNTER_INC(rxd_flush); 1938 if (fl->ifl_pidx == 0) 1939 pidx = fl->ifl_size - 1; 1940 else 1941 pidx = fl->ifl_pidx - 1; 1942 1943 if (sd_map) 1944 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 1945 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1946 ctx->isc_rxd_flush(ctx->ifc_softc, fl->ifl_rxq->ifr_id, fl->ifl_id, pidx); 1947 fl->ifl_fragidx = frag_idx; 1948} 1949 1950static __inline void 1951__iflib_fl_refill_lt(if_ctx_t ctx, iflib_fl_t fl, int max) 1952{ 1953 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */ 1954 int32_t reclaimable = fl->ifl_size - fl->ifl_credits - 1; 1955#ifdef INVARIANTS 1956 int32_t delta = fl->ifl_size - get_inuse(fl->ifl_size, fl->ifl_cidx, fl->ifl_pidx, fl->ifl_gen) - 1; 1957#endif 1958 1959 MPASS(fl->ifl_credits <= fl->ifl_size); 1960 MPASS(reclaimable == delta); 1961 1962 if (reclaimable > 0) 1963 _iflib_fl_refill(ctx, fl, min(max, reclaimable)); 1964} 1965 1966uint8_t 1967iflib_in_detach(if_ctx_t ctx) 1968{ 1969 bool in_detach; 1970 STATE_LOCK(ctx); 1971 in_detach = !!(ctx->ifc_flags & IFC_IN_DETACH); 1972 STATE_UNLOCK(ctx); 1973 return (in_detach); 1974} 1975 1976static void 1977iflib_fl_bufs_free(iflib_fl_t fl) 1978{ 1979 iflib_dma_info_t idi = 
fl->ifl_ifdi; 1980 uint32_t i; 1981 1982 for (i = 0; i < fl->ifl_size; i++) { 1983 struct mbuf **sd_m = &fl->ifl_sds.ifsd_m[i]; 1984 uint8_t *sd_flags = &fl->ifl_sds.ifsd_flags[i]; 1985 caddr_t *sd_cl = &fl->ifl_sds.ifsd_cl[i]; 1986 1987 if (*sd_flags & RX_SW_DESC_INUSE) { 1988 if (fl->ifl_sds.ifsd_map != NULL) { 1989 bus_dmamap_t sd_map = fl->ifl_sds.ifsd_map[i]; 1990 bus_dmamap_unload(fl->ifl_desc_tag, sd_map); 1991 // XXX: Should this get moved out? 1992 if (iflib_in_detach(fl->ifl_rxq->ifr_ctx)) 1993 bus_dmamap_destroy(fl->ifl_desc_tag, sd_map); 1994 } 1995 if (*sd_m != NULL) { 1996 m_init(*sd_m, M_NOWAIT, MT_DATA, 0); 1997 uma_zfree(zone_mbuf, *sd_m); 1998 } 1999 if (*sd_cl != NULL) 2000 uma_zfree(fl->ifl_zone, *sd_cl); 2001 *sd_flags = 0; 2002 } else { 2003 MPASS(*sd_cl == NULL); 2004 MPASS(*sd_m == NULL); 2005 } 2006#if MEMORY_LOGGING 2007 fl->ifl_m_dequeued++; 2008 fl->ifl_cl_dequeued++; 2009#endif 2010 *sd_cl = NULL; 2011 *sd_m = NULL; 2012 } 2013#ifdef INVARIANTS 2014 for (i = 0; i < fl->ifl_size; i++) { 2015 MPASS(fl->ifl_sds.ifsd_flags[i] == 0); 2016 MPASS(fl->ifl_sds.ifsd_cl[i] == NULL); 2017 MPASS(fl->ifl_sds.ifsd_m[i] == NULL); 2018 } 2019#endif 2020 /* 2021 * Reset free list values 2022 */ 2023 fl->ifl_credits = fl->ifl_cidx = fl->ifl_pidx = fl->ifl_gen = fl->ifl_fragidx = 0; 2024 bzero(idi->idi_vaddr, idi->idi_size); 2025} 2026 2027/********************************************************************* 2028 * 2029 * Initialize a receive ring and its buffers. 2030 * 2031 **********************************************************************/ 2032static int 2033iflib_fl_setup(iflib_fl_t fl) 2034{ 2035 iflib_rxq_t rxq = fl->ifl_rxq; 2036 if_ctx_t ctx = rxq->ifr_ctx; 2037 2038 bit_nclear(fl->ifl_rx_bitmap, 0, fl->ifl_size - 1); 2039 /* 2040 ** Free current RX buffer structs and their mbufs 2041 */ 2042 iflib_fl_bufs_free(fl); 2043 /* Now replenish the mbufs */ 2044 MPASS(fl->ifl_credits == 0); 2045 fl->ifl_buf_size = ctx->ifc_rx_mbuf_sz; 2046 if (fl->ifl_buf_size > ctx->ifc_max_fl_buf_size) 2047 ctx->ifc_max_fl_buf_size = fl->ifl_buf_size; 2048 fl->ifl_cltype = m_gettype(fl->ifl_buf_size); 2049 fl->ifl_zone = m_getzone(fl->ifl_buf_size); 2050 2051 2052 /* avoid pre-allocating zillions of clusters to an idle card 2053 * potentially speeding up attach 2054 */ 2055 _iflib_fl_refill(ctx, fl, min(128, fl->ifl_size)); 2056 MPASS(min(128, fl->ifl_size) == fl->ifl_credits); 2057 if (min(128, fl->ifl_size) != fl->ifl_credits) 2058 return (ENOBUFS); 2059 /* 2060 * handle failure 2061 */ 2062 MPASS(rxq != NULL); 2063 MPASS(fl->ifl_ifdi != NULL); 2064 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2065 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2066 return (0); 2067} 2068 2069/********************************************************************* 2070 * 2071 * Free receive ring data structures 2072 * 2073 **********************************************************************/ 2074static void 2075iflib_rx_sds_free(iflib_rxq_t rxq) 2076{ 2077 iflib_fl_t fl; 2078 int i; 2079 2080 if (rxq->ifr_fl != NULL) { 2081 for (i = 0; i < rxq->ifr_nfl; i++) { 2082 fl = &rxq->ifr_fl[i]; 2083 if (fl->ifl_desc_tag != NULL) { 2084 bus_dma_tag_destroy(fl->ifl_desc_tag); 2085 fl->ifl_desc_tag = NULL; 2086 } 2087 free(fl->ifl_sds.ifsd_m, M_IFLIB); 2088 free(fl->ifl_sds.ifsd_cl, M_IFLIB); 2089 /* XXX destroy maps first */ 2090 free(fl->ifl_sds.ifsd_map, M_IFLIB); 2091 fl->ifl_sds.ifsd_m = NULL; 2092 fl->ifl_sds.ifsd_cl = NULL; 2093 fl->ifl_sds.ifsd_map = NULL; 2094 } 2095 free(rxq->ifr_fl, 
M_IFLIB); 2096 rxq->ifr_fl = NULL; 2097 rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; 2098 } 2099} 2100 2101/* 2102 * Machine-independent logic 2103 * 2104 */ 2105static void 2106iflib_timer(void *arg) 2107{ 2108 iflib_txq_t txq = arg; 2109 if_ctx_t ctx = txq->ift_ctx; 2110 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2111 2112 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 2113 return; 2114 /* 2115 ** Check on the state of the TX queue(s); this 2116 ** can be done without the lock because it's RO 2117 ** and the HUNG state will be static if set. 2118 */ 2119 IFDI_TIMER(ctx, txq->ift_id); 2120 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) && 2121 ((txq->ift_cleaned_prev == txq->ift_cleaned) || 2122 (sctx->isc_pause_frames == 0))) 2123 goto hung; 2124 2125 if (ifmp_ring_is_stalled(txq->ift_br)) 2126 txq->ift_qstatus = IFLIB_QUEUE_HUNG; 2127 txq->ift_cleaned_prev = txq->ift_cleaned; 2128 /* handle any laggards */ 2129 if (txq->ift_db_pending) 2130 GROUPTASK_ENQUEUE(&txq->ift_task); 2131 2132 sctx->isc_pause_frames = 0; 2133 if (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) 2134 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); 2135 return; 2136 hung: 2137 device_printf(ctx->ifc_dev, "TX(%d) desc avail = %d, pidx = %d\n", 2138 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx); 2139 STATE_LOCK(ctx); 2140 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2141 ctx->ifc_flags |= (IFC_DO_WATCHDOG|IFC_DO_RESET); 2142 iflib_admin_intr_deferred(ctx); 2143 STATE_UNLOCK(ctx); 2144} 2145 2146static void 2147iflib_calc_rx_mbuf_sz(if_ctx_t ctx) 2148{ 2149 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2150 2151 /* 2152 * XXX don't set the max_frame_size to larger 2153 * than the hardware can handle 2154 */ 2155 if (sctx->isc_max_frame_size <= MCLBYTES) 2156 ctx->ifc_rx_mbuf_sz = MCLBYTES; 2157 else 2158 ctx->ifc_rx_mbuf_sz = MJUMPAGESIZE; 2159} 2160 2161uint32_t 2162iflib_get_rx_mbuf_sz(if_ctx_t ctx) 2163{ 2164 return (ctx->ifc_rx_mbuf_sz); 2165} 2166 2167static void 2168iflib_init_locked(if_ctx_t ctx) 2169{ 2170 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 2171 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2172 if_t ifp = ctx->ifc_ifp; 2173 iflib_fl_t fl; 2174 iflib_txq_t txq; 2175 iflib_rxq_t rxq; 2176 int i, j, tx_ip_csum_flags, tx_ip6_csum_flags; 2177 2178 2179 if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2180 IFDI_INTR_DISABLE(ctx); 2181 2182 tx_ip_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_SCTP); 2183 tx_ip6_csum_flags = scctx->isc_tx_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_UDP | CSUM_IP6_SCTP); 2184 /* Set hardware offload abilities */ 2185 if_clearhwassist(ifp); 2186 if (if_getcapenable(ifp) & IFCAP_TXCSUM) 2187 if_sethwassistbits(ifp, tx_ip_csum_flags, 0); 2188 if (if_getcapenable(ifp) & IFCAP_TXCSUM_IPV6) 2189 if_sethwassistbits(ifp, tx_ip6_csum_flags, 0); 2190 if (if_getcapenable(ifp) & IFCAP_TSO4) 2191 if_sethwassistbits(ifp, CSUM_IP_TSO, 0); 2192 if (if_getcapenable(ifp) & IFCAP_TSO6) 2193 if_sethwassistbits(ifp, CSUM_IP6_TSO, 0); 2194 2195 for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) { 2196 CALLOUT_LOCK(txq); 2197 callout_stop(&txq->ift_timer); 2198 CALLOUT_UNLOCK(txq); 2199 iflib_netmap_txq_init(ctx, txq); 2200 } 2201 2202 /* 2203 * Calculate a suitable Rx mbuf size prior to calling IFDI_INIT, so 2204 * that drivers can use the value when setting up the hardware receive 2205 * buffers.
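 *
 * For illustration (exact values are platform-dependent): a standard
 * 1500-byte MTU yields a maximum frame of roughly 1518 bytes, which
 * fits in an MCLBYTES (2048-byte) cluster, so iflib_calc_rx_mbuf_sz()
 * above selects MCLBYTES; a 9000-byte jumbo MTU exceeds MCLBYTES, so
 * it selects MJUMPAGESIZE (one page, commonly 4096 bytes) and a full
 * frame then spans several clusters/descriptors.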
2206 */ 2207 iflib_calc_rx_mbuf_sz(ctx); 2208 2209#ifdef INVARIANTS 2210 i = if_getdrvflags(ifp); 2211#endif 2212 IFDI_INIT(ctx); 2213 MPASS(if_getdrvflags(ifp) == i); 2214 for (i = 0, rxq = ctx->ifc_rxqs; i < sctx->isc_nrxqsets; i++, rxq++) { 2215 /* XXX this should really be done on a per-queue basis */ 2216 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 2217 MPASS(rxq->ifr_id == i); 2218 iflib_netmap_rxq_init(ctx, rxq); 2219 continue; 2220 } 2221 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) { 2222 if (iflib_fl_setup(fl)) { 2223 device_printf(ctx->ifc_dev, "freelist setup failed - check cluster settings\n"); 2224 goto done; 2225 } 2226 } 2227 } 2228done: 2229 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); 2230 IFDI_INTR_ENABLE(ctx); 2231 txq = ctx->ifc_txqs; 2232 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++) 2233 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, 2234 txq->ift_timer.c_cpu); 2235} 2236 2237static int 2238iflib_media_change(if_t ifp) 2239{ 2240 if_ctx_t ctx = if_getsoftc(ifp); 2241 int err; 2242 2243 CTX_LOCK(ctx); 2244 if ((err = IFDI_MEDIA_CHANGE(ctx)) == 0) 2245 iflib_init_locked(ctx); 2246 CTX_UNLOCK(ctx); 2247 return (err); 2248} 2249 2250static void 2251iflib_media_status(if_t ifp, struct ifmediareq *ifmr) 2252{ 2253 if_ctx_t ctx = if_getsoftc(ifp); 2254 2255 CTX_LOCK(ctx); 2256 IFDI_UPDATE_ADMIN_STATUS(ctx); 2257 IFDI_MEDIA_STATUS(ctx, ifmr); 2258 CTX_UNLOCK(ctx); 2259} 2260 2261static void 2262iflib_stop(if_ctx_t ctx) 2263{ 2264 iflib_txq_t txq = ctx->ifc_txqs; 2265 iflib_rxq_t rxq = ctx->ifc_rxqs; 2266 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2267 iflib_dma_info_t di; 2268 iflib_fl_t fl; 2269 int i, j; 2270 2271 /* Tell the stack that the interface is no longer active */ 2272 if_setdrvflagbits(ctx->ifc_ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); 2273 2274 IFDI_INTR_DISABLE(ctx); 2275 DELAY(1000); 2276 IFDI_STOP(ctx); 2277 DELAY(1000); 2278 2279 iflib_debug_reset(); 2280 /* Wait for current tx queue users to exit to disarm watchdog timer. */ 2281 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) { 2282 /* make sure all transmitters have completed before proceeding XXX */ 2283 2284 CALLOUT_LOCK(txq); 2285 callout_stop(&txq->ift_timer); 2286 CALLOUT_UNLOCK(txq); 2287 2288 /* clean any enqueued buffers */ 2289 iflib_ifmp_purge(txq); 2290 /* Free any existing tx buffers. 
*/ 2291 for (j = 0; j < txq->ift_size; j++) { 2292 iflib_txsd_free(ctx, txq, j); 2293 } 2294 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0; 2295 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0; 2296 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0; 2297 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0; 2298 txq->ift_pullups = 0; 2299 ifmp_ring_reset_stats(txq->ift_br); 2300 for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++) 2301 bzero((void *)di->idi_vaddr, di->idi_size); 2302 } 2303 for (i = 0; i < scctx->isc_nrxqsets; i++, rxq++) { 2304 /* make sure all transmitters have completed before proceeding XXX */ 2305 2306 for (j = 0, di = rxq->ifr_ifdi; j < rxq->ifr_nfl; j++, di++) 2307 bzero((void *)di->idi_vaddr, di->idi_size); 2308 /* also resets the free lists pidx/cidx */ 2309 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 2310 iflib_fl_bufs_free(fl); 2311 } 2312} 2313 2314static inline caddr_t 2315calc_next_rxd(iflib_fl_t fl, int cidx) 2316{ 2317 qidx_t size; 2318 int nrxd; 2319 caddr_t start, end, cur, next; 2320 2321 nrxd = fl->ifl_size; 2322 size = fl->ifl_rxd_size; 2323 start = fl->ifl_ifdi->idi_vaddr; 2324 2325 if (__predict_false(size == 0)) 2326 return (start); 2327 cur = start + size*cidx; 2328 end = start + size*nrxd; 2329 next = CACHE_PTR_NEXT(cur); 2330 return (next < end ? next : start); 2331} 2332 2333static inline void 2334prefetch_pkts(iflib_fl_t fl, int cidx) 2335{ 2336 int nextptr; 2337 int nrxd = fl->ifl_size; 2338 caddr_t next_rxd; 2339 2340 2341 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1); 2342 prefetch(&fl->ifl_sds.ifsd_m[nextptr]); 2343 prefetch(&fl->ifl_sds.ifsd_cl[nextptr]); 2344 next_rxd = calc_next_rxd(fl, cidx); 2345 prefetch(next_rxd); 2346 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]); 2347 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]); 2348 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]); 2349 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]); 2350 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]); 2351 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]); 2352 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]); 2353 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]); 2354} 2355 2356static void 2357rxd_frag_to_sd(iflib_rxq_t rxq, if_rxd_frag_t irf, int unload, if_rxsd_t sd) 2358{ 2359 int flid, cidx; 2360 bus_dmamap_t map; 2361 iflib_fl_t fl; 2362 iflib_dma_info_t di; 2363 int next; 2364 2365 map = NULL; 2366 flid = irf->irf_flid; 2367 cidx = irf->irf_idx; 2368 fl = &rxq->ifr_fl[flid]; 2369 sd->ifsd_fl = fl; 2370 sd->ifsd_cidx = cidx; 2371 sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx]; 2372 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx]; 2373 fl->ifl_credits--; 2374#if MEMORY_LOGGING 2375 fl->ifl_m_dequeued++; 2376#endif 2377 if (rxq->ifr_ctx->ifc_flags & IFC_PREFETCH) 2378 prefetch_pkts(fl, cidx); 2379 if (fl->ifl_sds.ifsd_map != NULL) { 2380 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1); 2381 prefetch(&fl->ifl_sds.ifsd_map[next]); 2382 map = fl->ifl_sds.ifsd_map[cidx]; 2383 di = fl->ifl_ifdi; 2384 next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1); 2385 prefetch(&fl->ifl_sds.ifsd_flags[next]); 2386 bus_dmamap_sync(di->idi_tag, di->idi_map, 2387 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2388 2389 /* not valid assert if bxe really does SGE from non-contiguous elements */ 2390 MPASS(fl->ifl_cidx == cidx); 2391 if (unload) 2392 bus_dmamap_unload(fl->ifl_desc_tag, map); 2393 } 2394 
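	/*
	 * The free list size is a power of two, so the consumer index
	 * below advances with a mask: e.g. with ifl_size == 1024 the
	 * mask is 0x3ff and cidx 1023 + 1 wraps to 0, at which point
	 * ifl_gen is cleared to record the wrap.
	 */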
fl->ifl_cidx = (fl->ifl_cidx + 1) & (fl->ifl_size-1); 2395 if (__predict_false(fl->ifl_cidx == 0)) 2396 fl->ifl_gen = 0; 2397 if (map != NULL) 2398 bus_dmamap_sync(fl->ifl_ifdi->idi_tag, fl->ifl_ifdi->idi_map, 2399 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 2400 bit_clear(fl->ifl_rx_bitmap, cidx); 2401} 2402 2403static struct mbuf * 2404assemble_segments(iflib_rxq_t rxq, if_rxd_info_t ri, if_rxsd_t sd) 2405{ 2406 int i, padlen , flags; 2407 struct mbuf *m, *mh, *mt; 2408 caddr_t cl; 2409 2410 i = 0; 2411 mh = NULL; 2412 do { 2413 rxd_frag_to_sd(rxq, &ri->iri_frags[i], TRUE, sd); 2414 2415 MPASS(*sd->ifsd_cl != NULL); 2416 MPASS(*sd->ifsd_m != NULL); 2417 2418 /* Don't include zero-length frags */ 2419 if (ri->iri_frags[i].irf_len == 0) { 2420 /* XXX we can save the cluster here, but not the mbuf */ 2421 m_init(*sd->ifsd_m, M_NOWAIT, MT_DATA, 0); 2422 m_free(*sd->ifsd_m); 2423 *sd->ifsd_m = NULL; 2424 continue; 2425 } 2426 m = *sd->ifsd_m; 2427 *sd->ifsd_m = NULL; 2428 if (mh == NULL) { 2429 flags = M_PKTHDR|M_EXT; 2430 mh = mt = m; 2431 padlen = ri->iri_pad; 2432 } else { 2433 flags = M_EXT; 2434 mt->m_next = m; 2435 mt = m; 2436 /* assuming padding is only on the first fragment */ 2437 padlen = 0; 2438 } 2439 cl = *sd->ifsd_cl; 2440 *sd->ifsd_cl = NULL; 2441 2442 /* Can these two be made one ? */ 2443 m_init(m, M_NOWAIT, MT_DATA, flags); 2444 m_cljset(m, cl, sd->ifsd_fl->ifl_cltype); 2445 /* 2446 * These must follow m_init and m_cljset 2447 */ 2448 m->m_data += padlen; 2449 ri->iri_len -= padlen; 2450 m->m_len = ri->iri_frags[i].irf_len; 2451 } while (++i < ri->iri_nfrags); 2452 2453 return (mh); 2454} 2455 2456/* 2457 * Process one software descriptor 2458 */ 2459static struct mbuf * 2460iflib_rxd_pkt_get(iflib_rxq_t rxq, if_rxd_info_t ri) 2461{ 2462 struct if_rxsd sd; 2463 struct mbuf *m; 2464 2465 /* should I merge this back in now that the two paths are basically duplicated? */ 2466 if (ri->iri_nfrags == 1 && 2467 ri->iri_frags[0].irf_len <= MIN(IFLIB_RX_COPY_THRESH, MHLEN)) { 2468 rxd_frag_to_sd(rxq, &ri->iri_frags[0], FALSE, &sd); 2469 m = *sd.ifsd_m; 2470 *sd.ifsd_m = NULL; 2471 m_init(m, M_NOWAIT, MT_DATA, M_PKTHDR); 2472#ifndef __NO_STRICT_ALIGNMENT 2473 if (!IP_ALIGNED(m)) 2474 m->m_data += 2; 2475#endif 2476 memcpy(m->m_data, *sd.ifsd_cl, ri->iri_len); 2477 m->m_len = ri->iri_frags[0].irf_len; 2478 } else { 2479 m = assemble_segments(rxq, ri, &sd); 2480 } 2481 m->m_pkthdr.len = ri->iri_len; 2482 m->m_pkthdr.rcvif = ri->iri_ifp; 2483 m->m_flags |= ri->iri_flags; 2484 m->m_pkthdr.ether_vtag = ri->iri_vtag; 2485 m->m_pkthdr.flowid = ri->iri_flowid; 2486 M_HASHTYPE_SET(m, ri->iri_rsstype); 2487 m->m_pkthdr.csum_flags = ri->iri_csum_flags; 2488 m->m_pkthdr.csum_data = ri->iri_csum_data; 2489 return (m); 2490} 2491 2492#if defined(INET6) || defined(INET) 2493static void 2494iflib_get_ip_forwarding(struct lro_ctrl *lc, bool *v4, bool *v6) 2495{ 2496 CURVNET_SET(lc->ifp->if_vnet); 2497#if defined(INET6) 2498 *v6 = VNET(ip6_forwarding); 2499#endif 2500#if defined(INET) 2501 *v4 = VNET(ipforwarding); 2502#endif 2503 CURVNET_RESTORE(); 2504} 2505 2506/* 2507 * Returns true if it's possible this packet could be LROed. 2508 * if it returns false, it is guaranteed that tcp_lro_rx() 2509 * would not return zero. 
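 * In practice (see the ethertype switch below) only IPv4 and IPv6
 * packets are LRO candidates, and only while the corresponding
 * forwarding knob is off: LRO must not coalesce packets that will be
 * forwarded rather than terminated locally.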
2510 */ 2511static bool 2512iflib_check_lro_possible(struct mbuf *m, bool v4_forwarding, bool v6_forwarding) 2513{ 2514 struct ether_header *eh; 2515 uint16_t eh_type; 2516 2517 eh = mtod(m, struct ether_header *); 2518 eh_type = ntohs(eh->ether_type); 2519 switch (eh_type) { 2520#if defined(INET6) 2521 case ETHERTYPE_IPV6: 2522 return !v6_forwarding; 2523#endif 2524#if defined (INET) 2525 case ETHERTYPE_IP: 2526 return !v4_forwarding; 2527#endif 2528 } 2529 2530 return false; 2531} 2532#else 2533static void 2534iflib_get_ip_forwarding(struct lro_ctrl *lc __unused, bool *v4 __unused, bool *v6 __unused) 2535{ 2536} 2537#endif 2538 2539static bool 2540iflib_rxeof(iflib_rxq_t rxq, qidx_t budget) 2541{ 2542 if_ctx_t ctx = rxq->ifr_ctx; 2543 if_shared_ctx_t sctx = ctx->ifc_sctx; 2544 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 2545 int avail, i; 2546 qidx_t *cidxp; 2547 struct if_rxd_info ri; 2548 int err, budget_left, rx_bytes, rx_pkts; 2549 iflib_fl_t fl; 2550 struct ifnet *ifp; 2551 int lro_enabled; 2552 bool lro_possible = false; 2553 bool v4_forwarding, v6_forwarding; 2554 2555 /* 2556 * XXX early demux data packets so that if_input processing only handles 2557 * acks in interrupt context 2558 */ 2559 struct mbuf *m, *mh, *mt, *mf; 2560 2561 ifp = ctx->ifc_ifp; 2562 mh = mt = NULL; 2563 MPASS(budget > 0); 2564 rx_pkts = rx_bytes = 0; 2565 if (sctx->isc_flags & IFLIB_HAS_RXCQ) 2566 cidxp = &rxq->ifr_cq_cidx; 2567 else 2568 cidxp = &rxq->ifr_fl[0].ifl_cidx; 2569 if ((avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget)) == 0) { 2570 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2571 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2572 DBG_COUNTER_INC(rx_unavail); 2573 return (false); 2574 } 2575 2576 for (budget_left = budget; (budget_left > 0) && (avail > 0); budget_left--, avail--) { 2577 if (__predict_false(!CTX_ACTIVE(ctx))) { 2578 DBG_COUNTER_INC(rx_ctx_inactive); 2579 break; 2580 } 2581 /* 2582 * Reset client set fields to their default values 2583 */ 2584 rxd_info_zero(&ri); 2585 ri.iri_qsidx = rxq->ifr_id; 2586 ri.iri_cidx = *cidxp; 2587 ri.iri_ifp = ifp; 2588 ri.iri_frags = rxq->ifr_frags; 2589 err = ctx->isc_rxd_pkt_get(ctx->ifc_softc, &ri); 2590 2591 if (err) 2592 goto err; 2593 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 2594 *cidxp = ri.iri_cidx; 2595 /* Update our consumer index */ 2596 /* XXX NB: shurd - check if this is still safe */ 2597 while (rxq->ifr_cq_cidx >= scctx->isc_nrxd[0]) { 2598 rxq->ifr_cq_cidx -= scctx->isc_nrxd[0]; 2599 rxq->ifr_cq_gen = 0; 2600 } 2601 /* was this only a completion queue message? 
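 * (an entry with iri_nfrags == 0 carries no packet data; drivers with
 * IFLIB_HAS_RXCQ set can return such data-less completion events)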
*/ 2602 if (__predict_false(ri.iri_nfrags == 0)) 2603 continue; 2604 } 2605 MPASS(ri.iri_nfrags != 0); 2606 MPASS(ri.iri_len != 0); 2607 2608 /* will advance the cidx on the corresponding free lists */ 2609 m = iflib_rxd_pkt_get(rxq, &ri); 2610 if (avail == 0 && budget_left) 2611 avail = iflib_rxd_avail(ctx, rxq, *cidxp, budget_left); 2612 2613 if (__predict_false(m == NULL)) { 2614 DBG_COUNTER_INC(rx_mbuf_null); 2615 continue; 2616 } 2617 /* imm_pkt: -- cxgb */ 2618 if (mh == NULL) 2619 mh = mt = m; 2620 else { 2621 mt->m_nextpkt = m; 2622 mt = m; 2623 } 2624 } 2625 /* make sure that we can refill faster than drain */ 2626 for (i = 0, fl = &rxq->ifr_fl[0]; i < sctx->isc_nfl; i++, fl++) 2627 __iflib_fl_refill_lt(ctx, fl, budget + 8); 2628 2629 lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO); 2630 if (lro_enabled) 2631 iflib_get_ip_forwarding(&rxq->ifr_lc, &v4_forwarding, &v6_forwarding); 2632 mt = mf = NULL; 2633 while (mh != NULL) { 2634 m = mh; 2635 mh = mh->m_nextpkt; 2636 m->m_nextpkt = NULL; 2637#ifndef __NO_STRICT_ALIGNMENT 2638 if (!IP_ALIGNED(m) && (m = iflib_fixup_rx(m)) == NULL) 2639 continue; 2640#endif 2641 rx_bytes += m->m_pkthdr.len; 2642 rx_pkts++; 2643#if defined(INET6) || defined(INET) 2644 if (lro_enabled) { 2645 if (!lro_possible) { 2646 lro_possible = iflib_check_lro_possible(m, v4_forwarding, v6_forwarding); 2647 if (lro_possible && mf != NULL) { 2648 ifp->if_input(ifp, mf); 2649 DBG_COUNTER_INC(rx_if_input); 2650 mt = mf = NULL; 2651 } 2652 } 2653 if ((m->m_pkthdr.csum_flags & (CSUM_L4_CALC|CSUM_L4_VALID)) == 2654 (CSUM_L4_CALC|CSUM_L4_VALID)) { 2655 if (lro_possible && tcp_lro_rx(&rxq->ifr_lc, m, 0) == 0) 2656 continue; 2657 } 2658 } 2659#endif 2660 if (lro_possible) { 2661 ifp->if_input(ifp, m); 2662 DBG_COUNTER_INC(rx_if_input); 2663 continue; 2664 } 2665 2666 if (mf == NULL) 2667 mf = m; 2668 if (mt != NULL) 2669 mt->m_nextpkt = m; 2670 mt = m; 2671 } 2672 if (mf != NULL) { 2673 ifp->if_input(ifp, mf); 2674 DBG_COUNTER_INC(rx_if_input); 2675 } 2676 2677 if_inc_counter(ifp, IFCOUNTER_IBYTES, rx_bytes); 2678 if_inc_counter(ifp, IFCOUNTER_IPACKETS, rx_pkts); 2679 2680 /* 2681 * Flush any outstanding LRO work 2682 */ 2683#if defined(INET6) || defined(INET) 2684 tcp_lro_flush_all(&rxq->ifr_lc); 2685#endif 2686 if (avail) 2687 return true; 2688 return (iflib_rxd_avail(ctx, rxq, *cidxp, 1)); 2689err: 2690 STATE_LOCK(ctx); 2691 ctx->ifc_flags |= IFC_DO_RESET; 2692 iflib_admin_intr_deferred(ctx); 2693 STATE_UNLOCK(ctx); 2694 return (false); 2695} 2696 2697#define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1) 2698static inline qidx_t 2699txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use) 2700{ 2701 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2702 qidx_t minthresh = txq->ift_size / 8; 2703 if (in_use > 4*minthresh) 2704 return (notify_count); 2705 if (in_use > 2*minthresh) 2706 return (notify_count >> 1); 2707 if (in_use > minthresh) 2708 return (notify_count >> 3); 2709 return (0); 2710} 2711 2712static inline qidx_t 2713txq_max_rs_deferred(iflib_txq_t txq) 2714{ 2715 qidx_t notify_count = TXD_NOTIFY_COUNT(txq); 2716 qidx_t minthresh = txq->ift_size / 8; 2717 if (txq->ift_in_use > 4*minthresh) 2718 return (notify_count); 2719 if (txq->ift_in_use > 2*minthresh) 2720 return (notify_count >> 1); 2721 if (txq->ift_in_use > minthresh) 2722 return (notify_count >> 2); 2723 return (2); 2724} 2725 2726#define M_CSUM_FLAGS(m) ((m)->m_pkthdr.csum_flags) 2727#define M_HAS_VLANTAG(m) (m->m_flags & M_VLANTAG) 2728 2729#define TXQ_MAX_DB_DEFERRED(txq, in_use) 
txq_max_db_deferred((txq), (in_use)) 2730#define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq) 2731#define TXQ_MAX_DB_CONSUMED(size) (size >> 4) 2732 2733/* forward compatibility for cxgb */ 2734#define FIRST_QSET(ctx) 0 2735#define NTXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_ntxqsets) 2736#define NRXQSETS(ctx) ((ctx)->ifc_softc_ctx.isc_nrxqsets) 2737#define QIDX(ctx, m) ((((m)->m_pkthdr.flowid & ctx->ifc_softc_ctx.isc_rss_table_mask) % NTXQSETS(ctx)) + FIRST_QSET(ctx)) 2738#define DESC_RECLAIMABLE(q) ((int)((q)->ift_processed - (q)->ift_cleaned - (q)->ift_ctx->ifc_softc_ctx.isc_tx_nsegments)) 2739 2740/* XXX we should be setting this to something other than zero */ 2741#define RECLAIM_THRESH(ctx) ((ctx)->ifc_sctx->isc_tx_reclaim_thresh) 2742#define MAX_TX_DESC(ctx) ((ctx)->ifc_softc_ctx.isc_tx_tso_segments_max) 2743 2744static inline bool 2745iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) 2746{ 2747 qidx_t dbval, max; 2748 bool rang; 2749 2750 rang = false; 2751 max = TXQ_MAX_DB_DEFERRED(txq, in_use); 2752 if (ring || txq->ift_db_pending >= max) { 2753 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx; 2754 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval); 2755 txq->ift_db_pending = txq->ift_npending = 0; 2756 rang = true; 2757 } 2758 return (rang); 2759} 2760 2761#ifdef PKT_DEBUG 2762static void 2763print_pkt(if_pkt_info_t pi) 2764{ 2765 printf("pi len: %d qsidx: %d nsegs: %d ndescs: %d flags: %x pidx: %d\n", 2766 pi->ipi_len, pi->ipi_qsidx, pi->ipi_nsegs, pi->ipi_ndescs, pi->ipi_flags, pi->ipi_pidx); 2767 printf("pi new_pidx: %d csum_flags: %lx tso_segsz: %d mflags: %x vtag: %d\n", 2768 pi->ipi_new_pidx, pi->ipi_csum_flags, pi->ipi_tso_segsz, pi->ipi_mflags, pi->ipi_vtag); 2769 printf("pi etype: %d ehdrlen: %d ip_hlen: %d ipproto: %d\n", 2770 pi->ipi_etype, pi->ipi_ehdrlen, pi->ipi_ip_hlen, pi->ipi_ipproto); 2771} 2772#endif 2773 2774#define IS_TSO4(pi) ((pi)->ipi_csum_flags & CSUM_IP_TSO) 2775#define IS_TX_OFFLOAD4(pi) ((pi)->ipi_csum_flags & (CSUM_IP_TCP | CSUM_IP_TSO)) 2776#define IS_TSO6(pi) ((pi)->ipi_csum_flags & CSUM_IP6_TSO) 2777#define IS_TX_OFFLOAD6(pi) ((pi)->ipi_csum_flags & (CSUM_IP6_TCP | CSUM_IP6_TSO)) 2778 2779static int 2780iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp) 2781{ 2782 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx; 2783 struct ether_vlan_header *eh; 2784 struct mbuf *m, *n; 2785 2786 n = m = *mp; 2787 if ((sctx->isc_flags & IFLIB_NEED_SCRATCH) && 2788 M_WRITABLE(m) == 0) { 2789 if ((m = m_dup(m, M_NOWAIT)) == NULL) { 2790 return (ENOMEM); 2791 } else { 2792 m_freem(*mp); 2793 n = *mp = m; 2794 } 2795 } 2796 2797 /* 2798 * Determine where frame payload starts. 2799 * Jump over vlan headers if already present, 2800 * helpful for QinQ too. 
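 * For reference: an untagged frame has a 14-byte Ethernet header
 * (ETHER_HDR_LEN), while an 802.1Q tag adds 4 bytes
 * (ETHER_VLAN_ENCAP_LEN), so ipi_ehdrlen below ends up as 14 or 18
 * and the L3 header starts at that offset.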
2801 */ 2802 if (__predict_false(m->m_len < sizeof(*eh))) { 2803 txq->ift_pullups++; 2804 if (__predict_false((m = m_pullup(m, sizeof(*eh))) == NULL)) 2805 return (ENOMEM); 2806 } 2807 eh = mtod(m, struct ether_vlan_header *); 2808 if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { 2809 pi->ipi_etype = ntohs(eh->evl_proto); 2810 pi->ipi_ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; 2811 } else { 2812 pi->ipi_etype = ntohs(eh->evl_encap_proto); 2813 pi->ipi_ehdrlen = ETHER_HDR_LEN; 2814 } 2815 2816 switch (pi->ipi_etype) { 2817#ifdef INET 2818 case ETHERTYPE_IP: 2819 { 2820 struct ip *ip = NULL; 2821 struct tcphdr *th = NULL; 2822 int minthlen; 2823 2824 minthlen = min(m->m_pkthdr.len, pi->ipi_ehdrlen + sizeof(*ip) + sizeof(*th)); 2825 if (__predict_false(m->m_len < minthlen)) { 2826 /* 2827 * if this code bloat is causing too much of a hit 2828 * move it to a separate function and mark it noinline 2829 */ 2830 if (m->m_len == pi->ipi_ehdrlen) { 2831 n = m->m_next; 2832 MPASS(n); 2833 if (n->m_len >= sizeof(*ip)) { 2834 ip = (struct ip *)n->m_data; 2835 if (n->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2836 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2837 } else { 2838 txq->ift_pullups++; 2839 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2840 return (ENOMEM); 2841 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2842 } 2843 } else { 2844 txq->ift_pullups++; 2845 if (__predict_false((m = m_pullup(m, minthlen)) == NULL)) 2846 return (ENOMEM); 2847 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2848 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2849 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2850 } 2851 } else { 2852 ip = (struct ip *)(m->m_data + pi->ipi_ehdrlen); 2853 if (m->m_len >= (ip->ip_hl << 2) + sizeof(*th)) 2854 th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); 2855 } 2856 pi->ipi_ip_hlen = ip->ip_hl << 2; 2857 pi->ipi_ipproto = ip->ip_p; 2858 pi->ipi_flags |= IPI_TX_IPV4; 2859 2860 /* TCP checksum offload may require TCP header length */ 2861 if (IS_TX_OFFLOAD4(pi)) { 2862 if (__predict_true(pi->ipi_ipproto == IPPROTO_TCP)) { 2863 if (__predict_false(th == NULL)) { 2864 txq->ift_pullups++; 2865 if (__predict_false((m = m_pullup(m, (ip->ip_hl << 2) + sizeof(*th))) == NULL)) 2866 return (ENOMEM); 2867 th = (struct tcphdr *)((caddr_t)ip + pi->ipi_ip_hlen); 2868 } 2869 pi->ipi_tcp_hflags = th->th_flags; 2870 pi->ipi_tcp_hlen = th->th_off << 2; 2871 pi->ipi_tcp_seq = th->th_seq; 2872 } 2873 if (IS_TSO4(pi)) { 2874 if (__predict_false(ip->ip_p != IPPROTO_TCP)) 2875 return (ENXIO); 2876 /* 2877 * TSO always requires hardware checksum offload. 
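 * (in_pseudo() below seeds th_sum with the IPv4 pseudo-header sum
 * over source, destination, and protocol, deliberately excluding the
 * length; the NIC folds the per-segment TCP length in as it emits
 * each TSO segment)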
2878 */ 2879 pi->ipi_csum_flags |= (CSUM_IP_TCP | CSUM_IP); 2880 th->th_sum = in_pseudo(ip->ip_src.s_addr, 2881 ip->ip_dst.s_addr, htons(IPPROTO_TCP)); 2882 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 2883 if (sctx->isc_flags & IFLIB_TSO_INIT_IP) { 2884 ip->ip_sum = 0; 2885 ip->ip_len = htons(pi->ipi_ip_hlen + pi->ipi_tcp_hlen + pi->ipi_tso_segsz); 2886 } 2887 } 2888 } 2889 if ((sctx->isc_flags & IFLIB_NEED_ZERO_CSUM) && (pi->ipi_csum_flags & CSUM_IP)) 2890 ip->ip_sum = 0; 2891 2892 break; 2893 } 2894#endif 2895#ifdef INET6 2896 case ETHERTYPE_IPV6: 2897 { 2898 struct ip6_hdr *ip6 = (struct ip6_hdr *)(m->m_data + pi->ipi_ehdrlen); 2899 struct tcphdr *th; 2900 pi->ipi_ip_hlen = sizeof(struct ip6_hdr); 2901 2902 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) { 2903 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr))) == NULL)) 2904 return (ENOMEM); 2905 } 2906 th = (struct tcphdr *)((caddr_t)ip6 + pi->ipi_ip_hlen); 2907 2908 /* XXX-BZ this will go badly in case of ext hdrs. */ 2909 pi->ipi_ipproto = ip6->ip6_nxt; 2910 pi->ipi_flags |= IPI_TX_IPV6; 2911 2912 /* TCP checksum offload may require TCP header length */ 2913 if (IS_TX_OFFLOAD6(pi)) { 2914 if (pi->ipi_ipproto == IPPROTO_TCP) { 2915 if (__predict_false(m->m_len < pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) { 2916 txq->ift_pullups++; 2917 if (__predict_false((m = m_pullup(m, pi->ipi_ehdrlen + sizeof(struct ip6_hdr) + sizeof(struct tcphdr))) == NULL)) 2918 return (ENOMEM); 2919 } 2920 pi->ipi_tcp_hflags = th->th_flags; 2921 pi->ipi_tcp_hlen = th->th_off << 2; 2922 pi->ipi_tcp_seq = th->th_seq; 2923 } 2924 if (IS_TSO6(pi)) { 2925 if (__predict_false(ip6->ip6_nxt != IPPROTO_TCP)) 2926 return (ENXIO); 2927 /* 2928 * TSO always requires hardware checksum offload. 
2929 */ 2930 pi->ipi_csum_flags |= CSUM_IP6_TCP; 2931 th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); 2932 pi->ipi_tso_segsz = m->m_pkthdr.tso_segsz; 2933 } 2934 } 2935 break; 2936 } 2937#endif 2938 default: 2939 pi->ipi_csum_flags &= ~CSUM_OFFLOAD; 2940 pi->ipi_ip_hlen = 0; 2941 break; 2942 } 2943 *mp = m; 2944 2945 return (0); 2946} 2947 2948static __noinline struct mbuf * 2949collapse_pkthdr(struct mbuf *m0) 2950{ 2951 struct mbuf *m, *m_next, *tmp; 2952 2953 m = m0; 2954 m_next = m->m_next; 2955 while (m_next != NULL && m_next->m_len == 0) { 2956 m = m_next; 2957 m->m_next = NULL; 2958 m_free(m); 2959 m_next = m_next->m_next; 2960 } 2961 m = m0; 2962 m->m_next = m_next; 2963 if ((m_next->m_flags & M_EXT) == 0) { 2964 m = m_defrag(m, M_NOWAIT); 2965 } else { 2966 tmp = m_next->m_next; 2967 memcpy(m_next, m, MPKTHSIZE); 2968 m = m_next; 2969 m->m_next = tmp; 2970 } 2971 return (m); 2972} 2973 2974/* 2975 * If dodgy hardware rejects the scatter gather chain we've handed it 2976 * we'll need to remove the mbuf chain from ifsg_m[] before we can add the 2977 * m_defrag'd mbufs 2978 */ 2979static __noinline struct mbuf * 2980iflib_remove_mbuf(iflib_txq_t txq) 2981{ 2982 int ntxd, i, pidx; 2983 struct mbuf *m, *mh, **ifsd_m; 2984 2985 pidx = txq->ift_pidx; 2986 ifsd_m = txq->ift_sds.ifsd_m; 2987 ntxd = txq->ift_size; 2988 mh = m = ifsd_m[pidx]; 2989 ifsd_m[pidx] = NULL; 2990#if MEMORY_LOGGING 2991 txq->ift_dequeued++; 2992#endif 2993 i = 1; 2994 2995 while (m) { 2996 ifsd_m[(pidx + i) & (ntxd -1)] = NULL; 2997#if MEMORY_LOGGING 2998 txq->ift_dequeued++; 2999#endif 3000 m = m->m_next; 3001 i++; 3002 } 3003 return (mh); 3004} 3005 3006static int 3007iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map, 3008 struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs, 3009 int max_segs, int flags) 3010{ 3011 if_ctx_t ctx; 3012 if_shared_ctx_t sctx; 3013 if_softc_ctx_t scctx; 3014 int i, next, pidx, err, ntxd, count; 3015 struct mbuf *m, *tmp, **ifsd_m; 3016 3017 m = *m0; 3018 3019 /* 3020 * Please don't ever do this 3021 */ 3022 if (__predict_false(m->m_len == 0)) 3023 *m0 = m = collapse_pkthdr(m); 3024 3025 ctx = txq->ift_ctx; 3026 sctx = ctx->ifc_sctx; 3027 scctx = &ctx->ifc_softc_ctx; 3028 ifsd_m = txq->ift_sds.ifsd_m; 3029 ntxd = txq->ift_size; 3030 pidx = txq->ift_pidx; 3031 if (map != NULL) { 3032 uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags; 3033 3034 err = bus_dmamap_load_mbuf_sg(tag, map, 3035 *m0, segs, nsegs, BUS_DMA_NOWAIT); 3036 if (err) 3037 return (err); 3038 ifsd_flags[pidx] |= TX_SW_DESC_MAPPED; 3039 count = 0; 3040 m = *m0; 3041 do { 3042 if (__predict_false(m->m_len <= 0)) { 3043 tmp = m; 3044 m = m->m_next; 3045 tmp->m_next = NULL; 3046 m_free(tmp); 3047 continue; 3048 } 3049 m = m->m_next; 3050 count++; 3051 } while (m != NULL); 3052 if (count > *nsegs) { 3053 ifsd_m[pidx] = *m0; 3054 ifsd_m[pidx]->m_flags |= M_TOOBIG; 3055 return (0); 3056 } 3057 m = *m0; 3058 count = 0; 3059 do { 3060 next = (pidx + count) & (ntxd-1); 3061 MPASS(ifsd_m[next] == NULL); 3062 ifsd_m[next] = m; 3063 count++; 3064 tmp = m; 3065 m = m->m_next; 3066 } while (m != NULL); 3067 } else { 3068 int buflen, sgsize, maxsegsz, max_sgsize; 3069 vm_offset_t vaddr; 3070 vm_paddr_t curaddr; 3071 3072 count = i = 0; 3073 m = *m0; 3074 if (m->m_pkthdr.csum_flags & CSUM_TSO) 3075 maxsegsz = scctx->isc_tx_tso_segsize_max; 3076 else 3077 maxsegsz = sctx->isc_tx_maxsegsize; 3078 3079 do { 3080 if (__predict_false(m->m_len <= 0)) { 3081 tmp = m; 3082 m = m->m_next; 3083 tmp->m_next = NULL; 
3084 m_free(tmp); 3085 continue; 3086 } 3087 buflen = m->m_len; 3088 vaddr = (vm_offset_t)m->m_data; 3089 /* 3090 * see if we can be smarter about physically 3091 * contiguous mappings 3092 */ 3093 next = (pidx + count) & (ntxd-1); 3094 MPASS(ifsd_m[next] == NULL); 3095#if MEMORY_LOGGING 3096 txq->ift_enqueued++; 3097#endif 3098 ifsd_m[next] = m; 3099 while (buflen > 0) { 3100 if (i >= max_segs) 3101 goto err; 3102 max_sgsize = MIN(buflen, maxsegsz); 3103 curaddr = pmap_kextract(vaddr); 3104 sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); 3105 sgsize = MIN(sgsize, max_sgsize); 3106 segs[i].ds_addr = curaddr; 3107 segs[i].ds_len = sgsize; 3108 vaddr += sgsize; 3109 buflen -= sgsize; 3110 i++; 3111 } 3112 count++; 3113 tmp = m; 3114 m = m->m_next; 3115 } while (m != NULL); 3116 *nsegs = i; 3117 } 3118 return (0); 3119err: 3120 *m0 = iflib_remove_mbuf(txq); 3121 return (EFBIG); 3122} 3123 3124static inline caddr_t 3125calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid) 3126{ 3127 qidx_t size; 3128 int ntxd; 3129 caddr_t start, end, cur, next; 3130 3131 ntxd = txq->ift_size; 3132 size = txq->ift_txd_size[qid]; 3133 start = txq->ift_ifdi[qid].idi_vaddr; 3134 3135 if (__predict_false(size == 0)) 3136 return (start); 3137 cur = start + size*cidx; 3138 end = start + size*ntxd; 3139 next = CACHE_PTR_NEXT(cur); 3140 return (next < end ? next : start); 3141} 3142 3143/* 3144 * Pad an mbuf to ensure a minimum Ethernet frame size. 3145 * min_frame_size is the frame size (less CRC) to pad the mbuf to 3146 */ 3147static __noinline int 3148iflib_ether_pad(device_t dev, struct mbuf **m_head, uint16_t min_frame_size) 3149{ 3150 /* 3151 * 18 is enough bytes to pad an ARP packet to 46 bytes, and 3152 * an ARP message is the smallest common payload I can think of 3153 */ 3154 static char pad[18]; /* just zeros */ 3155 int n; 3156 struct mbuf *new_head; 3157 3158 if (!M_WRITABLE(*m_head)) { 3159 new_head = m_dup(*m_head, M_NOWAIT); 3160 if (new_head == NULL) { 3161 m_freem(*m_head); 3162 device_printf(dev, "cannot pad short frame, m_dup() failed\n"); 3163 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3164 return ENOMEM; 3165 } 3166 m_freem(*m_head); 3167 *m_head = new_head; 3168 } 3169 3170 for (n = min_frame_size - (*m_head)->m_pkthdr.len; 3171 n > 0; n -= sizeof(pad)) 3172 if (!m_append(*m_head, min(n, sizeof(pad)), pad)) 3173 break; 3174 3175 if (n > 0) { 3176 m_freem(*m_head); 3177 device_printf(dev, "cannot pad short frame\n"); 3178 DBG_COUNTER_INC(encap_pad_mbuf_fail); 3179 return (ENOBUFS); 3180 } 3181 3182 return 0; 3183} 3184 3185static int 3186iflib_encap(iflib_txq_t txq, struct mbuf **m_headp) 3187{ 3188 if_ctx_t ctx; 3189 if_shared_ctx_t sctx; 3190 if_softc_ctx_t scctx; 3191 bus_dma_segment_t *segs; 3192 struct mbuf *m_head; 3193 void *next_txd; 3194 bus_dmamap_t map; 3195 struct if_pkt_info pi; 3196 int remap = 0; 3197 int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd; 3198 bus_dma_tag_t desc_tag; 3199 3200 segs = txq->ift_segs; 3201 ctx = txq->ift_ctx; 3202 sctx = ctx->ifc_sctx; 3203 scctx = &ctx->ifc_softc_ctx; 3204 3205 ntxd = txq->ift_size; 3206 m_head = *m_headp; 3207 map = NULL; 3208 3209 /* 3210 * If we're doing TSO the next descriptor to clean may be quite far ahead 3211 */ 3212 cidx = txq->ift_cidx; 3213 pidx = txq->ift_pidx; 3214 if (ctx->ifc_flags & IFC_PREFETCH) { 3215 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1); 3216 if (!(sctx->isc_flags & IFLIB_HAS_TXCQ)) { 3217 next_txd = calc_next_txd(txq, cidx, 0); 3218 prefetch(next_txd); 3219 } 3220 3221 /* prefetch the next
cache line of mbuf pointers and flags */ 3222 prefetch(&txq->ift_sds.ifsd_m[next]); 3223 if (txq->ift_sds.ifsd_map != NULL) { 3224 prefetch(&txq->ift_sds.ifsd_map[next]); 3225 next = (cidx + CACHE_LINE_SIZE) & (ntxd-1); 3226 prefetch(&txq->ift_sds.ifsd_flags[next]); 3227 } 3228 } else if (txq->ift_sds.ifsd_map != NULL) 3229 map = txq->ift_sds.ifsd_map[pidx]; 3230 3231 if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { 3232 desc_tag = txq->ift_tso_desc_tag; 3233 max_segs = scctx->isc_tx_tso_segments_max; 3234 } else { 3235 desc_tag = txq->ift_desc_tag; 3236 max_segs = scctx->isc_tx_nsegments; 3237 } 3238 if ((sctx->isc_flags & IFLIB_NEED_ETHER_PAD) && 3239 __predict_false(m_head->m_pkthdr.len < scctx->isc_min_frame_size)) { 3240 err = iflib_ether_pad(ctx->ifc_dev, m_headp, scctx->isc_min_frame_size); 3241 if (err) 3242 return err; 3243 } 3244 m_head = *m_headp; 3245 3246 pkt_info_zero(&pi); 3247 pi.ipi_mflags = (m_head->m_flags & (M_VLANTAG|M_BCAST|M_MCAST)); 3248 pi.ipi_pidx = pidx; 3249 pi.ipi_qsidx = txq->ift_id; 3250 pi.ipi_len = m_head->m_pkthdr.len; 3251 pi.ipi_csum_flags = m_head->m_pkthdr.csum_flags; 3252 pi.ipi_vtag = (m_head->m_flags & M_VLANTAG) ? m_head->m_pkthdr.ether_vtag : 0; 3253 3254 /* deliberate bitwise OR to make one condition */ 3255 if (__predict_true((pi.ipi_csum_flags | pi.ipi_vtag))) { 3256 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0)) 3257 return (err); 3258 m_head = *m_headp; 3259 } 3260 3261retry: 3262 err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT); 3263defrag: 3264 if (__predict_false(err)) { 3265 switch (err) { 3266 case EFBIG: 3267 /* try collapse once and defrag once */ 3268 if (remap == 0) { 3269 m_head = m_collapse(*m_headp, M_NOWAIT, max_segs); 3270 /* try defrag if collapsing fails */ 3271 if (m_head == NULL) 3272 remap++; 3273 } 3274 if (remap == 1) 3275 m_head = m_defrag(*m_headp, M_NOWAIT); 3276 /* 3277 * remap should never be >1 unless bus_dmamap_load_mbuf_sg 3278 * failed to map an mbuf that was run through m_defrag 3279 */ 3280 MPASS(remap <= 1); 3281 if (__predict_false(m_head == NULL || remap > 1)) 3282 goto defrag_failed; 3283 remap++; 3284 txq->ift_mbuf_defrag++; 3285 *m_headp = m_head; 3286 goto retry; 3287 break; 3288 case ENOMEM: 3289 txq->ift_no_tx_dma_setup++; 3290 break; 3291 default: 3292 txq->ift_no_tx_dma_setup++; 3293 m_freem(*m_headp); 3294 DBG_COUNTER_INC(tx_frees); 3295 *m_headp = NULL; 3296 break; 3297 } 3298 txq->ift_map_failed++; 3299 DBG_COUNTER_INC(encap_load_mbuf_fail); 3300 return (err); 3301 } 3302 3303 /* 3304 * XXX assumes a 1 to 1 relationship between segments and 3305 * descriptors - this does not hold true on all drivers, e.g. 3306 * cxgb 3307 */ 3308 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) { 3309 txq->ift_no_desc_avail++; 3310 if (map != NULL) 3311 bus_dmamap_unload(desc_tag, map); 3312 DBG_COUNTER_INC(encap_txq_avail_fail); 3313 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0) 3314 GROUPTASK_ENQUEUE(&txq->ift_task); 3315 return (ENOBUFS); 3316 } 3317 /* 3318 * On Intel cards we can greatly reduce the number of TX interrupts 3319 * we see by only setting report status on every Nth descriptor. 3320 * However, this also means that the driver will need to keep track 3321 * of the descriptors that RS was set on to check them for the DD bit. 
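 *
 * Illustrative arithmetic, assuming ift_update_freq == 64 and
 * ift_size == 1024: TXD_NOTIFY_COUNT is (1024 / 64) - 1 == 15 and
 * minthresh is 1024 / 8 == 128, so txq_max_rs_deferred() permits 15
 * deferred descriptors when more than 512 are in use, 7 above 256,
 * 3 above 128, and only 2 on a mostly idle ring.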
3322 */ 3323 txq->ift_rs_pending += nsegs + 1; 3324 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) || 3325 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) { 3326 pi.ipi_flags |= IPI_TX_INTR; 3327 txq->ift_rs_pending = 0; 3328 } 3329 3330 pi.ipi_segs = segs; 3331 pi.ipi_nsegs = nsegs; 3332 3333 MPASS(pidx >= 0 && pidx < txq->ift_size); 3334#ifdef PKT_DEBUG 3335 print_pkt(&pi); 3336#endif 3337 if (map != NULL) 3338 bus_dmamap_sync(desc_tag, map, BUS_DMASYNC_PREWRITE); 3339 if ((err = ctx->isc_txd_encap(ctx->ifc_softc, &pi)) == 0) { 3340 if (map != NULL) 3341 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map, 3342 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 3343 DBG_COUNTER_INC(tx_encap); 3344 MPASS(pi.ipi_new_pidx < txq->ift_size); 3345 3346 ndesc = pi.ipi_new_pidx - pi.ipi_pidx; 3347 if (pi.ipi_new_pidx < pi.ipi_pidx) { 3348 ndesc += txq->ift_size; 3349 txq->ift_gen = 1; 3350 } 3351 /* 3352 * drivers can need as many as 3353 * two sentinels 3354 */ 3355 MPASS(ndesc <= pi.ipi_nsegs + 2); 3356 MPASS(pi.ipi_new_pidx != pidx); 3357 MPASS(ndesc > 0); 3358 txq->ift_in_use += ndesc; 3359 3360 /* 3361 * We update the last software descriptor again here because there may 3362 * be a sentinel and/or there may be more mbufs than segments 3363 */ 3364 txq->ift_pidx = pi.ipi_new_pidx; 3365 txq->ift_npending += pi.ipi_ndescs; 3366 } else { 3367 *m_headp = m_head = iflib_remove_mbuf(txq); 3368 if (err == EFBIG) { 3369 txq->ift_txd_encap_efbig++; 3370 if (remap < 2) { 3371 remap = 1; 3372 goto defrag; 3373 } 3374 } 3375 DBG_COUNTER_INC(encap_txd_encap_fail); 3376 goto defrag_failed; 3377 } 3378 return (err); 3379 3380defrag_failed: 3381 txq->ift_mbuf_defrag_failed++; 3382 txq->ift_map_failed++; 3383 m_freem(*m_headp); 3384 DBG_COUNTER_INC(tx_frees); 3385 *m_headp = NULL; 3386 return (ENOMEM); 3387} 3388 3389static void 3390iflib_tx_desc_free(iflib_txq_t txq, int n) 3391{ 3392 int hasmap; 3393 uint32_t qsize, cidx, mask, gen; 3394 struct mbuf *m, **ifsd_m; 3395 uint8_t *ifsd_flags; 3396 bus_dmamap_t *ifsd_map; 3397 bool do_prefetch; 3398 3399 cidx = txq->ift_cidx; 3400 gen = txq->ift_gen; 3401 qsize = txq->ift_size; 3402 mask = qsize-1; 3403 hasmap = txq->ift_sds.ifsd_map != NULL; 3404 ifsd_flags = txq->ift_sds.ifsd_flags; 3405 ifsd_m = txq->ift_sds.ifsd_m; 3406 ifsd_map = txq->ift_sds.ifsd_map; 3407 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH); 3408 3409 while (n--) { 3410 if (do_prefetch) { 3411 prefetch(ifsd_m[(cidx + 3) & mask]); 3412 prefetch(ifsd_m[(cidx + 4) & mask]); 3413 } 3414 if (ifsd_m[cidx] != NULL) { 3415 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]); 3416 prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]); 3417 if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) { 3418 /* 3419 * does it matter if it's not the TSO tag? 
If so we'll 3420 * have to add the type to flags 3421 */ 3422 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]); 3423 ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; 3424 } 3425 if ((m = ifsd_m[cidx]) != NULL) { 3426 /* XXX we don't support any drivers that batch packets yet */ 3427 MPASS(m->m_nextpkt == NULL); 3428 /* if the number of clusters exceeds the number of segments 3429 * there won't be space on the ring to save a pointer to each 3430 * cluster so we simply free the list here 3431 */ 3432 if (m->m_flags & M_TOOBIG) { 3433 m_freem(m); 3434 } else { 3435 m_free(m); 3436 } 3437 ifsd_m[cidx] = NULL; 3438#if MEMORY_LOGGING 3439 txq->ift_dequeued++; 3440#endif 3441 DBG_COUNTER_INC(tx_frees); 3442 } 3443 } 3444 if (__predict_false(++cidx == qsize)) { 3445 cidx = 0; 3446 gen = 0; 3447 } 3448 } 3449 txq->ift_cidx = cidx; 3450 txq->ift_gen = gen; 3451} 3452 3453static __inline int 3454iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh) 3455{ 3456 int reclaim; 3457 if_ctx_t ctx = txq->ift_ctx; 3458 3459 KASSERT(thresh >= 0, ("invalid threshold to reclaim")); 3460 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size); 3461 3462 /* 3463 * Need a rate-limiting check so that this isn't called every time 3464 */ 3465 iflib_tx_credits_update(ctx, txq); 3466 reclaim = DESC_RECLAIMABLE(txq); 3467 3468 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) { 3469#ifdef INVARIANTS 3470 if (iflib_verbose_debug) { 3471 printf("%s processed=%ju cleaned=%ju tx_nsegments=%d reclaim=%d thresh=%d\n", __FUNCTION__, 3472 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments, 3473 reclaim, thresh); 3474 3475 } 3476#endif 3477 return (0); 3478 } 3479 iflib_tx_desc_free(txq, reclaim); 3480 txq->ift_cleaned += reclaim; 3481 txq->ift_in_use -= reclaim; 3482 3483 return (reclaim); 3484} 3485 3486static struct mbuf ** 3487_ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining) 3488{ 3489 int next, size; 3490 struct mbuf **items; 3491 3492 size = r->size; 3493 next = (cidx + CACHE_PTR_INCREMENT) & (size-1); 3494 items = __DEVOLATILE(struct mbuf **, &r->items[0]); 3495 3496 prefetch(items[(cidx + offset) & (size-1)]); 3497 if (remaining > 1) { 3498 prefetch2cachelines(&items[next]); 3499 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]); 3500 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]); 3501 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]); 3502 } 3503 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)])); 3504} 3505 3506static void 3507iflib_txq_check_drain(iflib_txq_t txq, int budget) 3508{ 3509 3510 ifmp_ring_check_drainage(txq->ift_br, budget); 3511} 3512 3513static uint32_t 3514iflib_txq_can_drain(struct ifmp_ring *r) 3515{ 3516 iflib_txq_t txq = r->cookie; 3517 if_ctx_t ctx = txq->ift_ctx; 3518 3519 return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) || 3520 ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)); 3521} 3522 3523static uint32_t 3524iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3525{ 3526 iflib_txq_t txq = r->cookie; 3527 if_ctx_t ctx = txq->ift_ctx; 3528 struct ifnet *ifp = ctx->ifc_ifp; 3529 struct mbuf **mp, *m; 3530 int i, count, consumed, pkt_sent, bytes_sent, mcast_sent, avail; 3531 int reclaimed, err, in_use_prev, desc_used; 3532 bool do_prefetch, ring, rang; 3533 3534 if (__predict_false(!(if_getdrvflags(ifp) & IFF_DRV_RUNNING) || 3535 !LINK_ACTIVE(ctx))) { 3536 DBG_COUNTER_INC(txq_drain_notready); 3537 return (0); 3538 } 3539 reclaimed = 
iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx)); 3540 rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use); 3541 avail = IDXDIFF(pidx, cidx, r->size); 3542 if (__predict_false(ctx->ifc_flags & IFC_QFLUSH)) { 3543 DBG_COUNTER_INC(txq_drain_flushing); 3544 for (i = 0; i < avail; i++) { 3545 m_free(r->items[(cidx + i) & (r->size-1)]); 3546 r->items[(cidx + i) & (r->size-1)] = NULL; 3547 } 3548 return (avail); 3549 } 3550 3551 if (__predict_false(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE)) { 3552 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3553 CALLOUT_LOCK(txq); 3554 callout_stop(&txq->ift_timer); 3555 CALLOUT_UNLOCK(txq); 3556 DBG_COUNTER_INC(txq_drain_oactive); 3557 return (0); 3558 } 3559 if (reclaimed) 3560 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3561 consumed = mcast_sent = bytes_sent = pkt_sent = 0; 3562 count = MIN(avail, TX_BATCH_SIZE); 3563#ifdef INVARIANTS 3564 if (iflib_verbose_debug) 3565 printf("%s avail=%d ifc_flags=%x txq_avail=%d ", __FUNCTION__, 3566 avail, ctx->ifc_flags, TXQ_AVAIL(txq)); 3567#endif 3568 do_prefetch = (ctx->ifc_flags & IFC_PREFETCH); 3569 avail = TXQ_AVAIL(txq); 3570 for (desc_used = i = 0; i < count && avail > MAX_TX_DESC(ctx) + 2; i++) { 3571 int pidx_prev, rem = do_prefetch ? count - i : 0; 3572 3573 mp = _ring_peek_one(r, cidx, i, rem); 3574 MPASS(mp != NULL && *mp != NULL); 3575 if (__predict_false(*mp == (struct mbuf *)txq)) { 3576 consumed++; 3577 reclaimed++; 3578 continue; 3579 } 3580 in_use_prev = txq->ift_in_use; 3581 pidx_prev = txq->ift_pidx; 3582 err = iflib_encap(txq, mp); 3583 if (__predict_false(err)) { 3584 DBG_COUNTER_INC(txq_drain_encapfail); 3585 /* no room - bail out */ 3586 if (err == ENOBUFS) 3587 break; 3588 consumed++; 3589 3590 /* we can't send this packet - skip it */ 3591 continue; 3592 } 3593 consumed++; 3594 pkt_sent++; 3595 m = *mp; 3596 DBG_COUNTER_INC(tx_sent); 3597 bytes_sent += m->m_pkthdr.len; 3598 mcast_sent += !!(m->m_flags & M_MCAST); 3599 avail = TXQ_AVAIL(txq); 3600 3601 txq->ift_db_pending += (txq->ift_in_use - in_use_prev); 3602 desc_used += (txq->ift_in_use - in_use_prev); 3603 ETHER_BPF_MTAP(ifp, m); 3604 if (__predict_false(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) 3605 break; 3606 rang = iflib_txd_db_check(ctx, txq, false, in_use_prev); 3607 } 3608 3609 /* deliberate use of bitwise or to avoid gratuitous short-circuit */ 3610 ring = rang ?
false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx)); 3611 iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use); 3612 if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent); 3613 if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent); 3614 if (mcast_sent) 3615 if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent); 3616#ifdef INVARIANTS 3617 if (iflib_verbose_debug) 3618 printf("consumed=%d\n", consumed); 3619#endif 3620 return (consumed); 3621} 3622 3623static uint32_t 3624iflib_txq_drain_always(struct ifmp_ring *r) 3625{ 3626 return (1); 3627} 3628 3629static uint32_t 3630iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx) 3631{ 3632 int i, avail; 3633 struct mbuf **mp; 3634 iflib_txq_t txq; 3635 3636 txq = r->cookie; 3637 3638 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 3639 CALLOUT_LOCK(txq); 3640 callout_stop(&txq->ift_timer); 3641 CALLOUT_UNLOCK(txq); 3642 3643 avail = IDXDIFF(pidx, cidx, r->size); 3644 for (i = 0; i < avail; i++) { 3645 mp = _ring_peek_one(r, cidx, i, avail - i); 3646 if (__predict_false(*mp == (struct mbuf *)txq)) 3647 continue; 3648 m_freem(*mp); 3649 } 3650 MPASS(ifmp_ring_is_stalled(r) == 0); 3651 return (avail); 3652} 3653 3654static void 3655iflib_ifmp_purge(iflib_txq_t txq) 3656{ 3657 struct ifmp_ring *r; 3658 3659 r = txq->ift_br; 3660 r->drain = iflib_txq_drain_free; 3661 r->can_drain = iflib_txq_drain_always; 3662 3663 ifmp_ring_check_drainage(r, r->size); 3664 3665 r->drain = iflib_txq_drain; 3666 r->can_drain = iflib_txq_can_drain; 3667} 3668 3669static void 3670_task_fn_tx(void *context) 3671{ 3672 iflib_txq_t txq = context; 3673 if_ctx_t ctx = txq->ift_ctx; 3674 struct ifnet *ifp = ctx->ifc_ifp; 3675 int rc; 3676 3677#ifdef IFLIB_DIAGNOSTICS 3678 txq->ift_cpu_exec_count[curcpu]++; 3679#endif 3680 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING)) 3681 return; 3682 if (if_getcapenable(ifp) & IFCAP_NETMAP) { 3683 if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false)) 3684 netmap_tx_irq(ifp, txq->ift_id); 3685 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3686 return; 3687 } 3688 if (txq->ift_db_pending) 3689 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE); 3690 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3691 if (ctx->ifc_flags & IFC_LEGACY) 3692 IFDI_INTR_ENABLE(ctx); 3693 else { 3694 rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id); 3695 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); 3696 } 3697} 3698 3699static void 3700_task_fn_rx(void *context) 3701{ 3702 iflib_rxq_t rxq = context; 3703 if_ctx_t ctx = rxq->ifr_ctx; 3704 bool more; 3705 int rc; 3706 uint16_t budget; 3707 3708#ifdef IFLIB_DIAGNOSTICS 3709 rxq->ifr_cpu_exec_count[curcpu]++; 3710#endif 3711 DBG_COUNTER_INC(task_fn_rxs); 3712 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3713 return; 3714 more = true; 3715#ifdef DEV_NETMAP 3716 if (if_getcapenable(ctx->ifc_ifp) & IFCAP_NETMAP) { 3717 u_int work = 0; 3718 if (netmap_rx_irq(ctx->ifc_ifp, rxq->ifr_id, &work)) { 3719 more = false; 3720 } 3721 } 3722#endif 3723 budget = ctx->ifc_sysctl_rx_budget; 3724 if (budget == 0) 3725 budget = 16; /* XXX */ 3726 if (more == false || (more = iflib_rxeof(rxq, budget)) == false) { 3727 if (ctx->ifc_flags & IFC_LEGACY) 3728 IFDI_INTR_ENABLE(ctx); 3729 else { 3730 DBG_COUNTER_INC(rx_intr_enables); 3731 rc = IFDI_RX_QUEUE_INTR_ENABLE(ctx, rxq->ifr_id); 3732 KASSERT(rc != ENOTSUP, ("MSI-X support requires queue_intr_enable, but not implemented in driver")); 3733 } 3734 
} 3735 if (__predict_false(!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING))) 3736 return; 3737 if (more) 3738 GROUPTASK_ENQUEUE(&rxq->ifr_task); 3739} 3740 3741static void 3742_task_fn_admin(void *context) 3743{ 3744 if_ctx_t ctx = context; 3745 if_softc_ctx_t sctx = &ctx->ifc_softc_ctx; 3746 iflib_txq_t txq; 3747 int i; 3748 bool oactive, running, do_reset, do_watchdog, in_detach; 3749 3750 STATE_LOCK(ctx); 3751 running = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING); 3752 oactive = (if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_OACTIVE); 3753 do_reset = (ctx->ifc_flags & IFC_DO_RESET); 3754 do_watchdog = (ctx->ifc_flags & IFC_DO_WATCHDOG); 3755 in_detach = (ctx->ifc_flags & IFC_IN_DETACH); 3756 ctx->ifc_flags &= ~(IFC_DO_RESET|IFC_DO_WATCHDOG); 3757 STATE_UNLOCK(ctx); 3758 3759 if ((!running & !oactive) && 3760 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) 3761 if (in_detach) 3762 return; 3763 3764 CTX_LOCK(ctx); 3765 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) { 3766 CALLOUT_LOCK(txq); 3767 callout_stop(&txq->ift_timer); 3768 CALLOUT_UNLOCK(txq); 3769 } 3770 if (do_watchdog) { 3771 ctx->ifc_watchdog_events++; 3772 IFDI_WATCHDOG_RESET(ctx); 3773 } 3774 IFDI_UPDATE_ADMIN_STATUS(ctx); 3775 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) 3776 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu); 3777 IFDI_LINK_INTR_ENABLE(ctx); 3778 if (do_reset) 3779 iflib_if_init_locked(ctx); 3780 CTX_UNLOCK(ctx); 3781 3782 if (LINK_ACTIVE(ctx) == 0) 3783 return; 3784 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) 3785 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 3786} 3787 3788 3789static void 3790_task_fn_iov(void *context) 3791{ 3792 if_ctx_t ctx = context; 3793 3794 if (!(if_getdrvflags(ctx->ifc_ifp) & IFF_DRV_RUNNING) && 3795 !(ctx->ifc_sctx->isc_flags & IFLIB_ADMIN_ALWAYS_RUN)) 3796 return; 3797 3798 CTX_LOCK(ctx); 3799 IFDI_VFLR_HANDLE(ctx); 3800 CTX_UNLOCK(ctx); 3801} 3802 3803static int 3804iflib_sysctl_int_delay(SYSCTL_HANDLER_ARGS) 3805{ 3806 int err; 3807 if_int_delay_info_t info; 3808 if_ctx_t ctx; 3809 3810 info = (if_int_delay_info_t)arg1; 3811 ctx = info->iidi_ctx; 3812 info->iidi_req = req; 3813 info->iidi_oidp = oidp; 3814 CTX_LOCK(ctx); 3815 err = IFDI_SYSCTL_INT_DELAY(ctx, info); 3816 CTX_UNLOCK(ctx); 3817 return (err); 3818} 3819 3820/********************************************************************* 3821 * 3822 * IFNET FUNCTIONS 3823 * 3824 **********************************************************************/ 3825 3826static void 3827iflib_if_init_locked(if_ctx_t ctx) 3828{ 3829 iflib_stop(ctx); 3830 iflib_init_locked(ctx); 3831} 3832 3833 3834static void 3835iflib_if_init(void *arg) 3836{ 3837 if_ctx_t ctx = arg; 3838 3839 CTX_LOCK(ctx); 3840 iflib_if_init_locked(ctx); 3841 CTX_UNLOCK(ctx); 3842} 3843 3844static int 3845iflib_if_transmit(if_t ifp, struct mbuf *m) 3846{ 3847 if_ctx_t ctx = if_getsoftc(ifp); 3848 3849 iflib_txq_t txq; 3850 int err, qidx; 3851 3852 if (__predict_false((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || !LINK_ACTIVE(ctx))) { 3853 DBG_COUNTER_INC(tx_frees); 3854 m_freem(m); 3855 return (ENETDOWN); 3856 } 3857 3858 MPASS(m->m_nextpkt == NULL); 3859 qidx = 0; 3860 if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m)) 3861 qidx = QIDX(ctx, m); 3862 /* 3863 * XXX calculate buf_ring based on flowid (divvy up bits?) 
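 * As written, QIDX() masks the flowid with the RSS table mask and
 * reduces it modulo NTXQSETS: e.g. flowid 0x2a37 with a 128-entry
 * RSS table (mask 0x7f) and 4 TX queue sets gives
 * (0x2a37 & 0x7f) % 4 == 55 % 4 == 3, i.e. TX queue 3.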
3864 */ 3865 txq = &ctx->ifc_txqs[qidx]; 3866 3867#ifdef DRIVER_BACKPRESSURE 3868 if (txq->ift_closed) { 3869 while (m != NULL) { 3870 next = m->m_nextpkt; 3871 m->m_nextpkt = NULL; 3872 m_freem(m); 3873 m = next; 3874 } 3875 return (ENOBUFS); 3876 } 3877#endif 3878#ifdef notyet 3879 qidx = count = 0; 3880 mp = marr; 3881 next = m; 3882 do { 3883 count++; 3884 next = next->m_nextpkt; 3885 } while (next != NULL); 3886 3887 if (count > nitems(marr)) 3888 if ((mp = malloc(count*sizeof(struct mbuf *), M_IFLIB, M_NOWAIT)) == NULL) { 3889 /* XXX check nextpkt */ 3890 m_freem(m); 3891 /* XXX simplify for now */ 3892 DBG_COUNTER_INC(tx_frees); 3893 return (ENOBUFS); 3894 } 3895 for (next = m, i = 0; next != NULL; i++) { 3896 mp[i] = next; 3897 next = next->m_nextpkt; 3898 mp[i]->m_nextpkt = NULL; 3899 } 3900#endif 3901 DBG_COUNTER_INC(tx_seen); 3902 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE); 3903 3904 GROUPTASK_ENQUEUE(&txq->ift_task); 3905 if (err) { 3906 /* support forthcoming later */ 3907#ifdef DRIVER_BACKPRESSURE 3908 txq->ift_closed = TRUE; 3909#endif 3910 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE); 3911 m_freem(m); 3912 } 3913 3914 return (err); 3915} 3916 3917static void 3918iflib_if_qflush(if_t ifp) 3919{ 3920 if_ctx_t ctx = if_getsoftc(ifp); 3921 iflib_txq_t txq = ctx->ifc_txqs; 3922 int i; 3923 3924 STATE_LOCK(ctx); 3925 ctx->ifc_flags |= IFC_QFLUSH; 3926 STATE_UNLOCK(ctx); 3927 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 3928 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br))) 3929 iflib_txq_check_drain(txq, 0); 3930 STATE_LOCK(ctx); 3931 ctx->ifc_flags &= ~IFC_QFLUSH; 3932 STATE_UNLOCK(ctx); 3933 3934 if_qflush(ifp); 3935} 3936 3937 3938#define IFCAP_FLAGS (IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6 | IFCAP_HWCSUM | IFCAP_LRO | \ 3939 IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTAGGING | IFCAP_HWSTATS | \ 3940 IFCAP_VLAN_MTU | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWTSO) 3941 3942static int 3943iflib_if_ioctl(if_t ifp, u_long command, caddr_t data) 3944{ 3945 if_ctx_t ctx = if_getsoftc(ifp); 3946 struct ifreq *ifr = (struct ifreq *)data; 3947#if defined(INET) || defined(INET6) 3948 struct ifaddr *ifa = (struct ifaddr *)data; 3949#endif 3950 bool avoid_reset = FALSE; 3951 int err = 0, reinit = 0, bits; 3952 3953 switch (command) { 3954 case SIOCSIFADDR: 3955#ifdef INET 3956 if (ifa->ifa_addr->sa_family == AF_INET) 3957 avoid_reset = TRUE; 3958#endif 3959#ifdef INET6 3960 if (ifa->ifa_addr->sa_family == AF_INET6) 3961 avoid_reset = TRUE; 3962#endif 3963 /* 3964 ** Calling init results in link renegotiation, 3965 ** so we avoid doing it when possible. 
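	** For example, adding an inet alias to a running interface takes
	** the arp_ifinit() path below without bouncing the link, while
	** address families other than inet and inet6 fall through to
	** ether_ioctl() instead.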
3966 */ 3967 if (avoid_reset) { 3968 if_setflagbits(ifp, IFF_UP,0); 3969 if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) 3970 reinit = 1; 3971#ifdef INET 3972 if (!(if_getflags(ifp) & IFF_NOARP)) 3973 arp_ifinit(ifp, ifa); 3974#endif 3975 } else 3976 err = ether_ioctl(ifp, command, data); 3977 break; 3978 case SIOCSIFMTU: 3979 CTX_LOCK(ctx); 3980 if (ifr->ifr_mtu == if_getmtu(ifp)) { 3981 CTX_UNLOCK(ctx); 3982 break; 3983 } 3984 bits = if_getdrvflags(ifp); 3985 /* stop the driver and free any clusters before proceeding */ 3986 iflib_stop(ctx); 3987 3988 if ((err = IFDI_MTU_SET(ctx, ifr->ifr_mtu)) == 0) { 3989 STATE_LOCK(ctx); 3990 if (ifr->ifr_mtu > ctx->ifc_max_fl_buf_size) 3991 ctx->ifc_flags |= IFC_MULTISEG; 3992 else 3993 ctx->ifc_flags &= ~IFC_MULTISEG; 3994 STATE_UNLOCK(ctx); 3995 err = if_setmtu(ifp, ifr->ifr_mtu); 3996 } 3997 iflib_init_locked(ctx); 3998 STATE_LOCK(ctx); 3999 if_setdrvflags(ifp, bits); 4000 STATE_UNLOCK(ctx); 4001 CTX_UNLOCK(ctx); 4002 break; 4003 case SIOCSIFFLAGS: 4004 CTX_LOCK(ctx); 4005 if (if_getflags(ifp) & IFF_UP) { 4006 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4007 if ((if_getflags(ifp) ^ ctx->ifc_if_flags) & 4008 (IFF_PROMISC | IFF_ALLMULTI)) { 4009 err = IFDI_PROMISC_SET(ctx, if_getflags(ifp)); 4010 } 4011 } else 4012 reinit = 1; 4013 } else if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4014 iflib_stop(ctx); 4015 } 4016 ctx->ifc_if_flags = if_getflags(ifp); 4017 CTX_UNLOCK(ctx); 4018 break; 4019 case SIOCADDMULTI: 4020 case SIOCDELMULTI: 4021 if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { 4022 CTX_LOCK(ctx); 4023 IFDI_INTR_DISABLE(ctx); 4024 IFDI_MULTI_SET(ctx); 4025 IFDI_INTR_ENABLE(ctx); 4026 CTX_UNLOCK(ctx); 4027 } 4028 break; 4029 case SIOCSIFMEDIA: 4030 CTX_LOCK(ctx); 4031 IFDI_MEDIA_SET(ctx); 4032 CTX_UNLOCK(ctx); 4033 /* falls thru */ 4034 case SIOCGIFMEDIA: 4035 case SIOCGIFXMEDIA: 4036 err = ifmedia_ioctl(ifp, ifr, &ctx->ifc_media, command); 4037 break; 4038 case SIOCGI2C: 4039 { 4040 struct ifi2creq i2c; 4041 4042 err = copyin(ifr_data_get_ptr(ifr), &i2c, sizeof(i2c)); 4043 if (err != 0) 4044 break; 4045 if (i2c.dev_addr != 0xA0 && i2c.dev_addr != 0xA2) { 4046 err = EINVAL; 4047 break; 4048 } 4049 if (i2c.len > sizeof(i2c.data)) { 4050 err = EINVAL; 4051 break; 4052 } 4053 4054 if ((err = IFDI_I2C_REQ(ctx, &i2c)) == 0) 4055 err = copyout(&i2c, ifr_data_get_ptr(ifr), 4056 sizeof(i2c)); 4057 break; 4058 } 4059 case SIOCSIFCAP: 4060 { 4061 int mask, setmask; 4062 4063 mask = ifr->ifr_reqcap ^ if_getcapenable(ifp); 4064 setmask = 0; 4065#ifdef TCP_OFFLOAD 4066 setmask |= mask & (IFCAP_TOE4|IFCAP_TOE6); 4067#endif 4068 setmask |= (mask & IFCAP_FLAGS); 4069 4070 if (setmask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) 4071 setmask |= (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6); 4072 if ((mask & IFCAP_WOL) && 4073 (if_getcapabilities(ifp) & IFCAP_WOL) != 0) 4074 setmask |= (mask & (IFCAP_WOL_MCAST|IFCAP_WOL_MAGIC)); 4075 if_vlancap(ifp); 4076 /* 4077 * want to ensure that traffic has stopped before we change any of the flags 4078 */ 4079 if (setmask) { 4080 CTX_LOCK(ctx); 4081 bits = if_getdrvflags(ifp); 4082 if (bits & IFF_DRV_RUNNING) 4083 iflib_stop(ctx); 4084 STATE_LOCK(ctx); 4085 if_togglecapenable(ifp, setmask); 4086 STATE_UNLOCK(ctx); 4087 if (bits & IFF_DRV_RUNNING) 4088 iflib_init_locked(ctx); 4089 STATE_LOCK(ctx); 4090 if_setdrvflags(ifp, bits); 4091 STATE_UNLOCK(ctx); 4092 CTX_UNLOCK(ctx); 4093 } 4094 break; 4095 } 4096 case SIOCGPRIVATE_0: 4097 case SIOCSDRVSPEC: 4098 case SIOCGDRVSPEC: 4099 CTX_LOCK(ctx); 4100 err = IFDI_PRIV_IOCTL(ctx, command, data); 
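		/*
		 * (These requests are passed through to the driver
		 * untouched; IFDI_PRIV_IOCTL is the escape hatch for
		 * driver-private configuration that iflib knows nothing
		 * about.)
		 */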
4101 CTX_UNLOCK(ctx); 4102 break; 4103 default: 4104 err = ether_ioctl(ifp, command, data); 4105 break; 4106 } 4107 if (reinit) 4108 iflib_if_init(ctx); 4109 return (err); 4110} 4111 4112static uint64_t 4113iflib_if_get_counter(if_t ifp, ift_counter cnt) 4114{ 4115 if_ctx_t ctx = if_getsoftc(ifp); 4116 4117 return (IFDI_GET_COUNTER(ctx, cnt)); 4118} 4119 4120/********************************************************************* 4121 * 4122 * OTHER FUNCTIONS EXPORTED TO THE STACK 4123 * 4124 **********************************************************************/ 4125 4126static void 4127iflib_vlan_register(void *arg, if_t ifp, uint16_t vtag) 4128{ 4129 if_ctx_t ctx = if_getsoftc(ifp); 4130 4131 if ((void *)ctx != arg) 4132 return; 4133 4134 if ((vtag == 0) || (vtag > 4095)) 4135 return; 4136 4137 CTX_LOCK(ctx); 4138 IFDI_VLAN_REGISTER(ctx, vtag); 4139 /* Re-init to load the changes */ 4140 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 4141 iflib_if_init_locked(ctx); 4142 CTX_UNLOCK(ctx); 4143} 4144 4145static void 4146iflib_vlan_unregister(void *arg, if_t ifp, uint16_t vtag) 4147{ 4148 if_ctx_t ctx = if_getsoftc(ifp); 4149 4150 if ((void *)ctx != arg) 4151 return; 4152 4153 if ((vtag == 0) || (vtag > 4095)) 4154 return; 4155 4156 CTX_LOCK(ctx); 4157 IFDI_VLAN_UNREGISTER(ctx, vtag); 4158 /* Re-init to load the changes */ 4159 if (if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) 4160 iflib_if_init_locked(ctx); 4161 CTX_UNLOCK(ctx); 4162} 4163 4164static void 4165iflib_led_func(void *arg, int onoff) 4166{ 4167 if_ctx_t ctx = arg; 4168 4169 CTX_LOCK(ctx); 4170 IFDI_LED_FUNC(ctx, onoff); 4171 CTX_UNLOCK(ctx); 4172} 4173 4174/********************************************************************* 4175 * 4176 * BUS FUNCTION DEFINITIONS 4177 * 4178 **********************************************************************/ 4179 4180int 4181iflib_device_probe(device_t dev) 4182{ 4183 pci_vendor_info_t *ent; 4184 4185 uint16_t pci_vendor_id, pci_device_id; 4186 uint16_t pci_subvendor_id, pci_subdevice_id; 4187 uint16_t pci_rev_id; 4188 if_shared_ctx_t sctx; 4189 4190 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 4191 return (ENOTSUP); 4192 4193 pci_vendor_id = pci_get_vendor(dev); 4194 pci_device_id = pci_get_device(dev); 4195 pci_subvendor_id = pci_get_subvendor(dev); 4196 pci_subdevice_id = pci_get_subdevice(dev); 4197 pci_rev_id = pci_get_revid(dev); 4198 if (sctx->isc_parse_devinfo != NULL) 4199 sctx->isc_parse_devinfo(&pci_device_id, &pci_subvendor_id, &pci_subdevice_id, &pci_rev_id); 4200 4201 ent = sctx->isc_vendor_info; 4202 while (ent->pvi_vendor_id != 0) { 4203 if (pci_vendor_id != ent->pvi_vendor_id) { 4204 ent++; 4205 continue; 4206 } 4207 if ((pci_device_id == ent->pvi_device_id) && 4208 ((pci_subvendor_id == ent->pvi_subvendor_id) || 4209 (ent->pvi_subvendor_id == 0)) && 4210 ((pci_subdevice_id == ent->pvi_subdevice_id) || 4211 (ent->pvi_subdevice_id == 0)) && 4212 ((pci_rev_id == ent->pvi_rev_id) || 4213 (ent->pvi_rev_id == 0))) { 4214 4215 device_set_desc_copy(dev, ent->pvi_name); 4216 /* this needs to be changed to zero if the bus probing code 4217 * ever stops re-probing on best match, because the sctx 4218 * may have its values overwritten by register calls 4219 * in subsequent probes 4220 */ 4221 return (BUS_PROBE_DEFAULT); 4222 } 4223 ent++; 4224 } 4225 return (ENXIO); 4226} 4227 4228int 4229iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ctxp) 4230{ 4231 int err, rid, msix, msix_bar; 4232 if_ctx_t ctx; 4233 if_t ifp; 4234 
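	/*
	 * Usage sketch (illustrative, not compiled): a leaf driver reaches
	 * this function by pointing the standard bus methods at iflib and
	 * supplying its shared context via DEVICE_REGISTER; the "exdrv"
	 * names are hypothetical:
	 *
	 *	static device_method_t exdrv_methods[] = {
	 *		DEVMETHOD(device_register, exdrv_register),
	 *		DEVMETHOD(device_probe, iflib_device_probe),
	 *		DEVMETHOD(device_attach, iflib_device_attach),
	 *		DEVMETHOD(device_detach, iflib_device_detach),
	 *		DEVMETHOD_END
	 *	};
	 */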
if_softc_ctx_t scctx; 4235 int i; 4236 uint16_t main_txq; 4237 uint16_t main_rxq; 4238 4239 4240 ctx = malloc(sizeof(* ctx), M_IFLIB, M_WAITOK|M_ZERO); 4241 4242 if (sc == NULL) { 4243 sc = malloc(sctx->isc_driver->size, M_IFLIB, M_WAITOK|M_ZERO); 4244 device_set_softc(dev, ctx); 4245 ctx->ifc_flags |= IFC_SC_ALLOCATED; 4246 } 4247 4248 ctx->ifc_sctx = sctx; 4249 ctx->ifc_dev = dev; 4250 ctx->ifc_softc = sc; 4251 4252 if ((err = iflib_register(ctx)) != 0) { 4253 device_printf(dev, "iflib_register failed %d\n", err); 4254 return (err); 4255 } 4256 iflib_add_device_sysctl_pre(ctx); 4257 4258 scctx = &ctx->ifc_softc_ctx; 4259 ifp = ctx->ifc_ifp; 4260 ctx->ifc_nhwtxqs = sctx->isc_ntxqs; 4261 4262 /* 4263 * XXX sanity check that ntxd & nrxd are a power of 2 4264 */ 4265 if (ctx->ifc_sysctl_ntxqs != 0) 4266 scctx->isc_ntxqsets = ctx->ifc_sysctl_ntxqs; 4267 if (ctx->ifc_sysctl_nrxqs != 0) 4268 scctx->isc_nrxqsets = ctx->ifc_sysctl_nrxqs; 4269 4270 for (i = 0; i < sctx->isc_ntxqs; i++) { 4271 if (ctx->ifc_sysctl_ntxds[i] != 0) 4272 scctx->isc_ntxd[i] = ctx->ifc_sysctl_ntxds[i]; 4273 else 4274 scctx->isc_ntxd[i] = sctx->isc_ntxd_default[i]; 4275 } 4276 4277 for (i = 0; i < sctx->isc_nrxqs; i++) { 4278 if (ctx->ifc_sysctl_nrxds[i] != 0) 4279 scctx->isc_nrxd[i] = ctx->ifc_sysctl_nrxds[i]; 4280 else 4281 scctx->isc_nrxd[i] = sctx->isc_nrxd_default[i]; 4282 } 4283 4284 for (i = 0; i < sctx->isc_nrxqs; i++) { 4285 if (scctx->isc_nrxd[i] < sctx->isc_nrxd_min[i]) { 4286 device_printf(dev, "nrxd%d: %d less than nrxd_min %d - resetting to min\n", 4287 i, scctx->isc_nrxd[i], sctx->isc_nrxd_min[i]); 4288 scctx->isc_nrxd[i] = sctx->isc_nrxd_min[i]; 4289 } 4290 if (scctx->isc_nrxd[i] > sctx->isc_nrxd_max[i]) { 4291 device_printf(dev, "nrxd%d: %d greater than nrxd_max %d - resetting to max\n", 4292 i, scctx->isc_nrxd[i], sctx->isc_nrxd_max[i]); 4293 scctx->isc_nrxd[i] = sctx->isc_nrxd_max[i]; 4294 } 4295 } 4296 4297 for (i = 0; i < sctx->isc_ntxqs; i++) { 4298 if (scctx->isc_ntxd[i] < sctx->isc_ntxd_min[i]) { 4299 device_printf(dev, "ntxd%d: %d less than ntxd_min %d - resetting to min\n", 4300 i, scctx->isc_ntxd[i], sctx->isc_ntxd_min[i]); 4301 scctx->isc_ntxd[i] = sctx->isc_ntxd_min[i]; 4302 } 4303 if (scctx->isc_ntxd[i] > sctx->isc_ntxd_max[i]) { 4304 device_printf(dev, "ntxd%d: %d greater than ntxd_max %d - resetting to max\n", 4305 i, scctx->isc_ntxd[i], sctx->isc_ntxd_max[i]); 4306 scctx->isc_ntxd[i] = sctx->isc_ntxd_max[i]; 4307 } 4308 } 4309 4310 if ((err = IFDI_ATTACH_PRE(ctx)) != 0) { 4311 device_printf(dev, "IFDI_ATTACH_PRE failed %d\n", err); 4312 return (err); 4313 } 4314 _iflib_pre_assert(scctx); 4315 ctx->ifc_txrx = *scctx->isc_txrx; 4316 4317#ifdef INVARIANTS 4318 MPASS(scctx->isc_capenable); 4319 if (scctx->isc_capenable & IFCAP_TXCSUM) 4320 MPASS(scctx->isc_tx_csum_flags); 4321#endif 4322 4323 if_setcapabilities(ifp, scctx->isc_capenable | IFCAP_HWSTATS); 4324 if_setcapenable(ifp, scctx->isc_capenable | IFCAP_HWSTATS); 4325 4326 if (scctx->isc_ntxqsets == 0 || (scctx->isc_ntxqsets_max && scctx->isc_ntxqsets_max < scctx->isc_ntxqsets)) 4327 scctx->isc_ntxqsets = scctx->isc_ntxqsets_max; 4328 if (scctx->isc_nrxqsets == 0 || (scctx->isc_nrxqsets_max && scctx->isc_nrxqsets_max < scctx->isc_nrxqsets)) 4329 scctx->isc_nrxqsets = scctx->isc_nrxqsets_max; 4330 4331#ifdef ACPI_DMAR 4332 if (dmar_get_dma_tag(device_get_parent(dev), dev) != NULL) 4333 ctx->ifc_flags |= IFC_DMAR; 4334#elif !(defined(__i386__) || defined(__amd64__)) 4335 /* set unconditionally for !x86 */ 4336 ctx->ifc_flags |= IFC_DMAR; 
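	/*
	 * (The intent of IFC_DMAR, as used elsewhere in this file, is to
	 * force every mbuf through a full busdma load instead of the
	 * physical-address fast path; the fast path is only safe when no
	 * IOMMU sits between the NIC and memory.)
	 */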
4337#endif 4338 4339 msix_bar = scctx->isc_msix_bar; 4340 main_txq = (sctx->isc_flags & IFLIB_HAS_TXCQ) ? 1 : 0; 4341 main_rxq = (sctx->isc_flags & IFLIB_HAS_RXCQ) ? 1 : 0; 4342 4343 /* XXX change for per-queue sizes */ 4344 device_printf(dev, "using %d tx descriptors and %d rx descriptors\n", 4345 scctx->isc_ntxd[main_txq], scctx->isc_nrxd[main_rxq]); 4346 for (i = 0; i < sctx->isc_nrxqs; i++) { 4347 if (!powerof2(scctx->isc_nrxd[i])) { 4348 /* round down instead? */ 4349 device_printf(dev, "# rx descriptors must be a power of 2\n"); 4350 err = EINVAL; 4351 goto fail; 4352 } 4353 } 4354 for (i = 0; i < sctx->isc_ntxqs; i++) { 4355 if (!powerof2(scctx->isc_ntxd[i])) { 4356 device_printf(dev, 4357 "# tx descriptors must be a power of 2"); 4358 err = EINVAL; 4359 goto fail; 4360 } 4361 } 4362 4363 if (scctx->isc_tx_nsegments > scctx->isc_ntxd[main_txq] / 4364 MAX_SINGLE_PACKET_FRACTION) 4365 scctx->isc_tx_nsegments = max(1, scctx->isc_ntxd[main_txq] / 4366 MAX_SINGLE_PACKET_FRACTION); 4367 if (scctx->isc_tx_tso_segments_max > scctx->isc_ntxd[main_txq] / 4368 MAX_SINGLE_PACKET_FRACTION) 4369 scctx->isc_tx_tso_segments_max = max(1, 4370 scctx->isc_ntxd[main_txq] / MAX_SINGLE_PACKET_FRACTION); 4371 4372 /* 4373 * Protect the stack against modern hardware 4374 */ 4375 if (scctx->isc_tx_tso_size_max > FREEBSD_TSO_SIZE_MAX) 4376 scctx->isc_tx_tso_size_max = FREEBSD_TSO_SIZE_MAX; 4377 4378 /* TSO parameters - dig these out of the data sheet - simply correspond to tag setup */ 4379 ifp->if_hw_tsomaxsegcount = scctx->isc_tx_tso_segments_max; 4380 ifp->if_hw_tsomax = scctx->isc_tx_tso_size_max; 4381 ifp->if_hw_tsomaxsegsize = scctx->isc_tx_tso_segsize_max; 4382 if (scctx->isc_rss_table_size == 0) 4383 scctx->isc_rss_table_size = 64; 4384 scctx->isc_rss_table_mask = scctx->isc_rss_table_size-1; 4385 4386 GROUPTASK_INIT(&ctx->ifc_admin_task, 0, _task_fn_admin, ctx); 4387 /* XXX format name */ 4388 taskqgroup_attach(qgroup_if_config_tqg, &ctx->ifc_admin_task, ctx, -1, "admin"); 4389 4390 /* Set up cpu set. If it fails, use the set of all CPUs. */ 4391 if (bus_get_cpus(dev, INTR_CPUS, sizeof(ctx->ifc_cpus), &ctx->ifc_cpus) != 0) { 4392 device_printf(dev, "Unable to fetch CPU list\n"); 4393 CPU_COPY(&all_cpus, &ctx->ifc_cpus); 4394 } 4395 MPASS(CPU_COUNT(&ctx->ifc_cpus) > 0); 4396 4397 /* 4398 ** Now set up MSI or MSI-X; this should 4399 ** return the number of supported 4400 ** vectors. (Will be 1 for MSI) 4401 */ 4402 if (sctx->isc_flags & IFLIB_SKIP_MSIX) { 4403 msix = scctx->isc_vectors; 4404 } else if (scctx->isc_msix_bar != 0) 4405 /* 4406 * The simple fact that isc_msix_bar is not 0 does not mean we 4407 * have a good value there that is known to work. 4408 */ 4409 msix = iflib_msix_init(ctx); 4410 else { 4411 scctx->isc_vectors = 1; 4412 scctx->isc_ntxqsets = 1; 4413 scctx->isc_nrxqsets = 1; 4414 scctx->isc_intr = IFLIB_INTR_LEGACY; 4415 msix = 0; 4416 } 4417 /* Get memory for the station queues */ 4418 if ((err = iflib_queues_alloc(ctx))) { 4419 device_printf(dev, "Unable to allocate queue memory\n"); 4420 goto fail; 4421 } 4422 4423 if ((err = iflib_qset_structures_setup(ctx))) 4424 goto fail_queues; 4425 /* 4426 * Group taskqueues aren't properly set up until SMP is started, 4427 * so we disable interrupts until we can handle them post 4428 * SI_SUB_SMP. 4429 * 4430 * XXX: disabling interrupts doesn't actually work, at least for 4431 * the non-MSI case. When they occur before SI_SUB_SMP completes, 4432 * we do null handling and depend on this not causing too large an 4433 * interrupt storm. 
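	 * (The window closes at SI_SUB_SMP, once the group taskqueue code
	 * has distributed its threads across the CPUs and tasks enqueued
	 * here can actually run.)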
4434 */ 4435 IFDI_INTR_DISABLE(ctx); 4436 if (msix > 1 && (err = IFDI_MSIX_INTR_ASSIGN(ctx, msix)) != 0) { 4437 device_printf(dev, "IFDI_MSIX_INTR_ASSIGN failed %d\n", err); 4438 goto fail_intr_free; 4439 } 4440 if (msix <= 1) { 4441 rid = 0; 4442 if (scctx->isc_intr == IFLIB_INTR_MSI) { 4443 MPASS(msix == 1); 4444 rid = 1; 4445 } 4446 if ((err = iflib_legacy_setup(ctx, ctx->isc_legacy_intr, ctx->ifc_softc, &rid, "irq0")) != 0) { 4447 device_printf(dev, "iflib_legacy_setup failed %d\n", err); 4448 goto fail_intr_free; 4449 } 4450 } 4451 ether_ifattach(ctx->ifc_ifp, ctx->ifc_mac); 4452 if ((err = IFDI_ATTACH_POST(ctx)) != 0) { 4453 device_printf(dev, "IFDI_ATTACH_POST failed %d\n", err); 4454 goto fail_detach; 4455 } 4456 if ((err = iflib_netmap_attach(ctx))) { 4457 device_printf(ctx->ifc_dev, "netmap attach failed: %d\n", err); 4458 goto fail_detach; 4459 } 4460 *ctxp = ctx; 4461 4462 if_setgetcounterfn(ctx->ifc_ifp, iflib_if_get_counter); 4463 iflib_add_device_sysctl_post(ctx); 4464 return (0); 4465fail_detach: 4466 ether_ifdetach(ctx->ifc_ifp); 4467fail_intr_free: 4468 if (scctx->isc_intr == IFLIB_INTR_MSIX || scctx->isc_intr == IFLIB_INTR_MSI) 4469 pci_release_msi(ctx->ifc_dev); 4470fail_queues: 4471 iflib_tx_structures_free(ctx); 4472 iflib_rx_structures_free(ctx); 4473fail: 4474 IFDI_DETACH(ctx); 4475 return (err); 4476} 4477 4478int 4479iflib_device_attach(device_t dev) 4480{ 4481 if_ctx_t ctx; 4482 if_shared_ctx_t sctx; 4483 4484 if ((sctx = DEVICE_REGISTER(dev)) == NULL || sctx->isc_magic != IFLIB_MAGIC) 4485 return (ENOTSUP); 4486 4487 pci_enable_busmaster(dev); 4488 4489 return (iflib_device_register(dev, NULL, sctx, &ctx)); 4490} 4491 4492int 4493iflib_device_deregister(if_ctx_t ctx) 4494{ 4495 if_t ifp = ctx->ifc_ifp; 4496 iflib_txq_t txq; 4497 iflib_rxq_t rxq; 4498 device_t dev = ctx->ifc_dev; 4499 int i, j; 4500 struct taskqgroup *tqg; 4501 iflib_fl_t fl; 4502 4503 /* Make sure VLANS are not using driver */ 4504 if (if_vlantrunkinuse(ifp)) { 4505 device_printf(dev, "Vlan in use, detach first\n"); 4506 return (EBUSY); 4507 } 4508#ifdef PCI_IOV 4509 if (!CTX_IS_VF(ctx) && pci_iov_detach(dev) != 0) { 4510 device_printf(dev, "SR-IOV in use; detach first.\n"); 4511 return (EBUSY); 4512 } 4513#endif 4514 4515 STATE_LOCK(ctx); 4516 ctx->ifc_flags |= IFC_IN_DETACH; 4517 STATE_UNLOCK(ctx); 4518 4519 CTX_LOCK(ctx); 4520 iflib_stop(ctx); 4521 CTX_UNLOCK(ctx); 4522 4523 /* Unregister VLAN events */ 4524 if (ctx->ifc_vlan_attach_event != NULL) 4525 EVENTHANDLER_DEREGISTER(vlan_config, ctx->ifc_vlan_attach_event); 4526 if (ctx->ifc_vlan_detach_event != NULL) 4527 EVENTHANDLER_DEREGISTER(vlan_unconfig, ctx->ifc_vlan_detach_event); 4528 4529 iflib_netmap_detach(ifp); 4530 ether_ifdetach(ifp); 4531 if (ctx->ifc_led_dev != NULL) 4532 led_destroy(ctx->ifc_led_dev); 4533 /* XXX drain any dependent tasks */ 4534 tqg = qgroup_if_io_tqg; 4535 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) { 4536 callout_drain(&txq->ift_timer); 4537 if (txq->ift_task.gt_uniq != NULL) 4538 taskqgroup_detach(tqg, &txq->ift_task); 4539 } 4540 for (i = 0, rxq = ctx->ifc_rxqs; i < NRXQSETS(ctx); i++, rxq++) { 4541 if (rxq->ifr_task.gt_uniq != NULL) 4542 taskqgroup_detach(tqg, &rxq->ifr_task); 4543 4544 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 4545 free(fl->ifl_rx_bitmap, M_IFLIB); 4546 4547 } 4548 tqg = qgroup_if_config_tqg; 4549 if (ctx->ifc_admin_task.gt_uniq != NULL) 4550 taskqgroup_detach(tqg, &ctx->ifc_admin_task); 4551 if (ctx->ifc_vflr_task.gt_uniq != NULL) 4552 
taskqgroup_detach(tqg, &ctx->ifc_vflr_task); 4553 CTX_LOCK(ctx); 4554 IFDI_DETACH(ctx); 4555 CTX_UNLOCK(ctx); 4556 4557 /* ether_ifdetach calls if_qflush - lock must be destroyed afterwards */ 4558 CTX_LOCK_DESTROY(ctx); 4559 device_set_softc(ctx->ifc_dev, NULL); 4560 iflib_free_intr_mem(ctx); 4561 4562 bus_generic_detach(dev); 4563 if_free(ifp); 4564 4565 iflib_tx_structures_free(ctx); 4566 iflib_rx_structures_free(ctx); 4567 if (ctx->ifc_flags & IFC_SC_ALLOCATED) 4568 free(ctx->ifc_softc, M_IFLIB); 4569 STATE_LOCK_DESTROY(ctx); 4570 free(ctx, M_IFLIB); 4571 return (0); 4572} 4573 4574static void 4575iflib_free_intr_mem(if_ctx_t ctx) 4576{ 4577 4578 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_MSIX) { 4579 iflib_irq_free(ctx, &ctx->ifc_legacy_irq); 4580 } 4581 if (ctx->ifc_softc_ctx.isc_intr != IFLIB_INTR_LEGACY) { 4582 pci_release_msi(ctx->ifc_dev); 4583 } 4584 if (ctx->ifc_msix_mem != NULL) { 4585 bus_release_resource(ctx->ifc_dev, SYS_RES_MEMORY, 4586 rman_get_rid(ctx->ifc_msix_mem), ctx->ifc_msix_mem); 4587 ctx->ifc_msix_mem = NULL; 4588 } 4589} 4590 4591int 4592iflib_device_detach(device_t dev) 4593{ 4594 if_ctx_t ctx = device_get_softc(dev); 4595 4596 return (iflib_device_deregister(ctx)); 4597} 4598 4599int 4600iflib_device_suspend(device_t dev) 4601{ 4602 if_ctx_t ctx = device_get_softc(dev); 4603 4604 CTX_LOCK(ctx); 4605 IFDI_SUSPEND(ctx); 4606 CTX_UNLOCK(ctx); 4607 4608 return bus_generic_suspend(dev); 4609} 4610int 4611iflib_device_shutdown(device_t dev) 4612{ 4613 if_ctx_t ctx = device_get_softc(dev); 4614 4615 CTX_LOCK(ctx); 4616 IFDI_SHUTDOWN(ctx); 4617 CTX_UNLOCK(ctx); 4618 4619 return bus_generic_suspend(dev); 4620} 4621 4622 4623int 4624iflib_device_resume(device_t dev) 4625{ 4626 if_ctx_t ctx = device_get_softc(dev); 4627 iflib_txq_t txq = ctx->ifc_txqs; 4628 4629 CTX_LOCK(ctx); 4630 IFDI_RESUME(ctx); 4631 iflib_if_init_locked(ctx); 4632 CTX_UNLOCK(ctx); 4633 for (int i = 0; i < NTXQSETS(ctx); i++, txq++) 4634 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET); 4635 4636 return (bus_generic_resume(dev)); 4637} 4638 4639int 4640iflib_device_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params) 4641{ 4642 int error; 4643 if_ctx_t ctx = device_get_softc(dev); 4644 4645 CTX_LOCK(ctx); 4646 error = IFDI_IOV_INIT(ctx, num_vfs, params); 4647 CTX_UNLOCK(ctx); 4648 4649 return (error); 4650} 4651 4652void 4653iflib_device_iov_uninit(device_t dev) 4654{ 4655 if_ctx_t ctx = device_get_softc(dev); 4656 4657 CTX_LOCK(ctx); 4658 IFDI_IOV_UNINIT(ctx); 4659 CTX_UNLOCK(ctx); 4660} 4661 4662int 4663iflib_device_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params) 4664{ 4665 int error; 4666 if_ctx_t ctx = device_get_softc(dev); 4667 4668 CTX_LOCK(ctx); 4669 error = IFDI_IOV_VF_ADD(ctx, vfnum, params); 4670 CTX_UNLOCK(ctx); 4671 4672 return (error); 4673} 4674 4675/********************************************************************* 4676 * 4677 * MODULE FUNCTION DEFINITIONS 4678 * 4679 **********************************************************************/ 4680 4681/* 4682 * - Start a fast taskqueue thread for each core 4683 * - Start a taskqueue for control operations 4684 */ 4685static int 4686iflib_module_init(void) 4687{ 4688 return (0); 4689} 4690 4691static int 4692iflib_module_event_handler(module_t mod, int what, void *arg) 4693{ 4694 int err; 4695 4696 switch (what) { 4697 case MOD_LOAD: 4698 if ((err = iflib_module_init()) != 0) 4699 return (err); 4700 break; 4701 case MOD_UNLOAD: 4702 return (EBUSY); 4703 default: 4704 return (EOPNOTSUPP); 4705 } 4706 4707 
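	/*
	 * (Note that MOD_UNLOAD unconditionally reports EBUSY above: once
	 * iflib is loaded it stays loaded, since drivers may have
	 * registered against it.)
	 */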
return (0); 4708} 4709 4710/********************************************************************* 4711 * 4712 * PUBLIC FUNCTION DEFINITIONS 4713 * ordered as in iflib.h 4714 * 4715 **********************************************************************/ 4716 4717 4718static void 4719_iflib_assert(if_shared_ctx_t sctx) 4720{ 4721 MPASS(sctx->isc_tx_maxsize); 4722 MPASS(sctx->isc_tx_maxsegsize); 4723 4724 MPASS(sctx->isc_rx_maxsize); 4725 MPASS(sctx->isc_rx_nsegments); 4726 MPASS(sctx->isc_rx_maxsegsize); 4727 4728 MPASS(sctx->isc_nrxd_min[0]); 4729 MPASS(sctx->isc_nrxd_max[0]); 4730 MPASS(sctx->isc_nrxd_default[0]); 4731 MPASS(sctx->isc_ntxd_min[0]); 4732 MPASS(sctx->isc_ntxd_max[0]); 4733 MPASS(sctx->isc_ntxd_default[0]); 4734} 4735 4736static void 4737_iflib_pre_assert(if_softc_ctx_t scctx) 4738{ 4739 4740 MPASS(scctx->isc_txrx->ift_txd_encap); 4741 MPASS(scctx->isc_txrx->ift_txd_flush); 4742 MPASS(scctx->isc_txrx->ift_txd_credits_update); 4743 MPASS(scctx->isc_txrx->ift_rxd_available); 4744 MPASS(scctx->isc_txrx->ift_rxd_pkt_get); 4745 MPASS(scctx->isc_txrx->ift_rxd_refill); 4746 MPASS(scctx->isc_txrx->ift_rxd_flush); 4747} 4748 4749static int 4750iflib_register(if_ctx_t ctx) 4751{ 4752 if_shared_ctx_t sctx = ctx->ifc_sctx; 4753 driver_t *driver = sctx->isc_driver; 4754 device_t dev = ctx->ifc_dev; 4755 if_t ifp; 4756 4757 _iflib_assert(sctx); 4758 4759 CTX_LOCK_INIT(ctx, device_get_nameunit(ctx->ifc_dev)); 4760 4761 ifp = ctx->ifc_ifp = if_alloc(IFT_ETHER); 4762 if (ifp == NULL) { 4763 device_printf(dev, "can not allocate ifnet structure\n"); 4764 return (ENOMEM); 4765 } 4766 4767 /* 4768 * Initialize our context's device specific methods 4769 */ 4770 kobj_init((kobj_t) ctx, (kobj_class_t) driver); 4771 kobj_class_compile((kobj_class_t) driver); 4772 driver->refs++; 4773 4774 if_initname(ifp, device_get_name(dev), device_get_unit(dev)); 4775 if_setsoftc(ifp, ctx); 4776 if_setdev(ifp, dev); 4777 if_setinitfn(ifp, iflib_if_init); 4778 if_setioctlfn(ifp, iflib_if_ioctl); 4779 if_settransmitfn(ifp, iflib_if_transmit); 4780 if_setqflushfn(ifp, iflib_if_qflush); 4781 if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); 4782 4783 ctx->ifc_vlan_attach_event = 4784 EVENTHANDLER_REGISTER(vlan_config, iflib_vlan_register, ctx, 4785 EVENTHANDLER_PRI_FIRST); 4786 ctx->ifc_vlan_detach_event = 4787 EVENTHANDLER_REGISTER(vlan_unconfig, iflib_vlan_unregister, ctx, 4788 EVENTHANDLER_PRI_FIRST); 4789 4790 ifmedia_init(&ctx->ifc_media, IFM_IMASK, 4791 iflib_media_change, iflib_media_status); 4792 4793 return (0); 4794} 4795 4796 4797static int 4798iflib_queues_alloc(if_ctx_t ctx) 4799{ 4800 if_shared_ctx_t sctx = ctx->ifc_sctx; 4801 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 4802 device_t dev = ctx->ifc_dev; 4803 int nrxqsets = scctx->isc_nrxqsets; 4804 int ntxqsets = scctx->isc_ntxqsets; 4805 iflib_txq_t txq; 4806 iflib_rxq_t rxq; 4807 iflib_fl_t fl = NULL; 4808 int i, j, cpu, err, txconf, rxconf; 4809 iflib_dma_info_t ifdip; 4810 uint32_t *rxqsizes = scctx->isc_rxqsizes; 4811 uint32_t *txqsizes = scctx->isc_txqsizes; 4812 uint8_t nrxqs = sctx->isc_nrxqs; 4813 uint8_t ntxqs = sctx->isc_ntxqs; 4814 int nfree_lists = sctx->isc_nfl ? 
sctx->isc_nfl : 1; 4815 caddr_t *vaddrs; 4816 uint64_t *paddrs; 4817 4818 KASSERT(ntxqs > 0, ("number of queues per qset must be at least 1")); 4819 KASSERT(nrxqs > 0, ("number of queues per qset must be at least 1")); 4820 4821 /* Allocate the TX ring struct memory */ 4822 if (!(ctx->ifc_txqs = 4823 (iflib_txq_t) malloc(sizeof(struct iflib_txq) * 4824 ntxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 4825 device_printf(dev, "Unable to allocate TX ring memory\n"); 4826 err = ENOMEM; 4827 goto fail; 4828 } 4829 4830 /* Now allocate the RX */ 4831 if (!(ctx->ifc_rxqs = 4832 (iflib_rxq_t) malloc(sizeof(struct iflib_rxq) * 4833 nrxqsets, M_IFLIB, M_NOWAIT | M_ZERO))) { 4834 device_printf(dev, "Unable to allocate RX ring memory\n"); 4835 err = ENOMEM; 4836 goto rx_fail; 4837 } 4838 4839 txq = ctx->ifc_txqs; 4840 rxq = ctx->ifc_rxqs; 4841 4842 /* 4843 * XXX handle allocation failure 4844 */ 4845 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) { 4846 /* Set up some basics */ 4847 4848 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * ntxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 4849 device_printf(dev, "failed to allocate iflib_dma_info\n"); 4850 err = ENOMEM; 4851 goto err_tx_desc; 4852 } 4853 txq->ift_ifdi = ifdip; 4854 for (j = 0; j < ntxqs; j++, ifdip++) { 4855 if (iflib_dma_alloc(ctx, txqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 4856 device_printf(dev, "Unable to allocate Descriptor memory\n"); 4857 err = ENOMEM; 4858 goto err_tx_desc; 4859 } 4860 txq->ift_txd_size[j] = scctx->isc_txd_size[j]; 4861 bzero((void *)ifdip->idi_vaddr, txqsizes[j]); 4862 } 4863 txq->ift_ctx = ctx; 4864 txq->ift_id = i; 4865 if (sctx->isc_flags & IFLIB_HAS_TXCQ) { 4866 txq->ift_br_offset = 1; 4867 } else { 4868 txq->ift_br_offset = 0; 4869 } 4870 /* XXX fix this */ 4871 txq->ift_timer.c_cpu = cpu; 4872 4873 if (iflib_txsd_alloc(txq)) { 4874 device_printf(dev, "Critical Failure setting up TX buffers\n"); 4875 err = ENOMEM; 4876 goto err_tx_desc; 4877 } 4878 4879 /* Initialize the TX lock */ 4880 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout", 4881 device_get_nameunit(dev), txq->ift_id); 4882 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF); 4883 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0); 4884 4885 snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db", 4886 device_get_nameunit(dev), txq->ift_id); 4887 4888 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain, 4889 iflib_txq_can_drain, M_IFLIB, M_WAITOK); 4890 if (err) { 4891 /* XXX free any allocated rings */ 4892 device_printf(dev, "Unable to allocate buf_ring\n"); 4893 goto err_tx_desc; 4894 } 4895 } 4896 4897 for (rxconf = i = 0; i < nrxqsets; i++, rxconf++, rxq++) { 4898 /* Set up some basics */ 4899 4900 if ((ifdip = malloc(sizeof(struct iflib_dma_info) * nrxqs, M_IFLIB, M_WAITOK|M_ZERO)) == NULL) { 4901 device_printf(dev, "failed to allocate iflib_dma_info\n"); 4902 err = ENOMEM; 4903 goto err_tx_desc; 4904 } 4905 4906 rxq->ifr_ifdi = ifdip; 4907 /* XXX this needs to be changed if #rx queues != #tx queues */ 4908 rxq->ifr_ntxqirq = 1; 4909 rxq->ifr_txqid[0] = i; 4910 for (j = 0; j < nrxqs; j++, ifdip++) { 4911 if (iflib_dma_alloc(ctx, rxqsizes[j], ifdip, BUS_DMA_NOWAIT)) { 4912 device_printf(dev, "Unable to allocate Descriptor memory\n"); 4913 err = ENOMEM; 4914 goto err_tx_desc; 4915 } 4916 bzero((void *)ifdip->idi_vaddr, rxqsizes[j]); 4917 } 4918 rxq->ifr_ctx = ctx; 4919 rxq->ifr_id = i; 4920 if (sctx->isc_flags & IFLIB_HAS_RXCQ) { 4921 rxq->ifr_fl_offset = 1; 4922 } else { 
4923 rxq->ifr_fl_offset = 0; 4924 } 4925 rxq->ifr_nfl = nfree_lists; 4926 if (!(fl = 4927 (iflib_fl_t) malloc(sizeof(struct iflib_fl) * nfree_lists, M_IFLIB, M_NOWAIT | M_ZERO))) { 4928 device_printf(dev, "Unable to allocate free list memory\n"); 4929 err = ENOMEM; 4930 goto err_tx_desc; 4931 } 4932 rxq->ifr_fl = fl; 4933 for (j = 0; j < nfree_lists; j++) { 4934 fl[j].ifl_rxq = rxq; 4935 fl[j].ifl_id = j; 4936 fl[j].ifl_ifdi = &rxq->ifr_ifdi[j + rxq->ifr_fl_offset]; 4937 fl[j].ifl_rxd_size = scctx->isc_rxd_size[j]; 4938 } 4939 /* Allocate receive buffers for the ring*/ 4940 if (iflib_rxsd_alloc(rxq)) { 4941 device_printf(dev, 4942 "Critical Failure setting up receive buffers\n"); 4943 err = ENOMEM; 4944 goto err_rx_desc; 4945 } 4946 4947 for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) 4948 fl->ifl_rx_bitmap = bit_alloc(fl->ifl_size, M_IFLIB, M_WAITOK|M_ZERO); 4949 } 4950 4951 /* TXQs */ 4952 vaddrs = malloc(sizeof(caddr_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 4953 paddrs = malloc(sizeof(uint64_t)*ntxqsets*ntxqs, M_IFLIB, M_WAITOK); 4954 for (i = 0; i < ntxqsets; i++) { 4955 iflib_dma_info_t di = ctx->ifc_txqs[i].ift_ifdi; 4956 4957 for (j = 0; j < ntxqs; j++, di++) { 4958 vaddrs[i*ntxqs + j] = di->idi_vaddr; 4959 paddrs[i*ntxqs + j] = di->idi_paddr; 4960 } 4961 } 4962 if ((err = IFDI_TX_QUEUES_ALLOC(ctx, vaddrs, paddrs, ntxqs, ntxqsets)) != 0) { 4963 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 4964 iflib_tx_structures_free(ctx); 4965 free(vaddrs, M_IFLIB); 4966 free(paddrs, M_IFLIB); 4967 goto err_rx_desc; 4968 } 4969 free(vaddrs, M_IFLIB); 4970 free(paddrs, M_IFLIB); 4971 4972 /* RXQs */ 4973 vaddrs = malloc(sizeof(caddr_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 4974 paddrs = malloc(sizeof(uint64_t)*nrxqsets*nrxqs, M_IFLIB, M_WAITOK); 4975 for (i = 0; i < nrxqsets; i++) { 4976 iflib_dma_info_t di = ctx->ifc_rxqs[i].ifr_ifdi; 4977 4978 for (j = 0; j < nrxqs; j++, di++) { 4979 vaddrs[i*nrxqs + j] = di->idi_vaddr; 4980 paddrs[i*nrxqs + j] = di->idi_paddr; 4981 } 4982 } 4983 if ((err = IFDI_RX_QUEUES_ALLOC(ctx, vaddrs, paddrs, nrxqs, nrxqsets)) != 0) { 4984 device_printf(ctx->ifc_dev, "device queue allocation failed\n"); 4985 iflib_tx_structures_free(ctx); 4986 free(vaddrs, M_IFLIB); 4987 free(paddrs, M_IFLIB); 4988 goto err_rx_desc; 4989 } 4990 free(vaddrs, M_IFLIB); 4991 free(paddrs, M_IFLIB); 4992 4993 return (0); 4994 4995/* XXX handle allocation failure changes */ 4996err_rx_desc: 4997err_tx_desc: 4998rx_fail: 4999 if (ctx->ifc_rxqs != NULL) 5000 free(ctx->ifc_rxqs, M_IFLIB); 5001 ctx->ifc_rxqs = NULL; 5002 if (ctx->ifc_txqs != NULL) 5003 free(ctx->ifc_txqs, M_IFLIB); 5004 ctx->ifc_txqs = NULL; 5005fail: 5006 return (err); 5007} 5008 5009static int 5010iflib_tx_structures_setup(if_ctx_t ctx) 5011{ 5012 iflib_txq_t txq = ctx->ifc_txqs; 5013 int i; 5014 5015 for (i = 0; i < NTXQSETS(ctx); i++, txq++) 5016 iflib_txq_setup(txq); 5017 5018 return (0); 5019} 5020 5021static void 5022iflib_tx_structures_free(if_ctx_t ctx) 5023{ 5024 iflib_txq_t txq = ctx->ifc_txqs; 5025 int i, j; 5026 5027 for (i = 0; i < NTXQSETS(ctx); i++, txq++) { 5028 iflib_txq_destroy(txq); 5029 for (j = 0; j < ctx->ifc_nhwtxqs; j++) 5030 iflib_dma_free(&txq->ift_ifdi[j]); 5031 } 5032 free(ctx->ifc_txqs, M_IFLIB); 5033 ctx->ifc_txqs = NULL; 5034 IFDI_QUEUES_FREE(ctx); 5035} 5036 5037/********************************************************************* 5038 * 5039 * Initialize all receive rings. 
5040 * 5041 **********************************************************************/ 5042static int 5043iflib_rx_structures_setup(if_ctx_t ctx) 5044{ 5045 iflib_rxq_t rxq = ctx->ifc_rxqs; 5046 int q; 5047#if defined(INET6) || defined(INET) 5048 int i, err; 5049#endif 5050 5051 for (q = 0; q < ctx->ifc_softc_ctx.isc_nrxqsets; q++, rxq++) { 5052#if defined(INET6) || defined(INET) 5053 tcp_lro_free(&rxq->ifr_lc); 5054 if ((err = tcp_lro_init_args(&rxq->ifr_lc, ctx->ifc_ifp, 5055 TCP_LRO_ENTRIES, min(1024, 5056 ctx->ifc_softc_ctx.isc_nrxd[rxq->ifr_fl_offset]))) != 0) { 5057 device_printf(ctx->ifc_dev, "LRO Initialization failed!\n"); 5058 goto fail; 5059 } 5060 rxq->ifr_lro_enabled = TRUE; 5061#endif 5062 IFDI_RXQ_SETUP(ctx, rxq->ifr_id); 5063 } 5064 return (0); 5065#if defined(INET6) || defined(INET) 5066fail: 5067 /* 5068 * Free RX software descriptors allocated so far; we will only handle 5069 * the rings that completed, as the failing case will have 5070 * cleaned up for itself. 'q' failed, so it's the terminus. 5071 */ 5072 rxq = ctx->ifc_rxqs; 5073 for (i = 0; i < q; ++i, rxq++) { 5074 iflib_rx_sds_free(rxq); 5075 rxq->ifr_cq_gen = rxq->ifr_cq_cidx = rxq->ifr_cq_pidx = 0; 5076 } 5077 return (err); 5078#endif 5079} 5080 5081/********************************************************************* 5082 * 5083 * Free all receive rings. 5084 * 5085 **********************************************************************/ 5086static void 5087iflib_rx_structures_free(if_ctx_t ctx) 5088{ 5089 iflib_rxq_t rxq = ctx->ifc_rxqs; 5090 5091 for (int i = 0; i < ctx->ifc_softc_ctx.isc_nrxqsets; i++, rxq++) { 5092 iflib_rx_sds_free(rxq); 5093 } 5094 free(ctx->ifc_rxqs, M_IFLIB); 5095 ctx->ifc_rxqs = NULL; 5096} 5097 5098static int 5099iflib_qset_structures_setup(if_ctx_t ctx) 5100{ 5101 int err; 5102 5103 /* 5104 * It is expected that the caller takes care of freeing queues if this 5105 * fails. 5106 */ 5107 if ((err = iflib_tx_structures_setup(ctx)) != 0) { 5108 device_printf(ctx->ifc_dev, "iflib_tx_structures_setup failed: %d\n", err); 5109 return (err); 5110 } 5111 5112 if ((err = iflib_rx_structures_setup(ctx)) != 0) 5113 device_printf(ctx->ifc_dev, "iflib_rx_structures_setup failed: %d\n", err); 5114 5115 return (err); 5116} 5117 5118int 5119iflib_irq_alloc(if_ctx_t ctx, if_irq_t irq, int rid, 5120 driver_filter_t filter, void *filter_arg, driver_intr_t handler, void *arg, char *name) 5121{ 5122 5123 return (_iflib_irq_alloc(ctx, irq, rid, filter, handler, arg, name)); 5124} 5125 5126#ifdef SMP 5127static int 5128find_nth(if_ctx_t ctx, int qid) 5129{ 5130 cpuset_t cpus; 5131 int i, cpuid, eqid, count; 5132 5133 CPU_COPY(&ctx->ifc_cpus, &cpus); 5134 count = CPU_COUNT(&cpus); 5135 eqid = qid % count; 5136 /* clear up to the qid'th bit */ 5137 for (i = 0; i < eqid; i++) { 5138 cpuid = CPU_FFS(&cpus); 5139 MPASS(cpuid != 0); 5140 CPU_CLR(cpuid-1, &cpus); 5141 } 5142 cpuid = CPU_FFS(&cpus); 5143 MPASS(cpuid != 0); 5144 return (cpuid-1); 5145} 5146 5147#ifdef SCHED_ULE 5148extern struct cpu_group *cpu_top; /* CPU topology */ 5149 5150static int 5151find_child_with_core(int cpu, struct cpu_group *grp) 5152{ 5153 int i; 5154 5155 if (grp->cg_children == 0) 5156 return -1; 5157 5158 MPASS(grp->cg_child); 5159 for (i = 0; i < grp->cg_children; i++) { 5160 if (CPU_ISSET(cpu, &grp->cg_child[i].cg_mask)) 5161 return i; 5162 } 5163 5164 return -1; 5165} 5166 5167/* 5168 * Find the nth "close" core to the specified core. 5169 * "close" is defined as the deepest level that shares 5170 * at least an L2 cache. 
With threads, this will be 5171 threads on the same core. If the shared cache is L3 5172 * or higher, simply returns the same core. 5173 */ 5174static int 5175find_close_core(int cpu, int core_offset) 5176{ 5177 struct cpu_group *grp; 5178 int i; 5179 int fcpu; 5180 cpuset_t cs; 5181 5182 grp = cpu_top; 5183 if (grp == NULL) 5184 return cpu; 5185 i = 0; 5186 while ((i = find_child_with_core(cpu, grp)) != -1) { 5187 /* If the child only has one cpu, don't descend */ 5188 if (grp->cg_child[i].cg_count <= 1) 5189 break; 5190 grp = &grp->cg_child[i]; 5191 } 5192 5193 /* If they don't share at least an L2 cache, use the same CPU */ 5194 if (grp->cg_level > CG_SHARE_L2 || grp->cg_level == CG_SHARE_NONE) 5195 return cpu; 5196 5197 /* Now pick one */ 5198 CPU_COPY(&grp->cg_mask, &cs); 5199 5200 /* Add the selected CPU offset to core offset. */ 5201 for (i = 0; (fcpu = CPU_FFS(&cs)) != 0; i++) { 5202 if (fcpu - 1 == cpu) 5203 break; 5204 CPU_CLR(fcpu - 1, &cs); 5205 } 5206 MPASS(fcpu); 5207 5208 core_offset += i; 5209 5210 CPU_COPY(&grp->cg_mask, &cs); 5211 for (i = core_offset % grp->cg_count; i > 0; i--) { 5212 MPASS(CPU_FFS(&cs)); 5213 CPU_CLR(CPU_FFS(&cs) - 1, &cs); 5214 } 5215 MPASS(CPU_FFS(&cs)); 5216 return CPU_FFS(&cs) - 1; 5217} 5218#else 5219static int 5220find_close_core(int cpu, int core_offset __unused) 5221{ 5222 return cpu; 5223} 5224#endif 5225 5226static int 5227get_core_offset(if_ctx_t ctx, iflib_intr_type_t type, int qid) 5228{ 5229 switch (type) { 5230 case IFLIB_INTR_TX: 5231 /* TX queues get cores which share at least an L2 cache with the corresponding RX queue */ 5232 /* XXX handle multiple RX threads per core and more than two cores per L2 group */ 5233 return qid / CPU_COUNT(&ctx->ifc_cpus) + 1; 5234 case IFLIB_INTR_RX: 5235 case IFLIB_INTR_RXTX: 5236 /* RX queues get the specified core */ 5237 return qid / CPU_COUNT(&ctx->ifc_cpus); 5238 default: 5239 return -1; 5240 } 5241} 5242#else 5243#define get_core_offset(ctx, type, qid) CPU_FIRST() 5244#define find_close_core(cpuid, tid) CPU_FIRST() 5245#define find_nth(ctx, gid) CPU_FIRST() 5246#endif 5247 5248/* Just to avoid copy/paste */ 5249static inline int 5250iflib_irq_set_affinity(if_ctx_t ctx, int irq, iflib_intr_type_t type, int qid, 5251 struct grouptask *gtask, struct taskqgroup *tqg, void *uniq, char *name) 5252{ 5253 int cpuid; 5254 int err, tid; 5255 5256 cpuid = find_nth(ctx, qid); 5257 tid = get_core_offset(ctx, type, qid); 5258 MPASS(tid >= 0); 5259 cpuid = find_close_core(cpuid, tid); 5260 err = taskqgroup_attach_cpu(tqg, gtask, uniq, cpuid, irq, name); 5261 if (err) { 5262 device_printf(ctx->ifc_dev, "taskqgroup_attach_cpu failed %d\n", err); 5263 return (err); 5264 } 5265#ifdef notyet 5266 if (cpuid > ctx->ifc_cpuid_highest) 5267 ctx->ifc_cpuid_highest = cpuid; 5268#endif 5269 return 0; 5270} 5271 5272int 5273iflib_irq_alloc_generic(if_ctx_t ctx, if_irq_t irq, int rid, 5274 iflib_intr_type_t type, driver_filter_t *filter, 5275 void *filter_arg, int qid, char *name) 5276{ 5277 struct grouptask *gtask; 5278 struct taskqgroup *tqg; 5279 iflib_filter_info_t info; 5280 gtask_fn_t *fn; 5281 int tqrid, err; 5282 driver_filter_t *intr_fast; 5283 void *q; 5284 5285 info = &ctx->ifc_filter_info; 5286 tqrid = rid; 5287 5288 switch (type) { 5289 /* XXX merge tx/rx for netmap? 
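	 * Typical driver usage, from an IFDI_MSIX_INTR_ASSIGN
	 * implementation (sketch only; "que" and exdrv_msix_que are the
	 * driver's own names, hypothetical here):
	 *
	 *	snprintf(buf, sizeof(buf), "rxq%d", i);
	 *	err = iflib_irq_alloc_generic(ctx, &que->que_irq, i + 1,
	 *	    IFLIB_INTR_RXTX, exdrv_msix_que, que, i, buf);
	 *
	 * with MSI-X resource IDs starting at 1.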
*/ 5290 case IFLIB_INTR_TX: 5291 q = &ctx->ifc_txqs[qid]; 5292 info = &ctx->ifc_txqs[qid].ift_filter_info; 5293 gtask = &ctx->ifc_txqs[qid].ift_task; 5294 tqg = qgroup_if_io_tqg; 5295 fn = _task_fn_tx; 5296 intr_fast = iflib_fast_intr; 5297 GROUPTASK_INIT(gtask, 0, fn, q); 5298 break; 5299 case IFLIB_INTR_RX: 5300 q = &ctx->ifc_rxqs[qid]; 5301 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5302 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5303 tqg = qgroup_if_io_tqg; 5304 fn = _task_fn_rx; 5305 intr_fast = iflib_fast_intr; 5306 GROUPTASK_INIT(gtask, 0, fn, q); 5307 break; 5308 case IFLIB_INTR_RXTX: 5309 q = &ctx->ifc_rxqs[qid]; 5310 info = &ctx->ifc_rxqs[qid].ifr_filter_info; 5311 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5312 tqg = qgroup_if_io_tqg; 5313 fn = _task_fn_rx; 5314 intr_fast = iflib_fast_intr_rxtx; 5315 GROUPTASK_INIT(gtask, 0, fn, q); 5316 break; 5317 case IFLIB_INTR_ADMIN: 5318 q = ctx; 5319 tqrid = -1; 5320 info = &ctx->ifc_filter_info; 5321 gtask = &ctx->ifc_admin_task; 5322 tqg = qgroup_if_config_tqg; 5323 fn = _task_fn_admin; 5324 intr_fast = iflib_fast_intr_ctx; 5325 break; 5326 default: 5327 panic("unknown net intr type"); 5328 } 5329 5330 info->ifi_filter = filter; 5331 info->ifi_filter_arg = filter_arg; 5332 info->ifi_task = gtask; 5333 info->ifi_ctx = q; 5334 5335 err = _iflib_irq_alloc(ctx, irq, rid, intr_fast, NULL, info, name); 5336 if (err != 0) { 5337 device_printf(ctx->ifc_dev, "_iflib_irq_alloc failed %d\n", err); 5338 return (err); 5339 } 5340 if (type == IFLIB_INTR_ADMIN) 5341 return (0); 5342 5343 if (tqrid != -1) { 5344 err = iflib_irq_set_affinity(ctx, rman_get_start(irq->ii_res), type, qid, gtask, tqg, q, name); 5345 if (err) 5346 return (err); 5347 } else { 5348 taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); 5349 } 5350 5351 return (0); 5352} 5353 5354void 5355iflib_softirq_alloc_generic(if_ctx_t ctx, if_irq_t irq, iflib_intr_type_t type, void *arg, int qid, char *name) 5356{ 5357 struct grouptask *gtask; 5358 struct taskqgroup *tqg; 5359 gtask_fn_t *fn; 5360 void *q; 5361 int irq_num = -1; 5362 int err; 5363 5364 switch (type) { 5365 case IFLIB_INTR_TX: 5366 q = &ctx->ifc_txqs[qid]; 5367 gtask = &ctx->ifc_txqs[qid].ift_task; 5368 tqg = qgroup_if_io_tqg; 5369 fn = _task_fn_tx; 5370 if (irq != NULL) 5371 irq_num = rman_get_start(irq->ii_res); 5372 break; 5373 case IFLIB_INTR_RX: 5374 q = &ctx->ifc_rxqs[qid]; 5375 gtask = &ctx->ifc_rxqs[qid].ifr_task; 5376 tqg = qgroup_if_io_tqg; 5377 fn = _task_fn_rx; 5378 if (irq != NULL) 5379 irq_num = rman_get_start(irq->ii_res); 5380 break; 5381 case IFLIB_INTR_IOV: 5382 q = ctx; 5383 gtask = &ctx->ifc_vflr_task; 5384 tqg = qgroup_if_config_tqg; 5385 fn = _task_fn_iov; 5386 break; 5387 default: 5388 panic("unknown net intr type"); 5389 } 5390 GROUPTASK_INIT(gtask, 0, fn, q); 5391 if (irq_num != -1) { 5392 err = iflib_irq_set_affinity(ctx, irq_num, type, qid, gtask, tqg, q, name); 5393 if (err) 5394 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5395 } 5396 else { 5397 taskqgroup_attach(tqg, gtask, q, irq_num, name); 5398 } 5399} 5400 5401void 5402iflib_irq_free(if_ctx_t ctx, if_irq_t irq) 5403{ 5404 if (irq->ii_tag) 5405 bus_teardown_intr(ctx->ifc_dev, irq->ii_res, irq->ii_tag); 5406 5407 if (irq->ii_res) 5408 bus_release_resource(ctx->ifc_dev, SYS_RES_IRQ, irq->ii_rid, irq->ii_res); 5409} 5410 5411static int 5412iflib_legacy_setup(if_ctx_t ctx, driver_filter_t filter, void *filter_arg, int *rid, char *name) 5413{ 5414 iflib_txq_t txq = ctx->ifc_txqs; 5415 iflib_rxq_t rxq = ctx->ifc_rxqs; 5416 if_irq_t irq 
= &ctx->ifc_legacy_irq; 5417 iflib_filter_info_t info; 5418 struct grouptask *gtask; 5419 struct taskqgroup *tqg; 5420 gtask_fn_t *fn; 5421 int tqrid; 5422 void *q; 5423 int err; 5424 5425 q = &ctx->ifc_rxqs[0]; 5426 info = &rxq[0].ifr_filter_info; 5427 gtask = &rxq[0].ifr_task; 5428 tqg = qgroup_if_io_tqg; 5429 tqrid = irq->ii_rid = *rid; 5430 fn = _task_fn_rx; 5431 5432 ctx->ifc_flags |= IFC_LEGACY; 5433 info->ifi_filter = filter; 5434 info->ifi_filter_arg = filter_arg; 5435 info->ifi_task = gtask; 5436 5437 /* We allocate a single interrupt resource */ 5438 if ((err = _iflib_irq_alloc(ctx, irq, tqrid, iflib_fast_intr_ctx, NULL, info, name)) != 0) 5439 return (err); 5440 GROUPTASK_INIT(gtask, 0, fn, q); 5441 taskqgroup_attach(tqg, gtask, q, rman_get_start(irq->ii_res), name); 5442 5443 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq); 5444 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx"); 5445 return (0); 5446} 5447 5448void 5449iflib_led_create(if_ctx_t ctx) 5450{ 5451 5452 ctx->ifc_led_dev = led_create(iflib_led_func, ctx, 5453 device_get_nameunit(ctx->ifc_dev)); 5454} 5455 5456void 5457iflib_tx_intr_deferred(if_ctx_t ctx, int txqid) 5458{ 5459 5460 GROUPTASK_ENQUEUE(&ctx->ifc_txqs[txqid].ift_task); 5461} 5462 5463void 5464iflib_rx_intr_deferred(if_ctx_t ctx, int rxqid) 5465{ 5466 5467 GROUPTASK_ENQUEUE(&ctx->ifc_rxqs[rxqid].ifr_task); 5468} 5469 5470void 5471iflib_admin_intr_deferred(if_ctx_t ctx) 5472{ 5473#ifdef INVARIANTS 5474 struct grouptask *gtask; 5475 5476 gtask = &ctx->ifc_admin_task; 5477 MPASS(gtask != NULL && gtask->gt_taskqueue != NULL); 5478#endif 5479 5480 GROUPTASK_ENQUEUE(&ctx->ifc_admin_task); 5481} 5482 5483void 5484iflib_iov_intr_deferred(if_ctx_t ctx) 5485{ 5486 5487 GROUPTASK_ENQUEUE(&ctx->ifc_vflr_task); 5488} 5489 5490void 5491iflib_io_tqg_attach(struct grouptask *gt, void *uniq, int cpu, char *name) 5492{ 5493 5494 taskqgroup_attach_cpu(qgroup_if_io_tqg, gt, uniq, cpu, -1, name); 5495} 5496 5497void 5498iflib_config_gtask_init(if_ctx_t ctx, struct grouptask *gtask, gtask_fn_t *fn, 5499 char *name) 5500{ 5501 5502 GROUPTASK_INIT(gtask, 0, fn, ctx); 5503 taskqgroup_attach(qgroup_if_config_tqg, gtask, gtask, -1, name); 5504} 5505 5506void 5507iflib_config_gtask_deinit(struct grouptask *gtask) 5508{ 5509 5510 taskqgroup_detach(qgroup_if_config_tqg, gtask); 5511} 5512 5513void 5514iflib_link_state_change(if_ctx_t ctx, int link_state, uint64_t baudrate) 5515{ 5516 if_t ifp = ctx->ifc_ifp; 5517 iflib_txq_t txq = ctx->ifc_txqs; 5518 5519 if_setbaudrate(ifp, baudrate); 5520 if (baudrate >= IF_Gbps(10)) { 5521 STATE_LOCK(ctx); 5522 ctx->ifc_flags |= IFC_PREFETCH; 5523 STATE_UNLOCK(ctx); 5524 } 5525 /* If link down, disable watchdog */ 5526 if ((ctx->ifc_link_state == LINK_STATE_UP) && (link_state == LINK_STATE_DOWN)) { 5527 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++) 5528 txq->ift_qstatus = IFLIB_QUEUE_IDLE; 5529 } 5530 ctx->ifc_link_state = link_state; 5531 if_link_state_change(ifp, link_state); 5532} 5533 5534static int 5535iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq) 5536{ 5537 int credits; 5538#ifdef INVARIANTS 5539 int credits_pre = txq->ift_cidx_processed; 5540#endif 5541 5542 if (ctx->isc_txd_credits_update == NULL) 5543 return (0); 5544 5545 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0) 5546 return (0); 5547 5548 txq->ift_processed += credits; 5549 txq->ift_cidx_processed += credits; 5550 5551 MPASS(credits_pre + credits == 
txq->ift_cidx_processed); 5552 if (txq->ift_cidx_processed >= txq->ift_size) 5553 txq->ift_cidx_processed -= txq->ift_size; 5554 return (credits); 5555} 5556 5557static int 5558iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget) 5559{ 5560 5561 return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx, 5562 budget)); 5563} 5564 5565void 5566iflib_add_int_delay_sysctl(if_ctx_t ctx, const char *name, 5567 const char *description, if_int_delay_info_t info, 5568 int offset, int value) 5569{ 5570 info->iidi_ctx = ctx; 5571 info->iidi_offset = offset; 5572 info->iidi_value = value; 5573 SYSCTL_ADD_PROC(device_get_sysctl_ctx(ctx->ifc_dev), 5574 SYSCTL_CHILDREN(device_get_sysctl_tree(ctx->ifc_dev)), 5575 OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, 5576 info, 0, iflib_sysctl_int_delay, "I", description); 5577} 5578 5579struct mtx * 5580iflib_ctx_lock_get(if_ctx_t ctx) 5581{ 5582 5583 return (&ctx->ifc_ctx_mtx); 5584} 5585 5586static int 5587iflib_msix_init(if_ctx_t ctx) 5588{ 5589 device_t dev = ctx->ifc_dev; 5590 if_shared_ctx_t sctx = ctx->ifc_sctx; 5591 if_softc_ctx_t scctx = &ctx->ifc_softc_ctx; 5592 int vectors, queues, rx_queues, tx_queues, queuemsgs, msgs; 5593 int iflib_num_tx_queues, iflib_num_rx_queues; 5594 int err, admincnt, bar; 5595 5596 iflib_num_tx_queues = ctx->ifc_sysctl_ntxqs; 5597 iflib_num_rx_queues = ctx->ifc_sysctl_nrxqs; 5598 5599 device_printf(dev, "msix_init qsets capped at %d\n", imax(scctx->isc_ntxqsets, scctx->isc_nrxqsets)); 5600 5601 bar = ctx->ifc_softc_ctx.isc_msix_bar; 5602 admincnt = sctx->isc_admin_intrcnt; 5603 /* Override by global tuneable */ 5604 { 5605 int i; 5606 size_t len = sizeof(i); 5607 err = kernel_sysctlbyname(curthread, "hw.pci.enable_msix", &i, &len, NULL, 0, NULL, 0); 5608 if (err == 0) { 5609 if (i == 0) 5610 goto msi; 5611 } 5612 else { 5613 device_printf(dev, "unable to read hw.pci.enable_msix.\n"); 5614 } 5615 } 5616 /* Override by tuneable */ 5617 if (scctx->isc_disable_msix) 5618 goto msi; 5619 5620 /* 5621 ** When used in a virtualized environment, 5622 ** the PCI BUSMASTER capability may not be set, 5623 ** so explicitly set it here and rewrite 5624 ** the ENABLE bit in the MSIX control register 5625 ** at this point to cause the host to 5626 ** successfully initialize us. 5627 */ 5628 { 5629 int msix_ctrl, rid; 5630 5631 pci_enable_busmaster(dev); 5632 rid = 0; 5633 if (pci_find_cap(dev, PCIY_MSIX, &rid) == 0 && rid != 0) { 5634 rid += PCIR_MSIX_CTRL; 5635 msix_ctrl = pci_read_config(dev, rid, 2); 5636 msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE; 5637 pci_write_config(dev, rid, msix_ctrl, 2); 5638 } else { 5639 device_printf(dev, "PCIY_MSIX capability not found; " 5640 "or rid %d == 0.\n", rid); 5641 goto msi; 5642 } 5643 } 5644 5645 /* 5646 * bar == -1 => "trust me I know what I'm doing" 5647 * Some drivers are for hardware that is so shoddily 5648 * documented that no one knows which bars are which, 5649 * so the developer has to map all bars. This hack 5650 * allows shoddy garbage to use msix in this framework. 
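	 * A driver opts in by leaving scctx->isc_msix_bar set to -1, in
	 * which case the bus_alloc_resource_any() mapping below is
	 * skipped and the driver's own BAR mappings are trusted.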
5651 */ 5652 if (bar != -1) { 5653 ctx->ifc_msix_mem = bus_alloc_resource_any(dev, 5654 SYS_RES_MEMORY, &bar, RF_ACTIVE); 5655 if (ctx->ifc_msix_mem == NULL) { 5656 /* May not be enabled */ 5657 device_printf(dev, "Unable to map MSIX table \n"); 5658 goto msi; 5659 } 5660 } 5661 /* First try MSI/X */ 5662 if ((msgs = pci_msix_count(dev)) == 0) { /* system has msix disabled */ 5663 device_printf(dev, "System has MSIX disabled \n"); 5664 bus_release_resource(dev, SYS_RES_MEMORY, 5665 bar, ctx->ifc_msix_mem); 5666 ctx->ifc_msix_mem = NULL; 5667 goto msi; 5668 } 5669#if IFLIB_DEBUG 5670 /* use only 1 qset in debug mode */ 5671 queuemsgs = min(msgs - admincnt, 1); 5672#else 5673 queuemsgs = msgs - admincnt; 5674#endif 5675#ifdef RSS 5676 queues = imin(queuemsgs, rss_getnumbuckets()); 5677#else 5678 queues = queuemsgs; 5679#endif 5680 queues = imin(CPU_COUNT(&ctx->ifc_cpus), queues); 5681 device_printf(dev, "pxm cpus: %d queue msgs: %d admincnt: %d\n", 5682 CPU_COUNT(&ctx->ifc_cpus), queuemsgs, admincnt); 5683#ifdef RSS 5684 /* If we're doing RSS, clamp at the number of RSS buckets */ 5685 if (queues > rss_getnumbuckets()) 5686 queues = rss_getnumbuckets(); 5687#endif 5688 if (iflib_num_rx_queues > 0 && iflib_num_rx_queues < queuemsgs - admincnt) 5689 rx_queues = iflib_num_rx_queues; 5690 else 5691 rx_queues = queues; 5692 5693 if (rx_queues > scctx->isc_nrxqsets) 5694 rx_queues = scctx->isc_nrxqsets; 5695 5696 /* 5697 * We want this to be all logical CPUs by default 5698 */ 5699 if (iflib_num_tx_queues > 0 && iflib_num_tx_queues < queues) 5700 tx_queues = iflib_num_tx_queues; 5701 else 5702 tx_queues = mp_ncpus; 5703 5704 if (tx_queues > scctx->isc_ntxqsets) 5705 tx_queues = scctx->isc_ntxqsets; 5706 5707 if (ctx->ifc_sysctl_qs_eq_override == 0) { 5708#ifdef INVARIANTS 5709 if (tx_queues != rx_queues) 5710 device_printf(dev, 5711 "queue equality override not set, capping rx_queues at %d and tx_queues at %d\n", 5712 min(rx_queues, tx_queues), min(rx_queues, tx_queues)); 5713#endif 5714 tx_queues = min(rx_queues, tx_queues); 5715 rx_queues = min(rx_queues, tx_queues); 5716 } 5717 5718 device_printf(dev, "using %d rx queues %d tx queues \n", rx_queues, tx_queues); 5719 5720 vectors = rx_queues + admincnt; 5721 if ((err = pci_alloc_msix(dev, &vectors)) == 0) { 5722 device_printf(dev, "Using MSI-X interrupts with %d vectors\n", 5723 vectors); 5724 scctx->isc_vectors = vectors; 5725 scctx->isc_nrxqsets = rx_queues; 5726 scctx->isc_ntxqsets = tx_queues; 5727 scctx->isc_intr = IFLIB_INTR_MSIX; 5728 5729 return (vectors); 5730 } else { 5731 device_printf(dev, 5732 "failed to allocate %d msix vectors, err: %d - using MSI\n", vectors, err); 5733 } 5734msi: 5735 vectors = pci_msi_count(dev); 5736 scctx->isc_nrxqsets = 1; 5737 scctx->isc_ntxqsets = 1; 5738 scctx->isc_vectors = vectors; 5739 if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0) { 5740 device_printf(dev,"Using an MSI interrupt\n"); 5741 scctx->isc_intr = IFLIB_INTR_MSI; 5742 } else { 5743 device_printf(dev,"Using a Legacy interrupt\n"); 5744 scctx->isc_intr = IFLIB_INTR_LEGACY; 5745 } 5746 5747 return (vectors); 5748} 5749 5750char * ring_states[] = { "IDLE", "BUSY", "STALLED", "ABDICATED" }; 5751 5752static int 5753mp_ring_state_handler(SYSCTL_HANDLER_ARGS) 5754{ 5755 int rc; 5756 uint16_t *state = ((uint16_t *)oidp->oid_arg1); 5757 struct sbuf *sb; 5758 char *ring_state = "UNKNOWN"; 5759 5760 /* XXX needed ? 
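	 * (sysctl_wire_old_buffer() wires the userland buffer so the sbuf
	 * drain cannot fault partway through; that mainly matters when a
	 * handler copies out while holding a non-sleepable lock, which
	 * this one does not; hence the question.)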
*/ 5761 rc = sysctl_wire_old_buffer(req, 0); 5762 MPASS(rc == 0); 5763 if (rc != 0) 5764 return (rc); 5765 sb = sbuf_new_for_sysctl(NULL, NULL, 80, req); 5766 MPASS(sb != NULL); 5767 if (sb == NULL) 5768 return (ENOMEM); 5769 if (state[3] <= 3) 5770 ring_state = ring_states[state[3]]; 5771 5772 sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s", 5773 state[0], state[1], state[2], ring_state); 5774 rc = sbuf_finish(sb); 5775 sbuf_delete(sb); 5776 return(rc); 5777} 5778 5779enum iflib_ndesc_handler { 5780 IFLIB_NTXD_HANDLER, 5781 IFLIB_NRXD_HANDLER, 5782}; 5783 5784static int 5785mp_ndesc_handler(SYSCTL_HANDLER_ARGS) 5786{ 5787 if_ctx_t ctx = (void *)arg1; 5788 enum iflib_ndesc_handler type = arg2; 5789 char buf[256] = {0}; 5790 qidx_t *ndesc; 5791 char *p, *next; 5792 int nqs, rc, i; 5793 5794 MPASS(type == IFLIB_NTXD_HANDLER || type == IFLIB_NRXD_HANDLER); 5795 5796 nqs = 8; 5797 switch(type) { 5798 case IFLIB_NTXD_HANDLER: 5799 ndesc = ctx->ifc_sysctl_ntxds; 5800 if (ctx->ifc_sctx) 5801 nqs = ctx->ifc_sctx->isc_ntxqs; 5802 break; 5803 case IFLIB_NRXD_HANDLER: 5804 ndesc = ctx->ifc_sysctl_nrxds; 5805 if (ctx->ifc_sctx) 5806 nqs = ctx->ifc_sctx->isc_nrxqs; 5807 break; 5808 } 5809 if (nqs == 0) 5810 nqs = 8; 5811 5812 for (i=0; i<8; i++) { 5813 if (i >= nqs) 5814 break; 5815 if (i) 5816 strcat(buf, ","); 5817 sprintf(strchr(buf, 0), "%d", ndesc[i]); 5818 } 5819 5820 rc = sysctl_handle_string(oidp, buf, sizeof(buf), req); 5821 if (rc || req->newptr == NULL) 5822 return rc; 5823 5824 for (i = 0, next = buf, p = strsep(&next, " ,"); i < 8 && p; 5825 i++, p = strsep(&next, " ,")) { 5826 ndesc[i] = strtoul(p, NULL, 10); 5827 } 5828 5829 return(rc); 5830} 5831 5832#define NAME_BUFLEN 32 5833static void 5834iflib_add_device_sysctl_pre(if_ctx_t ctx) 5835{ 5836 device_t dev = iflib_get_dev(ctx); 5837 struct sysctl_oid_list *child, *oid_list; 5838 struct sysctl_ctx_list *ctx_list; 5839 struct sysctl_oid *node; 5840 5841 ctx_list = device_get_sysctl_ctx(dev); 5842 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 5843 ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, "iflib", 5844 CTLFLAG_RD, NULL, "IFLIB fields"); 5845 oid_list = SYSCTL_CHILDREN(node); 5846 5847 SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version", 5848 CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0, 5849 "driver version"); 5850 5851 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs", 5852 CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0, 5853 "# of txqs to use, 0 => use default #"); 5854 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs", 5855 CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0, 5856 "# of rxqs to use, 0 => use default #"); 5857 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable", 5858 CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0, 5859 "permit #txq != #rxq"); 5860 SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix", 5861 CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0, 5862 "disable MSIX (default 0)"); 5863 SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget", 5864 CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0, 5865 "set the rx budget"); 5866 5867 /* XXX change for per-queue sizes */ 5868 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds", 5869 CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER, 5870 mp_ndesc_handler, "A", 5871 "list of # of tx descriptors to use, 0 = use default #"); 5872 SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds", 5873 CTLTYPE_STRING|CTLFLAG_RWTUN, ctx, 
#define NAME_BUFLEN 32
static void
iflib_add_device_sysctl_pre(if_ctx_t ctx)
{
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child, *oid_list;
	struct sysctl_ctx_list *ctx_list;
	struct sysctl_oid *node;

	ctx_list = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	ctx->ifc_sysctl_node = node = SYSCTL_ADD_NODE(ctx_list, child,
	    OID_AUTO, "iflib", CTLFLAG_RD, NULL, "IFLIB fields");
	oid_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
	    CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, 0,
	    "driver version");

	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
	    "# of txqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_nrxqs",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_nrxqs, 0,
	    "# of rxqs to use, 0 => use default #");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_qs_enable",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_qs_eq_override, 0,
	    "permit #txq != #rxq");
	SYSCTL_ADD_INT(ctx_list, oid_list, OID_AUTO, "disable_msix",
	    CTLFLAG_RWTUN, &ctx->ifc_softc_ctx.isc_disable_msix, 0,
	    "disable MSIX (default 0)");
	SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "rx_budget",
	    CTLFLAG_RWTUN, &ctx->ifc_sysctl_rx_budget, 0,
	    "set the rx budget");

	/* XXX change for per-queue sizes */
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_ntxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN, ctx, IFLIB_NTXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of tx descriptors to use, 0 = use default #");
	SYSCTL_ADD_PROC(ctx_list, oid_list, OID_AUTO, "override_nrxds",
	    CTLTYPE_STRING | CTLFLAG_RWTUN, ctx, IFLIB_NRXD_HANDLER,
	    mp_ndesc_handler, "A",
	    "list of # of rx descriptors to use, 0 = use default #");
}
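/*
 * The nodes registered above appear under dev.<driver>.<unit>.iflib.
 * Example (illustrative names):
 *
 *	# sysctl dev.<driver>.<unit>.iflib.override_nrxqs=4
 *
 * Because the override_* OIDs are CTLFLAG_RWTUN, they can also be set
 * as tunables from loader.conf(5) before the driver attaches; the queue
 * and descriptor counts are consulted at attach time, so runtime writes
 * take effect only on the next attach.
 */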
static void
iflib_add_device_sysctl_post(if_ctx_t ctx)
{
	if_shared_ctx_t sctx = ctx->ifc_sctx;
	if_softc_ctx_t scctx = &ctx->ifc_softc_ctx;
	device_t dev = iflib_get_dev(ctx);
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx_list;
	iflib_fl_t fl;
	iflib_txq_t txq;
	iflib_rxq_t rxq;
	int i, j;
	char namebuf[NAME_BUFLEN];
	char *qfmt;
	struct sysctl_oid *queue_node, *fl_node, *node;
	struct sysctl_oid_list *queue_list, *fl_list;

	ctx_list = device_get_sysctl_ctx(dev);
	node = ctx->ifc_sysctl_node;
	child = SYSCTL_CHILDREN(node);

	if (scctx->isc_ntxqsets > 100)
		qfmt = "txq%03d";
	else if (scctx->isc_ntxqsets > 10)
		qfmt = "txq%02d";
	else
		qfmt = "txq%d";
	for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
#if MEMORY_LOGGING
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_dequeued",
		    CTLFLAG_RD,
		    &txq->ift_dequeued, "total mbufs freed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_enqueued",
		    CTLFLAG_RD,
		    &txq->ift_enqueued, "total mbufs enqueued");
#endif
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag, "# of times m_defrag was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "m_pullups",
		    CTLFLAG_RD,
		    &txq->ift_pullups, "# of times m_pullup was called");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "mbuf_defrag_failed",
		    CTLFLAG_RD,
		    &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD,
		    &txq->ift_no_desc_avail, "# of times no descriptors were available");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "tx_map_failed",
		    CTLFLAG_RD,
		    &txq->ift_map_failed, "# of times dma map failed");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txd_encap_efbig",
		    CTLFLAG_RD,
		    &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "no_tx_dma_setup",
		    CTLFLAG_RD,
		    &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_pidx",
		    CTLFLAG_RD,
		    &txq->ift_pidx, 1, "Producer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx",
		    CTLFLAG_RD,
		    &txq->ift_cidx, 1, "Consumer Index");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_cidx_processed",
		    CTLFLAG_RD,
		    &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
		SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "txq_in_use",
		    CTLFLAG_RD,
		    &txq->ift_in_use, 1, "descriptors in use");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_processed",
		    CTLFLAG_RD,
		    &txq->ift_processed, "descriptors processed for clean");
		SYSCTL_ADD_QUAD(ctx_list, queue_list, OID_AUTO, "txq_cleaned",
		    CTLFLAG_RD,
		    &txq->ift_cleaned, "total cleaned");
		SYSCTL_ADD_PROC(ctx_list, queue_list, OID_AUTO, "ring_state",
		    CTLTYPE_STRING | CTLFLAG_RD,
		    __DEVOLATILE(uint64_t *, &txq->ift_br->state), 0,
		    mp_ring_state_handler, "A", "soft ring state");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_enqueues",
		    CTLFLAG_RD, &txq->ift_br->enqueues,
		    "# of enqueues to the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_drops",
		    CTLFLAG_RD, &txq->ift_br->drops,
		    "# of drops in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_starts",
		    CTLFLAG_RD, &txq->ift_br->starts,
		    "# of normal consumer starts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_stalls",
		    CTLFLAG_RD, &txq->ift_br->stalls,
		    "# of consumer stalls in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_restarts",
		    CTLFLAG_RD, &txq->ift_br->restarts,
		    "# of consumer restarts in the mp_ring for this queue");
		SYSCTL_ADD_COUNTER_U64(ctx_list, queue_list, OID_AUTO, "r_abdications",
		    CTLFLAG_RD, &txq->ift_br->abdications,
		    "# of consumer abdications in the mp_ring for this queue");
	}

	if (scctx->isc_nrxqsets > 100)
		qfmt = "rxq%03d";
	else if (scctx->isc_nrxqsets > 10)
		qfmt = "rxq%02d";
	else
		qfmt = "rxq%d";
	for (i = 0, rxq = ctx->ifc_rxqs; i < scctx->isc_nrxqsets; i++, rxq++) {
		snprintf(namebuf, NAME_BUFLEN, qfmt, i);
		queue_node = SYSCTL_ADD_NODE(ctx_list, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);
		if (sctx->isc_flags & IFLIB_HAS_RXCQ) {
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_pidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, queue_list, OID_AUTO, "rxq_cq_cidx",
			    CTLFLAG_RD,
			    &rxq->ifr_cq_cidx, 1, "Consumer Index");
		}

		for (j = 0, fl = rxq->ifr_fl; j < rxq->ifr_nfl; j++, fl++) {
			snprintf(namebuf, NAME_BUFLEN, "rxq_fl%d", j);
			fl_node = SYSCTL_ADD_NODE(ctx_list, queue_list, OID_AUTO,
			    namebuf, CTLFLAG_RD, NULL, "freelist Name");
			fl_list = SYSCTL_CHILDREN(fl_node);
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "pidx",
			    CTLFLAG_RD,
			    &fl->ifl_pidx, 1, "Producer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
			    CTLFLAG_RD,
			    &fl->ifl_cidx, 1, "Consumer Index");
			SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "credits",
			    CTLFLAG_RD,
			    &fl->ifl_credits, 1, "credits available");
#if MEMORY_LOGGING
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_m_enqueued, "mbufs allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_m_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_m_dequeued, "mbufs freed");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_enqueued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_enqueued, "clusters allocated");
			SYSCTL_ADD_QUAD(ctx_list, fl_list, OID_AUTO, "fl_cl_dequeued",
			    CTLFLAG_RD,
			    &fl->ifl_cl_dequeued, "clusters freed");
#endif
		}
	}
}

void
iflib_request_reset(if_ctx_t ctx)
{

	STATE_LOCK(ctx);
	ctx->ifc_flags |= IFC_DO_RESET;
	STATE_UNLOCK(ctx);
}
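/*
 * Example (illustrative): a driver that detects a fatal hardware error
 * in its admin/link interrupt path can request a full reinit without
 * sleeping:
 *
 *	iflib_request_reset(ctx);
 *	iflib_admin_intr_deferred(ctx);
 *
 * The deferred admin task observes IFC_DO_RESET and reinitializes the
 * interface from taskqueue context.
 */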
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *
iflib_fixup_rx(struct mbuf *m)
{
	struct mbuf *n;

	if (m->m_len <= (MCLBYTES - ETHER_HDR_LEN)) {
		/* Shift the frame forward in place to realign the payload. */
		bcopy(m->m_data, m->m_data + ETHER_HDR_LEN, m->m_len);
		m->m_data += ETHER_HDR_LEN;
		n = m;
	} else {
		/*
		 * Too large to shift in place: prepend a separate mbuf
		 * holding just the Ethernet header.
		 */
		MGETHDR(n, M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return (NULL);
		}
		bcopy(m->m_data, n->m_data, ETHER_HDR_LEN);
		m->m_data += ETHER_HDR_LEN;
		m->m_len -= ETHER_HDR_LEN;
		n->m_len = ETHER_HDR_LEN;
		M_MOVE_PKTHDR(n, m);
		n->m_next = m;
	}
	return (n);
}
#endif
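/*
 * Alignment note for iflib_fixup_rx() above (assuming receive clusters
 * start at least 4-byte aligned): ETHER_HDR_LEN is 14, so the IP header
 * of a received frame begins at offset 14 -- misaligned for 32-bit
 * loads on strict-alignment machines.  The small-frame path shifts the
 * whole frame forward by 14 bytes (the m_len check guarantees the
 * cluster has room), moving the IP header to offset 28, which is again
 * 4-byte aligned.  Frames too large for the in-place shift instead get
 * the 14-byte Ethernet header copied into a separate mbuf prepended to
 * the chain, so the stack strips the header without a full-payload copy.
 */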