adapter.h revision 237263
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/cxgbe/adapter.h 237263 2012-06-19 07:34:13Z np $
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "firmware/t4fw_interface.h"

#define T4_CFGNAME "t4fw_cfg"
#define T4_FWNAME "t4fw"

MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
	panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x)
#endif

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD	SYSCTL_ADD_QUAD
#define sysctl_handle_64	sysctl_handle_quad
#define CTLTYPE_U64		CTLTYPE_QUAD
#endif

#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))
#define SBUF_DRAIN 1
#endif

#ifdef __amd64__
/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	*(volatile uint64_t *)(bsh + offset) = value;
}
#else
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	return (uint64_t)bus_space_read_4(tag, handle, offset) +
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(tag, bsh, offset, value);
	bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
#endif

struct adapter;
typedef struct adapter adapter_t;

enum {
	FW_IQ_QSIZE = 256,
	FW_IQ_ESIZE = 64,	/* At least 64 mandated by the firmware spec */

	RX_IQ_QSIZE = 1024,
	RX_IQ_ESIZE = 64,	/* At least 64 so CPL_RX_PKT will fit */

	EQ_ESIZE = 64,		/* All egress queues use this entry size */

	RX_FL_ESIZE = EQ_ESIZE,	/* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
	FL_BUF_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
#else
	FL_BUF_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
#endif

	CTRL_EQ_QSIZE = 128,

	TX_EQ_QSIZE = 1024,
	TX_SGL_SEGS = 36,
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
	/* adapter intr_type */
	INTR_INTX	= (1 << 0),
	INTR_MSI	= (1 << 1),
	INTR_MSIX	= (1 << 2)
};

enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	FW_OK		= (1 << 1),
	INTR_DIRECT	= (1 << 2),	/* direct interrupts for everything */
	MASTER_PF	= (1 << 3),
	ADAP_SYSCTL_CTX	= (1 << 4),
	TOM_INIT_DONE	= (1 << 5),

	CXGBE_BUSY	= (1 << 9),

	/* port flags */
	DOOMED		= (1 << 0),
	PORT_INIT_DONE	= (1 << 1),
	PORT_SYSCTL_CTX	= (1 << 2),
};

#define IS_DOOMED(pi)	(pi->flags & DOOMED)
#define SET_DOOMED(pi)	do {pi->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	(sc->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {sc->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {sc->flags &= ~CXGBE_BUSY;} while (0)

struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct ifnet *ifp;
	struct ifmedia media;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;
	int if_flags;

	uint16_t viid;
	int16_t  xact_addr_filt;/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint8_t  lport;		/* associated offload logical port */
	int8_t   mdio_addr;
	uint8_t  port_type;
	uint8_t  mod_type;
	uint8_t  port_id;
	uint8_t  tx_chan;

	/* These need to be int as they are used in sysctl */
	int ntxq;	/* # of tx queues */
	int first_txq;	/* index of first tx queue */
	int nrxq;	/* # of rx queues */
	int first_rxq;	/* index of first rx queue */
#ifdef TCP_OFFLOAD
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
#endif
	int tmr_idx;
	int pktc_idx;
	int qsize_rxq;
	int qsize_txq;

	struct link_config link_cfg;
	struct port_stats stats;

	eventhandler_tag vlan_c;

	struct callout tick;
	struct sysctl_ctx_list ctx;	/* from ifconfig up to driver detach */

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};

struct fl_sdesc {
	struct mbuf *m;
	bus_dmamap_t map;
	caddr_t cl;
	uint8_t tag_idx;	/* the sc->fl_tag this map comes from */
#ifdef INVARIANTS
	__be64 ba_tag;
#endif
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_map {
	struct mbuf *m;
	bus_dmamap_t map;
};

/* DMA maps used for tx */
struct tx_maps {
	struct tx_map *maps;
	uint32_t map_total;	/* # of DMA maps */
	uint32_t map_pidx;	/* next map to be used */
	uint32_t map_cidx;	/* reclaimed up to this index */
	uint32_t map_avail;	/* # of available maps */
};

struct tx_sdesc {
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
	uint8_t credits;	/* NIC txq: # of frames sent out in the WR */
};

enum {
	/* iq flags */
	IQ_ALLOCATED	= (1 << 0),	/* firmware resources allocated */
	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
	IQ_INTR		= (1 << 2),	/* iq takes direct interrupt */
	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */

	/* iq state */
	IQS_DISABLED	= 0,
	IQS_BUSY	= 1,
	IQS_IDLE	= 2,
};

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	char lockname[16];
	uint32_t flags;
	uint16_t abs_id;	/* absolute SGE id for the iq */
	int8_t   intr_pktc_idx;	/* packet count threshold index */
	int8_t   pad0;
	__be64  *desc;		/* KVA of descriptor ring */

	volatile int state;
	struct adapter *adapter;
	const __be64 *cdesc;	/* current descriptor */
	uint8_t  gen;		/* generation bit */
	uint8_t  intr_params;	/* interrupt holdoff parameters */
	uint8_t  intr_next;	/* XXX: holdoff for next interrupt */
	uint8_t  esize;		/* size (bytes) of each entry in the queue */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */

	STAILQ_ENTRY(sge_iq) link;
};
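
/*
 * Added commentary, not part of the original header: a minimal sketch of how
 * the cidx/gen/cdesc/esize fields of an ingress queue relate to one another.
 * The real consumption logic lives in t4_sge.c; the helper name and the exact
 * wrap/status-page handling below are assumptions for illustration only.
 */
#if 0
static inline void
example_iq_advance(struct sge_iq *iq)
{
	/* Step to the next entry; entries are iq->esize bytes each. */
	iq->cdesc = (const __be64 *)((const char *)iq->cdesc + iq->esize);
	if (__predict_false(++iq->cidx == iq->qsize)) {
		/* Wrap around and flip the generation bit. */
		iq->cidx = 0;
		iq->gen ^= 1;
		iq->cdesc = iq->desc;
	}
}
#endif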

enum {
	EQ_CTRL		= 1,
	EQ_ETH		= 2,
#ifdef TCP_OFFLOAD
	EQ_OFLD		= 3,
#endif

	/* eq flags */
	EQ_TYPEMASK	= 7,		/* 3 lsbits hold the type */
	EQ_ALLOCATED	= (1 << 3),	/* firmware resources allocated */
	EQ_DOOMED	= (1 << 4),	/* about to be destroyed */
	EQ_CRFLUSHED	= (1 << 5),	/* expecting an update from SGE */
	EQ_STALLED	= (1 << 6),	/* out of hw descriptors or dmamaps */
};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct sge_qstat *spg;	/* status page, for convenience */
	uint16_t cap;		/* max # of desc, for convenience */
	uint16_t avail;		/* available descriptors, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t pending;	/* # of descriptors used since last doorbell */
	uint16_t iqid;		/* iq that gets egr_update for the eq */
	uint8_t tx_chan;	/* tx channel used by the eq */
	struct task tx_task;
	struct callout tx_callout;

	/* stats */

	uint32_t egr_update;	/* # of SGE_EGR_UPDATE notifications for eq */
	uint32_t unstalled;	/* recovered from stall */
};

enum {
	FL_STARVING	= (1 << 0), /* on the adapter's list of starving fl's */
	FL_DOOMED	= (1 << 1), /* about to be destroyed */
};

#define FL_RUNNING_LOW(fl)	(fl->cap - fl->needed <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl)	(fl->cap - fl->needed >= 2 * fl->lowat)
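
/*
 * Added commentary, not part of the original header: the two macros above use
 * different thresholds on purpose, so a freelist flips between "running low"
 * and "recovered" with hysteresis instead of flapping around a single mark.
 * A minimal usage sketch follows; refill_fl() is a hypothetical refill
 * routine standing in for the private one in t4_sge.c.
 */
#if 0
static inline void
example_check_fl(struct adapter *sc, struct sge_fl *fl)
{
	FL_LOCK(fl);
	if (FL_RUNNING_LOW(fl))
		refill_fl(sc, fl);	/* hypothetical; see t4_sge.c */
	FL_UNLOCK(fl);
}
#endif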

struct sge_fl {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_tag_t tag[FL_BUF_SIZES];
	uint8_t tag_idx;
	struct mtx fl_lock;
	char lockname[16];
	int flags;

	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint32_t cap;		/* max # of buffers, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	uint32_t cidx;		/* consumer idx (buffer idx, NOT hw desc idx) */
	uint32_t pidx;		/* producer idx (buffer idx, NOT hw desc idx) */
	uint32_t needed;	/* # of buffers needed to fill up fl. */
	uint32_t lowat;		/* # of buffers <= this means fl needs help */
	uint32_t pending;	/* # of bufs allocated since last doorbell */
	unsigned int dmamap_failed;
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
};

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	bus_dma_tag_t tx_tag;	/* tag for transmit buffers */
	struct buf_ring *br;	/* tx buffer ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct mbuf *m;		/* held up due to temporary resource shortage */

	struct tx_maps txmaps;

	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of IPv4 TSO work requests */
	uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts_wrs;	/* # of coalesced tx work requests */
	uint64_t txpkts_pkts;	/* # of frames in coalesced tx work requests */

	/* stats for not-that-common events */

	uint32_t no_dmamap;	/* no DMA map to load the mbuf */
	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	struct ifnet *ifp;	/* the interface this rxq belongs to */
#ifdef INET
	struct lro_ctrl lro;	/* LRO state */
#endif

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (member2struct(sge_rxq, iq, iq));
}


#ifdef TCP_OFFLOAD
/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (member2struct(sge_ofld_rxq, iq, iq));
}
#endif

struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;
	int wr_len;
	uint64_t wr[] __aligned(16);
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Both the
 * control and offload tx queues are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;

	/* List of WRs held up due to lack of tx descriptors */
	STAILQ_HEAD(, wrqe) wr_list;

	/* stats for common events first */

	uint64_t tx_wrs;	/* # of tx work requests */

	/* stats for not-that-common events */

	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

struct sge {
	int timer_val[SGE_NTIMERS];
	int counter_val[SGE_NCOUNTERS];
	int fl_starve_threshold;

	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
#ifdef TCP_OFFLOAD
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
#endif
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_wrq mgmtq;	/* Management queue (control queue) */
	struct sge_wrq *ctrlq;	/* Control queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */
#ifdef TCP_OFFLOAD
	struct sge_wrq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
#endif

	uint16_t iq_start;
	int eq_start;
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */
};

struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);

struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;
	struct cdev *cdev;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;

	unsigned int pf;
	unsigned int mbox;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		void *tag;
	} *irq;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;

	struct taskqueue *tq[NCHAN];	/* taskqueues that flush data out */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[NCHAN];
	uint32_t filter_mode;

#ifdef TCP_OFFLOAD
	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
#endif
	struct l2t_data *l2t;	/* L2 table */
	struct tid_info tids;

	int open_device_map;
#ifdef TCP_OFFLOAD
	int offload_map;
#endif
	int flags;

	char fw_version[32];
	unsigned int cfcsum;
	struct adapter_params params;
	struct t4_virt_res vres;

	uint16_t linkcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;

	an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
	cpl_handler_t cpl_handler[256];
};
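
/*
 * Added commentary, not part of the original header: an_handler and
 * cpl_handler[] above are per-adapter dispatch hooks.  A handler registered
 * with t4_register_cpl_handler() (declared further below) is looked up by CPL
 * opcode.  The sketch below is an illustrative assumption only; the real
 * dispatch sits in t4_sge.c.
 */
#if 0
static inline int
example_dispatch_cpl(struct adapter *sc, struct sge_iq *iq,
    const struct rss_header *rss, struct mbuf *m, uint8_t opcode)
{
	/* Assumption: opcode was taken from the message's rss_header. */
	return (sc->cpl_handler[opcode](iq, rss, m));
}
#endif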

#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define for_each_txq(pi, iter, txq) \
	txq = &pi->adapter->sge.txq[pi->first_txq]; \
	for (iter = 0; iter < pi->ntxq; ++iter, ++txq)
#define for_each_rxq(pi, iter, rxq) \
	rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
	for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
#define for_each_ofld_txq(pi, iter, ofld_txq) \
	ofld_txq = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq]; \
	for (iter = 0; iter < pi->nofldtxq; ++iter, ++ofld_txq)
#define for_each_ofld_rxq(pi, iter, ofld_rxq) \
	ofld_rxq = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq]; \
	for (iter = 0; iter < pi->nofldrxq; ++iter, ++ofld_rxq)
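
/*
 * Added commentary, not part of the original header: each for_each_* macro
 * expands to an assignment followed by a for statement, so the queue pointer
 * walks the port's slice of the adapter-wide queue arrays.  A minimal usage
 * sketch (function and variable names are hypothetical):
 */
#if 0
static inline void
example_walk_txqs(struct port_info *pi)
{
	struct sge_txq *txq;
	int i;

	for_each_txq(pi, i, txq) {
		TXQ_LOCK(txq);
		/* ... per-queue work for this port ... */
		TXQ_UNLOCK(txq);
	}
}
#endif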

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

	return t4_bus_space_read_8(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

	t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

	pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{

	bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
}

static inline bool is_10G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}

static inline int tx_resume_threshold(struct sge_eq *eq)
{

	return (eq->qsize / 4);
}

/* t4_main.c */
void t4_tx_task(void *, int);
void t4_tx_callout(void *);
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int);
void t4_iterate(void (*)(struct adapter *, void *), void *);
int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);

/* t4_sge.c */
void t4_sge_modload(void);
int t4_sge_init(struct adapter *);
int t4_create_dma_tag(struct adapter *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_port_queues(struct port_info *);
int t4_teardown_port_queues(struct port_info *);
int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
int can_resume_tx(struct sge_eq *);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}

#endif
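
/*
 * Added commentary, not part of the original header: a typical life cycle of
 * a work request built with alloc_wrqe()/wrtod()/t4_wrq_tx() above.  This is
 * an illustrative sketch only; the function name and the choice of
 * &sc->sge.mgmtq as the target wrq are assumptions, and the actual WR layout
 * depends on the firmware interface.
 */
#if 0
static inline int
example_send_wr(struct adapter *sc, int wr_len)
{
	struct wrqe *wr;
	void *w;

	wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);	/* M_NOWAIT allocation */
	if (wr == NULL)
		return (ENOMEM);
	w = wrtod(wr);
	/* ... build the work request at w (wr_len bytes) ... */
	t4_wrq_tx(sc, wr);	/* the wrq takes over the wrqe from here */
	return (0);
}
#endif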