Deleted: adapter.h (282486)    Added: adapter.h (284052)
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 10 unchanged lines hidden ---

19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
1/*-
2 * Copyright (c) 2011 Chelsio Communications, Inc.
3 * All rights reserved.
4 * Written by: Navdeep Parhar <np@FreeBSD.org>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:

--- 10 unchanged lines hidden ---

19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 *
27 * $FreeBSD: stable/10/sys/dev/cxgbe/adapter.h 282486 2015-05-05 15:13:59Z np $
27 * $FreeBSD: stable/10/sys/dev/cxgbe/adapter.h 284052 2015-06-06 09:28:40Z np $
28 *
29 */
30
31#ifndef __T4_ADAPTER_H__
32#define __T4_ADAPTER_H__
33
34#include <sys/kernel.h>
35#include <sys/bus.h>

--- 105 unchanged lines hidden ---

141#if MJUMPAGESIZE != MCLBYTES
142 SW_ZONE_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */
143#else
144 SW_ZONE_SIZES = 3, /* cluster, jumbo9k, jumbo16k */
145#endif
146 CL_METADATA_SIZE = CACHE_LINE_SIZE,
147
148 SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
28 *
29 */
30
31#ifndef __T4_ADAPTER_H__
32#define __T4_ADAPTER_H__
33
34#include <sys/kernel.h>
35#include <sys/bus.h>

--- 105 unchanged lines hidden ---

141#if MJUMPAGESIZE != MCLBYTES
142 SW_ZONE_SIZES = 4, /* cluster, jumbop, jumbo9k, jumbo16k */
143#else
144 SW_ZONE_SIZES = 3, /* cluster, jumbo9k, jumbo16k */
145#endif
146 CL_METADATA_SIZE = CACHE_LINE_SIZE,
147
148 SGE_MAX_WR_NDESC = SGE_MAX_WR_LEN / EQ_ESIZE, /* max WR size in desc */
149 TX_SGL_SEGS = 36,
149 TX_SGL_SEGS = 39,
150 TX_SGL_SEGS_TSO = 38,
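The SGL budget grows from 36 to 39 segments here, and TSO work requests get their own TX_SGL_SEGS_TSO limit. These constants bound the gather list (the per-txq struct sglist *gl added further below) that a frame is mapped into before a work request is built. A minimal sketch using the standard sglist(9) KPI, assuming only the constants and field names visible in this header; the helper names are illustrative:

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sglist.h>

/* Illustrative: size the per-queue gather list by TX_SGL_SEGS. */
static struct sglist *
alloc_tx_gl(void)
{

        return (sglist_alloc(TX_SGL_SEGS, M_WAITOK));
}

/* Illustrative: map an mbuf chain before building the txpkt work request. */
static int
map_tx_frame(struct sglist *gl, struct mbuf *m)
{

        sglist_reset(gl);
        return (sglist_append_mbuf(gl, m));     /* fails if > TX_SGL_SEGS segments */
}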
150 TX_WR_FLITS = SGE_MAX_WR_LEN / 8
151};
152
153enum {
154 /* adapter intr_type */
155 INTR_INTX = (1 << 0),
156 INTR_MSI = (1 << 1),
157 INTR_MSIX = (1 << 2)

--- 103 unchanged lines hidden (view full) ---

261 int qsize_rxq;
262 int qsize_txq;
263
264 int linkdnrc;
265 struct link_config link_cfg;
266
267 struct timeval last_refreshed;
268 struct port_stats stats;
151 TX_WR_FLITS = SGE_MAX_WR_LEN / 8
152};
153
154enum {
155 /* adapter intr_type */
156 INTR_INTX = (1 << 0),
157 INTR_MSI = (1 << 1),
158 INTR_MSIX = (1 << 2)

--- 103 unchanged lines hidden (view full) ---

262 int qsize_rxq;
263 int qsize_txq;
264
265 int linkdnrc;
266 struct link_config link_cfg;
267
268 struct timeval last_refreshed;
269 struct port_stats stats;
270 u_int tx_parse_error;
269
270 eventhandler_tag vlan_c;
271
272 struct callout tick;
273 struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */
274
275 uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
276};

--- 19 unchanged lines hidden ---

296 uint16_t nmbuf; /* # of driver originated mbufs with ref on cluster */
297 struct cluster_layout cll;
298};
299
300struct tx_desc {
301 __be64 flit[8];
302};
303
271
272 eventhandler_tag vlan_c;
273
274 struct callout tick;
275 struct sysctl_ctx_list ctx; /* from ifconfig up to driver detach */
276
277 uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
278};

--- 19 unchanged lines hidden ---

298 uint16_t nmbuf; /* # of driver originated mbufs with ref on cluster */
299 struct cluster_layout cll;
300};
301
302struct tx_desc {
303 __be64 flit[8];
304};
305
304struct tx_map {
305 struct mbuf *m;
306 bus_dmamap_t map;
307};
308
309/* DMA maps used for tx */
310struct tx_maps {
311 struct tx_map *maps;
312 uint32_t map_total; /* # of DMA maps */
313 uint32_t map_pidx; /* next map to be used */
314 uint32_t map_cidx; /* reclaimed up to this index */
315 uint32_t map_avail; /* # of available maps */
316};
317
318struct tx_sdesc {
306struct tx_sdesc {
307 struct mbuf *m; /* m_nextpkt linked chain of frames */
319 uint8_t desc_used; /* # of hardware descriptors used by the WR */
308 uint8_t desc_used; /* # of hardware descriptors used by the WR */
320 uint8_t credits; /* NIC txq: # of frames sent out in the WR */
321};
322
323
324#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
325struct iq_desc {
326 struct rss_header rss;
327 uint8_t cpl[IQ_PAD];
328 struct rsp_ctrl rsp;

--- 37 unchanged lines hidden ---

366 bus_dma_tag_t desc_tag;
367 bus_dmamap_t desc_map;
368 bus_addr_t ba; /* bus address of descriptor ring */
369};
370
371enum {
372 EQ_CTRL = 1,
373 EQ_ETH = 2,
309};
310
311
312#define IQ_PAD (IQ_ESIZE - sizeof(struct rsp_ctrl) - sizeof(struct rss_header))
313struct iq_desc {
314 struct rss_header rss;
315 uint8_t cpl[IQ_PAD];
316 struct rsp_ctrl rsp;

--- 37 unchanged lines hidden ---
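For reference, IQ_PAD above is sized so that the rss_header, the CPL payload, and the trailing rsp_ctrl fill exactly one IQ_ESIZE-byte ingress queue entry. A compile-time check in that spirit (the assertion is illustrative, not quoted from the driver):

#include <sys/param.h>
#include <sys/systm.h>

/* rss_header + cpl[IQ_PAD] + rsp_ctrl must add up to one iq entry. */
CTASSERT(sizeof(struct iq_desc) == IQ_ESIZE);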

354 bus_dma_tag_t desc_tag;
355 bus_dmamap_t desc_map;
356 bus_addr_t ba; /* bus address of descriptor ring */
357};
358
359enum {
360 EQ_CTRL = 1,
361 EQ_ETH = 2,
374#ifdef TCP_OFFLOAD
375 EQ_OFLD = 3,
362 EQ_OFLD = 3,
376#endif
377
378 /* eq flags */
363
364 /* eq flags */
379 EQ_TYPEMASK = 7, /* 3 lsbits hold the type */
380 EQ_ALLOCATED = (1 << 3), /* firmware resources allocated */
381 EQ_DOOMED = (1 << 4), /* about to be destroyed */
382 EQ_CRFLUSHED = (1 << 5), /* expecting an update from SGE */
383 EQ_STALLED = (1 << 6), /* out of hw descriptors or dmamaps */
365 EQ_TYPEMASK = 0x3, /* 2 lsbits hold the type (see above) */
366 EQ_ALLOCATED = (1 << 2), /* firmware resources allocated */
367 EQ_ENABLED = (1 << 3), /* open for business */
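The queue type now lives in the two low bits of eq->flags (EQ_TYPEMASK changes from 7 to 0x3), with the remaining bits carrying state such as EQ_ALLOCATED and the new EQ_ENABLED. A hedged sketch of how callers might test those bits, given the struct sge_eq defined just below; the helper names are illustrative, not the driver's:

/* Illustrative: the 2 lsbits of eq->flags select EQ_CTRL, EQ_ETH, or EQ_OFLD. */
static inline int
eq_type(const struct sge_eq *eq)
{

        return (eq->flags & EQ_TYPEMASK);
}

/* Illustrative: an Ethernet txq that is open for business. */
static inline bool
eq_eth_enabled(const struct sge_eq *eq)
{

        return (eq_type(eq) == EQ_ETH && (eq->flags & EQ_ENABLED) != 0);
}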
384};
385
386/* Listed in order of preference. Update t4_sysctls too if you change these */
387enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};
388
389/*
390 * Egress Queue: driver is producer, T4 is consumer.
391 *
392 * Note: A free list is an egress queue (driver produces the buffers and T4
393 * consumes them) but it's special enough to have its own struct (see sge_fl).
394 */
395struct sge_eq {
396 unsigned int flags; /* MUST be first */
397 unsigned int cntxt_id; /* SGE context id for the eq */
368};
369
370/* Listed in order of preference. Update t4_sysctls too if you change these */
371enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};
372
373/*
374 * Egress Queue: driver is producer, T4 is consumer.
375 *
376 * Note: A free list is an egress queue (driver produces the buffers and T4
377 * consumes them) but it's special enough to have its own struct (see sge_fl).
378 */
379struct sge_eq {
380 unsigned int flags; /* MUST be first */
381 unsigned int cntxt_id; /* SGE context id for the eq */
398 bus_dma_tag_t desc_tag;
399 bus_dmamap_t desc_map;
400 char lockname[16];
401 struct mtx eq_lock;
402
403 struct tx_desc *desc; /* KVA of descriptor ring */
382 struct mtx eq_lock;
383
384 struct tx_desc *desc; /* KVA of descriptor ring */
404 bus_addr_t ba; /* bus address of descriptor ring */
405 struct sge_qstat *spg; /* status page, for convenience */
406 uint16_t doorbells;
407 volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */
408 u_int udb_qid; /* relative qid within the doorbell page */
385 uint16_t doorbells;
386 volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */
387 u_int udb_qid; /* relative qid within the doorbell page */
409 uint16_t cap; /* max # of desc, for convenience */
410 uint16_t avail; /* available descriptors, for convenience */
411 uint16_t qsize; /* size (# of entries) of the queue */
388 uint16_t sidx; /* index of the entry with the status page */
412 uint16_t cidx; /* consumer idx (desc idx) */
413 uint16_t pidx; /* producer idx (desc idx) */
389 uint16_t cidx; /* consumer idx (desc idx) */
390 uint16_t pidx; /* producer idx (desc idx) */
414 uint16_t pending; /* # of descriptors used since last doorbell */
391 uint16_t equeqidx; /* EQUEQ last requested at this pidx */
392 uint16_t dbidx; /* pidx of the most recent doorbell */
415 uint16_t iqid; /* iq that gets egr_update for the eq */
416 uint8_t tx_chan; /* tx channel used by the eq */
393 uint16_t iqid; /* iq that gets egr_update for the eq */
394 uint8_t tx_chan; /* tx channel used by the eq */
417 struct task tx_task;
418 struct callout tx_callout;
395 volatile u_int equiq; /* EQUIQ outstanding */
419
396
420 /* stats */
421
422 uint32_t egr_update; /* # of SGE_EGR_UPDATE notifications for eq */
423 uint32_t unstalled; /* recovered from stall */
397 bus_dma_tag_t desc_tag;
398 bus_dmamap_t desc_map;
399 bus_addr_t ba; /* bus address of descriptor ring */
400 char lockname[16];
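With cap, avail, and pending gone, ring occupancy is derived from the indices alone: pidx and cidx run over the usable entries, sidx names the slot that holds the status page, and dbidx/equeqidx remember where the last doorbell and EQUEQ request were issued. A sketch of the kind of arithmetic this enables, assuming both indices stay in [0, sidx); the helpers and the exact wrap rule are illustrative, not the driver's code:

/* Illustrative ring arithmetic for the index-only sge_eq layout. */
static inline u_int
eq_used_desc(const struct sge_eq *eq)
{

        return (eq->pidx >= eq->cidx ? eq->pidx - eq->cidx :
            eq->pidx + eq->sidx - eq->cidx);
}

static inline u_int
eq_free_desc(const struct sge_eq *eq)
{

        /* Keep one slot unused so a full ring is distinguishable from empty. */
        return (eq->sidx - 1 - eq_used_desc(eq));
}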
424};
425
426struct sw_zone_info {
427 uma_zone_t zone; /* zone that this cluster comes from */
428 int size; /* size of cluster: 2K, 4K, 9K, 16K, etc. */
429 int type; /* EXT_xxx type of the cluster */
430 int8_t head_hwidx;
431 int8_t tail_hwidx;

--- 55 unchanged lines hidden ---

487 TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
488 bus_dma_tag_t desc_tag;
489 bus_dmamap_t desc_map;
490 char lockname[16];
491 bus_addr_t ba; /* bus address of descriptor ring */
492 struct cluster_layout cll_alt; /* alternate refill zone, layout */
493};
494
401};
402
403struct sw_zone_info {
404 uma_zone_t zone; /* zone that this cluster comes from */
405 int size; /* size of cluster: 2K, 4K, 9K, 16K, etc. */
406 int type; /* EXT_xxx type of the cluster */
407 int8_t head_hwidx;
408 int8_t tail_hwidx;

--- 55 unchanged lines hidden ---

464 TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
465 bus_dma_tag_t desc_tag;
466 bus_dmamap_t desc_map;
467 char lockname[16];
468 bus_addr_t ba; /* bus address of descriptor ring */
469 struct cluster_layout cll_alt; /* alternate refill zone, layout */
470};
471
472struct mp_ring;
473
495/* txq: SGE egress queue + what's needed for Ethernet NIC */
496struct sge_txq {
497 struct sge_eq eq; /* MUST be first */
498
499 struct ifnet *ifp; /* the interface this txq belongs to */
474/* txq: SGE egress queue + what's needed for Ethernet NIC */
475struct sge_txq {
476 struct sge_eq eq; /* MUST be first */
477
478 struct ifnet *ifp; /* the interface this txq belongs to */
500 bus_dma_tag_t tx_tag; /* tag for transmit buffers */
501 struct buf_ring *br; /* tx buffer ring */
479 struct mp_ring *r; /* tx software ring */
502 struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
480 struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
503 struct mbuf *m; /* held up due to temporary resource shortage */
481 struct sglist *gl;
482 __be32 cpl_ctrl0; /* for convenience */
504
483
505 struct tx_maps txmaps;
506
484 struct task tx_reclaim_task;
507 /* stats for common events first */
508
509 uint64_t txcsum; /* # of times hardware assisted with checksum */
510 uint64_t tso_wrs; /* # of TSO work requests */
511 uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
512 uint64_t imm_wrs; /* # of work requests with immediate data */
513 uint64_t sgl_wrs; /* # of work requests with direct SGL */
514 uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */
485 /* stats for common events first */
486
487 uint64_t txcsum; /* # of times hardware assisted with checksum */
488 uint64_t tso_wrs; /* # of TSO work requests */
489 uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
490 uint64_t imm_wrs; /* # of work requests with immediate data */
491 uint64_t sgl_wrs; /* # of work requests with direct SGL */
492 uint64_t txpkt_wrs; /* # of txpkt work requests (not coalesced) */
515 uint64_t txpkts_wrs; /* # of coalesced tx work requests */
516 uint64_t txpkts_pkts; /* # of frames in coalesced tx work requests */
493 uint64_t txpkts0_wrs; /* # of type0 coalesced tx work requests */
494 uint64_t txpkts1_wrs; /* # of type1 coalesced tx work requests */
495 uint64_t txpkts0_pkts; /* # of frames in type0 coalesced tx WRs */
496 uint64_t txpkts1_pkts; /* # of frames in type1 coalesced tx WRs */
517
518 /* stats for not-that-common events */
497
498 /* stats for not-that-common events */
519
520 uint32_t no_dmamap; /* no DMA map to load the mbuf */
521 uint32_t no_desc; /* out of hardware descriptors */
522} __aligned(CACHE_LINE_SIZE);
523
524/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
525struct sge_rxq {
526 struct sge_iq iq; /* MUST be first */
527 struct sge_fl fl; /* MUST follow iq */
528
529 struct ifnet *ifp; /* the interface this rxq belongs to */

--- 32 unchanged lines hidden ---

562 return (__containerof(iq, struct sge_ofld_rxq, iq));
563}
564#endif
565
566struct wrqe {
567 STAILQ_ENTRY(wrqe) link;
568 struct sge_wrq *wrq;
569 int wr_len;
499} __aligned(CACHE_LINE_SIZE);
500
501/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
502struct sge_rxq {
503 struct sge_iq iq; /* MUST be first */
504 struct sge_fl fl; /* MUST follow iq */
505
506 struct ifnet *ifp; /* the interface this rxq belongs to */

--- 32 unchanged lines hidden ---

539 return (__containerof(iq, struct sge_ofld_rxq, iq));
540}
541#endif
542
543struct wrqe {
544 STAILQ_ENTRY(wrqe) link;
545 struct sge_wrq *wrq;
546 int wr_len;
570 uint64_t wr[] __aligned(16);
547 char wr[] __aligned(16);
571};
572
548};
549
550struct wrq_cookie {
551 TAILQ_ENTRY(wrq_cookie) link;
552 int ndesc;
553 int pidx;
554};
555
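struct wrqe now stores its payload in a 16-byte-aligned flexible char array, so a queued work request is a single allocation of the bookkeeping fields plus wr_len bytes, and struct wrq_cookie tracks a reservation made directly in the descriptor ring (see start_wrq_wr()/commit_wrq_wr() near the end of the file). A minimal allocation sketch under that assumption; the helper and the M_DEVBUF malloc type are illustrative, not the driver's own constructor:

#include <sys/param.h>
#include <sys/malloc.h>

/* Illustrative: one allocation holds the wrqe header and the WR payload. */
static struct wrqe *
wrqe_alloc(struct sge_wrq *wrq, int wr_len)
{
        struct wrqe *wr;

        wr = malloc(sizeof(*wr) + wr_len, M_DEVBUF, M_NOWAIT);
        if (wr == NULL)
                return (NULL);
        wr->wrq = wrq;
        wr->wr_len = wr_len;
        return (wr);            /* the caller builds the WR in wr->wr[] */
}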
573/*
574 * wrq: SGE egress queue that is given prebuilt work requests. Both the control
575 * and offload tx queues are of this type.
576 */
577struct sge_wrq {
578 struct sge_eq eq; /* MUST be first */
579
580 struct adapter *adapter;
556/*
557 * wrq: SGE egress queue that is given prebuilt work requests. Both the control
558 * and offload tx queues are of this type.
559 */
560struct sge_wrq {
561 struct sge_eq eq; /* MUST be first */
562
563 struct adapter *adapter;
564 struct task wrq_tx_task;
581
565
582 /* List of WRs held up due to lack of tx descriptors */
566 /* Tx desc reserved but WR not "committed" yet. */
567 TAILQ_HEAD(wrq_incomplete_wrs , wrq_cookie) incomplete_wrs;
568
569 /* List of WRs ready to go out as soon as descriptors are available. */
583 STAILQ_HEAD(, wrqe) wr_list;
570 STAILQ_HEAD(, wrqe) wr_list;
571 u_int nwr_pending;
572 u_int ndesc_needed;
584
585 /* stats for common events first */
586
573
574 /* stats for common events first */
575
587 uint64_t tx_wrs; /* # of tx work requests */
576 uint64_t tx_wrs_direct; /* # of WRs written directly to desc ring. */
577 uint64_t tx_wrs_ss; /* # of WRs copied from scratch space. */
578 uint64_t tx_wrs_copied; /* # of WRs queued and copied to desc ring. */
588
589 /* stats for not-that-common events */
590
579
580 /* stats for not-that-common events */
581
591 uint32_t no_desc; /* out of hardware descriptors */
582 /*
583 * Scratch space for work requests that wrap around after reaching the
 584 * status page, and some information about the last WR that used it.
585 */
586 uint16_t ss_pidx;
587 uint16_t ss_len;
588 uint8_t ss[SGE_MAX_WR_LEN];
589
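The incomplete_wrs list and the nwr_pending/ndesc_needed counters track work requests whose descriptors are reserved but not yet committed, while ss[] stages a work request whose reserved slots wrap past the status page. A rough sketch of that staging step, assuming ss_pidx/ss_len describe the staged WR; the wrap computation is illustrative, not the committed implementation:

#include <sys/param.h>
#include <sys/systm.h>

/* Illustrative: copy a staged WR from wrq->ss into the ring, handling wrap. */
static void
copy_ss_to_ring(struct sge_wrq *wrq)
{
        struct sge_eq *eq = &wrq->eq;
        char *ring = (char *)eq->desc;
        u_int tail = (eq->sidx - wrq->ss_pidx) * EQ_ESIZE;

        if (tail > wrq->ss_len)
                tail = wrq->ss_len;
        memcpy(ring + (u_int)wrq->ss_pidx * EQ_ESIZE, wrq->ss, tail);
        if (wrq->ss_len > tail)
                memcpy(ring, wrq->ss + tail, wrq->ss_len - tail);
}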
592} __aligned(CACHE_LINE_SIZE);
593
594
595#ifdef DEV_NETMAP
596struct sge_nm_rxq {
597 struct port_info *pi;
598
599 struct iq_desc *iq_desc;

--- 132 unchanged lines hidden ---

732 void *tag;
733 } *irq;
734
735 bus_dma_tag_t dmat; /* Parent DMA tag */
736
737 struct sge sge;
738 int lro_timeout;
739
590} __aligned(CACHE_LINE_SIZE);
591
592
593#ifdef DEV_NETMAP
594struct sge_nm_rxq {
595 struct port_info *pi;
596
597 struct iq_desc *iq_desc;

--- 132 unchanged lines hidden ---

730 void *tag;
731 } *irq;
732
733 bus_dma_tag_t dmat; /* Parent DMA tag */
734
735 struct sge sge;
736 int lro_timeout;
737
740 struct taskqueue *tq[NCHAN]; /* taskqueues that flush data out */
738 struct taskqueue *tq[NCHAN]; /* General purpose taskqueues */
741 struct port_info *port[MAX_NPORTS];
742 uint8_t chan_map[NCHAN];
743
744#ifdef TCP_OFFLOAD
745 void *tom_softc; /* (struct tom_data *) */
746 struct tom_tunables tt;
747 void *iwarp_softc; /* (struct c4iw_dev *) */
748 void *iscsi_softc;

--- 216 unchanged lines hidden ---

965
966 return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
967}
968
969static inline int
970tx_resume_threshold(struct sge_eq *eq)
971{
972
739 struct port_info *port[MAX_NPORTS];
740 uint8_t chan_map[NCHAN];
741
742#ifdef TCP_OFFLOAD
743 void *tom_softc; /* (struct tom_data *) */
744 struct tom_tunables tt;
745 void *iwarp_softc; /* (struct c4iw_dev *) */
746 void *iscsi_softc;

--- 216 unchanged lines hidden ---

963
964 return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
965}
966
967static inline int
968tx_resume_threshold(struct sge_eq *eq)
969{
970
973 return (eq->qsize / 4);
971 /* not quite the same as qsize / 4, but this will do. */
972 return (eq->sidx / 4);
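tx_resume_threshold() is now based on sidx, the number of usable ring entries, instead of qsize. A hedged example of the kind of check a transmit path might make before restarting a queue that ran out of descriptors; the reclaimed-descriptor count is an assumed input, not a field from this header:

/* Illustrative: resume only after a quarter of the usable ring is reclaimed. */
static inline bool
should_resume_tx(struct sge_eq *eq, u_int reclaimed)
{

        return (reclaimed >= tx_resume_threshold(eq));
}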
974}
975
976/* t4_main.c */
973}
974
975/* t4_main.c */
977void t4_tx_task(void *, int);
978void t4_tx_callout(void *);
979int t4_os_find_pci_capability(struct adapter *, int);
980int t4_os_pci_save_state(struct adapter *);
981int t4_os_pci_restore_state(struct adapter *);
982void t4_os_portmod_changed(const struct adapter *, int);
983void t4_os_link_changed(struct adapter *, int, int, int);
984void t4_iterate(void (*)(struct adapter *, void *), void *);
985int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
986int t4_register_an_handler(struct adapter *, an_handler_t);

--- 24 unchanged lines hidden ---

1011int t4_create_dma_tag(struct adapter *);
1012void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
1013 struct sysctl_oid_list *);
1014int t4_destroy_dma_tag(struct adapter *);
1015int t4_setup_adapter_queues(struct adapter *);
1016int t4_teardown_adapter_queues(struct adapter *);
1017int t4_setup_port_queues(struct port_info *);
1018int t4_teardown_port_queues(struct port_info *);
976int t4_os_find_pci_capability(struct adapter *, int);
977int t4_os_pci_save_state(struct adapter *);
978int t4_os_pci_restore_state(struct adapter *);
979void t4_os_portmod_changed(const struct adapter *, int);
980void t4_os_link_changed(struct adapter *, int, int, int);
981void t4_iterate(void (*)(struct adapter *, void *), void *);
982int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
983int t4_register_an_handler(struct adapter *, an_handler_t);

--- 24 unchanged lines hidden ---

1008int t4_create_dma_tag(struct adapter *);
1009void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
1010 struct sysctl_oid_list *);
1011int t4_destroy_dma_tag(struct adapter *);
1012int t4_setup_adapter_queues(struct adapter *);
1013int t4_teardown_adapter_queues(struct adapter *);
1014int t4_setup_port_queues(struct port_info *);
1015int t4_teardown_port_queues(struct port_info *);
1019int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
1020void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
1021void t4_intr_all(void *);
1022void t4_intr(void *);
1023void t4_intr_err(void *);
1024void t4_intr_evt(void *);
1025void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
1016void t4_intr_all(void *);
1017void t4_intr(void *);
1018void t4_intr_err(void *);
1019void t4_intr_evt(void *);
1020void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
1026int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
1027void t4_update_fl_bufsize(struct ifnet *);
1021void t4_update_fl_bufsize(struct ifnet *);
1028int can_resume_tx(struct sge_eq *);
1022int parse_pkt(struct mbuf **);
1023void *start_wrq_wr(struct sge_wrq *, int, struct wrq_cookie *);
1024void commit_wrq_wr(struct sge_wrq *, void *, struct wrq_cookie *);
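parse_pkt() and the start_wrq_wr()/commit_wrq_wr() pair are the public face of the reworked tx path: start_wrq_wr() reserves room in a work request queue and fills in a wrq_cookie, the caller builds the WR in place, and commit_wrq_wr() releases it to the hardware. A usage sketch built only from the prototypes above; the 16-byte unit for the size argument and the payload handling are assumptions, not taken from this header:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>

/* Illustrative: reserve space, build the WR in place, then commit it. */
static int
send_small_wr(struct sge_wrq *wrq, const void *payload, int len)
{
        struct wrq_cookie cookie;
        void *dst;

        /* ASSUMPTION: the int argument is the WR length in 16-byte units. */
        dst = start_wrq_wr(wrq, howmany(len, 16), &cookie);
        if (dst == NULL)
                return (ENOMEM);        /* no room in the descriptor ring right now */
        memcpy(dst, payload, len);
        commit_wrq_wr(wrq, dst, &cookie);
        return (0);
}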
1029int tnl_cong(struct port_info *);
1030
1031/* t4_tracer.c */
1032struct t4_tracer;
1033void t4_tracer_modload(void);
1034void t4_tracer_modunload(void);
1035void t4_tracer_port_detach(struct adapter *);
1036int t4_get_tracer(struct adapter *, struct t4_tracer *);

--- 41 unchanged lines hidden ---
1025int tnl_cong(struct port_info *);
1026
1027/* t4_tracer.c */
1028struct t4_tracer;
1029void t4_tracer_modload(void);
1030void t4_tracer_modunload(void);
1031void t4_tracer_port_detach(struct adapter *);
1032int t4_get_tracer(struct adapter *, struct t4_tracer *);

--- 41 unchanged lines hidden ---