adapter.h revision 255005
/*-
 * Copyright (c) 2011 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/dev/cxgbe/adapter.h 255005 2013-08-28 20:45:45Z np $
 *
 */

#ifndef __T4_ADAPTER_H__
#define __T4_ADAPTER_H__

#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <machine/bus.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_media.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

#include "offload.h"
#include "firmware/t4fw_interface.h"

MALLOC_DECLARE(M_CXGBE);
#define CXGBE_UNIMPLEMENTED(s) \
    panic("%s (%s, line %d) not implemented yet.", s, __FILE__, __LINE__)

#if defined(__i386__) || defined(__amd64__)
static __inline void
prefetch(void *x)
{
	__asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
}
#else
#define prefetch(x)
#endif

#ifndef SYSCTL_ADD_UQUAD
#define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD
#define sysctl_handle_64 sysctl_handle_quad
#define CTLTYPE_U64 CTLTYPE_QUAD
#endif

#if (__FreeBSD_version >= 900030) || \
    ((__FreeBSD_version >= 802507) && (__FreeBSD_version < 900000))
#define SBUF_DRAIN 1
#endif

#ifdef __amd64__
/* XXX: need systemwide bus_space_read_8/bus_space_write_8 */
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	return (*(volatile uint64_t *)(handle + offset));
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	KASSERT(tag == X86_BUS_SPACE_MEM,
	    ("%s: can only handle mem space", __func__));

	*(volatile uint64_t *)(bsh + offset) = value;
}
#else
static __inline uint64_t
t4_bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
    bus_size_t offset)
{
	return (uint64_t)bus_space_read_4(tag, handle, offset) +
	    ((uint64_t)bus_space_read_4(tag, handle, offset + 4) << 32);
}

static __inline void
t4_bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t bsh,
    bus_size_t offset, uint64_t value)
{
	bus_space_write_4(tag, bsh, offset, value);
	bus_space_write_4(tag, bsh, offset + 4, value >> 32);
}
#endif
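
/*
 * Note: on platforms other than amd64 the 8-byte helpers above are emulated
 * with two 4-byte bus_space accesses (low dword at 'offset', high dword at
 * 'offset + 4'), so a 64-bit register access is not atomic there.
 */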

struct adapter;
typedef struct adapter adapter_t;

enum {
	FW_IQ_QSIZE = 256,
	FW_IQ_ESIZE = 64,	/* At least 64 mandated by the firmware spec */

	RX_IQ_QSIZE = 1024,
	RX_IQ_ESIZE = 64,	/* At least 64 so CPL_RX_PKT will fit */

	EQ_ESIZE = 64,		/* All egress queues use this entry size */

	RX_FL_ESIZE = EQ_ESIZE,	/* 8 64bit addresses */
#if MJUMPAGESIZE != MCLBYTES
	FL_BUF_SIZES = 4,	/* cluster, jumbop, jumbo9k, jumbo16k */
#else
	FL_BUF_SIZES = 3,	/* cluster, jumbo9k, jumbo16k */
#endif

	CTRL_EQ_QSIZE = 128,

	TX_EQ_QSIZE = 1024,
	TX_SGL_SEGS = 36,
	TX_WR_FLITS = SGE_MAX_WR_LEN / 8
};

enum {
	/* adapter intr_type */
	INTR_INTX	= (1 << 0),
	INTR_MSI	= (1 << 1),
	INTR_MSIX	= (1 << 2)
};

enum {
	/* flags understood by begin_synchronized_op */
	HOLD_LOCK	= (1 << 0),
	SLEEP_OK	= (1 << 1),
	INTR_OK		= (1 << 2),

	/* flags understood by end_synchronized_op */
	LOCK_HELD	= HOLD_LOCK,
};
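
/*
 * Rough usage sketch for the flags above (see begin_synchronized_op and
 * end_synchronized_op declared near the end of this file; the "t4xyz" wait
 * string is illustrative only):
 *
 *	rc = begin_synchronized_op(sc, pi, SLEEP_OK | INTR_OK, "t4xyz");
 *	if (rc)
 *		return (rc);
 *	... operate on the adapter/port ...
 *	end_synchronized_op(sc, 0);
 */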

enum {
	/* adapter flags */
	FULL_INIT_DONE	= (1 << 0),
	FW_OK		= (1 << 1),
	INTR_DIRECT	= (1 << 2),	/* direct interrupts for everything */
	MASTER_PF	= (1 << 3),
	ADAP_SYSCTL_CTX	= (1 << 4),
	TOM_INIT_DONE	= (1 << 5),

	CXGBE_BUSY	= (1 << 9),

	/* port flags */
	DOOMED		= (1 << 0),
	PORT_INIT_DONE	= (1 << 1),
	PORT_SYSCTL_CTX	= (1 << 2),
	HAS_TRACEQ	= (1 << 3),
};

#define IS_DOOMED(pi)	((pi)->flags & DOOMED)
#define SET_DOOMED(pi)	do {(pi)->flags |= DOOMED;} while (0)
#define IS_BUSY(sc)	((sc)->flags & CXGBE_BUSY)
#define SET_BUSY(sc)	do {(sc)->flags |= CXGBE_BUSY;} while (0)
#define CLR_BUSY(sc)	do {(sc)->flags &= ~CXGBE_BUSY;} while (0)

struct port_info {
	device_t dev;
	struct adapter *adapter;

	struct ifnet *ifp;
	struct ifmedia media;

	struct mtx pi_lock;
	char lockname[16];
	unsigned long flags;
	int if_flags;

	uint16_t viid;
	int16_t  xact_addr_filt;/* index of exact MAC address filter */
	uint16_t rss_size;	/* size of VI's RSS table slice */
	uint8_t  lport;		/* associated offload logical port */
	int8_t   mdio_addr;
	uint8_t  port_type;
	uint8_t  mod_type;
	uint8_t  port_id;
	uint8_t  tx_chan;

	/* These need to be int as they are used in sysctl */
	int ntxq;	/* # of tx queues */
	int first_txq;	/* index of first tx queue */
	int nrxq;	/* # of rx queues */
	int first_rxq;	/* index of first rx queue */
#ifdef TCP_OFFLOAD
	int nofldtxq;		/* # of offload tx queues */
	int first_ofld_txq;	/* index of first offload tx queue */
	int nofldrxq;		/* # of offload rx queues */
	int first_ofld_rxq;	/* index of first offload rx queue */
#endif
	int tmr_idx;
	int pktc_idx;
	int qsize_rxq;
	int qsize_txq;

	int linkdnrc;
	struct link_config link_cfg;
	struct port_stats stats;

	eventhandler_tag vlan_c;

	struct callout tick;
	struct sysctl_ctx_list ctx;	/* from ifconfig up to driver detach */

	uint8_t hw_addr[ETHER_ADDR_LEN]; /* factory MAC address, won't change */
};

struct fl_sdesc {
	struct mbuf *m;
	bus_dmamap_t map;
	caddr_t cl;
	uint8_t tag_idx;	/* the sc->fl_tag this map comes from */
#ifdef INVARIANTS
	__be64 ba_tag;
#endif
};

struct tx_desc {
	__be64 flit[8];
};

struct tx_map {
	struct mbuf *m;
	bus_dmamap_t map;
};

/* DMA maps used for tx */
struct tx_maps {
	struct tx_map *maps;
	uint32_t map_total;	/* # of DMA maps */
	uint32_t map_pidx;	/* next map to be used */
	uint32_t map_cidx;	/* reclaimed up to this index */
	uint32_t map_avail;	/* # of available maps */
};

struct tx_sdesc {
	uint8_t desc_used;	/* # of hardware descriptors used by the WR */
	uint8_t credits;	/* NIC txq: # of frames sent out in the WR */
};

enum {
	/* iq flags */
	IQ_ALLOCATED	= (1 << 0),	/* firmware resources allocated */
	IQ_HAS_FL	= (1 << 1),	/* iq associated with a freelist */
	IQ_INTR		= (1 << 2),	/* iq takes direct interrupt */
	IQ_LRO_ENABLED	= (1 << 3),	/* iq is an eth rxq with LRO enabled */

	/* iq state */
	IQS_DISABLED	= 0,
	IQS_BUSY	= 1,
	IQS_IDLE	= 2,
};

/*
 * Ingress Queue: T4 is producer, driver is consumer.
 */
struct sge_iq {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_addr_t ba;		/* bus address of descriptor ring */
	uint32_t flags;
	uint16_t abs_id;	/* absolute SGE id for the iq */
	int8_t   intr_pktc_idx;	/* packet count threshold index */
	int8_t   pad0;
	__be64  *desc;		/* KVA of descriptor ring */

	volatile int state;
	struct adapter *adapter;
	const __be64 *cdesc;	/* current descriptor */
	uint8_t  gen;		/* generation bit */
	uint8_t  intr_params;	/* interrupt holdoff parameters */
	uint8_t  intr_next;	/* XXX: holdoff for next interrupt */
	uint8_t  esize;		/* size (bytes) of each entry in the queue */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer index */
	uint16_t cntxt_id;	/* SGE context id for the iq */

	STAILQ_ENTRY(sge_iq) link;
};

enum {
	EQ_CTRL		= 1,
	EQ_ETH		= 2,
#ifdef TCP_OFFLOAD
	EQ_OFLD		= 3,
#endif

	/* eq flags */
	EQ_TYPEMASK	= 7,		/* 3 lsbits hold the type */
	EQ_ALLOCATED	= (1 << 3),	/* firmware resources allocated */
	EQ_DOOMED	= (1 << 4),	/* about to be destroyed */
	EQ_CRFLUSHED	= (1 << 5),	/* expecting an update from SGE */
	EQ_STALLED	= (1 << 6),	/* out of hw descriptors or dmamaps */
};

/* Listed in order of preference.  Update t4_sysctls too if you change these */
enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};

/*
 * Egress Queue: driver is producer, T4 is consumer.
 *
 * Note: A free list is an egress queue (driver produces the buffers and T4
 * consumes them) but it's special enough to have its own struct (see sge_fl).
 */
struct sge_eq {
	unsigned int flags;	/* MUST be first */
	unsigned int cntxt_id;	/* SGE context id for the eq */
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	char lockname[16];
	struct mtx eq_lock;

	struct tx_desc *desc;	/* KVA of descriptor ring */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct sge_qstat *spg;	/* status page, for convenience */
	int doorbells;
	volatile uint32_t *udb;	/* KVA of doorbell (lies within BAR2) */
	u_int udb_qid;		/* relative qid within the doorbell page */
	uint16_t cap;		/* max # of desc, for convenience */
	uint16_t avail;		/* available descriptors, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cidx;		/* consumer idx (desc idx) */
	uint16_t pidx;		/* producer idx (desc idx) */
	uint16_t pending;	/* # of descriptors used since last doorbell */
	uint16_t iqid;		/* iq that gets egr_update for the eq */
	uint8_t tx_chan;	/* tx channel used by the eq */
	struct task tx_task;
	struct callout tx_callout;

	/* stats */

	uint32_t egr_update;	/* # of SGE_EGR_UPDATE notifications for eq */
	uint32_t unstalled;	/* recovered from stall */
};

enum {
	FL_STARVING	= (1 << 0), /* on the adapter's list of starving fl's */
	FL_DOOMED	= (1 << 1), /* about to be destroyed */
};

#define FL_RUNNING_LOW(fl)	(fl->cap - fl->needed <= fl->lowat)
#define FL_NOT_RUNNING_LOW(fl)	(fl->cap - fl->needed >= 2 * fl->lowat)
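
/*
 * A freelist "runs low" once the number of buffers it holds (cap - needed)
 * drops to lowat or below, and is not considered recovered until that count
 * reaches 2 * lowat.  The gap provides hysteresis so a fl doesn't bounce on
 * and off the adapter's starving-freelist list (sfl).
 */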

struct sge_fl {
	bus_dma_tag_t desc_tag;
	bus_dmamap_t desc_map;
	bus_dma_tag_t tag[FL_BUF_SIZES];
	uint8_t tag_idx;
	struct mtx fl_lock;
	char lockname[16];
	int flags;

	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	bus_addr_t ba;		/* bus address of descriptor ring */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint32_t cap;		/* max # of buffers, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	uint32_t cidx;		/* consumer idx (buffer idx, NOT hw desc idx) */
	uint32_t pidx;		/* producer idx (buffer idx, NOT hw desc idx) */
	uint32_t needed;	/* # of buffers needed to fill up fl. */
	uint32_t lowat;		/* # of buffers <= this means fl needs help */
	uint32_t pending;	/* # of bufs allocated since last doorbell */
	unsigned int dmamap_failed;
	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
};

/* txq: SGE egress queue + what's needed for Ethernet NIC */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct ifnet *ifp;	/* the interface this txq belongs to */
	bus_dma_tag_t tx_tag;	/* tag for transmit buffers */
	struct buf_ring *br;	/* tx buffer ring */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	struct mbuf *m;		/* held up due to temporary resource shortage */

	struct tx_maps txmaps;

	/* stats for common events first */

	uint64_t txcsum;	/* # of times hardware assisted with checksum */
	uint64_t tso_wrs;	/* # of TSO work requests */
	uint64_t vlan_insertion;/* # of times VLAN tag was inserted */
	uint64_t imm_wrs;	/* # of work requests with immediate data */
	uint64_t sgl_wrs;	/* # of work requests with direct SGL */
	uint64_t txpkt_wrs;	/* # of txpkt work requests (not coalesced) */
	uint64_t txpkts_wrs;	/* # of coalesced tx work requests */
	uint64_t txpkts_pkts;	/* # of frames in coalesced tx work requests */

	/* stats for not-that-common events */

	uint32_t no_dmamap;	/* no DMA map to load the mbuf */
	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

/* rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */

	struct ifnet *ifp;	/* the interface this rxq belongs to */
#if defined(INET) || defined(INET6)
	struct lro_ctrl lro;	/* LRO state */
#endif

	/* stats for common events first */

	uint64_t rxcsum;	/* # of times hardware assisted with checksum */
	uint64_t vlan_extraction;/* # of times VLAN tag was extracted */

	/* stats for not-that-common events */

} __aligned(CACHE_LINE_SIZE);

static inline struct sge_rxq *
iq_to_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_rxq, iq));
}


#ifdef TCP_OFFLOAD
/* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
struct sge_ofld_rxq {
	struct sge_iq iq;	/* MUST be first */
	struct sge_fl fl;	/* MUST follow iq */
} __aligned(CACHE_LINE_SIZE);

static inline struct sge_ofld_rxq *
iq_to_ofld_rxq(struct sge_iq *iq)
{

	return (__containerof(iq, struct sge_ofld_rxq, iq));
}
#endif

struct wrqe {
	STAILQ_ENTRY(wrqe) link;
	struct sge_wrq *wrq;
	int wr_len;
	uint64_t wr[] __aligned(16);
};

/*
 * wrq: SGE egress queue that is given prebuilt work requests.  Both the control
 * and offload tx queues are of this type.
 */
struct sge_wrq {
	struct sge_eq eq;	/* MUST be first */

	struct adapter *adapter;

	/* List of WRs held up due to lack of tx descriptors */
	STAILQ_HEAD(, wrqe) wr_list;

	/* stats for common events first */

	uint64_t tx_wrs;	/* # of tx work requests */

	/* stats for not-that-common events */

	uint32_t no_desc;	/* out of hardware descriptors */
} __aligned(CACHE_LINE_SIZE);

struct sge {
	int timer_val[SGE_NTIMERS];
	int counter_val[SGE_NCOUNTERS];
	int fl_starve_threshold;
	int s_qpp;

	int nrxq;	/* total # of Ethernet rx queues */
	int ntxq;	/* total # of Ethernet tx queues */
#ifdef TCP_OFFLOAD
	int nofldrxq;	/* total # of TOE rx queues */
	int nofldtxq;	/* total # of TOE tx queues */
#endif
	int niq;	/* total # of ingress queues */
	int neq;	/* total # of egress queues */

	struct sge_iq fwq;	/* Firmware event queue */
	struct sge_wrq mgmtq;	/* Management queue (control queue) */
	struct sge_wrq *ctrlq;	/* Control queues */
	struct sge_txq *txq;	/* NIC tx queues */
	struct sge_rxq *rxq;	/* NIC rx queues */
#ifdef TCP_OFFLOAD
	struct sge_wrq *ofld_txq;	/* TOE tx queues */
	struct sge_ofld_rxq *ofld_rxq;	/* TOE rx queues */
#endif

	uint16_t iq_start;
	int eq_start;
	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */
};

struct rss_header;
typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
    struct mbuf *);
typedef int (*an_handler_t)(struct sge_iq *, const struct rsp_ctrl *);
typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);

struct adapter {
	SLIST_ENTRY(adapter) link;
	device_t dev;
	struct cdev *cdev;

	/* PCIe register resources */
	int regs_rid;
	struct resource *regs_res;
	int msix_rid;
	struct resource *msix_res;
	bus_space_handle_t bh;
	bus_space_tag_t bt;
	bus_size_t mmio_len;
	int udbs_rid;
	struct resource *udbs_res;
	volatile uint8_t *udbs_base;

	unsigned int pf;
	unsigned int mbox;

	/* Interrupt information */
	int intr_type;
	int intr_count;
	struct irq {
		struct resource *res;
		int rid;
		void *tag;
	} *irq;

	bus_dma_tag_t dmat;	/* Parent DMA tag */

	struct sge sge;

	struct taskqueue *tq[NCHAN];	/* taskqueues that flush data out */
	struct port_info *port[MAX_NPORTS];
	uint8_t chan_map[NCHAN];

#ifdef TCP_OFFLOAD
	void *tom_softc;	/* (struct tom_data *) */
	struct tom_tunables tt;
	void *iwarp_softc;	/* (struct c4iw_dev *) */
#endif
	struct l2t_data *l2t;	/* L2 table */
	struct tid_info tids;

	int doorbells;
	int open_device_map;
#ifdef TCP_OFFLOAD
	int offload_map;
#endif
	int flags;

	char ifp_lockname[16];
	struct mtx ifp_lock;
	struct ifnet *ifp;	/* tracer ifp */
	struct ifmedia media;
	int traceq;		/* iq used by all tracers, -1 if none */
	int tracer_valid;	/* bitmap of valid tracers */
	int tracer_enabled;	/* bitmap of enabled tracers */

	char fw_version[32];
	char cfg_file[32];
	u_int cfcsum;
	struct adapter_params params;
	struct t4_virt_res vres;

	uint16_t linkcaps;
	uint16_t niccaps;
	uint16_t toecaps;
	uint16_t rdmacaps;
	uint16_t iscsicaps;
	uint16_t fcoecaps;

	struct sysctl_ctx_list ctx; /* from adapter_full_init to full_uninit */

	struct mtx sc_lock;
	char lockname[16];

	/* Starving free lists */
	struct mtx sfl_lock;	/* same cache-line as sc_lock? but that's ok */
	TAILQ_HEAD(, sge_fl) sfl;
	struct callout sfl_callout;

	an_handler_t an_handler __aligned(CACHE_LINE_SIZE);
	fw_msg_handler_t fw_msg_handler[5];	/* NUM_FW6_TYPES */
	cpl_handler_t cpl_handler[0xef];	/* NUM_CPL_CMDS */

#ifdef INVARIANTS
	const char *last_op;
	const void *last_op_thr;
#endif
};

#define ADAPTER_LOCK(sc)		mtx_lock(&(sc)->sc_lock)
#define ADAPTER_UNLOCK(sc)		mtx_unlock(&(sc)->sc_lock)
#define ADAPTER_LOCK_ASSERT_OWNED(sc)	mtx_assert(&(sc)->sc_lock, MA_OWNED)
#define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) mtx_assert(&(sc)->sc_lock, MA_NOTOWNED)

/* XXX: not bulletproof, but much better than nothing */
#define ASSERT_SYNCHRONIZED_OP(sc)	\
    KASSERT(IS_BUSY(sc) && \
	(mtx_owned(&(sc)->sc_lock) || sc->last_op_thr == curthread), \
	("%s: operation not synchronized.", __func__))

#define PORT_LOCK(pi)			mtx_lock(&(pi)->pi_lock)
#define PORT_UNLOCK(pi)			mtx_unlock(&(pi)->pi_lock)
#define PORT_LOCK_ASSERT_OWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_OWNED)
#define PORT_LOCK_ASSERT_NOTOWNED(pi)	mtx_assert(&(pi)->pi_lock, MA_NOTOWNED)

#define FL_LOCK(fl)			mtx_lock(&(fl)->fl_lock)
#define FL_TRYLOCK(fl)			mtx_trylock(&(fl)->fl_lock)
#define FL_UNLOCK(fl)			mtx_unlock(&(fl)->fl_lock)
#define FL_LOCK_ASSERT_OWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_OWNED)
#define FL_LOCK_ASSERT_NOTOWNED(fl)	mtx_assert(&(fl)->fl_lock, MA_NOTOWNED)

#define RXQ_FL_LOCK(rxq)		FL_LOCK(&(rxq)->fl)
#define RXQ_FL_UNLOCK(rxq)		FL_UNLOCK(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_OWNED(rxq)	FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
#define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)

#define EQ_LOCK(eq)			mtx_lock(&(eq)->eq_lock)
#define EQ_TRYLOCK(eq)			mtx_trylock(&(eq)->eq_lock)
#define EQ_UNLOCK(eq)			mtx_unlock(&(eq)->eq_lock)
#define EQ_LOCK_ASSERT_OWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_OWNED)
#define EQ_LOCK_ASSERT_NOTOWNED(eq)	mtx_assert(&(eq)->eq_lock, MA_NOTOWNED)

#define TXQ_LOCK(txq)			EQ_LOCK(&(txq)->eq)
#define TXQ_TRYLOCK(txq)		EQ_TRYLOCK(&(txq)->eq)
#define TXQ_UNLOCK(txq)			EQ_UNLOCK(&(txq)->eq)
#define TXQ_LOCK_ASSERT_OWNED(txq)	EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
#define TXQ_LOCK_ASSERT_NOTOWNED(txq)	EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)

#define for_each_txq(pi, iter, q) \
	for (q = &pi->adapter->sge.txq[pi->first_txq], iter = 0; \
	    iter < pi->ntxq; ++iter, ++q)
#define for_each_rxq(pi, iter, q) \
	for (q = &pi->adapter->sge.rxq[pi->first_rxq], iter = 0; \
	    iter < pi->nrxq; ++iter, ++q)
#define for_each_ofld_txq(pi, iter, q) \
	for (q = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq], iter = 0; \
	    iter < pi->nofldtxq; ++iter, ++q)
#define for_each_ofld_rxq(pi, iter, q) \
	for (q = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq], iter = 0; \
	    iter < pi->nofldrxq; ++iter, ++q)
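
/*
 * Example (illustrative only): walking a port's NIC tx queues.
 *
 *	struct sge_txq *txq;
 *	int i;
 *
 *	for_each_txq(pi, i, txq) {
 *		... txq is pi's i'th NIC tx queue ...
 *	}
 */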

/* One for errors, one for firmware events */
#define T4_EXTRA_INTR 2

static inline uint32_t
t4_read_reg(struct adapter *sc, uint32_t reg)
{

	return bus_space_read_4(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
{

	bus_space_write_4(sc->bt, sc->bh, reg, val);
}

static inline uint64_t
t4_read_reg64(struct adapter *sc, uint32_t reg)
{

	return t4_bus_space_read_8(sc->bt, sc->bh, reg);
}

static inline void
t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
{

	t4_bus_space_write_8(sc->bt, sc->bh, reg, val);
}

static inline void
t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
{

	*val = pci_read_config(sc->dev, reg, 1);
}

static inline void
t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
{

	pci_write_config(sc->dev, reg, val, 1);
}

static inline void
t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
{

	*val = pci_read_config(sc->dev, reg, 2);
}

static inline void
t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
{

	pci_write_config(sc->dev, reg, val, 2);
}

static inline void
t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
{

	*val = pci_read_config(sc->dev, reg, 4);
}

static inline void
t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
{

	pci_write_config(sc->dev, reg, val, 4);
}

static inline struct port_info *
adap2pinfo(struct adapter *sc, int idx)
{

	return (sc->port[idx]);
}

static inline void
t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
{

	bcopy(hw_addr, sc->port[idx]->hw_addr, ETHER_ADDR_LEN);
}

static inline bool
is_10G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
}

static inline bool
is_40G_port(const struct port_info *pi)
{

	return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
}

static inline int
tx_resume_threshold(struct sge_eq *eq)
{

	return (eq->qsize / 4);
}

/* t4_main.c */
void t4_tx_task(void *, int);
void t4_tx_callout(void *);
int t4_os_find_pci_capability(struct adapter *, int);
int t4_os_pci_save_state(struct adapter *);
int t4_os_pci_restore_state(struct adapter *);
void t4_os_portmod_changed(const struct adapter *, int);
void t4_os_link_changed(struct adapter *, int, int, int);
void t4_iterate(void (*)(struct adapter *, void *), void *);
int t4_register_cpl_handler(struct adapter *, int, cpl_handler_t);
int t4_register_an_handler(struct adapter *, an_handler_t);
int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
int t4_filter_rpl(struct sge_iq *, const struct rss_header *, struct mbuf *);
int begin_synchronized_op(struct adapter *, struct port_info *, int, char *);
void end_synchronized_op(struct adapter *, int);

/* t4_sge.c */
void t4_sge_modload(void);
void t4_init_sge_cpl_handlers(struct adapter *);
void t4_tweak_chip_settings(struct adapter *);
int t4_read_chip_settings(struct adapter *);
int t4_create_dma_tag(struct adapter *);
void t4_sge_sysctls(struct adapter *, struct sysctl_ctx_list *,
    struct sysctl_oid_list *);
int t4_destroy_dma_tag(struct adapter *);
int t4_setup_adapter_queues(struct adapter *);
int t4_teardown_adapter_queues(struct adapter *);
int t4_setup_port_queues(struct port_info *);
int t4_teardown_port_queues(struct port_info *);
int t4_alloc_tx_maps(struct tx_maps *, bus_dma_tag_t, int, int);
void t4_free_tx_maps(struct tx_maps *, bus_dma_tag_t);
void t4_intr_all(void *);
void t4_intr(void *);
void t4_intr_err(void *);
void t4_intr_evt(void *);
void t4_wrq_tx_locked(struct adapter *, struct sge_wrq *, struct wrqe *);
int t4_eth_tx(struct ifnet *, struct sge_txq *, struct mbuf *);
void t4_update_fl_bufsize(struct ifnet *);
int can_resume_tx(struct sge_eq *);

/* t4_tracer.c */
struct t4_tracer;
void t4_tracer_modload(void);
void t4_tracer_modunload(void);
void t4_tracer_port_detach(struct adapter *);
int t4_get_tracer(struct adapter *, struct t4_tracer *);
int t4_set_tracer(struct adapter *, struct t4_tracer *);
int t4_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);
int t5_trace_pkt(struct sge_iq *, const struct rss_header *, struct mbuf *);

static inline struct wrqe *
alloc_wrqe(int wr_len, struct sge_wrq *wrq)
{
	int len = offsetof(struct wrqe, wr) + wr_len;
	struct wrqe *wr;

	wr = malloc(len, M_CXGBE, M_NOWAIT);
	if (__predict_false(wr == NULL))
		return (NULL);
	wr->wr_len = wr_len;
	wr->wrq = wrq;
	return (wr);
}

static inline void *
wrtod(struct wrqe *wr)
{
	return (&wr->wr[0]);
}

static inline void
free_wrqe(struct wrqe *wr)
{
	free(wr, M_CXGBE);
}

static inline void
t4_wrq_tx(struct adapter *sc, struct wrqe *wr)
{
	struct sge_wrq *wrq = wr->wrq;

	TXQ_LOCK(wrq);
	t4_wrq_tx_locked(sc, wrq, wr);
	TXQ_UNLOCK(wrq);
}
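
/*
 * Typical flow for a prebuilt work request (illustrative sketch; "struct
 * fw_cmd" stands in for whatever command/WR structure the caller builds):
 *
 *	struct fw_cmd *cmd;
 *	struct wrqe *wr;
 *
 *	wr = alloc_wrqe(sizeof(*cmd), wrq);
 *	if (wr == NULL)
 *		return (ENOMEM);	(allocation is M_NOWAIT)
 *	cmd = wrtod(wr);		(pointer to the WR payload)
 *	... fill in *cmd ...
 *	t4_wrq_tx(sc, wr);		(queues it under the eq lock)
 *
 * free_wrqe() is intended for WRs that are abandoned before being handed to
 * t4_wrq_tx().
 */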

#endif