cxgb_sge.c revision 237263
/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_sge.c 237263 2012-06-19 07:34:13Z np $");

#include "opt_inet6.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/sglist.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <cxgb_include.h>
#include <sys/mvec.h>

int	txq_fills = 0;
int	multiq_tx_enable = 1;

#ifdef TCP_OFFLOAD
CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
#endif

extern struct sysctl_oid_list sysctl__hw_cxgb_children;
int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
    "size of per-queue mbuf ring");

static int cxgb_tx_coalesce_force = 0;
TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
    &cxgb_tx_coalesce_force, 0,
    "coalesce small packets into a single work request regardless of ring state");

#define	COALESCE_START_DEFAULT		(TX_ETH_Q_SIZE>>1)
#define	COALESCE_START_MAX		(TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
#define	COALESCE_STOP_DEFAULT		(TX_ETH_Q_SIZE>>2)
#define	COALESCE_STOP_MIN		(TX_ETH_Q_SIZE>>5)
#define	TX_RECLAIM_DEFAULT		(TX_ETH_Q_SIZE>>5)
#define	TX_RECLAIM_MAX			(TX_ETH_Q_SIZE>>2)
#define	TX_RECLAIM_MIN			(TX_ETH_Q_SIZE>>6)

static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
    &cxgb_tx_coalesce_enable_start);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
    &cxgb_tx_coalesce_enable_start, 0,
    "coalesce enable threshold");
static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
    &cxgb_tx_coalesce_enable_stop, 0,
    "coalesce disable threshold");
static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
    &cxgb_tx_reclaim_threshold, 0,
    "tx cleaning minimum threshold");
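
/*
 * The CTLFLAG_RDTUN knob above can only be set as a boot-time tunable,
 * e.g. in /boot/loader.conf:
 *
 *	hw.cxgb.txq_mr_size="2048"
 *
 * while the CTLFLAG_RW knobs may also be adjusted on a running system,
 * e.g. sysctl hw.cxgb.tx_coalesce_force=1.  (Illustrative values only.)
 */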

/*
 * XXX don't re-enable this until TOE stops assuming
 * we have an m_ext
 */
static int recycle_enable = 0;

extern int cxgb_use_16k_clusters;
extern int nmbjumbop;
extern int nmbjumbo9;
extern int nmbjumbo16;

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_RX_DROP_THRES	16
#define SGE_RX_COPY_THRES	128

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD       (hz >> 1)

/*
 * Values for sge_txq.flags
 */
enum {
	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
};

struct tx_desc {
	uint64_t	flit[TX_DESC_FLITS];
} __packed;

struct rx_desc {
	uint32_t	addr_lo;
	uint32_t	len_gen;
	uint32_t	gen2;
	uint32_t	addr_hi;
} __packed;

struct rsp_desc {               /* response queue descriptor */
	struct rss_header	rss_hdr;
	uint32_t		flags;
	uint32_t		len_cq;
	uint8_t			imm_data[47];
	uint8_t			intr_gen;
} __packed;

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE        (1 << 3)
#define TX_SW_DESC_MAPPED       (1 << 4)

#define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
#define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
#define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
#define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	caddr_t		rxsd_cl;
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct txq_state {
	unsigned int	compl;
	unsigned int	gen;
	unsigned int	pidx;
};

struct refill_fl_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static uint8_t flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
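
/*
 * Worked example of the formula above: a Tx descriptor holds TX_DESC_FLITS
 * flits, and with SGE_NUM_GENBITS == 2 the last flit of each descriptor is
 * reserved for the second generation bit (see wr_gen2() below), leaving
 * WR_FLITS == 15.  The table then maps 15 flits to 1 descriptor and 16
 * flits to 2, matching desc = 1 + (16 - 2) / (15 - 1) = 2.
 */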

#define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
#define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
#define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
#define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
#define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_NEEDS_ENQUEUE(qs)					\
	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_DEQUEUE_COND(qs, func, arg)				\
	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
#define	TXQ_RING_DEQUEUE(qs) \
	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)

int cxgb_debug = 0;

static void sge_timer_cb(void *arg);
static void sge_timer_reclaim(void *arg, int ncount);
static void sge_txq_reclaim_handler(void *arg, int ncount);
static void cxgb_start_locked(struct sge_qset *qs);

/*
 * XXX need to cope with bursty scheduling by looking at a wider
 * window than we are now for determining the need for coalescing
 */
static __inline uint64_t
check_pkt_coalesce(struct sge_qset *qs)
{
	struct adapter *sc;
	struct sge_txq *txq;
	uint8_t *fill;

	if (__predict_false(cxgb_tx_coalesce_force))
		return (1);
	txq = &qs->txq[TXQ_ETH];
	sc = qs->port->adapter;
	fill = &sc->tunq_fill[qs->idx];

	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
	/*
	 * If the hardware transmit queue fills past
	 * cxgb_tx_coalesce_enable_start descriptors we mark it as
	 * coalescing; we drop back out of coalescing once it drains below
	 * cxgb_tx_coalesce_enable_stop and there are no packets enqueued.
	 * This provides some degree of hysteresis.
	 */
	if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
		*fill = 0;
	else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
		*fill = 1;

	return (sc->tunq_coalesce);
}
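
/*
 * Illustration, assuming the defaults and TX_ETH_Q_SIZE == 1024: the fill
 * flag is raised once 512 descriptors (COALESCE_START_DEFAULT) are in use
 * and is cleared only after the queue drains below 256
 * (COALESCE_STOP_DEFAULT), the software ring is empty, and qs->coalescing
 * has been cleared, so the queue doesn't flap in and out of coalescing.
 */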

#ifdef __LP64__
static void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{
	uint64_t wr_hilo;
#if _BYTE_ORDER == _LITTLE_ENDIAN
	wr_hilo = wr_hi;
	wr_hilo |= (((uint64_t)wr_lo)<<32);
#else
	wr_hilo = wr_lo;
	wr_hilo |= (((uint64_t)wr_hi)<<32);
#endif
	wrp->wrh_hilo = wr_hilo;
}
#else
static void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{

	wrp->wrh_hi = wr_hi;
	wmb();
	wrp->wrh_lo = wr_lo;
}
#endif
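
/*
 * Both variants above keep the SGE from ever observing a half-updated WR
 * header: on LP64 a single 64-bit store updates both words atomically,
 * while the 32-bit variant writes the high word first and relies on wmb()
 * so that the low word, which carries the generation bit marking the WR
 * valid, becomes visible last.
 */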

struct coalesce_info {
	int count;
	int nbytes;
};

static int
coalesce_check(struct mbuf *m, void *arg)
{
	struct coalesce_info *ci = arg;
	int *count = &ci->count;
	int *nbytes = &ci->nbytes;

	if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
	    (*count < 7) && (m->m_next == NULL))) {
		*count += 1;
		*nbytes += m->m_len;
		return (1);
	}
	return (0);
}
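
/*
 * The limits in coalesce_check() mirror the coalesced WR format used by
 * t3_encap(): a struct cpl_tx_pkt_batch holds at most 7 packet entries
 * (7 * 2 + 1 == WR_FLITS flits), each describing one contiguous buffer,
 * which is why only single-mbuf packets qualify and why 7 * ~1500 bytes
 * gives the 10500-byte cap.
 */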

static struct mbuf *
cxgb_dequeue(struct sge_qset *qs)
{
	struct mbuf *m, *m_head, *m_tail;
	struct coalesce_info ci;

	if (check_pkt_coalesce(qs) == 0)
		return TXQ_RING_DEQUEUE(qs);

	m_head = m_tail = NULL;
	ci.count = ci.nbytes = 0;
	do {
		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
		if (m_head == NULL) {
			m_tail = m_head = m;
		} else if (m != NULL) {
			m_tail->m_nextpkt = m;
			m_tail = m;
		}
	} while (m != NULL);
	if (ci.count > 7)
		panic("trying to coalesce %d packets in to one WR", ci.count);
	return (m_head);
}

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@qs: the queue set whose Tx queue is to be reclaimed
 *	@reclaim_min: don't bother reclaiming fewer than this many descriptors
 *	@queue: index of the Tx queue to reclaim (TXQ_ETH, TXQ_OFLD, TXQ_CTRL)
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static __inline int
reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
{
	struct sge_txq *q = &qs->txq[queue];
	int reclaim = desc_reclaimable(q);

	if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
	    (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
		cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;

	if (reclaim < reclaim_min)
		return (0);

	mtx_assert(&qs->lock, MA_OWNED);
	if (reclaim > 0) {
		t3_free_tx_desc(qs, reclaim, queue);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	if (isset(&qs->txq_stopped, TXQ_ETH))
		clrbit(&qs->txq_stopped, TXQ_ETH);

	return (reclaim);
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static __inline int
should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}
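
/*
 * For example, with a 1024-entry Tx queue a suspended queue is restarted
 * only once fewer than 512 descriptors remain outstanding, counting
 * descriptors the SGE has processed but we have not yet cleaned as free.
 */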

/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void
t3_sge_init(adapter_t *adap, struct sge_params *p)
{
	u_int ctrl, ups;

	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static __inline unsigned int
sgl_len(unsigned int n)
{
	return ((3 * n) / 2 + (n & 1));
}
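
/*
 * Each struct sg_ent packs two (length, address) pairs into three flits,
 * e.g. sgl_len(5) = (3 * 5) / 2 + (5 & 1) = 8: two full entries plus a
 * half-used third whose unused slot is zeroed by make_sgl() below.
 */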

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static int
get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
{

	if (resp->rss_hdr.opcode == CPL_RX_DATA) {
		const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
	} else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
		const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
	} else
		m->m_len = IMMED_PKT_SIZE;
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_type = 0;
	memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
	return (0);
}
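
/*
 * Immediate-data responses carry their payload in the 47-byte imm_data[]
 * array of the rsp_desc, so the lengths computed above are expected to
 * fit within a single response descriptor.
 */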

static __inline u_int
flits_to_desc(u_int n)
{
	return (flit_desc_map[n]);
}

#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)

/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void
t3_sge_err_intr_handler(adapter_t *adapter)
{
	unsigned int v, status;

	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);
	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue (0x%x)\n",
			 (v >> S_RSPQ0DISABLED) & 0xff);
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}

void
t3_sge_prep(adapter_t *adap, struct sge_params *p)
{
	int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;

	nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
	nqsets *= adap->params.nports;

	fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);

	while (!powerof2(fl_q_size))
		fl_q_size--;

	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
	    is_offload(adap);

#if __FreeBSD_version >= 700111
	if (use_16k) {
		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM16BYTES;
	} else {
		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM9BYTES;
	}
#else
	jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
	jumbo_buf_size = MJUMPAGESIZE;
#endif
	while (!powerof2(jumbo_q_size))
		jumbo_q_size--;

	if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
		device_printf(adap->dev,
		    "Insufficient clusters and/or jumbo buffers.\n");

	p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		if (adap->params.nports > 2) {
			q->coalesce_usecs = 50;
		} else {
#ifdef INVARIANTS
			q->coalesce_usecs = 10;
#else
			q->coalesce_usecs = 5;
#endif
		}
		q->polling = 0;
		q->rspq_size = RSPQ_Q_SIZE;
		q->fl_size = fl_q_size;
		q->jumbo_size = jumbo_q_size;
		q->jumbo_buf_size = jumbo_buf_size;
		q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
		q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
		q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
		q->cong_thres = 0;
	}
}

int
t3_sge_alloc(adapter_t *sc)
{

	/* The parent tag. */
	if (bus_dma_tag_create( bus_get_dma_tag(sc->dev),/* PCI parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				BUS_SPACE_UNRESTRICTED, /* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lock, lockarg */
				&sc->parent_dmat)) {
		device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for normal sized RX frames
	 */
	if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
		MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
		device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for jumbo sized RX frames.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
		BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
		device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for TX frames.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
		TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
		NULL, NULL, &sc->tx_dmat)) {
		device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
		return (ENOMEM);
	}

	return (0);
}

int
t3_sge_free(struct adapter * sc)
{

	if (sc->tx_dmat != NULL)
		bus_dma_tag_destroy(sc->tx_dmat);

	if (sc->rx_jumbo_dmat != NULL)
		bus_dma_tag_destroy(sc->rx_jumbo_dmat);

	if (sc->rx_dmat != NULL)
		bus_dma_tag_destroy(sc->rx_dmat);

	if (sc->parent_dmat != NULL)
		bus_dma_tag_destroy(sc->parent_dmat);

	return (0);
}

void
t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{

	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
	qs->rspq.polling = 0 /* p->polling */;
}

#if !defined(__i386__) && !defined(__amd64__)
static void
refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct refill_fl_cb_arg *cb_arg = arg;

	cb_arg->error = error;
	cb_arg->seg = segs[0];
	cb_arg->nseg = nseg;
}
#endif

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@sc: the controller softc
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers.
 *	The caller must assure that @n does not exceed the queue's capacity.
 */
static void
refill_fl(adapter_t *sc, struct sge_fl *q, int n)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	struct refill_fl_cb_arg cb_arg;
	struct mbuf *m;
	caddr_t cl;
	int err;

	cb_arg.error = 0;
	while (n--) {
		/*
		 * We allocate an uninitialized mbuf + cluster, mbuf is
		 * initialized after rx.
		 */
		if (q->zone == zone_pack) {
			if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
				break;
			cl = m->m_ext.ext_buf;
		} else {
			if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
				break;
			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
				uma_zfree(q->zone, cl);
				break;
			}
		}
		if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
			if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
				log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
				uma_zfree(q->zone, cl);
				goto done;
			}
			sd->flags |= RX_SW_DESC_MAP_CREATED;
		}
#if !defined(__i386__) && !defined(__amd64__)
		err = bus_dmamap_load(q->entry_tag, sd->map,
		    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);

		if (err != 0 || cb_arg.error) {
			if (q->zone == zone_pack)
				uma_zfree(q->zone, cl);
			m_free(m);
			goto done;
		}
#else
		cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
#endif
		sd->flags |= RX_SW_DESC_INUSE;
		sd->rxsd_cl = cl;
		sd->m = m;
		d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
		d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >>32) & 0xffffffff);
		d->len_gen = htobe32(V_FLD_GEN1(q->gen));
		d->gen2 = htobe32(V_FLD_GEN2(q->gen));

		d++;
		sd++;

		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
		q->db_pending++;
	}

done:
	if (q->db_pending >= 32) {
		q->db_pending = 0;
		t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}
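
/*
 * Note the doorbell batching above: the SG_KDOORBELL write that tells the
 * chip about new free-list buffers is only issued once at least 32 credits
 * (db_pending) have accumulated, trading slightly delayed buffer
 * visibility for far fewer MMIO writes on the refill path.
 */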

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@sc: the controller softc
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void
free_rx_bufs(adapter_t *sc, struct sge_fl *q)
{
	u_int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		if (d->flags & RX_SW_DESC_INUSE) {
			bus_dmamap_unload(q->entry_tag, d->map);
			bus_dmamap_destroy(q->entry_tag, d->map);
			if (q->zone == zone_pack) {
				m_init(d->m, zone_pack, MCLBYTES,
				    M_NOWAIT, MT_DATA, M_EXT);
				uma_zfree(zone_pack, d->m);
			} else {
				m_init(d->m, zone_mbuf, MLEN,
				    M_NOWAIT, MT_DATA, 0);
				uma_zfree(zone_mbuf, d->m);
				uma_zfree(q->zone, d->rxsd_cl);
			}
		}

		d->rxsd_cl = NULL;
		d->m = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

static __inline void
__refill_fl(adapter_t *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits));
}

static __inline void
__refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
{
	uint32_t reclaimable = fl->size - fl->credits;

	if (reclaimable > 0)
		refill_fl(adap, fl, min(max, reclaimable));
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adapter: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void
recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to   = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;        // already big endian
	to->addr_hi = from->addr_hi;        // likewise
	wmb();	/* necessary ? */
	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static void
alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static int
alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
    bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = NULL;
	int err;

	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
				      BUS_SPACE_MAXADDR_32BIT,
				      BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
				      len, 0, NULL, NULL, tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
		return (ENOMEM);
	}

	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
				    map)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
		return (ENOMEM);
	}

	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
	bzero(p, len);
	*(void **)desc = p;

	if (sw_size) {
		len = nelem * sw_size;
		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
		*(void **)sdesc = s;
	}
	if (parent_entry_tag == NULL)
		return (0);

	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
				      BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				      NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
				      TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
				      NULL, NULL, entry_tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
		return (ENOMEM);
	}
	return (0);
}

static void
sge_slow_intr_handler(void *arg, int ncount)
{
	adapter_t *sc = arg;

	t3_slow_intr_handler(sc);
	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
}

/**
 *	sge_timer_cb - perform periodic maintenance of an SGE qset
 *	@data: the SGE queue set to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs the following tasks:
 *
 *	a) Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
 *	best effort attempt to reclaim descriptors, in that we don't wait
 *	around if we cannot get a queue's lock (which most likely is because
 *	someone else is queueing new packets and so will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	b) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 *
 *	c) Return coalesced response queue credits in case a response queue is
 *	starved.
 *
 *	d) Ring doorbells for T304 tunnel queues since we have seen doorbell
 *	fifo overflows and the FW doesn't implement any recovery scheme yet.
 */
static void
sge_timer_cb(void *arg)
{
	adapter_t *sc = arg;

	if ((sc->flags & USING_MSIX) == 0) {
		struct port_info *pi;
		struct sge_qset *qs;
		struct sge_txq  *txq;
		int i, j;
		int reclaim_ofl, refill_rx;

		if (sc->open_device_map == 0)
			return;

		for (i = 0; i < sc->params.nports; i++) {
			pi = &sc->port[i];
			for (j = 0; j < pi->nqsets; j++) {
				qs = &sc->sge.qs[pi->first_qset + j];
				txq = &qs->txq[0];
				reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
				refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
				    (qs->fl[1].credits < qs->fl[1].size));
				if (reclaim_ofl || refill_rx) {
					taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
					break;
				}
			}
		}
	}

	if (sc->params.nports > 2) {
		int i;

		for_each_port(sc, i) {
			struct port_info *pi = &sc->port[i];

			t3_write_reg(sc, A_SG_KDOORBELL,
				     F_SELEGRCNTX |
				     (FW_TUNNEL_SGEEC_START + pi->first_qset));
		}
	}
	if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
	    sc->open_device_map != 0)
		callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
}

/*
 * This is meant to be a catch-all function to keep sge state private
 * to sge.c
 */
int
t3_sge_init_adapter(adapter_t *sc)
{
	callout_init(&sc->sge_timer_ch, CALLOUT_MPSAFE);
	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
	TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
	return (0);
}

int
t3_sge_reset_adapter(adapter_t *sc)
{
	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
	return (0);
}

int
t3_sge_init_port(struct port_info *pi)
{
	TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
	return (0);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@adapter: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static __inline void
refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
{

	/* mbufs are allocated on demand when a rspq entry is processed. */
	t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

static void
sge_txq_reclaim_handler(void *arg, int ncount)
{
	struct sge_qset *qs = arg;
	int i;

	for (i = 0; i < 3; i++)
		reclaim_completed_tx(qs, 16, i);
}

static void
sge_timer_reclaim(void *arg, int ncount)
{
	struct port_info *pi = arg;
	int i, nqsets = pi->nqsets;
	adapter_t *sc = pi->adapter;
	struct sge_qset *qs;
	struct mtx *lock;

	KASSERT((sc->flags & USING_MSIX) == 0,
	    ("can't call timer reclaim for msi-x"));

	for (i = 0; i < nqsets; i++) {
		qs = &sc->sge.qs[pi->first_qset + i];

		reclaim_completed_tx(qs, 16, TXQ_OFLD);
		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
			    &sc->sge.qs[0].rspq.lock;

		if (mtx_trylock(lock)) {
			/* XXX currently assume that we are *NOT* polling */
			uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);

			if (qs->fl[0].credits < qs->fl[0].size - 16)
				__refill_fl(sc, &qs->fl[0]);
			if (qs->fl[1].credits < qs->fl[1].size - 16)
				__refill_fl(sc, &qs->fl[1]);

			if (status & (1 << qs->rspq.cntxt_id)) {
				if (qs->rspq.credits) {
					refill_rspq(sc, &qs->rspq, 1);
					qs->rspq.credits--;
					t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
					    1 << qs->rspq.cntxt_id);
				}
			}
			mtx_unlock(lock);
		}
	}
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void
init_qset_cntxt(struct sge_qset *qs, u_int id)
{

	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;

	mbufq_init(&qs->txq[TXQ_ETH].sendq);
	mbufq_init(&qs->txq[TXQ_OFLD].sendq);
	mbufq_init(&qs->txq[TXQ_CTRL].sendq);
}

static void
txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
{
	txq->in_use += ndesc;
	/*
	 * XXX we don't handle stopping of queue
	 * presumably start handles this when we bump against the end
	 */
	txqs->gen = txq->gen;
	txq->unacked += ndesc;
	txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
	txq->unacked &= 31;
	txqs->pidx = txq->pidx;
	txq->pidx += ndesc;
#ifdef INVARIANTS
	if (((txqs->pidx > txq->cidx) &&
		(txq->pidx < txqs->pidx) &&
		(txq->pidx >= txq->cidx)) ||
	    ((txqs->pidx < txq->cidx) &&
		(txq->pidx >= txq->cidx)) ||
	    ((txqs->pidx < txq->cidx) &&
		(txq->cidx < txqs->pidx)))
		panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
		    txqs->pidx, txq->pidx, txq->cidx);
#endif
	if (txq->pidx >= txq->size) {
		txq->pidx -= txq->size;
		txq->gen ^= 1;
	}
}
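
/*
 * The compl computation in txq_prod() requests a completion from the SGE
 * roughly once per 32 descriptors: unacked is kept modulo 32, and whenever
 * adding ndesc carries into bit 5 (value 32) that bit is shifted up into
 * the WR_COMPL position of the WR header.
 */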

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@m: the packet mbufs
 *	@nsegs: the number of segments
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static __inline unsigned int
calc_tx_descs(const struct mbuf *m, int nsegs)
{
	unsigned int flits;

	if (m->m_pkthdr.len <= PIO_LEN)
		return 1;

	flits = sgl_len(nsegs) + 2;
	if (m->m_pkthdr.csum_flags & CSUM_TSO)
		flits++;

	return flits_to_desc(flits);
}

/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@sgp: the SGL to populate
 *	@segs: the packet dma segments
 *	@nsegs: the number of segments
 *
 *	Generates a scatter/gather list for the buffers that make up a packet.
 *	The caller must size the SGL appropriately.
 */
static __inline void
make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
{
	int i, idx;

	for (idx = 0, i = 0; i < nsegs; i++) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (segs[i].ds_len == 0)
			continue;
		if (i && idx == 0)
			++sgp;

		sgp->len[idx] = htobe32(segs[i].ds_len);
		sgp->addr[idx] = htobe64(segs[i].ds_addr);
		idx ^= 1;
	}

	if (idx) {
		sgp->len[idx] = 0;
		sgp->addr[idx] = 0;
	}
}

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@mustring: ring the doorbell now rather than batching
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW is going to sleep just after we checked, however,
 *	then the interrupt handler will detect the outstanding TX packet
 *	and ring the doorbell for us.
 *
 *	When GTS is disabled the doorbell is rung once 32 descriptors are
 *	pending, or immediately when @mustring is set.
 */
static __inline void
check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
#ifdef T3_TRACE
		T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
			  q->cntxt_id);
#endif
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	if (mustring || ++q->db_pending >= 32) {
		wmb();            /* write descriptors before telling HW */
		t3_write_reg(adap, A_SG_KDOORBELL,
		    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
		q->db_pending = 0;
	}
#endif
}
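
/*
 * In the non-GTS path, callers that can tolerate deferral pass mustring ==
 * 0 and the doorbell MMIO write happens only once 32 descriptors are
 * pending; a caller passes a non-zero mustring to flush db_pending
 * immediately, e.g. when the queue would otherwise go idle.
 */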
1256167514Skmacy
1257167514Skmacystatic __inline void
1258167514Skmacywr_gen2(struct tx_desc *d, unsigned int gen)
1259167514Skmacy{
1260167514Skmacy#if SGE_NUM_GENBITS == 2
1261167514Skmacy	d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
1262167514Skmacy#endif
1263167514Skmacy}
1264167514Skmacy
1265169978Skmacy/**
1266169978Skmacy *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1267169978Skmacy *	@ndesc: number of Tx descriptors spanned by the SGL
1268169978Skmacy *	@txd: first Tx descriptor to be written
1269169978Skmacy *	@txqs: txq state (generation and producer index)
1270169978Skmacy *	@txq: the SGE Tx queue
1271169978Skmacy *	@sgl: the SGL
1272169978Skmacy *	@flits: number of flits to the start of the SGL in the first descriptor
1273169978Skmacy *	@sgl_flits: the SGL size in flits
1274169978Skmacy *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1275169978Skmacy *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1276169978Skmacy *
1277169978Skmacy *	Write a work request header and an associated SGL.  If the SGL is
1278169978Skmacy *	small enough to fit into one Tx descriptor it has already been written
1279169978Skmacy *	and we just need to write the WR header.  Otherwise we distribute the
1280169978Skmacy *	SGL across the number of descriptors it spans.
1281169978Skmacy */
1282169978Skmacystatic void
1283169978Skmacywrite_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1284169978Skmacy    const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1285169978Skmacy    unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1286169978Skmacy{
1287169978Skmacy
1288169978Skmacy	struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1289169978Skmacy	struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1290169978Skmacy
1291169978Skmacy	if (__predict_true(ndesc == 1)) {
1292194521Skmacy		set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1293237263Snp		    V_WR_SGLSFLT(flits)) | wr_hi,
1294237263Snp		    htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1295237263Snp		    wr_lo);
1296237263Snp
1297169978Skmacy		wr_gen2(txd, txqs->gen);
1298174708Skmacy
1299169978Skmacy	} else {
1300169978Skmacy		unsigned int ogen = txqs->gen;
1301169978Skmacy		const uint64_t *fp = (const uint64_t *)sgl;
1302169978Skmacy		struct work_request_hdr *wp = wrp;
1303169978Skmacy
1304194521Skmacy		wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1305169978Skmacy		    V_WR_SGLSFLT(flits)) | wr_hi;
1306169978Skmacy
1307169978Skmacy		while (sgl_flits) {
1308169978Skmacy			unsigned int avail = WR_FLITS - flits;
1309169978Skmacy
1310169978Skmacy			if (avail > sgl_flits)
1311169978Skmacy				avail = sgl_flits;
1312169978Skmacy			memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1313169978Skmacy			sgl_flits -= avail;
1314169978Skmacy			ndesc--;
1315169978Skmacy			if (!sgl_flits)
1316169978Skmacy				break;
1317169978Skmacy
1318169978Skmacy			fp += avail;
1319169978Skmacy			txd++;
1320169978Skmacy			txsd++;
1321169978Skmacy			if (++txqs->pidx == txq->size) {
1322169978Skmacy				txqs->pidx = 0;
1323169978Skmacy				txqs->gen ^= 1;
1324169978Skmacy				txd = txq->desc;
1325169978Skmacy				txsd = txq->sdesc;
1326169978Skmacy			}
1327194521Skmacy
1328169978Skmacy			/*
1329169978Skmacy			 * when the head of the mbuf chain
1330169978Skmacy			 * is freed all clusters will be freed
1331169978Skmacy			 * with it
1332169978Skmacy			 */
1333169978Skmacy			wrp = (struct work_request_hdr *)txd;
1334194521Skmacy			wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1335169978Skmacy			    V_WR_SGLSFLT(1)) | wr_hi;
1336194521Skmacy			wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1337169978Skmacy				    sgl_flits + 1)) |
1338169978Skmacy			    V_WR_GEN(txqs->gen)) | wr_lo;
1339169978Skmacy			wr_gen2(txd, txqs->gen);
1340169978Skmacy			flits = 1;
1341169978Skmacy		}
1342194521Skmacy		wrp->wrh_hi |= htonl(F_WR_EOP);
1343169978Skmacy		wmb();
1344194521Skmacy		wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1345169978Skmacy		wr_gen2((struct tx_desc *)wp, ogen);
1346169978Skmacy	}
1347169978Skmacy}
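
/*
 * Illustrative sketch (not part of the driver): how a caller strings the
 * SGL helpers together around write_wr_hdr_sgl().  This condenses the tail
 * of t3_encap() below; 'segs' and 'nsegs' are assumed to come from a
 * successful busdma load, and 'flits' counts the CPL header flits already
 * written into the first descriptor.
 */
#if 0
	struct txq_state txqs;
	struct tx_desc *txd = &txq->desc[txq->pidx];
	struct sg_ent sgl[TX_MAX_SEGS / 2 + 1], *sgp;
	unsigned int ndesc, flits = 2, sgl_flits;
	uint32_t wr_hi, wr_lo;

	ndesc = calc_tx_descs(m0, nsegs);	/* descriptors the WR spans */
	txq_prod(txq, ndesc, &txqs);		/* reserve them, snapshot gen/pidx */
	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
	make_sgl(sgp, segs, nsegs);		/* pack DMA segments into the SGL */
	sgl_flits = sgl_len(nsegs);		/* each sg_ent: 2 segments in 3 flits */
	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
	wr_lo = htonl(V_WR_TID(txq->token));
	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits, sgl_flits,
	    wr_hi, wr_lo);
#endif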
1348169978Skmacy
1349204348Snp/* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1350204348Snp#define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1351167514Skmacy
1352174708Skmacy#define GET_VTAG(cntrl, m) \
1353174708Skmacydo { \
1354174708Skmacy	if ((m)->m_flags & M_VLANTAG)					            \
1355174708Skmacy		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1356174708Skmacy} while (0)
1357174708Skmacy
1358194521Skmacystatic int
1359194521Skmacyt3_encap(struct sge_qset *qs, struct mbuf **m)
1360167514Skmacy{
1361167514Skmacy	adapter_t *sc;
1362167514Skmacy	struct mbuf *m0;
1363167514Skmacy	struct sge_txq *txq;
1364167514Skmacy	struct txq_state txqs;
1365174708Skmacy	struct port_info *pi;
1366171868Skmacy	unsigned int ndesc, flits, cntrl, mlen;
1367172096Skmacy	int err, nsegs, tso_info = 0;
1368167514Skmacy
1369167514Skmacy	struct work_request_hdr *wrp;
1370167514Skmacy	struct tx_sw_desc *txsd;
1371174708Skmacy	struct sg_ent *sgp, *sgl;
1372167514Skmacy	uint32_t wr_hi, wr_lo, sgl_flits;
1373175347Skmacy	bus_dma_segment_t segs[TX_MAX_SEGS];
1374167514Skmacy
1375167514Skmacy	struct tx_desc *txd;
1376174708Skmacy
1377174708Skmacy	pi = qs->port;
1378174708Skmacy	sc = pi->adapter;
1379167514Skmacy	txq = &qs->txq[TXQ_ETH];
1380175347Skmacy	txd = &txq->desc[txq->pidx];
1381174708Skmacy	txsd = &txq->sdesc[txq->pidx];
1382174708Skmacy	sgl = txq->txq_sgl;
1383194521Skmacy
1384194521Skmacy	prefetch(txd);
1385174708Skmacy	m0 = *m;
1386204348Snp
1387194521Skmacy	mtx_assert(&qs->lock, MA_OWNED);
1388174708Skmacy	cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1389194521Skmacy	KASSERT(m0->m_flags & M_PKTHDR, ("not a packet header"));
1390194521Skmacy
1391194521Skmacy	if  (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1392194521Skmacy	    m0->m_pkthdr.csum_flags & (CSUM_TSO))
1393168644Skmacy		tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1394204274Snp
1395194521Skmacy	if (m0->m_nextpkt != NULL) {
1396195006Snp		busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1397194521Skmacy		ndesc = 1;
1398194521Skmacy		mlen = 0;
1399194521Skmacy	} else {
1400195006Snp		if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1401195006Snp		    &m0, segs, &nsegs))) {
1402194521Skmacy			if (cxgb_debug)
1403194521Skmacy				printf("busdma_map_sg_collapse failed, err=%d\n", err);
1404174708Skmacy			return (err);
1405194521Skmacy		}
1406194521Skmacy		mlen = m0->m_pkthdr.len;
1407194521Skmacy		ndesc = calc_tx_descs(m0, nsegs);
1408194521Skmacy	}
1409194521Skmacy	txq_prod(txq, ndesc, &txqs);
1410174708Skmacy
1411194521Skmacy	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1412194521Skmacy	txsd->m = m0;
1413194521Skmacy
1414194521Skmacy	if (m0->m_nextpkt != NULL) {
1415174708Skmacy		struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1416174708Skmacy		int i, fidx;
1417174708Skmacy
1418194521Skmacy		if (nsegs > 7)
1419194521Skmacy			panic("trying to coalesce %d packets into one WR", nsegs);
1420194521Skmacy		txq->txq_coalesced += nsegs;
1421174708Skmacy		wrp = (struct work_request_hdr *)txd;
1422194521Skmacy		flits = nsegs*2 + 1;
1423174708Skmacy
1424194521Skmacy		for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1425194521Skmacy			struct cpl_tx_pkt_batch_entry *cbe;
1426194521Skmacy			uint64_t flit;
1427194521Skmacy			uint32_t *hflit = (uint32_t *)&flit;
1428194521Skmacy			int cflags = m0->m_pkthdr.csum_flags;
1429174708Skmacy
1430174708Skmacy			cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1431194521Skmacy			GET_VTAG(cntrl, m0);
1432174708Skmacy			cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1433194521Skmacy			if (__predict_false(!(cflags & CSUM_IP)))
1434180583Skmacy				cntrl |= F_TXPKT_IPCSUM_DIS;
1435194521Skmacy			if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP))))
1436180583Skmacy				cntrl |= F_TXPKT_L4CSUM_DIS;
1437194521Skmacy
1438194521Skmacy			hflit[0] = htonl(cntrl);
1439194521Skmacy			hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1440194521Skmacy			flit |= htobe64(1 << 24);
1441194521Skmacy			cbe = &cpl_batch->pkt_entry[i];
1442194521Skmacy			cbe->cntrl = hflit[0];
1443194521Skmacy			cbe->len = hflit[1];
1444174708Skmacy			cbe->addr = htobe64(segs[i].ds_addr);
1445174708Skmacy		}
1446174708Skmacy
1447194521Skmacy		wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1448194521Skmacy		    V_WR_SGLSFLT(flits)) |
1449194521Skmacy		    htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1450194521Skmacy		wr_lo = htonl(V_WR_LEN(flits) |
1451194521Skmacy		    V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1452194521Skmacy		set_wr_hdr(wrp, wr_hi, wr_lo);
1453174708Skmacy		wmb();
1454204271Snp		ETHER_BPF_MTAP(pi->ifp, m0);
1455174708Skmacy		wr_gen2(txd, txqs.gen);
1456207688Snp		check_ring_tx_db(sc, txq, 0);
1457174708Skmacy		return (0);
1458174708Skmacy	} else if (tso_info) {
1459231317Snp		uint16_t eth_type;
1460174708Skmacy		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1461204348Snp		struct ether_header *eh;
1462231317Snp		void *l3hdr;
1463167514Skmacy		struct tcphdr *tcp;
1464174708Skmacy
1465167514Skmacy		txd->flit[2] = 0;
1466180583Skmacy		GET_VTAG(cntrl, m0);
1467167514Skmacy		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1468167514Skmacy		hdr->cntrl = htonl(cntrl);
1469174708Skmacy		hdr->len = htonl(mlen | 0x80000000);
1470167514Skmacy
1471204348Snp		if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1472180583Skmacy			printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%#x,flags=%#x",
1473181653Skmacy			    m0, mlen, m0->m_pkthdr.tso_segsz,
1474181653Skmacy			    m0->m_pkthdr.csum_flags, m0->m_flags);
1475181653Skmacy			panic("tx tso packet too small");
1476181653Skmacy		}
1477174708Skmacy
1478181653Skmacy		/* Make sure that ether, ip, tcp headers are all in m0 */
1479204348Snp		if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1480204348Snp			m0 = m_pullup(m0, TCPPKTHDRSIZE);
1481181653Skmacy			if (__predict_false(m0 == NULL)) {
1482181653Skmacy				/* XXX panic probably an overreaction */
1483181653Skmacy				panic("couldn't fit header into mbuf");
1484181653Skmacy			}
1485181653Skmacy		}
1486181653Skmacy
1487204348Snp		eh = mtod(m0, struct ether_header *);
1488231317Snp		eth_type = eh->ether_type;
1489231317Snp		if (eth_type == htons(ETHERTYPE_VLAN)) {
1490231317Snp			struct ether_vlan_header *evh = (void *)eh;
1491231317Snp
1492231317Snp			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1493231317Snp			l3hdr = evh + 1;
1494231317Snp			eth_type = evh->evl_proto;
1495167514Skmacy		} else {
1496231317Snp			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1497231317Snp			l3hdr = eh + 1;
1498167514Skmacy		}
1499168737Skmacy
1500231317Snp		if (eth_type == htons(ETHERTYPE_IP)) {
1501231317Snp			struct ip *ip = l3hdr;
1502231317Snp
1503231317Snp			tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1504231317Snp			tcp = (struct tcphdr *)(ip + 1);
1505231317Snp		} else if (eth_type == htons(ETHERTYPE_IPV6)) {
1506231317Snp			struct ip6_hdr *ip6 = l3hdr;
1507231317Snp
1508231317Snp			KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1509231317Snp			    ("%s: CSUM_TSO with ip6_nxt %d",
1510231317Snp			    __func__, ip6->ip6_nxt));
1511231317Snp
1512231317Snp			tso_info |= F_LSO_IPV6;
1513231317Snp			tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1514231317Snp			tcp = (struct tcphdr *)(ip6 + 1);
1515231317Snp		} else
1516231317Snp			panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1517231317Snp
1518231317Snp		tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1519167514Skmacy		hdr->lso_info = htonl(tso_info);
1520180583Skmacy
1521180583Skmacy		if (__predict_false(mlen <= PIO_LEN)) {
1522204348Snp			/*
1523204348Snp			 * Packet is not undersized but still fits in PIO_LEN;
1524183062Skmacy			 * indicates a TSO bug at the higher levels.
1525183059Skmacy			 */
1526194521Skmacy			txsd->m = NULL;
1527180583Skmacy			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1528180583Skmacy			flits = (mlen + 7) / 8 + 3;
1529194521Skmacy			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1530180583Skmacy					  V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1531180583Skmacy					  F_WR_SOP | F_WR_EOP | txqs.compl);
1532194521Skmacy			wr_lo = htonl(V_WR_LEN(flits) |
1533194521Skmacy			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1534194521Skmacy			set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1535180583Skmacy			wmb();
1536204271Snp			ETHER_BPF_MTAP(pi->ifp, m0);
1537180583Skmacy			wr_gen2(txd, txqs.gen);
1538207688Snp			check_ring_tx_db(sc, txq, 0);
1539204271Snp			m_freem(m0);
1540180583Skmacy			return (0);
1541180583Skmacy		}
1542167514Skmacy		flits = 3;
1543167514Skmacy	} else {
1544174708Skmacy		struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1545194521Skmacy
1546174708Skmacy		GET_VTAG(cntrl, m0);
1547167514Skmacy		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1548180583Skmacy		if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1549180583Skmacy			cntrl |= F_TXPKT_IPCSUM_DIS;
1550180583Skmacy		if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP))))
1551180583Skmacy			cntrl |= F_TXPKT_L4CSUM_DIS;
1552167514Skmacy		cpl->cntrl = htonl(cntrl);
1553174708Skmacy		cpl->len = htonl(mlen | 0x80000000);
1554174708Skmacy
1555175340Skmacy		if (mlen <= PIO_LEN) {
1556194521Skmacy			txsd->m = NULL;
1557175340Skmacy			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1558167514Skmacy			flits = (mlen + 7) / 8 + 2;
1559194521Skmacy
1560194521Skmacy			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1561194521Skmacy			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1562167514Skmacy					  F_WR_SOP | F_WR_EOP | txqs.compl);
1563194521Skmacy			wr_lo = htonl(V_WR_LEN(flits) |
1564194521Skmacy			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1565194521Skmacy			set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1566167514Skmacy			wmb();
1567204271Snp			ETHER_BPF_MTAP(pi->ifp, m0);
1568167514Skmacy			wr_gen2(txd, txqs.gen);
1569207688Snp			check_ring_tx_db(sc, txq, 0);
1570204271Snp			m_freem(m0);
1571167514Skmacy			return (0);
1572167514Skmacy		}
1573167514Skmacy		flits = 2;
1574167514Skmacy	}
1575174708Skmacy	wrp = (struct work_request_hdr *)txd;
1576169978Skmacy	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1577167514Skmacy	make_sgl(sgp, segs, nsegs);
1578167514Skmacy
1579167514Skmacy	sgl_flits = sgl_len(nsegs);
1580167514Skmacy
1581204271Snp	ETHER_BPF_MTAP(pi->ifp, m0);
1582204271Snp
1583194521Skmacy	KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1584167514Skmacy	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1585167514Skmacy	wr_lo = htonl(V_WR_TID(txq->token));
1586194521Skmacy	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1587194521Skmacy	    sgl_flits, wr_hi, wr_lo);
1588207688Snp	check_ring_tx_db(sc, txq, 0);
1589167514Skmacy
1590194521Skmacy	return (0);
1591194521Skmacy}
1592194521Skmacy
1593194521Skmacyvoid
1594194521Skmacycxgb_tx_watchdog(void *arg)
1595194521Skmacy{
1596194521Skmacy	struct sge_qset *qs = arg;
1597194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1598194521Skmacy
1599194521Skmacy	if (qs->coalescing != 0 &&
1600194521Skmacy	    (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1601194521Skmacy	    TXQ_RING_EMPTY(qs))
1602194521Skmacy		qs->coalescing = 0;
1603194521Skmacy	else if (qs->coalescing == 0 &&
1604194521Skmacy	    (txq->in_use >= cxgb_tx_coalesce_enable_start))
1605194521Skmacy		qs->coalescing = 1;
1606194521Skmacy	if (TXQ_TRYLOCK(qs)) {
1607194521Skmacy		qs->qs_flags |= QS_FLUSHING;
1608194521Skmacy		cxgb_start_locked(qs);
1609194521Skmacy		qs->qs_flags &= ~QS_FLUSHING;
1610194521Skmacy		TXQ_UNLOCK(qs);
1611174708Skmacy	}
1612194521Skmacy	if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1613194521Skmacy		callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1614194521Skmacy		    qs, txq->txq_watchdog.c_cpu);
1615194521Skmacy}
1616194521Skmacy
1617194521Skmacystatic void
1618194521Skmacycxgb_tx_timeout(void *arg)
1619194521Skmacy{
1620194521Skmacy	struct sge_qset *qs = arg;
1621194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1622194521Skmacy
1623194521Skmacy	if (qs->coalescing == 0 && (txq->in_use >= (txq->size >> 3)))
1624194521Skmacy		qs->coalescing = 1;
1625194521Skmacy	if (TXQ_TRYLOCK(qs)) {
1626194521Skmacy		qs->qs_flags |= QS_TIMEOUT;
1627194521Skmacy		cxgb_start_locked(qs);
1628194521Skmacy		qs->qs_flags &= ~QS_TIMEOUT;
1629194521Skmacy		TXQ_UNLOCK(qs);
1630194521Skmacy	}
1631194521Skmacy}
1632194521Skmacy
1633194521Skmacystatic void
1634194521Skmacycxgb_start_locked(struct sge_qset *qs)
1635194521Skmacy{
1636194521Skmacy	struct mbuf *m_head = NULL;
1637194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1638194521Skmacy	struct port_info *pi = qs->port;
1639194521Skmacy	struct ifnet *ifp = pi->ifp;
1640194521Skmacy
1641194521Skmacy	if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1642194521Skmacy		reclaim_completed_tx(qs, 0, TXQ_ETH);
1643194521Skmacy
1644194521Skmacy	if (!pi->link_config.link_ok) {
1645194521Skmacy		TXQ_RING_FLUSH(qs);
1646194521Skmacy		return;
1647194521Skmacy	}
1648194521Skmacy	TXQ_LOCK_ASSERT(qs);
1649207688Snp	while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1650194521Skmacy	    pi->link_config.link_ok) {
1651194521Skmacy		reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1652194521Skmacy
1653205949Snp		if (txq->size - txq->in_use <= TX_MAX_DESC)
1654205949Snp			break;
1655205949Snp
1656194521Skmacy		if ((m_head = cxgb_dequeue(qs)) == NULL)
1657194521Skmacy			break;
1658194521Skmacy		/*
1659194521Skmacy		 *  Encapsulation can modify our pointer, and/or make it
1660194521Skmacy		 *  NULL on failure.  In that event, we can't requeue.
1661194521Skmacy		 */
1662194521Skmacy		if (t3_encap(qs, &m_head) || m_head == NULL)
1663194521Skmacy			break;
1664194521Skmacy
1665194521Skmacy		m_head = NULL;
1666194521Skmacy	}
1667207688Snp
1668207688Snp	if (txq->db_pending)
1669207688Snp		check_ring_tx_db(pi->adapter, txq, 1);
1670207688Snp
1671194521Skmacy	if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1672194521Skmacy	    pi->link_config.link_ok)
1673194521Skmacy		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1674194521Skmacy		    qs, txq->txq_timer.c_cpu);
1675194521Skmacy	if (m_head != NULL)
1676194521Skmacy		m_freem(m_head);
1677194521Skmacy}
1678194521Skmacy
1679194521Skmacystatic int
1680194521Skmacycxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1681194521Skmacy{
1682194521Skmacy	struct port_info *pi = qs->port;
1683194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1684194521Skmacy	struct buf_ring *br = txq->txq_mr;
1685194521Skmacy	int error, avail;
1686194521Skmacy
1687194521Skmacy	avail = txq->size - txq->in_use;
1688194521Skmacy	TXQ_LOCK_ASSERT(qs);
1689194521Skmacy
1690194521Skmacy	/*
1691194521Skmacy	 * We can only do a direct transmit if the following are true:
1692194521Skmacy	 * - we aren't coalescing (ring < 3/4 full)
1693194521Skmacy	 * - the link is up -- checked in caller
1694194521Skmacy	 * - there are no packets enqueued already
1695194521Skmacy	 * - there is space in hardware transmit queue
1696194521Skmacy	 */
1697194521Skmacy	if (check_pkt_coalesce(qs) == 0 &&
1698205949Snp	    !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1699194521Skmacy		if (t3_encap(qs, &m)) {
1700194521Skmacy			if (m != NULL &&
1701194521Skmacy			    (error = drbr_enqueue(ifp, br, m)) != 0)
1702194521Skmacy				return (error);
1703194521Skmacy		} else {
1704207688Snp			if (txq->db_pending)
1705207688Snp				check_ring_tx_db(pi->adapter, txq, 1);
1706207688Snp
1707194521Skmacy			/*
1708194521Skmacy			 * We've bypassed the buf ring so we need to update
1709194521Skmacy			 * the stats directly
1710194521Skmacy			 */
1711194521Skmacy			txq->txq_direct_packets++;
1712194521Skmacy			txq->txq_direct_bytes += m->m_pkthdr.len;
1713194521Skmacy		}
1714194521Skmacy	} else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1715194521Skmacy		return (error);
1716194521Skmacy
1717194521Skmacy	reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1718194521Skmacy	if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1719194521Skmacy	    (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1720194521Skmacy		cxgb_start_locked(qs);
1721194521Skmacy	else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1722194521Skmacy		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1723194521Skmacy		    qs, txq->txq_timer.c_cpu);
1724167514Skmacy	return (0);
1725167514Skmacy}
1726167514Skmacy
1727194521Skmacyint
1728194521Skmacycxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1729194521Skmacy{
1730194521Skmacy	struct sge_qset *qs;
1731194521Skmacy	struct port_info *pi = ifp->if_softc;
1732194521Skmacy	int error, qidx = pi->first_qset;
1733167514Skmacy
1734194521Skmacy	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1735194521Skmacy	    !pi->link_config.link_ok) {
1736194521Skmacy		m_freem(m);
1737194521Skmacy		return (0);
1738194521Skmacy	}
1739194521Skmacy
1740194521Skmacy	if (m->m_flags & M_FLOWID)
1741194521Skmacy		qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1742194521Skmacy
1743194521Skmacy	qs = &pi->adapter->sge.qs[qidx];
1744194521Skmacy
1745194521Skmacy	if (TXQ_TRYLOCK(qs)) {
1746194521Skmacy		/* XXX running */
1747194521Skmacy		error = cxgb_transmit_locked(ifp, qs, m);
1748194521Skmacy		TXQ_UNLOCK(qs);
1749194521Skmacy	} else
1750194521Skmacy		error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1751194521Skmacy	return (error);
1752194521Skmacy}
1753194521Skmacy
1754194521Skmacyvoid
1755194521Skmacycxgb_qflush(struct ifnet *ifp)
1756194521Skmacy{
1757194521Skmacy	/*
1758194521Skmacy	 * Flush any enqueued mbufs in the buf_rings and in the
1759194521Skmacy	 * transmit queues.  No-op for now.
1761194521Skmacy	 */
1762194521Skmacy	return;
1763194521Skmacy}
1764194521Skmacy
1765167514Skmacy/**
1766167514Skmacy *	write_imm - write a packet into a Tx descriptor as immediate data
1767167514Skmacy *	@d: the Tx descriptor to write
1768167514Skmacy *	@src: the packet data, beginning with a work request header
1769167514Skmacy *	@len: the length of packet data to write as immediate data
1770167514Skmacy *	@gen: the generation bit value to write
1771167514Skmacy *
1772167514Skmacy *	Writes a packet as immediate data into a Tx descriptor.  The packet
1773167514Skmacy *	contains a work request at its beginning.  We must write the packet
1774167514Skmacy *	carefully so the SGE doesn't accidentally read it before it has been
1775167514Skmacy *	written in its entirety.
1776167514Skmacy */
1777169978Skmacystatic __inline void
1778237263Snpwrite_imm(struct tx_desc *d, caddr_t src,
1779169978Skmacy	  unsigned int len, unsigned int gen)
1780167514Skmacy{
1781237263Snp	struct work_request_hdr *from = (struct work_request_hdr *)src;
1782167514Skmacy	struct work_request_hdr *to = (struct work_request_hdr *)d;
1783194521Skmacy	uint32_t wr_hi, wr_lo;
1784167514Skmacy
1785237263Snp	KASSERT(len <= WR_LEN && len >= sizeof(*from),
1786237263Snp	    ("%s: invalid len %d", __func__, len));
1787174708Skmacy
1788167514Skmacy	memcpy(&to[1], &from[1], len - sizeof(*from));
1789194521Skmacy	wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1790237263Snp	    V_WR_BCNTLFLT(len & 7));
1791237263Snp	wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1792194521Skmacy	set_wr_hdr(to, wr_hi, wr_lo);
1793167514Skmacy	wmb();
1794167514Skmacy	wr_gen2(d, gen);
1795167514Skmacy}
1796167514Skmacy
1797167514Skmacy/**
1798167514Skmacy *	check_desc_avail - check descriptor availability on a send queue
1799167514Skmacy *	@adap: the adapter
1800167514Skmacy *	@q: the TX queue
1801167514Skmacy *	@m: the packet needing the descriptors
1802167514Skmacy *	@ndesc: the number of Tx descriptors needed
1803167514Skmacy *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1804167514Skmacy *
1805167514Skmacy *	Checks if the requested number of Tx descriptors is available on an
1806167514Skmacy *	SGE send queue.  If the queue is already suspended or not enough
1807167514Skmacy *	descriptors are available the packet is queued for later transmission.
1808167514Skmacy *	Must be called with the Tx queue locked.
1809167514Skmacy *
1810167514Skmacy *	Returns 0 if enough descriptors are available, 1 if there aren't
1811167514Skmacy *	enough descriptors and the packet has been queued, and 2 if the caller
1812167514Skmacy *	needs to retry because there weren't enough descriptors at the
1813167514Skmacy *	beginning of the call but some freed up in the meantime.
1814167514Skmacy */
1815167514Skmacystatic __inline int
1816167514Skmacycheck_desc_avail(adapter_t *adap, struct sge_txq *q,
1817169978Skmacy		 struct mbuf *m, unsigned int ndesc,
1818169978Skmacy		 unsigned int qid)
1819167514Skmacy{
1820167514Skmacy	/*
1821167514Skmacy	 * XXX We currently only use this for checking the control queue.
1822167514Skmacy	 * The control queue is only used for binding qsets, which happens
1823167514Skmacy	 * at init time, so we are guaranteed enough descriptors.
1824167514Skmacy	 */
1825169978Skmacy	if (__predict_false(!mbufq_empty(&q->sendq))) {
1826169978Skmacyaddq_exit:	mbufq_tail(&q->sendq, m);
1827167514Skmacy		return 1;
1828167514Skmacy	}
1829167514Skmacy	if (__predict_false(q->size - q->in_use < ndesc)) {
1830167514Skmacy
1831167514Skmacy		struct sge_qset *qs = txq_to_qset(q, qid);
1832167514Skmacy
1833169978Skmacy		setbit(&qs->txq_stopped, qid);
1834167514Skmacy		if (should_restart_tx(q) &&
1835167514Skmacy		    test_and_clear_bit(qid, &qs->txq_stopped))
1836167514Skmacy			return 2;
1837167514Skmacy
1838167514Skmacy		q->stops++;
1839167514Skmacy		goto addq_exit;
1840167514Skmacy	}
1841167514Skmacy	return 0;
1842167514Skmacy}
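
/*
 * Sketch of the retry pattern check_desc_avail() is designed for (compare
 * ctrl_xmit() and ofld_xmit() below); 'ndesc', 'qid', and 'ret' are
 * placeholders here, not driver code.
 */
#if 0
again:	reclaim_completed_tx_imm(q);
	ret = check_desc_avail(adap, q, m, ndesc, qid);
	if (ret == 1)
		return (ENOSPC);	/* m was queued on q->sendq */
	if (ret == 2)
		goto again;		/* descriptors freed up, retry */
	/* ret == 0: enough descriptors, go ahead and write the WR */
#endif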
1843167514Skmacy
1844167514Skmacy
1845167514Skmacy/**
1846167514Skmacy *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1847167514Skmacy *	@q: the SGE control Tx queue
1848167514Skmacy *
1849167514Skmacy *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1850167514Skmacy *	that send only immediate data (presently just the control queues) and
1851169978Skmacy *	thus do not have any mbufs.
1852167514Skmacy */
1853167514Skmacystatic __inline void
1854167514Skmacyreclaim_completed_tx_imm(struct sge_txq *q)
1855167514Skmacy{
1856167514Skmacy	unsigned int reclaim = q->processed - q->cleaned;
1857167514Skmacy
1858167514Skmacy	q->in_use -= reclaim;
1859167514Skmacy	q->cleaned += reclaim;
1860167514Skmacy}
1861167514Skmacy
1862167514Skmacy/**
1863167514Skmacy *	ctrl_xmit - send a packet through an SGE control Tx queue
1864167514Skmacy *	@adap: the adapter
1865167514Skmacy *	@q: the control queue
1866167514Skmacy *	@m: the packet
1867167514Skmacy *
1868167514Skmacy *	Send a packet through an SGE control Tx queue.  Packets sent through
1869167514Skmacy *	a control queue must fit entirely as immediate data in a single Tx
1870167514Skmacy *	descriptor and have no page fragments.
1871167514Skmacy */
1872167514Skmacystatic int
1873194521Skmacyctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1874167514Skmacy{
1875167514Skmacy	int ret;
1876170654Skmacy	struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1877194521Skmacy	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1878194521Skmacy
1879237263Snp	KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1880237263Snp
1881194521Skmacy	wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1882194521Skmacy	wrp->wrh_lo = htonl(V_WR_TID(q->token));
1883167514Skmacy
1884194521Skmacy	TXQ_LOCK(qs);
1885167514Skmacyagain:	reclaim_completed_tx_imm(q);
1886167514Skmacy
1887167514Skmacy	ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1888167514Skmacy	if (__predict_false(ret)) {
1889167514Skmacy		if (ret == 1) {
1890194521Skmacy			TXQ_UNLOCK(qs);
1891174708Skmacy			return (ENOSPC);
1892167514Skmacy		}
1893167514Skmacy		goto again;
1894167514Skmacy	}
1895237263Snp	write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1896174708Skmacy
1897167514Skmacy	q->in_use++;
1898167514Skmacy	if (++q->pidx >= q->size) {
1899167514Skmacy		q->pidx = 0;
1900167514Skmacy		q->gen ^= 1;
1901167514Skmacy	}
1902194521Skmacy	TXQ_UNLOCK(qs);
1903197043Snp	wmb();
1904167514Skmacy	t3_write_reg(adap, A_SG_KDOORBELL,
1905237263Snp	    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1906237263Snp
1907237263Snp	m_free(m);
1908167514Skmacy	return (0);
1909167514Skmacy}
1910167514Skmacy
1911169978Skmacy
1912167514Skmacy/**
1913167514Skmacy *	restart_ctrlq - restart a suspended control queue
1914167514Skmacy *	@qs: the queue set containing the control queue
1915167514Skmacy *
1916167514Skmacy *	Resumes transmission on a suspended Tx control queue.
1917167514Skmacy */
1918167514Skmacystatic void
1919169978Skmacyrestart_ctrlq(void *data, int npending)
1920167514Skmacy{
1921167514Skmacy	struct mbuf *m;
1922167514Skmacy	struct sge_qset *qs = (struct sge_qset *)data;
1923167514Skmacy	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1924167514Skmacy	adapter_t *adap = qs->port->adapter;
1925167514Skmacy
1926194521Skmacy	TXQ_LOCK(qs);
1927167514Skmacyagain:	reclaim_completed_tx_imm(q);
1928169978Skmacy
1929167514Skmacy	while (q->in_use < q->size &&
1930169978Skmacy	       (m = mbufq_dequeue(&q->sendq)) != NULL) {
1931167514Skmacy
1932237263Snp		write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1933237263Snp		m_free(m);
1934167514Skmacy
1935167514Skmacy		if (++q->pidx >= q->size) {
1936167514Skmacy			q->pidx = 0;
1937167514Skmacy			q->gen ^= 1;
1938167514Skmacy		}
1939167514Skmacy		q->in_use++;
1940167514Skmacy	}
1941169978Skmacy	if (!mbufq_empty(&q->sendq)) {
1942169978Skmacy		setbit(&qs->txq_stopped, TXQ_CTRL);
1943167514Skmacy
1944167514Skmacy		if (should_restart_tx(q) &&
1945167514Skmacy		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1946167514Skmacy			goto again;
1947167514Skmacy		q->stops++;
1948167514Skmacy	}
1949194521Skmacy	TXQ_UNLOCK(qs);
1950167514Skmacy	t3_write_reg(adap, A_SG_KDOORBELL,
1951167514Skmacy		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1952167514Skmacy}
1953167514Skmacy
1954169978Skmacy
1955167514Skmacy/*
1956167514Skmacy * Send a management message through control queue 0
1957167514Skmacy */
1958167514Skmacyint
1959167514Skmacyt3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1960167514Skmacy{
1961194521Skmacy	return ctrl_xmit(adap, &adap->sge.qs[0], m);
1962167514Skmacy}
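
/*
 * Hypothetical caller of t3_mgmt_tx(), shown only to illustrate the
 * control-queue contract: the mbuf must start with a work_request_hdr and
 * the whole WR must fit in one descriptor (m_len <= WR_LEN).  ctrl_xmit()
 * fills in SOP/EOP and the queue token; the opcode below is a placeholder.
 */
#if 0
	struct mbuf *m;
	struct work_request_hdr *wrp;

	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOMEM);
	wrp = mtod(m, struct work_request_hdr *);
	bzero(wrp, sizeof(*wrp));
	wrp->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));	/* placeholder opcode */
	m->m_len = m->m_pkthdr.len = sizeof(*wrp);
	return (t3_mgmt_tx(sc, m));
#endif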
1963167514Skmacy
1964167514Skmacy/**
1965167514Skmacy *	free_qset - free the resources of an SGE queue set
1966167514Skmacy *	@sc: the controller owning the queue set
1967167514Skmacy *	@q: the queue set
1968167514Skmacy *
1969167514Skmacy *	Release the HW and SW resources associated with an SGE queue set, such
1970167514Skmacy *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
1971167514Skmacy *	queue set must be quiesced prior to calling this.
1972167514Skmacy */
1973194521Skmacystatic void
1974167514Skmacyt3_free_qset(adapter_t *sc, struct sge_qset *q)
1975167514Skmacy{
1976167514Skmacy	int i;
1977174708Skmacy
1978194521Skmacy	reclaim_completed_tx(q, 0, TXQ_ETH);
1979205950Snp	if (q->txq[TXQ_ETH].txq_mr != NULL)
1980205950Snp		buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1981205950Snp	if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1982205950Snp		ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1983205950Snp		free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1984185165Skmacy	}
1985205950Snp
1986167514Skmacy	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1987167514Skmacy		if (q->fl[i].desc) {
1988176472Skmacy			mtx_lock_spin(&sc->sge.reg_lock);
1989167514Skmacy			t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1990176472Skmacy			mtx_unlock_spin(&sc->sge.reg_lock);
1991167514Skmacy			bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1992167514Skmacy			bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1993167514Skmacy					q->fl[i].desc_map);
1994167514Skmacy			bus_dma_tag_destroy(q->fl[i].desc_tag);
1995168351Skmacy			bus_dma_tag_destroy(q->fl[i].entry_tag);
1996167514Skmacy		}
1997167514Skmacy		if (q->fl[i].sdesc) {
1998167514Skmacy			free_rx_bufs(sc, &q->fl[i]);
1999167514Skmacy			free(q->fl[i].sdesc, M_DEVBUF);
2000167514Skmacy		}
2001167514Skmacy	}
2002167514Skmacy
2003194521Skmacy	mtx_unlock(&q->lock);
2004194521Skmacy	MTX_DESTROY(&q->lock);
2005170869Skmacy	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2006167514Skmacy		if (q->txq[i].desc) {
2007176472Skmacy			mtx_lock_spin(&sc->sge.reg_lock);
2008167514Skmacy			t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2009176472Skmacy			mtx_unlock_spin(&sc->sge.reg_lock);
2010167514Skmacy			bus_dmamap_unload(q->txq[i].desc_tag,
2011167514Skmacy					q->txq[i].desc_map);
2012167514Skmacy			bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2013167514Skmacy					q->txq[i].desc_map);
2014167514Skmacy			bus_dma_tag_destroy(q->txq[i].desc_tag);
2015168351Skmacy			bus_dma_tag_destroy(q->txq[i].entry_tag);
2016167514Skmacy		}
2017167514Skmacy		if (q->txq[i].sdesc) {
2018167514Skmacy			free(q->txq[i].sdesc, M_DEVBUF);
2019167514Skmacy		}
2020167514Skmacy	}
2021167514Skmacy
2022167514Skmacy	if (q->rspq.desc) {
2023176472Skmacy		mtx_lock_spin(&sc->sge.reg_lock);
2024167514Skmacy		t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2025176472Skmacy		mtx_unlock_spin(&sc->sge.reg_lock);
2026167514Skmacy
2027167514Skmacy		bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2028167514Skmacy		bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2029167514Skmacy			        q->rspq.desc_map);
2030167514Skmacy		bus_dma_tag_destroy(q->rspq.desc_tag);
2031170869Skmacy		MTX_DESTROY(&q->rspq.lock);
2032167514Skmacy	}
2033168351Skmacy
2034235963Sbz#if defined(INET6) || defined(INET)
2035181616Skmacy	tcp_lro_free(&q->lro.ctrl);
2036205947Snp#endif
2037181616Skmacy
2038167514Skmacy	bzero(q, sizeof(*q));
2039167514Skmacy}
2040167514Skmacy
2041167514Skmacy/**
2042167514Skmacy *	t3_free_sge_resources - free SGE resources
2043167514Skmacy *	@sc: the adapter softc
2044167514Skmacy *
2045167514Skmacy *	Frees resources used by the SGE queue sets.
2046167514Skmacy */
2047167514Skmacyvoid
2048219946Snpt3_free_sge_resources(adapter_t *sc, int nqsets)
2049167514Skmacy{
2050219946Snp	int i;
2051174708Skmacy
2052194521Skmacy	for (i = 0; i < nqsets; ++i) {
2053194521Skmacy		TXQ_LOCK(&sc->sge.qs[i]);
2054167514Skmacy		t3_free_qset(sc, &sc->sge.qs[i]);
2055194521Skmacy	}
2056167514Skmacy}
2057167514Skmacy
2058167514Skmacy/**
2059167514Skmacy *	t3_sge_start - enable SGE
2060167514Skmacy *	@sc: the controller softc
2061167514Skmacy *
2062167514Skmacy *	Enables the SGE for DMAs.  This is the last step in starting packet
2063167514Skmacy *	transfers.
2064167514Skmacy */
2065167514Skmacyvoid
2066167514Skmacyt3_sge_start(adapter_t *sc)
2067167514Skmacy{
2068167514Skmacy	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2069167514Skmacy}
2070167514Skmacy
2071169978Skmacy/**
2072169978Skmacy *	t3_sge_stop - disable SGE operation
2073169978Skmacy *	@sc: the adapter
2074169978Skmacy *
2075169978Skmacy *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2076169978Skmacy *	from error interrupts) or from normal process context.  In the latter
2077169978Skmacy *	case it also disables any pending queue restart tasklets.  Note that
2078169978Skmacy *	if it is called in interrupt context it cannot disable the restart
2079169978Skmacy *	tasklets as it cannot wait, however the tasklets will have no effect
2080169978Skmacy *	since the doorbells are disabled and the driver will call this again
2081169978Skmacy *	later from process context, at which time the tasklets will be stopped
2082169978Skmacy *	if they are still running.
2083169978Skmacy */
2084169978Skmacyvoid
2085169978Skmacyt3_sge_stop(adapter_t *sc)
2086169978Skmacy{
2087170869Skmacy	int i, nqsets;
2088170869Skmacy
2089169978Skmacy	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2090167514Skmacy
2091170654Skmacy	if (sc->tq == NULL)
2092170654Skmacy		return;
2093170654Skmacy
2094170869Skmacy	for (nqsets = i = 0; i < sc->params.nports; i++)
2095170869Skmacy		nqsets += sc->port[i].nqsets;
2096175340Skmacy#ifdef notyet
2097175340Skmacy	/*
2098175340Skmacy	 *
2099175340Skmacy	 * XXX
2100175340Skmacy	 */
2101170869Skmacy	for (i = 0; i < nqsets; ++i) {
2102169978Skmacy		struct sge_qset *qs = &sc->sge.qs[i];
2103169978Skmacy
2104171335Skmacy		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2105171335Skmacy		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2106169978Skmacy	}
2107175340Skmacy#endif
2108169978Skmacy}
2109169978Skmacy
2110167514Skmacy/**
2111174708Skmacy *	t3_free_tx_desc - reclaims Tx descriptors and their buffers
2112167514Skmacy *	@qs: the queue set containing the Tx queue
2113167514Skmacy *	@reclaimable: the number of descriptors to reclaim
2114174708Skmacy *	@queue: the Tx queue within its queue set (TXQ_ETH, TXQ_OFLD, or TXQ_CTRL)
2115174708Skmacy *
2116174708Skmacy *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2117167514Skmacy *	Tx buffers.  Called with the Tx queue lock held.
2122167514Skmacy */
2123174708Skmacyvoid
2124194521Skmacyt3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2125167514Skmacy{
2126174708Skmacy	struct tx_sw_desc *txsd;
2127194521Skmacy	unsigned int cidx, mask;
2128194521Skmacy	struct sge_txq *q = &qs->txq[queue];
2129194521Skmacy
2130167514Skmacy#ifdef T3_TRACE
2131167514Skmacy	T3_TRACE2(sc->tb[q->cntxt_id & 7],
2132174708Skmacy		  "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2133167514Skmacy#endif
2134174708Skmacy	cidx = q->cidx;
2135194521Skmacy	mask = q->size - 1;
2136174708Skmacy	txsd = &q->sdesc[cidx];
2137194521Skmacy
2138194521Skmacy	mtx_assert(&qs->lock, MA_OWNED);
2139174708Skmacy	while (reclaimable--) {
2140194521Skmacy		prefetch(q->sdesc[(cidx + 1) & mask].m);
2141194521Skmacy		prefetch(q->sdesc[(cidx + 2) & mask].m);
2142194521Skmacy
2143194521Skmacy		if (txsd->m != NULL) {
2144174708Skmacy			if (txsd->flags & TX_SW_DESC_MAPPED) {
2145174708Skmacy				bus_dmamap_unload(q->entry_tag, txsd->map);
2146174708Skmacy				txsd->flags &= ~TX_SW_DESC_MAPPED;
2147167514Skmacy			}
2148194521Skmacy			m_freem_list(txsd->m);
2149194521Skmacy			txsd->m = NULL;
2150174708Skmacy		} else
2151174708Skmacy			q->txq_skipped++;
2152174708Skmacy
2153174708Skmacy		++txsd;
2154167514Skmacy		if (++cidx == q->size) {
2155167514Skmacy			cidx = 0;
2156174708Skmacy			txsd = q->sdesc;
2157167514Skmacy		}
2158167514Skmacy	}
2159167514Skmacy	q->cidx = cidx;
2160167514Skmacy
2161167514Skmacy}
2162167514Skmacy
2163167514Skmacy/**
2164167514Skmacy *	is_new_response - check if a response is newly written
2165167514Skmacy *	@r: the response descriptor
2166167514Skmacy *	@q: the response queue
2167167514Skmacy *
2168167514Skmacy *	Returns true if a response descriptor contains a yet unprocessed
2169167514Skmacy *	response.
2170167514Skmacy */
2171167514Skmacystatic __inline int
2172167514Skmacyis_new_response(const struct rsp_desc *r,
2173167514Skmacy    const struct sge_rspq *q)
2174167514Skmacy{
2175167514Skmacy	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2176167514Skmacy}
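
/*
 * Sketch of how the generation bit is consumed: a response-queue poll loop
 * (the real one lives in process_responses(), elsewhere in this file) walks
 * descriptors while is_new_response() holds, flipping q->gen on each wrap
 * so stale entries from the previous pass are never re-read.
 */
#if 0
	struct rsp_desc *r = &q->desc[q->cidx];

	while (is_new_response(r, q)) {
		/* ... process *r ... */
		if (++q->cidx == q->size) {
			q->cidx = 0;
			q->gen ^= 1;	/* generation flips on wrap */
		}
		r = &q->desc[q->cidx];
	}
#endif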
2177167514Skmacy
2178167514Skmacy#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2179167514Skmacy#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2180167514Skmacy			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2181167514Skmacy			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2182167514Skmacy			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2183167514Skmacy
2184167514Skmacy/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2185167514Skmacy#define NOMEM_INTR_DELAY 2500
2186167514Skmacy
2187237263Snp#ifdef TCP_OFFLOAD
2188169978Skmacy/**
2189169978Skmacy *	write_ofld_wr - write an offload work request
2190169978Skmacy *	@adap: the adapter
2191169978Skmacy *	@m: the packet to send
2192169978Skmacy *	@q: the Tx queue
2193169978Skmacy *	@pidx: index of the first Tx descriptor to write
2194169978Skmacy *	@gen: the generation value to use
2195169978Skmacy *	@ndesc: number of descriptors the packet will occupy
2196169978Skmacy *
2197169978Skmacy *	Write an offload work request to send the supplied packet.  The packet
2198169978Skmacy *	data already carry the work request with most fields populated.
2199169978Skmacy */
2200169978Skmacystatic void
2201237263Snpwrite_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2202237263Snp    unsigned int pidx, unsigned int gen, unsigned int ndesc)
2203167514Skmacy{
2204169978Skmacy	unsigned int sgl_flits, flits;
2205237263Snp	int i, idx, nsegs, wrlen;
2206169978Skmacy	struct work_request_hdr *from;
2207237263Snp	struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2208169978Skmacy	struct tx_desc *d = &q->desc[pidx];
2209169978Skmacy	struct txq_state txqs;
2210237263Snp	struct sglist_seg *segs;
2211237263Snp	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2212237263Snp	struct sglist *sgl;
2213237263Snp
2214237263Snp	from = (void *)(oh + 1);	/* Start of WR within mbuf */
2215237263Snp	wrlen = m->m_len - sizeof(*oh);
2216237263Snp
2217237263Snp	if (!(oh->flags & F_HDR_SGL)) {
2218237263Snp		write_imm(d, (caddr_t)from, wrlen, gen);
2219237263Snp
2220237263Snp		/*
2221237263Snp		 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2222237263Snp		 * t3_push_frames and freed in wr_ack.  Others, like those sent
2223237263Snp		 * down by close_conn, t3_send_reset, etc. should be freed here.
2224237263Snp		 */
2225237263Snp		if (!(oh->flags & F_HDR_DF))
2226237263Snp			m_free(m);
2227169978Skmacy		return;
2228169978Skmacy	}
2229169978Skmacy
2230237263Snp	memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2231169978Skmacy
2232237263Snp	sgl = oh->sgl;
2233237263Snp	flits = wrlen / 8;
2234237263Snp	sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2235169978Skmacy
2236237263Snp	nsegs = sgl->sg_nseg;
2237237263Snp	segs = sgl->sg_segs;
2238237263Snp	for (idx = 0, i = 0; i < nsegs; i++) {
2239237263Snp		KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2240237263Snp		if (i && idx == 0)
2241237263Snp			++sgp;
2242237263Snp		sgp->len[idx] = htobe32(segs[i].ss_len);
2243237263Snp		sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2244237263Snp		idx ^= 1;
2245237263Snp	}
2246237263Snp	if (idx) {
2247237263Snp		sgp->len[idx] = 0;
2248237263Snp		sgp->addr[idx] = 0;
2249237263Snp	}
2250237263Snp
2251169978Skmacy	sgl_flits = sgl_len(nsegs);
2252174708Skmacy	txqs.gen = gen;
2253174708Skmacy	txqs.pidx = pidx;
2254174708Skmacy	txqs.compl = 0;
2255174708Skmacy
2256237263Snp	write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2257194521Skmacy	    from->wrh_hi, from->wrh_lo);
2258167514Skmacy}
2259167514Skmacy
2260169978Skmacy/**
2261169978Skmacy *	ofld_xmit - send a packet through an offload queue
2262169978Skmacy *	@adap: the adapter
2263169978Skmacy *	@q: the Tx offload queue
2264169978Skmacy *	@m: the packet
2265169978Skmacy *
2266169978Skmacy *	Send an offload packet through an SGE offload queue.
2267169978Skmacy */
2268169978Skmacystatic int
2269194521Skmacyofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2270169978Skmacy{
2271237263Snp	int ret;
2272171978Skmacy	unsigned int ndesc;
2273171978Skmacy	unsigned int pidx, gen;
2274194521Skmacy	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2275237263Snp	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2276169978Skmacy
2277237263Snp	ndesc = G_HDR_NDESC(oh->flags);
2278169978Skmacy
2279194521Skmacy	TXQ_LOCK(qs);
2280194521Skmacyagain:	reclaim_completed_tx(qs, 16, TXQ_OFLD);
2281169978Skmacy	ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2282169978Skmacy	if (__predict_false(ret)) {
2283169978Skmacy		if (ret == 1) {
2284194521Skmacy			TXQ_UNLOCK(qs);
2285174708Skmacy			return (EINTR);
2286167514Skmacy		}
2287169978Skmacy		goto again;
2288169978Skmacy	}
2289169978Skmacy
2290169978Skmacy	gen = q->gen;
2291169978Skmacy	q->in_use += ndesc;
2292169978Skmacy	pidx = q->pidx;
2293169978Skmacy	q->pidx += ndesc;
2294169978Skmacy	if (q->pidx >= q->size) {
2295169978Skmacy		q->pidx -= q->size;
2296169978Skmacy		q->gen ^= 1;
2297169978Skmacy	}
2298237263Snp
2299237263Snp	write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2300237263Snp	check_ring_tx_db(adap, q, 1);
2301194521Skmacy	TXQ_UNLOCK(qs);
2302169978Skmacy
2303172101Skmacy	return (0);
2304169978Skmacy}
2305167514Skmacy
2306169978Skmacy/**
2307169978Skmacy *	restart_offloadq - restart a suspended offload queue
2308169978Skmacy *	@qs: the queue set containing the offload queue
2309169978Skmacy *
2310169978Skmacy *	Resumes transmission on a suspended Tx offload queue.
2311169978Skmacy */
2312169978Skmacystatic void
2313169978Skmacyrestart_offloadq(void *data, int npending)
2314169978Skmacy{
2315169978Skmacy	struct mbuf *m;
2316169978Skmacy	struct sge_qset *qs = data;
2317169978Skmacy	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2318169978Skmacy	adapter_t *adap = qs->port->adapter;
2319237263Snp	int cleaned;
2320169978Skmacy
2321194521Skmacy	TXQ_LOCK(qs);
2322194521Skmacyagain:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2323169978Skmacy
2324169978Skmacy	while ((m = mbufq_peek(&q->sendq)) != NULL) {
2325169978Skmacy		unsigned int gen, pidx;
2326237263Snp		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2327237263Snp		unsigned int ndesc = G_HDR_NDESC(oh->flags);
2328169978Skmacy
2329169978Skmacy		if (__predict_false(q->size - q->in_use < ndesc)) {
2330169978Skmacy			setbit(&qs->txq_stopped, TXQ_OFLD);
2331169978Skmacy			if (should_restart_tx(q) &&
2332169978Skmacy			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2333169978Skmacy				goto again;
2334169978Skmacy			q->stops++;
2335169978Skmacy			break;
2336169978Skmacy		}
2337169978Skmacy
2338169978Skmacy		gen = q->gen;
2339169978Skmacy		q->in_use += ndesc;
2340169978Skmacy		pidx = q->pidx;
2341169978Skmacy		q->pidx += ndesc;
2342169978Skmacy		if (q->pidx >= q->size) {
2343169978Skmacy			q->pidx -= q->size;
2344169978Skmacy			q->gen ^= 1;
2345169978Skmacy		}
2346169978Skmacy
2347169978Skmacy		(void)mbufq_dequeue(&q->sendq);
2348194521Skmacy		TXQ_UNLOCK(qs);
2349237263Snp		write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2350194521Skmacy		TXQ_LOCK(qs);
2351169978Skmacy	}
2352169978Skmacy#if USE_GTS
2353169978Skmacy	set_bit(TXQ_RUNNING, &q->flags);
2354169978Skmacy	set_bit(TXQ_LAST_PKT_DB, &q->flags);
2355169978Skmacy#endif
2356194521Skmacy	TXQ_UNLOCK(qs);
2357176472Skmacy	wmb();
2358169978Skmacy	t3_write_reg(adap, A_SG_KDOORBELL,
2359169978Skmacy		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2360167514Skmacy}
2361167514Skmacy
2362169978Skmacy/**
2363169978Skmacy *	t3_offload_tx - send an offload packet
2364169978Skmacy *	@sc: the adapter
2365169978Skmacy *	@m: the packet, led by a struct ofld_hdr
2366169978Skmacy *
2367169978Skmacy *	Sends an offload packet.  The ofld_hdr at the front of the packet
2368169978Skmacy *	selects the queue set (G_HDR_QSET) and indicates whether it should
2369169978Skmacy *	be sent through the control queue (F_HDR_CTRL) or the offload queue.
2369169978Skmacy */
2370169978Skmacyint
2371237263Snpt3_offload_tx(struct adapter *sc, struct mbuf *m)
2372169978Skmacy{
2373237263Snp	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2374237263Snp	struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2375169978Skmacy
2376237263Snp	if (oh->flags & F_HDR_CTRL) {
2377237263Snp		m_adj(m, sizeof (*oh));	/* trim ofld_hdr off */
2378237263Snp		return (ctrl_xmit(sc, qs, m));
2379237263Snp	} else
2380237263Snp		return (ofld_xmit(sc, qs, m));
2381169978Skmacy}
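
/*
 * Hypothetical construction of the ofld_hdr consumed above; a sketch only.
 * F_HDR_SGL, F_HDR_CTRL, G_HDR_NDESC, and G_HDR_QSET appear in this file;
 * the corresponding V_HDR_* setters are assumed to follow the usual Chelsio
 * field-macro pattern.
 */
#if 0
	struct ofld_hdr *oh;

	M_PREPEND(m, sizeof(*oh), M_DONTWAIT);
	if (m == NULL)
		return (ENOMEM);
	oh = mtod(m, struct ofld_hdr *);
	oh->flags = V_HDR_NDESC(ndesc) | V_HDR_QSET(qset);	/* assumed setters */
	/* Set F_HDR_SGL and oh->sgl if the WR carries a gather list. */
	return (t3_offload_tx(sc, m));
#endif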
2382237263Snp#endif
2383169978Skmacy
2384167514Skmacystatic void
2385167514Skmacyrestart_tx(struct sge_qset *qs)
2386167514Skmacy{
2387169978Skmacy	struct adapter *sc = qs->port->adapter;
2388237263Snp
2389169978Skmacy	if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2390169978Skmacy	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2391169978Skmacy	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2392169978Skmacy		qs->txq[TXQ_OFLD].restarts++;
2393171335Skmacy		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2394169978Skmacy	}
2395237263Snp
2396169978Skmacy	if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2397169978Skmacy	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2398169978Skmacy	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2399169978Skmacy		qs->txq[TXQ_CTRL].restarts++;
2400171335Skmacy		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2401169978Skmacy	}
2402167514Skmacy}
2403167514Skmacy
2404169978Skmacy/**
2405169978Skmacy *	t3_sge_alloc_qset - initialize an SGE queue set
2406169978Skmacy *	@sc: the controller softc
2407169978Skmacy *	@id: the queue set id
2408169978Skmacy *	@nports: how many Ethernet ports will be using this queue set
2409169978Skmacy *	@irq_vec_idx: the IRQ vector index for response queue interrupts
2410169978Skmacy *	@p: configuration parameters for this queue set
2411169978Skmacy *	@ntxq: number of Tx queues for the queue set
2412169978Skmacy *	@pi: port info for queue set
2413169978Skmacy *
2414169978Skmacy *	Allocate resources and initialize an SGE queue set.  A queue set
2415169978Skmacy *	comprises a response queue, two Rx free-buffer queues, and up to 3
2416169978Skmacy *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
2417169978Skmacy *	queue, offload queue, and control queue.
2418169978Skmacy */
2419169978Skmacyint
2420169978Skmacyt3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2421169978Skmacy		  const struct qset_params *p, int ntxq, struct port_info *pi)
2422169978Skmacy{
2423169978Skmacy	struct sge_qset *q = &sc->sge.qs[id];
2424194521Skmacy	int i, ret = 0;
2425169978Skmacy
2426194521Skmacy	MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2427194521Skmacy	q->port = pi;
2428237263Snp	q->adap = sc;
2429194521Skmacy
2430205950Snp	if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2431205950Snp	    M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2432205950Snp		device_printf(sc->dev, "failed to allocate mbuf ring\n");
2433205950Snp		goto err;
2434174708Skmacy	}
2435205950Snp	if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2436205950Snp	    M_NOWAIT | M_ZERO)) == NULL) {
2437205950Snp		device_printf(sc->dev, "failed to allocate ifq\n");
2438205950Snp		goto err;
2439205950Snp	}
2440205950Snp	ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2441205950Snp	callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2442205950Snp	callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2443205950Snp	q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2444205950Snp	q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2445205950Snp
2446169978Skmacy	init_qset_cntxt(q, id);
2447175347Skmacy	q->idx = id;
2448169978Skmacy	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2449169978Skmacy		    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2450169978Skmacy		    &q->fl[0].desc, &q->fl[0].sdesc,
2451169978Skmacy		    &q->fl[0].desc_tag, &q->fl[0].desc_map,
2452169978Skmacy		    sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2453169978Skmacy		printf("error %d from alloc ring fl0\n", ret);
2454169978Skmacy		goto err;
2455169978Skmacy	}
2456169978Skmacy
2457169978Skmacy	if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2458169978Skmacy		    sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2459169978Skmacy		    &q->fl[1].desc, &q->fl[1].sdesc,
2460169978Skmacy		    &q->fl[1].desc_tag, &q->fl[1].desc_map,
2461169978Skmacy		    sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2462169978Skmacy		printf("error %d from alloc ring fl1\n", ret);
2463169978Skmacy		goto err;
2464169978Skmacy	}
2465169978Skmacy
2466169978Skmacy	if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2467169978Skmacy		    &q->rspq.phys_addr, &q->rspq.desc, NULL,
2468169978Skmacy		    &q->rspq.desc_tag, &q->rspq.desc_map,
2469169978Skmacy		    NULL, NULL)) != 0) {
2470169978Skmacy		printf("error %d from alloc ring rspq\n", ret);
2471169978Skmacy		goto err;
2472169978Skmacy	}
2473169978Skmacy
2474169978Skmacy	for (i = 0; i < ntxq; ++i) {
2475169978Skmacy		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2476169978Skmacy
2477169978Skmacy		if ((ret = alloc_ring(sc, p->txq_size[i],
2478169978Skmacy			    sizeof(struct tx_desc), sz,
2479169978Skmacy			    &q->txq[i].phys_addr, &q->txq[i].desc,
2480169978Skmacy			    &q->txq[i].sdesc, &q->txq[i].desc_tag,
2481169978Skmacy			    &q->txq[i].desc_map,
2482169978Skmacy			    sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2483169978Skmacy			printf("error %d from alloc ring tx %i\n", ret, i);
2484169978Skmacy			goto err;
2485169978Skmacy		}
2486169978Skmacy		mbufq_init(&q->txq[i].sendq);
2487169978Skmacy		q->txq[i].gen = 1;
2488169978Skmacy		q->txq[i].size = p->txq_size[i];
2489169978Skmacy	}
2490237263Snp
2491237263Snp#ifdef TCP_OFFLOAD
2492171335Skmacy	TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2493237263Snp#endif
2494171335Skmacy	TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2495194521Skmacy	TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2496194521Skmacy	TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2497171335Skmacy
2498169978Skmacy	q->fl[0].gen = q->fl[1].gen = 1;
2499169978Skmacy	q->fl[0].size = p->fl_size;
2500169978Skmacy	q->fl[1].size = p->jumbo_size;
2501169978Skmacy
2502169978Skmacy	q->rspq.gen = 1;
2503171471Skmacy	q->rspq.cidx = 0;
2504169978Skmacy	q->rspq.size = p->rspq_size;
2505170869Skmacy
2506169978Skmacy	q->txq[TXQ_ETH].stop_thres = nports *
2507169978Skmacy	    flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2508169978Skmacy
2509194521Skmacy	q->fl[0].buf_size = MCLBYTES;
2510194521Skmacy	q->fl[0].zone = zone_pack;
2511194521Skmacy	q->fl[0].type = EXT_PACKET;
2512205950Snp
2513205950Snp	if (p->jumbo_buf_size == MJUM16BYTES) {
2514174708Skmacy		q->fl[1].zone = zone_jumbo16;
2515174708Skmacy		q->fl[1].type = EXT_JUMBO16;
2516205950Snp	} else if (p->jumbo_buf_size == MJUM9BYTES) {
2517175200Skmacy		q->fl[1].zone = zone_jumbo9;
2518175200Skmacy		q->fl[1].type = EXT_JUMBO9;
2519205950Snp	} else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2520205950Snp		q->fl[1].zone = zone_jumbop;
2521205950Snp		q->fl[1].type = EXT_JUMBOP;
2522205950Snp	} else {
2523205950Snp		KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2524205950Snp		ret = EDOOFUS;
2525205950Snp		goto err;
2526175200Skmacy	}
2527205950Snp	q->fl[1].buf_size = p->jumbo_buf_size;
2528171978Skmacy
2529183289Skmacy	/* Allocate and set up the lro_ctrl structure */
2530181616Skmacy	q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2531235963Sbz#if defined(INET6) || defined(INET)
2532181616Skmacy	ret = tcp_lro_init(&q->lro.ctrl);
2533181616Skmacy	if (ret) {
2534181616Skmacy		printf("error %d from tcp_lro_init\n", ret);
2535181616Skmacy		goto err;
2536181616Skmacy	}
2537205947Snp#endif
2538181616Skmacy	q->lro.ctrl.ifp = pi->ifp;
2539181616Skmacy
2540176472Skmacy	mtx_lock_spin(&sc->sge.reg_lock);
2541169978Skmacy	ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2542169978Skmacy				   q->rspq.phys_addr, q->rspq.size,
2543169978Skmacy				   q->fl[0].buf_size, 1, 0);
2544169978Skmacy	if (ret) {
2545169978Skmacy		printf("error %d from t3_sge_init_rspcntxt\n", ret);
2546169978Skmacy		goto err_unlock;
2547169978Skmacy	}
2548169978Skmacy
2549169978Skmacy	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2550169978Skmacy		ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2551169978Skmacy					  q->fl[i].phys_addr, q->fl[i].size,
2552169978Skmacy					  q->fl[i].buf_size, p->cong_thres, 1,
2553169978Skmacy					  0);
2554169978Skmacy		if (ret) {
2555169978Skmacy			printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2556169978Skmacy			goto err_unlock;
2557169978Skmacy		}
2558169978Skmacy	}
2559169978Skmacy
2560169978Skmacy	ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2561169978Skmacy				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2562169978Skmacy				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2563169978Skmacy				 1, 0);
2564169978Skmacy	if (ret) {
2565169978Skmacy		printf("error %d from t3_sge_init_ecntxt\n", ret);
2566169978Skmacy		goto err_unlock;
2567169978Skmacy	}
2568169978Skmacy
2569169978Skmacy	if (ntxq > 1) {
2570169978Skmacy		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2571169978Skmacy					 USE_GTS, SGE_CNTXT_OFLD, id,
2572169978Skmacy					 q->txq[TXQ_OFLD].phys_addr,
2573169978Skmacy					 q->txq[TXQ_OFLD].size, 0, 1, 0);
2574169978Skmacy		if (ret) {
2575169978Skmacy			printf("error %d from t3_sge_init_ecntxt\n", ret);
2576169978Skmacy			goto err_unlock;
2577169978Skmacy		}
2578169978Skmacy	}
2579169978Skmacy
2580169978Skmacy	if (ntxq > 2) {
2581169978Skmacy		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2582169978Skmacy					 SGE_CNTXT_CTRL, id,
2583169978Skmacy					 q->txq[TXQ_CTRL].phys_addr,
2584169978Skmacy					 q->txq[TXQ_CTRL].size,
2585169978Skmacy					 q->txq[TXQ_CTRL].token, 1, 0);
2586169978Skmacy		if (ret) {
2587169978Skmacy			printf("error %d from t3_sge_init_ecntxt\n", ret);
2588169978Skmacy			goto err_unlock;
2589169978Skmacy		}
2590169978Skmacy	}
2591169978Skmacy
2592170869Skmacy	snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2593170869Skmacy	    device_get_unit(sc->dev), irq_vec_idx);
2594170869Skmacy	MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2595170869Skmacy
2596176472Skmacy	mtx_unlock_spin(&sc->sge.reg_lock);
2597169978Skmacy	t3_update_qset_coalesce(q, p);
2598237263Snp
2599169978Skmacy	refill_fl(sc, &q->fl[0], q->fl[0].size);
2600169978Skmacy	refill_fl(sc, &q->fl[1], q->fl[1].size);
2601169978Skmacy	refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2602169978Skmacy
2603169978Skmacy	t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2604169978Skmacy		     V_NEWTIMER(q->rspq.holdoff_tmr));
2605169978Skmacy
2606169978Skmacy	return (0);
2607169978Skmacy
2608169978Skmacyerr_unlock:
2609176472Skmacy	mtx_unlock_spin(&sc->sge.reg_lock);
2610169978Skmacyerr:
2611194521Skmacy	TXQ_LOCK(q);
2612169978Skmacy	t3_free_qset(sc, q);
2613169978Skmacy
2614169978Skmacy	return (ret);
2615169978Skmacy}
2616169978Skmacy
2617181616Skmacy/*
2618181616Skmacy * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2619181616Skmacy * ethernet data.  Hardware assistance with various checksums and any vlan tag
2620181616Skmacy * will also be taken into account here.
2621181616Skmacy */
2622167514Skmacyvoid
2623171978Skmacyt3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)
2624167514Skmacy{
2625170654Skmacy	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2626171978Skmacy	struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2627167514Skmacy	struct ifnet *ifp = pi->ifp;
2628167514Skmacy
2629167514Skmacy	if ((ifp->if_capenable & IFCAP_RXCSUM) && !cpl->fragment &&
2630167514Skmacy	    cpl->csum_valid && cpl->csum == 0xffff) {
2632167514Skmacy		rspq_to_qset(rq)->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
2633167514Skmacy		m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID |
2634167514Skmacy		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2635167514Skmacy		m->m_pkthdr.csum_data = 0xffff;
2635167514Skmacy	}
2636204274Snp
2637204274Snp	if (cpl->vlan_valid) {
2638167514Skmacy		m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2639167514Skmacy		m->m_flags |= M_VLANTAG;
2640167514Skmacy	}
2641204274Snp
2642167514Skmacy	m->m_pkthdr.rcvif = ifp;
2643170654Skmacy	m->m_pkthdr.header = mtod(m, uint8_t *) + sizeof(*cpl) + ethpad;
2644168737Skmacy	/*
2645168737Skmacy	 * adjust after conversion to mbuf chain
2646168737Skmacy	 */
2647174708Skmacy	m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2648174708Skmacy	m->m_len -= (sizeof(*cpl) + ethpad);
2649174708Skmacy	m->m_data += (sizeof(*cpl) + ethpad);
2650167514Skmacy}
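
/*
 * Worked example of the adjustment above: with ethpad = 2, the received
 * frame sits at mtod(m) + sizeof(struct cpl_rx_pkt) + 2, so trimming
 * sizeof(*cpl) + ethpad bytes off m_data/m_len/m_pkthdr.len leaves m_data
 * pointing at the Ethernet header, as the stack expects.
 */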
2651167514Skmacy
2652167514Skmacy/**
2653167514Skmacy *	get_packet - return the next ingress packet buffer from a free list
2654167514Skmacy *	@adap: the adapter that received the packet
2655167514Skmacy *	@drop_thres: # of remaining buffers before we start dropping packets
2656167514Skmacy *	@qs: the qset that the SGE free list holding the packet belongs to
2657167514Skmacy *      @mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
2658167514Skmacy *      @r: response descriptor
2659167514Skmacy *
2660167514Skmacy *	Get the next packet from a free list and complete setup of the
2661167514Skmacy *	sk_buff.  If the packet is small we make a copy and recycle the
2662167514Skmacy *	mbuf.  If the packet is small we make a copy and recycle the
2663167514Skmacy *	positive drop threshold is supplied packets are dropped and their
2664167514Skmacy *	buffers recycled if (a) the number of remaining buffers is under the
2665167514Skmacy *	threshold and the packet is too big to copy, or (b) the packet should
2666167514Skmacy *	be copied but there is no memory for the copy.
2667167514Skmacy */
2668167514Skmacystatic int
2669167514Skmacyget_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
2670175340Skmacy    struct t3_mbuf_hdr *mh, struct rsp_desc *r)
2671172101Skmacy{
2673172101Skmacy	unsigned int len_cq = ntohl(r->len_cq);
2674172101Skmacy	struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
2675194521Skmacy	int mask, cidx = fl->cidx;
2676194521Skmacy	struct rx_sw_desc *sd = &fl->sdesc[cidx];
2677172101Skmacy	uint32_t len = G_RSPD_LEN(len_cq);
2678194521Skmacy	uint32_t flags = M_EXT;
2679194521Skmacy	uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
2680175711Skmacy	caddr_t cl;
2681194521Skmacy	struct mbuf *m;
2682172101Skmacy	int ret = 0;
2683172101Skmacy
2684194521Skmacy	mask = fl->size - 1;
2685194521Skmacy	prefetch(fl->sdesc[(cidx + 1) & mask].m);
2686194521Skmacy	prefetch(fl->sdesc[(cidx + 2) & mask].m);
2687194521Skmacy	prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
2688194521Skmacy	prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);
2689194521Skmacy
2690172101Skmacy	fl->credits--;
2691172101Skmacy	bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);
2692175200Skmacy
2693194521Skmacy	if (recycle_enable && len <= SGE_RX_COPY_THRES &&
2694194521Skmacy	    sopeop == RSPQ_SOP_EOP) {
2695194521Skmacy		if ((m = m_gethdr(M_DONTWAIT, MT_DATA)) == NULL)
2696175200Skmacy			goto skip_recycle;
2697194521Skmacy		cl = mtod(m, void *);
2698194521Skmacy		memcpy(cl, sd->rxsd_cl, len);
2699175200Skmacy		recycle_rx_buf(adap, fl, fl->cidx);
2700194521Skmacy		m->m_pkthdr.len = m->m_len = len;
2701194521Skmacy		m->m_flags = 0;
2702194521Skmacy		mh->mh_head = mh->mh_tail = m;
2703194521Skmacy		ret = 1;
2704194521Skmacy		goto done;
2705175200Skmacy	} else {
2706175200Skmacy	skip_recycle:
2707175200Skmacy		bus_dmamap_unload(fl->entry_tag, sd->map);
2708175200Skmacy		cl = sd->rxsd_cl;
2709194521Skmacy		m = sd->m;
2710172101Skmacy
2711175200Skmacy		if ((sopeop == RSPQ_SOP_EOP) ||
2712175200Skmacy		    (sopeop == RSPQ_SOP))
2713194521Skmacy			flags |= M_PKTHDR;
2714195512Snp		m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
2715194521Skmacy		if (fl->zone == zone_pack) {
2716194521Skmacy			/*
2717194521Skmacy			 * restore clobbered data pointer
2718194521Skmacy			 */
2719194521Skmacy			m->m_data = m->m_ext.ext_buf;
2720194521Skmacy		} else {
2721194521Skmacy			m_cljset(m, cl, fl->type);
2722194521Skmacy		}
2723194521Skmacy		m->m_len = len;
2724175200Skmacy	}
2725172101Skmacy	switch(sopeop) {
2726172101Skmacy	case RSPQ_SOP_EOP:
2727194521Skmacy		ret = 1;
2728194521Skmacy		/* FALLTHROUGH */
2729194521Skmacy	case RSPQ_SOP:
2730172101Skmacy		mh->mh_head = mh->mh_tail = m;
2731172101Skmacy		m->m_pkthdr.len = len;
2732194521Skmacy		break;
2733194521Skmacy	case RSPQ_EOP:
2734172101Skmacy		ret = 1;
2735194521Skmacy		/* FALLTHROUGH */
2736172101Skmacy	case RSPQ_NSOP_NEOP:
2737172101Skmacy		if (mh->mh_tail == NULL) {
2738175711Skmacy			log(LOG_ERR, "discarding intermediate descriptor entry\n");
2739172101Skmacy			m_freem(m);
2740172101Skmacy			break;
2741172101Skmacy		}
2742172101Skmacy		mh->mh_tail->m_next = m;
2743172101Skmacy		mh->mh_tail = m;
2744172101Skmacy		mh->mh_head->m_pkthdr.len += len;
2745172101Skmacy		break;
2746172101Skmacy	}
2747194521Skmacy	if (cxgb_debug)
2748194521Skmacy		printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
2749194521Skmacydone:
2750172101Skmacy	if (++fl->cidx == fl->size)
2751172101Skmacy		fl->cidx = 0;
2752172101Skmacy
2753172101Skmacy	return (ret);
2754172101Skmacy}
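
/*
 * The SOP/EOP cases above, in sketch form (sopeop is decoded from the
 * response descriptor flags):
 *
 *	RSPQ_SOP_EOP:   whole frame in one buffer; start the chain, ret = 1
 *	RSPQ_SOP:       first buffer of a multi-buffer frame; start the chain
 *	RSPQ_NSOP_NEOP: middle buffer; append to the chain
 *	RSPQ_EOP:       last buffer; append, frame is complete, ret = 1
 *
 * mh lives in the response queue itself (rspq_mh), so a frame may accumulate
 * across several calls before being handed up.
 */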
2755172101Skmacy
2756167514Skmacy/**
2757167514Skmacy *	handle_rsp_cntrl_info - handles control information in a response
2758167514Skmacy *	@qs: the queue set corresponding to the response
2759167514Skmacy *	@flags: the response control flags
2760167514Skmacy *
2761167514Skmacy *	Handles the control information of an SGE response, such as GTS
2762167514Skmacy *	indications and completion credits for the queue set's Tx queues.
2763167514Skmacy *	The HW coalesces credits; we do no extra SW coalescing.
2764167514Skmacy */
2765167514Skmacystatic __inline void
2766167514Skmacyhandle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
2767167514Skmacy{
2768167514Skmacy	unsigned int credits;
2769167514Skmacy
2770167514Skmacy#if USE_GTS
2771167514Skmacy	if (flags & F_RSPD_TXQ0_GTS)
2772167514Skmacy		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2773167514Skmacy#endif
2774167514Skmacy	credits = G_RSPD_TXQ0_CR(flags);
2775175200Skmacy	if (credits)
2776167514Skmacy		qs->txq[TXQ_ETH].processed += credits;
2777197043Snp
2778167514Skmacy	credits = G_RSPD_TXQ2_CR(flags);
2779197043Snp	if (credits)
2780167514Skmacy		qs->txq[TXQ_CTRL].processed += credits;
2781167514Skmacy
2782167514Skmacy#if USE_GTS
2783167514Skmacy	if (flags & F_RSPD_TXQ1_GTS)
2784167514Skmacy		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2785167514Skmacy#endif
2786167514Skmacy	credits = G_RSPD_TXQ1_CR(flags);
2787167514Skmacy	if (credits)
2788167514Skmacy		qs->txq[TXQ_OFLD].processed += credits;
2789174708Skmacy
2790167514Skmacy}
2791167514Skmacy
2792167514Skmacystatic void
2793167514Skmacycheck_ring_db(adapter_t *adap, struct sge_qset *qs,
2794167514Skmacy    unsigned int sleeping)
2795167514Skmacy{
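	/*
	 * Intentionally a no-op: this driver does not use GTS-driven
	 * doorbells (see USE_GTS above), so there is nothing to ring here.
	 * The stub is presumably kept for symmetry with the common Chelsio
	 * code, which rings Tx doorbells at this point.
	 */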
2796167514Skmacy	;
2797167514Skmacy}
2798167514Skmacy
2799167514Skmacy/**
2800167514Skmacy *	process_responses - process responses from an SGE response queue
2801167514Skmacy *	@adap: the adapter
2802167514Skmacy *	@qs: the queue set to which the response queue belongs
2803167514Skmacy *	@budget: how many responses can be processed in this round
2804167514Skmacy *
2805167514Skmacy *	Process responses from an SGE response queue up to the supplied budget.
2806167514Skmacy *	Responses include received packets as well as credits and other events
2807167514Skmacy *	for the queues that belong to the response queue's queue set.
2808167514Skmacy *	A negative budget is effectively unlimited.
2809167514Skmacy *
2810167514Skmacy *	Additionally choose the interrupt holdoff time for the next interrupt
2811167514Skmacy *	on this queue.  If the system is under memory pressure, use a fairly
2812167514Skmacy *	long delay to aid recovery.
2813167514Skmacy */
2814194521Skmacystatic int
2815167514Skmacyprocess_responses(adapter_t *adap, struct sge_qset *qs, int budget)
2816167514Skmacy{
2817167514Skmacy	struct sge_rspq *rspq = &qs->rspq;
2818167514Skmacy	struct rsp_desc *r = &rspq->desc[rspq->cidx];
2819167514Skmacy	int budget_left = budget;
2820167514Skmacy	unsigned int sleeping = 0;
2821235963Sbz#if defined(INET6) || defined(INET)
2822181616Skmacy	int lro_enabled = qs->lro.enabled;
2823183559Skmacy	int skip_lro;
2824181616Skmacy	struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
2825235963Sbz#endif
2826209116Snp	struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
2827167514Skmacy#ifdef DEBUG
2828167514Skmacy	static int last_holdoff = 0;
2829171471Skmacy	if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
2830167514Skmacy		printf("next_holdoff=%d\n", rspq->holdoff_tmr);
2831167514Skmacy		last_holdoff = rspq->holdoff_tmr;
2832167514Skmacy	}
2833169978Skmacy#endif
2834167514Skmacy	rspq->next_holdoff = rspq->holdoff_tmr;
2835167514Skmacy
2836167514Skmacy	while (__predict_true(budget_left && is_new_response(r, rspq))) {
2837167514Skmacy		int eth, eop = 0, ethpad = 0;
2838167514Skmacy		uint32_t flags = ntohl(r->flags);
2839174708Skmacy		uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2840237263Snp		uint8_t opcode = r->rss_hdr.opcode;
2841167514Skmacy
2842237263Snp		eth = (opcode == CPL_RX_PKT);
2843167514Skmacy
2844167514Skmacy		if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2845176472Skmacy			struct mbuf *m;
2846167514Skmacy
2847176472Skmacy			if (cxgb_debug)
2848176472Skmacy				printf("async notification\n");
2849176472Skmacy
2850209116Snp			if (mh->mh_head == NULL) {
2851209116Snp				mh->mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2852209116Snp				m = mh->mh_head;
2853176472Skmacy			} else {
2854176472Skmacy				m = m_gethdr(M_DONTWAIT, MT_DATA);
2855176472Skmacy			}
2856176472Skmacy			if (m == NULL)
2857176472Skmacy				goto no_mem;
2858176472Skmacy
2859176472Skmacy			memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2860176472Skmacy			m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2861176472Skmacy			*mtod(m, char *) = CPL_ASYNC_NOTIF;
2862237263Snp			opcode = CPL_ASYNC_NOTIF;
2863176472Skmacy			eop = 1;
2864176472Skmacy			rspq->async_notif++;
2865176472Skmacy			goto skip;
2866167514Skmacy		} else if  (flags & F_RSPD_IMM_DATA_VALID) {
2867237263Snp			struct mbuf *m = m_gethdr(M_DONTWAIT, MT_DATA);
2868175200Skmacy
2869237263Snp			if (m == NULL) {
2870176472Skmacy		no_mem:
2871167514Skmacy				rspq->next_holdoff = NOMEM_INTR_DELAY;
2872167514Skmacy				budget_left--;
2873167514Skmacy				break;
2874167514Skmacy			}
2875237263Snp			if (mh->mh_head == NULL)
2876237263Snp				mh->mh_head = m;
2877237263Snp			else
2878237263Snp				mh->mh_tail->m_next = m;
2879237263Snp			mh->mh_tail = m;
2880237263Snp
2881237263Snp			get_imm_packet(adap, r, m);
2882237263Snp			mh->mh_head->m_pkthdr.len += m->m_len;
2883168491Skmacy			eop = 1;
2884174708Skmacy			rspq->imm_data++;
2885176472Skmacy		} else if (r->len_cq) {
2886167514Skmacy			int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2887172101Skmacy
2888209116Snp			eop = get_packet(adap, drop_thresh, qs, mh, r);
2889194521Skmacy			if (eop) {
2890209116Snp				if (r->rss_hdr.hash_type && !adap->timestamp)
2891209116Snp					mh->mh_head->m_flags |= M_FLOWID;
2892209116Snp				mh->mh_head->m_pkthdr.flowid = rss_hash;
2893194521Skmacy			}
2894194521Skmacy
2895167514Skmacy			ethpad = 2;
2896167514Skmacy		} else {
2897167514Skmacy			rspq->pure_rsps++;
2898167514Skmacy		}
2899176472Skmacy	skip:
2900167514Skmacy		if (flags & RSPD_CTRL_MASK) {
2901167514Skmacy			sleeping |= flags & RSPD_GTS_MASK;
2902167514Skmacy			handle_rsp_cntrl_info(qs, flags);
2903167514Skmacy		}
2904174708Skmacy
2905174708Skmacy		if (!eth && eop) {
2906237263Snp			rspq->offload_pkts++;
2907237263Snp#ifdef TCP_OFFLOAD
2908237263Snp			adap->cpl_handler[opcode](qs, r, mh->mh_head);
2909237263Snp#else
2910237263Snp			m_freem(mh->mh_head);
2911237263Snp#endif
2912209116Snp			mh->mh_head = NULL;
2913174708Skmacy		} else if (eth && eop) {
2914209116Snp			struct mbuf *m = mh->mh_head;
2915167514Skmacy
2916181616Skmacy			t3_rx_eth(adap, rspq, m, ethpad);
2917183559Skmacy
2918183559Skmacy			/*
2919183559Skmacy			 * The T304 sends incoming packets on any qset.  If LRO
2920183559Skmacy * is also enabled, we could end up sending the packet up
2921183559Skmacy			 * lro_ctrl->ifp's input.  That is incorrect.
2922183559Skmacy			 *
2923183559Skmacy			 * The mbuf's rcvif was derived from the cpl header and
2924183559Skmacy			 * is accurate.  Skip LRO and just use that.
2925183559Skmacy			 */
2926235963Sbz#if defined(INET6) || defined(INET)
2927183559Skmacy			skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);
2928183559Skmacy
2929205947Snp			if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro &&
2930205947Snp			    tcp_lro_rx(lro_ctrl, m, 0) == 0) {
2932181616Skmacy				/* successfully queued for LRO */
2933235963Sbz			} else
2934235963Sbz#endif
2935235963Sbz			{
2936181616Skmacy				/*
2937181616Skmacy				 * LRO not enabled, packet unsuitable for LRO,
2938181616Skmacy				 * or unable to queue.  Pass it up right now in
2939181616Skmacy				 * either case.
2940181616Skmacy				 */
2941181616Skmacy				struct ifnet *ifp = m->m_pkthdr.rcvif;
2942181616Skmacy				(*ifp->if_input)(ifp, m);
2943181616Skmacy			}
2944209116Snp			mh->mh_head = NULL;
2945171469Skmacy
2946167514Skmacy		}
2947237263Snp
2948237263Snp		r++;
2949237263Snp		if (__predict_false(++rspq->cidx == rspq->size)) {
2950237263Snp			rspq->cidx = 0;
2951237263Snp			rspq->gen ^= 1;
2952237263Snp			r = rspq->desc;
2953237263Snp		}
2954237263Snp
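		/*
		 * Return freed response-queue entries to the hardware in
		 * batches of 64 to amortize the credit-return register write.
		 */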
2955237263Snp		if (++rspq->credits >= 64) {
2956237263Snp			refill_rspq(adap, rspq, rspq->credits);
2957237263Snp			rspq->credits = 0;
2958237263Snp		}
2959174708Skmacy		__refill_fl_lt(adap, &qs->fl[0], 32);
2960174708Skmacy		__refill_fl_lt(adap, &qs->fl[1], 32);
2961167514Skmacy		--budget_left;
2962167514Skmacy	}
2963167514Skmacy
2964235963Sbz#if defined(INET6) || defined(INET)
2965181616Skmacy	/* Flush LRO */
2966181616Skmacy	while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
2967181616Skmacy		struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
2968181616Skmacy		SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
2969181616Skmacy		tcp_lro_flush(lro_ctrl, queued);
2970181616Skmacy	}
2971205947Snp#endif
2972181616Skmacy
2973167514Skmacy	if (sleeping)
2974167514Skmacy		check_ring_db(adap, qs, sleeping);
2975167514Skmacy
2976194521Skmacy	mb();  /* commit Tx queue processed updates */
2977197043Snp	if (__predict_false(qs->txq_stopped > 1))
2978167514Skmacy		restart_tx(qs);
2979197043Snp
2980174708Skmacy	__refill_fl_lt(adap, &qs->fl[0], 512);
2981174708Skmacy	__refill_fl_lt(adap, &qs->fl[1], 512);
2982167514Skmacy	budget -= budget_left;
2983167514Skmacy	return (budget);
2984167514Skmacy}
2985167514Skmacy
2986167514Skmacy/*
2987167514Skmacy * A helper function that processes responses and issues GTS.
2988167514Skmacy */
2989167514Skmacystatic __inline int
2990167514Skmacyprocess_responses_gts(adapter_t *adap, struct sge_rspq *rq)
2991167514Skmacy{
2992167514Skmacy	int work;
2993167514Skmacy	static int last_holdoff = 0;
2994167514Skmacy
2995167514Skmacy	work = process_responses(adap, rspq_to_qset(rq), -1);
2996167514Skmacy
2997167514Skmacy	if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
2998167514Skmacy		printf("next_holdoff=%d\n", rq->next_holdoff);
2999167514Skmacy		last_holdoff = rq->next_holdoff;
3000167514Skmacy	}
3001175223Skmacy	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
3002175223Skmacy	    V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));
3003175223Skmacy
3004175223Skmacy	return (work);
3005167514Skmacy}
3006167514Skmacy
3007167514Skmacy
3008167514Skmacy/*
3009167514Skmacy * Interrupt handler for legacy INTx interrupts for T3B-based cards.
3010167514Skmacy * Handles data events from SGE response queues as well as error and other
3011167514Skmacy * async events as they all use the same interrupt pin.  We use one SGE
3012167514Skmacy * response queue per port in this mode and protect all response queues with
3013167514Skmacy * queue 0's lock.
3014167514Skmacy */
3015167514Skmacyvoid
3016167514Skmacyt3b_intr(void *data)
3017167514Skmacy{
3018171978Skmacy	uint32_t i, map;
3019167514Skmacy	adapter_t *adap = data;
3020167514Skmacy	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3021167514Skmacy
3022167514Skmacy	t3_write_reg(adap, A_PL_CLI, 0);
3023167514Skmacy	map = t3_read_reg(adap, A_SG_DATA_INTR);
3024167514Skmacy
3025167514Skmacy	if (!map)
3026167514Skmacy		return;
3027167514Skmacy
3028209840Snp	if (__predict_false(map & F_ERRINTR)) {
3029209840Snp		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3030209840Snp		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3031167514Skmacy		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3032209840Snp	}
3033172096Skmacy
3034167514Skmacy	mtx_lock(&q0->lock);
3035171978Skmacy	for_each_port(adap, i)
3036171978Skmacy	    if (map & (1 << i))
3037171978Skmacy		if (map & (1 << i))
3038171978Skmacy			process_responses_gts(adap, &adap->sge.qs[i].rspq);
3039167514Skmacy}
3040167514Skmacy
3041167514Skmacy/*
3042167514Skmacy * The MSI interrupt handler.  This needs to handle data events from SGE
3043167514Skmacy * response queues as well as error and other async events as they all use
3044167514Skmacy * the same MSI vector.  We use one SGE response queue per port in this mode
3045167514Skmacy * and protect all response queues with queue 0's lock.
3046167514Skmacy */
3047167514Skmacyvoid
3048167514Skmacyt3_intr_msi(void *data)
3049167514Skmacy{
3050167514Skmacy	adapter_t *adap = data;
3051167514Skmacy	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
3052171978Skmacy	int i, new_packets = 0;
3053172096Skmacy
3054167514Skmacy	mtx_lock(&q0->lock);
3055167514Skmacy
3056171978Skmacy	for_each_port(adap, i)
3057171978Skmacy		if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
3058171978Skmacy			new_packets = 1;
3059167514Skmacy	mtx_unlock(&q0->lock);
3060209840Snp	if (new_packets == 0) {
3061209840Snp		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
3062209840Snp		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
3063167514Skmacy		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
3064209840Snp	}
3065167514Skmacy}
3066167514Skmacy
3067167514Skmacyvoid
3068167514Skmacyt3_intr_msix(void *data)
3069167514Skmacy{
3070167514Skmacy	struct sge_qset *qs = data;
3071167514Skmacy	adapter_t *adap = qs->port->adapter;
3072167514Skmacy	struct sge_rspq *rspq = &qs->rspq;
3073194521Skmacy
3074194521Skmacy	if (process_responses_gts(adap, rspq) == 0)
3075194521Skmacy		rspq->unhandled_irqs++;
3076167514Skmacy}
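
/*
 * To summarize the three handlers above: INTx and MSI share one vector, so
 * they poll every port's response queue under queue 0's lock; MSI-X gives
 * each queue set its own vector and handler, so no shared lock is needed on
 * the fast path.
 */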
3077167514Skmacy
3078175200Skmacy#define QDUMP_SBUF_SIZE		(32 * 400)
3079175209Skmacystatic int
3080175209Skmacyt3_dump_rspq(SYSCTL_HANDLER_ARGS)
3081175209Skmacy{
3082175209Skmacy	struct sge_rspq *rspq;
3083175223Skmacy	struct sge_qset *qs;
3084175209Skmacy	int i, err, dump_end, idx;
3085175209Skmacy	struct sbuf *sb;
3086175209Skmacy	struct rsp_desc *rspd;
3087175223Skmacy	uint32_t data[4];
3088175209Skmacy
3089175209Skmacy	rspq = arg1;
3090175223Skmacy	qs = rspq_to_qset(rspq);
3091175209Skmacy	if (rspq->rspq_dump_count == 0)
3092175209Skmacy		return (0);
3093175209Skmacy	if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
3094175209Skmacy		log(LOG_WARNING,
3095175209Skmacy		    "dump count is too large %d\n", rspq->rspq_dump_count);
3096175209Skmacy		rspq->rspq_dump_count = 0;
3097175209Skmacy		return (EINVAL);
3098175209Skmacy	}
3099175209Skmacy	if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
3100175209Skmacy		log(LOG_WARNING,
3101175209Skmacy		    "dump start of %d is greater than queue size\n",
3102175209Skmacy		    rspq->rspq_dump_start);
3103175209Skmacy		rspq->rspq_dump_start = 0;
3104175209Skmacy		return (EINVAL);
3105175209Skmacy	}
3106175223Skmacy	err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
3107175223Skmacy	if (err)
3108175223Skmacy		return (err);
3109217916Smdf	err = sysctl_wire_old_buffer(req, 0);
3110217916Smdf	if (err)
3111217916Smdf		return (err);
3112212750Smdf	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3113212750Smdf
3114175223Skmacy	sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
3115175223Skmacy	    (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
3116175223Skmacy	    ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
3117175223Skmacy	sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
3118175223Skmacy	    ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);
3119175209Skmacy
3120175223Skmacy	sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
3121175209Skmacy	    (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));
3122175223Skmacy
3123175209Skmacy	dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
3124175209Skmacy	for (i = rspq->rspq_dump_start; i < dump_end; i++) {
3125175209Skmacy		idx = i & (RSPQ_Q_SIZE-1);
3126175209Skmacy
3127175209Skmacy		rspd = &rspq->desc[idx];
3128175209Skmacy		sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
3129175209Skmacy		    idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
3130175209Skmacy		    rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
3131175209Skmacy		sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
3132175209Skmacy		    rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
3133175209Skmacy		    be32toh(rspd->len_cq), rspd->intr_gen);
3134175209Skmacy	}
3135212750Smdf
3136212750Smdf	err = sbuf_finish(sb);
3137212750Smdf	/* Output a trailing NUL. */
3138212750Smdf	if (err == 0)
3139212750Smdf		err = SYSCTL_OUT(req, "", 1);
3140175209Skmacy	sbuf_delete(sb);
3141175209Skmacy	return (err);
3142175209Skmacy}
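
/*
 * Example use from userland, with sysctl names taken from the node layout
 * built in t3_add_configured_sysctls() below (the "dev.cxgbc.0" prefix is an
 * assumption -- substitute the actual controller device):
 *
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_start=0
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_count=32
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.qdump
 */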
3143175209Skmacy
3144167514Skmacystatic int
3145176472Skmacyt3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
3146175200Skmacy{
3147175200Skmacy	struct sge_txq *txq;
3148175200Skmacy	struct sge_qset *qs;
3149175200Skmacy	int i, j, err, dump_end;
3150175200Skmacy	struct sbuf *sb;
3151175200Skmacy	struct tx_desc *txd;
3152175200Skmacy	uint32_t *WR, wr_hi, wr_lo, gen;
3153175223Skmacy	uint32_t data[4];
3154175200Skmacy
3155175200Skmacy	txq = arg1;
3156175200Skmacy	qs = txq_to_qset(txq, TXQ_ETH);
3157175200Skmacy	if (txq->txq_dump_count == 0) {
3158175200Skmacy		return (0);
3159175200Skmacy	}
3160175200Skmacy	if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3161175200Skmacy		log(LOG_WARNING,
3162175200Skmacy		    "dump count is too large %d\n", txq->txq_dump_count);
3163175200Skmacy		txq->txq_dump_count = 1;
3164175200Skmacy		return (EINVAL);
3165175200Skmacy	}
3166175200Skmacy	if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3167175200Skmacy		log(LOG_WARNING,
3168175200Skmacy		    "dump start of %d is greater than queue size\n",
3169175200Skmacy		    txq->txq_dump_start);
3170175200Skmacy		txq->txq_dump_start = 0;
3171175200Skmacy		return (EINVAL);
3172175200Skmacy	}
3173176472Skmacy	err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3174175223Skmacy	if (err)
3175175223Skmacy		return (err);
3176217916Smdf	err = sysctl_wire_old_buffer(req, 0);
3177217916Smdf	if (err)
3178217916Smdf		return (err);
3179212750Smdf	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3180175223Skmacy
3181175223Skmacy	sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
3182175223Skmacy	    (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
3183175223Skmacy	    (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
3184175223Skmacy	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
3185175223Skmacy	    ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
3186175223Skmacy	    ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
3187175223Skmacy	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3188175200Skmacy	    txq->txq_dump_start,
3189175200Skmacy	    (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3190175200Skmacy
3191175200Skmacy	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3192175200Skmacy	for (i = txq->txq_dump_start; i < dump_end; i++) {
3193175200Skmacy		txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3194175200Skmacy		WR = (uint32_t *)txd->flit;
3195175200Skmacy		wr_hi = ntohl(WR[0]);
3196175200Skmacy		wr_lo = ntohl(WR[1]);
3197175200Skmacy		gen = G_WR_GEN(wr_lo);
3198175200Skmacy
3199175200Skmacy		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3200175200Skmacy		    wr_hi, wr_lo, gen);
3201175200Skmacy		for (j = 2; j < 30; j += 4)
3202175200Skmacy			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3203175200Skmacy			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3204175200Skmacy
3205175200Skmacy	}
3206212750Smdf	err = sbuf_finish(sb);
3207212750Smdf	/* Output a trailing NUL. */
3208212750Smdf	if (err == 0)
3209212750Smdf		err = SYSCTL_OUT(req, "", 1);
3210175200Skmacy	sbuf_delete(sb);
3211175200Skmacy	return (err);
3212175200Skmacy}
3213175200Skmacy
3214176472Skmacystatic int
3215176472Skmacyt3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3216176472Skmacy{
3217176472Skmacy	struct sge_txq *txq;
3218176472Skmacy	struct sge_qset *qs;
3219176472Skmacy	int i, j, err, dump_end;
3220176472Skmacy	struct sbuf *sb;
3221176472Skmacy	struct tx_desc *txd;
3222176472Skmacy	uint32_t *WR, wr_hi, wr_lo, gen;
3223176472Skmacy
3224176472Skmacy	txq = arg1;
3225176472Skmacy	qs = txq_to_qset(txq, TXQ_CTRL);
3226176472Skmacy	if (txq->txq_dump_count == 0) {
3227176472Skmacy		return (0);
3228176472Skmacy	}
3229176472Skmacy	if (txq->txq_dump_count > 256) {
3230176472Skmacy		log(LOG_WARNING,
3231176472Skmacy		    "dump count is too large %d\n", txq->txq_dump_count);
3232176472Skmacy		txq->txq_dump_count = 1;
3233176472Skmacy		return (EINVAL);
3234176472Skmacy	}
3235176472Skmacy	if (txq->txq_dump_start > 255) {
3236176472Skmacy		log(LOG_WARNING,
3237176472Skmacy		    "dump start of %d is greater than queue size\n",
3238176472Skmacy		    txq->txq_dump_start);
3239176472Skmacy		txq->txq_dump_start = 0;
3240176472Skmacy		return (EINVAL);
3241176472Skmacy	}
3242175200Skmacy
3243217916Smdf	err = sysctl_wire_old_buffer(req, 0);
3244217916Smdf	if (err != 0)
3245217916Smdf		return (err);
3246212750Smdf	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
3247176472Skmacy	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3248176472Skmacy	    txq->txq_dump_start,
3249176472Skmacy	    (txq->txq_dump_start + txq->txq_dump_count) & 255);
3250176472Skmacy
3251176472Skmacy	dump_end = txq->txq_dump_start + txq->txq_dump_count;
3252176472Skmacy	for (i = txq->txq_dump_start; i < dump_end; i++) {
3253176472Skmacy		txd = &txq->desc[i & (255)];
3254176472Skmacy		WR = (uint32_t *)txd->flit;
3255176472Skmacy		wr_hi = ntohl(WR[0]);
3256176472Skmacy		wr_lo = ntohl(WR[1]);
3257176472Skmacy		gen = G_WR_GEN(wr_lo);
3258176472Skmacy
3259176472Skmacy		sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3260176472Skmacy		    wr_hi, wr_lo, gen);
3261176472Skmacy		for (j = 2; j < 30; j += 4)
3262176472Skmacy			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3263176472Skmacy			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3264176472Skmacy
3265176472Skmacy	}
3266212750Smdf	err = sbuf_finish(sb);
3267212750Smdf	/* Output a trailing NUL. */
3268212750Smdf	if (err == 0)
3269212750Smdf		err = SYSCTL_OUT(req, "", 1);
3270176472Skmacy	sbuf_delete(sb);
3271176472Skmacy	return (err);
3272176472Skmacy}
3273176472Skmacy
3274175200Skmacystatic int
3275180583Skmacyt3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
3276167514Skmacy{
3277167514Skmacy	adapter_t *sc = arg1;
3278167514Skmacy	struct qset_params *qsp = &sc->params.sge.qset[0];
3279180583Skmacy	int coalesce_usecs;
3280167514Skmacy	struct sge_qset *qs;
3281167514Skmacy	int i, j, err, nqsets = 0;
3282167514Skmacy	struct mtx *lock;
3283174708Skmacy
3284174708Skmacy	if ((sc->flags & FULL_INIT_DONE) == 0)
3285174708Skmacy		return (ENXIO);
3286174708Skmacy
3287180583Skmacy	coalesce_usecs = qsp->coalesce_usecs;
3288180583Skmacy	err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);
3289167514Skmacy
3290167514Skmacy	if (err != 0) {
3291167514Skmacy		return (err);
3292167514Skmacy	}
3293180583Skmacy	if (coalesce_usecs == qsp->coalesce_usecs)
3294167514Skmacy		return (0);
3295167514Skmacy
3296167514Skmacy	for (i = 0; i < sc->params.nports; i++)
3297167514Skmacy		for (j = 0; j < sc->port[i].nqsets; j++)
3298167514Skmacy			nqsets++;
3299167514Skmacy
3300180583Skmacy	coalesce_usecs = max(1, coalesce_usecs);
3301167514Skmacy
3302167514Skmacy	for (i = 0; i < nqsets; i++) {
3303167514Skmacy		qs = &sc->sge.qs[i];
3304167514Skmacy		qsp = &sc->params.sge.qset[i];
3305180583Skmacy		qsp->coalesce_usecs = coalesce_usecs;
3306167514Skmacy
3307167514Skmacy		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
3308167514Skmacy			    &sc->sge.qs[0].rspq.lock;
3309167514Skmacy
3310167514Skmacy		mtx_lock(lock);
3311167514Skmacy		t3_update_qset_coalesce(qs, qsp);
3312167514Skmacy		t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
3313167514Skmacy		    V_NEWTIMER(qs->rspq.holdoff_tmr));
3314167514Skmacy		mtx_unlock(lock);
3315167514Skmacy	}
3316167514Skmacy
3317167514Skmacy	return (0);
3318167514Skmacy}
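
/*
 * Example (device name assumed): "sysctl dev.cxgbc.0.intr_coal=10" sets a
 * ~10us holdoff on every response queue.  Note that the value is applied to
 * all qsets, not just qset 0, even though qset 0's parameters seed the
 * sysctl.
 */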
3319167514Skmacy
3320209116Snpstatic int
3321209116Snpt3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
3322209116Snp{
3323209116Snp	adapter_t *sc = arg1;
3324209116Snp	int rc, timestamp;
3325167514Skmacy
3326209116Snp	if ((sc->flags & FULL_INIT_DONE) == 0)
3327209116Snp		return (ENXIO);
3328209116Snp
3329209116Snp	timestamp = sc->timestamp;
3330209116Snp	rc = sysctl_handle_int(oidp, &timestamp, arg2, req);
3331209116Snp
3332209116Snp	if (rc != 0)
3333209116Snp		return (rc);
3334209116Snp
3335209116Snp	if (timestamp != sc->timestamp) {
3336209116Snp		t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
3337209116Snp		    timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
3338209116Snp		sc->timestamp = timestamp;
3339209116Snp	}
3340209116Snp
3341209116Snp	return (0);
3342209116Snp}
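
/*
 * With the knob enabled ("sysctl dev.cxgbc.0.pkt_timestamp=1", device name
 * assumed) the RSS hash field of each ingress response carries a packet
 * timestamp instead of the connection hash, which is why process_responses()
 * skips setting M_FLOWID when adap->timestamp is set.
 */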
3343209116Snp
3344167514Skmacyvoid
3345174708Skmacyt3_add_attach_sysctls(adapter_t *sc)
3346167514Skmacy{
3347167514Skmacy	struct sysctl_ctx_list *ctx;
3348167514Skmacy	struct sysctl_oid_list *children;
3349174708Skmacy
3350167514Skmacy	ctx = device_get_sysctl_ctx(sc->dev);
3351167514Skmacy	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3352167514Skmacy
3353167514Skmacy	/* general adapter information */
3354167514Skmacy	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3355167514Skmacy	    "firmware_version",
3356167514Skmacy	    CTLFLAG_RD, &sc->fw_version,
3357167514Skmacy	    0, "firmware version");
3358217321Smdf	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3359176472Skmacy	    "hw_revision",
3360176472Skmacy	    CTLFLAG_RD, &sc->params.rev,
3361176472Skmacy	    0, "chip model");
3362192540Sgnn	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
3363192540Sgnn	    "port_types",
3364192540Sgnn	    CTLFLAG_RD, &sc->port_types,
3365192540Sgnn	    0, "type of ports");
3366176472Skmacy	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3367167514Skmacy	    "enable_debug",
3368167514Skmacy	    CTLFLAG_RW, &cxgb_debug,
3369167514Skmacy	    0, "enable verbose debugging output");
3370217321Smdf	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
3371174708Skmacy	    CTLFLAG_RD, &sc->tunq_coalesce,
3372174708Skmacy	    "#tunneled packets freed");
3373168737Skmacy	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3374171335Skmacy	    "txq_overrun",
3375171335Skmacy	    CTLFLAG_RD, &txq_fills,
3376171335Skmacy	    0, "#times txq overrun");
3377217321Smdf	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
3378209116Snp	    "core_clock",
3379209116Snp	    CTLFLAG_RD, &sc->params.vpd.cclk,
3380209116Snp	    0, "core clock frequency (in kHz)");
3381167514Skmacy}
3382167514Skmacy
3383175209Skmacy
3384175209Skmacystatic const char *rspq_name = "rspq";
3385175209Skmacystatic const char *txq_names[] =
3386175209Skmacy{
3387175209Skmacy	"txq_eth",
3388175209Skmacy	"txq_ofld",
3389175209Skmacy	"txq_ctrl"
3390181652Skmacy};
3391175209Skmacy
3392181652Skmacystatic int
3393181652Skmacysysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
3394181652Skmacy{
3395181652Skmacy	struct port_info *p = arg1;
3396181652Skmacy	uint64_t *parg;
3397181652Skmacy
3398181652Skmacy	if (!p)
3399181652Skmacy		return (EINVAL);
3400181652Skmacy
3401181652Skmacy	parg = (uint64_t *) ((uint8_t *)&p->mac.stats + arg2);
3402181652Skmacy	PORT_LOCK(p);
3403181652Skmacy	t3_mac_update_stats(&p->mac);
3404181652Skmacy	PORT_UNLOCK(p);
3405181652Skmacy
3406217616Smdf	return (sysctl_handle_64(oidp, parg, 0, req));
3407181652Skmacy}
3408181652Skmacy
3409174708Skmacyvoid
3410174708Skmacyt3_add_configured_sysctls(adapter_t *sc)
3411174708Skmacy{
3412174708Skmacy	struct sysctl_ctx_list *ctx;
3413174708Skmacy	struct sysctl_oid_list *children;
3414174708Skmacy	int i, j;
3415174708Skmacy
3416174708Skmacy	ctx = device_get_sysctl_ctx(sc->dev);
3417174708Skmacy	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
3418174708Skmacy
3419174708Skmacy	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3420174708Skmacy	    "intr_coal",
3421174708Skmacy	    CTLTYPE_INT|CTLFLAG_RW, sc,
3422180583Skmacy	    0, t3_set_coalesce_usecs,
3423180583Skmacy	    "I", "interrupt coalescing timer (us)");
3424174708Skmacy
3425209116Snp	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3426209116Snp	    "pkt_timestamp",
3427209116Snp	    CTLTYPE_INT | CTLFLAG_RW, sc,
3428209116Snp	    0, t3_pkt_timestamp,
3429209116Snp	    "I", "provide packet timestamp instead of connection hash");
3430209116Snp
3431174708Skmacy	for (i = 0; i < sc->params.nports; i++) {
3432174708Skmacy		struct port_info *pi = &sc->port[i];
3433174708Skmacy		struct sysctl_oid *poid;
3434174708Skmacy		struct sysctl_oid_list *poidlist;
3435181652Skmacy		struct mac_stats *mstats = &pi->mac.stats;
3436174708Skmacy
3437174708Skmacy		snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
3438174708Skmacy		poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
3439174708Skmacy		    pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3440174708Skmacy		poidlist = SYSCTL_CHILDREN(poid);
3441217321Smdf		SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
3442174708Skmacy		    "nqsets", CTLFLAG_RD, &pi->nqsets,
3443174708Skmacy		    0, "#queue sets");
3444181652Skmacy
3445174708Skmacy		for (j = 0; j < pi->nqsets; j++) {
3446174708Skmacy			struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3447189643Sgnn			struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
3448189643Sgnn					  *ctrlqpoid, *lropoid;
3449189643Sgnn			struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
3450189643Sgnn					       *txqpoidlist, *ctrlqpoidlist,
3451189643Sgnn					       *lropoidlist;
3452174708Skmacy			struct sge_txq *txq = &qs->txq[TXQ_ETH];
3453174708Skmacy
3454174708Skmacy			snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3455174708Skmacy
3456174708Skmacy			qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3457174708Skmacy			    qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3458174708Skmacy			qspoidlist = SYSCTL_CHILDREN(qspoid);
3459189643Sgnn
3460189643Sgnn			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
3461189643Sgnn					CTLFLAG_RD, &qs->fl[0].empty, 0,
3462189643Sgnn					"freelist #0 empty");
3463189643Sgnn			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
3464189643Sgnn					CTLFLAG_RD, &qs->fl[1].empty, 0,
3465189643Sgnn					"freelist #1 empty");
3466189643Sgnn
3467175209Skmacy			rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3468175209Skmacy			    rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3469175209Skmacy			rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3470175209Skmacy
3471175209Skmacy			txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3472175209Skmacy			    txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3473175209Skmacy			txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3474175209Skmacy
3475176472Skmacy			ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3476176472Skmacy			    txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3477176472Skmacy			ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3478176472Skmacy
3479181652Skmacy			lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3480181652Skmacy			    "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
3481181652Skmacy			lropoidlist = SYSCTL_CHILDREN(lropoid);
3482181652Skmacy
3483175209Skmacy			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3484175209Skmacy			    CTLFLAG_RD, &qs->rspq.size,
3485175209Skmacy			    0, "#entries in response queue");
3486175209Skmacy			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3487175209Skmacy			    CTLFLAG_RD, &qs->rspq.cidx,
3488175209Skmacy			    0, "consumer index");
3489175209Skmacy			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3490175209Skmacy			    CTLFLAG_RD, &qs->rspq.credits,
3491175209Skmacy			    0, "#credits");
3492206109Snp			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
3493206109Snp			    CTLFLAG_RD, &qs->rspq.starved,
3494206109Snp			    0, "#times starved");
3495217586Smdf			SYSCTL_ADD_ULONG(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
3496175209Skmacy			    CTLFLAG_RD, &qs->rspq.phys_addr,
3497175209Skmacy	    "physical address of the queue");
3498175209Skmacy			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
3499175209Skmacy			    CTLFLAG_RW, &qs->rspq.rspq_dump_start,
3500175209Skmacy			    0, "start rspq dump entry");
3501175209Skmacy			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3502175209Skmacy			    CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3503175209Skmacy			    0, "#rspq entries to dump");
3504175209Skmacy			SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3505175209Skmacy			    CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3506175209Skmacy			    0, t3_dump_rspq, "A", "dump of the response queue");
3507175209Skmacy
3508217321Smdf			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
3509205948Snp			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3510205948Snp			    "#tunneled packets dropped");
3511217321Smdf			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3512174708Skmacy			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3513174708Skmacy			    0, "#tunneled packets waiting to be sent");
3514185162Skmacy#if 0
3515175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3516174708Skmacy			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
3517174708Skmacy			    0, "#tunneled packets queue producer index");
3518175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
3519174708Skmacy			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
3520174708Skmacy			    0, "#tunneled packets queue consumer index");
3521185162Skmacy#endif
3522217321Smdf			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
3523174708Skmacy			    CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3524174708Skmacy			    0, "#tunneled packets processed by the card");
3525175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
3526174708Skmacy			    CTLFLAG_RD, &txq->cleaned,
3527174708Skmacy			    0, "#tunneled packets cleaned");
3528175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
3529174708Skmacy			    CTLFLAG_RD, &txq->in_use,
3530174708Skmacy			    0, "#tunneled packet slots in use");
3531175209Skmacy			SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "frees",
3532174708Skmacy			    CTLFLAG_RD, &txq->txq_frees,
3533174708Skmacy			    "#tunneled packets freed");
3534175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
3535174708Skmacy			    CTLFLAG_RD, &txq->txq_skipped,
3536174708Skmacy			    0, "#tunneled packet descriptors skipped");
3537217321Smdf			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
3538174708Skmacy			    CTLFLAG_RD, &txq->txq_coalesced,
3539194521Skmacy			    "#tunneled packets coalesced");
3540175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
3541174708Skmacy			    CTLFLAG_RD, &txq->txq_enqueued,
3542174708Skmacy			    0, "#tunneled packets enqueued to hardware");
3543175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
3544174708Skmacy			    CTLFLAG_RD, &qs->txq_stopped,
3545174708Skmacy			    0, "tx queues stopped");
3546217586Smdf			SYSCTL_ADD_ULONG(ctx, txqpoidlist, OID_AUTO, "phys_addr",
3547175200Skmacy			    CTLFLAG_RD, &txq->phys_addr,
3548175200Skmacy	    "physical address of the queue");
3549175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
3550175200Skmacy			    CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3551175200Skmacy			    0, "txq generation");
3552175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
3553175200Skmacy			    CTLFLAG_RD, &txq->cidx,
3554175200Skmacy			    0, "hardware queue cidx");
3555175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
3556175200Skmacy			    CTLFLAG_RD, &txq->pidx,
3557175200Skmacy			    0, "hardware queue pidx");
3558175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3559175200Skmacy			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3560175200Skmacy			    0, "txq start idx for dump");
3561175209Skmacy			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3562175200Skmacy			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3563175200Skmacy			    0, "txq #entries to dump");
3564175209Skmacy			SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3565175200Skmacy			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3566176472Skmacy			    0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3567176472Skmacy
3568176472Skmacy			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3569176472Skmacy			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3570176472Skmacy			    0, "ctrlq start idx for dump");
3571176472Skmacy			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3572176472Skmacy			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3573176472Skmacy			    0, "ctrl #entries to dump");
3574176472Skmacy			SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3575176472Skmacy			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3576176472Skmacy			    0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
3577176472Skmacy
3578181652Skmacy			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
3579181652Skmacy			    CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
3580181652Skmacy			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
3581181652Skmacy			    CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
3582181652Skmacy			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
3583181652Skmacy			    CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
3584181652Skmacy			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
3585181652Skmacy			    CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
3586181652Skmacy		}
3587176472Skmacy
3588181652Skmacy		/* Now add a node for mac stats. */
3589181652Skmacy		poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
3590181652Skmacy		    CTLFLAG_RD, NULL, "MAC statistics");
3591181652Skmacy		poidlist = SYSCTL_CHILDREN(poid);
3592176472Skmacy
3593181652Skmacy		/*
3594181652Skmacy		 * We (ab)use the length argument (arg2) to pass on the offset
3595181652Skmacy		 * of the data that we are interested in.  This is only required
3596181652Skmacy		 * for the quad counters that are updated from the hardware (we
3597181652Skmacy		 * make sure that we return the latest value).
3598181652Skmacy		 * sysctl_handle_macstat first updates *all* the counters from
3599181652Skmacy		 * the hardware, and then returns the latest value of the
3600181652Skmacy		 * requested counter.  Best would be to update only the
3601181652Skmacy		 * requested counter from hardware, but t3_mac_update_stats()
3602181652Skmacy		 * hides all the register details and we don't want to dive into
3603181652Skmacy		 * all that here.
3604181652Skmacy		 */
3605181652Skmacy#define CXGB_SYSCTL_ADD_QUAD(a)	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
3606217616Smdf    (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
3607181652Skmacy    sysctl_handle_macstat, "QU", 0)
3608181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_octets);
3609181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
3610181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames);
3611181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
3612181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
3613181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_pause);
3614181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_deferred);
3615181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
3616181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
3617181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
3618181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_underrun);
3619181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
3620181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
3621181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
3622181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
3623181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
3624181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
3625181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
3626181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
3627181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
3628181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
3629181652Skmacy		CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
3630181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_octets);
3631181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
3632181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames);
3633181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
3634181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
3635181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_pause);
3636193925Sgnn		CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
3637181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
3638181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
3639181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
3640181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
3641181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_runt);
3642181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_jabber);
3643181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_short);
3644181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_too_long);
3645181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
3646181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
3647181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
3648181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
3649181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
3650181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
3651181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
3652181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
3653181652Skmacy		CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
3654181652Skmacy#undef CXGB_SYSCTL_ADD_QUAD
3655181652Skmacy
3656181652Skmacy#define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
3657181652Skmacy    CTLFLAG_RD, &mstats->a, 0)
3658181652Skmacy		CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
3659181652Skmacy		CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
3660181652Skmacy		CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
3661181652Skmacy		CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
3662181652Skmacy		CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
3663181652Skmacy		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
3664181652Skmacy		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
3665181652Skmacy		CXGB_SYSCTL_ADD_ULONG(num_toggled);
3666181652Skmacy		CXGB_SYSCTL_ADD_ULONG(num_resets);
3667192540Sgnn		CXGB_SYSCTL_ADD_ULONG(link_faults);
3668181652Skmacy#undef CXGB_SYSCTL_ADD_ULONG
3669174708Skmacy	}
3670174708Skmacy}
3671174708Skmacy
3672167514Skmacy/**
3673167514Skmacy *	t3_get_desc - dump an SGE descriptor for debugging purposes
3674167514Skmacy *	@qs: the queue set
3675167514Skmacy *	@qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)
3676167514Skmacy *	@idx: the descriptor index in the queue
3677167514Skmacy *	@data: where to dump the descriptor contents
3678167514Skmacy *
3679167514Skmacy *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
3680167514Skmacy *	size of the descriptor.
3681167514Skmacy */
3682167514Skmacyint
3683167514Skmacyt3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
3684167514Skmacy		unsigned char *data)
3685167514Skmacy{
3686167514Skmacy	if (qnum >= 6)
3687167514Skmacy		return (EINVAL);
3688167514Skmacy
3689167514Skmacy	if (qnum < 3) {
3690167514Skmacy		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3691167514Skmacy			return (EINVAL);
3692167514Skmacy		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
3693167514Skmacy		return (sizeof(struct tx_desc));
3694167514Skmacy	}
3695167514Skmacy
3696167514Skmacy	if (qnum == 3) {
3697167514Skmacy		if (!qs->rspq.desc || idx >= qs->rspq.size)
3698167514Skmacy			return (EINVAL);
3699167514Skmacy		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
3700167514Skmacy		return (sizeof(struct rsp_desc));
3701167514Skmacy	}
3702167514Skmacy
3703167514Skmacy	qnum -= 4;
3704167514Skmacy	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
3705167514Skmacy		return (EINVAL);
3706167514Skmacy	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
3707167514Skmacy	return (sizeof(struct rx_desc));
3708167514Skmacy}
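
/*
 * A minimal caller sketch (qnum mapping as documented above; use a buffer
 * large enough for any of the descriptor types):
 *
 *	union {
 *		struct tx_desc txd;
 *		struct rsp_desc rspd;
 *		struct rx_desc rxd;
 *	} u;
 *	int len = t3_get_desc(qs, 3, idx, (unsigned char *)&u);
 *	if (len > 0)
 *		hexdump(&u, len, NULL, 0);
 */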
3709