/**************************************************************************

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/dev/cxgb/cxgb_sge.c 314667 2017-03-04 13:03:31Z avg $");

#include "opt_inet6.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus_dma.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/syslog.h>
#include <sys/socket.h>
#include <sys/sglist.h>

#include <net/bpf.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <cxgb_include.h>
#include <sys/mvec.h>

int	txq_fills = 0;
int	multiq_tx_enable = 1;

#ifdef TCP_OFFLOAD
CTASSERT(NUM_CPL_HANDLERS >= NUM_CPL_CMDS);
#endif

extern struct sysctl_oid_list sysctl__hw_cxgb_children;
int cxgb_txq_buf_ring_size = TX_ETH_Q_SIZE;
TUNABLE_INT("hw.cxgb.txq_mr_size", &cxgb_txq_buf_ring_size);
SYSCTL_INT(_hw_cxgb, OID_AUTO, txq_mr_size, CTLFLAG_RDTUN, &cxgb_txq_buf_ring_size, 0,
    "size of per-queue mbuf ring");

static int cxgb_tx_coalesce_force = 0;
TUNABLE_INT("hw.cxgb.tx_coalesce_force", &cxgb_tx_coalesce_force);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_force, CTLFLAG_RW,
    &cxgb_tx_coalesce_force, 0,
    "coalesce small packets into a single work request regardless of ring state");

#define	COALESCE_START_DEFAULT		(TX_ETH_Q_SIZE>>1)
#define	COALESCE_START_MAX		(TX_ETH_Q_SIZE-(TX_ETH_Q_SIZE>>3))
#define	COALESCE_STOP_DEFAULT		(TX_ETH_Q_SIZE>>2)
#define	COALESCE_STOP_MIN		(TX_ETH_Q_SIZE>>5)
#define	TX_RECLAIM_DEFAULT		(TX_ETH_Q_SIZE>>5)
#define	TX_RECLAIM_MAX			(TX_ETH_Q_SIZE>>2)
#define	TX_RECLAIM_MIN			(TX_ETH_Q_SIZE>>6)
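
/*
 * Worked example of the thresholds above, assuming TX_ETH_Q_SIZE is 1024
 * (the actual value comes from the driver headers):
 *
 *	COALESCE_START_DEFAULT	512	start coalescing at 1/2 full
 *	COALESCE_START_MAX	896	upper clamp on the start tunable (7/8)
 *	COALESCE_STOP_DEFAULT	256	stop coalescing at 1/4 full
 *	COALESCE_STOP_MIN	32	lower clamp on the stop tunable (1/32)
 *	TX_RECLAIM_DEFAULT	32	reclaim once 32 descriptors are pending
 *	TX_RECLAIM_MAX/MIN	256/16	clamps on the reclaim tunable
 */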

static int cxgb_tx_coalesce_enable_start = COALESCE_START_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_coalesce_enable_start",
    &cxgb_tx_coalesce_enable_start);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_start, CTLFLAG_RW,
    &cxgb_tx_coalesce_enable_start, 0,
    "coalesce enable threshold");
static int cxgb_tx_coalesce_enable_stop = COALESCE_STOP_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_coalesce_enable_stop", &cxgb_tx_coalesce_enable_stop);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_coalesce_enable_stop, CTLFLAG_RW,
    &cxgb_tx_coalesce_enable_stop, 0,
    "coalesce disable threshold");
static int cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;
TUNABLE_INT("hw.cxgb.tx_reclaim_threshold", &cxgb_tx_reclaim_threshold);
SYSCTL_INT(_hw_cxgb, OID_AUTO, tx_reclaim_threshold, CTLFLAG_RW,
    &cxgb_tx_reclaim_threshold, 0,
    "tx cleaning minimum threshold");

/*
 * XXX don't re-enable this until TOE stops assuming
 * we have an m_ext
 */
static int recycle_enable = 0;

extern int cxgb_use_16k_clusters;
extern int nmbjumbop;
extern int nmbjumbo9;
extern int nmbjumbo16;

#define USE_GTS 0

#define SGE_RX_SM_BUF_SIZE	1536
#define SGE_RX_DROP_THRES	16
#define SGE_RX_COPY_THRES	128

/*
 * Period of the Tx buffer reclaim timer.  This timer does not need to run
 * frequently as Tx buffers are usually reclaimed by new Tx packets.
 */
#define TX_RECLAIM_PERIOD       (hz >> 1)

/*
 * Values for sge_txq.flags
 */
enum {
	TXQ_RUNNING	= 1 << 0,  /* fetch engine is running */
	TXQ_LAST_PKT_DB = 1 << 1,  /* last packet rang the doorbell */
};

struct tx_desc {
	uint64_t	flit[TX_DESC_FLITS];
} __packed;

struct rx_desc {
	uint32_t	addr_lo;
	uint32_t	len_gen;
	uint32_t	gen2;
	uint32_t	addr_hi;
} __packed;

struct rsp_desc {               /* response queue descriptor */
	struct rss_header	rss_hdr;
	uint32_t		flags;
	uint32_t		len_cq;
	uint8_t			imm_data[47];
	uint8_t			intr_gen;
} __packed;

#define RX_SW_DESC_MAP_CREATED	(1 << 0)
#define TX_SW_DESC_MAP_CREATED	(1 << 1)
#define RX_SW_DESC_INUSE        (1 << 3)
#define TX_SW_DESC_MAPPED       (1 << 4)

#define RSPQ_NSOP_NEOP           G_RSPD_SOP_EOP(0)
#define RSPQ_EOP                 G_RSPD_SOP_EOP(F_RSPD_EOP)
#define RSPQ_SOP                 G_RSPD_SOP_EOP(F_RSPD_SOP)
#define RSPQ_SOP_EOP             G_RSPD_SOP_EOP(F_RSPD_SOP|F_RSPD_EOP)

struct tx_sw_desc {                /* SW state per Tx descriptor */
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct rx_sw_desc {                /* SW state per Rx descriptor */
	caddr_t		rxsd_cl;
	struct mbuf	*m;
	bus_dmamap_t	map;
	int		flags;
};

struct txq_state {
	unsigned int	compl;
	unsigned int	gen;
	unsigned int	pidx;
};

struct refill_fl_cb_arg {
	int               error;
	bus_dma_segment_t seg;
	int               nseg;
};

/*
 * Maps a number of flits to the number of Tx descriptors that can hold them.
 * The formula is
 *
 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
 *
 * HW allows up to 4 descriptors to be combined into a WR.
 */
static uint8_t flit_desc_map[] = {
	0,
#if SGE_NUM_GENBITS == 1
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4
#elif SGE_NUM_GENBITS == 2
	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
	2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
#else
# error "SGE_NUM_GENBITS must be 1 or 2"
#endif
};
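
/*
 * Worked example of the map above, for SGE_NUM_GENBITS == 2: the 15-entry
 * first bucket implies each descriptor gives up its final flit to the
 * generation bits, i.e. WR_FLITS == 15 in that configuration.  Then
 * flit_desc_map[15] == 1 (one descriptor suffices), while
 * flit_desc_map[16] == 1 + (16 - 2) / (15 - 1) == 2.
 */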

#define	TXQ_LOCK_ASSERT(qs)	mtx_assert(&(qs)->lock, MA_OWNED)
#define	TXQ_TRYLOCK(qs)		mtx_trylock(&(qs)->lock)
#define	TXQ_LOCK(qs)		mtx_lock(&(qs)->lock)
#define	TXQ_UNLOCK(qs)		mtx_unlock(&(qs)->lock)
#define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_NEEDS_ENQUEUE(qs)					\
	drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_FLUSH(qs)	drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
#define	TXQ_RING_DEQUEUE_COND(qs, func, arg)				\
	drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
#define	TXQ_RING_DEQUEUE(qs) \
	drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)

int cxgb_debug = 0;

static void sge_timer_cb(void *arg);
static void sge_timer_reclaim(void *arg, int ncount);
static void sge_txq_reclaim_handler(void *arg, int ncount);
static void cxgb_start_locked(struct sge_qset *qs);

/*
 * XXX need to cope with bursty scheduling by looking at a wider
 * window than we are now for determining the need for coalescing
 */
static __inline uint64_t
check_pkt_coalesce(struct sge_qset *qs)
{
	struct adapter *sc;
	struct sge_txq *txq;
	uint8_t *fill;

	if (__predict_false(cxgb_tx_coalesce_force))
		return (1);
	txq = &qs->txq[TXQ_ETH];
	sc = qs->port->adapter;
	fill = &sc->tunq_fill[qs->idx];

	if (cxgb_tx_coalesce_enable_start > COALESCE_START_MAX)
		cxgb_tx_coalesce_enable_start = COALESCE_START_MAX;
	if (cxgb_tx_coalesce_enable_stop < COALESCE_STOP_MIN)
		cxgb_tx_coalesce_enable_stop = COALESCE_STOP_MIN;
	/*
	 * Mark the queue as coalescing once the hardware queue fills to
	 * the start threshold; drop back out of coalescing once it drains
	 * to the stop threshold with the software ring empty and no
	 * coalescing in progress.  The gap between the two thresholds
	 * provides some degree of hysteresis.
	 */
	if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
	    TXQ_RING_EMPTY(qs) && (qs->coalescing == 0))
		*fill = 0;
	else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
		*fill = 1;

	return (sc->tunq_coalesce);
}

#ifdef __LP64__
static void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{
	uint64_t wr_hilo;
#if _BYTE_ORDER == _LITTLE_ENDIAN
	wr_hilo = wr_hi;
	wr_hilo |= (((uint64_t)wr_lo)<<32);
#else
	wr_hilo = wr_lo;
	wr_hilo |= (((uint64_t)wr_hi)<<32);
#endif
	wrp->wrh_hilo = wr_hilo;
}
#else
static void
set_wr_hdr(struct work_request_hdr *wrp, uint32_t wr_hi, uint32_t wr_lo)
{

	wrp->wrh_hi = wr_hi;
	wmb();
	wrp->wrh_lo = wr_lo;
}
#endif
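
/*
 * The LP64 variant above publishes the whole WR header with one aligned
 * 64-bit store, so the hardware can never observe wrh_lo without the
 * matching wrh_hi.  The 32-bit variant must store the two halves
 * separately and relies on the wmb() to keep wrh_hi visible before
 * wrh_lo.
 */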

struct coalesce_info {
	int count;
	int nbytes;
};

static int
coalesce_check(struct mbuf *m, void *arg)
{
	struct coalesce_info *ci = arg;
	int *count = &ci->count;
	int *nbytes = &ci->nbytes;

	if ((*nbytes == 0) || ((*nbytes + m->m_len <= 10500) &&
	    (*count < 7) && (m->m_next == NULL))) {
		*count += 1;
		*nbytes += m->m_len;
		return (1);
	}
	return (0);
}

static struct mbuf *
cxgb_dequeue(struct sge_qset *qs)
{
	struct mbuf *m, *m_head, *m_tail;
	struct coalesce_info ci;

	if (check_pkt_coalesce(qs) == 0)
		return TXQ_RING_DEQUEUE(qs);

	m_head = m_tail = NULL;
	ci.count = ci.nbytes = 0;
	do {
		m = TXQ_RING_DEQUEUE_COND(qs, coalesce_check, &ci);
		if (m_head == NULL) {
			m_tail = m_head = m;
		} else if (m != NULL) {
			m_tail->m_nextpkt = m;
			m_tail = m;
		}
	} while (m != NULL);
	if (ci.count > 7)
		panic("trying to coalesce %d packets into one WR", ci.count);
	return (m_head);
}
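
/*
 * Dequeue flow for the coalescing case above: drbr_dequeue_cond() offers
 * each candidate mbuf to coalesce_check(), which admits it only while the
 * running total stays within 7 packets and 10500 bytes and the mbuf is a
 * single contiguous buffer (m_next == NULL).  Admitted packets are chained
 * through m_nextpkt and later emitted as one multi-packet work request.
 */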

/**
 *	reclaim_completed_tx - reclaims completed Tx descriptors
 *	@qs: the queue set whose Tx queue is to be reclaimed
 *	@reclaim_min: don't bother unless at least this many are reclaimable
 *	@queue: the index of the Tx queue within the qset (e.g. TXQ_ETH)
 *
 *	Reclaims Tx descriptors that the SGE has indicated it has processed,
 *	and frees the associated buffers if possible.  Called with the Tx
 *	queue's lock held.
 */
static __inline int
reclaim_completed_tx(struct sge_qset *qs, int reclaim_min, int queue)
{
	struct sge_txq *q = &qs->txq[queue];
	int reclaim = desc_reclaimable(q);

	if ((cxgb_tx_reclaim_threshold > TX_RECLAIM_MAX) ||
	    (cxgb_tx_reclaim_threshold < TX_RECLAIM_MIN))
		cxgb_tx_reclaim_threshold = TX_RECLAIM_DEFAULT;

	if (reclaim < reclaim_min)
		return (0);

	mtx_assert(&qs->lock, MA_OWNED);
	if (reclaim > 0) {
		t3_free_tx_desc(qs, reclaim, queue);
		q->cleaned += reclaim;
		q->in_use -= reclaim;
	}
	if (isset(&qs->txq_stopped, TXQ_ETH))
		clrbit(&qs->txq_stopped, TXQ_ETH);

	return (reclaim);
}

/**
 *	should_restart_tx - are there enough resources to restart a Tx queue?
 *	@q: the Tx queue
 *
 *	Checks if there are enough descriptors to restart a suspended Tx queue.
 */
static __inline int
should_restart_tx(const struct sge_txq *q)
{
	unsigned int r = q->processed - q->cleaned;

	return q->in_use - r < (q->size >> 1);
}

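/*
 * Numeric example: for a 1024-entry queue with in_use == 600, of which
 * r == 100 are processed-but-uncleaned, only 500 descriptors are truly
 * outstanding.  That is below the half-size watermark of 512, so the
 * queue may be restarted.
 */
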
/**
 *	t3_sge_init - initialize SGE
 *	@adap: the adapter
 *	@p: the SGE parameters
 *
 *	Performs SGE initialization needed every time after a chip reset.
 *	We do not initialize any of the queue sets here, instead the driver
 *	top-level must request those individually.  We also do not enable DMA
 *	here, that should be done after the queues have been set up.
 */
void
t3_sge_init(adapter_t *adap, struct sge_params *p)
{
	u_int ctrl, ups;

	ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */

	ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
	       F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
	       V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
	       V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
#if SGE_NUM_GENBITS == 1
	ctrl |= F_EGRGENCTRL;
#endif
	if (adap->params.rev > 0) {
		if (!(adap->flags & (USING_MSIX | USING_MSI)))
			ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
	}
	t3_write_reg(adap, A_SG_CONTROL, ctrl);
	t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
		     V_LORCQDRBTHRSH(512));
	t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
	t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
		     V_TIMEOUT(200 * core_ticks_per_usec(adap)));
	t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
		     adap->params.rev < T3_REV_C ? 1000 : 500);
	t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
	t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
	t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
	t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
}

/**
 *	sgl_len - calculates the size of an SGL of the given capacity
 *	@n: the number of SGL entries
 *
 *	Calculates the number of flits needed for a scatter/gather list that
 *	can hold the given number of entries.
 */
static __inline unsigned int
sgl_len(unsigned int n)
{
	return ((3 * n) / 2 + (n & 1));
}
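
/*
 * Each SGL entry is an address/length pair occupying 1.5 flits; pairs are
 * packed two per 3-flit struct sg_ent.  E.g. sgl_len(3) == (3 * 3) / 2 +
 * (3 & 1) == 5: two entries fill 3 flits and the odd third is padded out
 * to a further 2.
 */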

/**
 *	get_imm_packet - return the next ingress packet buffer from a response
 *	@resp: the response descriptor containing the packet data
 *
 *	Return a packet containing the immediate data of the given response.
 */
static int
get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
{

	if (resp->rss_hdr.opcode == CPL_RX_DATA) {
		const struct cpl_rx_data *cpl = (const void *)&resp->imm_data[0];
		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
	} else if (resp->rss_hdr.opcode == CPL_RX_PKT) {
		const struct cpl_rx_pkt *cpl = (const void *)&resp->imm_data[0];
		m->m_len = sizeof(*cpl) + ntohs(cpl->len);
	} else
		m->m_len = IMMED_PKT_SIZE;
	m->m_ext.ext_buf = NULL;
	m->m_ext.ext_type = 0;
	memcpy(mtod(m, uint8_t *), resp->imm_data, m->m_len);
	return (0);
}

static __inline u_int
flits_to_desc(u_int n)
{
	return (flit_desc_map[n]);
}

#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		    F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		    V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		    F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		    F_HIRCQPARITYERROR)
#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
		      F_RSPQDISABLED)

/**
 *	t3_sge_err_intr_handler - SGE async event interrupt handler
 *	@adapter: the adapter
 *
 *	Interrupt handler for SGE asynchronous (non-data) events.
 */
void
t3_sge_err_intr_handler(adapter_t *adapter)
{
	unsigned int v, status;

	status = t3_read_reg(adapter, A_SG_INT_CAUSE);
	if (status & SGE_PARERR)
		CH_ALERT(adapter, "SGE parity error (0x%x)\n",
			 status & SGE_PARERR);
	if (status & SGE_FRAMINGERR)
		CH_ALERT(adapter, "SGE framing error (0x%x)\n",
			 status & SGE_FRAMINGERR);
	if (status & F_RSPQCREDITOVERFOW)
		CH_ALERT(adapter, "SGE response queue credit overflow\n");

	if (status & F_RSPQDISABLED) {
		v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);

		CH_ALERT(adapter,
			 "packet delivered to disabled response queue (0x%x)\n",
			 (v >> S_RSPQ0DISABLED) & 0xff);
	}

	t3_write_reg(adapter, A_SG_INT_CAUSE, status);
	if (status & SGE_FATALERR)
		t3_fatal_err(adapter);
}

void
t3_sge_prep(adapter_t *adap, struct sge_params *p)
{
	int i, nqsets, fl_q_size, jumbo_q_size, use_16k, jumbo_buf_size;

	nqsets = min(SGE_QSETS / adap->params.nports, mp_ncpus);
	nqsets *= adap->params.nports;

	fl_q_size = min(nmbclusters/(3*nqsets), FL_Q_SIZE);

	while (!powerof2(fl_q_size))
		fl_q_size--;

	use_16k = cxgb_use_16k_clusters != -1 ? cxgb_use_16k_clusters :
	    is_offload(adap);

#if __FreeBSD_version >= 700111
	if (use_16k) {
		jumbo_q_size = min(nmbjumbo16/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM16BYTES;
	} else {
		jumbo_q_size = min(nmbjumbo9/(3*nqsets), JUMBO_Q_SIZE);
		jumbo_buf_size = MJUM9BYTES;
	}
#else
	jumbo_q_size = min(nmbjumbop/(3*nqsets), JUMBO_Q_SIZE);
	jumbo_buf_size = MJUMPAGESIZE;
#endif
	while (!powerof2(jumbo_q_size))
		jumbo_q_size--;

	if (fl_q_size < (FL_Q_SIZE / 4) || jumbo_q_size < (JUMBO_Q_SIZE / 2))
		device_printf(adap->dev,
		    "Insufficient clusters and/or jumbo buffers.\n");

	p->max_pkt_size = jumbo_buf_size - sizeof(struct cpl_rx_data);

	for (i = 0; i < SGE_QSETS; ++i) {
		struct qset_params *q = p->qset + i;

		if (adap->params.nports > 2) {
			q->coalesce_usecs = 50;
		} else {
#ifdef INVARIANTS
			q->coalesce_usecs = 10;
#else
			q->coalesce_usecs = 5;
#endif
		}
		q->polling = 0;
		q->rspq_size = RSPQ_Q_SIZE;
		q->fl_size = fl_q_size;
		q->jumbo_size = jumbo_q_size;
		q->jumbo_buf_size = jumbo_buf_size;
		q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
		q->txq_size[TXQ_OFLD] = is_offload(adap) ? TX_OFLD_Q_SIZE : 16;
		q->txq_size[TXQ_CTRL] = TX_CTRL_Q_SIZE;
		q->cong_thres = 0;
	}
}

int
t3_sge_alloc(adapter_t *sc)
{

	/* The parent tag. */
	if (bus_dma_tag_create(bus_get_dma_tag(sc->dev), /* PCI parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				BUS_SPACE_UNRESTRICTED, /* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lock, lockarg */
				&sc->parent_dmat)) {
		device_printf(sc->dev, "Cannot allocate parent DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for normal sized RX frames
	 */
	if (bus_dma_tag_create(sc->parent_dmat, MCLBYTES, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1,
		MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_dmat)) {
		device_printf(sc->dev, "Cannot allocate RX DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for jumbo sized RX frames.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, MJUM16BYTES, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, MJUM16BYTES, 1, MJUM16BYTES,
		BUS_DMA_ALLOCNOW, NULL, NULL, &sc->rx_jumbo_dmat)) {
		device_printf(sc->dev, "Cannot allocate RX jumbo DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * DMA tag for TX frames.
	 */
	if (bus_dma_tag_create(sc->parent_dmat, 1, 0, BUS_SPACE_MAXADDR,
		BUS_SPACE_MAXADDR, NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
		TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
		NULL, NULL, &sc->tx_dmat)) {
		device_printf(sc->dev, "Cannot allocate TX DMA tag\n");
		return (ENOMEM);
	}

	return (0);
}

int
t3_sge_free(struct adapter *sc)
{

	if (sc->tx_dmat != NULL)
		bus_dma_tag_destroy(sc->tx_dmat);

	if (sc->rx_jumbo_dmat != NULL)
		bus_dma_tag_destroy(sc->rx_jumbo_dmat);

	if (sc->rx_dmat != NULL)
		bus_dma_tag_destroy(sc->rx_dmat);

	if (sc->parent_dmat != NULL)
		bus_dma_tag_destroy(sc->parent_dmat);

	return (0);
}

void
t3_update_qset_coalesce(struct sge_qset *qs, const struct qset_params *p)
{

	qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);
	qs->rspq.polling = 0 /* p->polling */;
}

#if !defined(__i386__) && !defined(__amd64__)
static void
refill_fl_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct refill_fl_cb_arg *cb_arg = arg;

	cb_arg->error = error;
	cb_arg->seg = segs[0];
	cb_arg->nseg = nseg;
}
#endif

/**
 *	refill_fl - refill an SGE free-buffer list
 *	@sc: the controller softc
 *	@q: the free-list to refill
 *	@n: the number of new buffers to allocate
 *
 *	(Re)populate an SGE free-buffer list with up to @n new packet buffers.
 *	The caller must assure that @n does not exceed the queue's capacity.
 */
static void
refill_fl(adapter_t *sc, struct sge_fl *q, int n)
{
	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
	struct rx_desc *d = &q->desc[q->pidx];
	struct refill_fl_cb_arg cb_arg;
	struct mbuf *m;
	caddr_t cl;
	int err;

	cb_arg.error = 0;
	while (n--) {
		/*
		 * We allocate an uninitialized mbuf + cluster; the mbuf is
		 * initialized after rx.
		 */
		if (q->zone == zone_pack) {
			if ((m = m_getcl(M_NOWAIT, MT_NOINIT, M_PKTHDR)) == NULL)
				break;
			cl = m->m_ext.ext_buf;
		} else {
			if ((cl = m_cljget(NULL, M_NOWAIT, q->buf_size)) == NULL)
				break;
			if ((m = m_gethdr(M_NOWAIT, MT_NOINIT)) == NULL) {
				uma_zfree(q->zone, cl);
				break;
			}
		}
		if ((sd->flags & RX_SW_DESC_MAP_CREATED) == 0) {
			if ((err = bus_dmamap_create(q->entry_tag, 0, &sd->map))) {
				log(LOG_WARNING, "bus_dmamap_create failed %d\n", err);
				uma_zfree(q->zone, cl);
				goto done;
			}
			sd->flags |= RX_SW_DESC_MAP_CREATED;
		}
#if !defined(__i386__) && !defined(__amd64__)
		err = bus_dmamap_load(q->entry_tag, sd->map,
		    cl, q->buf_size, refill_fl_cb, &cb_arg, 0);

		if (err != 0 || cb_arg.error) {
			if (q->zone == zone_pack)
				uma_zfree(q->zone, cl);
			m_free(m);
			goto done;
		}
#else
		cb_arg.seg.ds_addr = pmap_kextract((vm_offset_t)cl);
#endif
		sd->flags |= RX_SW_DESC_INUSE;
		sd->rxsd_cl = cl;
		sd->m = m;
		d->addr_lo = htobe32(cb_arg.seg.ds_addr & 0xffffffff);
		d->addr_hi = htobe32(((uint64_t)cb_arg.seg.ds_addr >> 32) & 0xffffffff);
		d->len_gen = htobe32(V_FLD_GEN1(q->gen));
		d->gen2 = htobe32(V_FLD_GEN2(q->gen));

		d++;
		sd++;

		if (++q->pidx == q->size) {
			q->pidx = 0;
			q->gen ^= 1;
			sd = q->sdesc;
			d = q->desc;
		}
		q->credits++;
		q->db_pending++;
	}

done:
	if (q->db_pending >= 32) {
		q->db_pending = 0;
		t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
	}
}

/**
 *	free_rx_bufs - free the Rx buffers on an SGE free list
 *	@sc: the controller softc
 *	@q: the SGE free list to clean up
 *
 *	Release the buffers on an SGE free-buffer Rx queue.  HW fetching from
 *	this queue should be stopped before calling this function.
 */
static void
free_rx_bufs(adapter_t *sc, struct sge_fl *q)
{
	u_int cidx = q->cidx;

	while (q->credits--) {
		struct rx_sw_desc *d = &q->sdesc[cidx];

		if (d->flags & RX_SW_DESC_INUSE) {
			bus_dmamap_unload(q->entry_tag, d->map);
			bus_dmamap_destroy(q->entry_tag, d->map);
			if (q->zone == zone_pack) {
				m_init(d->m, zone_pack, MCLBYTES,
				    M_NOWAIT, MT_DATA, M_EXT);
				uma_zfree(zone_pack, d->m);
			} else {
				m_init(d->m, zone_mbuf, MLEN,
				    M_NOWAIT, MT_DATA, 0);
				uma_zfree(zone_mbuf, d->m);
				uma_zfree(q->zone, d->rxsd_cl);
			}
		}

		d->rxsd_cl = NULL;
		d->m = NULL;
		if (++cidx == q->size)
			cidx = 0;
	}
}

static __inline void
__refill_fl(adapter_t *adap, struct sge_fl *fl)
{
	refill_fl(adap, fl, min(16U, fl->size - fl->credits));
}

static __inline void
__refill_fl_lt(adapter_t *adap, struct sge_fl *fl, int max)
{
	uint32_t reclaimable = fl->size - fl->credits;

	if (reclaimable > 0)
		refill_fl(adap, fl, min(max, reclaimable));
}

/**
 *	recycle_rx_buf - recycle a receive buffer
 *	@adapter: the adapter
 *	@q: the SGE free list
 *	@idx: index of buffer to recycle
 *
 *	Recycles the specified buffer on the given free list by adding it at
 *	the next available slot on the list.
 */
static void
recycle_rx_buf(adapter_t *adap, struct sge_fl *q, unsigned int idx)
{
	struct rx_desc *from = &q->desc[idx];
	struct rx_desc *to   = &q->desc[q->pidx];

	q->sdesc[q->pidx] = q->sdesc[idx];
	to->addr_lo = from->addr_lo;        // already big endian
	to->addr_hi = from->addr_hi;        // likewise
	wmb();	/* necessary ? */
	to->len_gen = htobe32(V_FLD_GEN1(q->gen));
	to->gen2 = htobe32(V_FLD_GEN2(q->gen));
	q->credits++;

	if (++q->pidx == q->size) {
		q->pidx = 0;
		q->gen ^= 1;
	}
	t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}

static void
alloc_ring_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	uint32_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static int
alloc_ring(adapter_t *sc, size_t nelem, size_t elem_size, size_t sw_size,
    bus_addr_t *phys, void *desc, void *sdesc, bus_dma_tag_t *tag,
    bus_dmamap_t *map, bus_dma_tag_t parent_entry_tag, bus_dma_tag_t *entry_tag)
{
	size_t len = nelem * elem_size;
	void *s = NULL;
	void *p = NULL;
	int err;

	if ((err = bus_dma_tag_create(sc->parent_dmat, PAGE_SIZE, 0,
				      BUS_SPACE_MAXADDR_32BIT,
				      BUS_SPACE_MAXADDR, NULL, NULL, len, 1,
				      len, 0, NULL, NULL, tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor tag\n");
		return (ENOMEM);
	}

	if ((err = bus_dmamem_alloc(*tag, (void **)&p, BUS_DMA_NOWAIT,
				    map)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor memory\n");
		return (ENOMEM);
	}

	bus_dmamap_load(*tag, *map, p, len, alloc_ring_cb, phys, 0);
	bzero(p, len);
	*(void **)desc = p;

	if (sw_size) {
		len = nelem * sw_size;
		s = malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
		*(void **)sdesc = s;
	}
	if (parent_entry_tag == NULL)
		return (0);

	if ((err = bus_dma_tag_create(parent_entry_tag, 1, 0,
				      BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
				      NULL, NULL, TX_MAX_SIZE, TX_MAX_SEGS,
				      TX_MAX_SIZE, BUS_DMA_ALLOCNOW,
				      NULL, NULL, entry_tag)) != 0) {
		device_printf(sc->dev, "Cannot allocate descriptor entry tag\n");
		return (ENOMEM);
	}
	return (0);
}

static void
sge_slow_intr_handler(void *arg, int ncount)
{
	adapter_t *sc = arg;

	t3_slow_intr_handler(sc);
	t3_write_reg(sc, A_PL_INT_ENABLE0, sc->slow_intr_mask);
	(void) t3_read_reg(sc, A_PL_INT_ENABLE0);
}

/**
 *	sge_timer_cb - perform periodic maintenance of an SGE qset
 *	@arg: the adapter to maintain
 *
 *	Runs periodically from a timer to perform maintenance of an SGE queue
 *	set.  It performs the following tasks:
 *
 *	a) Cleans up any completed Tx descriptors that may still be pending.
 *	Normal descriptor cleanup happens when new packets are added to a Tx
 *	queue so this timer is relatively infrequent and does any cleanup only
 *	if the Tx queue has not seen any new packets in a while.  We make a
 *	best effort attempt to reclaim descriptors, in that we don't wait
 *	around if we cannot get a queue's lock (which most likely is because
 *	someone else is queueing new packets and so will also handle the clean
 *	up).  Since control queues use immediate data exclusively we don't
 *	bother cleaning them up here.
 *
 *	b) Replenishes Rx queues that have run out due to memory shortage.
 *	Normally new Rx buffers are added when existing ones are consumed but
 *	when out of memory a queue can become empty.  We try to add only a few
 *	buffers here, the queue will be replenished fully as these new buffers
 *	are used up if memory shortage has subsided.
 *
 *	c) Return coalesced response queue credits in case a response queue is
 *	starved.
 *
 *	d) Ring doorbells for T304 tunnel queues since we have seen doorbell
 *	fifo overflows and the FW doesn't implement any recovery scheme yet.
 */
static void
sge_timer_cb(void *arg)
{
	adapter_t *sc = arg;

	if ((sc->flags & USING_MSIX) == 0) {
		struct port_info *pi;
		struct sge_qset *qs;
		struct sge_txq  *txq;
		int i, j;
		int reclaim_ofl, refill_rx;

		if (sc->open_device_map == 0)
			return;

		for (i = 0; i < sc->params.nports; i++) {
			pi = &sc->port[i];
			for (j = 0; j < pi->nqsets; j++) {
				qs = &sc->sge.qs[pi->first_qset + j];
				txq = &qs->txq[0];
				reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
				refill_rx = ((qs->fl[0].credits < qs->fl[0].size) ||
				    (qs->fl[1].credits < qs->fl[1].size));
				if (reclaim_ofl || refill_rx) {
					taskqueue_enqueue(sc->tq, &pi->timer_reclaim_task);
					break;
				}
			}
		}
	}

	if (sc->params.nports > 2) {
		int i;

		for_each_port(sc, i) {
			struct port_info *pi = &sc->port[i];

			t3_write_reg(sc, A_SG_KDOORBELL,
				     F_SELEGRCNTX |
				     (FW_TUNNEL_SGEEC_START + pi->first_qset));
		}
	}
	if (((sc->flags & USING_MSIX) == 0 || sc->params.nports > 2) &&
	    sc->open_device_map != 0)
		callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
}

/*
 * This is meant to be a catch-all function to keep sge state private
 * to sge.c
 */
int
t3_sge_init_adapter(adapter_t *sc)
{
	callout_init(&sc->sge_timer_ch, 1);
	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
	TASK_INIT(&sc->slow_intr_task, 0, sge_slow_intr_handler, sc);
	return (0);
}

int
t3_sge_reset_adapter(adapter_t *sc)
{
	callout_reset(&sc->sge_timer_ch, TX_RECLAIM_PERIOD, sge_timer_cb, sc);
	return (0);
}

int
t3_sge_init_port(struct port_info *pi)
{
	TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
	return (0);
}

/**
 *	refill_rspq - replenish an SGE response queue
 *	@sc: the adapter
 *	@q: the response queue to replenish
 *	@credits: how many new responses to make available
 *
 *	Replenishes a response queue by making the supplied number of responses
 *	available to HW.
 */
static __inline void
refill_rspq(adapter_t *sc, const struct sge_rspq *q, u_int credits)
{

	/* mbufs are allocated on demand when a rspq entry is processed. */
	t3_write_reg(sc, A_SG_RSPQ_CREDIT_RETURN,
		     V_RSPQ(q->cntxt_id) | V_CREDITS(credits));
}

static void
sge_txq_reclaim_handler(void *arg, int ncount)
{
	struct sge_qset *qs = arg;
	int i;

	for (i = 0; i < 3; i++)
		reclaim_completed_tx(qs, 16, i);
}

static void
sge_timer_reclaim(void *arg, int ncount)
{
	struct port_info *pi = arg;
	int i, nqsets = pi->nqsets;
	adapter_t *sc = pi->adapter;
	struct sge_qset *qs;
	struct mtx *lock;

	KASSERT((sc->flags & USING_MSIX) == 0,
	    ("can't call timer reclaim for msi-x"));

	for (i = 0; i < nqsets; i++) {
		qs = &sc->sge.qs[pi->first_qset + i];

		reclaim_completed_tx(qs, 16, TXQ_OFLD);
		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
			    &sc->sge.qs[0].rspq.lock;

		if (mtx_trylock(lock)) {
			/* XXX currently assume that we are *NOT* polling */
			uint32_t status = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS);

			if (qs->fl[0].credits < qs->fl[0].size - 16)
				__refill_fl(sc, &qs->fl[0]);
			if (qs->fl[1].credits < qs->fl[1].size - 16)
				__refill_fl(sc, &qs->fl[1]);

			if (status & (1 << qs->rspq.cntxt_id)) {
				if (qs->rspq.credits) {
					refill_rspq(sc, &qs->rspq, 1);
					qs->rspq.credits--;
					t3_write_reg(sc, A_SG_RSPQ_FL_STATUS,
					    1 << qs->rspq.cntxt_id);
				}
			}
			mtx_unlock(lock);
		}
	}
}

/**
 *	init_qset_cntxt - initialize an SGE queue set context info
 *	@qs: the queue set
 *	@id: the queue set id
 *
 *	Initializes the TIDs and context ids for the queues of a queue set.
 */
static void
init_qset_cntxt(struct sge_qset *qs, u_int id)
{

	qs->rspq.cntxt_id = id;
	qs->fl[0].cntxt_id = 2 * id;
	qs->fl[1].cntxt_id = 2 * id + 1;
	qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
	qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
	qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
	qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
	qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;

	mbufq_init(&qs->txq[TXQ_ETH].sendq);
	mbufq_init(&qs->txq[TXQ_OFLD].sendq);
	mbufq_init(&qs->txq[TXQ_CTRL].sendq);
}

static void
txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
{
	txq->in_use += ndesc;
	/*
	 * XXX we don't handle stopping of queue
	 * presumably start handles this when we bump against the end
	 */
	txqs->gen = txq->gen;
	txq->unacked += ndesc;
	txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
	txq->unacked &= 31;
	txqs->pidx = txq->pidx;
	txq->pidx += ndesc;
#ifdef INVARIANTS
	if (((txqs->pidx > txq->cidx) &&
		(txq->pidx < txqs->pidx) &&
		(txq->pidx >= txq->cidx)) ||
	    ((txqs->pidx < txq->cidx) &&
		(txq->pidx >= txq->cidx)) ||
	    ((txqs->pidx < txq->cidx) &&
		(txq->cidx < txqs->pidx)))
		panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
		    txqs->pidx, txq->pidx, txq->cidx);
#endif
	if (txq->pidx >= txq->size) {
		txq->pidx -= txq->size;
		txq->gen ^= 1;
	}
}
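
/*
 * Note on the completion request in txq_prod() above: unacked accumulates
 * descriptors handed to the hardware and is kept modulo 32, so the "& 32"
 * test fires each time another 32 descriptors have been posted.  Shifting
 * that bit into the S_WR_COMPL position asks the SGE for a completion
 * roughly once every 32 descriptors, keeping Tx reclaim work bounded.
 */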

/**
 *	calc_tx_descs - calculate the number of Tx descriptors for a packet
 *	@m: the packet mbufs
 *	@nsegs: the number of segments
 *
 *	Returns the number of Tx descriptors needed for the given Ethernet
 *	packet.  Ethernet packets require addition of WR and CPL headers.
 */
static __inline unsigned int
calc_tx_descs(const struct mbuf *m, int nsegs)
{
	unsigned int flits;

	if (m->m_pkthdr.len <= PIO_LEN)
		return 1;

	flits = sgl_len(nsegs) + 2;
	if (m->m_pkthdr.csum_flags & CSUM_TSO)
		flits++;

	return flits_to_desc(flits);
}

/**
 *	make_sgl - populate a scatter/gather list for a packet
 *	@sgp: the SGL to populate
 *	@segs: the packet dma segments
 *	@nsegs: the number of segments
 *
 *	Generates a scatter/gather list for the buffers that make up a packet.
 *	The caller must size the SGL appropriately (see sgl_len() for the SGL
 *	size in 8-byte words).
 */
static __inline void
make_sgl(struct sg_ent *sgp, bus_dma_segment_t *segs, int nsegs)
{
	int i, idx;

	for (idx = 0, i = 0; i < nsegs; i++) {
		/*
		 * firmware doesn't like empty segments
		 */
		if (segs[i].ds_len == 0)
			continue;
		if (i && idx == 0)
			++sgp;

		sgp->len[idx] = htobe32(segs[i].ds_len);
		sgp->addr[idx] = htobe64(segs[i].ds_addr);
		idx ^= 1;
	}

	if (idx) {
		sgp->len[idx] = 0;
		sgp->addr[idx] = 0;
	}
}
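
/*
 * Example of the layout make_sgl() produces: three DMA segments yield
 * sg_ent[0] with both address/length pairs populated and sg_ent[1] holding
 * only its first pair; the trailing pair is explicitly zeroed above so the
 * firmware never chases a stale address.
 */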

/**
 *	check_ring_tx_db - check and potentially ring a Tx queue's doorbell
 *	@adap: the adapter
 *	@q: the Tx queue
 *	@mustring: if nonzero, ring the doorbell unconditionally instead of
 *	waiting for the pending-descriptor batch to fill
 *
 *	Ring the doorbell if a Tx queue is asleep.  There is a natural race,
 *	where the HW is going to sleep just after we checked, however,
 *	then the interrupt handler will detect the outstanding TX packet
 *	and ring the doorbell for us.
 *
 *	When GTS is disabled we ring the doorbell whenever @mustring is set
 *	or 32 descriptors have accumulated since the last ring.
 */
static __inline void
check_ring_tx_db(adapter_t *adap, struct sge_txq *q, int mustring)
{
#if USE_GTS
	clear_bit(TXQ_LAST_PKT_DB, &q->flags);
	if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) {
		set_bit(TXQ_LAST_PKT_DB, &q->flags);
#ifdef T3_TRACE
		T3_TRACE1(adap->tb[q->cntxt_id & 7], "doorbell Tx, cntxt %d",
			  q->cntxt_id);
#endif
		t3_write_reg(adap, A_SG_KDOORBELL,
			     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
	}
#else
	if (mustring || ++q->db_pending >= 32) {
		wmb();            /* write descriptors before telling HW */
		t3_write_reg(adap, A_SG_KDOORBELL,
		    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
		q->db_pending = 0;
	}
#endif
}

static __inline void
wr_gen2(struct tx_desc *d, unsigned int gen)
{
#if SGE_NUM_GENBITS == 2
	d->flit[TX_DESC_FLITS - 1] = htobe64(gen);
#endif
}
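
/*
 * The generation bit written by wr_gen2() is how the hardware tells
 * freshly produced descriptors from stale ones as the producer index
 * wraps: the driver flips the queue's gen value on every wrap, so a
 * descriptor is valid only when its generation matches the current pass
 * over the ring.
 */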
1264167514Skmacy
1265169978Skmacy/**
1266169978Skmacy *	write_wr_hdr_sgl - write a WR header and, optionally, SGL
1267169978Skmacy *	@ndesc: number of Tx descriptors spanned by the SGL
1268169978Skmacy *	@txd: first Tx descriptor to be written
1269169978Skmacy *	@txqs: txq state (generation and producer index)
1270169978Skmacy *	@txq: the SGE Tx queue
1271169978Skmacy *	@sgl: the SGL
1272169978Skmacy *	@flits: number of flits to the start of the SGL in the first descriptor
1273169978Skmacy *	@sgl_flits: the SGL size in flits
1274169978Skmacy *	@wr_hi: top 32 bits of WR header based on WR type (big endian)
1275169978Skmacy *	@wr_lo: low 32 bits of WR header based on WR type (big endian)
1276169978Skmacy *
1277169978Skmacy *	Write a work request header and an associated SGL.  If the SGL is
1278169978Skmacy *	small enough to fit into one Tx descriptor it has already been written
1279169978Skmacy *	and we just need to write the WR header.  Otherwise we distribute the
1280169978Skmacy *	SGL across the number of descriptors it spans.
1281169978Skmacy */
1282169978Skmacystatic void
1283169978Skmacywrite_wr_hdr_sgl(unsigned int ndesc, struct tx_desc *txd, struct txq_state *txqs,
1284169978Skmacy    const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1285169978Skmacy    unsigned int sgl_flits, unsigned int wr_hi, unsigned int wr_lo)
1286169978Skmacy{
1287169978Skmacy
1288169978Skmacy	struct work_request_hdr *wrp = (struct work_request_hdr *)txd;
1289169978Skmacy	struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1290169978Skmacy
1291169978Skmacy	if (__predict_true(ndesc == 1)) {
1292194521Skmacy		set_wr_hdr(wrp, htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1293237263Snp		    V_WR_SGLSFLT(flits)) | wr_hi,
1294237263Snp		    htonl(V_WR_LEN(flits + sgl_flits) | V_WR_GEN(txqs->gen)) |
1295237263Snp		    wr_lo);
1296237263Snp
1297169978Skmacy		wr_gen2(txd, txqs->gen);
1298174708Skmacy
1299169978Skmacy	} else {
1300169978Skmacy		unsigned int ogen = txqs->gen;
1301169978Skmacy		const uint64_t *fp = (const uint64_t *)sgl;
1302169978Skmacy		struct work_request_hdr *wp = wrp;
1303169978Skmacy
1304194521Skmacy		wrp->wrh_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) |
1305169978Skmacy		    V_WR_SGLSFLT(flits)) | wr_hi;
1306169978Skmacy
1307169978Skmacy		while (sgl_flits) {
1308169978Skmacy			unsigned int avail = WR_FLITS - flits;
1309169978Skmacy
1310169978Skmacy			if (avail > sgl_flits)
1311169978Skmacy				avail = sgl_flits;
1312169978Skmacy			memcpy(&txd->flit[flits], fp, avail * sizeof(*fp));
1313169978Skmacy			sgl_flits -= avail;
1314169978Skmacy			ndesc--;
1315169978Skmacy			if (!sgl_flits)
1316169978Skmacy				break;
1317169978Skmacy
1318169978Skmacy			fp += avail;
1319169978Skmacy			txd++;
1320169978Skmacy			txsd++;
1321169978Skmacy			if (++txqs->pidx == txq->size) {
1322169978Skmacy				txqs->pidx = 0;
1323169978Skmacy				txqs->gen ^= 1;
1324169978Skmacy				txd = txq->desc;
1325169978Skmacy				txsd = txq->sdesc;
1326169978Skmacy			}
1327194521Skmacy
1328169978Skmacy			/*
1329169978Skmacy			 * when the head of the mbuf chain
1330169978Skmacy			 * is freed all clusters will be freed
1331169978Skmacy			 * with it
1332169978Skmacy			 */
1333169978Skmacy			wrp = (struct work_request_hdr *)txd;
1334194521Skmacy			wrp->wrh_hi = htonl(V_WR_DATATYPE(1) |
1335169978Skmacy			    V_WR_SGLSFLT(1)) | wr_hi;
1336194521Skmacy			wrp->wrh_lo = htonl(V_WR_LEN(min(WR_FLITS,
1337169978Skmacy				    sgl_flits + 1)) |
1338169978Skmacy			    V_WR_GEN(txqs->gen)) | wr_lo;
1339169978Skmacy			wr_gen2(txd, txqs->gen);
1340169978Skmacy			flits = 1;
1341169978Skmacy		}
1342194521Skmacy		wrp->wrh_hi |= htonl(F_WR_EOP);
1343169978Skmacy		wmb();
1344194521Skmacy		wp->wrh_lo = htonl(V_WR_LEN(WR_FLITS) | V_WR_GEN(ogen)) | wr_lo;
1345169978Skmacy		wr_gen2((struct tx_desc *)wp, ogen);
1346169978Skmacy	}
1347169978Skmacy}
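
/*
 * Illustrative example of the multi-descriptor path above (assuming
 * WR_FLITS == 16 purely for the arithmetic): a request with 3 header
 * flits and a 40-flit SGL spans three descriptors:
 *
 *	desc 0: flits 0-2 WR header + CPL, flits 3-15 SGL flits  0-12
 *	desc 1: flit 0 continuation header, flits 1-15 SGL flits 13-27
 *	desc 2: flit 0 continuation header, flits 1-12 SGL flits 28-39
 *
 * Only the final fragment gets F_WR_EOP, and the first descriptor's low
 * header word (with the original generation bit, ogen) is written last,
 * after the wmb(), so the SGE cannot see a partially written request.
 */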
1348169978Skmacy
1349204348Snp/* sizeof(*eh) + sizeof(*ip) + sizeof(*tcp) */
1350204348Snp#define TCPPKTHDRSIZE (ETHER_HDR_LEN + 20 + 20)
1351167514Skmacy
1352174708Skmacy#define GET_VTAG(cntrl, m) \
1353174708Skmacydo { \
1354174708Skmacy	if ((m)->m_flags & M_VLANTAG)					            \
1355174708Skmacy		cntrl |= F_TXPKT_VLAN_VLD | V_TXPKT_VLAN((m)->m_pkthdr.ether_vtag); \
1356174708Skmacy} while (0)
1357174708Skmacy
1358194521Skmacystatic int
1359194521Skmacyt3_encap(struct sge_qset *qs, struct mbuf **m)
1360167514Skmacy{
1361167514Skmacy	adapter_t *sc;
1362167514Skmacy	struct mbuf *m0;
1363167514Skmacy	struct sge_txq *txq;
1364167514Skmacy	struct txq_state txqs;
1365174708Skmacy	struct port_info *pi;
1366171868Skmacy	unsigned int ndesc, flits, cntrl, mlen;
1367172096Skmacy	int err, nsegs, tso_info = 0;
1368167514Skmacy
1369167514Skmacy	struct work_request_hdr *wrp;
1370167514Skmacy	struct tx_sw_desc *txsd;
1371174708Skmacy	struct sg_ent *sgp, *sgl;
1372167514Skmacy	uint32_t wr_hi, wr_lo, sgl_flits;
1373175347Skmacy	bus_dma_segment_t segs[TX_MAX_SEGS];
1374167514Skmacy
1375167514Skmacy	struct tx_desc *txd;
1376174708Skmacy
1377174708Skmacy	pi = qs->port;
1378174708Skmacy	sc = pi->adapter;
1379167514Skmacy	txq = &qs->txq[TXQ_ETH];
1380175347Skmacy	txd = &txq->desc[txq->pidx];
1381174708Skmacy	txsd = &txq->sdesc[txq->pidx];
1382174708Skmacy	sgl = txq->txq_sgl;
1383194521Skmacy
1384194521Skmacy	prefetch(txd);
1385174708Skmacy	m0 = *m;
1386204348Snp
1387194521Skmacy	mtx_assert(&qs->lock, MA_OWNED);
1388174708Skmacy	cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1389194521Skmacy	KASSERT(m0->m_flags & M_PKTHDR, ("not packet header"));
1390194521Skmacy
1391194521Skmacy	if  (m0->m_nextpkt == NULL && m0->m_next != NULL &&
1392194521Skmacy	    m0->m_pkthdr.csum_flags & (CSUM_TSO))
1393168644Skmacy		tso_info = V_LSO_MSS(m0->m_pkthdr.tso_segsz);
1394204274Snp
1395194521Skmacy	if (m0->m_nextpkt != NULL) {
1396195006Snp		busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1397194521Skmacy		ndesc = 1;
1398194521Skmacy		mlen = 0;
1399194521Skmacy	} else {
1400195006Snp		if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1401195006Snp		    &m0, segs, &nsegs))) {
1402194521Skmacy			if (cxgb_debug)
1403194521Skmacy				printf("%s: mapping failed, err=%d\n", __func__, err);
1404174708Skmacy			return (err);
1405194521Skmacy		}
1406194521Skmacy		mlen = m0->m_pkthdr.len;
1407194521Skmacy		ndesc = calc_tx_descs(m0, nsegs);
1408194521Skmacy	}
1409194521Skmacy	txq_prod(txq, ndesc, &txqs);
1410174708Skmacy
1411194521Skmacy	KASSERT(m0->m_pkthdr.len, ("empty packet nsegs=%d", nsegs));
1412194521Skmacy	txsd->m = m0;
1413194521Skmacy
1414194521Skmacy	if (m0->m_nextpkt != NULL) {
1415174708Skmacy		struct cpl_tx_pkt_batch *cpl_batch = (struct cpl_tx_pkt_batch *)txd;
1416174708Skmacy		int i, fidx;
1417174708Skmacy
1418194521Skmacy		if (nsegs > 7)
1419194521Skmacy			panic("trying to coalesce %d packets into one WR", nsegs);
1420194521Skmacy		txq->txq_coalesced += nsegs;
1421174708Skmacy		wrp = (struct work_request_hdr *)txd;
1422194521Skmacy		flits = nsegs*2 + 1;
1423174708Skmacy
1424194521Skmacy		for (fidx = 1, i = 0; i < nsegs; i++, fidx += 2) {
1425194521Skmacy			struct cpl_tx_pkt_batch_entry *cbe;
1426194521Skmacy			uint64_t flit;
1427194521Skmacy			uint32_t *hflit = (uint32_t *)&flit;
1428194521Skmacy			int cflags = m0->m_pkthdr.csum_flags;
1429174708Skmacy
1430174708Skmacy			cntrl = V_TXPKT_INTF(pi->txpkt_intf);
1431194521Skmacy			GET_VTAG(cntrl, m0);
1432174708Skmacy			cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1433194521Skmacy			if (__predict_false(!(cflags & CSUM_IP)))
1434180583Skmacy				cntrl |= F_TXPKT_IPCSUM_DIS;
1435237832Snp			if (__predict_false(!(cflags & (CSUM_TCP | CSUM_UDP |
1436237832Snp			    CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1437180583Skmacy				cntrl |= F_TXPKT_L4CSUM_DIS;
1438194521Skmacy
1439194521Skmacy			hflit[0] = htonl(cntrl);
1440194521Skmacy			hflit[1] = htonl(segs[i].ds_len | 0x80000000);
1441194521Skmacy			flit |= htobe64(1 << 24);
1442194521Skmacy			cbe = &cpl_batch->pkt_entry[i];
1443194521Skmacy			cbe->cntrl = hflit[0];
1444194521Skmacy			cbe->len = hflit[1];
1445174708Skmacy			cbe->addr = htobe64(segs[i].ds_addr);
1446174708Skmacy		}
1447174708Skmacy
1448194521Skmacy		wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) |
1449194521Skmacy		    V_WR_SGLSFLT(flits)) |
1450194521Skmacy		    htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1451194521Skmacy		wr_lo = htonl(V_WR_LEN(flits) |
1452194521Skmacy		    V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1453194521Skmacy		set_wr_hdr(wrp, wr_hi, wr_lo);
1454174708Skmacy		wmb();
1455204271Snp		ETHER_BPF_MTAP(pi->ifp, m0);
1456174708Skmacy		wr_gen2(txd, txqs.gen);
1457207688Snp		check_ring_tx_db(sc, txq, 0);
1458174708Skmacy		return (0);
1459174708Skmacy	} else if (tso_info) {
1460231317Snp		uint16_t eth_type;
1461174708Skmacy		struct cpl_tx_pkt_lso *hdr = (struct cpl_tx_pkt_lso *)txd;
1462204348Snp		struct ether_header *eh;
1463231317Snp		void *l3hdr;
1464167514Skmacy		struct tcphdr *tcp;
1465174708Skmacy
1466167514Skmacy		txd->flit[2] = 0;
1467180583Skmacy		GET_VTAG(cntrl, m0);
1468167514Skmacy		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT_LSO);
1469167514Skmacy		hdr->cntrl = htonl(cntrl);
1470174708Skmacy		hdr->len = htonl(mlen | 0x80000000);
1471167514Skmacy
1472204348Snp		if (__predict_false(mlen < TCPPKTHDRSIZE)) {
1473254804Sandre			printf("mbuf=%p,len=%d,tso_segsz=%d,csum_flags=%b,flags=%#x",
1474181653Skmacy			    m0, mlen, m0->m_pkthdr.tso_segsz,
1475254804Sandre			    (int)m0->m_pkthdr.csum_flags, CSUM_BITS, m0->m_flags);
1476181653Skmacy			panic("tx tso packet too small");
1477181653Skmacy		}
1478174708Skmacy
1479181653Skmacy		/* Make sure that ether, ip, tcp headers are all in m0 */
1480204348Snp		if (__predict_false(m0->m_len < TCPPKTHDRSIZE)) {
1481204348Snp			m0 = m_pullup(m0, TCPPKTHDRSIZE);
1482181653Skmacy			if (__predict_false(m0 == NULL)) {
1483181653Skmacy				/* XXX panic probably an overreaction */
1484181653Skmacy				panic("couldn't fit header into mbuf");
1485181653Skmacy			}
1486181653Skmacy		}
1487181653Skmacy
1488204348Snp		eh = mtod(m0, struct ether_header *);
1489231317Snp		eth_type = eh->ether_type;
1490231317Snp		if (eth_type == htons(ETHERTYPE_VLAN)) {
1491231317Snp			struct ether_vlan_header *evh = (void *)eh;
1492231317Snp
1493231317Snp			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II_VLAN);
1494231317Snp			l3hdr = evh + 1;
1495231317Snp			eth_type = evh->evl_proto;
1496167514Skmacy		} else {
1497231317Snp			tso_info |= V_LSO_ETH_TYPE(CPL_ETH_II);
1498231317Snp			l3hdr = eh + 1;
1499167514Skmacy		}
1500168737Skmacy
1501231317Snp		if (eth_type == htons(ETHERTYPE_IP)) {
1502231317Snp			struct ip *ip = l3hdr;
1503231317Snp
1504231317Snp			tso_info |= V_LSO_IPHDR_WORDS(ip->ip_hl);
1505231317Snp			tcp = (struct tcphdr *)(ip + 1);
1506231317Snp		} else if (eth_type == htons(ETHERTYPE_IPV6)) {
1507231317Snp			struct ip6_hdr *ip6 = l3hdr;
1508231317Snp
1509231317Snp			KASSERT(ip6->ip6_nxt == IPPROTO_TCP,
1510231317Snp			    ("%s: CSUM_TSO with ip6_nxt %d",
1511231317Snp			    __func__, ip6->ip6_nxt));
1512231317Snp
1513231317Snp			tso_info |= F_LSO_IPV6;
1514231317Snp			tso_info |= V_LSO_IPHDR_WORDS(sizeof(*ip6) >> 2);
1515231317Snp			tcp = (struct tcphdr *)(ip6 + 1);
1516231317Snp		} else
1517231317Snp			panic("%s: CSUM_TSO but neither ip nor ip6", __func__);
1518231317Snp
1519231317Snp		tso_info |= V_LSO_TCPHDR_WORDS(tcp->th_off);
1520167514Skmacy		hdr->lso_info = htonl(tso_info);
1521180583Skmacy
1522180583Skmacy		if (__predict_false(mlen <= PIO_LEN)) {
1523204348Snp			/*
1524204348Snp			 * Packet is not undersized but still fits in PIO_LEN;
1525183062Skmacy			 * this indicates a TSO bug at the higher levels.
1526183059Skmacy			 */
1527194521Skmacy			txsd->m = NULL;
1528180583Skmacy			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[3]);
1529180583Skmacy			flits = (mlen + 7) / 8 + 3;
1530194521Skmacy			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1531180583Skmacy					  V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1532180583Skmacy					  F_WR_SOP | F_WR_EOP | txqs.compl);
1533194521Skmacy			wr_lo = htonl(V_WR_LEN(flits) |
1534194521Skmacy			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1535194521Skmacy			set_wr_hdr(&hdr->wr, wr_hi, wr_lo);
1536180583Skmacy			wmb();
1537204271Snp			ETHER_BPF_MTAP(pi->ifp, m0);
1538180583Skmacy			wr_gen2(txd, txqs.gen);
1539207688Snp			check_ring_tx_db(sc, txq, 0);
1540204271Snp			m_freem(m0);
1541180583Skmacy			return (0);
1542180583Skmacy		}
1543167514Skmacy		flits = 3;
1544167514Skmacy	} else {
1545174708Skmacy		struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)txd;
1546194521Skmacy
1547174708Skmacy		GET_VTAG(cntrl, m0);
1548167514Skmacy		cntrl |= V_TXPKT_OPCODE(CPL_TX_PKT);
1549180583Skmacy		if (__predict_false(!(m0->m_pkthdr.csum_flags & CSUM_IP)))
1550180583Skmacy			cntrl |= F_TXPKT_IPCSUM_DIS;
1551237832Snp		if (__predict_false(!(m0->m_pkthdr.csum_flags & (CSUM_TCP |
1552237832Snp		    CSUM_UDP | CSUM_UDP_IPV6 | CSUM_TCP_IPV6))))
1553180583Skmacy			cntrl |= F_TXPKT_L4CSUM_DIS;
1554167514Skmacy		cpl->cntrl = htonl(cntrl);
1555174708Skmacy		cpl->len = htonl(mlen | 0x80000000);
1556174708Skmacy
1557175340Skmacy		if (mlen <= PIO_LEN) {
1558194521Skmacy			txsd->m = NULL;
1559175340Skmacy			m_copydata(m0, 0, mlen, (caddr_t)&txd->flit[2]);
1560167514Skmacy			flits = (mlen + 7) / 8 + 2;
1561194521Skmacy
1562194521Skmacy			wr_hi = htonl(V_WR_BCNTLFLT(mlen & 7) |
1563194521Skmacy			    V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) |
1564167514Skmacy					  F_WR_SOP | F_WR_EOP | txqs.compl);
1565194521Skmacy			wr_lo = htonl(V_WR_LEN(flits) |
1566194521Skmacy			    V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1567194521Skmacy			set_wr_hdr(&cpl->wr, wr_hi, wr_lo);
1568167514Skmacy			wmb();
1569204271Snp			ETHER_BPF_MTAP(pi->ifp, m0);
1570167514Skmacy			wr_gen2(txd, txqs.gen);
1571207688Snp			check_ring_tx_db(sc, txq, 0);
1572204271Snp			m_freem(m0);
1573167514Skmacy			return (0);
1574167514Skmacy		}
1575167514Skmacy		flits = 2;
1576167514Skmacy	}
1577174708Skmacy	wrp = (struct work_request_hdr *)txd;
1578169978Skmacy	sgp = (ndesc == 1) ? (struct sg_ent *)&txd->flit[flits] : sgl;
1579167514Skmacy	make_sgl(sgp, segs, nsegs);
1580167514Skmacy
1581167514Skmacy	sgl_flits = sgl_len(nsegs);
1582167514Skmacy
1583204271Snp	ETHER_BPF_MTAP(pi->ifp, m0);
1584204271Snp
1585194521Skmacy	KASSERT(ndesc <= 4, ("ndesc too large %d", ndesc));
1586167514Skmacy	wr_hi = htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | txqs.compl);
1587167514Skmacy	wr_lo = htonl(V_WR_TID(txq->token));
1588194521Skmacy	write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1589194521Skmacy	    sgl_flits, wr_hi, wr_lo);
1590207688Snp	check_ring_tx_db(sc, txq, 0);
1591167514Skmacy
1592194521Skmacy	return (0);
1593194521Skmacy}
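
/*
 * To summarize the three WR shapes t3_encap() can emit: a CPL_TX_PKT_BATCH
 * carrying up to seven coalesced packets (an m_nextpkt chain), a
 * CPL_TX_PKT_LSO for TSO packets, and a plain CPL_TX_PKT otherwise.  The
 * latter two are written as immediate data when the frame fits in PIO_LEN
 * bytes and as a gather list via write_wr_hdr_sgl() when it does not.
 */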
1594194521Skmacy
1595194521Skmacyvoid
1596194521Skmacycxgb_tx_watchdog(void *arg)
1597194521Skmacy{
1598194521Skmacy	struct sge_qset *qs = arg;
1599194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1600194521Skmacy
1601194521Skmacy	if (qs->coalescing != 0 &&
1602194521Skmacy	    (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1603194521Skmacy	    TXQ_RING_EMPTY(qs))
1604194521Skmacy		qs->coalescing = 0;
1605194521Skmacy	else if (qs->coalescing == 0 &&
1606194521Skmacy	    (txq->in_use >= cxgb_tx_coalesce_enable_start))
1607194521Skmacy		qs->coalescing = 1;
1608194521Skmacy	if (TXQ_TRYLOCK(qs)) {
1609194521Skmacy		qs->qs_flags |= QS_FLUSHING;
1610194521Skmacy		cxgb_start_locked(qs);
1611194521Skmacy		qs->qs_flags &= ~QS_FLUSHING;
1612194521Skmacy		TXQ_UNLOCK(qs);
1613174708Skmacy	}
1614194521Skmacy	if (qs->port->ifp->if_drv_flags & IFF_DRV_RUNNING)
1615194521Skmacy		callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1616194521Skmacy		    qs, txq->txq_watchdog.c_cpu);
1617194521Skmacy}
1618194521Skmacy
1619194521Skmacystatic void
1620194521Skmacycxgb_tx_timeout(void *arg)
1621194521Skmacy{
1622194521Skmacy	struct sge_qset *qs = arg;
1623194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1624194521Skmacy
1625194521Skmacy	if (qs->coalescing == 0 && (txq->in_use >= (txq->size >> 3)))
1626194521Skmacy		qs->coalescing = 1;
1627194521Skmacy	if (TXQ_TRYLOCK(qs)) {
1628194521Skmacy		qs->qs_flags |= QS_TIMEOUT;
1629194521Skmacy		cxgb_start_locked(qs);
1630194521Skmacy		qs->qs_flags &= ~QS_TIMEOUT;
1631194521Skmacy		TXQ_UNLOCK(qs);
1632194521Skmacy	}
1633194521Skmacy}
1634194521Skmacy
1635194521Skmacystatic void
1636194521Skmacycxgb_start_locked(struct sge_qset *qs)
1637194521Skmacy{
1638194521Skmacy	struct mbuf *m_head = NULL;
1639194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1640194521Skmacy	struct port_info *pi = qs->port;
1641194521Skmacy	struct ifnet *ifp = pi->ifp;
1642194521Skmacy
1643194521Skmacy	if (qs->qs_flags & (QS_FLUSHING|QS_TIMEOUT))
1644194521Skmacy		reclaim_completed_tx(qs, 0, TXQ_ETH);
1645194521Skmacy
1646194521Skmacy	if (!pi->link_config.link_ok) {
1647194521Skmacy		TXQ_RING_FLUSH(qs);
1648194521Skmacy		return;
1649194521Skmacy	}
1650194521Skmacy	TXQ_LOCK_ASSERT(qs);
1651207688Snp	while (!TXQ_RING_EMPTY(qs) && (ifp->if_drv_flags & IFF_DRV_RUNNING) &&
1652194521Skmacy	    pi->link_config.link_ok) {
1653194521Skmacy		reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1654194521Skmacy
1655205949Snp		if (txq->size - txq->in_use <= TX_MAX_DESC)
1656205949Snp			break;
1657205949Snp
1658194521Skmacy		if ((m_head = cxgb_dequeue(qs)) == NULL)
1659194521Skmacy			break;
1660194521Skmacy		/*
1661194521Skmacy		 * Encapsulation can modify our pointer, and/or make it
1662194521Skmacy		 * NULL on failure.  In that event, we can't requeue.
1663194521Skmacy		 */
1664194521Skmacy		if (t3_encap(qs, &m_head) || m_head == NULL)
1665194521Skmacy			break;
1666194521Skmacy
1667194521Skmacy		m_head = NULL;
1668194521Skmacy	}
1669207688Snp
1670207688Snp	if (txq->db_pending)
1671207688Snp		check_ring_tx_db(pi->adapter, txq, 1);
1672207688Snp
1673194521Skmacy	if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1674194521Skmacy	    pi->link_config.link_ok)
1675194521Skmacy		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1676194521Skmacy		    qs, txq->txq_timer.c_cpu);
1677194521Skmacy	if (m_head != NULL)
1678194521Skmacy		m_freem(m_head);
1679194521Skmacy}
1680194521Skmacy
1681194521Skmacystatic int
1682194521Skmacycxgb_transmit_locked(struct ifnet *ifp, struct sge_qset *qs, struct mbuf *m)
1683194521Skmacy{
1684194521Skmacy	struct port_info *pi = qs->port;
1685194521Skmacy	struct sge_txq *txq = &qs->txq[TXQ_ETH];
1686194521Skmacy	struct buf_ring *br = txq->txq_mr;
1687194521Skmacy	int error, avail;
1688194521Skmacy
1689194521Skmacy	avail = txq->size - txq->in_use;
1690194521Skmacy	TXQ_LOCK_ASSERT(qs);
1691194521Skmacy
1692194521Skmacy	/*
1693194521Skmacy	 * We can only do a direct transmit if the following are true:
1694194521Skmacy	 * - we aren't coalescing (ring < 3/4 full)
1695194521Skmacy	 * - the link is up -- checked in caller
1696194521Skmacy	 * - there are no packets enqueued already
1697194521Skmacy	 * - there is space in hardware transmit queue
1698194521Skmacy	 */
1699194521Skmacy	if (check_pkt_coalesce(qs) == 0 &&
1700205949Snp	    !TXQ_RING_NEEDS_ENQUEUE(qs) && avail > TX_MAX_DESC) {
1701194521Skmacy		if (t3_encap(qs, &m)) {
1702194521Skmacy			if (m != NULL &&
1703194521Skmacy			    (error = drbr_enqueue(ifp, br, m)) != 0)
1704194521Skmacy				return (error);
1705194521Skmacy		} else {
1706207688Snp			if (txq->db_pending)
1707207688Snp				check_ring_tx_db(pi->adapter, txq, 1);
1708207688Snp
1709194521Skmacy			/*
1710194521Skmacy			 * We've bypassed the buf ring so we need to update
1711194521Skmacy			 * the stats directly
1712194521Skmacy			 */
1713194521Skmacy			txq->txq_direct_packets++;
1714194521Skmacy			txq->txq_direct_bytes += m->m_pkthdr.len;
1715194521Skmacy		}
1716194521Skmacy	} else if ((error = drbr_enqueue(ifp, br, m)) != 0)
1717194521Skmacy		return (error);
1718194521Skmacy
1719194521Skmacy	reclaim_completed_tx(qs, cxgb_tx_reclaim_threshold, TXQ_ETH);
1720194521Skmacy	if (!TXQ_RING_EMPTY(qs) && pi->link_config.link_ok &&
1721194521Skmacy	    (!check_pkt_coalesce(qs) || (drbr_inuse(ifp, br) >= 7)))
1722194521Skmacy		cxgb_start_locked(qs);
1723194521Skmacy	else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1724194521Skmacy		callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1725194521Skmacy		    qs, txq->txq_timer.c_cpu);
1726167514Skmacy	return (0);
1727167514Skmacy}
1728167514Skmacy
1729194521Skmacyint
1730194521Skmacycxgb_transmit(struct ifnet *ifp, struct mbuf *m)
1731194521Skmacy{
1732194521Skmacy	struct sge_qset *qs;
1733194521Skmacy	struct port_info *pi = ifp->if_softc;
1734194521Skmacy	int error, qidx = pi->first_qset;
1735167514Skmacy
1736194521Skmacy	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
1737194521Skmacy	    !pi->link_config.link_ok) {
1738194521Skmacy		m_freem(m);
1739194521Skmacy		return (0);
1740194521Skmacy	}
1741281955Shiren
1742281955Shiren	/* check if flowid is set */
1743281955Shiren	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
1744194521Skmacy		qidx = (m->m_pkthdr.flowid % pi->nqsets) + pi->first_qset;
1745194521Skmacy
1746194521Skmacy	qs = &pi->adapter->sge.qs[qidx];
1747194521Skmacy
1748194521Skmacy	if (TXQ_TRYLOCK(qs)) {
1749194521Skmacy		/* XXX running */
1750194521Skmacy		error = cxgb_transmit_locked(ifp, qs, m);
1751194521Skmacy		TXQ_UNLOCK(qs);
1752194521Skmacy	} else
1753194521Skmacy		error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1754194521Skmacy	return (error);
1755194521Skmacy}
1756194521Skmacy
1757194521Skmacyvoid
1758194521Skmacycxgb_qflush(struct ifnet *ifp)
1759194521Skmacy{
1760194521Skmacy	/*
1761194521Skmacy	 * Flush any enqueued mbufs in the buf_rings and in the
1762194521Skmacy	 * transmit queues.  A no-op for now.
1763194521Skmacy	 */
1765194521Skmacy	return;
1766194521Skmacy}
1767194521Skmacy
1768167514Skmacy/**
1769167514Skmacy *	write_imm - write a packet into a Tx descriptor as immediate data
1770167514Skmacy *	@d: the Tx descriptor to write
1771167514Skmacy *	@src: the packet data
1772167514Skmacy *	@len: the length of packet data to write as immediate data
1773167514Skmacy *	@gen: the generation bit value to write
1774167514Skmacy *
1775167514Skmacy *	Writes a packet as immediate data into a Tx descriptor.  The packet
1776167514Skmacy *	contains a work request at its beginning.  We must write the packet
1777167514Skmacy *	carefully so the SGE doesn't read accidentally before it's written in
1778167514Skmacy *	its entirety.
1779167514Skmacy */
1780169978Skmacystatic __inline void
1781237263Snpwrite_imm(struct tx_desc *d, caddr_t src,
1782169978Skmacy	  unsigned int len, unsigned int gen)
1783167514Skmacy{
1784237263Snp	struct work_request_hdr *from = (struct work_request_hdr *)src;
1785167514Skmacy	struct work_request_hdr *to = (struct work_request_hdr *)d;
1786194521Skmacy	uint32_t wr_hi, wr_lo;
1787167514Skmacy
1788237263Snp	KASSERT(len <= WR_LEN && len >= sizeof(*from),
1789237263Snp	    ("%s: invalid len %d", __func__, len));
1790174708Skmacy
1791167514Skmacy	memcpy(&to[1], &from[1], len - sizeof(*from));
1792194521Skmacy	wr_hi = from->wrh_hi | htonl(F_WR_SOP | F_WR_EOP |
1793237263Snp	    V_WR_BCNTLFLT(len & 7));
1794237263Snp	wr_lo = from->wrh_lo | htonl(V_WR_GEN(gen) | V_WR_LEN((len + 7) / 8));
1795194521Skmacy	set_wr_hdr(to, wr_hi, wr_lo);
1796167514Skmacy	wmb();
1797167514Skmacy	wr_gen2(d, gen);
1798167514Skmacy}
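
/*
 * Worked example of the length encoding above (illustrative values): for
 * an immediate WR of len == 44 bytes, V_WR_BCNTLFLT(44 & 7) records the 4
 * bytes past the last full flit and V_WR_LEN((44 + 7) / 8) == V_WR_LEN(6)
 * counts the six 8-byte flits occupied, telling the SGE exactly how much
 * immediate data follows the header.
 */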
1799167514Skmacy
1800167514Skmacy/**
1801167514Skmacy *	check_desc_avail - check descriptor availability on a send queue
1802167514Skmacy *	@adap: the adapter
1803167514Skmacy *	@q: the TX queue
1804167514Skmacy *	@m: the packet needing the descriptors
1805167514Skmacy *	@ndesc: the number of Tx descriptors needed
1806167514Skmacy *	@qid: the Tx queue number in its queue set (TXQ_OFLD or TXQ_CTRL)
1807167514Skmacy *
1808167514Skmacy *	Checks if the requested number of Tx descriptors is available on an
1809167514Skmacy *	SGE send queue.  If the queue is already suspended or not enough
1810167514Skmacy *	descriptors are available the packet is queued for later transmission.
1811167514Skmacy *	Must be called with the Tx queue locked.
1812167514Skmacy *
1813167514Skmacy *	Returns 0 if enough descriptors are available, 1 if there aren't
1814167514Skmacy *	enough descriptors and the packet has been queued, and 2 if the caller
1815167514Skmacy *	needs to retry because there weren't enough descriptors at the
1816167514Skmacy *	beginning of the call but some freed up in the meantime.
1817167514Skmacy */
1818167514Skmacystatic __inline int
1819167514Skmacycheck_desc_avail(adapter_t *adap, struct sge_txq *q,
1820169978Skmacy		 struct mbuf *m, unsigned int ndesc,
1821169978Skmacy		 unsigned int qid)
1822167514Skmacy{
1823167514Skmacy	/*
1824167514Skmacy	 * XXX We currently only use this for checking the control queue.
1825167514Skmacy	 * The control queue is only used for binding qsets, which happens
1826167514Skmacy	 * at init time, so we are guaranteed enough descriptors.
1827167514Skmacy	 */
1828169978Skmacy	if (__predict_false(!mbufq_empty(&q->sendq))) {
1829169978Skmacyaddq_exit:	mbufq_tail(&q->sendq, m);
1830167514Skmacy		return 1;
1831167514Skmacy	}
1832167514Skmacy	if (__predict_false(q->size - q->in_use < ndesc)) {
1833167514Skmacy
1834167514Skmacy		struct sge_qset *qs = txq_to_qset(q, qid);
1835167514Skmacy
1836169978Skmacy		setbit(&qs->txq_stopped, qid);
1837167514Skmacy		if (should_restart_tx(q) &&
1838167514Skmacy		    test_and_clear_bit(qid, &qs->txq_stopped))
1839167514Skmacy			return 2;
1840167514Skmacy
1841167514Skmacy		q->stops++;
1842167514Skmacy		goto addq_exit;
1843167514Skmacy	}
1844167514Skmacy	return 0;
1845167514Skmacy}
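
/*
 * A minimal sketch of the calling convention (ctrl_xmit() and ofld_xmit()
 * below are the real users):
 *
 *	again:	reclaim_completed_tx_imm(q);
 *		ret = check_desc_avail(adap, q, m, ndesc, qid);
 *		if (ret == 1)
 *			return (...);	(m was queued; a restart task resumes it)
 *		if (ret == 2)
 *			goto again;	(descriptors freed up in the meantime)
 *
 * and on ret == 0 the caller owns the next ndesc descriptors.
 */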
1846167514Skmacy
1847167514Skmacy
1848167514Skmacy/**
1849167514Skmacy *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
1850167514Skmacy *	@q: the SGE control Tx queue
1851167514Skmacy *
1852167514Skmacy *	This is a variant of reclaim_completed_tx() that is used for Tx queues
1853167514Skmacy *	that send only immediate data (presently just the control queues) and
1854169978Skmacy *	thus do not have any mbufs.
1855167514Skmacy */
1856167514Skmacystatic __inline void
1857167514Skmacyreclaim_completed_tx_imm(struct sge_txq *q)
1858167514Skmacy{
1859167514Skmacy	unsigned int reclaim = q->processed - q->cleaned;
1860167514Skmacy
1861167514Skmacy	q->in_use -= reclaim;
1862167514Skmacy	q->cleaned += reclaim;
1863167514Skmacy}
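
/*
 * Example with illustrative numbers: if the hardware has reported
 * q->processed == 103 completions while q->cleaned == 100, reclaim == 3
 * descriptors are handed back to the queue.  No mbufs are freed because
 * immediate-data queues copy their payload into the descriptors
 * themselves.
 */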
1864167514Skmacy
1865167514Skmacy/**
1866167514Skmacy *	ctrl_xmit - send a packet through an SGE control Tx queue
1867167514Skmacy *	@adap: the adapter
1868167514Skmacy *	@qs: the queue set containing the control queue
1869167514Skmacy *	@m: the packet
1870167514Skmacy *
1871167514Skmacy *	Send a packet through an SGE control Tx queue.  Packets sent through
1872167514Skmacy *	a control queue must fit entirely as immediate data in a single Tx
1873167514Skmacy *	descriptor and have no page fragments.
1874167514Skmacy */
1875167514Skmacystatic int
1876194521Skmacyctrl_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
1877167514Skmacy{
1878167514Skmacy	int ret;
1879170654Skmacy	struct work_request_hdr *wrp = mtod(m, struct work_request_hdr *);
1880194521Skmacy	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1881194521Skmacy
1882237263Snp	KASSERT(m->m_len <= WR_LEN, ("%s: bad tx data", __func__));
1883237263Snp
1884194521Skmacy	wrp->wrh_hi |= htonl(F_WR_SOP | F_WR_EOP);
1885194521Skmacy	wrp->wrh_lo = htonl(V_WR_TID(q->token));
1886167514Skmacy
1887194521Skmacy	TXQ_LOCK(qs);
1888167514Skmacyagain:	reclaim_completed_tx_imm(q);
1889167514Skmacy
1890167514Skmacy	ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1891167514Skmacy	if (__predict_false(ret)) {
1892167514Skmacy		if (ret == 1) {
1893194521Skmacy			TXQ_UNLOCK(qs);
1894174708Skmacy			return (ENOSPC);
1895167514Skmacy		}
1896167514Skmacy		goto again;
1897167514Skmacy	}
1898237263Snp	write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1899174708Skmacy
1900167514Skmacy	q->in_use++;
1901167514Skmacy	if (++q->pidx >= q->size) {
1902167514Skmacy		q->pidx = 0;
1903167514Skmacy		q->gen ^= 1;
1904167514Skmacy	}
1905194521Skmacy	TXQ_UNLOCK(qs);
1906197043Snp	wmb();
1907167514Skmacy	t3_write_reg(adap, A_SG_KDOORBELL,
1908237263Snp	    F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1909237263Snp
1910237263Snp	m_free(m);
1911167514Skmacy	return (0);
1912167514Skmacy}
1913167514Skmacy
1914169978Skmacy
1915167514Skmacy/**
1916167514Skmacy *	restart_ctrlq - restart a suspended control queue
1917167514Skmacy *	@qs: the queue set containing the control queue
1918167514Skmacy *
1919167514Skmacy *	Resumes transmission on a suspended Tx control queue.
1920167514Skmacy */
1921167514Skmacystatic void
1922169978Skmacyrestart_ctrlq(void *data, int npending)
1923167514Skmacy{
1924167514Skmacy	struct mbuf *m;
1925167514Skmacy	struct sge_qset *qs = (struct sge_qset *)data;
1926167514Skmacy	struct sge_txq *q = &qs->txq[TXQ_CTRL];
1927167514Skmacy	adapter_t *adap = qs->port->adapter;
1928167514Skmacy
1929194521Skmacy	TXQ_LOCK(qs);
1930167514Skmacyagain:	reclaim_completed_tx_imm(q);
1931169978Skmacy
1932167514Skmacy	while (q->in_use < q->size &&
1933169978Skmacy	       (m = mbufq_dequeue(&q->sendq)) != NULL) {
1934167514Skmacy
1935237263Snp		write_imm(&q->desc[q->pidx], m->m_data, m->m_len, q->gen);
1936237263Snp		m_free(m);
1937167514Skmacy
1938167514Skmacy		if (++q->pidx >= q->size) {
1939167514Skmacy			q->pidx = 0;
1940167514Skmacy			q->gen ^= 1;
1941167514Skmacy		}
1942167514Skmacy		q->in_use++;
1943167514Skmacy	}
1944169978Skmacy	if (!mbufq_empty(&q->sendq)) {
1945169978Skmacy		setbit(&qs->txq_stopped, TXQ_CTRL);
1946167514Skmacy
1947167514Skmacy		if (should_restart_tx(q) &&
1948167514Skmacy		    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1949167514Skmacy			goto again;
1950167514Skmacy		q->stops++;
1951167514Skmacy	}
1952194521Skmacy	TXQ_UNLOCK(qs);
1953167514Skmacy	t3_write_reg(adap, A_SG_KDOORBELL,
1954167514Skmacy		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1955167514Skmacy}
1956167514Skmacy
1957169978Skmacy
1958167514Skmacy/*
1959167514Skmacy * Send a management message through control queue 0
1960167514Skmacy */
1961167514Skmacyint
1962167514Skmacyt3_mgmt_tx(struct adapter *adap, struct mbuf *m)
1963167514Skmacy{
1964194521Skmacy	return ctrl_xmit(adap, &adap->sge.qs[0], m);
1965167514Skmacy}
1966167514Skmacy
1967167514Skmacy/**
1968167514Skmacy *	free_qset - free the resources of an SGE queue set
1969167514Skmacy *	@sc: the controller owning the queue set
1970167514Skmacy *	@q: the queue set
1971167514Skmacy *
1972167514Skmacy *	Release the HW and SW resources associated with an SGE queue set, such
1973167514Skmacy *	as HW contexts, packet buffers, and descriptor rings.  Traffic to the
1974167514Skmacy *	queue set must be quiesced prior to calling this.
1975167514Skmacy */
1976194521Skmacystatic void
1977167514Skmacyt3_free_qset(adapter_t *sc, struct sge_qset *q)
1978167514Skmacy{
1979167514Skmacy	int i;
1980174708Skmacy
1981194521Skmacy	reclaim_completed_tx(q, 0, TXQ_ETH);
1982205950Snp	if (q->txq[TXQ_ETH].txq_mr != NULL)
1983205950Snp		buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1984205950Snp	if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1985205950Snp		ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1986205950Snp		free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
1987185165Skmacy	}
1988205950Snp
1989167514Skmacy	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1990167514Skmacy		if (q->fl[i].desc) {
1991176472Skmacy			mtx_lock_spin(&sc->sge.reg_lock);
1992167514Skmacy			t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1993176472Skmacy			mtx_unlock_spin(&sc->sge.reg_lock);
1994167514Skmacy			bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1995167514Skmacy			bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1996167514Skmacy					q->fl[i].desc_map);
1997167514Skmacy			bus_dma_tag_destroy(q->fl[i].desc_tag);
1998168351Skmacy			bus_dma_tag_destroy(q->fl[i].entry_tag);
1999167514Skmacy		}
2000167514Skmacy		if (q->fl[i].sdesc) {
2001167514Skmacy			free_rx_bufs(sc, &q->fl[i]);
2002167514Skmacy			free(q->fl[i].sdesc, M_DEVBUF);
2003167514Skmacy		}
2004167514Skmacy	}
2005167514Skmacy
2006194521Skmacy	mtx_unlock(&q->lock);
2007194521Skmacy	MTX_DESTROY(&q->lock);
2008170869Skmacy	for (i = 0; i < SGE_TXQ_PER_SET; i++) {
2009167514Skmacy		if (q->txq[i].desc) {
2010176472Skmacy			mtx_lock_spin(&sc->sge.reg_lock);
2011167514Skmacy			t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2012176472Skmacy			mtx_unlock_spin(&sc->sge.reg_lock);
2013167514Skmacy			bus_dmamap_unload(q->txq[i].desc_tag,
2014167514Skmacy					q->txq[i].desc_map);
2015167514Skmacy			bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2016167514Skmacy					q->txq[i].desc_map);
2017167514Skmacy			bus_dma_tag_destroy(q->txq[i].desc_tag);
2018168351Skmacy			bus_dma_tag_destroy(q->txq[i].entry_tag);
2019167514Skmacy		}
2020167514Skmacy		if (q->txq[i].sdesc) {
2021167514Skmacy			free(q->txq[i].sdesc, M_DEVBUF);
2022167514Skmacy		}
2023167514Skmacy	}
2024167514Skmacy
2025167514Skmacy	if (q->rspq.desc) {
2026176472Skmacy		mtx_lock_spin(&sc->sge.reg_lock);
2027167514Skmacy		t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
2028176472Skmacy		mtx_unlock_spin(&sc->sge.reg_lock);
2029167514Skmacy
2030167514Skmacy		bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
2031167514Skmacy		bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
2032167514Skmacy			        q->rspq.desc_map);
2033167514Skmacy		bus_dma_tag_destroy(q->rspq.desc_tag);
2034170869Skmacy		MTX_DESTROY(&q->rspq.lock);
2035167514Skmacy	}
2036168351Skmacy
2037235963Sbz#if defined(INET6) || defined(INET)
2038181616Skmacy	tcp_lro_free(&q->lro.ctrl);
2039205947Snp#endif
2040181616Skmacy
2041167514Skmacy	bzero(q, sizeof(*q));
2042167514Skmacy}
2043167514Skmacy
2044167514Skmacy/**
2045167514Skmacy *	t3_free_sge_resources - free SGE resources
2046167514Skmacy *	@sc: the adapter softc
2047167514Skmacy *
2048167514Skmacy *	Frees resources used by the SGE queue sets.
2049167514Skmacy */
2050167514Skmacyvoid
2051219946Snpt3_free_sge_resources(adapter_t *sc, int nqsets)
2052167514Skmacy{
2053219946Snp	int i;
2054174708Skmacy
2055194521Skmacy	for (i = 0; i < nqsets; ++i) {
2056194521Skmacy		TXQ_LOCK(&sc->sge.qs[i]);
2057167514Skmacy		t3_free_qset(sc, &sc->sge.qs[i]);
2058194521Skmacy	}
2059167514Skmacy}
2060167514Skmacy
2061167514Skmacy/**
2062167514Skmacy *	t3_sge_start - enable SGE
2063167514Skmacy *	@sc: the controller softc
2064167514Skmacy *
2065167514Skmacy *	Enables the SGE for DMAs.  This is the last step in starting packet
2066167514Skmacy *	transfers.
2067167514Skmacy */
2068167514Skmacyvoid
2069167514Skmacyt3_sge_start(adapter_t *sc)
2070167514Skmacy{
2071167514Skmacy	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, F_GLOBALENABLE);
2072167514Skmacy}
2073167514Skmacy
2074169978Skmacy/**
2075169978Skmacy *	t3_sge_stop - disable SGE operation
2076169978Skmacy *	@sc: the adapter
2077169978Skmacy *
2078169978Skmacy *	Disables the DMA engine.  This can be called in emergencies (e.g.,
2079169978Skmacy *	from error interrupts) or from normal process context.  In the latter
2080169978Skmacy *	case it also disables any pending queue restart tasklets.  Note that
2081169978Skmacy *	if it is called in interrupt context it cannot disable the restart
2082169978Skmacy *	tasklets as it cannot wait, however the tasklets will have no effect
2083169978Skmacy *	since the doorbells are disabled and the driver will call this again
2084169978Skmacy *	later from process context, at which time the tasklets will be stopped
2085169978Skmacy *	if they are still running.
2086169978Skmacy */
2087169978Skmacyvoid
2088169978Skmacyt3_sge_stop(adapter_t *sc)
2089169978Skmacy{
2090170869Skmacy	int i, nqsets;
2091170869Skmacy
2092169978Skmacy	t3_set_reg_field(sc, A_SG_CONTROL, F_GLOBALENABLE, 0);
2093167514Skmacy
2094170654Skmacy	if (sc->tq == NULL)
2095170654Skmacy		return;
2096170654Skmacy
2097170869Skmacy	for (nqsets = i = 0; i < sc->params.nports; i++)
2098170869Skmacy		nqsets += sc->port[i].nqsets;
2099175340Skmacy#ifdef notyet
2100175340Skmacy	/*
2101175340Skmacy	 *
2102175340Skmacy	 * XXX
2103175340Skmacy	 */
2104170869Skmacy	for (i = 0; i < nqsets; ++i) {
2105169978Skmacy		struct sge_qset *qs = &sc->sge.qs[i];
2106169978Skmacy
2107171335Skmacy		taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2108171335Skmacy		taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2109169978Skmacy	}
2110175340Skmacy#endif
2111169978Skmacy}
2112169978Skmacy
2113167514Skmacy/**
2114174708Skmacy *	t3_free_tx_desc - reclaims Tx descriptors and their buffers
2115167514Skmacy *	@qs: the queue set that owns the Tx queue
2116167514Skmacy *	@reclaimable: the number of descriptors to reclaim
2117174708Skmacy *	@queue: the index of the Tx queue within the set (e.g. TXQ_ETH)
2118174708Skmacy *
2119174708Skmacy *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
2120167514Skmacy *	Tx buffers.  Called with the Tx queue lock held.
2121167514Skmacy */
2126174708Skmacyvoid
2127194521Skmacyt3_free_tx_desc(struct sge_qset *qs, int reclaimable, int queue)
2128167514Skmacy{
2129174708Skmacy	struct tx_sw_desc *txsd;
2130194521Skmacy	unsigned int cidx, mask;
2131194521Skmacy	struct sge_txq *q = &qs->txq[queue];
2132194521Skmacy
2133167514Skmacy#ifdef T3_TRACE
2134167514Skmacy	T3_TRACE2(sc->tb[q->cntxt_id & 7],
2135174708Skmacy		  "reclaiming %u Tx descriptors at cidx %u", reclaimable, cidx);
2136167514Skmacy#endif
2137174708Skmacy	cidx = q->cidx;
2138194521Skmacy	mask = q->size - 1;
2139174708Skmacy	txsd = &q->sdesc[cidx];
2140194521Skmacy
2141194521Skmacy	mtx_assert(&qs->lock, MA_OWNED);
2142174708Skmacy	while (reclaimable--) {
2143194521Skmacy		prefetch(q->sdesc[(cidx + 1) & mask].m);
2144194521Skmacy		prefetch(q->sdesc[(cidx + 2) & mask].m);
2145194521Skmacy
2146194521Skmacy		if (txsd->m != NULL) {
2147174708Skmacy			if (txsd->flags & TX_SW_DESC_MAPPED) {
2148174708Skmacy				bus_dmamap_unload(q->entry_tag, txsd->map);
2149174708Skmacy				txsd->flags &= ~TX_SW_DESC_MAPPED;
2150167514Skmacy			}
2151194521Skmacy			m_freem_list(txsd->m);
2152194521Skmacy			txsd->m = NULL;
2153174708Skmacy		} else
2154174708Skmacy			q->txq_skipped++;
2155174708Skmacy
2156174708Skmacy		++txsd;
2157167514Skmacy		if (++cidx == q->size) {
2158167514Skmacy			cidx = 0;
2159174708Skmacy			txsd = q->sdesc;
2160167514Skmacy		}
2161167514Skmacy	}
2162167514Skmacy	q->cidx = cidx;
2163167514Skmacy
2164167514Skmacy}
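
/*
 * The prefetch arithmetic above relies on the ring size being a power of
 * two: with an illustrative q->size of 256, mask == 255, so at cidx == 255
 * the expression (cidx + 1) & mask wraps to 0 without a branch.
 */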
2165167514Skmacy
2166167514Skmacy/**
2167167514Skmacy *	is_new_response - check if a response is newly written
2168167514Skmacy *	@r: the response descriptor
2169167514Skmacy *	@q: the response queue
2170167514Skmacy *
2171167514Skmacy *	Returns true if a response descriptor contains a yet unprocessed
2172167514Skmacy *	response.
2173167514Skmacy */
2174167514Skmacystatic __inline int
2175167514Skmacyis_new_response(const struct rsp_desc *r,
2176167514Skmacy    const struct sge_rspq *q)
2177167514Skmacy{
2178167514Skmacy	return (r->intr_gen & F_RSPD_GEN2) == q->gen;
2179167514Skmacy}
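
/*
 * A short sketch of the generation-bit protocol assumed here: the driver
 * flips q->gen each time the response ring wraps, and the SGE writes the
 * matching generation value into every descriptor it produces.  A
 * descriptor whose F_RSPD_GEN2 bit equals q->gen was written during the
 * current lap and is therefore new; a leftover from the previous lap
 * carries the opposite value and is ignored.
 */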
2180167514Skmacy
2181167514Skmacy#define RSPD_GTS_MASK  (F_RSPD_TXQ0_GTS | F_RSPD_TXQ1_GTS)
2182167514Skmacy#define RSPD_CTRL_MASK (RSPD_GTS_MASK | \
2183167514Skmacy			V_RSPD_TXQ0_CR(M_RSPD_TXQ0_CR) | \
2184167514Skmacy			V_RSPD_TXQ1_CR(M_RSPD_TXQ1_CR) | \
2185167514Skmacy			V_RSPD_TXQ2_CR(M_RSPD_TXQ2_CR))
2186167514Skmacy
2187167514Skmacy/* How long to delay the next interrupt in case of memory shortage, in 0.1us. */
2188167514Skmacy#define NOMEM_INTR_DELAY 2500
2189167514Skmacy
2190237263Snp#ifdef TCP_OFFLOAD
2191169978Skmacy/**
2192169978Skmacy *	write_ofld_wr - write an offload work request
2193169978Skmacy *	@adap: the adapter
2194169978Skmacy *	@m: the packet to send
2195169978Skmacy *	@q: the Tx queue
2196169978Skmacy *	@pidx: index of the first Tx descriptor to write
2197169978Skmacy *	@gen: the generation value to use
2198169978Skmacy *	@ndesc: number of descriptors the packet will occupy
2199169978Skmacy *
2200169978Skmacy *	Write an offload work request to send the supplied packet.  The packet
2201169978Skmacy *	data already carry the work request with most fields populated.
2202169978Skmacy */
2203169978Skmacystatic void
2204237263Snpwrite_ofld_wr(adapter_t *adap, struct mbuf *m, struct sge_txq *q,
2205237263Snp    unsigned int pidx, unsigned int gen, unsigned int ndesc)
2206167514Skmacy{
2207169978Skmacy	unsigned int sgl_flits, flits;
2208237263Snp	int i, idx, nsegs, wrlen;
2209169978Skmacy	struct work_request_hdr *from;
2210237263Snp	struct sg_ent *sgp, t3sgl[TX_MAX_SEGS / 2 + 1];
2211169978Skmacy	struct tx_desc *d = &q->desc[pidx];
2212169978Skmacy	struct txq_state txqs;
2213237263Snp	struct sglist_seg *segs;
2214237263Snp	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2215237263Snp	struct sglist *sgl;
2216237263Snp
2217237263Snp	from = (void *)(oh + 1);	/* Start of WR within mbuf */
2218237263Snp	wrlen = m->m_len - sizeof(*oh);
2219237263Snp
2220237263Snp	if (!(oh->flags & F_HDR_SGL)) {
2221237263Snp		write_imm(d, (caddr_t)from, wrlen, gen);
2222237263Snp
2223237263Snp		/*
2224237263Snp		 * mbuf with "real" immediate tx data will be enqueue_wr'd by
2225237263Snp		 * t3_push_frames and freed in wr_ack.  Others, like those sent
2226237263Snp		 * down by close_conn, t3_send_reset, etc. should be freed here.
2227237263Snp		 */
2228237263Snp		if (!(oh->flags & F_HDR_DF))
2229237263Snp			m_free(m);
2230169978Skmacy		return;
2231169978Skmacy	}
2232169978Skmacy
2233237263Snp	memcpy(&d->flit[1], &from[1], wrlen - sizeof(*from));
2234169978Skmacy
2235237263Snp	sgl = oh->sgl;
2236237263Snp	flits = wrlen / 8;
2237237263Snp	sgp = (ndesc == 1) ? (struct sg_ent *)&d->flit[flits] : t3sgl;
2238169978Skmacy
2239237263Snp	nsegs = sgl->sg_nseg;
2240237263Snp	segs = sgl->sg_segs;
2241237263Snp	for (idx = 0, i = 0; i < nsegs; i++) {
2242237263Snp		KASSERT(segs[i].ss_len, ("%s: 0 len in sgl", __func__));
2243237263Snp		if (i && idx == 0)
2244237263Snp			++sgp;
2245237263Snp		sgp->len[idx] = htobe32(segs[i].ss_len);
2246237263Snp		sgp->addr[idx] = htobe64(segs[i].ss_paddr);
2247237263Snp		idx ^= 1;
2248237263Snp	}
2249237263Snp	if (idx) {
2250237263Snp		sgp->len[idx] = 0;
2251237263Snp		sgp->addr[idx] = 0;
2252237263Snp	}
2253237263Snp
2254169978Skmacy	sgl_flits = sgl_len(nsegs);
2255174708Skmacy	txqs.gen = gen;
2256174708Skmacy	txqs.pidx = pidx;
2257174708Skmacy	txqs.compl = 0;
2258174708Skmacy
2259237263Snp	write_wr_hdr_sgl(ndesc, d, &txqs, q, t3sgl, flits, sgl_flits,
2260194521Skmacy	    from->wrh_hi, from->wrh_lo);
2261167514Skmacy}
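
/*
 * Illustrative example of the sg_ent packing above: each struct sg_ent
 * holds two (len, addr) pairs, so a 3-segment sglist ends up as
 *
 *	t3sgl[0] = { len0, len1, addr0, addr1 }
 *	t3sgl[1] = { len2, 0,    addr2, 0    }
 *
 * with the odd trailing slot zeroed, which is exactly what the idx ^= 1
 * toggle and the final if (idx) block produce.
 */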
2262167514Skmacy
2263169978Skmacy/**
2264169978Skmacy *	ofld_xmit - send a packet through an offload queue
2265169978Skmacy *	@adap: the adapter
2266169978Skmacy *	@qs: the queue set containing the Tx offload queue
2267169978Skmacy *	@m: the packet
2268169978Skmacy *
2269169978Skmacy *	Send an offload packet through an SGE offload queue.
2270169978Skmacy */
2271169978Skmacystatic int
2272194521Skmacyofld_xmit(adapter_t *adap, struct sge_qset *qs, struct mbuf *m)
2273169978Skmacy{
2274237263Snp	int ret;
2275171978Skmacy	unsigned int ndesc;
2276171978Skmacy	unsigned int pidx, gen;
2277194521Skmacy	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2278237263Snp	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2279169978Skmacy
2280237263Snp	ndesc = G_HDR_NDESC(oh->flags);
2281169978Skmacy
2282194521Skmacy	TXQ_LOCK(qs);
2283194521Skmacyagain:	reclaim_completed_tx(qs, 16, TXQ_OFLD);
2284169978Skmacy	ret = check_desc_avail(adap, q, m, ndesc, TXQ_OFLD);
2285169978Skmacy	if (__predict_false(ret)) {
2286169978Skmacy		if (ret == 1) {
2287194521Skmacy			TXQ_UNLOCK(qs);
2288174708Skmacy			return (EINTR);
2289167514Skmacy		}
2290169978Skmacy		goto again;
2291169978Skmacy	}
2292169978Skmacy
2293169978Skmacy	gen = q->gen;
2294169978Skmacy	q->in_use += ndesc;
2295169978Skmacy	pidx = q->pidx;
2296169978Skmacy	q->pidx += ndesc;
2297169978Skmacy	if (q->pidx >= q->size) {
2298169978Skmacy		q->pidx -= q->size;
2299169978Skmacy		q->gen ^= 1;
2300169978Skmacy	}
2301237263Snp
2302237263Snp	write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2303237263Snp	check_ring_tx_db(adap, q, 1);
2304194521Skmacy	TXQ_UNLOCK(qs);
2305169978Skmacy
2306172101Skmacy	return (0);
2307169978Skmacy}
2308167514Skmacy
2309169978Skmacy/**
2310169978Skmacy *	restart_offloadq - restart a suspended offload queue
2311169978Skmacy *	@qs: the queue set containing the offload queue
2312169978Skmacy *
2313169978Skmacy *	Resumes transmission on a suspended Tx offload queue.
2314169978Skmacy */
2315169978Skmacystatic void
2316169978Skmacyrestart_offloadq(void *data, int npending)
2317169978Skmacy{
2318169978Skmacy	struct mbuf *m;
2319169978Skmacy	struct sge_qset *qs = data;
2320169978Skmacy	struct sge_txq *q = &qs->txq[TXQ_OFLD];
2321169978Skmacy	adapter_t *adap = qs->port->adapter;
2322237263Snp	int cleaned;
2323169978Skmacy
2324194521Skmacy	TXQ_LOCK(qs);
2325194521Skmacyagain:	cleaned = reclaim_completed_tx(qs, 16, TXQ_OFLD);
2326169978Skmacy
2327169978Skmacy	while ((m = mbufq_peek(&q->sendq)) != NULL) {
2328169978Skmacy		unsigned int gen, pidx;
2329237263Snp		struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2330237263Snp		unsigned int ndesc = G_HDR_NDESC(oh->flags);
2331169978Skmacy
2332169978Skmacy		if (__predict_false(q->size - q->in_use < ndesc)) {
2333169978Skmacy			setbit(&qs->txq_stopped, TXQ_OFLD);
2334169978Skmacy			if (should_restart_tx(q) &&
2335169978Skmacy			    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped))
2336169978Skmacy				goto again;
2337169978Skmacy			q->stops++;
2338169978Skmacy			break;
2339169978Skmacy		}
2340169978Skmacy
2341169978Skmacy		gen = q->gen;
2342169978Skmacy		q->in_use += ndesc;
2343169978Skmacy		pidx = q->pidx;
2344169978Skmacy		q->pidx += ndesc;
2345169978Skmacy		if (q->pidx >= q->size) {
2346169978Skmacy			q->pidx -= q->size;
2347169978Skmacy			q->gen ^= 1;
2348169978Skmacy		}
2349169978Skmacy
2350169978Skmacy		(void)mbufq_dequeue(&q->sendq);
2351194521Skmacy		TXQ_UNLOCK(qs);
2352237263Snp		write_ofld_wr(adap, m, q, pidx, gen, ndesc);
2353194521Skmacy		TXQ_LOCK(qs);
2354169978Skmacy	}
2355169978Skmacy#if USE_GTS
2356169978Skmacy	set_bit(TXQ_RUNNING, &q->flags);
2357169978Skmacy	set_bit(TXQ_LAST_PKT_DB, &q->flags);
2358169978Skmacy#endif
2359194521Skmacy	TXQ_UNLOCK(qs);
2360176472Skmacy	wmb();
2361169978Skmacy	t3_write_reg(adap, A_SG_KDOORBELL,
2362169978Skmacy		     F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2363167514Skmacy}
2364167514Skmacy
2365169978Skmacy/**
2366169978Skmacy *	t3_offload_tx - send an offload packet
2367169978Skmacy *	@sc: the adapter
2368169978Skmacy *	@m: the packet
2369169978Skmacy *
2370169978Skmacy *	Sends an offload packet.  The ofld_hdr at the front of the mbuf selects
2371169978Skmacy *	the queue set (G_HDR_QSET) and flags control-queue traffic (F_HDR_CTRL).
2372169978Skmacy */
2373169978Skmacyint
2374237263Snpt3_offload_tx(struct adapter *sc, struct mbuf *m)
2375169978Skmacy{
2376237263Snp	struct ofld_hdr *oh = mtod(m, struct ofld_hdr *);
2377237263Snp	struct sge_qset *qs = &sc->sge.qs[G_HDR_QSET(oh->flags)];
2378169978Skmacy
2379237263Snp	if (oh->flags & F_HDR_CTRL) {
2380237263Snp		m_adj(m, sizeof (*oh));	/* trim ofld_hdr off */
2381237263Snp		return (ctrl_xmit(sc, qs, m));
2382237263Snp	} else
2383237263Snp		return (ofld_xmit(sc, qs, m));
2384169978Skmacy}
2385237263Snp#endif
2386169978Skmacy
2387167514Skmacystatic void
2388167514Skmacyrestart_tx(struct sge_qset *qs)
2389167514Skmacy{
2390169978Skmacy	struct adapter *sc = qs->port->adapter;
2391237263Snp
2392169978Skmacy	if (isset(&qs->txq_stopped, TXQ_OFLD) &&
2393169978Skmacy	    should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2394169978Skmacy	    test_and_clear_bit(TXQ_OFLD, &qs->txq_stopped)) {
2395169978Skmacy		qs->txq[TXQ_OFLD].restarts++;
2396171335Skmacy		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2397169978Skmacy	}
2398237263Snp
2399169978Skmacy	if (isset(&qs->txq_stopped, TXQ_CTRL) &&
2400169978Skmacy	    should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2401169978Skmacy	    test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped)) {
2402169978Skmacy		qs->txq[TXQ_CTRL].restarts++;
2403171335Skmacy		taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2404169978Skmacy	}
2405167514Skmacy}
2406167514Skmacy
2407169978Skmacy/**
2408169978Skmacy *	t3_sge_alloc_qset - initialize an SGE queue set
2409169978Skmacy *	@sc: the controller softc
2410169978Skmacy *	@id: the queue set id
2411169978Skmacy *	@nports: how many Ethernet ports will be using this queue set
2412169978Skmacy *	@irq_vec_idx: the IRQ vector index for response queue interrupts
2413169978Skmacy *	@p: configuration parameters for this queue set
2414169978Skmacy *	@ntxq: number of Tx queues for the queue set
2415169978Skmacy *	@pi: port info for queue set
2416169978Skmacy *
2417169978Skmacy *	Allocate resources and initialize an SGE queue set.  A queue set
2418169978Skmacy *	comprises a response queue, two Rx free-buffer queues, and up to 3
2419169978Skmacy *	Tx queues.  The Tx queues are assigned roles in the order Ethernet
2420169978Skmacy *	queue, offload queue, and control queue.
2421169978Skmacy */
2422169978Skmacyint
2423169978Skmacyt3_sge_alloc_qset(adapter_t *sc, u_int id, int nports, int irq_vec_idx,
2424169978Skmacy		  const struct qset_params *p, int ntxq, struct port_info *pi)
2425169978Skmacy{
2426169978Skmacy	struct sge_qset *q = &sc->sge.qs[id];
2427194521Skmacy	int i, ret = 0;
2428169978Skmacy
2429194521Skmacy	MTX_INIT(&q->lock, q->namebuf, NULL, MTX_DEF);
2430194521Skmacy	q->port = pi;
2431237263Snp	q->adap = sc;
2432194521Skmacy
2433205950Snp	if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2434205950Snp	    M_DEVBUF, M_WAITOK, &q->lock)) == NULL) {
2435205950Snp		device_printf(sc->dev, "failed to allocate mbuf ring\n");
2436205950Snp		goto err;
2437174708Skmacy	}
2438205950Snp	if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2439205950Snp	    M_NOWAIT | M_ZERO)) == NULL) {
2440205950Snp		device_printf(sc->dev, "failed to allocate ifq\n");
2441205950Snp		goto err;
2442205950Snp	}
2443205950Snp	ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2444205950Snp	callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2445205950Snp	callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2446205950Snp	q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2447205950Snp	q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2448205950Snp
2449169978Skmacy	init_qset_cntxt(q, id);
2450175347Skmacy	q->idx = id;
2451169978Skmacy	if ((ret = alloc_ring(sc, p->fl_size, sizeof(struct rx_desc),
2452169978Skmacy		    sizeof(struct rx_sw_desc), &q->fl[0].phys_addr,
2453169978Skmacy		    &q->fl[0].desc, &q->fl[0].sdesc,
2454169978Skmacy		    &q->fl[0].desc_tag, &q->fl[0].desc_map,
2455169978Skmacy		    sc->rx_dmat, &q->fl[0].entry_tag)) != 0) {
2456169978Skmacy		printf("error %d from alloc ring fl0\n", ret);
2457169978Skmacy		goto err;
2458169978Skmacy	}
2459169978Skmacy
2460169978Skmacy	if ((ret = alloc_ring(sc, p->jumbo_size, sizeof(struct rx_desc),
2461169978Skmacy		    sizeof(struct rx_sw_desc), &q->fl[1].phys_addr,
2462169978Skmacy		    &q->fl[1].desc, &q->fl[1].sdesc,
2463169978Skmacy		    &q->fl[1].desc_tag, &q->fl[1].desc_map,
2464169978Skmacy		    sc->rx_jumbo_dmat, &q->fl[1].entry_tag)) != 0) {
2465169978Skmacy		printf("error %d from alloc ring fl1\n", ret);
2466169978Skmacy		goto err;
2467169978Skmacy	}
2468169978Skmacy
2469169978Skmacy	if ((ret = alloc_ring(sc, p->rspq_size, sizeof(struct rsp_desc), 0,
2470169978Skmacy		    &q->rspq.phys_addr, &q->rspq.desc, NULL,
2471169978Skmacy		    &q->rspq.desc_tag, &q->rspq.desc_map,
2472169978Skmacy		    NULL, NULL)) != 0) {
2473169978Skmacy		printf("error %d from alloc ring rspq\n", ret);
2474169978Skmacy		goto err;
2475169978Skmacy	}
2476169978Skmacy
2477242087Snp	snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2478242087Snp	    device_get_unit(sc->dev), irq_vec_idx);
2479242087Snp	MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2480242087Snp
2481169978Skmacy	for (i = 0; i < ntxq; ++i) {
2482169978Skmacy		size_t sz = i == TXQ_CTRL ? 0 : sizeof(struct tx_sw_desc);
2483169978Skmacy
2484169978Skmacy		if ((ret = alloc_ring(sc, p->txq_size[i],
2485169978Skmacy			    sizeof(struct tx_desc), sz,
2486169978Skmacy			    &q->txq[i].phys_addr, &q->txq[i].desc,
2487169978Skmacy			    &q->txq[i].sdesc, &q->txq[i].desc_tag,
2488169978Skmacy			    &q->txq[i].desc_map,
2489169978Skmacy			    sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2490169978Skmacy			printf("error %d from alloc ring tx %i\n", ret, i);
2491169978Skmacy			goto err;
2492169978Skmacy		}
2493169978Skmacy		mbufq_init(&q->txq[i].sendq);
2494169978Skmacy		q->txq[i].gen = 1;
2495169978Skmacy		q->txq[i].size = p->txq_size[i];
2496169978Skmacy	}
2497237263Snp
2498237263Snp#ifdef TCP_OFFLOAD
2499171335Skmacy	TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2500237263Snp#endif
2501171335Skmacy	TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2502194521Skmacy	TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2503194521Skmacy	TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2504171335Skmacy
2505169978Skmacy	q->fl[0].gen = q->fl[1].gen = 1;
2506169978Skmacy	q->fl[0].size = p->fl_size;
2507169978Skmacy	q->fl[1].size = p->jumbo_size;
2508169978Skmacy
2509169978Skmacy	q->rspq.gen = 1;
2510171471Skmacy	q->rspq.cidx = 0;
2511169978Skmacy	q->rspq.size = p->rspq_size;
2512170869Skmacy
2513169978Skmacy	q->txq[TXQ_ETH].stop_thres = nports *
2514169978Skmacy	    flits_to_desc(sgl_len(TX_MAX_SEGS + 1) + 3);
2515169978Skmacy
2516194521Skmacy	q->fl[0].buf_size = MCLBYTES;
2517194521Skmacy	q->fl[0].zone = zone_pack;
2518194521Skmacy	q->fl[0].type = EXT_PACKET;
2519205950Snp
2520205950Snp	if (p->jumbo_buf_size == MJUM16BYTES) {
2521174708Skmacy		q->fl[1].zone = zone_jumbo16;
2522174708Skmacy		q->fl[1].type = EXT_JUMBO16;
2523205950Snp	} else if (p->jumbo_buf_size == MJUM9BYTES) {
2524175200Skmacy		q->fl[1].zone = zone_jumbo9;
2525175200Skmacy		q->fl[1].type = EXT_JUMBO9;
2526205950Snp	} else if (p->jumbo_buf_size == MJUMPAGESIZE) {
2527205950Snp		q->fl[1].zone = zone_jumbop;
2528205950Snp		q->fl[1].type = EXT_JUMBOP;
2529205950Snp	} else {
2530205950Snp		KASSERT(0, ("can't deal with jumbo_buf_size %d.", p->jumbo_buf_size));
2531205950Snp		ret = EDOOFUS;
2532205950Snp		goto err;
2533175200Skmacy	}
2534205950Snp	q->fl[1].buf_size = p->jumbo_buf_size;
2535171978Skmacy
2536183289Skmacy	/* Allocate and setup the lro_ctrl structure */
2537181616Skmacy	q->lro.enabled = !!(pi->ifp->if_capenable & IFCAP_LRO);
2538235963Sbz#if defined(INET6) || defined(INET)
2539181616Skmacy	ret = tcp_lro_init(&q->lro.ctrl);
2540181616Skmacy	if (ret) {
2541181616Skmacy		printf("error %d from tcp_lro_init\n", ret);
2542181616Skmacy		goto err;
2543181616Skmacy	}
2544205947Snp#endif
2545181616Skmacy	q->lro.ctrl.ifp = pi->ifp;
2546181616Skmacy
2547176472Skmacy	mtx_lock_spin(&sc->sge.reg_lock);
2548169978Skmacy	ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2549169978Skmacy				   q->rspq.phys_addr, q->rspq.size,
2550169978Skmacy				   q->fl[0].buf_size, 1, 0);
2551169978Skmacy	if (ret) {
2552169978Skmacy		printf("error %d from t3_sge_init_rspcntxt\n", ret);
2553169978Skmacy		goto err_unlock;
2554169978Skmacy	}
2555169978Skmacy
2556169978Skmacy	for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
2557169978Skmacy		ret = -t3_sge_init_flcntxt(sc, q->fl[i].cntxt_id, 0,
2558169978Skmacy					  q->fl[i].phys_addr, q->fl[i].size,
2559169978Skmacy					  q->fl[i].buf_size, p->cong_thres, 1,
2560169978Skmacy					  0);
2561169978Skmacy		if (ret) {
2562169978Skmacy			printf("error %d from t3_sge_init_flcntxt for index i=%d\n", ret, i);
2563169978Skmacy			goto err_unlock;
2564169978Skmacy		}
2565169978Skmacy	}
2566169978Skmacy
2567169978Skmacy	ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2568169978Skmacy				 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2569169978Skmacy				 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2570169978Skmacy				 1, 0);
2571169978Skmacy	if (ret) {
2572169978Skmacy		printf("error %d from t3_sge_init_ecntxt\n", ret);
2573169978Skmacy		goto err_unlock;
2574169978Skmacy	}
2575169978Skmacy
2576169978Skmacy	if (ntxq > 1) {
2577169978Skmacy		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2578169978Skmacy					 USE_GTS, SGE_CNTXT_OFLD, id,
2579169978Skmacy					 q->txq[TXQ_OFLD].phys_addr,
2580169978Skmacy					 q->txq[TXQ_OFLD].size, 0, 1, 0);
2581169978Skmacy		if (ret) {
2582169978Skmacy			printf("error %d from t3_sge_init_ecntxt\n", ret);
2583169978Skmacy			goto err_unlock;
2584169978Skmacy		}
2585169978Skmacy	}
2586169978Skmacy
2587169978Skmacy	if (ntxq > 2) {
2588169978Skmacy		ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2589169978Skmacy					 SGE_CNTXT_CTRL, id,
2590169978Skmacy					 q->txq[TXQ_CTRL].phys_addr,
2591169978Skmacy					 q->txq[TXQ_CTRL].size,
2592169978Skmacy					 q->txq[TXQ_CTRL].token, 1, 0);
2593169978Skmacy		if (ret) {
2594169978Skmacy			printf("error %d from t3_sge_init_ecntxt\n", ret);
2595169978Skmacy			goto err_unlock;
2596169978Skmacy		}
2597169978Skmacy	}
2598242087Snp
2599176472Skmacy	mtx_unlock_spin(&sc->sge.reg_lock);
2600169978Skmacy	t3_update_qset_coalesce(q, p);
2601237263Snp
2602169978Skmacy	refill_fl(sc, &q->fl[0], q->fl[0].size);
2603169978Skmacy	refill_fl(sc, &q->fl[1], q->fl[1].size);
2604169978Skmacy	refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2605169978Skmacy
2606169978Skmacy	t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2607169978Skmacy		     V_NEWTIMER(q->rspq.holdoff_tmr));
2608169978Skmacy
2609169978Skmacy	return (0);
2610169978Skmacy
2611169978Skmacyerr_unlock:
2612176472Skmacy	mtx_unlock_spin(&sc->sge.reg_lock);
2613169978Skmacyerr:
2614194521Skmacy	TXQ_LOCK(q);
2615169978Skmacy	t3_free_qset(sc, q);
2616169978Skmacy
2617169978Skmacy	return (ret);
2618169978Skmacy}
2619169978Skmacy
2620181616Skmacy/*
2621181616Skmacy * Remove CPL_RX_PKT headers from the mbuf and reduce it to a regular mbuf with
2622181616Skmacy * ethernet data.  Hardware assistance with various checksums and any vlan tag
2623181616Skmacy * will also be taken into account here.
2624181616Skmacy */
2625167514Skmacyvoid
2626237832Snpt3_rx_eth(struct adapter *adap, struct mbuf *m, int ethpad)
2627167514Skmacy{
2628170654Skmacy	struct cpl_rx_pkt *cpl = (struct cpl_rx_pkt *)(mtod(m, uint8_t *) + ethpad);
2629171978Skmacy	struct port_info *pi = &adap->port[adap->rxpkt_map[cpl->iff]];
2630167514Skmacy	struct ifnet *ifp = pi->ifp;
2631167514Skmacy
2632204274Snp	if (cpl->vlan_valid) {
2633167514Skmacy		m->m_pkthdr.ether_vtag = ntohs(cpl->vlan);
2634167514Skmacy		m->m_flags |= M_VLANTAG;
2635167514Skmacy	}
2636204274Snp
2637167514Skmacy	m->m_pkthdr.rcvif = ifp;
2638168737Skmacy	/*
2639168737Skmacy	 * adjust after conversion to mbuf chain
2640168737Skmacy	 */
2641174708Skmacy	m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2642174708Skmacy	m->m_len -= (sizeof(*cpl) + ethpad);
2643174708Skmacy	m->m_data += (sizeof(*cpl) + ethpad);
2644237832Snp
2645237832Snp	if (!cpl->fragment && cpl->csum_valid && cpl->csum == 0xffff) {
2646237832Snp		struct ether_header *eh = mtod(m, void *);
2647237832Snp		uint16_t eh_type;
2648237832Snp
2649237832Snp		if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
2650237832Snp			struct ether_vlan_header *evh = mtod(m, void *);
2651237832Snp
2652237832Snp			eh_type = evh->evl_proto;
2653237832Snp		} else
2654237832Snp			eh_type = eh->ether_type;
2655237832Snp
2656237832Snp		if (ifp->if_capenable & IFCAP_RXCSUM &&
2657237832Snp		    eh_type == htons(ETHERTYPE_IP)) {
2658237832Snp			m->m_pkthdr.csum_flags = (CSUM_IP_CHECKED |
2659237832Snp			    CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
2660237832Snp			m->m_pkthdr.csum_data = 0xffff;
2661237832Snp		} else if (ifp->if_capenable & IFCAP_RXCSUM_IPV6 &&
2662237832Snp		    eh_type == htons(ETHERTYPE_IPV6)) {
2663237832Snp			m->m_pkthdr.csum_flags = (CSUM_DATA_VALID_IPV6 |
2664237832Snp			    CSUM_PSEUDO_HDR);
2665237832Snp			m->m_pkthdr.csum_data = 0xffff;
2666237832Snp		}
2667237832Snp	}
2668167514Skmacy}

/**
 *	get_packet - return the next ingress packet buffer from a free list
 *	@adap: the adapter that received the packet
 *	@drop_thres: # of remaining buffers before we start dropping packets
 *	@qs: the qset that the SGE free list holding the packet belongs to
 *	@mh: the mbuf header, contains a pointer to the head and tail of the mbuf chain
 *	@r: response descriptor
 *
 *	Get the next packet from a free list and complete setup of the
 *	mbuf.  If the packet is small we make a copy and recycle the
 *	original buffer, otherwise we use the original buffer itself.  If a
 *	positive drop threshold is supplied packets are dropped and their
 *	buffers recycled if (a) the number of remaining buffers is under the
 *	threshold and the packet is too big to copy, or (b) the packet should
 *	be copied but there is no memory for the copy.
 */
static int
get_packet(adapter_t *adap, unsigned int drop_thres, struct sge_qset *qs,
    struct t3_mbuf_hdr *mh, struct rsp_desc *r)
{

	unsigned int len_cq = ntohl(r->len_cq);
	struct sge_fl *fl = (len_cq & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
	int mask, cidx = fl->cidx;
	struct rx_sw_desc *sd = &fl->sdesc[cidx];
	uint32_t len = G_RSPD_LEN(len_cq);
	uint32_t flags = M_EXT;
	uint8_t sopeop = G_RSPD_SOP_EOP(ntohl(r->flags));
	caddr_t cl;
	struct mbuf *m;
	int ret = 0;

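	/*
	 * Prefetch the next two software descriptors and their clusters so
	 * they are likely to be cache-warm when the following responses in
	 * this batch are processed.
	 */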
	mask = fl->size - 1;
	prefetch(fl->sdesc[(cidx + 1) & mask].m);
	prefetch(fl->sdesc[(cidx + 2) & mask].m);
	prefetch(fl->sdesc[(cidx + 1) & mask].rxsd_cl);
	prefetch(fl->sdesc[(cidx + 2) & mask].rxsd_cl);

	fl->credits--;
	bus_dmamap_sync(fl->entry_tag, sd->map, BUS_DMASYNC_POSTREAD);

	if (recycle_enable && len <= SGE_RX_COPY_THRES &&
	    sopeop == RSPQ_SOP_EOP) {
		if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
			goto skip_recycle;
		cl = mtod(m, void *);
		memcpy(cl, sd->rxsd_cl, len);
		recycle_rx_buf(adap, fl, fl->cidx);
		m->m_pkthdr.len = m->m_len = len;
		m->m_flags = 0;
		mh->mh_head = mh->mh_tail = m;
		ret = 1;
		goto done;
	} else {
	skip_recycle:
		bus_dmamap_unload(fl->entry_tag, sd->map);
		cl = sd->rxsd_cl;
		m = sd->m;

		if ((sopeop == RSPQ_SOP_EOP) ||
		    (sopeop == RSPQ_SOP))
			flags |= M_PKTHDR;
		m_init(m, fl->zone, fl->buf_size, M_NOWAIT, MT_DATA, flags);
		if (fl->zone == zone_pack) {
			/*
			 * restore clobbered data pointer
			 */
			m->m_data = m->m_ext.ext_buf;
		} else {
			m_cljset(m, cl, fl->type);
		}
		m->m_len = len;
	}
	switch (sopeop) {
	case RSPQ_SOP_EOP:
		ret = 1;
		/* FALLTHROUGH */
	case RSPQ_SOP:
		mh->mh_head = mh->mh_tail = m;
		m->m_pkthdr.len = len;
		break;
	case RSPQ_EOP:
		ret = 1;
		/* FALLTHROUGH */
	case RSPQ_NSOP_NEOP:
		if (mh->mh_tail == NULL) {
			log(LOG_ERR, "discarding intermediate descriptor entry\n");
			m_freem(m);
			break;
		}
		mh->mh_tail->m_next = m;
		mh->mh_tail = m;
		mh->mh_head->m_pkthdr.len += len;
		break;
	}
	if (cxgb_debug)
		printf("len=%d pktlen=%d\n", m->m_len, m->m_pkthdr.len);
done:
	if (++fl->cidx == fl->size)
		fl->cidx = 0;

	return (ret);
}

/**
 *	handle_rsp_cntrl_info - handles control information in a response
 *	@qs: the queue set corresponding to the response
 *	@flags: the response control flags
 *
 *	Handles the control information of an SGE response, such as GTS
 *	indications and completion credits for the queue set's Tx queues.
 *	HW coalesces credits, we don't do any extra SW coalescing.
 */
static __inline void
handle_rsp_cntrl_info(struct sge_qset *qs, uint32_t flags)
{
	unsigned int credits;

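	/*
	 * Completion credits are reported per hardware Tx queue: RSPD_TXQ0
	 * corresponds to the ethernet queue, RSPD_TXQ1 to the offload queue,
	 * and RSPD_TXQ2 to the control queue.
	 */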
#if USE_GTS
	if (flags & F_RSPD_TXQ0_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
#endif
	credits = G_RSPD_TXQ0_CR(flags);
	if (credits)
		qs->txq[TXQ_ETH].processed += credits;

	credits = G_RSPD_TXQ2_CR(flags);
	if (credits)
		qs->txq[TXQ_CTRL].processed += credits;

#if USE_GTS
	if (flags & F_RSPD_TXQ1_GTS)
		clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
#endif
	credits = G_RSPD_TXQ1_CR(flags);
	if (credits)
		qs->txq[TXQ_OFLD].processed += credits;
}

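/*
 * A deliberate no-op: with USE_GTS disabled no doorbell rings need to be
 * issued here, so the stub below only preserves the call site in
 * process_responses().
 */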
static void
check_ring_db(adapter_t *adap, struct sge_qset *qs,
    unsigned int sleeping)
{
	;
}

/**
 *	process_responses - process responses from an SGE response queue
 *	@adap: the adapter
 *	@qs: the queue set to which the response queue belongs
 *	@budget: how many responses can be processed in this round
 *
 *	Process responses from an SGE response queue up to the supplied budget.
 *	Responses include received packets as well as credits and other events
 *	for the queues that belong to the response queue's queue set.
 *	A negative budget is effectively unlimited.
 *
 *	Additionally choose the interrupt holdoff time for the next interrupt
 *	on this queue.  If the system is under memory shortage use a fairly
 *	long delay to help recovery.
 */
static int
process_responses(adapter_t *adap, struct sge_qset *qs, int budget)
{
	struct sge_rspq *rspq = &qs->rspq;
	struct rsp_desc *r = &rspq->desc[rspq->cidx];
	int budget_left = budget;
	unsigned int sleeping = 0;
#if defined(INET6) || defined(INET)
	int lro_enabled = qs->lro.enabled;
	int skip_lro;
	struct lro_ctrl *lro_ctrl = &qs->lro.ctrl;
#endif
	struct t3_mbuf_hdr *mh = &rspq->rspq_mh;
#ifdef DEBUG
	static int last_holdoff = 0;
	if (cxgb_debug && rspq->holdoff_tmr != last_holdoff) {
		printf("next_holdoff=%d\n", rspq->holdoff_tmr);
		last_holdoff = rspq->holdoff_tmr;
	}
#endif
	rspq->next_holdoff = rspq->holdoff_tmr;

	while (__predict_true(budget_left && is_new_response(r, rspq))) {
		int eth, eop = 0, ethpad = 0;
		uint32_t flags = ntohl(r->flags);
		uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
		uint8_t opcode = r->rss_hdr.opcode;

		eth = (opcode == CPL_RX_PKT);

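		/*
		 * A response carries one of three things: an async
		 * notification from the firmware, a small packet inlined in
		 * the descriptor as immediate data, or a reference to one or
		 * more free-list buffers (len_cq != 0).
		 */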
		if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
			struct mbuf *m;

			if (cxgb_debug)
				printf("async notification\n");

			if (mh->mh_head == NULL) {
				mh->mh_head = m_gethdr(M_NOWAIT, MT_DATA);
				m = mh->mh_head;
			} else {
				m = m_gethdr(M_NOWAIT, MT_DATA);
			}
			if (m == NULL)
				goto no_mem;

			memcpy(mtod(m, char *), r, AN_PKT_SIZE);
			m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
			*mtod(m, uint8_t *) = CPL_ASYNC_NOTIF;
			opcode = CPL_ASYNC_NOTIF;
			eop = 1;
			rspq->async_notif++;
			goto skip;
		} else if (flags & F_RSPD_IMM_DATA_VALID) {
			struct mbuf *m = m_gethdr(M_NOWAIT, MT_DATA);

			if (m == NULL) {
		no_mem:
				rspq->next_holdoff = NOMEM_INTR_DELAY;
				budget_left--;
				break;
			}
			if (mh->mh_head == NULL)
				mh->mh_head = m;
			else
				mh->mh_tail->m_next = m;
			mh->mh_tail = m;

			get_imm_packet(adap, r, m);
			mh->mh_head->m_pkthdr.len += m->m_len;
			eop = 1;
			rspq->imm_data++;
		} else if (r->len_cq) {
			int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;

			eop = get_packet(adap, drop_thresh, qs, mh, r);
			if (eop) {
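				/*
				 * With packet timestamping enabled the hash
				 * field carries a timestamp rather than an
				 * RSS hash, so don't advertise it as a
				 * flowid.
				 */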
				if (r->rss_hdr.hash_type && !adap->timestamp) {
					M_HASHTYPE_SET(mh->mh_head, M_HASHTYPE_OPAQUE);
					mh->mh_head->m_pkthdr.flowid = rss_hash;
				}
			}

			ethpad = 2;
		} else {
			rspq->pure_rsps++;
		}
	skip:
		if (flags & RSPD_CTRL_MASK) {
			sleeping |= flags & RSPD_GTS_MASK;
			handle_rsp_cntrl_info(qs, flags);
		}

		if (!eth && eop) {
			rspq->offload_pkts++;
#ifdef TCP_OFFLOAD
			adap->cpl_handler[opcode](qs, r, mh->mh_head);
#else
			m_freem(mh->mh_head);
#endif
			mh->mh_head = NULL;
		} else if (eth && eop) {
			struct mbuf *m = mh->mh_head;

			t3_rx_eth(adap, m, ethpad);

			/*
			 * The T304 sends incoming packets on any qset.  If LRO
			 * is also enabled, we could end up sending a packet up
			 * lro_ctrl->ifp's input.  That is incorrect.
			 *
			 * The mbuf's rcvif was derived from the cpl header and
			 * is accurate.  Skip LRO and just use that.
			 */
#if defined(INET6) || defined(INET)
			skip_lro = __predict_false(qs->port->ifp != m->m_pkthdr.rcvif);

			if (lro_enabled && lro_ctrl->lro_cnt && !skip_lro &&
			    tcp_lro_rx(lro_ctrl, m, 0) == 0) {
				/* successfully queued for LRO */
			} else
#endif
			{
				/*
				 * LRO not enabled, packet unsuitable for LRO,
				 * or unable to queue.  Pass it up right now in
				 * either case.
				 */
				struct ifnet *ifp = m->m_pkthdr.rcvif;
				(*ifp->if_input)(ifp, m);
			}
			mh->mh_head = NULL;
		}

		r++;
		if (__predict_false(++rspq->cidx == rspq->size)) {
			rspq->cidx = 0;
			rspq->gen ^= 1;
			r = rspq->desc;
		}

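		/*
		 * Return response queue credits to the hardware in batches
		 * of 64 instead of one register write per response.
		 */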
		if (++rspq->credits >= 64) {
			refill_rspq(adap, rspq, rspq->credits);
			rspq->credits = 0;
		}
		__refill_fl_lt(adap, &qs->fl[0], 32);
		__refill_fl_lt(adap, &qs->fl[1], 32);
		--budget_left;
	}

#if defined(INET6) || defined(INET)
	/* Flush LRO */
	while (!SLIST_EMPTY(&lro_ctrl->lro_active)) {
		struct lro_entry *queued = SLIST_FIRST(&lro_ctrl->lro_active);
		SLIST_REMOVE_HEAD(&lro_ctrl->lro_active, next);
		tcp_lro_flush(lro_ctrl, queued);
	}
#endif

	if (sleeping)
		check_ring_db(adap, qs, sleeping);

	mb();  /* commit Tx queue processed updates */
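	/*
	 * txq_stopped is a bitmask with TXQ_ETH in bit 0; the ethernet queue
	 * is restarted by its own task, so only a set TXQ_OFLD or TXQ_CTRL
	 * bit (a value greater than 1) requires restart_tx().
	 */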
	if (__predict_false(qs->txq_stopped > 1))
		restart_tx(qs);

	__refill_fl_lt(adap, &qs->fl[0], 512);
	__refill_fl_lt(adap, &qs->fl[1], 512);
	budget -= budget_left;
	return (budget);
}

/*
 * A helper function that processes responses and issues GTS.
 */
static __inline int
process_responses_gts(adapter_t *adap, struct sge_rspq *rq)
{
	int work;
	static int last_holdoff = 0;

	work = process_responses(adap, rspq_to_qset(rq), -1);

	if (cxgb_debug && (rq->next_holdoff != last_holdoff)) {
		printf("next_holdoff=%d\n", rq->next_holdoff);
		last_holdoff = rq->next_holdoff;
	}
	t3_write_reg(adap, A_SG_GTS, V_RSPQ(rq->cntxt_id) |
	    V_NEWTIMER(rq->next_holdoff) | V_NEWINDEX(rq->cidx));

	return (work);
}

/*
 * Interrupt handler for legacy INTx interrupts for T3B-based cards.
 * Handles data events from SGE response queues as well as error and other
 * async events as they all use the same interrupt pin.  We use one SGE
 * response queue per port in this mode and protect all response queues with
 * queue 0's lock.
 */
void
t3b_intr(void *data)
{
	uint32_t i, map;
	adapter_t *adap = data;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;

	t3_write_reg(adap, A_PL_CLI, 0);
	map = t3_read_reg(adap, A_SG_DATA_INTR);

	if (!map)
		return;

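	/*
	 * On an error interrupt, mask further PL interrupts and defer to the
	 * slow task; the read-back flushes the posted write before the
	 * taskqueue runs.
	 */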
	if (__predict_false(map & F_ERRINTR)) {
		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
	}

	mtx_lock(&q0->lock);
	for_each_port(adap, i)
		if (map & (1 << i))
			process_responses_gts(adap, &adap->sge.qs[i].rspq);
	mtx_unlock(&q0->lock);
}

/*
 * The MSI interrupt handler.  This needs to handle data events from SGE
 * response queues as well as error and other async events as they all use
 * the same MSI vector.  We use one SGE response queue per port in this mode
 * and protect all response queues with queue 0's lock.
 */
void
t3_intr_msi(void *data)
{
	adapter_t *adap = data;
	struct sge_rspq *q0 = &adap->sge.qs[0].rspq;
	int i, new_packets = 0;

	mtx_lock(&q0->lock);

	for_each_port(adap, i)
		if (process_responses_gts(adap, &adap->sge.qs[i].rspq))
			new_packets = 1;
	mtx_unlock(&q0->lock);
	if (new_packets == 0) {
		t3_write_reg(adap, A_PL_INT_ENABLE0, 0);
		(void) t3_read_reg(adap, A_PL_INT_ENABLE0);
		taskqueue_enqueue(adap->tq, &adap->slow_intr_task);
	}
}

void
t3_intr_msix(void *data)
{
	struct sge_qset *qs = data;
	adapter_t *adap = qs->port->adapter;
	struct sge_rspq *rspq = &qs->rspq;

	if (process_responses_gts(adap, rspq) == 0)
		rspq->unhandled_irqs++;
}
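/*
 * sbuf budget for the queue dump handlers below, presumably sized as roughly
 * 400 formatted characters for each of up to 32 descriptors.
 */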
#define QDUMP_SBUF_SIZE		(32 * 400)
static int
t3_dump_rspq(SYSCTL_HANDLER_ARGS)
{
	struct sge_rspq *rspq;
	struct sge_qset *qs;
	int i, err, dump_end, idx;
	struct sbuf *sb;
	struct rsp_desc *rspd;
	uint32_t data[4];

	rspq = arg1;
	qs = rspq_to_qset(rspq);
	if (rspq->rspq_dump_count == 0)
		return (0);
	if (rspq->rspq_dump_count > RSPQ_Q_SIZE) {
		log(LOG_WARNING,
		    "dump count is too large %d\n", rspq->rspq_dump_count);
		rspq->rspq_dump_count = 0;
		return (EINVAL);
	}
	if (rspq->rspq_dump_start > (RSPQ_Q_SIZE-1)) {
		log(LOG_WARNING,
		    "dump start of %d is greater than queue size\n",
		    rspq->rspq_dump_start);
		rspq->rspq_dump_start = 0;
		return (EINVAL);
	}
	err = t3_sge_read_rspq(qs->port->adapter, rspq->cntxt_id, data);
	if (err)
		return (err);
	err = sysctl_wire_old_buffer(req, 0);
	if (err)
		return (err);
	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);

	sbuf_printf(sb, " \n index=%u size=%u MSI-X/RspQ=%u intr enable=%u intr armed=%u\n",
	    (data[0] & 0xffff), data[0] >> 16, ((data[2] >> 20) & 0x3f),
	    ((data[2] >> 26) & 1), ((data[2] >> 27) & 1));
	sbuf_printf(sb, " generation=%u CQ mode=%u FL threshold=%u\n",
	    ((data[2] >> 28) & 1), ((data[2] >> 31) & 1), data[3]);

	sbuf_printf(sb, " start=%d -> end=%d\n", rspq->rspq_dump_start,
	    (rspq->rspq_dump_start + rspq->rspq_dump_count) & (RSPQ_Q_SIZE-1));

	dump_end = rspq->rspq_dump_start + rspq->rspq_dump_count;
	for (i = rspq->rspq_dump_start; i < dump_end; i++) {
		idx = i & (RSPQ_Q_SIZE-1);

		rspd = &rspq->desc[idx];
		sbuf_printf(sb, "\tidx=%04d opcode=%02x cpu_idx=%x hash_type=%x cq_idx=%x\n",
		    idx, rspd->rss_hdr.opcode, rspd->rss_hdr.cpu_idx,
		    rspd->rss_hdr.hash_type, be16toh(rspd->rss_hdr.cq_idx));
		sbuf_printf(sb, "\trss_hash_val=%x flags=%08x len_cq=%x intr_gen=%x\n",
		    rspd->rss_hdr.rss_hash_val, be32toh(rspd->flags),
		    be32toh(rspd->len_cq), rspd->intr_gen);
	}

	err = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (err == 0)
		err = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (err);
}
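
/*
 * A typical way to drive the dump sysctls registered in
 * t3_add_configured_sysctls() (hypothetical node names; the exact path
 * depends on the device unit, port, and qset):
 *
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_start=0
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.dump_count=32
 *	sysctl dev.cxgbc.0.port0.qs0.rspq.qdump
 */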

static int
t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
{
	struct sge_txq *txq;
	struct sge_qset *qs;
	int i, j, err, dump_end;
	struct sbuf *sb;
	struct tx_desc *txd;
	uint32_t *WR, wr_hi, wr_lo, gen;
	uint32_t data[4];

	txq = arg1;
	qs = txq_to_qset(txq, TXQ_ETH);
	if (txq->txq_dump_count == 0) {
		return (0);
	}
	if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
		log(LOG_WARNING,
		    "dump count is too large %d\n", txq->txq_dump_count);
		txq->txq_dump_count = 1;
		return (EINVAL);
	}
	if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
		log(LOG_WARNING,
		    "dump start of %d is greater than queue size\n",
		    txq->txq_dump_start);
		txq->txq_dump_start = 0;
		return (EINVAL);
	}
	err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
	if (err)
		return (err);
	err = sysctl_wire_old_buffer(req, 0);
	if (err)
		return (err);
	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);

	sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",
	    (data[0] & 0x7fff), ((data[0] >> 15) & 1), (data[0] >> 16),
	    (data[1] & 0xffff), ((data[3] >> 4) & 7), ((data[3] >> 7) & 1));
	sbuf_printf(sb, " TUN=%u TOE=%u generation=%u uP token=%u valid=%u\n",
	    ((data[3] >> 8) & 1), ((data[3] >> 9) & 1), ((data[3] >> 10) & 1),
	    ((data[3] >> 11) & 0xfffff), ((data[3] >> 31) & 1));
	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
	    txq->txq_dump_start,
	    (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));

	dump_end = txq->txq_dump_start + txq->txq_dump_count;
	for (i = txq->txq_dump_start; i < dump_end; i++) {
		txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
		WR = (uint32_t *)txd->flit;
		wr_hi = ntohl(WR[0]);
		wr_lo = ntohl(WR[1]);
		gen = G_WR_GEN(wr_lo);

		sbuf_printf(sb, " wr_hi %08x wr_lo %08x gen %d\n",
		    wr_hi, wr_lo, gen);
		for (j = 2; j < 30; j += 4)
			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
	}
	err = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (err == 0)
		err = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (err);
}

static int
t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
{
	struct sge_txq *txq;
	struct sge_qset *qs;
	int i, j, err, dump_end;
	struct sbuf *sb;
	struct tx_desc *txd;
	uint32_t *WR, wr_hi, wr_lo, gen;

	txq = arg1;
	qs = txq_to_qset(txq, TXQ_CTRL);
	if (txq->txq_dump_count == 0) {
		return (0);
	}
	if (txq->txq_dump_count > 256) {
		log(LOG_WARNING,
		    "dump count is too large %d\n", txq->txq_dump_count);
		txq->txq_dump_count = 1;
		return (EINVAL);
	}
	if (txq->txq_dump_start > 255) {
		log(LOG_WARNING,
		    "dump start of %d is greater than queue size\n",
		    txq->txq_dump_start);
		txq->txq_dump_start = 0;
		return (EINVAL);
	}

	err = sysctl_wire_old_buffer(req, 0);
	if (err != 0)
		return (err);
	sb = sbuf_new_for_sysctl(NULL, NULL, QDUMP_SBUF_SIZE, req);
	sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
	    txq->txq_dump_start,
	    (txq->txq_dump_start + txq->txq_dump_count) & 255);

	dump_end = txq->txq_dump_start + txq->txq_dump_count;
	for (i = txq->txq_dump_start; i < dump_end; i++) {
		txd = &txq->desc[i & (255)];
		WR = (uint32_t *)txd->flit;
		wr_hi = ntohl(WR[0]);
		wr_lo = ntohl(WR[1]);
		gen = G_WR_GEN(wr_lo);

		sbuf_printf(sb, " wr_hi %08x wr_lo %08x gen %d\n",
		    wr_hi, wr_lo, gen);
		for (j = 2; j < 30; j += 4)
			sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
			    WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
	}
	err = sbuf_finish(sb);
	/* Output a trailing NUL. */
	if (err == 0)
		err = SYSCTL_OUT(req, "", 1);
	sbuf_delete(sb);
	return (err);
}

static int
t3_set_coalesce_usecs(SYSCTL_HANDLER_ARGS)
{
	adapter_t *sc = arg1;
	struct qset_params *qsp = &sc->params.sge.qset[0];
	int coalesce_usecs;
	struct sge_qset *qs;
	int i, j, err, nqsets = 0;
	struct mtx *lock;

	if ((sc->flags & FULL_INIT_DONE) == 0)
		return (ENXIO);

	coalesce_usecs = qsp->coalesce_usecs;
	err = sysctl_handle_int(oidp, &coalesce_usecs, arg2, req);

	if (err != 0) {
		return (err);
	}
	if (coalesce_usecs == qsp->coalesce_usecs)
		return (0);

	for (i = 0; i < sc->params.nports; i++)
		for (j = 0; j < sc->port[i].nqsets; j++)
			nqsets++;

	coalesce_usecs = max(1, coalesce_usecs);

	for (i = 0; i < nqsets; i++) {
		qs = &sc->sge.qs[i];
		qsp = &sc->params.sge.qset[i];
		qsp->coalesce_usecs = coalesce_usecs;

		lock = (sc->flags & USING_MSIX) ? &qs->rspq.lock :
			    &sc->sge.qs[0].rspq.lock;

		mtx_lock(lock);
		t3_update_qset_coalesce(qs, qsp);
		t3_write_reg(sc, A_SG_GTS, V_RSPQ(qs->rspq.cntxt_id) |
		    V_NEWTIMER(qs->rspq.holdoff_tmr));
		mtx_unlock(lock);
	}

	return (0);
}

static int
t3_pkt_timestamp(SYSCTL_HANDLER_ARGS)
{
	adapter_t *sc = arg1;
	int rc, timestamp;

	if ((sc->flags & FULL_INIT_DONE) == 0)
		return (ENXIO);

	timestamp = sc->timestamp;
	rc = sysctl_handle_int(oidp, &timestamp, arg2, req);

	if (rc != 0)
		return (rc);

	if (timestamp != sc->timestamp) {
		t3_set_reg_field(sc, A_TP_PC_CONFIG2, F_ENABLERXPKTTMSTPRSS,
		    timestamp ? F_ENABLERXPKTTMSTPRSS : 0);
		sc->timestamp = timestamp;
	}

	return (0);
}

void
t3_add_attach_sysctls(adapter_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	/* random information */
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
	    "firmware_version",
	    CTLFLAG_RD, sc->fw_version,
	    0, "firmware version");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "hw_revision",
	    CTLFLAG_RD, &sc->params.rev,
	    0, "chip model");
	SYSCTL_ADD_STRING(ctx, children, OID_AUTO,
	    "port_types",
	    CTLFLAG_RD, sc->port_types,
	    0, "type of ports");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "enable_debug",
	    CTLFLAG_RW, &cxgb_debug,
	    0, "enable verbose debugging output");
	SYSCTL_ADD_UQUAD(ctx, children, OID_AUTO, "tunq_coalesce",
	    CTLFLAG_RD, &sc->tunq_coalesce,
	    "#tunneled packets freed");
	SYSCTL_ADD_INT(ctx, children, OID_AUTO,
	    "txq_overrun",
	    CTLFLAG_RD, &txq_fills,
	    0, "#times txq overrun");
	SYSCTL_ADD_UINT(ctx, children, OID_AUTO,
	    "core_clock",
	    CTLFLAG_RD, &sc->params.vpd.cclk,
	    0, "core clock frequency (in kHz)");
}

static const char *rspq_name = "rspq";
static const char *txq_names[] =
{
	"txq_eth",
	"txq_ofld",
	"txq_ctrl"
};

static int
sysctl_handle_macstat(SYSCTL_HANDLER_ARGS)
{
	struct port_info *p = arg1;
	uint64_t *parg;

	if (!p)
		return (EINVAL);

	parg = (uint64_t *)((uint8_t *)&p->mac.stats + arg2);
	PORT_LOCK(p);
	t3_mac_update_stats(&p->mac);
	PORT_UNLOCK(p);

	return (sysctl_handle_64(oidp, parg, 0, req));
}
void
t3_add_configured_sysctls(adapter_t *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	int i, j;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "intr_coal",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    0, t3_set_coalesce_usecs,
	    "I", "interrupt coalescing timer (us)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
	    "pkt_timestamp",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    0, t3_pkt_timestamp,
	    "I", "provide packet timestamp instead of connection hash");

	for (i = 0; i < sc->params.nports; i++) {
		struct port_info *pi = &sc->port[i];
		struct sysctl_oid *poid;
		struct sysctl_oid_list *poidlist;
		struct mac_stats *mstats = &pi->mac.stats;

		snprintf(pi->namebuf, PORT_NAME_LEN, "port%d", i);
		poid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO,
		    pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
		poidlist = SYSCTL_CHILDREN(poid);
		SYSCTL_ADD_UINT(ctx, poidlist, OID_AUTO,
		    "nqsets", CTLFLAG_RD, &pi->nqsets,
		    0, "#queue sets");

		for (j = 0; j < pi->nqsets; j++) {
			struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
			struct sysctl_oid *qspoid, *rspqpoid, *txqpoid,
					  *ctrlqpoid, *lropoid;
			struct sysctl_oid_list *qspoidlist, *rspqpoidlist,
					       *txqpoidlist, *ctrlqpoidlist,
					       *lropoidlist;
			struct sge_txq *txq = &qs->txq[TXQ_ETH];

			snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);

			qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
			    qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
			qspoidlist = SYSCTL_CHILDREN(qspoid);

			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl0_empty",
					CTLFLAG_RD, &qs->fl[0].empty, 0,
					"freelist #0 empty");
			SYSCTL_ADD_UINT(ctx, qspoidlist, OID_AUTO, "fl1_empty",
					CTLFLAG_RD, &qs->fl[1].empty, 0,
					"freelist #1 empty");

			rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
			rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);

			txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
			txqpoidlist = SYSCTL_CHILDREN(txqpoid);

			ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
			ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);

			lropoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
			    "lro_stats", CTLFLAG_RD, NULL, "LRO statistics");
			lropoidlist = SYSCTL_CHILDREN(lropoid);

			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
			    CTLFLAG_RD, &qs->rspq.size,
			    0, "#entries in response queue");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
			    CTLFLAG_RD, &qs->rspq.cidx,
			    0, "consumer index");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
			    CTLFLAG_RD, &qs->rspq.credits,
			    0, "#credits");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "starved",
			    CTLFLAG_RD, &qs->rspq.starved,
			    0, "#times starved");
			SYSCTL_ADD_UAUTO(ctx, rspqpoidlist, OID_AUTO, "phys_addr",
			    CTLFLAG_RD, &qs->rspq.phys_addr,
			    "physical address of the queue");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_start",
			    CTLFLAG_RW, &qs->rspq.rspq_dump_start,
			    0, "start rspq dump entry");
			SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
			    CTLFLAG_RW, &qs->rspq.rspq_dump_count,
			    0, "#rspq entries to dump");
			SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
			    CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
			    0, t3_dump_rspq, "A", "dump of the response queue");

			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "dropped",
			    CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
			    "#tunneled packets dropped");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
			    CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
			    0, "#tunneled packets waiting to be sent");
#if 0
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,
			    0, "#tunneled packets queue producer index");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_cidx",
			    CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_cons,
			    0, "#tunneled packets queue consumer index");
#endif
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "processed",
			    CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
			    0, "#tunneled packets processed by the card");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "cleaned",
			    CTLFLAG_RD, &txq->cleaned,
			    0, "#tunneled packets cleaned");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "in_use",
			    CTLFLAG_RD, &txq->in_use,
			    0, "#tunneled packet slots in use");
			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "frees",
			    CTLFLAG_RD, &txq->txq_frees,
			    "#tunneled packets freed");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "skipped",
			    CTLFLAG_RD, &txq->txq_skipped,
			    0, "#tunneled packet descriptors skipped");
			SYSCTL_ADD_UQUAD(ctx, txqpoidlist, OID_AUTO, "coalesced",
			    CTLFLAG_RD, &txq->txq_coalesced,
			    "#tunneled packets coalesced");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "enqueued",
			    CTLFLAG_RD, &txq->txq_enqueued,
			    0, "#tunneled packets enqueued to hardware");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "stopped_flags",
			    CTLFLAG_RD, &qs->txq_stopped,
			    0, "tx queues stopped");
			SYSCTL_ADD_UAUTO(ctx, txqpoidlist, OID_AUTO, "phys_addr",
			    CTLFLAG_RD, &txq->phys_addr,
			    "physical address of the queue");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "qgen",
			    CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
			    0, "txq generation");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_cidx",
			    CTLFLAG_RD, &txq->cidx,
			    0, "hardware queue cidx");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "hw_pidx",
			    CTLFLAG_RD, &txq->pidx,
			    0, "hardware queue pidx");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
			    0, "txq start idx for dump");
			SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
			    CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
			    0, "txq #entries to dump");
			SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
			    0, t3_dump_txq_eth, "A", "dump of the transmit queue");

			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
			    0, "ctrlq start idx for dump");
			SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
			    CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
			    0, "ctrlq #entries to dump");
			SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
			    CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
			    0, t3_dump_txq_ctrl, "A", "dump of the control queue");

			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_queued",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_queued, 0, NULL);
			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_flushed",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_flushed, 0, NULL);
			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_bad_csum",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_bad_csum, 0, NULL);
			SYSCTL_ADD_INT(ctx, lropoidlist, OID_AUTO, "lro_cnt",
			    CTLFLAG_RD, &qs->lro.ctrl.lro_cnt, 0, NULL);
		}

		/* Now add a node for mac stats. */
		poid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO, "mac_stats",
		    CTLFLAG_RD, NULL, "MAC statistics");
		poidlist = SYSCTL_CHILDREN(poid);

		/*
		 * We (ab)use the length argument (arg2) to pass on the offset
		 * of the data that we are interested in.  This is only required
		 * for the quad counters that are updated from the hardware (we
		 * make sure that we return the latest value).
		 * sysctl_handle_macstat first updates *all* the counters from
		 * the hardware, and then returns the latest value of the
		 * requested counter.  Best would be to update only the
		 * requested counter from hardware, but t3_mac_update_stats()
		 * hides all the register details and we don't want to dive into
		 * all that here.
		 */
#define CXGB_SYSCTL_ADD_QUAD(a)	SYSCTL_ADD_OID(ctx, poidlist, OID_AUTO, #a, \
    (CTLTYPE_U64 | CTLFLAG_RD), pi, offsetof(struct mac_stats, a), \
    sysctl_handle_macstat, "QU", 0)
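		/*
		 * For example, CXGB_SYSCTL_ADD_QUAD(tx_octets) registers a
		 * read-only 64-bit node named "tx_octets" whose handler
		 * receives offsetof(struct mac_stats, tx_octets) in arg2.
		 */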
		CXGB_SYSCTL_ADD_QUAD(tx_octets);
		CXGB_SYSCTL_ADD_QUAD(tx_octets_bad);
		CXGB_SYSCTL_ADD_QUAD(tx_frames);
		CXGB_SYSCTL_ADD_QUAD(tx_mcast_frames);
		CXGB_SYSCTL_ADD_QUAD(tx_bcast_frames);
		CXGB_SYSCTL_ADD_QUAD(tx_pause);
		CXGB_SYSCTL_ADD_QUAD(tx_deferred);
		CXGB_SYSCTL_ADD_QUAD(tx_late_collisions);
		CXGB_SYSCTL_ADD_QUAD(tx_total_collisions);
		CXGB_SYSCTL_ADD_QUAD(tx_excess_collisions);
		CXGB_SYSCTL_ADD_QUAD(tx_underrun);
		CXGB_SYSCTL_ADD_QUAD(tx_len_errs);
		CXGB_SYSCTL_ADD_QUAD(tx_mac_internal_errs);
		CXGB_SYSCTL_ADD_QUAD(tx_excess_deferral);
		CXGB_SYSCTL_ADD_QUAD(tx_fcs_errs);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_64);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_65_127);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_128_255);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_256_511);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_512_1023);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_1024_1518);
		CXGB_SYSCTL_ADD_QUAD(tx_frames_1519_max);
		CXGB_SYSCTL_ADD_QUAD(rx_octets);
		CXGB_SYSCTL_ADD_QUAD(rx_octets_bad);
		CXGB_SYSCTL_ADD_QUAD(rx_frames);
		CXGB_SYSCTL_ADD_QUAD(rx_mcast_frames);
		CXGB_SYSCTL_ADD_QUAD(rx_bcast_frames);
		CXGB_SYSCTL_ADD_QUAD(rx_pause);
		CXGB_SYSCTL_ADD_QUAD(rx_fcs_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_align_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_symbol_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_data_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_sequence_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_runt);
		CXGB_SYSCTL_ADD_QUAD(rx_jabber);
		CXGB_SYSCTL_ADD_QUAD(rx_short);
		CXGB_SYSCTL_ADD_QUAD(rx_too_long);
		CXGB_SYSCTL_ADD_QUAD(rx_mac_internal_errs);
		CXGB_SYSCTL_ADD_QUAD(rx_cong_drops);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_64);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_65_127);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_128_255);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_256_511);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_512_1023);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_1024_1518);
		CXGB_SYSCTL_ADD_QUAD(rx_frames_1519_max);
#undef CXGB_SYSCTL_ADD_QUAD

#define CXGB_SYSCTL_ADD_ULONG(a) SYSCTL_ADD_ULONG(ctx, poidlist, OID_AUTO, #a, \
    CTLFLAG_RD, &mstats->a, 0)
		CXGB_SYSCTL_ADD_ULONG(tx_fifo_parity_err);
		CXGB_SYSCTL_ADD_ULONG(rx_fifo_parity_err);
		CXGB_SYSCTL_ADD_ULONG(tx_fifo_urun);
		CXGB_SYSCTL_ADD_ULONG(rx_fifo_ovfl);
		CXGB_SYSCTL_ADD_ULONG(serdes_signal_loss);
		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_ctc_err);
		CXGB_SYSCTL_ADD_ULONG(xaui_pcs_align_change);
		CXGB_SYSCTL_ADD_ULONG(num_toggled);
		CXGB_SYSCTL_ADD_ULONG(num_resets);
		CXGB_SYSCTL_ADD_ULONG(link_faults);
#undef CXGB_SYSCTL_ADD_ULONG
	}
}

/**
 *	t3_get_desc - dump an SGE descriptor for debugging purposes
 *	@qs: the queue set
 *	@qnum: identifies the specific queue (0..2: Tx, 3: response, 4..5: Rx)
 *	@idx: the descriptor index in the queue
 *	@data: where to dump the descriptor contents
 *
 *	Dumps the contents of a HW descriptor of an SGE queue.  Returns the
 *	size of the descriptor.
 */
int
t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
		unsigned char *data)
{
	if (qnum >= 6)
		return (EINVAL);

	if (qnum < 3) {
		if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
			return (EINVAL);
		memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));
		return (sizeof(struct tx_desc));
	}

	if (qnum == 3) {
		if (!qs->rspq.desc || idx >= qs->rspq.size)
			return (EINVAL);
		memcpy(data, &qs->rspq.desc[idx], sizeof(struct rsp_desc));
		return (sizeof(struct rsp_desc));
	}

	qnum -= 4;
	if (!qs->fl[qnum].desc || idx >= qs->fl[qnum].size)
		return (EINVAL);
	memcpy(data, &qs->fl[qnum].desc[idx], sizeof(struct rx_desc));
	return (sizeof(struct rx_desc));
}