cxgb_sge.c: diff of revision 175872 (deleted lines) against revision 176472 (added lines)
1/**************************************************************************
2
3Copyright (c) 2007, Chelsio Inc.
4All rights reserved.
5
6Redistribution and use in source and binary forms, with or without
7modification, are permitted provided that the following conditions are met:
8

--- 16 unchanged lines hidden (view full) ---

25ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26POSSIBILITY OF SUCH DAMAGE.
27
28***************************************************************************/
29#define DEBUG_BUFRING
30
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_sge.c 175872 2008-02-01 19:36:27Z phk $");
33__FBSDID("$FreeBSD: head/sys/dev/cxgb/cxgb_sge.c 176472 2008-02-23 01:06:17Z kmacy $");
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/kernel.h>
38#include <sys/module.h>
39#include <sys/bus.h>
40#include <sys/conf.h>
41#include <machine/bus.h>

--- 26 unchanged lines hidden (view full) ---

68#include <cxgb_include.h>
69#include <sys/mvec.h>
70#else
71#include <dev/cxgb/cxgb_include.h>
72#include <dev/cxgb/sys/mvec.h>
73#endif
74
75int txq_fills = 0;
76static int recycle_enable = 1;
76/*
77 * XXX don't re-enable this until TOE stops assuming
78 * we have an m_ext
79 */
80static int recycle_enable = 0;
77extern int cxgb_txq_buf_ring_size;
78int cxgb_cached_allocations;
79int cxgb_cached;
81extern int cxgb_txq_buf_ring_size;
82int cxgb_cached_allocations;
83int cxgb_cached;
80int cxgb_ext_freed;
84int cxgb_ext_freed = 0;
85int cxgb_ext_inited = 0;
81extern int cxgb_use_16k_clusters;
82extern int cxgb_pcpu_cache_enable;
83
84
85#define USE_GTS 0
86
87#define SGE_RX_SM_BUF_SIZE 1536
88#define SGE_RX_DROP_THRES 16

--- 153 unchanged lines hidden (view full) ---

242void
243t3_sge_init(adapter_t *adap, struct sge_params *p)
244{
245 u_int ctrl, ups;
246
247 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
248
249 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
86extern int cxgb_use_16k_clusters;
87extern int cxgb_pcpu_cache_enable;
88
89
90#define USE_GTS 0
91
92#define SGE_RX_SM_BUF_SIZE 1536
93#define SGE_RX_DROP_THRES 16

--- 153 unchanged lines hidden (view full) ---

247void
248t3_sge_init(adapter_t *adap, struct sge_params *p)
249{
250 u_int ctrl, ups;
251
252 ups = 0; /* = ffs(pci_resource_len(adap->pdev, 2) >> 12); */
253
254 ctrl = F_DROPPKT | V_PKTSHIFT(2) | F_FLMODE | F_AVOIDCQOVFL |
250 F_CQCRDTCTRL |
255 F_CQCRDTCTRL | F_CONGMODE | F_TNLFLMODE | F_FATLPERREN |
251 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
252 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
253#if SGE_NUM_GENBITS == 1
254 ctrl |= F_EGRGENCTRL;
255#endif
256 if (adap->params.rev > 0) {
257 if (!(adap->flags & (USING_MSIX | USING_MSI)))
258 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
256 V_HOSTPAGESIZE(PAGE_SHIFT - 11) | F_BIGENDIANINGRESS |
257 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING;
258#if SGE_NUM_GENBITS == 1
259 ctrl |= F_EGRGENCTRL;
260#endif
261 if (adap->params.rev > 0) {
262 if (!(adap->flags & (USING_MSIX | USING_MSI)))
263 ctrl |= F_ONEINTMULTQ | F_OPTONEINTMULTQ;
259 ctrl |= F_CQCRDTCTRL | F_AVOIDCQOVFL;
260 }
261 t3_write_reg(adap, A_SG_CONTROL, ctrl);
262 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
263 V_LORCQDRBTHRSH(512));
264 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
265 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
266 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
264 }
265 t3_write_reg(adap, A_SG_CONTROL, ctrl);
266 t3_write_reg(adap, A_SG_EGR_RCQ_DRB_THRSH, V_HIRCQDRBTHRSH(512) |
267 V_LORCQDRBTHRSH(512));
268 t3_write_reg(adap, A_SG_TIMER_TICK, core_ticks_per_usec(adap) / 10);
269 t3_write_reg(adap, A_SG_CMDQ_CREDIT_TH, V_THRESHOLD(32) |
270 V_TIMEOUT(200 * core_ticks_per_usec(adap)));
267 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH, 1000);
271 t3_write_reg(adap, A_SG_HI_DRB_HI_THRSH,
272 adap->params.rev < T3_REV_C ? 1000 : 500);
268 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
269 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
270 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
271 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
272 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
273}
274
275

--- 12 unchanged lines hidden (view full) ---

288
289/**
290 * get_imm_packet - return the next ingress packet buffer from a response
291 * @resp: the response descriptor containing the packet data
292 *
293 * Return a packet containing the immediate data of the given response.
294 */
295static int
273 t3_write_reg(adap, A_SG_HI_DRB_LO_THRSH, 256);
274 t3_write_reg(adap, A_SG_LO_DRB_HI_THRSH, 1000);
275 t3_write_reg(adap, A_SG_LO_DRB_LO_THRSH, 256);
276 t3_write_reg(adap, A_SG_OCO_BASE, V_BASE1(0xfff));
277 t3_write_reg(adap, A_SG_DRB_PRI_THRESH, 63 * 1024);
278}
279
280

--- 12 unchanged lines hidden (view full) ---

293
294/**
295 * get_imm_packet - return the next ingress packet buffer from a response
296 * @resp: the response descriptor containing the packet data
297 *
298 * Return a packet containing the immediate data of the given response.
299 */
300static int
296get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m, void *cl, uint32_t flags)
301get_imm_packet(adapter_t *sc, const struct rsp_desc *resp, struct mbuf *m)
297{
298
302{
303
299 m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE;
304 m->m_len = m->m_pkthdr.len = IMMED_PKT_SIZE;
305 m->m_ext.ext_buf = NULL;
306 m->m_ext.ext_type = 0;
300 memcpy(mtod(m, uint8_t *), resp->imm_data, IMMED_PKT_SIZE);
307 memcpy(mtod(m, uint8_t *), resp->imm_data, IMMED_PKT_SIZE);
301 return (0);
302
308 return (0);
303}
304
305static __inline u_int
306flits_to_desc(u_int n)
307{
308 return (flit_desc_map[n]);
309}
310
309}
310
311static __inline u_int
312flits_to_desc(u_int n)
313{
314 return (flit_desc_map[n]);
315}
316
317#define SGE_PARERR (F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
318 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
319 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
320 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
321 F_HIRCQPARITYERROR)
322#define SGE_FRAMINGERR (F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR)
323#define SGE_FATALERR (SGE_PARERR | SGE_FRAMINGERR | F_RSPQCREDITOVERFOW | \
324 F_RSPQDISABLED)
325
326/**
327 * t3_sge_err_intr_handler - SGE async event interrupt handler
328 * @adapter: the adapter
329 *
330 * Interrupt handler for SGE asynchronous (non-data) events.
331 */
311void
312t3_sge_err_intr_handler(adapter_t *adapter)
313{
314 unsigned int v, status;
315
332void
333t3_sge_err_intr_handler(adapter_t *adapter)
334{
335 unsigned int v, status;
336
316
317 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
337 status = t3_read_reg(adapter, A_SG_INT_CAUSE);
318
338 if (status & SGE_PARERR)
339 CH_ALERT(adapter, "SGE parity error (0x%x)\n",
340 status & SGE_PARERR);
341 if (status & SGE_FRAMINGERR)
342 CH_ALERT(adapter, "SGE framing error (0x%x)\n",
343 status & SGE_FRAMINGERR);
319 if (status & F_RSPQCREDITOVERFOW)
320 CH_ALERT(adapter, "SGE response queue credit overflow\n");
321
322 if (status & F_RSPQDISABLED) {
323 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
324
325 CH_ALERT(adapter,
326 "packet delivered to disabled response queue (0x%x)\n",
327 (v >> S_RSPQ0DISABLED) & 0xff);
328 }
329
330 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
344 if (status & F_RSPQCREDITOVERFOW)
345 CH_ALERT(adapter, "SGE response queue credit overflow\n");
346
347 if (status & F_RSPQDISABLED) {
348 v = t3_read_reg(adapter, A_SG_RSPQ_FL_STATUS);
349
350 CH_ALERT(adapter,
351 "packet delivered to disabled response queue (0x%x)\n",
352 (v >> S_RSPQ0DISABLED) & 0xff);
353 }
354
355 t3_write_reg(adapter, A_SG_INT_CAUSE, status);
331 if (status & (F_RSPQCREDITOVERFOW | F_RSPQDISABLED))
356 if (status & SGE_FATALERR)
332 t3_fatal_err(adapter);
333}
334
335void
336t3_sge_prep(adapter_t *adap, struct sge_params *p)
337{
338 int i;
339
340 /* XXX Does ETHER_ALIGN need to be accounted for here? */
341 p->max_pkt_size = adap->sge.qs[0].fl[1].buf_size - sizeof(struct cpl_rx_data);
342
343 for (i = 0; i < SGE_QSETS; ++i) {
344 struct qset_params *q = p->qset + i;
345
357 t3_fatal_err(adapter);
358}
359
360void
361t3_sge_prep(adapter_t *adap, struct sge_params *p)
362{
363 int i;
364
365 /* XXX Does ETHER_ALIGN need to be accounted for here? */
366 p->max_pkt_size = adap->sge.qs[0].fl[1].buf_size - sizeof(struct cpl_rx_data);
367
368 for (i = 0; i < SGE_QSETS; ++i) {
369 struct qset_params *q = p->qset + i;
370
346 q->polling = adap->params.rev > 0;
347
348 if (adap->params.nports > 2) {
349 q->coalesce_nsecs = 50000;
350 } else {
351#ifdef INVARIANTS
352 q->coalesce_nsecs = 10000;
353#else
354 q->coalesce_nsecs = 5000;
355#endif
356 }
371 if (adap->params.nports > 2) {
372 q->coalesce_nsecs = 50000;
373 } else {
374#ifdef INVARIANTS
375 q->coalesce_nsecs = 10000;
376#else
377 q->coalesce_nsecs = 5000;
378#endif
379 }
380 q->polling = adap->params.rev > 0;
357 q->rspq_size = RSPQ_Q_SIZE;
358 q->fl_size = FL_Q_SIZE;
359 q->jumbo_size = JUMBO_Q_SIZE;
360 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
361 q->txq_size[TXQ_OFLD] = 1024;
362 q->txq_size[TXQ_CTRL] = 256;
363 q->cong_thres = 0;
364 }

--- 103 unchanged lines hidden (view full) ---

468 */
469static void
470refill_fl(adapter_t *sc, struct sge_fl *q, int n)
471{
472 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
473 struct rx_desc *d = &q->desc[q->pidx];
474 struct refill_fl_cb_arg cb_arg;
475 caddr_t cl;
381 q->rspq_size = RSPQ_Q_SIZE;
382 q->fl_size = FL_Q_SIZE;
383 q->jumbo_size = JUMBO_Q_SIZE;
384 q->txq_size[TXQ_ETH] = TX_ETH_Q_SIZE;
385 q->txq_size[TXQ_OFLD] = 1024;
386 q->txq_size[TXQ_CTRL] = 256;
387 q->cong_thres = 0;
388 }

--- 103 unchanged lines hidden (view full) ---

492 */
493static void
494refill_fl(adapter_t *sc, struct sge_fl *q, int n)
495{
496 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
497 struct rx_desc *d = &q->desc[q->pidx];
498 struct refill_fl_cb_arg cb_arg;
499 caddr_t cl;
476 int err;
500 int err, count = 0;
477 int header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t);
478
479 cb_arg.error = 0;
480 while (n--) {
481 /*
482 * We only allocate a cluster, mbuf allocation happens after rx
483 */
484 if ((cl = cxgb_cache_get(q->zone)) == NULL) {

--- 37 unchanged lines hidden (view full) ---

522
523 if (++q->pidx == q->size) {
524 q->pidx = 0;
525 q->gen ^= 1;
526 sd = q->sdesc;
527 d = q->desc;
528 }
529 q->credits++;
501 int header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) + sizeof(struct m_ext_) + sizeof(uint32_t);
502
503 cb_arg.error = 0;
504 while (n--) {
505 /*
506 * We only allocate a cluster, mbuf allocation happens after rx
507 */
508 if ((cl = cxgb_cache_get(q->zone)) == NULL) {

--- 37 unchanged lines hidden (view full) ---

546
547 if (++q->pidx == q->size) {
548 q->pidx = 0;
549 q->gen ^= 1;
550 sd = q->sdesc;
551 d = q->desc;
552 }
553 q->credits++;
554 count++;
530 }
531
532done:
555 }
556
557done:
533 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
558 if (count)
559 t3_write_reg(sc, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
534}
535
536
537/**
538 * free_rx_bufs - free the Rx buffers on an SGE free list
539 * @sc: the controller softc
540 * @q: the SGE free list to clean up
541 *

--- 229 unchanged lines hidden (view full) ---

771{
772 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
773 return (0);
774}
775
776void
777t3_sge_deinit_sw(adapter_t *sc)
778{
560}
561
562
563/**
564 * free_rx_bufs - free the Rx buffers on an SGE free list
565 * @sc: the controller softc
566 * @q: the SGE free list to clean up
567 *

--- 229 unchanged lines hidden (view full) ---

797{
798 TASK_INIT(&pi->timer_reclaim_task, 0, sge_timer_reclaim, pi);
799 return (0);
800}
801
802void
803t3_sge_deinit_sw(adapter_t *sc)
804{
779 int i;
780
781 callout_drain(&sc->sge_timer_ch);
782 if (sc->tq)
783 taskqueue_drain(sc->tq, &sc->slow_intr_task);
784 for (i = 0; i < sc->params.nports; i++)
785 if (sc->port[i].tq != NULL)
786 taskqueue_drain(sc->port[i].tq, &sc->port[i].timer_reclaim_task);
787
788 mi_deinit();
789}
790
791/**
792 * refill_rspq - replenish an SGE response queue
793 * @adapter: the adapter
794 * @q: the response queue to replenish

--- 109 unchanged lines hidden (view full) ---

904{
905 txq->in_use += ndesc;
906 /*
907 * XXX we don't handle stopping of queue
908 * presumably start handles this when we bump against the end
909 */
910 txqs->gen = txq->gen;
911 txq->unacked += ndesc;
805
806 mi_deinit();
807}
808
809/**
810 * refill_rspq - replenish an SGE response queue
811 * @adapter: the adapter
812 * @q: the response queue to replenish

--- 109 unchanged lines hidden (view full) ---

922{
923 txq->in_use += ndesc;
924 /*
925 * XXX we don't handle stopping of queue
926 * presumably start handles this when we bump against the end
927 */
928 txqs->gen = txq->gen;
929 txq->unacked += ndesc;
912 txqs->compl = (txq->unacked & 8) << (S_WR_COMPL - 3);
913 txq->unacked &= 7;
930 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
931 txq->unacked &= 31;
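As an illustrative aside (not code from cxgb_sge.c): the pair of lines above implements a "request a completion every 2^k work requests" scheme. ndesc is added to a running unacked counter, bit k of that counter is shifted into the work-request completion-flag position, and the counter is then masked back below 2^k. The change moves k from 3 (a completion every 8 descriptors) to 5 (every 32), so the hardware reports completions less often. A minimal standalone sketch of the same bit trick, using hypothetical names (DEMO_WR_COMPL_BIT stands in for S_WR_COMPL; its real value is not assumed here):

#include <stdint.h>
#include <stdio.h>

#define COMPL_SHIFT        5                    /* request a completion every 1 << 5 = 32 WRs */
#define COMPL_EVERY        (1u << COMPL_SHIFT)
#define DEMO_WR_COMPL_BIT  21                   /* hypothetical flag bit position, for illustration */

static unsigned int unacked;                    /* running count of unacknowledged WRs */

/* Returns the flag word to OR into the next work-request header.
 * Assumes ndesc <= COMPL_EVERY per call, as in the driver, so at most
 * one completion request is ever pending. */
static uint32_t
maybe_request_completion(unsigned int ndesc)
{
	uint32_t compl;

	unacked += ndesc;
	/* Bit COMPL_SHIFT becomes set once per COMPL_EVERY descriptors; shift it
	 * into the completion-flag position, then keep only the low-order bits. */
	compl = (unacked & COMPL_EVERY) << (DEMO_WR_COMPL_BIT - COMPL_SHIFT);
	unacked &= COMPL_EVERY - 1;
	return (compl);
}

int
main(void)
{
	for (int i = 0; i < 8; i++)
		printf("batch %d: compl flag = 0x%x\n", i,
		    (unsigned)maybe_request_completion(8));
	return (0);
}

With 8-descriptor batches, the flag comes back non-zero on every fourth call, matching the once-per-32 cadence the new code selects.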
914 txqs->pidx = txq->pidx;
915 txq->pidx += ndesc;
916#ifdef INVARIANTS
917 if (((txqs->pidx > txq->cidx) &&
918 (txq->pidx < txqs->pidx) &&
919 (txq->pidx >= txq->cidx)) ||
920 ((txqs->pidx < txq->cidx) &&
921 (txq->pidx >= txq-> cidx)) ||

--- 282 unchanged lines hidden (view full) ---

1204 uint32_t wr_hi, wr_lo, sgl_flits;
1205 bus_dma_segment_t segs[TX_MAX_SEGS];
1206
1207 struct tx_desc *txd;
1208 struct mbuf_vec *mv;
1209 struct mbuf_iovec *mi;
1210
1211 DPRINTF("t3_encap cpu=%d ", curcpu);
932 txqs->pidx = txq->pidx;
933 txq->pidx += ndesc;
934#ifdef INVARIANTS
935 if (((txqs->pidx > txq->cidx) &&
936 (txq->pidx < txqs->pidx) &&
937 (txq->pidx >= txq->cidx)) ||
938 ((txqs->pidx < txq->cidx) &&
939 (txq->pidx >= txq-> cidx)) ||

--- 282 unchanged lines hidden (view full) ---

1222 uint32_t wr_hi, wr_lo, sgl_flits;
1223 bus_dma_segment_t segs[TX_MAX_SEGS];
1224
1225 struct tx_desc *txd;
1226 struct mbuf_vec *mv;
1227 struct mbuf_iovec *mi;
1228
1229 DPRINTF("t3_encap cpu=%d ", curcpu);
1212 KASSERT(qs->idx == 0, ("invalid qs %d", qs->idx));
1213
1214 mi = NULL;
1215 pi = qs->port;
1216 sc = pi->adapter;
1217 txq = &qs->txq[TXQ_ETH];
1218 txd = &txq->desc[txq->pidx];
1219 txsd = &txq->sdesc[txq->pidx];
1220 sgl = txq->txq_sgl;

--- 84 unchanged lines hidden (view full) ---

1305 hdr->cntrl = htonl(cntrl);
1306 mlen = m0->m_pkthdr.len;
1307 hdr->len = htonl(mlen | 0x80000000);
1308
1309 DPRINTF("tso buf len=%d\n", mlen);
1310 undersized = (((tmpmi->mi_len < TCPPKTHDRSIZE) &&
1311 (m0->m_flags & M_VLANTAG)) ||
1312 (tmpmi->mi_len < TCPPKTHDRSIZE - ETHER_VLAN_ENCAP_LEN));
1230
1231 mi = NULL;
1232 pi = qs->port;
1233 sc = pi->adapter;
1234 txq = &qs->txq[TXQ_ETH];
1235 txd = &txq->desc[txq->pidx];
1236 txsd = &txq->sdesc[txq->pidx];
1237 sgl = txq->txq_sgl;

--- 84 unchanged lines hidden (view full) ---

1322 hdr->cntrl = htonl(cntrl);
1323 mlen = m0->m_pkthdr.len;
1324 hdr->len = htonl(mlen | 0x80000000);
1325
1326 DPRINTF("tso buf len=%d\n", mlen);
1327 undersized = (((tmpmi->mi_len < TCPPKTHDRSIZE) &&
1328 (m0->m_flags & M_VLANTAG)) ||
1329 (tmpmi->mi_len < TCPPKTHDRSIZE - ETHER_VLAN_ENCAP_LEN));
1330
1313 if (__predict_false(undersized)) {
1314 pkthdr = tmp;
1315 dump_mi(mi);
1316 panic("discontig packet - fixxorz");
1317 } else
1318 pkthdr = m0->m_data;
1319
1320 if (__predict_false(m0->m_flags & M_VLANTAG)) {

--- 224 unchanged lines hidden (view full) ---

1545 mtx_lock(&q->lock);
1546again: reclaim_completed_tx_imm(q);
1547
1548 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1549 if (__predict_false(ret)) {
1550 if (ret == 1) {
1551 mtx_unlock(&q->lock);
1552 log(LOG_ERR, "no desc available\n");
1331 if (__predict_false(undersized)) {
1332 pkthdr = tmp;
1333 dump_mi(mi);
1334 panic("discontig packet - fixxorz");
1335 } else
1336 pkthdr = m0->m_data;
1337
1338 if (__predict_false(m0->m_flags & M_VLANTAG)) {

--- 224 unchanged lines hidden (view full) ---

1563 mtx_lock(&q->lock);
1564again: reclaim_completed_tx_imm(q);
1565
1566 ret = check_desc_avail(adap, q, m, 1, TXQ_CTRL);
1567 if (__predict_false(ret)) {
1568 if (ret == 1) {
1569 mtx_unlock(&q->lock);
1570 log(LOG_ERR, "no desc available\n");
1553
1554 return (ENOSPC);
1555 }
1556 goto again;
1557 }
1558 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
1559
1560 q->in_use++;
1561 if (++q->pidx >= q->size) {

--- 43 unchanged lines hidden (view full) ---

1605 smp_mb();
1606
1607 if (should_restart_tx(q) &&
1608 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1609 goto again;
1610 q->stops++;
1611 }
1612 mtx_unlock(&q->lock);
1571 return (ENOSPC);
1572 }
1573 goto again;
1574 }
1575 write_imm(&q->desc[q->pidx], m, m->m_len, q->gen);
1576
1577 q->in_use++;
1578 if (++q->pidx >= q->size) {

--- 43 unchanged lines hidden (view full) ---

1622 smp_mb();
1623
1624 if (should_restart_tx(q) &&
1625 test_and_clear_bit(TXQ_CTRL, &qs->txq_stopped))
1626 goto again;
1627 q->stops++;
1628 }
1629 mtx_unlock(&q->lock);
1630 wmb();
1613 t3_write_reg(adap, A_SG_KDOORBELL,
1614 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1615}
1616
1617
1618/*
1619 * Send a management message through control queue 0
1620 */

--- 22 unchanged lines hidden (view full) ---

1643
1644 for (i = 0; i < SGE_TXQ_PER_SET; i++)
1645 if (q->txq[i].txq_mr.br_ring != NULL) {
1646 free(q->txq[i].txq_mr.br_ring, M_DEVBUF);
1647 mtx_destroy(&q->txq[i].txq_mr.br_lock);
1648 }
1649 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1650 if (q->fl[i].desc) {
1631 t3_write_reg(adap, A_SG_KDOORBELL,
1632 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
1633}
1634
1635
1636/*
1637 * Send a management message through control queue 0
1638 */

--- 22 unchanged lines hidden (view full) ---

1661
1662 for (i = 0; i < SGE_TXQ_PER_SET; i++)
1663 if (q->txq[i].txq_mr.br_ring != NULL) {
1664 free(q->txq[i].txq_mr.br_ring, M_DEVBUF);
1665 mtx_destroy(&q->txq[i].txq_mr.br_lock);
1666 }
1667 for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
1668 if (q->fl[i].desc) {
1651 mtx_lock(&sc->sge.reg_lock);
1669 mtx_lock_spin(&sc->sge.reg_lock);
1652 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1670 t3_sge_disable_fl(sc, q->fl[i].cntxt_id);
1653 mtx_unlock(&sc->sge.reg_lock);
1671 mtx_unlock_spin(&sc->sge.reg_lock);
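As an illustrative aside (not code from cxgb_sge.c): this hunk, like the matching ones below, switches sc->sge.reg_lock from the default (sleepable) mutex operations to the spin-mutex operations. In FreeBSD, a mutex locked with mtx_lock_spin()/mtx_unlock_spin() must have been created with the MTX_SPIN flag, and code holding it may not sleep. A minimal sketch of that idiom with a hypothetical lock (demo_reg_lock stands in for the adapter's register lock; the corresponding mtx_init() change is assumed to live elsewhere in the driver):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx demo_reg_lock;	/* hypothetical stand-in for sc->sge.reg_lock */

static void
demo_reg_lock_init(void)
{
	/* MTX_SPIN is what makes mtx_lock_spin()/mtx_unlock_spin() legal on it. */
	mtx_init(&demo_reg_lock, "demo reg lock", NULL, MTX_SPIN);
}

static void
demo_program_hw_context(void)
{
	mtx_lock_spin(&demo_reg_lock);
	/* ... program SGE context registers; sleeping is not allowed here ... */
	mtx_unlock_spin(&demo_reg_lock);
}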
1654 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1655 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1656 q->fl[i].desc_map);
1657 bus_dma_tag_destroy(q->fl[i].desc_tag);
1658 bus_dma_tag_destroy(q->fl[i].entry_tag);
1659 }
1660 if (q->fl[i].sdesc) {
1661 free_rx_bufs(sc, &q->fl[i]);
1662 free(q->fl[i].sdesc, M_DEVBUF);
1663 }
1664 }
1665
1666 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
1667 if (q->txq[i].desc) {
1672 bus_dmamap_unload(q->fl[i].desc_tag, q->fl[i].desc_map);
1673 bus_dmamem_free(q->fl[i].desc_tag, q->fl[i].desc,
1674 q->fl[i].desc_map);
1675 bus_dma_tag_destroy(q->fl[i].desc_tag);
1676 bus_dma_tag_destroy(q->fl[i].entry_tag);
1677 }
1678 if (q->fl[i].sdesc) {
1679 free_rx_bufs(sc, &q->fl[i]);
1680 free(q->fl[i].sdesc, M_DEVBUF);
1681 }
1682 }
1683
1684 for (i = 0; i < SGE_TXQ_PER_SET; i++) {
1685 if (q->txq[i].desc) {
1668 mtx_lock(&sc->sge.reg_lock);
1686 mtx_lock_spin(&sc->sge.reg_lock);
1669 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
1687 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
1670 mtx_unlock(&sc->sge.reg_lock);
1688 mtx_unlock_spin(&sc->sge.reg_lock);
1671 bus_dmamap_unload(q->txq[i].desc_tag,
1672 q->txq[i].desc_map);
1673 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
1674 q->txq[i].desc_map);
1675 bus_dma_tag_destroy(q->txq[i].desc_tag);
1676 bus_dma_tag_destroy(q->txq[i].entry_tag);
1677 MTX_DESTROY(&q->txq[i].lock);
1678 }
1679 if (q->txq[i].sdesc) {
1680 free(q->txq[i].sdesc, M_DEVBUF);
1681 }
1682 }
1683
1684 if (q->rspq.desc) {
1689 bus_dmamap_unload(q->txq[i].desc_tag,
1690 q->txq[i].desc_map);
1691 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
1692 q->txq[i].desc_map);
1693 bus_dma_tag_destroy(q->txq[i].desc_tag);
1694 bus_dma_tag_destroy(q->txq[i].entry_tag);
1695 MTX_DESTROY(&q->txq[i].lock);
1696 }
1697 if (q->txq[i].sdesc) {
1698 free(q->txq[i].sdesc, M_DEVBUF);
1699 }
1700 }
1701
1702 if (q->rspq.desc) {
1685 mtx_lock(&sc->sge.reg_lock);
1703 mtx_lock_spin(&sc->sge.reg_lock);
1686 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
1704 t3_sge_disable_rspcntxt(sc, q->rspq.cntxt_id);
1687 mtx_unlock(&sc->sge.reg_lock);
1705 mtx_unlock_spin(&sc->sge.reg_lock);
1688
1689 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
1690 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
1691 q->rspq.desc_map);
1692 bus_dma_tag_destroy(q->rspq.desc_tag);
1693 MTX_DESTROY(&q->rspq.lock);
1694 }
1695

--- 192 unchanged lines hidden (view full) ---

1888 bus_dma_segment_t *segs, unsigned int nsegs)
1889{
1890 unsigned int sgl_flits, flits;
1891 struct work_request_hdr *from;
1892 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
1893 struct tx_desc *d = &q->desc[pidx];
1894 struct txq_state txqs;
1895
1706
1707 bus_dmamap_unload(q->rspq.desc_tag, q->rspq.desc_map);
1708 bus_dmamem_free(q->rspq.desc_tag, q->rspq.desc,
1709 q->rspq.desc_map);
1710 bus_dma_tag_destroy(q->rspq.desc_tag);
1711 MTX_DESTROY(&q->rspq.lock);
1712 }
1713

--- 192 unchanged lines hidden (view full) ---

1906 bus_dma_segment_t *segs, unsigned int nsegs)
1907{
1908 unsigned int sgl_flits, flits;
1909 struct work_request_hdr *from;
1910 struct sg_ent *sgp, sgl[TX_MAX_SEGS / 2 + 1];
1911 struct tx_desc *d = &q->desc[pidx];
1912 struct txq_state txqs;
1913
1896 if (immediate(m) && segs == NULL) {
1914 if (immediate(m) && nsegs == 0) {
1897 write_imm(d, m, m->m_len, gen);
1898 return;
1899 }
1900
1901 /* Only TX_DATA builds SGLs */
1902 from = mtod(m, struct work_request_hdr *);
1903 memcpy(&d->flit[1], &from[1], m->m_len - sizeof(*from));
1904

--- 17 unchanged lines hidden (view full) ---

1922 *
1923 * Returns the number of Tx descriptors needed for the given offload
1924 * packet. These packets are already fully constructed.
1925 */
1926static __inline unsigned int
1927calc_tx_descs_ofld(struct mbuf *m, unsigned int nsegs)
1928{
1929 unsigned int flits, cnt = 0;
1915 write_imm(d, m, m->m_len, gen);
1916 return;
1917 }
1918
1919 /* Only TX_DATA builds SGLs */
1920 from = mtod(m, struct work_request_hdr *);
1921 memcpy(&d->flit[1], &from[1], m->m_len - sizeof(*from));
1922

--- 17 unchanged lines hidden (view full) ---

1940 *
1941 * Returns the number of Tx descriptors needed for the given offload
1942 * packet. These packets are already fully constructed.
1943 */
1944static __inline unsigned int
1945calc_tx_descs_ofld(struct mbuf *m, unsigned int nsegs)
1946{
1947 unsigned int flits, cnt = 0;
1948 int ndescs;
1930
1949
1950 if (m->m_len <= WR_LEN && nsegs == 0)
1951 return (1); /* packet fits as immediate data */
1931
1952
1932 if (m->m_len <= WR_LEN)
1933 return 1; /* packet fits as immediate data */
1934
1935 if (m->m_flags & M_IOVEC)
1936 cnt = mtomv(m)->mv_count;
1953 if (m->m_flags & M_IOVEC)
1954 cnt = mtomv(m)->mv_count;
1955 else
1956 cnt = nsegs;
1937
1938 /* headers */
1957
1958 /* headers */
1939 flits = ((uint8_t *)m->m_pkthdr.header - mtod(m, uint8_t *)) / 8;
1959 flits = m->m_len / 8;
1940
1960
1941 return flits_to_desc(flits + sgl_len(cnt));
1961 ndescs = flits_to_desc(flits + sgl_len(cnt));
1962
1963 CTR4(KTR_CXGB, "flits=%d sgl_len=%d nsegs=%d ndescs=%d",
1964 flits, sgl_len(cnt), nsegs, ndescs);
1965
1966 return (ndescs);
1942}
1943
1944/**
1945 * ofld_xmit - send a packet through an offload queue
1946 * @adap: the adapter
1947 * @q: the Tx offload queue
1948 * @m: the packet
1949 *

--- 43 unchanged lines hidden (view full) ---

1993 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
1994 ndesc, pidx, skb->len, skb->len - skb->data_len,
1995 skb_shinfo(skb)->nr_frags);
1996#endif
1997 mtx_unlock(&q->lock);
1998
1999 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2000 check_ring_tx_db(adap, q);
1967}
1968
1969/**
1970 * ofld_xmit - send a packet through an offload queue
1971 * @adap: the adapter
1972 * @q: the Tx offload queue
1973 * @m: the packet
1974 *

--- 43 unchanged lines hidden (view full) ---

2018 "ofld_xmit: ndesc %u, pidx %u, len %u, main %u, frags %u",
2019 ndesc, pidx, skb->len, skb->len - skb->data_len,
2020 skb_shinfo(skb)->nr_frags);
2021#endif
2022 mtx_unlock(&q->lock);
2023
2024 write_ofld_wr(adap, m, q, pidx, gen, ndesc, segs, nsegs);
2025 check_ring_tx_db(adap, q);
2001
2002 return (0);
2003}
2004
2005/**
2006 * restart_offloadq - restart a suspended offload queue
2007 * @qs: the queue set containing the offload queue
2008 *
2009 * Resumes transmission on a suspended Tx offload queue.

--- 43 unchanged lines hidden (view full) ---

2053 mtx_lock(&q->lock);
2054 }
2055 mtx_unlock(&q->lock);
2056
2057#if USE_GTS
2058 set_bit(TXQ_RUNNING, &q->flags);
2059 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2060#endif
2026 return (0);
2027}
2028
2029/**
2030 * restart_offloadq - restart a suspended offload queue
2031 * @qs: the queue set containing the offload queue
2032 *
2033 * Resumes transmission on a suspended Tx offload queue.

--- 43 unchanged lines hidden (view full) ---

2077 mtx_lock(&q->lock);
2078 }
2079 mtx_unlock(&q->lock);
2080
2081#if USE_GTS
2082 set_bit(TXQ_RUNNING, &q->flags);
2083 set_bit(TXQ_LAST_PKT_DB, &q->flags);
2084#endif
2085 wmb();
2061 t3_write_reg(adap, A_SG_KDOORBELL,
2062 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2063}
2064
2065/**
2066 * queue_set - return the queue set a packet should use
2067 * @m: the packet
2068 *

--- 226 unchanged lines hidden (view full) ---

2295 }
2296#else
2297 q->fl[1].buf_size = MJUMPAGESIZE - header_size;
2298 q->fl[1].zone = zone_jumbop;
2299 q->fl[1].type = EXT_JUMBOP;
2300#endif
2301 q->lro.enabled = lro_default;
2302
2086 t3_write_reg(adap, A_SG_KDOORBELL,
2087 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id));
2088}
2089
2090/**
2091 * queue_set - return the queue set a packet should use
2092 * @m: the packet
2093 *

--- 226 unchanged lines hidden (view full) ---

2320 }
2321#else
2322 q->fl[1].buf_size = MJUMPAGESIZE - header_size;
2323 q->fl[1].zone = zone_jumbop;
2324 q->fl[1].type = EXT_JUMBOP;
2325#endif
2326 q->lro.enabled = lro_default;
2327
2303 mtx_lock(&sc->sge.reg_lock);
2328 mtx_lock_spin(&sc->sge.reg_lock);
2304 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2305 q->rspq.phys_addr, q->rspq.size,
2306 q->fl[0].buf_size, 1, 0);
2307 if (ret) {
2308 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2309 goto err_unlock;
2310 }
2311

--- 39 unchanged lines hidden (view full) ---

2351 goto err_unlock;
2352 }
2353 }
2354
2355 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2356 device_get_unit(sc->dev), irq_vec_idx);
2357 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2358
2329 ret = -t3_sge_init_rspcntxt(sc, q->rspq.cntxt_id, irq_vec_idx,
2330 q->rspq.phys_addr, q->rspq.size,
2331 q->fl[0].buf_size, 1, 0);
2332 if (ret) {
2333 printf("error %d from t3_sge_init_rspcntxt\n", ret);
2334 goto err_unlock;
2335 }
2336

--- 39 unchanged lines hidden (view full) ---

2376 goto err_unlock;
2377 }
2378 }
2379
2380 snprintf(q->rspq.lockbuf, RSPQ_NAME_LEN, "t3 rspq lock %d:%d",
2381 device_get_unit(sc->dev), irq_vec_idx);
2382 MTX_INIT(&q->rspq.lock, q->rspq.lockbuf, NULL, MTX_DEF);
2383
2359 mtx_unlock(&sc->sge.reg_lock);
2384 mtx_unlock_spin(&sc->sge.reg_lock);
2360 t3_update_qset_coalesce(q, p);
2361 q->port = pi;
2362
2363 refill_fl(sc, &q->fl[0], q->fl[0].size);
2364 refill_fl(sc, &q->fl[1], q->fl[1].size);
2365 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2366
2367 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2368 V_NEWTIMER(q->rspq.holdoff_tmr));
2369
2370 return (0);
2371
2372err_unlock:
2385 t3_update_qset_coalesce(q, p);
2386 q->port = pi;
2387
2388 refill_fl(sc, &q->fl[0], q->fl[0].size);
2389 refill_fl(sc, &q->fl[1], q->fl[1].size);
2390 refill_rspq(sc, &q->rspq, q->rspq.size - 1);
2391
2392 t3_write_reg(sc, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
2393 V_NEWTIMER(q->rspq.holdoff_tmr));
2394
2395 return (0);
2396
2397err_unlock:
2373 mtx_unlock(&sc->sge.reg_lock);
2398 mtx_unlock_spin(&sc->sge.reg_lock);
2374err:
2375 t3_free_qset(sc, q);
2376
2377 return (ret);
2378}
2379
2380void
2381t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)

--- 32 unchanged lines hidden (view full) ---

2414 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2415 m->m_len -= (sizeof(*cpl) + ethpad);
2416 m->m_data += (sizeof(*cpl) + ethpad);
2417
2418 (*ifp->if_input)(ifp, m);
2419}
2420
2421static void
2399err:
2400 t3_free_qset(sc, q);
2401
2402 return (ret);
2403}
2404
2405void
2406t3_rx_eth(struct adapter *adap, struct sge_rspq *rq, struct mbuf *m, int ethpad)

--- 32 unchanged lines hidden (view full) ---

2439 m->m_pkthdr.len -= (sizeof(*cpl) + ethpad);
2440 m->m_len -= (sizeof(*cpl) + ethpad);
2441 m->m_data += (sizeof(*cpl) + ethpad);
2442
2443 (*ifp->if_input)(ifp, m);
2444}
2445
2446static void
2422ext_free_handler(void *cl, void * arg)
2447ext_free_handler(void *arg1, void * arg2)
2423{
2448{
2424 uintptr_t type = (uintptr_t)arg;
2449 uintptr_t type = (uintptr_t)arg2;
2425 uma_zone_t zone;
2426 struct mbuf *m;
2427
2450 uma_zone_t zone;
2451 struct mbuf *m;
2452
2428 m = cl;
2453 m = arg1;
2429 zone = m_getzonefromtype(type);
2430 m->m_ext.ext_type = (int)type;
2431 cxgb_ext_freed++;
2454 zone = m_getzonefromtype(type);
2455 m->m_ext.ext_type = (int)type;
2456 cxgb_ext_freed++;
2432 cxgb_cache_put(zone, cl);
2457 cxgb_cache_put(zone, m);
2433}
2434
2435static void
2436init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone)
2437{
2438 struct mbuf *m;
2439 int header_size;
2440
2441 header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) +
2442 sizeof(struct m_ext_) + sizeof(uint32_t);
2443
2444 bzero(cl, header_size);
2445 m = (struct mbuf *)cl;
2458}
2459
2460static void
2461init_cluster_mbuf(caddr_t cl, int flags, int type, uma_zone_t zone)
2462{
2463 struct mbuf *m;
2464 int header_size;
2465
2466 header_size = sizeof(struct m_hdr) + sizeof(struct pkthdr) +
2467 sizeof(struct m_ext_) + sizeof(uint32_t);
2468
2469 bzero(cl, header_size);
2470 m = (struct mbuf *)cl;
2446
2471
2472 cxgb_ext_inited++;
2447 SLIST_INIT(&m->m_pkthdr.tags);
2448 m->m_type = MT_DATA;
2449 m->m_flags = flags | M_NOFREE | M_EXT;
2450 m->m_data = cl + header_size;
2451 m->m_ext.ext_buf = cl;
2452 m->m_ext.ref_cnt = (uint32_t *)(cl + header_size - sizeof(uint32_t));
2453 m->m_ext.ext_size = m_getsizefromtype(type);
2454 m->m_ext.ext_free = ext_free_handler;

--- 261 unchanged lines hidden (view full) ---

2716 int eth, eop = 0, ethpad = 0;
2717 uint32_t flags = ntohl(r->flags);
2718 uint32_t rss_csum = *(const uint32_t *)r;
2719 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2720
2721 eth = (r->rss_hdr.opcode == CPL_RX_PKT);
2722
2723 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2473 SLIST_INIT(&m->m_pkthdr.tags);
2474 m->m_type = MT_DATA;
2475 m->m_flags = flags | M_NOFREE | M_EXT;
2476 m->m_data = cl + header_size;
2477 m->m_ext.ext_buf = cl;
2478 m->m_ext.ref_cnt = (uint32_t *)(cl + header_size - sizeof(uint32_t));
2479 m->m_ext.ext_size = m_getsizefromtype(type);
2480 m->m_ext.ext_free = ext_free_handler;

--- 261 unchanged lines hidden (view full) ---
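As an illustrative aside (not code from cxgb_sge.c): init_cluster_mbuf() above builds the receive mbuf inside the front of the cluster itself. header_size bytes are reserved for the mbuf header, the packet header, the external-storage descriptor, and a trailing 32-bit reference count; m_data then points just past that region, and ref_cnt points at its last word. A rough layout sketch; the struct sizes below are placeholders for illustration, not the real sizes of struct m_hdr, struct pkthdr, or struct m_ext_:

#include <stdio.h>
#include <stdint.h>

#define MHDR_SZ    32	/* placeholder for sizeof(struct m_hdr) */
#define PKTHDR_SZ  48	/* placeholder for sizeof(struct pkthdr) */
#define MEXT_SZ    28	/* placeholder for sizeof(struct m_ext_) */
#define REFCNT_SZ  sizeof(uint32_t)

int
main(void)
{
	size_t header_size = MHDR_SZ + PKTHDR_SZ + MEXT_SZ + REFCNT_SZ;

	/*
	 * Cluster layout produced by init_cluster_mbuf():
	 *
	 *   cl                                                cl + header_size
	 *   | mbuf hdr | pkthdr | m_ext_ | refcnt (uint32_t) | packet data ...
	 *                                  ^ m->m_ext.ref_cnt  ^ m->m_data
	 */
	printf("embedded header area: %zu bytes\n", header_size);
	printf("m_data  = cl + %zu\n", header_size);
	printf("ref_cnt = cl + %zu\n", header_size - REFCNT_SZ);
	return (0);
}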

2742 int eth, eop = 0, ethpad = 0;
2743 uint32_t flags = ntohl(r->flags);
2744 uint32_t rss_csum = *(const uint32_t *)r;
2745 uint32_t rss_hash = be32toh(r->rss_hdr.rss_hash_val);
2746
2747 eth = (r->rss_hdr.opcode == CPL_RX_PKT);
2748
2749 if (__predict_false(flags & F_RSPD_ASYNC_NOTIF)) {
2724 /* XXX */
2725 printf("async notification\n");
2750 struct mbuf *m;
2726
2751
2752 if (cxgb_debug)
2753 printf("async notification\n");
2754
2755 if (rspq->rspq_mh.mh_head == NULL) {
2756 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2757 m = rspq->rspq_mh.mh_head;
2758 } else {
2759 m = m_gethdr(M_DONTWAIT, MT_DATA);
2760 }
2761
2762 /* XXX m is lost here if rspq->rspq_mbuf is not NULL */
2763
2764 if (m == NULL)
2765 goto no_mem;
2766
2767 memcpy(mtod(m, char *), r, AN_PKT_SIZE);
2768 m->m_len = m->m_pkthdr.len = AN_PKT_SIZE;
2769 *mtod(m, char *) = CPL_ASYNC_NOTIF;
2770 rss_csum = htonl(CPL_ASYNC_NOTIF << 24);
2771 eop = 1;
2772 rspq->async_notif++;
2773 goto skip;
2727 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2728 struct mbuf *m = NULL;
2729
2730 DPRINTF("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n",
2731 r->rss_hdr.opcode, rspq->cidx);
2732 if (rspq->rspq_mh.mh_head == NULL)
2733 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2734 else
2735 m = m_gethdr(M_DONTWAIT, MT_DATA);
2736
2774 } else if (flags & F_RSPD_IMM_DATA_VALID) {
2775 struct mbuf *m = NULL;
2776
2777 DPRINTF("IMM DATA VALID opcode=0x%x rspq->cidx=%d\n",
2778 r->rss_hdr.opcode, rspq->cidx);
2779 if (rspq->rspq_mh.mh_head == NULL)
2780 rspq->rspq_mh.mh_head = m_gethdr(M_DONTWAIT, MT_DATA);
2781 else
2782 m = m_gethdr(M_DONTWAIT, MT_DATA);
2783
2737 /*
2738 * XXX revisit me
2739 */
2740 if (rspq->rspq_mh.mh_head == NULL && m == NULL) {
2784 if (rspq->rspq_mh.mh_head == NULL && m == NULL) {
2785 no_mem:
2741 rspq->next_holdoff = NOMEM_INTR_DELAY;
2742 budget_left--;
2743 break;
2744 }
2786 rspq->next_holdoff = NOMEM_INTR_DELAY;
2787 budget_left--;
2788 break;
2789 }
2745 get_imm_packet(adap, r, rspq->rspq_mh.mh_head, m, flags);
2746
2790 get_imm_packet(adap, r, rspq->rspq_mh.mh_head);
2747 eop = 1;
2748 rspq->imm_data++;
2791 eop = 1;
2792 rspq->imm_data++;
2749 } else if (r->len_cq) {
2793 } else if (r->len_cq) {
2750 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2751
2752#ifdef DISABLE_MBUF_IOVEC
2753 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r);
2754#else
2755 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r);
2794 int drop_thresh = eth ? SGE_RX_DROP_THRES : 0;
2795
2796#ifdef DISABLE_MBUF_IOVEC
2797 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mh, r);
2798#else
2799 eop = get_packet(adap, drop_thresh, qs, &rspq->rspq_mbuf, r);
2800#endif
2756#ifdef IFNET_MULTIQUEUE
2801#ifdef IFNET_MULTIQUEUE
2757 rspq->rspq_mbuf->m_pkthdr.rss_hash = rss_hash;
2802 rspq->rspq_mh.mh_head->m_pkthdr.rss_hash = rss_hash;
2758#endif
2803#endif
2759#endif
2760 ethpad = 2;
2761 } else {
2762 DPRINTF("pure response\n");
2763 rspq->pure_rsps++;
2764 }
2804 ethpad = 2;
2805 } else {
2806 DPRINTF("pure response\n");
2807 rspq->pure_rsps++;
2808 }
2765
2809 skip:
2766 if (flags & RSPD_CTRL_MASK) {
2767 sleeping |= flags & RSPD_GTS_MASK;
2768 handle_rsp_cntrl_info(qs, flags);
2769 }
2770
2771 r++;
2772 if (__predict_false(++rspq->cidx == rspq->size)) {
2773 rspq->cidx = 0;

--- 8 unchanged lines hidden (view full) ---

2782 DPRINTF("eth=%d eop=%d flags=0x%x\n", eth, eop, flags);
2783
2784 if (!eth && eop) {
2785 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum;
2786 /*
2787 * XXX size mismatch
2788 */
2789 m_set_priority(rspq->rspq_mh.mh_head, rss_hash);
2810 if (flags & RSPD_CTRL_MASK) {
2811 sleeping |= flags & RSPD_GTS_MASK;
2812 handle_rsp_cntrl_info(qs, flags);
2813 }
2814
2815 r++;
2816 if (__predict_false(++rspq->cidx == rspq->size)) {
2817 rspq->cidx = 0;

--- 8 unchanged lines hidden (view full) ---

2826 DPRINTF("eth=%d eop=%d flags=0x%x\n", eth, eop, flags);
2827
2828 if (!eth && eop) {
2829 rspq->rspq_mh.mh_head->m_pkthdr.csum_data = rss_csum;
2830 /*
2831 * XXX size mismatch
2832 */
2833 m_set_priority(rspq->rspq_mh.mh_head, rss_hash);
2790
2834
2835
2791 ngathered = rx_offload(&adap->tdev, rspq,
2792 rspq->rspq_mh.mh_head, offload_mbufs, ngathered);
2793 rspq->rspq_mh.mh_head = NULL;
2794 DPRINTF("received offload packet\n");
2795
2796 } else if (eth && eop) {
2797 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *));
2798 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *) + L1_CACHE_BYTES);

--- 184 unchanged lines hidden (view full) ---

2983 goto retry_sbufops;
2984 }
2985 sbuf_finish(sb);
2986 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
2987 sbuf_delete(sb);
2988 return (err);
2989}
2990
2836 ngathered = rx_offload(&adap->tdev, rspq,
2837 rspq->rspq_mh.mh_head, offload_mbufs, ngathered);
2838 rspq->rspq_mh.mh_head = NULL;
2839 DPRINTF("received offload packet\n");
2840
2841 } else if (eth && eop) {
2842 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *));
2843 prefetch(mtod(rspq->rspq_mh.mh_head, uint8_t *) + L1_CACHE_BYTES);

--- 184 unchanged lines hidden (view full) ---

3028 goto retry_sbufops;
3029 }
3030 sbuf_finish(sb);
3031 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3032 sbuf_delete(sb);
3033 return (err);
3034}
3035
2991
2992/*
2993 * broken by recent mbuf changes
2994 */
2995static int
3036static int
2996t3_dump_txq(SYSCTL_HANDLER_ARGS)
3037t3_dump_txq_eth(SYSCTL_HANDLER_ARGS)
2997{
2998 struct sge_txq *txq;
2999 struct sge_qset *qs;
3000 int i, j, err, dump_end;
3001 static int multiplier = 1;
3002 struct sbuf *sb;
3003 struct tx_desc *txd;
3004 uint32_t *WR, wr_hi, wr_lo, gen;

--- 12 unchanged lines hidden (view full) ---

3017 }
3018 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3019 log(LOG_WARNING,
3020 "dump start of %d is greater than queue size\n",
3021 txq->txq_dump_start);
3022 txq->txq_dump_start = 0;
3023 return (EINVAL);
3024 }
3038{
3039 struct sge_txq *txq;
3040 struct sge_qset *qs;
3041 int i, j, err, dump_end;
3042 static int multiplier = 1;
3043 struct sbuf *sb;
3044 struct tx_desc *txd;
3045 uint32_t *WR, wr_hi, wr_lo, gen;

--- 12 unchanged lines hidden (view full) ---

3058 }
3059 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3060 log(LOG_WARNING,
3061 "dump start of %d is greater than queue size\n",
3062 txq->txq_dump_start);
3063 txq->txq_dump_start = 0;
3064 return (EINVAL);
3065 }
3025 err = t3_sge_read_ecntxt(qs->port->adapter, txq->cntxt_id, data);
3066 err = t3_sge_read_ecntxt(qs->port->adapter, qs->rspq.cntxt_id, data);
3026 if (err)
3027 return (err);
3028
3029
3030retry_sbufops:
3031 sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
3032
3033 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",

--- 27 unchanged lines hidden (view full) ---

3061 goto retry_sbufops;
3062 }
3063 sbuf_finish(sb);
3064 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3065 sbuf_delete(sb);
3066 return (err);
3067}
3068
3067 if (err)
3068 return (err);
3069
3070
3071retry_sbufops:
3072 sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
3073
3074 sbuf_printf(sb, " \n credits=%u GTS=%u index=%u size=%u rspq#=%u cmdq#=%u\n",

--- 27 unchanged lines hidden (view full) ---

3102 goto retry_sbufops;
3103 }
3104 sbuf_finish(sb);
3105 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3106 sbuf_delete(sb);
3107 return (err);
3108}
3109
3110static int
3111t3_dump_txq_ctrl(SYSCTL_HANDLER_ARGS)
3112{
3113 struct sge_txq *txq;
3114 struct sge_qset *qs;
3115 int i, j, err, dump_end;
3116 static int multiplier = 1;
3117 struct sbuf *sb;
3118 struct tx_desc *txd;
3119 uint32_t *WR, wr_hi, wr_lo, gen;
3120
3121 txq = arg1;
3122 qs = txq_to_qset(txq, TXQ_CTRL);
3123 if (txq->txq_dump_count == 0) {
3124 return (0);
3125 }
3126 if (txq->txq_dump_count > 256) {
3127 log(LOG_WARNING,
3128 "dump count is too large %d\n", txq->txq_dump_count);
3129 txq->txq_dump_count = 1;
3130 return (EINVAL);
3131 }
3132 if (txq->txq_dump_start > 255) {
3133 log(LOG_WARNING,
3134 "dump start of %d is greater than queue size\n",
3135 txq->txq_dump_start);
3136 txq->txq_dump_start = 0;
3137 return (EINVAL);
3138 }
3069
3139
3140retry_sbufops:
3141 sb = sbuf_new(NULL, NULL, QDUMP_SBUF_SIZE*multiplier, SBUF_FIXEDLEN);
3142 sbuf_printf(sb, " qid=%d start=%d -> end=%d\n", qs->idx,
3143 txq->txq_dump_start,
3144 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3145
3146 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3147 for (i = txq->txq_dump_start; i < dump_end; i++) {
3148 txd = &txq->desc[i & (255)];
3149 WR = (uint32_t *)txd->flit;
3150 wr_hi = ntohl(WR[0]);
3151 wr_lo = ntohl(WR[1]);
3152 gen = G_WR_GEN(wr_lo);
3153
3154 sbuf_printf(sb," wr_hi %08x wr_lo %08x gen %d\n",
3155 wr_hi, wr_lo, gen);
3156 for (j = 2; j < 30; j += 4)
3157 sbuf_printf(sb, "\t%08x %08x %08x %08x \n",
3158 WR[j], WR[j + 1], WR[j + 2], WR[j + 3]);
3159
3160 }
3161 if (sbuf_overflowed(sb)) {
3162 sbuf_delete(sb);
3163 multiplier++;
3164 goto retry_sbufops;
3165 }
3166 sbuf_finish(sb);
3167 err = SYSCTL_OUT(req, sbuf_data(sb), sbuf_len(sb) + 1);
3168 sbuf_delete(sb);
3169 return (err);
3170}
3171
3070static int
3071t3_lro_enable(SYSCTL_HANDLER_ARGS)
3072{
3073 adapter_t *sc;
3074 int i, j, enabled, err, nqsets = 0;
3075
3076#ifndef LRO_WORKING
3077 return (0);

--- 79 unchanged lines hidden (view full) ---

3157 CTLFLAG_RD, &sc->fw_version,
3158 0, "firmware version");
3159
3160 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3161 "enable_lro",
3162 CTLTYPE_INT|CTLFLAG_RW, sc,
3163 0, t3_lro_enable,
3164 "I", "enable large receive offload");
3172static int
3173t3_lro_enable(SYSCTL_HANDLER_ARGS)
3174{
3175 adapter_t *sc;
3176 int i, j, enabled, err, nqsets = 0;
3177
3178#ifndef LRO_WORKING
3179 return (0);

--- 79 unchanged lines hidden (view full) ---

3259 CTLFLAG_RD, &sc->fw_version,
3260 0, "firmware version");
3261
3262 SYSCTL_ADD_PROC(ctx, children, OID_AUTO,
3263 "enable_lro",
3264 CTLTYPE_INT|CTLFLAG_RW, sc,
3265 0, t3_lro_enable,
3266 "I", "enable large receive offload");
3165
3166 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3267 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3268 "hw_revision",
3269 CTLFLAG_RD, &sc->params.rev,
3270 0, "chip model");
3271 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3167 "enable_debug",
3168 CTLFLAG_RW, &cxgb_debug,
3169 0, "enable verbose debugging output");
3170 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tunq_coalesce",
3171 CTLFLAG_RD, &sc->tunq_coalesce,
3172 "#tunneled packets freed");
3173 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3174 "txq_overrun",

--- 11 unchanged lines hidden (view full) ---

3186 "cached",
3187 CTLFLAG_RD, &cxgb_cached,
3188 0, "#times a cluster was cached");
3189 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3190 "ext_freed",
3191 CTLFLAG_RD, &cxgb_ext_freed,
3192 0, "#times a cluster was freed through ext_free");
3193 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3272 "enable_debug",
3273 CTLFLAG_RW, &cxgb_debug,
3274 0, "enable verbose debugging output");
3275 SYSCTL_ADD_ULONG(ctx, children, OID_AUTO, "tunq_coalesce",
3276 CTLFLAG_RD, &sc->tunq_coalesce,
3277 "#tunneled packets freed");
3278 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3279 "txq_overrun",

--- 11 unchanged lines hidden (view full) ---

3291 "cached",
3292 CTLFLAG_RD, &cxgb_cached,
3293 0, "#times a cluster was cached");
3294 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3295 "ext_freed",
3296 CTLFLAG_RD, &cxgb_ext_freed,
3297 0, "#times a cluster was freed through ext_free");
3298 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3299 "ext_inited",
3300 CTLFLAG_RD, &cxgb_ext_inited,
3301 0, "#times a cluster was initialized for ext_free");
3302 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3194 "mbufs_outstanding",
3195 CTLFLAG_RD, &cxgb_mbufs_outstanding,
3196 0, "#mbufs in flight in the driver");
3197 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3198 "pack_outstanding",
3199 CTLFLAG_RD, &cxgb_pack_outstanding,
3200 0, "#packet in flight in the driver");
3201}

--- 33 unchanged lines hidden (view full) ---

3235 pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3236 poidlist = SYSCTL_CHILDREN(poid);
3237 SYSCTL_ADD_INT(ctx, poidlist, OID_AUTO,
3238 "nqsets", CTLFLAG_RD, &pi->nqsets,
3239 0, "#queue sets");
3240
3241 for (j = 0; j < pi->nqsets; j++) {
3242 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3303 "mbufs_outstanding",
3304 CTLFLAG_RD, &cxgb_mbufs_outstanding,
3305 0, "#mbufs in flight in the driver");
3306 SYSCTL_ADD_INT(ctx, children, OID_AUTO,
3307 "pack_outstanding",
3308 CTLFLAG_RD, &cxgb_pack_outstanding,
3309 0, "#packet in flight in the driver");
3310}

--- 33 unchanged lines hidden (view full) ---

3344 pi->namebuf, CTLFLAG_RD, NULL, "port statistics");
3345 poidlist = SYSCTL_CHILDREN(poid);
3346 SYSCTL_ADD_INT(ctx, poidlist, OID_AUTO,
3347 "nqsets", CTLFLAG_RD, &pi->nqsets,
3348 0, "#queue sets");
3349
3350 for (j = 0; j < pi->nqsets; j++) {
3351 struct sge_qset *qs = &sc->sge.qs[pi->first_qset + j];
3243 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid;
3244 struct sysctl_oid_list *qspoidlist, *rspqpoidlist, *txqpoidlist;
3352 struct sysctl_oid *qspoid, *rspqpoid, *txqpoid, *ctrlqpoid;
3353 struct sysctl_oid_list *qspoidlist, *rspqpoidlist, *txqpoidlist, *ctrlqpoidlist;
3245 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3246
3247 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3248
3249 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3250 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3251 qspoidlist = SYSCTL_CHILDREN(qspoid);
3252
3253 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3254 rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3255 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3256
3257 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3258 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3259 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3260
3354 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3355
3356 snprintf(qs->namebuf, QS_NAME_LEN, "qs%d", j);
3357
3358 qspoid = SYSCTL_ADD_NODE(ctx, poidlist, OID_AUTO,
3359 qs->namebuf, CTLFLAG_RD, NULL, "qset statistics");
3360 qspoidlist = SYSCTL_CHILDREN(qspoid);
3361
3362 rspqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3363 rspq_name, CTLFLAG_RD, NULL, "rspq statistics");
3364 rspqpoidlist = SYSCTL_CHILDREN(rspqpoid);
3365
3366 txqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3367 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3368 txqpoidlist = SYSCTL_CHILDREN(txqpoid);
3369
3261
3262
3370 ctrlqpoid = SYSCTL_ADD_NODE(ctx, qspoidlist, OID_AUTO,
3371 txq_names[2], CTLFLAG_RD, NULL, "ctrlq statistics");
3372 ctrlqpoidlist = SYSCTL_CHILDREN(ctrlqpoid);
3373
3263 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3264 CTLFLAG_RD, &qs->rspq.size,
3265 0, "#entries in response queue");
3266 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3267 CTLFLAG_RD, &qs->rspq.cidx,
3268 0, "consumer index");
3269 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3270 CTLFLAG_RD, &qs->rspq.credits,

--- 6 unchanged lines hidden (view full) ---

3277 0, "start rspq dump entry");
3278 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3279 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3280 0, "#rspq entries to dump");
3281 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3282 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3283 0, t3_dump_rspq, "A", "dump of the response queue");
3284
3374 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "size",
3375 CTLFLAG_RD, &qs->rspq.size,
3376 0, "#entries in response queue");
3377 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "cidx",
3378 CTLFLAG_RD, &qs->rspq.cidx,
3379 0, "consumer index");
3380 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "credits",
3381 CTLFLAG_RD, &qs->rspq.credits,

--- 6 unchanged lines hidden (view full) ---

3388 0, "start rspq dump entry");
3389 SYSCTL_ADD_UINT(ctx, rspqpoidlist, OID_AUTO, "dump_count",
3390 CTLFLAG_RW, &qs->rspq.rspq_dump_count,
3391 0, "#rspq entries to dump");
3392 SYSCTL_ADD_PROC(ctx, rspqpoidlist, OID_AUTO, "qdump",
3393 CTLTYPE_STRING | CTLFLAG_RD, &qs->rspq,
3394 0, t3_dump_rspq, "A", "dump of the response queue");
3395
3285
3286
3396
3287 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "dropped",
3288 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops,
3289 0, "#tunneled packets dropped");
3290 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3291 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3292 0, "#tunneled packets waiting to be sent");
3293 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3294 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,

--- 40 unchanged lines hidden (view full) ---

3335 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3336 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3337 0, "txq start idx for dump");
3338 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3339 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3340 0, "txq #entries to dump");
3341 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3342 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3397 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "dropped",
3398 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_drops,
3399 0, "#tunneled packets dropped");
3400 SYSCTL_ADD_INT(ctx, txqpoidlist, OID_AUTO, "sendqlen",
3401 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.qlen,
3402 0, "#tunneled packets waiting to be sent");
3403 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "queue_pidx",
3404 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr.br_prod,

--- 40 unchanged lines hidden (view full) ---

3445 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_start",
3446 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3447 0, "txq start idx for dump");
3448 SYSCTL_ADD_UINT(ctx, txqpoidlist, OID_AUTO, "dump_count",
3449 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3450 0, "txq #entries to dump");
3451 SYSCTL_ADD_PROC(ctx, txqpoidlist, OID_AUTO, "qdump",
3452 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3343 0, t3_dump_txq, "A", "dump of the transmit queue");
3453 0, t3_dump_txq_eth, "A", "dump of the transmit queue");
3454
3455 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_start",
3456 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3457 0, "ctrlq start idx for dump");
3458 SYSCTL_ADD_UINT(ctx, ctrlqpoidlist, OID_AUTO, "dump_count",
3459 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3460 0, "ctrl #entries to dump");
3461 SYSCTL_ADD_PROC(ctx, ctrlqpoidlist, OID_AUTO, "qdump",
3462 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3463 0, t3_dump_txq_ctrl, "A", "dump of the transmit queue");
3464
3465
3466
3467
3468
3344 }
3345 }
3346}
3347
3348/**
3349 * t3_get_desc - dump an SGE descriptor for debugging purposes
3350 * @qs: the queue set
3351 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)

--- 33 unchanged lines hidden ---
3469 }
3470 }
3471}
3472
3473/**
3474 * t3_get_desc - dump an SGE descriptor for debugging purposes
3475 * @qs: the queue set
3476 * @qnum: identifies the specific queue (0..2: Tx, 3:response, 4..5: Rx)

--- 33 unchanged lines hidden ---