Lines Matching refs:txq

234 #define	TXQ_RING_EMPTY(qs)	drbr_empty((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
236 drbr_needs_enqueue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
237 #define TXQ_RING_FLUSH(qs) drbr_flush((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
239 drbr_dequeue_cond((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr, func, arg)
241 drbr_dequeue((qs)->port->ifp, (qs)->txq[TXQ_ETH].txq_mr)
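The TXQ_RING_* macros above (234-241) wrap the drbr buf_ring operations so callers pass only a qset; the ifnet and the TXQ_ETH ring are supplied by the macro. Below is a minimal userland model of that wrapping pattern, with a stub ring standing in for drbr; toy_ring, toy_qset, and the TOY_* names are all hypothetical.

#include <stdio.h>

struct toy_ring { unsigned int head, tail; int slots[8]; };
struct toy_qset { struct toy_ring *txq_mr; };

static int
ring_empty(struct toy_ring *r)
{
	return (r->head == r->tail);
}

static int
ring_dequeue(struct toy_ring *r, int *v)
{
	if (ring_empty(r))
		return (-1);
	*v = r->slots[r->head++ & 7];
	return (0);
}

/* qset-level wrappers, mirroring the shape of TXQ_RING_EMPTY() and
 * TXQ_RING_DEQUEUE(): the macro hides which member ring is meant. */
#define TOY_RING_EMPTY(qs)	ring_empty((qs)->txq_mr)
#define TOY_RING_DEQUEUE(qs, v)	ring_dequeue((qs)->txq_mr, (v))

int
main(void)
{
	struct toy_ring r = { 0, 2, { 7, 9 } };
	struct toy_qset qs = { &r };
	int v;

	while (TOY_RING_DEQUEUE(&qs, &v) == 0)
		printf("dequeued %d\n", v);
	printf("empty: %d\n", TOY_RING_EMPTY(&qs));
	return (0);
}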
259 struct sge_txq *txq;
264 txq = &qs->txq[TXQ_ETH];
278 if (*fill != 0 && (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
281 else if (*fill == 0 && (txq->in_use >= cxgb_tx_coalesce_enable_start))
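The check at 278/281 is a hysteresis: coalescing turns off only once in_use falls to cxgb_tx_coalesce_enable_stop, and back on only once it reaches cxgb_tx_coalesce_enable_start, so the mode cannot flap when occupancy hovers near a single boundary. A minimal userland model of that logic, assuming *fill tracks whether coalescing is currently active; the names and threshold values here are stand-ins, not the driver's tunables.

#include <stdio.h>

/* Stand-ins for the cxgb_tx_coalesce_enable_{start,stop} tunables. */
static unsigned int coalesce_start = 100;	/* enable at/above this occupancy */
static unsigned int coalesce_stop = 10;		/* disable at/below this occupancy */

/* Two thresholds give hysteresis around the on/off decision. */
static void
coalesce_check(unsigned int in_use, int *on)
{
	if (*on && in_use <= coalesce_stop)
		*on = 0;
	else if (!*on && in_use >= coalesce_start)
		*on = 1;
}

int
main(void)
{
	unsigned int samples[] = { 5, 50, 120, 60, 8 };
	int on = 0;

	for (int i = 0; i < 5; i++) {
		coalesce_check(samples[i], &on);
		printf("in_use=%3u coalescing=%d\n", samples[i], on);
	}
	return (0);
}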
371 struct sge_txq *q = &qs->txq[queue];
962 struct sge_txq *txq;
973 txq = &qs->txq[0];
974 reclaim_ofl = txq[TXQ_OFLD].processed - txq[TXQ_OFLD].cleaned;
1112 qs->txq[TXQ_ETH].cntxt_id = FW_TUNNEL_SGEEC_START + id;
1113 qs->txq[TXQ_ETH].token = FW_TUNNEL_TID_START + id;
1114 qs->txq[TXQ_OFLD].cntxt_id = FW_OFLD_SGEEC_START + id;
1115 qs->txq[TXQ_CTRL].cntxt_id = FW_CTRL_SGEEC_START + id;
1116 qs->txq[TXQ_CTRL].token = FW_CTRL_TID_START + id;
1119 mbufq_init(&qs->txq[TXQ_ETH].sendq, INT_MAX);
1120 mbufq_init(&qs->txq[TXQ_OFLD].sendq, INT_MAX);
1121 mbufq_init(&qs->txq[TXQ_CTRL].sendq, INT_MAX);
1126 txq_prod(struct sge_txq *txq, unsigned int ndesc, struct txq_state *txqs)
1128 txq->in_use += ndesc;
1133 txqs->gen = txq->gen;
1134 txq->unacked += ndesc;
1135 txqs->compl = (txq->unacked & 32) << (S_WR_COMPL - 5);
1136 txq->unacked &= 31;
1137 txqs->pidx = txq->pidx;
1138 txq->pidx += ndesc;
1140 if (((txqs->pidx > txq->cidx) &&
1141 (txq->pidx < txqs->pidx) &&
1142 (txq->pidx >= txq->cidx)) ||
1143 ((txqs->pidx < txq->cidx) &&
1144 (txq->pidx >= txq->cidx)) ||
1145 ((txqs->pidx < txq->cidx) &&
1146 (txq->cidx < txqs->pidx)))
1147 panic("txqs->pidx=%d txq->pidx=%d txq->cidx=%d",
1148 txqs->pidx, txq->pidx, txq->cidx);
1150 if (txq->pidx >= txq->size) {
1151 txq->pidx -= txq->size;
1152 txq->gen ^= 1;
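The txq_prod() excerpt (1126-1152) does three things per post: charge ndesc to in_use, decide whether this work request should ask the hardware for a completion, and advance the producer index with a generation flip on wrap. Below is a compilable userland model of that bookkeeping; the toy_* names are stand-ins, and S_WR_COMPL is taken as 21 (consistent with the shift by S_WR_COMPL - 5 of bit 5 at 1135).

#include <stdio.h>

#define S_WR_COMPL	21	/* assumed WR_COMPL bit position */

struct toy_txq {
	unsigned int size;	/* descriptors in the ring */
	unsigned int pidx;	/* producer index */
	unsigned int gen;	/* generation bit; flips on every wrap */
	unsigned int in_use;
	unsigned int unacked;
};

struct toy_txq_state {
	unsigned int gen, pidx, compl;
};

static void
toy_txq_prod(struct toy_txq *q, unsigned int ndesc, struct toy_txq_state *st)
{
	q->in_use += ndesc;
	st->gen = q->gen;
	st->pidx = q->pidx;
	/*
	 * Completion batching: unacked counts descriptors mod 32.  When
	 * ndesc pushes the total past 32, bit 5 becomes set; shifting it by
	 * (S_WR_COMPL - 5) lands it in the WR_COMPL position, so roughly one
	 * work request in 32 requests a completion.  The low five bits are
	 * then carried forward.
	 */
	q->unacked += ndesc;
	st->compl = (q->unacked & 32) << (S_WR_COMPL - 5);
	q->unacked &= 31;
	q->pidx += ndesc;
	if (q->pidx >= q->size) {	/* wrapped: rebase, flip generation */
		q->pidx -= q->size;
		q->gen ^= 1;
	}
}

int
main(void)
{
	struct toy_txq q = { 256, 250, 1, 0, 30 };
	struct toy_txq_state st;

	toy_txq_prod(&q, 8, &st);	/* crosses the end of the ring */
	printf("st.pidx=%u st.gen=%u st.compl=%#x\n", st.pidx, st.gen, st.compl);
	printf("q.pidx=%u q.gen=%u q.unacked=%u\n", q.pidx, q.gen, q.unacked);
	return (0);
}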
1263 * @txqs: txq state (generation and producer index)
1264 * @txq: the SGE Tx queue
1278 const struct sge_txq *txq, const struct sg_ent *sgl, unsigned int flits,
1283 struct tx_sw_desc *txsd = &txq->sdesc[txqs->pidx];
1315 if (++txqs->pidx == txq->size) {
1318 txd = txq->desc;
1319 txsd = txq->sdesc;
1357 struct sge_txq *txq;
1373 txq = &qs->txq[TXQ_ETH];
1374 txd = &txq->desc[txq->pidx];
1375 txsd = &txq->sdesc[txq->pidx];
1376 sgl = txq->txq_sgl;
1390 busdma_map_sg_vec(txq->entry_tag, txsd->map, m0, segs, &nsegs);
1394 if ((err = busdma_map_sg_collapse(txq->entry_tag, txsd->map,
1403 txq_prod(txq, ndesc, &txqs);
1414 txq->txq_coalesced += nsegs;
1446 V_WR_GEN(txqs.gen)) | htonl(V_WR_TID(txq->token));
1451 check_ring_tx_db(sc, txq, 0);
1528 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1533 check_ring_tx_db(sc, txq, 0);
1560 V_WR_GEN(txqs.gen) | V_WR_TID(txq->token));
1565 check_ring_tx_db(sc, txq, 0);
1581 wr_lo = htonl(V_WR_TID(txq->token));
1582 write_wr_hdr_sgl(ndesc, txd, &txqs, txq, sgl, flits,
1584 check_ring_tx_db(sc, txq, 0);
1593 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1596 (txq->in_use <= cxgb_tx_coalesce_enable_stop) &&
1600 (txq->in_use >= cxgb_tx_coalesce_enable_start))
1609 callout_reset_on(&txq->txq_watchdog, hz/4, cxgb_tx_watchdog,
1610 qs, txq->txq_watchdog.c_cpu);
1617 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1619 if (qs->coalescing == 0 && (txq->in_use >= (txq->size>>3)))
1633 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1649 if (txq->size - txq->in_use <= TX_MAX_DESC)
1664 if (txq->db_pending)
1665 check_ring_tx_db(pi->adapter, txq, 1);
1667 if (!TXQ_RING_EMPTY(qs) && callout_pending(&txq->txq_timer) == 0 &&
1669 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1670 qs, txq->txq_timer.c_cpu);
1679 struct sge_txq *txq = &qs->txq[TXQ_ETH];
1680 struct buf_ring *br = txq->txq_mr;
1683 avail = txq->size - txq->in_use;
1700 if (txq->db_pending)
1701 check_ring_tx_db(pi->adapter, txq, 1);
1707 txq->txq_direct_packets++;
1708 txq->txq_direct_bytes += m->m_pkthdr.len;
1717 else if (!TXQ_RING_EMPTY(qs) && !callout_pending(&txq->txq_timer))
1718 callout_reset_on(&txq->txq_timer, 1, cxgb_tx_timeout,
1719 qs, txq->txq_timer.c_cpu);
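Both transmit paths above end with "if (txq->db_pending) check_ring_tx_db(...)" (1664-1665, 1700-1701), which suggests doorbell batching: descriptors are posted as a group and the doorbell is rung once afterwards rather than per packet. A hedged userland model of that pattern follows; all toy_* names are hypothetical and the printf stands in for the MMIO doorbell write.

#include <stdio.h>

struct toy_txq { unsigned int pidx; int db_pending; };

/* Stands in for the doorbell write done by check_ring_tx_db(). */
static void
toy_ring_db(struct toy_txq *q)
{
	printf("doorbell: pidx=%u\n", q->pidx);
	q->db_pending = 0;
}

/* Post one descriptor without ringing; mark the doorbell as owed. */
static void
toy_post(struct toy_txq *q)
{
	q->pidx++;
	q->db_pending = 1;
}

int
main(void)
{
	struct toy_txq q = { 0, 0 };

	for (int i = 0; i < 4; i++)	/* batch several descriptors... */
		toy_post(&q);
	if (q.db_pending)		/* ...then ring once for the batch */
		toy_ring_db(&q);
	return (0);
}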
1747 error = drbr_enqueue(ifp, qs->txq[TXQ_ETH].txq_mr, m);
1874 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1920 struct sge_txq *q = &qs->txq[TXQ_CTRL];
1976 if (q->txq[TXQ_ETH].txq_mr != NULL)
1977 buf_ring_free(q->txq[TXQ_ETH].txq_mr, M_DEVBUF);
1978 if (q->txq[TXQ_ETH].txq_ifq != NULL) {
1979 ifq_delete(q->txq[TXQ_ETH].txq_ifq);
1980 free(q->txq[TXQ_ETH].txq_ifq, M_DEVBUF);
2003 if (q->txq[i].desc) {
2005 t3_sge_enable_ecntxt(sc, q->txq[i].cntxt_id, 0);
2007 bus_dmamap_unload(q->txq[i].desc_tag,
2008 q->txq[i].desc_map);
2009 bus_dmamem_free(q->txq[i].desc_tag, q->txq[i].desc,
2010 q->txq[i].desc_map);
2011 bus_dma_tag_destroy(q->txq[i].desc_tag);
2012 bus_dma_tag_destroy(q->txq[i].entry_tag);
2014 if (q->txq[i].sdesc) {
2015 free(q->txq[i].sdesc, M_DEVBUF);
2101 taskqueue_drain(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2102 taskqueue_drain(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2125 struct sge_txq *q = &qs->txq[queue];
2271 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2314 struct sge_txq *q = &qs->txq[TXQ_OFLD];
2387 should_restart_tx(&qs->txq[TXQ_OFLD]) &&
2389 qs->txq[TXQ_OFLD].restarts++;
2390 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_OFLD].qresume_task);
2394 should_restart_tx(&qs->txq[TXQ_CTRL]) &&
2396 qs->txq[TXQ_CTRL].restarts++;
2397 taskqueue_enqueue(sc->tq, &qs->txq[TXQ_CTRL].qresume_task);
2427 if ((q->txq[TXQ_ETH].txq_mr = buf_ring_alloc(cxgb_txq_buf_ring_size,
2432 if ((q->txq[TXQ_ETH].txq_ifq = malloc(sizeof(struct ifaltq), M_DEVBUF,
2437 ifq_init(q->txq[TXQ_ETH].txq_ifq, pi->ifp);
2438 callout_init(&q->txq[TXQ_ETH].txq_timer, 1);
2439 callout_init(&q->txq[TXQ_ETH].txq_watchdog, 1);
2440 q->txq[TXQ_ETH].txq_timer.c_cpu = id % mp_ncpus;
2441 q->txq[TXQ_ETH].txq_watchdog.c_cpu = id % mp_ncpus;
2480 &q->txq[i].phys_addr, &q->txq[i].desc,
2481 &q->txq[i].sdesc, &q->txq[i].desc_tag,
2482 &q->txq[i].desc_map,
2483 sc->tx_dmat, &q->txq[i].entry_tag)) != 0) {
2487 mbufq_init(&q->txq[i].sendq, INT_MAX);
2488 q->txq[i].gen = 1;
2489 q->txq[i].size = p->txq_size[i];
2493 TASK_INIT(&q->txq[TXQ_OFLD].qresume_task, 0, restart_offloadq, q);
2495 TASK_INIT(&q->txq[TXQ_CTRL].qresume_task, 0, restart_ctrlq, q);
2496 TASK_INIT(&q->txq[TXQ_ETH].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2497 TASK_INIT(&q->txq[TXQ_OFLD].qreclaim_task, 0, sge_txq_reclaim_handler, q);
2507 q->txq[TXQ_ETH].stop_thres = nports *
2561 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_ETH].cntxt_id, USE_GTS,
2562 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr,
2563 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token,
2571 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_OFLD].cntxt_id,
2573 q->txq[TXQ_OFLD].phys_addr,
2574 q->txq[TXQ_OFLD].size, 0, 1, 0);
2582 ret = -t3_sge_init_ecntxt(sc, q->txq[TXQ_CTRL].cntxt_id, 0,
2584 q->txq[TXQ_CTRL].phys_addr,
2585 q->txq[TXQ_CTRL].size,
2586 q->txq[TXQ_CTRL].token, 1, 0);
2784 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_ETH].flags);
2788 qs->txq[TXQ_ETH].processed += credits;
2792 qs->txq[TXQ_CTRL].processed += credits;
2796 clear_bit(TXQ_RUNNING, &qs->txq[TXQ_OFLD].flags);
2800 qs->txq[TXQ_OFLD].processed += credits;
3154 struct sge_txq *txq;
3162 txq = arg1;
3163 qs = txq_to_qset(txq, TXQ_ETH);
3164 if (txq->txq_dump_count == 0) {
3167 if (txq->txq_dump_count > TX_ETH_Q_SIZE) {
3169 "dump count is too large %d\n", txq->txq_dump_count);
3170 txq->txq_dump_count = 1;
3173 if (txq->txq_dump_start > (TX_ETH_Q_SIZE-1)) {
3176 txq->txq_dump_start);
3177 txq->txq_dump_start = 0;
3195 txq->txq_dump_start,
3196 (txq->txq_dump_start + txq->txq_dump_count) & (TX_ETH_Q_SIZE-1));
3198 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3199 for (i = txq->txq_dump_start; i < dump_end; i++) {
3200 txd = &txq->desc[i & (TX_ETH_Q_SIZE-1)];
3221 struct sge_txq *txq;
3228 txq = arg1;
3229 qs = txq_to_qset(txq, TXQ_CTRL);
3230 if (txq->txq_dump_count == 0) {
3233 if (txq->txq_dump_count > 256) {
3235 "dump count is too large %d\n", txq->txq_dump_count);
3236 txq->txq_dump_count = 1;
3239 if (txq->txq_dump_start > 255) {
3242 txq->txq_dump_start);
3243 txq->txq_dump_start = 0;
3252 txq->txq_dump_start,
3253 (txq->txq_dump_start + txq->txq_dump_count) & 255);
3255 dump_end = txq->txq_dump_start + txq->txq_dump_count;
3256 for (i = txq->txq_dump_start; i < dump_end; i++) {
3257 txd = &txq->desc[i & (255)];
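Both dump handlers clamp txq_dump_count and txq_dump_start and then iterate with the index masked by the ring size minus one (3199-3200, 3256-3257); this works only because the ring sizes are powers of two. A small standalone model of that wrapped walk:

#include <stdio.h>

#define RING_SIZE	256	/* must be a power of two for the mask to work */

/*
 * Walk "count" slots starting at "start": the loop index may run past the
 * ring's end and "& (RING_SIZE - 1)" folds it back into range, as the dump
 * loops above do with (TX_ETH_Q_SIZE-1) and 255.
 */
static void
dump_range(const int *ring, unsigned int start, unsigned int count)
{
	unsigned int end = start + count;

	for (unsigned int i = start; i < end; i++)
		printf("slot %3u = %d\n", i & (RING_SIZE - 1),
		    ring[i & (RING_SIZE - 1)]);
}

int
main(void)
{
	int ring[RING_SIZE];

	for (int i = 0; i < RING_SIZE; i++)
		ring[i] = i * i;
	dump_range(ring, 254, 4);	/* wraps: slots 254, 255, 0, 1 */
	return (0);
}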
3377 0, "#times txq overrun");
3451 struct sge_txq *txq = &qs->txq[TXQ_ETH];
3471 txq_names[0], CTLFLAG_RD, NULL, "txq statistics");
3508 CTLFLAG_RD, &qs->txq[TXQ_ETH].txq_mr->br_drops,
3511 CTLFLAG_RD, &qs->txq[TXQ_ETH].sendq.mq_len,
3515 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_prod,
3518 CTLFLAG_RD, (uint32_t *)(uintptr_t)&qs->txq[TXQ_ETH].txq_mr->br_cons,
3522 CTLFLAG_RD, &qs->txq[TXQ_ETH].processed,
3525 CTLFLAG_RD, &txq->cleaned,
3528 CTLFLAG_RD, &txq->in_use,
3531 CTLFLAG_RD, &txq->txq_frees,
3534 CTLFLAG_RD, &txq->txq_skipped,
3537 CTLFLAG_RD, &txq->txq_coalesced,
3540 CTLFLAG_RD, &txq->txq_enqueued,
3546 CTLFLAG_RD, &txq->phys_addr,
3549 CTLFLAG_RW, &qs->txq[TXQ_ETH].gen,
3550 0, "txq generation");
3552 CTLFLAG_RD, &txq->cidx,
3555 CTLFLAG_RD, &txq->pidx,
3558 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_start,
3559 0, "txq start idx for dump");
3561 CTLFLAG_RW, &qs->txq[TXQ_ETH].txq_dump_count,
3562 0, "txq #entries to dump");
3564 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_ETH],
3568 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_start,
3571 CTLFLAG_RW, &qs->txq[TXQ_CTRL].txq_dump_count,
3574 CTLTYPE_STRING | CTLFLAG_RD, &qs->txq[TXQ_CTRL],
3689 if (!qs->txq[qnum].desc || idx >= qs->txq[qnum].size)
3691 memcpy(data, &qs->txq[qnum].desc[idx], sizeof(struct tx_desc));