Lines Matching refs:txq

440 #define TXQ_AVAIL(txq) (txq->ift_size - get_inuse(txq->ift_size, txq->ift_cidx, txq->ift_pidx, txq->ift_gen))
561 #define CALLOUT_LOCK(txq) mtx_lock(&txq->ift_mtx)
562 #define CALLOUT_UNLOCK(txq) mtx_unlock(&txq->ift_mtx)
655 &iflib_encap_txq_avail_fail, 0, "# txq avail failures");
726 static int iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq);
731 static void iflib_txq_check_drain(iflib_txq_t txq, int budget);
738 static void iflib_ifmp_purge(iflib_txq_t txq);
926 iflib_txq_t txq = &ctx->ifc_txqs[kring->ring_id];
928 if (txq->ift_sds.ifsd_map)
929 bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
957 pi.ipi_segs = txq->ift_segs;
963 __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i]);
964 if (txq->ift_sds.ifsd_map)
965 __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i]);
990 __builtin_prefetch(&txq->ift_sds.ifsd_m[nic_i + 1]);
991 if (txq->ift_sds.ifsd_map) {
992 __builtin_prefetch(&txq->ift_sds.ifsd_map[nic_i + 1]);
998 netmap_reload_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[nic_i], addr);
1001 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_sds.ifsd_map[nic_i],
1011 if (txq->ift_sds.ifsd_map)
1012 bus_dmamap_sync(txq->ift_desc_tag, txq->ift_ifdi->idi_map,
1016 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, nic_i);
1022 if (iflib_tx_credits_update(ctx, txq)) {
1024 nic_i = txq->ift_cidx_processed;
1160 iflib_netmap_txq_init(if_ctx_t ctx, iflib_txq_t txq)
1165 slot = netmap_reset(na, NR_TX, txq->ift_id, 0);
1168 if (txq->ift_sds.ifsd_map == NULL)
1180 int si = netmap_idx_n2k(na->tx_rings[txq->ift_id], i);
1181 netmap_load_map(na, txq->ift_desc_tag, txq->ift_sds.ifsd_map[i], NMB(na, slot + si));
1203 #define iflib_netmap_txq_init(ctx, txq)
1501 iflib_txsd_alloc(iflib_txq_t txq)
1503 if_ctx_t ctx = txq->ift_ctx;
1512 MPASS(scctx->isc_ntxd[txq->ift_br_offset] > 0);
1529 &txq->ift_desc_tag))) {
1546 &txq->ift_tso_desc_tag))) {
1551 if (!(txq->ift_sds.ifsd_flags =
1553 scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1558 if (!(txq->ift_sds.ifsd_m =
1560 scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1571 if (!(txq->ift_sds.ifsd_map =
1572 (bus_dmamap_t *) malloc(sizeof(bus_dmamap_t) * scctx->isc_ntxd[txq->ift_br_offset], M_IFLIB, M_NOWAIT | M_ZERO))) {
1578 for (int i = 0; i < scctx->isc_ntxd[txq->ift_br_offset]; i++) {
1579 err = bus_dmamap_create(txq->ift_desc_tag, 0, &txq->ift_sds.ifsd_map[i]);
1594 iflib_txsd_destroy(if_ctx_t ctx, iflib_txq_t txq, int i)
1598 if (txq->ift_sds.ifsd_map != NULL) {
1599 map = txq->ift_sds.ifsd_map[i];
1600 bus_dmamap_unload(txq->ift_desc_tag, map);
1601 bus_dmamap_destroy(txq->ift_desc_tag, map);
1602 txq->ift_sds.ifsd_map[i] = NULL;
1607 iflib_txq_destroy(iflib_txq_t txq)
1609 if_ctx_t ctx = txq->ift_ctx;
1611 for (int i = 0; i < txq->ift_size; i++)
1612 iflib_txsd_destroy(ctx, txq, i);
1614 if (txq->ift_br != NULL) {
1615 ifmp_ring_free(txq->ift_br);
1616 txq->ift_br = NULL;
1619 mtx_destroy(&txq->ift_mtx);
1621 if (txq->ift_sds.ifsd_map != NULL) {
1622 free(txq->ift_sds.ifsd_map, M_IFLIB);
1623 txq->ift_sds.ifsd_map = NULL;
1625 if (txq->ift_sds.ifsd_m != NULL) {
1626 free(txq->ift_sds.ifsd_m, M_IFLIB);
1627 txq->ift_sds.ifsd_m = NULL;
1629 if (txq->ift_sds.ifsd_flags != NULL) {
1630 free(txq->ift_sds.ifsd_flags, M_IFLIB);
1631 txq->ift_sds.ifsd_flags = NULL;
1633 if (txq->ift_desc_tag != NULL) {
1634 bus_dma_tag_destroy(txq->ift_desc_tag);
1635 txq->ift_desc_tag = NULL;
1637 if (txq->ift_tso_desc_tag != NULL) {
1638 bus_dma_tag_destroy(txq->ift_tso_desc_tag);
1639 txq->ift_tso_desc_tag = NULL;
1641 if (txq->ift_ifdi != NULL) {
1642 free(txq->ift_ifdi, M_IFLIB);
1647 iflib_txsd_free(if_ctx_t ctx, iflib_txq_t txq, int i)
1651 mp = &txq->ift_sds.ifsd_m[i];
1655 if (txq->ift_sds.ifsd_map != NULL) {
1656 bus_dmamap_sync(txq->ift_desc_tag,
1657 txq->ift_sds.ifsd_map[i],
1659 bus_dmamap_unload(txq->ift_desc_tag,
1660 txq->ift_sds.ifsd_map[i]);
1668 iflib_txq_setup(iflib_txq_t txq)
1670 if_ctx_t ctx = txq->ift_ctx;
1676 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
1678 txq->ift_update_freq = IFLIB_DEFAULT_TX_UPDATE_FREQ;
1681 txq->ift_cidx_processed = 0;
1682 txq->ift_pidx = txq->ift_cidx = txq->ift_npending = 0;
1683 txq->ift_size = scctx->isc_ntxd[txq->ift_br_offset];
1685 for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
1688 IFDI_TXQ_SETUP(ctx, txq->ift_id);
1689 for (i = 0, di = txq->ift_ifdi; i < ctx->ifc_nhwtxqs; i++, di++)
2124 iflib_txq_t txq = arg;
2125 if_ctx_t ctx = txq->ift_ctx;
2135 IFDI_TIMER(ctx, txq->ift_id);
2136 if ((txq->ift_qstatus == IFLIB_QUEUE_HUNG) &&
2137 ((txq->ift_cleaned_prev == txq->ift_cleaned) ||
2141 if (txq->ift_qstatus != IFLIB_QUEUE_IDLE &&
2142 ifmp_ring_is_stalled(txq->ift_br)) {
2144 txq->ift_qstatus = IFLIB_QUEUE_HUNG;
2146 txq->ift_cleaned_prev = txq->ift_cleaned;
2148 if (txq->ift_db_pending)
2149 GROUPTASK_ENQUEUE(&txq->ift_task);
2153 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
2157 txq->ift_id, TXQ_AVAIL(txq), txq->ift_pidx);
2193 iflib_txq_t txq;
2214 for (i = 0, txq = ctx->ifc_txqs; i < sctx->isc_ntxqsets; i++, txq++) {
2215 CALLOUT_LOCK(txq);
2216 callout_stop(&txq->ift_timer);
2217 CALLOUT_UNLOCK(txq);
2218 iflib_netmap_txq_init(ctx, txq);
2250 txq = ctx->ifc_txqs;
2251 for (i = 0; i < sctx->isc_ntxqsets; i++, txq++)
2252 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq,
2253 txq->ift_timer.c_cpu);
2283 iflib_txq_t txq = ctx->ifc_txqs;
2300 for (i = 0; i < scctx->isc_ntxqsets; i++, txq++) {
2303 CALLOUT_LOCK(txq);
2304 callout_stop(&txq->ift_timer);
2305 CALLOUT_UNLOCK(txq);
2308 iflib_ifmp_purge(txq);
2310 for (j = 0; j < txq->ift_size; j++) {
2311 iflib_txsd_free(ctx, txq, j);
2313 txq->ift_processed = txq->ift_cleaned = txq->ift_cidx_processed = 0;
2314 txq->ift_in_use = txq->ift_gen = txq->ift_cidx = txq->ift_pidx = txq->ift_no_desc_avail = 0;
2315 txq->ift_closed = txq->ift_mbuf_defrag = txq->ift_mbuf_defrag_failed = 0;
2316 txq->ift_no_tx_dma_setup = txq->ift_txd_encap_efbig = txq->ift_map_failed = 0;
2317 txq->ift_pullups = 0;
2318 ifmp_ring_reset_stats(txq->ift_br);
2319 for (j = 0, di = txq->ift_ifdi; j < ctx->ifc_nhwtxqs; j++, di++)
2725 #define TXD_NOTIFY_COUNT(txq) (((txq)->ift_size / (txq)->ift_update_freq)-1)
2727 txq_max_db_deferred(iflib_txq_t txq, qidx_t in_use)
2729 qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2730 qidx_t minthresh = txq->ift_size / 8;
2741 txq_max_rs_deferred(iflib_txq_t txq)
2743 qidx_t notify_count = TXD_NOTIFY_COUNT(txq);
2744 qidx_t minthresh = txq->ift_size / 8;
2745 if (txq->ift_in_use > 4*minthresh)
2747 if (txq->ift_in_use > 2*minthresh)
2749 if (txq->ift_in_use > minthresh)
2757 #define TXQ_MAX_DB_DEFERRED(txq, in_use) txq_max_db_deferred((txq), (in_use))
2758 #define TXQ_MAX_RS_DEFERRED(txq) txq_max_rs_deferred(txq)
2773 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use)
2779 max = TXQ_MAX_DB_DEFERRED(txq, in_use);
2780 if (ring || txq->ift_db_pending >= max) {
2781 dbval = txq->ift_npending ? txq->ift_npending : txq->ift_pidx;
2782 ctx->isc_txd_flush(ctx->ifc_softc, txq->ift_id, dbval);
2783 txq->ift_db_pending = txq->ift_npending = 0;
2808 iflib_parse_header(iflib_txq_t txq, if_pkt_info_t pi, struct mbuf **mp)
2810 if_shared_ctx_t sctx = txq->ift_ctx->ifc_sctx;
2831 txq->ift_pullups++;
2866 txq->ift_pullups++;
2872 txq->ift_pullups++;
2892 txq->ift_pullups++;
2944 txq->ift_pullups++;
3008 iflib_remove_mbuf(iflib_txq_t txq)
3013 pidx = txq->ift_pidx;
3014 ifsd_m = txq->ift_sds.ifsd_m;
3015 ntxd = txq->ift_size;
3019 txq->ift_dequeued++;
3026 txq->ift_dequeued++;
3035 iflib_busdma_load_mbuf_sg(iflib_txq_t txq, bus_dma_tag_t tag, bus_dmamap_t map,
3053 ctx = txq->ift_ctx;
3056 ifsd_m = txq->ift_sds.ifsd_m;
3057 ntxd = txq->ift_size;
3058 pidx = txq->ift_pidx;
3060 uint8_t *ifsd_flags = txq->ift_sds.ifsd_flags;
3124 txq->ift_enqueued++;
3148 *m0 = iflib_remove_mbuf(txq);
3153 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3159 ntxd = txq->ift_size;
3160 size = txq->ift_txd_size[qid];
3161 start = txq->ift_ifdi[qid].idi_vaddr;
3214 iflib_encap(iflib_txq_t txq, struct mbuf **m_headp)
3228 segs = txq->ift_segs;
3229 ctx = txq->ift_ctx;
3232 segs = txq->ift_segs;
3233 ntxd = txq->ift_size;
3240 cidx = txq->ift_cidx;
3241 pidx = txq->ift_pidx;
3245 next_txd = calc_next_txd(txq, cidx, 0);
3250 prefetch(&txq->ift_sds.ifsd_m[next]);
3251 if (txq->ift_sds.ifsd_map != NULL) {
3252 prefetch(&txq->ift_sds.ifsd_map[next]);
3254 prefetch(&txq->ift_sds.ifsd_flags[next]);
3256 } else if (txq->ift_sds.ifsd_map != NULL)
3257 map = txq->ift_sds.ifsd_map[pidx];
3260 desc_tag = txq->ift_tso_desc_tag;
3263 desc_tag = txq->ift_desc_tag;
3277 pi.ipi_qsidx = txq->ift_id;
3284 if (__predict_false((err = iflib_parse_header(txq, &pi, m_headp)) != 0))
3290 err = iflib_busdma_load_mbuf_sg(txq, desc_tag, map, m_headp, segs, &nsegs, max_segs, BUS_DMA_NOWAIT);
3312 txq->ift_mbuf_defrag++;
3317 txq->ift_no_tx_dma_setup++;
3320 txq->ift_no_tx_dma_setup++;
3326 txq->ift_map_failed++;
3336 if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
3337 txq->ift_no_desc_avail++;
3341 if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
3342 GROUPTASK_ENQUEUE(&txq->ift_task);
3351 txq->ift_rs_pending += nsegs + 1;
3352 if (txq->ift_rs_pending > TXQ_MAX_RS_DEFERRED(txq) ||
3353 iflib_no_tx_batch || (TXQ_AVAIL(txq) - nsegs) <= MAX_TX_DESC(ctx) + 2) {
3355 txq->ift_rs_pending = 0;
3361 MPASS(pidx >= 0 && pidx < txq->ift_size);
3369 bus_dmamap_sync(txq->ift_ifdi->idi_tag, txq->ift_ifdi->idi_map,
3372 MPASS(pi.ipi_new_pidx < txq->ift_size);
3376 ndesc += txq->ift_size;
3377 txq->ift_gen = 1;
3386 txq->ift_in_use += ndesc;
3392 txq->ift_pidx = pi.ipi_new_pidx;
3393 txq->ift_npending += pi.ipi_ndescs;
3395 *m_headp = m_head = iflib_remove_mbuf(txq);
3397 txq->ift_txd_encap_efbig++;
3409 txq->ift_mbuf_defrag_failed++;
3410 txq->ift_map_failed++;
3418 iflib_tx_desc_free(iflib_txq_t txq, int n)
3427 cidx = txq->ift_cidx;
3428 gen = txq->ift_gen;
3429 qsize = txq->ift_size;
3431 hasmap = txq->ift_sds.ifsd_map != NULL;
3432 ifsd_flags = txq->ift_sds.ifsd_flags;
3433 ifsd_m = txq->ift_sds.ifsd_m;
3434 ifsd_map = txq->ift_sds.ifsd_map;
3435 do_prefetch = (txq->ift_ctx->ifc_flags & IFC_PREFETCH);
3450 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
3467 txq->ift_dequeued++;
3477 txq->ift_cidx = cidx;
3478 txq->ift_gen = gen;
3482 iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh)
3485 if_ctx_t ctx = txq->ift_ctx;
3488 MPASS(thresh /*+ MAX_TX_DESC(txq->ift_ctx) */ < txq->ift_size);
3493 iflib_tx_credits_update(ctx, txq);
3494 reclaim = DESC_RECLAIMABLE(txq);
3496 if (reclaim <= thresh /* + MAX_TX_DESC(txq->ift_ctx) */) {
3500 txq->ift_processed, txq->ift_cleaned, txq->ift_ctx->ifc_softc_ctx.isc_tx_nsegments,
3507 iflib_tx_desc_free(txq, reclaim);
3508 txq->ift_cleaned += reclaim;
3509 txq->ift_in_use -= reclaim;
3535 iflib_txq_check_drain(iflib_txq_t txq, int budget)
3538 ifmp_ring_check_drainage(txq->ift_br, budget);
3544 iflib_txq_t txq = r->cookie;
3545 if_ctx_t ctx = txq->ift_ctx;
3547 return ((TXQ_AVAIL(txq) > MAX_TX_DESC(ctx) + 2) ||
3548 ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false));
3554 iflib_txq_t txq = r->cookie;
3555 if_ctx_t ctx = txq->ift_ctx;
3567 reclaimed = iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
3568 rang = iflib_txd_db_check(ctx, txq, reclaimed, txq->ift_in_use);
3580 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3581 CALLOUT_LOCK(txq);
3582 callout_stop(&txq->ift_timer);
3583 CALLOUT_UNLOCK(txq);
3588 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3594 avail, ctx->ifc_flags, TXQ_AVAIL(txq));
3597 avail = TXQ_AVAIL(txq);
3603 if (__predict_false(*mp == (struct mbuf *)txq)) {
3608 in_use_prev = txq->ift_in_use;
3609 pidx_prev = txq->ift_pidx;
3610 err = iflib_encap(txq, mp);
3627 avail = TXQ_AVAIL(txq);
3629 txq->ift_db_pending += (txq->ift_in_use - in_use_prev);
3630 desc_used += (txq->ift_in_use - in_use_prev);
3634 rang = iflib_txd_db_check(ctx, txq, false, in_use_prev);
3638 ring = rang ? false : (iflib_min_tx_latency | err) || (TXQ_AVAIL(txq) < MAX_TX_DESC(ctx));
3639 iflib_txd_db_check(ctx, txq, ring, txq->ift_in_use);
3662 iflib_txq_t txq;
3664 txq = r->cookie;
3666 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
3667 CALLOUT_LOCK(txq);
3668 callout_stop(&txq->ift_timer);
3669 CALLOUT_UNLOCK(txq);
3674 if (__predict_false(*mp == (struct mbuf *)txq))
3683 iflib_ifmp_purge(iflib_txq_t txq)
3687 r = txq->ift_br;
3700 iflib_txq_t txq = context;
3701 if_ctx_t ctx = txq->ift_ctx;
3706 txq->ift_cpu_exec_count[curcpu]++;
3711 if (ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, false))
3712 netmap_tx_irq(ifp, txq->ift_id);
3713 IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3716 if (txq->ift_db_pending)
3717 ifmp_ring_enqueue(txq->ift_br, (void **)&txq, 1, TX_BATCH_SIZE);
3718 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3722 rc = IFDI_TX_QUEUE_INTR_ENABLE(ctx, txq->ift_id);
3781 iflib_txq_t txq;
3801 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++) {
3802 CALLOUT_LOCK(txq);
3803 callout_stop(&txq->ift_timer);
3804 CALLOUT_UNLOCK(txq);
3811 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3812 callout_reset_on(&txq->ift_timer, hz/2, iflib_timer, txq, txq->ift_timer.c_cpu);
3820 for (txq = ctx->ifc_txqs, i = 0; i < sctx->isc_ntxqsets; i++, txq++)
3821 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
3885 iflib_txq_t txq;
3901 txq = &ctx->ifc_txqs[qidx];
3904 if (txq->ift_closed) {
3938 err = ifmp_ring_enqueue(txq->ift_br, (void **)&m, 1, TX_BATCH_SIZE);
3940 GROUPTASK_ENQUEUE(&txq->ift_task);
3944 txq->ift_closed = TRUE;
3946 ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
3957 iflib_txq_t txq = ctx->ifc_txqs;
3963 for (i = 0; i < NTXQSETS(ctx); i++, txq++)
3964 while (!(ifmp_ring_is_idle(txq->ift_br) || ifmp_ring_is_stalled(txq->ift_br)))
3965 iflib_txq_check_drain(txq, 0);
4538 iflib_txq_t txq;
4575 for (txq = ctx->ifc_txqs, i = 0; i < NTXQSETS(ctx); i++, txq++) {
4576 callout_drain(&txq->ift_timer);
4577 if (txq->ift_task.gt_uniq != NULL)
4578 taskqgroup_detach(tqg, &txq->ift_task);
4668 iflib_txq_t txq = ctx->ifc_txqs;
4674 for (int i = 0; i < NTXQSETS(ctx); i++, txq++)
4675 iflib_txq_check_drain(txq, IFLIB_RESTART_BUDGET);
4861 iflib_txq_t txq;
4895 txq = ctx->ifc_txqs;
4901 for (txconf = i = 0, cpu = CPU_FIRST(); i < ntxqsets; i++, txconf++, txq++, cpu = CPU_NEXT(cpu)) {
4909 txq->ift_ifdi = ifdip;
4916 txq->ift_txd_size[j] = scctx->isc_txd_size[j];
4919 txq->ift_ctx = ctx;
4920 txq->ift_id = i;
4922 txq->ift_br_offset = 1;
4924 txq->ift_br_offset = 0;
4927 txq->ift_timer.c_cpu = cpu;
4929 if (iflib_txsd_alloc(txq)) {
4936 snprintf(txq->ift_mtx_name, MTX_NAME_LEN, "%s:tx(%d):callout",
4937 device_get_nameunit(dev), txq->ift_id);
4938 mtx_init(&txq->ift_mtx, txq->ift_mtx_name, NULL, MTX_DEF);
4939 callout_init_mtx(&txq->ift_timer, &txq->ift_mtx, 0);
4941 snprintf(txq->ift_db_mtx_name, MTX_NAME_LEN, "%s:tx(%d):db",
4942 device_get_nameunit(dev), txq->ift_id);
4944 err = ifmp_ring_alloc(&txq->ift_br, 2048, txq, iflib_txq_drain,
5069 iflib_txq_t txq = ctx->ifc_txqs;
5072 for (i = 0; i < NTXQSETS(ctx); i++, txq++)
5073 iflib_txq_setup(txq);
5081 iflib_txq_t txq = ctx->ifc_txqs;
5084 for (i = 0; i < NTXQSETS(ctx); i++, txq++) {
5086 iflib_dma_free(&txq->ift_ifdi[j]);
5087 iflib_txq_destroy(txq);
5475 iflib_txq_t txq = ctx->ifc_txqs;
5504 GROUPTASK_INIT(&txq->ift_task, 0, _task_fn_tx, txq);
5505 taskqgroup_attach(qgroup_if_io_tqg, &txq->ift_task, txq, rman_get_start(irq->ii_res), "tx");
5578 iflib_txq_t txq = ctx->ifc_txqs;
5588 for (int i = 0; i < ctx->ifc_softc_ctx.isc_ntxqsets; i++, txq++)
5589 txq->ift_qstatus = IFLIB_QUEUE_IDLE;
5596 iflib_tx_credits_update(if_ctx_t ctx, iflib_txq_t txq)
5600 int credits_pre = txq->ift_cidx_processed;
5606 if ((credits = ctx->isc_txd_credits_update(ctx->ifc_softc, txq->ift_id, true)) == 0)
5609 txq->ift_processed += credits;
5610 txq->ift_cidx_processed += credits;
5612 MPASS(credits_pre + credits == txq->ift_cidx_processed);
5613 if (txq->ift_cidx_processed >= txq->ift_size)
5614 txq->ift_cidx_processed -= txq->ift_size;
5920 "permit #txq != #rxq");
5948 iflib_txq_t txq;
5961 qfmt = "txq%03d";
5963 qfmt = "txq%02d";
5965 qfmt = "txq%d";
5966 for (i = 0, txq = ctx->ifc_txqs; i < scctx->isc_ntxqsets; i++, txq++) {
5974 &txq->ift_dequeued, "total mbufs freed");
5977 &txq->ift_enqueued, "total mbufs enqueued");
5981 &txq->ift_mbuf_defrag, "# of times m_defrag was called");
5984 &txq->ift_pullups, "# of times m_pullup was called");
5987 &txq->ift_mbuf_defrag_failed, "# of times m_defrag failed");
5990 &txq->ift_no_desc_avail, "# of times no descriptors were available");
5993 &txq->ift_map_failed, "# of times dma map failed");
5996 &txq->ift_txd_encap_efbig, "# of times txd_encap returned EFBIG");
5999 &txq->ift_no_tx_dma_setup, "# of times map failed for other than EFBIG");
6002 &txq->ift_pidx, 1, "Producer Index");
6005 &txq->ift_cidx, 1, "Consumer Index");
6008 &txq->ift_cidx_processed, 1, "Consumer Index seen by credit update");
6011 &txq->ift_in_use, 1, "descriptors in use");
6014 &txq->ift_processed, "descriptors processed for clean");
6017 &txq->ift_cleaned, "total cleaned");
6019 CTLTYPE_STRING | CTLFLAG_RD, __DEVOLATILE(uint64_t *, &txq->ift_br->state),
6022 CTLFLAG_RD, &txq->ift_br->enqueues,
6025 CTLFLAG_RD, &txq->ift_br->drops,
6028 CTLFLAG_RD, &txq->ift_br->starts,
6031 CTLFLAG_RD, &txq->ift_br->stalls,
6034 CTLFLAG_RD, &txq->ift_br->restarts,
6037 CTLFLAG_RD, &txq->ift_br->abdications,
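
Several of the matched lines revolve around ring occupancy accounting: TXQ_AVAIL at line 440 and the TXD_NOTIFY_COUNT / txq_max_db_deferred helpers around lines 2725-2757. The standalone sketch below illustrates that accounting under stated assumptions: the body of get_inuse() is not part of this listing, so its handling of the wrap generation bit is inferred, and the *_sketch helpers with their exact return values are illustrative, not the iflib implementation.

/*
 * Minimal standalone sketch (not the iflib code) of the ring accounting
 * that TXQ_AVAIL() and txq_max_db_deferred() appear to rely on.  The
 * treatment of the "gen" wrap bit is an assumption; names ending in
 * _sketch are illustrative only.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t qidx_t;

/* Descriptors currently in use, given consumer/producer indices and a
 * generation bit that disambiguates a full ring from an empty one when
 * cidx == pidx. */
static int
get_inuse_sketch(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	if (pidx > cidx)
		return (pidx - cidx);
	if (pidx < cidx)
		return (size - cidx + pidx);
	/* cidx == pidx: empty if the producer has not wrapped, else full. */
	return (gen ? size : 0);
}

/* Mirrors the shape of TXQ_AVAIL(): free descriptors = ring size - in use. */
static int
txq_avail_sketch(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
	return (size - get_inuse_sketch(size, cidx, pidx, gen));
}

/* Tiered doorbell deferral: the fuller the ring, the fewer descriptors we
 * let accumulate before flushing.  The size/8 tier boundaries follow the
 * minthresh comparisons visible in the listing; the return values are
 * illustrative. */
static int
max_db_deferred_sketch(int size, int update_freq, int in_use)
{
	int notify_count = size / update_freq - 1;
	int minthresh = size / 8;

	if (in_use > 4 * minthresh)
		return (notify_count >> 1);
	if (in_use > 2 * minthresh)
		return (notify_count >> 2);
	if (in_use > minthresh)
		return (notify_count >> 3);
	return (0);
}

int
main(void)
{
	const int size = 1024, update_freq = 16;

	assert(txq_avail_sketch(size, 0, 0, 0) == size);          /* empty */
	assert(txq_avail_sketch(size, 100, 100, 1) == 0);         /* full  */
	assert(txq_avail_sketch(size, 1000, 10, 0) == size - 34); /* wrapped */

	printf("deferral at 25%% full: %d\n",
	    max_db_deferred_sketch(size, update_freq, size / 4));
	printf("deferral at 75%% full: %d\n",
	    max_db_deferred_sketch(size, update_freq, 3 * size / 4));
	return (0);
}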