Lines matching defs:cidx (FreeBSD sys/net/iflib.c)

422 get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
426 if (pidx > cidx)
427 used = pidx - cidx;
428 else if (pidx < cidx)
429 used = size - cidx + pidx;
430 else if (gen == 0 && pidx == cidx)
432 else if (gen == 1 && pidx == cidx)
447 * these are the cq cidx and pidx. Otherwise
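The get_inuse() fragments above (source lines 422-432) compute how many descriptors sit between the consumer index and the producer index. The two branch bodies and the return that the listing omits are filled in below as a sketch, based on the generation-bit convention the surrounding lines imply: equal indices mean an empty ring while the generation is unchanged, and a full ring once the producer has lapped the consumer.

/*
 * Sketch reconstructed from lines 422-432.  qidx_t is iflib's 16-bit queue
 * index type; the used = 0, used = size, and panic branches are inferred
 * here, not quoted from the source.
 */
static int
get_inuse(int size, qidx_t cidx, qidx_t pidx, uint8_t gen)
{
        int used;

        if (pidx > cidx)
                used = pidx - cidx;             /* no wrap: plain distance */
        else if (pidx < cidx)
                used = size - cidx + pidx;      /* producer wrapped past the ring end */
        else if (gen == 0 && pidx == cidx)
                used = 0;                       /* equal, same generation: ring empty */
        else if (gen == 1 && pidx == cidx)
                used = size;                    /* equal, generation flipped: ring full */
        else
                panic("bad state");             /* defensive: indices and gen out of sync */

        return (used);
}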
727 static int iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget);
1405 int i, cidx;
1426 cidx = rxq->ifr_cq_cidx;
1428 cidx = rxq->ifr_fl[0].ifl_cidx;
1429 if (iflib_rxd_avail(ctx, rxq, cidx, 1))
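The fragments at source lines 1405-1429 pick which consumer index to poll before deciding whether receive work is pending. A minimal sketch of that selection follows, assuming the discriminator is the IFLIB_HAS_RXCQ shared-ctx flag and that the "work available" branch enqueues the queue's group task; both are assumptions, not quotes.

/* Assumed selection logic behind lines 1426-1429. */
if (sctx->isc_flags & IFLIB_HAS_RXCQ)           /* sctx: shared ctx pointer (assumption) */
        cidx = rxq->ifr_cq_cidx;                /* queue set has its own completion queue */
else
        cidx = rxq->ifr_fl[0].ifl_cidx;         /* otherwise poll free list 0 */
if (iflib_rxd_avail(ctx, rxq, cidx, 1))         /* budget of 1: just "is anything there?" */
        GROUPTASK_ENQUEUE(&rxq->ifr_task);      /* assumption: hand off to the rx task */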
1969 /* we avoid allowing pidx to catch up with cidx as it confuses ixl */
2327 /* also resets the free lists pidx/cidx */
2334 calc_next_rxd(iflib_fl_t fl, int cidx)
2346 cur = start + size*cidx;
2353 prefetch_pkts(iflib_fl_t fl, int cidx)
2360 nextptr = (cidx + CACHE_PTR_INCREMENT) & (nrxd-1);
2363 next_rxd = calc_next_rxd(fl, cidx);
2365 prefetch(fl->ifl_sds.ifsd_m[(cidx + 1) & (nrxd-1)]);
2366 prefetch(fl->ifl_sds.ifsd_m[(cidx + 2) & (nrxd-1)]);
2367 prefetch(fl->ifl_sds.ifsd_m[(cidx + 3) & (nrxd-1)]);
2368 prefetch(fl->ifl_sds.ifsd_m[(cidx + 4) & (nrxd-1)]);
2369 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 1) & (nrxd-1)]);
2370 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 2) & (nrxd-1)]);
2371 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 3) & (nrxd-1)]);
2372 prefetch(fl->ifl_sds.ifsd_cl[(cidx + 4) & (nrxd-1)]);
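Every index computation in the prefetch code above, and in the TX-side fragments further down (calc_next_txd at 3153-3165, iflib_encap at 3243-3253, the reclaim loop at 3439-3444, _ring_peek_one at 3521-3531), uses the same wrap-around idiom: ring sizes are powers of two, so masking with size minus one is a cheap modulo, and CACHE_PTR_INCREMENT appears to be the stride, in slots, that advances roughly one cache line of pointers at a time. A standalone illustration of the idiom (not iflib code):

/* Wrap-around ring indexing as used by the prefetch loops above; correct only
 * when ring_size is a power of two, so (ring_size - 1) is an all-ones mask. */
static inline int
ring_next(int cidx, int step, int ring_size)
{
        return ((cidx + step) & (ring_size - 1));   /* same as (cidx + step) % ring_size */
}

/* Example: ring_size = 512, cidx = 510 -> ring_next(510, 3, 512) == 1, so a
 * prefetch "three slots ahead" lands back near the start of the ring. */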
2378 int flid, cidx;
2386 cidx = irf->irf_idx;
2389 sd->ifsd_cidx = cidx;
2390 sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];
2391 sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];
2397 prefetch_pkts(fl, cidx);
2399 next = (cidx + CACHE_PTR_INCREMENT) & (fl->ifl_size-1);
2401 map = fl->ifl_sds.ifsd_map[cidx];
2403 next = (cidx + CACHE_LINE_SIZE) & (fl->ifl_size-1);
2409 MPASS(fl->ifl_cidx == cidx);
2419 bit_clear(fl->ifl_rx_bitmap, cidx);
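The fragments at source lines 2378-2419 belong to the routine that maps a completed receive fragment back to the software descriptor state saved when the buffer was posted. A sketch of that mapping follows; field names not visible in the listing (irf_flid, ifsd_fl, the per-rxq free-list array) are assumptions.

/* Assumed shape of the fragment -> software-descriptor lookup (lines 2378-2419). */
flid = irf->irf_flid;                           /* which free list the buffer came from (assumed field) */
cidx = irf->irf_idx;                            /* which slot in that free list */
fl = &rxq->ifr_fl[flid];                        /* assumed: per-rxq array of free lists */
sd->ifsd_fl = fl;                               /* assumed field */
sd->ifsd_cidx = cidx;
sd->ifsd_m = &fl->ifl_sds.ifsd_m[cidx];         /* mbuf stashed when the buffer was posted */
sd->ifsd_cl = &fl->ifl_sds.ifsd_cl[cidx];       /* cluster backing that mbuf */
bit_clear(fl->ifl_rx_bitmap, cidx);             /* slot no longer holds a hardware-owned buffer */

The MPASS(fl->ifl_cidx == cidx) at 2409 appears to assert that fragments complete in the same order the free list posted them.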
2636 /* will advance the cidx on the corresponding free lists */
3153 calc_next_txd(iflib_txq_t txq, int cidx, uint8_t qid)
3165 cur = start + size*cidx;
3225 int err, nsegs, ndesc, max_segs, pidx, cidx, next, ntxd;
3240 cidx = txq->ift_cidx;
3243 next = (cidx + CACHE_PTR_INCREMENT) & (ntxd-1);
3245 next_txd = calc_next_txd(txq, cidx, 0);
3253 next = (cidx + CACHE_LINE_SIZE) & (ntxd-1);
3421 uint32_t qsize, cidx, mask, gen;
3427 cidx = txq->ift_cidx;
3439 prefetch(ifsd_m[(cidx + 3) & mask]);
3440 prefetch(ifsd_m[(cidx + 4) & mask]);
3442 if (ifsd_m[cidx] != NULL) {
3443 prefetch(&ifsd_m[(cidx + CACHE_PTR_INCREMENT) & mask]);
3444 prefetch(&ifsd_flags[(cidx + CACHE_PTR_INCREMENT) & mask]);
3445 if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
3450 bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
3451 ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED;
3453 if ((m = ifsd_m[cidx]) != NULL) {
3465 ifsd_m[cidx] = NULL;
3472 if (__predict_false(++cidx == qsize)) {
3473 cidx = 0;
3477 txq->ift_cidx = cidx;
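The fragments at source lines 3421-3477 are the transmit-side reclaim: for each completed slot, drop the DMA mapping if one was loaded, free the stashed mbuf, and advance the consumer index, flipping the generation bit when it wraps (see get_inuse above). A sketch of the per-slot step follows; the loop framing, the mbuf-free call, and the generation flip are inferred, not quoted.

/* Assumed per-slot reclaim step behind lines 3442-3477. */
if (hasmap && (ifsd_flags[cidx] & TX_SW_DESC_MAPPED)) {
        bus_dmamap_unload(txq->ift_desc_tag, ifsd_map[cidx]);
        ifsd_flags[cidx] &= ~TX_SW_DESC_MAPPED; /* the map can back the next packet */
}
if ((m = ifsd_m[cidx]) != NULL) {
        m_freem(m);                             /* assumption: whole chain freed here */
        ifsd_m[cidx] = NULL;
}
if (__predict_false(++cidx == qsize)) {
        cidx = 0;
        gen ^= 1;                               /* assumption: generation flips on wrap */
}
/* ...repeat for each reclaimed slot, then publish the new consumer index: */
txq->ift_cidx = cidx;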
3515 _ring_peek_one(struct ifmp_ring *r, int cidx, int offset, int remaining)
3521 next = (cidx + CACHE_PTR_INCREMENT) & (size-1);
3524 prefetch(items[(cidx + offset) & (size-1)]);
3527 prefetch2cachelines(items[(cidx + offset + 1) & (size-1)]);
3528 prefetch2cachelines(items[(cidx + offset + 2) & (size-1)]);
3529 prefetch2cachelines(items[(cidx + offset + 3) & (size-1)]);
3531 return (__DEVOLATILE(struct mbuf **, &r->items[(cidx + offset) & (size-1)]));
3552 iflib_txq_drain(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3569 avail = IDXDIFF(pidx, cidx, r->size);
3573 m_free(r->items[(cidx + i) & (r->size-1)]);
3574 r->items[(cidx + i) & (r->size-1)] = NULL;
3601 mp = _ring_peek_one(r, cidx, i, rem);
3658 iflib_txq_drain_free(struct ifmp_ring *r, uint32_t cidx, uint32_t pidx)
3671 avail = IDXDIFF(pidx, cidx, r->size);
3673 mp = _ring_peek_one(r, cidx, i, avail - i);
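IDXDIFF() on lines 3569 and 3671 measures how many items sit between the ring's consumer and producer indices. Its definition is not in the listing, but the uses imply the standard form below, shown with a worked example.

/* Definition implied by the uses of IDXDIFF() above (head = pidx, tail = cidx). */
#define IDXDIFF(head, tail, wrap) \
        ((head) >= (tail) ? (head) - (tail) : (wrap) - (tail) + (head))

/*
 * Example: r->size = 1024, cidx = 1020, pidx = 4:
 *   IDXDIFF(4, 1020, 1024) = 1024 - 1020 + 4 = 8
 * so the drain callback has 8 queued mbufs to transmit or, on the abort path
 * at lines 3573-3574, to free and NULL out slot by slot.
 */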
5619 iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
5622 return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
5833 sbuf_printf(sb, "pidx_head: %04hd pidx_tail: %04hd cidx: %04hd state: %s",
6069 SYSCTL_ADD_U16(ctx_list, fl_list, OID_AUTO, "cidx",
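The wrapper at source lines 5619-5622 is where cidx leaves iflib: counting completed descriptors is delegated to the driver, which decodes its own descriptor format starting at cidx. The sketch below completes the call shown at 5622 with the budget argument the listing truncates; the sysctl at 6069 and the sbuf output at 5833 expose the same indices read-only for debugging.

static int
iflib_rxd_avail(if_ctx_t ctx, iflib_rxq_t rxq, qidx_t cidx, qidx_t budget)
{
        /* The driver-supplied callback reports how many completed descriptors
         * follow cidx, capped at budget; iflib never parses hardware
         * descriptors itself. */
        return (ctx->isc_rxd_available(ctx->ifc_softc, rxq->ifr_id, cidx,
            budget));
}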