Lines Matching defs:ring

324 /* this should really scale with ring size - this is a fairly arbitrary value */
553 * Only allow a single packet to take up at most 1/nth of the tx ring
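
The fragment above caps how much of the tx ring one packet may consume. A minimal standalone sketch of that check, assuming the define this comment precedes in iflib (MAX_SINGLE_PACKET_FRACTION, value 12); the helper pkt_fits_ring() itself is hypothetical:

    #include <stdbool.h>

    #define MAX_SINGLE_PACKET_FRACTION 12

    /* Reject a packet whose segment count would monopolize the tx ring. */
    static bool
    pkt_fits_ring(int nsegs, int ring_size)
    {
        return (nsegs <= ring_size / MAX_SINGLE_PACKET_FRACTION);
    }
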
892 struct netmap_ring *ring = kring->ring;
951 struct netmap_slot *slot = &ring->slot[nm_i];
1001 * Reconcile kernel and user view of the transmit ring.
1019 struct netmap_ring *ring = kring->ring;
1021 u_int nic_i; /* index into the NIC ring */
1030 * them every half ring, or where NS_REPORT is set
1043 * nic_i is the corresponding index in the NIC ring.
1046 * iterate over the netmap ring, fetch length and update
1047 * the corresponding slot in the NIC ring. Some drivers also
1056 * but only a few times per ring or when NS_REPORT is set.
1071 __builtin_prefetch(&ring->slot[nm_i]);
1076 struct netmap_slot *slot = &ring->slot[nm_i];
1106 /* Prepare the NIC TX ring. */
1120 __builtin_prefetch(&ring->slot[nm_i + 1]);
1142 /* synchronize the NIC ring */
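
The txsync fragments above (source lines 1001-1142) describe the canonical netmap pattern: nm_i walks the netmap ring from the old hwcur to head, each slot's length is copied into the matching NIC descriptor, the next slot is prefetched, and the NIC ring is synchronized at the end. A compressed, hedged model of that walk with simplified stand-in types (the real struct netmap_kring and per-driver descriptor setup carry much more state); program_nic_desc() is a hypothetical hook:

    /*
     * Compressed model of the txsync walk described above. Types are
     * simplified stand-ins for netmap's structures; the real driver also
     * loads DMA maps, handles multi-segment slots, and writes a doorbell.
     */
    #include <stdint.h>

    struct nm_slot { uint32_t len; uint16_t flags; };
    struct nm_ring { struct nm_slot *slot; uint32_t num_slots; };

    static uint32_t
    nm_next(uint32_t i, uint32_t lim)
    {
        return ((i == lim) ? 0 : i + 1);
    }

    /*
     * Walk netmap slots [nm_i, head) and program one NIC descriptor per
     * slot. program_nic_desc() is a hypothetical per-driver hook.
     */
    static uint32_t
    txsync_walk(struct nm_ring *ring, uint32_t nm_i, uint32_t head,
        uint32_t nic_i, uint32_t lim,
        void (*program_nic_desc)(uint32_t, uint32_t))
    {
        while (nm_i != head) {
            struct nm_slot *slot = &ring->slot[nm_i];

            /* Prefetch the next slot, as the fragments above do. */
            __builtin_prefetch(&ring->slot[nm_next(nm_i, lim)]);
            program_nic_desc(nic_i, slot->len);
            nm_i = nm_next(nm_i, lim);
            nic_i = nm_next(nic_i, lim);
        }
        return (nm_i); /* new value for the kring's hwcur */
    }
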
1180 * Reconcile kernel and user view of the receive ring.
1196 struct netmap_ring *ring = kring->ring;
1198 uint32_t nm_i; /* index into the netmap ring */
1199 uint32_t nic_i; /* index into the NIC ring */
1224 * nm_i is the index of the next free slot in the netmap ring,
1225 * nic_i is the index of the next received packet in the NIC ring
1228 * in netmap mode. For the receive ring we have
1235 * fl->ifl_cidx is set to 0 on a ring reinit
1267 ring->slot[nm_i].len = 0;
1268 ring->slot[nm_i].flags = 0;
1270 ring->slot[nm_i].len = ri.iri_frags[i].irf_len;
1272 ring->slot[nm_i].len -= crclen;
1273 ring->slot[nm_i].flags = 0;
1279 ring->slot[nm_i].flags = NS_MOREFRAG;
1309 * As usual nm_i is the index in the netmap ring,
1310 * nic_i is the index in the NIC ring, and
1372 * the physical buffer address in the NIC ring.
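
Source lines 1180-1372 cover the receive side: the first part imports newly received packets into netmap slots, setting len (minus the hardware CRC on the last fragment) and flagging intermediate fragments NS_MOREFRAG; the second part returns consumed buffers to the NIC ring, writing physical buffer addresses. A standalone model of the import step, assuming NS_MOREFRAG is 0x0020 as in net/netmap.h and using a simplified stand-in for iflib's if_rxd_info:

    /*
     * Standalone model of the rxsync import step: copy per-fragment
     * lengths from a simplified rx descriptor summary into netmap slots,
     * marking every fragment but the last NS_MOREFRAG and stripping the
     * hardware CRC from the last one.
     */
    #include <stdint.h>

    #define NS_MOREFRAG 0x0020 /* assumed value, per net/netmap.h */

    struct nm_slot { uint32_t len; uint16_t flags; };
    struct rx_frag { uint32_t len; };
    struct rx_info { int nfrags; struct rx_frag frag[8]; uint32_t crclen; };

    static uint32_t
    rx_import(struct nm_slot *slot, uint32_t nm_i, uint32_t lim,
        const struct rx_info *ri)
    {
        for (int i = 0; i < ri->nfrags; i++) {
            slot[nm_i].len = ri->frag[i].len;
            if (i == ri->nfrags - 1) {
                /* Last fragment: drop the CRC, clear flags. */
                slot[nm_i].len -= ri->crclen;
                slot[nm_i].flags = 0;
            } else {
                slot[nm_i].flags = NS_MOREFRAG;
            }
            nm_i = (nm_i == lim) ? 0 : nm_i + 1;
        }
        return (nm_i);
    }
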
2229 * Intel NICs have (per receive ring) RDH and RDT registers, where
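
The RDH/RDT fragment refers to the classic Intel head/tail register pair: RDH is the next descriptor the NIC will fill, RDT the last one the driver has posted. Assuming a power-of-two ring size, the number of descriptors the hardware still owns is the modular distance between them; rx_hw_avail() below is a hypothetical helper, not an iflib function:

    #include <stdint.h>

    /*
     * Descriptors currently owned by the NIC between head (RDH) and tail
     * (RDT) on a ring of power-of-two size n. Hypothetical helper;
     * register semantics per Intel datasheets.
     */
    static uint32_t
    rx_hw_avail(uint32_t rdh, uint32_t rdt, uint32_t n)
    {
        return ((rdt - rdh) & (n - 1));
    }
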
2374 * Free receive ring data structures
3197 iflib_txd_db_check(iflib_txq_t txq, int ring)
3204 /* force || threshold exceeded || at the edge of the ring */
3205 if (ring || (txq->ift_db_pending >= max) || (TXQ_AVAIL(txq) <= MAX_TX_DESC(ctx) + 2)) {
3853 bool do_prefetch, rang, ring;
3939 ring = rang ? false : (iflib_min_tx_latency | err);
3940 iflib_txd_db_check(txq, ring);
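
Source lines 3197-3940 show the doorbell batching policy: iflib_txd_db_check() rings the doorbell only when forced, when enough descriptors have accumulated, or when the ring is nearly full, amortizing expensive register writes across packets. A standalone model of that decision under simplified stand-in names (the real function also syncs DMA and writes the hardware tail register):

    /*
     * Standalone model of the doorbell batching decision quoted above:
     * write the tail register when forced, when enough descriptors are
     * pending, or when the ring is nearly full. All names are simplified
     * stand-ins for the iflib originals.
     */
    #include <stdbool.h>
    #include <stdint.h>

    struct txq_model {
        uint32_t db_pending; /* descriptors queued since last doorbell */
        uint32_t avail;      /* free descriptors left in the ring */
        uint32_t max_batch;  /* pending threshold that forces a write */
        uint32_t max_desc;   /* worst-case descriptors for one packet */
    };

    static bool
    txd_db_check(struct txq_model *txq, bool force)
    {
        if (force || txq->db_pending >= txq->max_batch ||
            txq->avail <= txq->max_desc + 2) {
            txq->db_pending = 0; /* batch flushed with the doorbell */
            return (true);
        }
        return (false);
    }
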
4278 * ring. Technically, when using ALTQ, queueing to an intermediate mp_ring
6032 /* Allocate the TX ring struct memory */
6036 device_printf(dev, "Unable to allocate TX ring memory\n");
6045 device_printf(dev, "Unable to allocate RX ring memory\n");
6155 /* Allocate receive buffers for the ring */
7101 mp_ring_state_handler, "A", "soft ring state");