Searched refs:ring (Results 1 - 25 of 213) sorted by last modified time


/freebsd-11-stable/sys/dev/oce/
oce_if.c 708 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
711 eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe);
715 bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map,
717 RING_GET(eq->ring, 1);
1136 if (num_wqes >= RING_NUM_FREE(wq->ring)) {
1147 RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe);
1180 RING_PUT(wq->ring, 1);
1181 atomic_add_int(&wq->ring
[all...]
oce_if.h 293 #define RING_NUM_PENDING(ring) ring->num_used
294 #define RING_FULL(ring) (ring->num_used == ring->num_items)
295 #define RING_EMPTY(ring) (ring->num_used == 0)
296 #define RING_NUM_FREE(ring) \
297 (uint32_t)(ring->num_items - ring
605 oce_ring_buffer_t *ring; member in struct:oce_eq
639 oce_ring_buffer_t *ring; member in struct:oce_cq
655 oce_ring_buffer_t *ring; member in struct:oce_mq
697 oce_ring_buffer_t *ring; member in struct:oce_wq
751 oce_ring_buffer_t *ring; member in struct:oce_rq
[all...]
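
The oce_if.h macros above describe a counted ring: occupancy lives in an
explicit num_used field checked against a num_items capacity, instead of
being derived from the producer/consumer indices. A minimal sketch of that
bookkeeping, with hypothetical names (this is not the driver's code):

    #include <stdbool.h>
    #include <stdint.h>

    struct counted_ring {
        uint32_t num_items;   /* capacity */
        uint32_t num_used;    /* occupied slots */
        uint32_t pidx, cidx;  /* producer/consumer positions */
    };

    static bool ring_full(const struct counted_ring *r)   /* RING_FULL */
    {
        return (r->num_used == r->num_items);
    }

    static bool ring_empty(const struct counted_ring *r)  /* RING_EMPTY */
    {
        return (r->num_used == 0);
    }

    static uint32_t ring_num_free(const struct counted_ring *r)
    {
        return (r->num_items - r->num_used);   /* RING_NUM_FREE */
    }

    /* RING_PUT/RING_GET-style updates: move an index, adjust the count. */
    static void ring_put(struct counted_ring *r, uint32_t n)
    {
        r->pidx = (r->pidx + n) % r->num_items;
        r->num_used += n;
    }

    static void ring_get(struct counted_ring *r, uint32_t n)
    {
        r->cidx = (r->cidx + n) % r->num_items;
        r->num_used -= n;
    }
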
/freebsd-11-stable/sys/dev/netmap/
netmap.c 68 a system call on the same netmap ring. The OS does not enforce
76 - a spinlock on each ring, to handle producer/consumer races on
78 threads writing from the host stack to the same ring),
84 instance of *_*xsync() on the ring at any time.
87 lock on the ring is not actually used.
122 On the rx ring, the per-port lock is grabbed initially to reserve
123 a number of slots in the ring, then the lock is released,
125 the lock is acquired again and the receive ring is updated.
126 (A similar thing is done on the tx ring for NIC and host stack
159 * In this phase, the sync callbacks for each ring ar
1318 struct netmap_ring *ring = kring->ring; local
1633 nm_txsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) argument
1697 nm_rxsync_prologue(struct netmap_kring *kring, struct netmap_ring *ring) argument
1761 struct netmap_ring *ring = kring->ring; local
2392 ring_timestamp_set(struct netmap_ring *ring) argument
2871 struct netmap_ring *ring = kring->ring; local
3240 struct netmap_ring *ring; local
[all...]
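
The rx-side discipline those netmap.c comments describe is a two-phase
pattern: take the lock to reserve slots, drop it for the bulk copy, then
retake it to publish. A single-producer sketch under that assumption (not
netmap's actual code; with several host-stack writers, as the comments
mention, slots would also have to be published in reservation order):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    #define N_SLOTS 256

    struct toy_rx_ring {
        pthread_mutex_t lock;
        uint32_t head, tail;          /* consumer / producer indices */
        char buf[N_SLOTS][2048];
    };

    /* Caller guarantees len <= sizeof(r->buf[0]). */
    static int
    toy_rx_deliver(struct toy_rx_ring *r, const void *pkt, size_t len)
    {
        uint32_t slot;

        pthread_mutex_lock(&r->lock);             /* 1: reserve a slot */
        if ((r->tail + 1) % N_SLOTS == r->head) {
            pthread_mutex_unlock(&r->lock);
            return (-1);                          /* ring full */
        }
        slot = r->tail;
        pthread_mutex_unlock(&r->lock);

        memcpy(r->buf[slot], pkt, len);           /* 2: copy, lock dropped */

        pthread_mutex_lock(&r->lock);             /* 3: publish the slot */
        r->tail = (slot + 1) % N_SLOTS;
        pthread_mutex_unlock(&r->lock);
        return (0);
    }
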
if_ixl_netmap.h 145 * Reconcile kernel and user view of the transmit ring.
163 struct netmap_ring *ring = kring->ring; local
164 u_int nm_i; /* index into the netmap ring */
165 u_int nic_i; /* index into the NIC ring */
171 * them every half ring, or where NS_REPORT is set
185 * nm_i is the current index in the netmap ring,
186 * nic_i is the corresponding index in the NIC ring.
189 * iterate over the netmap ring, fetch length and update
190 * the corresponding slot in the NIC ring
297 struct netmap_ring *ring = kring->ring; local
[all...]
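
The txsync walk those comments outline advances two cursors in lockstep:
nm_i over the netmap ring and nic_i over the NIC descriptor ring, copying
buffer address and length for every slot the user published. A stand-in
sketch (toy types, not the real netmap API):

    #include <stdint.h>

    struct toy_slot {
        uint64_t paddr;   /* buffer physical address */
        uint16_t len;
    };

    /* Push slots [hwcur, head) from the netmap ring into the NIC ring;
     * returns the new NIC producer index for the doorbell write. */
    static uint32_t
    toy_txsync(const struct toy_slot *nm_ring, struct toy_slot *nic_ring,
        uint32_t num_slots, uint32_t hwcur, uint32_t head)
    {
        uint32_t nm_i = hwcur;    /* index into the netmap ring */
        uint32_t nic_i = hwcur;   /* corresponding index in the NIC ring */

        while (nm_i != head) {
            nic_ring[nic_i].paddr = nm_ring[nm_i].paddr;
            nic_ring[nic_i].len = nm_ring[nm_i].len;
            nm_i = (nm_i + 1 == num_slots) ? 0 : nm_i + 1;
            nic_i = (nic_i + 1 == num_slots) ? 0 : nic_i + 1;
        }
        return (nic_i);
    }
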
netmap_kern.h 369 * private, kernel view of a ring. Keeps track of the status of
370 * a ring across system calls.
373 * It corresponds to ring->head
379 * ring->head, hwtail is advanced on incoming packets,
380 * and a wakeup is generated when hwtail passes ring->cur
410 * Concurrent rxsync or txsync on the same ring are prevented through
419 * and receiver. They are protected through the q_lock on the RX ring.
422 struct netmap_ring *ring; member in struct:netmap_kring
429 * at the ring (which could be modified). These are set in the
439 #define NKR_FORWARD 0x4 /* (host ring onl
2266 struct netmap_ring *ring = kring->ring; local
[all...]
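
In that split, the kernel-side kring keeps its own cursors: nr_hwcur mirrors
the user's ring->head, and nr_hwtail advances as slots complete. A minimal
stand-in showing how occupancy falls out of the two cursors (field names
from the snippet, everything else illustrative):

    #include <stdint.h>

    struct toy_kring {
        uint32_t nkr_num_slots;
        uint32_t nr_hwcur;    /* kernel view of ring->head */
        uint32_t nr_hwtail;   /* advanced on completed slots; a wakeup
                               * fires when it passes ring->cur */
    };

    static uint32_t
    toy_kring_avail(const struct toy_kring *k)
    {
        if (k->nr_hwtail >= k->nr_hwcur)
            return (k->nr_hwtail - k->nr_hwcur);
        return (k->nr_hwtail + k->nkr_num_slots - k->nr_hwcur);
    }
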
netmap_mem2.c 644 DECLARE_SYSCTLS(NETMAP_RING_POOL, ring);
1744 * On the other end, the pipe ring dimension may be different from
1745 * the parent port ring dimension. As a compromise, we allocate twice the
1754 nm_prinf("req if %d*%d ring %d*%d buf %d*%d",
1861 struct netmap_ring *ring = kring->ring; local
1863 if (ring == NULL || kring->users > 0 || (kring->nr_kflags & NKR_NEEDRING)) {
1865 nm_prinf("NOT deleting ring %s (ring %p, users %d needring %d)",
1866 kring->name, ring, krin
1900 struct netmap_ring *ring = kring->ring; local
[all...]
/freebsd-11-stable/sys/dev/ena/
ena.c 387 ena_init_io_rings_common(struct ena_adapter *adapter, struct ena_ring *ring, argument
391 ring->qid = qid;
392 ring->adapter = adapter;
393 ring->ena_dev = adapter->ena_dev;
394 ring->first_interrupt = false;
395 ring->no_interrupt_event_cnt = 0;
412 /* TX/RX common ring state */
416 /* TX specific ring state */
444 /* Allocate a buf ring */
745 /* Flush buffer ring, */
1448 struct ena_ring *ring; local
[all...]
ena_netmap.c 47 struct ena_ring *ring; member in struct:ena_netmap_ctx
114 struct netmap_ring *ring; local
134 ena_trace(NULL, ENA_NETMAP, "No free slots in netmap ring\n");
138 ring = kring->ring;
139 if (ring == NULL) {
140 device_printf(adapter->pdev, "Rx ring %d is NULL\n", qid);
143 slot = &ring->slot[nm_i];
159 rx_info->ena_buf.len = ring->nr_buf_size;
195 "netmap kernel ring
[all...]
/freebsd-11-stable/sys/dev/mlx4/mlx4_en/
mlx4_en_netdev.c 66 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
1128 int ring, err; local
1133 for (ring = 0; ring < priv->rx_ring_num; ring++) {
1135 rx_packets = priv->rx_ring[ring]->packets;
1136 rx_bytes = priv->rx_ring[ring]->bytes;
1140 priv->last_moder_packets[ring]));
1144 priv->last_moder_bytes[ring])) / packets : 0;
1163 if (moder_time != priv->last_moder_time[ring]) {
1572 struct mlx4_en_tx_ring *ring; local
[all...]
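
The moderation fragment above is rate-driven interrupt coalescing: per pass,
mlx4 computes each rx ring's packet rate since the last sample and rescales
the moderation time accordingly. A hedged sketch of the idea only; the
thresholds and names below are invented, not the driver's:

    #include <stdint.h>

    #define MODER_TIME_LOW   16      /* usec, illustrative */
    #define MODER_TIME_HIGH  128
    #define RATE_LOW         20000   /* pkts/sec, illustrative */
    #define RATE_HIGH        400000

    /* Map the observed per-ring packet rate onto a moderation time. */
    static uint32_t
    toy_auto_moder(uint64_t packets, uint64_t last_packets,
        uint64_t period_usec)
    {
        uint64_t rate;

        if (period_usec == 0)
            return (MODER_TIME_LOW);
        rate = (packets - last_packets) * 1000000 / period_usec;
        if (rate < RATE_LOW)
            return (MODER_TIME_LOW);
        if (rate > RATE_HIGH)
            return (MODER_TIME_HIGH);
        /* linear interpolation between the two endpoints */
        return (MODER_TIME_LOW + (rate - RATE_LOW) *
            (MODER_TIME_HIGH - MODER_TIME_LOW) / (RATE_HIGH - RATE_LOW));
    }
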
mlx4_en_tx.c 60 struct mlx4_en_tx_ring *ring; local
65 ring = kzalloc_node(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL, node);
66 if (!ring) {
67 ring = kzalloc(sizeof(struct mlx4_en_tx_ring), GFP_KERNEL);
68 if (!ring) {
69 en_err(priv, "Failed allocating TX ring\n");
87 &ring->dma_tag)))
90 ring->size = size;
91 ring->size_mask = size - 1;
92 ring
194 struct mlx4_en_tx_ring *ring = *pring; local
215 mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, int cq, int user_prio) argument
243 mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring) argument
271 mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 index, u8 owner) argument
290 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, struct mlx4_en_tx_ring *ring, u32 index) argument
311 mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring) argument
340 mlx4_en_tx_ring_is_full(struct mlx4_en_tx_ring *ring) argument
352 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; local
427 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; local
440 struct mlx4_en_tx_ring *ring = priv->tx_ring[cq->ring]; local
466 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; local
485 mlx4_en_get_inline_hdr_size(struct mlx4_en_tx_ring *ring, struct mbuf *mb) argument
642 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; local
933 struct mlx4_en_tx_ring *ring = priv->tx_ring[tx_ind]; local
958 struct mlx4_en_tx_ring *ring; local
[all...]
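
Note ring->size_mask = size - 1 above: the tx ring size must be a power of
two so that slot lookup is a cheap AND rather than a modulo, and so the
free-running prod/cons counters can wrap naturally. A small illustration of
the idiom (toy names):

    #include <assert.h>
    #include <stdint.h>

    struct toy_tx_ring {
        uint32_t size;        /* must be a power of two */
        uint32_t size_mask;   /* size - 1 */
        uint32_t prod, cons;  /* free-running counters */
    };

    static void
    toy_tx_ring_init(struct toy_tx_ring *r, uint32_t size)
    {
        assert(size != 0 && (size & (size - 1)) == 0);
        r->size = size;
        r->size_mask = size - 1;
        r->prod = r->cons = 0;
    }

    static uint32_t
    toy_tx_next_slot(const struct toy_tx_ring *r)
    {
        return (r->prod & r->size_mask);   /* AND instead of '%' */
    }
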
mlx4_en_rx.c 49 struct mlx4_en_rx_ring *ring,
53 ((struct mlx4_en_rx_desc *)ring->buf) + index;
61 * If the number of used fragments does not fill up the ring
74 mlx4_en_alloc_mbuf(struct mlx4_en_rx_ring *ring) argument
79 mb = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, ring->rx_mb_size);
81 mb->m_pkthdr.len = mb->m_len = ring->rx_mb_size;
92 if (mb_head->m_pkthdr.len >= ring->rx_mb_size)
110 mlx4_en_alloc_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc, argument
123 if (unlikely(ring->spare.mbuf == NULL)) {
124 mb = mlx4_en_alloc_mbuf(ring);
48 mlx4_en_init_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) argument
223 mlx4_en_free_buf(struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_mbuf *mb_list) argument
233 mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, int index) argument
250 mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring) argument
257 struct mlx4_en_rx_ring *ring; local
303 mlx4_en_free_rx_buf(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) argument
370 struct mlx4_en_rx_ring *ring; local
465 struct mlx4_en_rx_ring *ring; local
550 struct mlx4_en_rx_ring *ring = *pring; local
574 mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring) argument
623 mlx4_en_rx_mb(struct mlx4_en_priv *priv, struct mlx4_en_rx_ring *ring, struct mlx4_en_rx_desc *rx_desc, struct mlx4_en_rx_mbuf *mb_list, int length) argument
742 struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring]; local
916 mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, struct mlx4_en_rx_ring *ring, enum mlx4_qp_state *state, struct mlx4_qp *qp) argument
[all...]
mlx4_en_port.c 165 const struct mlx4_en_rx_ring *ring = priv->rx_ring[i]; local
167 packets += READ_ONCE(ring->packets);
168 bytes += READ_ONCE(ring->bytes);
176 const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; local
178 packets += READ_ONCE(ring->packets);
179 bytes += READ_ONCE(ring->bytes);
225 const struct mlx4_en_tx_ring *ring; local
226 ring = priv->tx_ring[i];
228 priv->port_stats.tx_chksum_offload += ring->tx_csum;
229 priv->port_stats.queue_stopped += ring
463 const struct mlx4_en_tx_ring *ring = priv->tx_ring[i]; local
[all...]
/freebsd-11-stable/sys/dev/xen/evtchn/
evtchn_dev.c 82 /* Notification ring, accessed via /dev/xen/evtchn. */
85 evtchn_port_t *ring; member in struct:per_user_data
175 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
176 wmb(); /* Ensure ring contents visible */
194 u->ring = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK | M_ZERO);
210 free(u->ring, M_EVTCHN);
237 free(u->ring, M_EVTCHN);
284 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
305 if (uiomove(&u->ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
306 ((bytes2 != 0) && uiomove(&u->ring[
[all...]
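
The evtchn lines follow the usual notification-ring protocol: store the
port, issue a write barrier, then advance ring_prod, so a reader never
observes the index ahead of the data; on the read side the copy splits into
two chunks at the ring wrap (the bytes1/bytes2 pair). A self-contained
sketch under those assumptions (sizes and names illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define RING_SIZE    512   /* power of two, illustrative */
    #define RING_MASK(i) ((i) & (RING_SIZE - 1))
    #define wmb()        __atomic_thread_fence(__ATOMIC_RELEASE)

    struct toy_user {
        uint32_t ring[RING_SIZE];
        uint32_t ring_prod, ring_cons;   /* free-running */
    };

    static void
    toy_notify(struct toy_user *u, uint32_t port)
    {
        u->ring[RING_MASK(u->ring_prod)] = port;
        wmb();                  /* ensure ring contents visible */
        u->ring_prod++;
    }

    static size_t
    toy_read(struct toy_user *u, uint32_t *out, size_t max)
    {
        size_t avail = u->ring_prod - u->ring_cons;
        size_t n = (avail < max) ? avail : max;
        size_t n1 = RING_SIZE - RING_MASK(u->ring_cons);  /* up to wrap */
        size_t n2;

        if (n1 > n)
            n1 = n;
        n2 = n - n1;            /* second chunk, after the wrap */

        memcpy(out, &u->ring[RING_MASK(u->ring_cons)], n1 * sizeof(*out));
        if (n2 != 0)
            memcpy(out + n1, &u->ring[0], n2 * sizeof(*out));
        u->ring_cons += n;
        return (n);
    }
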
/freebsd-11-stable/sys/dev/bnxt/
if_bnxt.c 378 /* Set up the completion ring */
380 softc->tx_cp_rings[i].ring.phys_id =
382 softc->tx_cp_rings[i].ring.softc = softc;
383 softc->tx_cp_rings[i].ring.id =
385 softc->tx_cp_rings[i].ring.doorbell =
386 softc->tx_cp_rings[i].ring.id * 0x80;
387 softc->tx_cp_rings[i].ring.ring_size =
389 softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs];
390 softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs];
392 /* Set up the TX ring */
[all...]
bnxt.h 117 #define NEXT_CP_CONS_V(ring, cons, v_bit) do { \
118 if (__predict_false(++(cons) == (ring)->ring_size)) \
122 #define RING_NEXT(ring, idx) (__predict_false(idx + 1 == (ring)->ring_size) ? \
126 __builtin_prefetch(&((struct cmpl_base *)(cpr)->ring.vaddr)[((idx) +\
128 ((cpr)->ring.ring_size - 1)])
132 * the completion ring has space before the RX/TX ring does. Since we can't
136 * for a single ring group.
141 #define BNXT_CP_DISABLE_DB(ring) d
476 struct bnxt_ring ring; member in struct:bnxt_cp_ring
[all...]
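
NEXT_CP_CONS_V above encodes a valid-bit completion ring: the consumer's
expected bit flips on every wrap, so an entry counts as new only while its
valid bit matches the current lap. A hypothetical consumer built on that
rule (a real one would also need a read barrier between checking the valid
bit and reading the payload):

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_cmpl {
        uint32_t data;
        uint8_t valid;          /* written by the device */
    };

    struct toy_cp_ring {
        struct toy_cmpl *entries;
        uint32_t ring_size;
        uint32_t cons;          /* consumer index */
        bool v_bit;             /* valid bit expected on this lap */
    };

    static bool
    toy_cp_poll(struct toy_cp_ring *r, uint32_t *out)
    {
        const struct toy_cmpl *c = &r->entries[r->cons];

        if ((c->valid != 0) != r->v_bit)
            return (false);     /* not written yet this lap */
        *out = c->data;
        if (++r->cons == r->ring_size) {   /* the NEXT_CP_CONS_V step */
            r->cons = 0;
            r->v_bit = !r->v_bit;
        }
        return (true);
    }
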
/freebsd-11-stable/sys/net/
iflib.c 314 /* this should really scale with ring size - this is a fairly arbitrary value */
541 * Only allow a single packet to take up at most 1/nth of the tx ring
824 struct netmap_ring *ring = kring->ring; local
837 * IMPORTANT: we must leave one free slot in the ring,
843 struct netmap_slot *slot = &ring->slot[nm_i];
893 * Reconcile kernel and user view of the transmit ring.
911 struct netmap_ring *ring = kring->ring; local
912 u_int nm_i; /* index into the netmap ring */
1047 struct netmap_ring *ring = kring->ring; local
2773 iflib_txd_db_check(if_ctx_t ctx, iflib_txq_t txq, int ring, qidx_t in_use) argument
3560 bool do_prefetch, ring, rang; local
[all...]
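
The "leave one free slot" comment in iflib.c is the classic head/tail ring
convention: with only two indices, a completely full ring is otherwise
indistinguishable from an empty one, so one slot is sacrificed. In short:

    #include <stdint.h>

    static int
    toy_ring_empty(uint32_t head, uint32_t tail)
    {
        return (head == tail);
    }

    static int
    toy_ring_full(uint32_t head, uint32_t tail, uint32_t n)
    {
        return ((tail + 1) % n == head);   /* n - 1 usable slots */
    }
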
/freebsd-11-stable/contrib/llvm-project/llvm/tools/llvm-readobj/
ARMEHABIPrinter.h 37 static ArrayRef<RingEntry> ring();
70 inline ArrayRef<OpcodeDecoder::RingEntry> OpcodeDecoder::ring() { function in class:llvm::ARM::EHABI::OpcodeDecoder
311 for (const auto &RE : ring()) {
/freebsd-11-stable/sys/dev/vxge/vxgehal/
vxgehal-virtualpath.c 7041 * interrupts (Can be repeated). If fifo or ring are not enabled
7555 __hal_ring_t *ring; local
7570 ring = (__hal_ring_t *) vpath->ringh;
7571 if (ring == NULL) {
7577 ring->cmpl_cnt = 0;
7578 ring->channel.poll_bytes = 0;
7583 if (ring->callback(vpath_handle, first_rxdh, rxd_priv,
7584 t_code, ring->channel.userdata) != VXGE_HAL_OK) {
7987 if (vp_config->ring.enable == VXGE_HAL_RING_DISABLE) {
7997 if (vp_config->ring
[all...]
/freebsd-11-stable/sys/dev/drm2/radeon/
evergreen.c 58 int ring, u32 cp_int_cntl);
1605 struct radeon_ring *ring = &rdev->ring[ib->ring]; local
1609 radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
1610 radeon_ring_write(ring, 1);
1612 if (ring->rptr_save_reg) {
1613 next_rptr = ring->wptr + 3 + 4;
1614 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
1615 radeon_ring_write(ring, ((rin
1672 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
1738 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
2422 evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) argument
3391 struct radeon_ring *ring = &rdev->ring[fence->ring]; local
3417 struct radeon_ring *ring = &rdev->ring[ib->ring]; local
3461 struct radeon_ring *ring = &rdev->ring[ring_index]; local
3517 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
[all...]
/freebsd-11-stable/sys/dev/ioat/
ioat.c 123 0, "Set IOAT ring order. (1 << this) == ring size.");
359 if (ioat->ring != NULL)
360 ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);
446 struct ioat_descriptor *ring; local
524 ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
527 ring = ioat->ring;
529 memset(&ring[
1397 ioat_free_ring(struct ioat_softc *ioat, uint32_t size, struct ioat_descriptor *ring) argument
[all...]
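
The ioat sysctl above sizes the ring by its order: the ring holds
1 << order descriptors, keeping the size a power of two. Illustratively
(toy descriptor type, not ioat's):

    #include <stdint.h>
    #include <stdlib.h>

    struct toy_desc {
        uint64_t src, dst;
        uint32_t len;
    };

    /* Allocate a descriptor ring of 1 << order entries. */
    static struct toy_desc *
    toy_ring_alloc(uint32_t order)
    {
        uint32_t num_descriptors = 1u << order;

        return (calloc(num_descriptors, sizeof(struct toy_desc)));
    }
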
/freebsd-11-stable/usr.sbin/inetd/
builtins.c 73 static char ring[128]; variable
114 endring = ring;
138 rs = ring;
152 memmove(text + len, ring, LINESIZ - len);
155 rs = ring;
176 for (rs = ring;;) {
181 memmove(text + len, ring, LINESIZ - len);
184 rs = ring;
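
The inetd ring is the chargen generator: a fixed circle of printable
characters from which each output line is LINESIZ consecutive bytes, copied
with a pair of memmove calls when the read wraps, the start position
rotating by one per line. A self-contained sketch (sizes illustrative; the
real ring is 128 bytes):

    #include <stddef.h>
    #include <string.h>

    #define RINGLEN 95    /* printable ASCII: ' ' .. '~' */
    #define LINESIZ 72

    static char ring[RINGLEN];
    static char *rs = ring;

    static void
    ring_init(void)
    {
        int i;

        for (i = 0; i < RINGLEN; i++)
            ring[i] = ' ' + i;
    }

    /* Fill one chargen line starting at the rotating position rs. */
    static void
    next_line(char text[LINESIZ])
    {
        size_t len = (size_t)(ring + RINGLEN - rs);

        if (len >= LINESIZ)
            memmove(text, rs, LINESIZ);
        else {
            memmove(text, rs, len);
            memmove(text + len, ring, LINESIZ - len);   /* wrap */
        }
        if (++rs == ring + RINGLEN)
            rs = ring;
    }
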
/freebsd-11-stable/sys/dev/cxgbe/
t4_netmap.c 544 * Write work requests to send 'npkt' frames and ring the doorbell to send them
551 struct netmap_ring *ring = kring->ring; local
574 slot = &ring->slot[kring->nr_hwcur];
762 struct netmap_ring *ring = kring->ring; local
785 struct netmap_slot *slot = &ring->slot[fl_pidx];
811 slot = &ring->slot[0];
910 struct netmap_ring *ring = kring->ring; local
[all...]
/freebsd-11-stable/sys/dev/random/
random_harvestq.c 103 * Lockless ring buffer holding entropy events
104 * If ring.in == ring.out,
106 * If ring.in != ring.out,
108 * If (ring.in + 1) == ring.out (mod RANDOM_RING_MAX),
111 * NOTE: ring.in points to the last added element,
112 * and ring.out points to the last consumed element.
114 * The ring
123 struct harvest_event ring[RANDOM_RING_MAX]; member in struct:harvest_context::entropy_ring
[all...]
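
The random_harvestq comments pin down an unusual convention: ring.in is the
LAST element added and ring.out the LAST element consumed, rather than the
usual "next" positions, which yields the stated empty (in == out) and full
((in + 1) == out mod RANDOM_RING_MAX) tests. A single-producer, single-
consumer sketch of that convention (volatile is only a placeholder here; a
real lockless version needs memory barriers around the publish):

    #include <stdint.h>

    #define RING_MAX 1024   /* illustrative */

    struct toy_entropy_ring {
        uint64_t ring[RING_MAX];
        volatile uint32_t in;    /* last added element */
        volatile uint32_t out;   /* last consumed element */
    };

    static int
    toy_ring_store(struct toy_entropy_ring *r, uint64_t ev)
    {
        uint32_t nxt = (r->in + 1) % RING_MAX;

        if (nxt == r->out)
            return (0);          /* full: (in + 1) == out (mod RING_MAX) */
        r->ring[nxt] = ev;
        r->in = nxt;             /* publish only after the slot is written */
        return (1);
    }

    static int
    toy_ring_fetch(struct toy_entropy_ring *r, uint64_t *ev)
    {
        uint32_t nxt;

        if (r->in == r->out)
            return (0);          /* empty: in == out */
        nxt = (r->out + 1) % RING_MAX;
        *ev = r->ring[nxt];
        r->out = nxt;
        return (1);
    }
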
/freebsd-11-stable/sys/dev/hwpmc/
hwpmc_mod.c 4100 pmc_process_interrupt(int cpu, int ring, struct pmc *pm, struct trapframe *tf, argument
4113 psb = pmc_pcpu[cpu]->pc_sb[ring];
4172 /* increment write pointer, modulo ring buffer size */
4194 pmc_capture_user_callchain(int cpu, int ring, struct trapframe *tf) argument
4205 psb = pmc_pcpu[cpu]->pc_sb[ring];
4260 /* increment the pointer, modulo sample ring size */
4284 pmc_process_samples(int cpu, int ring) argument
4298 psb = pmc_pcpu[cpu]->pc_sb[ring];
/freebsd-11-stable/sys/dev/bge/
if_bge.c 63 * and the use of the mini RX ring is disabled. This seems to imply
66 * ring.
1314 * Initialize a standard receive ring descriptor.
1372 * Initialize a jumbo receive ring descriptor. This allocates
1519 /* Enable the jumbo receive producer ring. */
1583 /* Initialize transmit producer index for host-memory send ring. */
1591 /* NIC-memory send ring not used; initialize to zero. */
1983 * allow us to set up the TX send ring RCBs and the RX return
1984 * ring RCBs, plus other things which live in NIC memory.
2076 * - This ring i
2917 bge_dma_ring_alloc(struct bge_softc *sc, bus_size_t alignment, bus_size_t maxsize, bus_dma_tag_t *tag, uint8_t **ring, bus_dmamap_t *map, bus_addr_t *paddr, const char *msg) argument
[all...]
