Searched refs:rings (Results 1 - 25 of 59) sorted by relevance


/linux-master/drivers/net/ethernet/intel/ice/
ice_fwlog.c
9 bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings) argument
13 head = rings->head;
14 tail = rings->tail;
16 if (head < tail && (tail - head == (rings->size - 1)))
24 bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings) argument
26 return rings->head == rings->tail;
34 static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings) argument
39 nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN;
44 for (i = 0; i < rings
55 ice_fwlog_free_ring_buffs(struct ice_fwlog_ring *rings) argument
[all...]
ice_fwlog.h
56 struct ice_fwlog_data *rings; member in struct:ice_fwlog_ring
67 bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
68 bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
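Aside: the ice_fwlog.c hits above implement the usual head/tail ring with one slot kept open, reporting "full" when the two indices are size - 1 apart. A minimal user-space sketch of that convention, with hypothetical names rather than the driver's API:

#include <stdbool.h>

struct log_ring {
	unsigned int head;	/* next slot the producer writes */
	unsigned int tail;	/* next slot the consumer reads  */
	unsigned int size;	/* total slots in the ring       */
};

static bool ring_empty(const struct log_ring *r)
{
	return r->head == r->tail;
}

static bool ring_full(const struct log_ring *r)
{
	/* one slot is always left open, so full means head sits one step behind tail */
	return (r->head + 1) % r->size == r->tail;
}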
/linux-master/tools/net/ynl/samples/
ethtool.c
16 struct ethtool_rings_get_list *rings; local
42 rings = ethtool_rings_get_dump(ys, &rreq);
43 if (!rings)
47 ynl_dump_foreach(rings, dev) {
55 ethtool_rings_get_list_free(rings);
/linux-master/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
142 if (flow->rings[i] == NULL)
160 flow->rings[i] = ring;
172 ring = flow->rings[flowid];
192 ring = flow->rings[flowid];
201 if ((flow->rings[i]) && (i != flowid)) {
202 ring = flow->rings[i];
212 flow->rings[flowid]->blocked = blocked;
236 ring = flow->rings[flowid];
247 flow->rings[flowid] = NULL;
264 ring = flow->rings[flowi
[all...]
flowring.h
41 struct brcmf_flowring_ring **rings; member in struct:brcmf_flowring
/linux-master/net/9p/
trans_xen.c
57 struct xen_9pfs_dataring *rings; member in struct:xen_9pfs_front_priv
134 ring = &priv->rings[num];
282 struct xen_9pfs_dataring *ring = &priv->rings[i];
286 if (!priv->rings[i].intf)
288 if (priv->rings[i].irq > 0)
289 unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
290 if (priv->rings[i].data.in) {
292 j < (1 << priv->rings[i].intf->ring_order);
296 ref = priv->rings[i].intf->ref[j];
299 free_pages_exact(priv->rings[
[all...]
/linux-master/net/ethtool/
Makefile
8 linkstate.o debug.o wol.o features.o privflags.o rings.o \
/linux-master/tools/testing/selftests/net/
psock_fanout.c
235 static int sock_fanout_read(int fds[], char *rings[], const int expect[]) argument
239 ret[0] = sock_fanout_read_ring(fds[0], rings[0]);
240 ret[1] = sock_fanout_read_ring(fds[1], rings[1]);
412 char *rings[2]; local
431 rings[0] = sock_fanout_open_ring(fds[0]);
432 rings[1] = sock_fanout_open_ring(fds[1]);
435 sock_fanout_read(fds, rings, expect0);
440 ret = sock_fanout_read(fds, rings, expect1);
445 ret |= sock_fanout_read(fds, rings, expect2);
447 if (munmap(rings[
[all...]
toeplitz.c
104 static struct ring_state rings[RSS_MAX_CPUS]; variable in typeref:struct:ring_state
242 /* simple test: sleep once unconditionally and then process all rings */
250 do {} while (recv_block(&rings[i]));
355 /* block packets until all rings are added to the fanout group:
404 rings[i].cpu = i;
405 rings[i].fd = create_ring(&rings[i].mmap);
408 /* accept packets once all rings in the fanout group are up */
410 set_filter(rings[i].fd);
418 if (munmap(rings[
[all...]
/linux-master/drivers/i3c/master/mipi-i3c-hci/
dma.c
167 struct hci_rings_data *rings = hci->io_data; local
171 if (!rings)
174 for (i = 0; i < rings->total; i++) {
175 rh = &rings->headers[i];
204 kfree(rings);
210 struct hci_rings_data *rings; local
219 dev_info(&hci->master.dev, "%d DMA rings available\n", nr_rings);
221 dev_err(&hci->master.dev, "number of rings should be <= 8\n");
226 rings = kzalloc(struct_size(rings, header
361 struct hci_rings_data *rings = hci->io_data; local
448 struct hci_rings_data *rings = hci->io_data; local
738 struct hci_rings_data *rings = hci->io_data; local
[all...]
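Aside: the dma.c hit above sizes a single allocation for the ring-collection header plus its trailing headers[] array via struct_size(). A plain C sketch of the same flexible-array-member pattern, with hypothetical types and without struct_size()'s overflow checking:

#include <stdlib.h>

struct ring_header {
	void *base;
	unsigned int entries;
};

struct rings_data {
	unsigned int total;
	struct ring_header headers[];	/* flexible array member */
};

static struct rings_data *rings_alloc(unsigned int nr)
{
	/* one zeroed allocation: fixed part plus nr trailing ring headers */
	struct rings_data *rings =
		calloc(1, sizeof(*rings) + nr * sizeof(struct ring_header));

	if (rings)
		rings->total = nr;
	return rings;
}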
/linux-master/drivers/net/ethernet/netronome/nfp/
Makefile
24 nfd3/rings.o \
27 nfdk/rings.o \
/linux-master/drivers/soc/ti/
k3-ringacc.c
153 * @use_count: Use count for shared rings
194 * @rm_gp_range: general purpose rings range from tisci
198 * @rings: array of rings descriptors (struct @k3_ring)
200 * @req_lock: protect rings allocation
202 * @tisci_ring_ops: ti-sci rings ops
211 u32 num_rings; /* number of rings in Ringacc module */
219 struct k3_ring *rings; member in struct:k3_ringacc
221 struct mutex req_lock; /* protect rings allocation */
358 !(ringacc->rings[i
[all...]
/linux-master/drivers/crypto/intel/qat/qat_common/
adf_transport.c
267 ring = &bank->rings[ring_num];
338 adf_handle_response(&bank->rings[i]);
404 /* Allocate the rings in the bank */
406 bank->rings = kzalloc_node(size, GFP_KERNEL,
408 if (!bank->rings)
425 ring = &bank->rings[i];
436 "Invalid tx rings mask config\n");
439 tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
456 ring = &bank->rings[i];
460 kfree(bank->rings);
[all...]
adf_transport_internal.h
32 struct adf_etr_ring_data *rings; member in struct:adf_etr_bank_data
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/lib/
hv_vhca.h
28 u16 rings; member in struct:mlx5_hv_vhca_control_block
/linux-master/tools/lib/bpf/
ringbuf.c
37 struct ring **rings; member in struct:ring_buffer
102 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
105 rb->rings = tmp;
115 rb->rings[rb->ring_cnt] = r;
180 ringbuf_free_ring(rb, rb->rings[i]);
185 free(rb->rings);
288 struct ring *ring = rb->rings[i];
315 struct ring *ring = rb->rings[ring_id];
338 return rb->rings[id
[all...]
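Aside: the libbpf ringbuf.c hits grow the rings pointer array by one slot before storing each newly added ring. A minimal sketch of that grow-and-append step with generic names (not the libbpf API):

#include <stdlib.h>

struct ring;				/* opaque element type for this sketch */

struct ring_set {
	struct ring **rings;
	int ring_cnt;
};

/* Append one ring, growing the pointer array by exactly one slot. */
static int ring_set_add(struct ring_set *rs, struct ring *r)
{
	/* libbpf_reallocarray() additionally guards the multiplication against
	 * overflow; plain realloc() keeps this sketch portable. */
	struct ring **tmp = realloc(rs->rings, (rs->ring_cnt + 1) * sizeof(*tmp));

	if (!tmp)
		return -1;	/* the old array stays valid on failure */

	rs->rings = tmp;
	rs->rings[rs->ring_cnt++] = r;
	return 0;
}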
/linux-master/drivers/block/xen-blkback/
xenbus.c
84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file)
110 ring = &blkif->rings[i];
124 ring = &blkif->rings[i];
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
136 if (!blkif->rings)
140 struct xen_blkif_ring *ring = &blkif->rings[r];
274 struct xen_blkif_ring *ring = &blkif->rings[r];
335 * blkif->rings was allocated in connect_ring, so we should free it in
338 kfree(blkif->rings);
[all...]
/linux-master/include/linux/
ptr_ring.h
619 static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, argument
639 spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
640 spin_lock(&(rings[i])->producer_lock);
641 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
643 spin_unlock(&(rings[i])->producer_lock);
644 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
skb_array.h
201 static inline int skb_array_resize_multiple(struct skb_array **rings, argument
206 return ptr_ring_resize_multiple((struct ptr_ring **)rings,
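Aside: ptr_ring_resize_multiple() above nests the producer lock inside the irq-saving consumer lock so both ends are quiescent while the backing queue is swapped. A rough user-space analogue of that lock ordering with pthreads (hypothetical names; no IRQ masking or entry migration):

#include <pthread.h>
#include <stdlib.h>

struct pring {
	pthread_mutex_t consumer_lock;
	pthread_mutex_t producer_lock;
	void **queue;
	int size;
};

/* Swap in a new backing array while both producer and consumer are held off. */
static int pring_resize(struct pring *r, int new_size)
{
	void **new_queue = calloc(new_size, sizeof(*new_queue));

	if (!new_queue)
		return -1;

	pthread_mutex_lock(&r->consumer_lock);	/* consumer side first ...    */
	pthread_mutex_lock(&r->producer_lock);	/* ... then the producer side */
	/* a real resize would migrate the surviving entries here */
	free(r->queue);
	r->queue = new_queue;
	r->size = new_size;
	pthread_mutex_unlock(&r->producer_lock);
	pthread_mutex_unlock(&r->consumer_lock);

	return 0;
}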
/linux-master/io_uring/
io_uring.h
54 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
260 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
289 struct io_rings *r = ctx->rings;
296 struct io_rings *rings = ctx->rings; local
300 entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
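Aside: the io_uring.h hits read the opposite side's tail with acquire semantics and publish a new tail with release semantics, so the difference of two free-running unsigned indices gives the number of ready entries even across wraparound. A compact C11-atomics sketch of that accounting (hypothetical names, not the io_uring layout):

#include <stdatomic.h>

struct sq_shared {
	_Atomic unsigned int tail;	/* advanced by the submitting side */
	unsigned int cached_head;	/* consumer's private head copy    */
};

/* Entries ready to consume; unsigned subtraction stays correct on wrap. */
static unsigned int sq_entries_ready(struct sq_shared *sq)
{
	unsigned int tail =
		atomic_load_explicit(&sq->tail, memory_order_acquire);

	return tail - sq->cached_head;
}

/* Publish a new completion tail so the reader also sees the filled entries. */
static void cq_publish_tail(_Atomic unsigned int *tail, unsigned int new_tail)
{
	atomic_store_explicit(tail, new_tail, memory_order_release);
}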
io_uring.c
186 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
191 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
361 struct io_rings *r = ctx->rings;
578 if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
653 /* IOPOLL rings only need to wake up if it's also SQPOLL */
712 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
806 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
835 struct io_rings *rings = ctx->rings; local
2411 struct io_rings *rings = ctx->rings; local
2592 struct io_rings *rings = ctx->rings; local
2828 struct io_rings *rings; local
3760 struct io_rings *rings; local
[all...]
sqpoll.c
175 /* if we're handling multiple rings, cap submit size for fairness */
335 &ctx->rings->sq_flags);
362 &ctx->rings->sq_flags);
375 atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
/linux-master/drivers/crypto/inside-secure/
safexcel.c
29 MODULE_PARM_DESC(max_rings, "Maximum number of rings to use.");
36 * Map all interfaces/rings to register index 0
51 for (i = 0; i < priv->config.rings; i++) {
510 for (i = 0; i < priv->config.rings; i++) {
558 for (i = 0; i < priv->config.rings; i++) {
600 priv->config.pes, priv->config.rings);
660 /* enable HIA input interface arbiter and rings */
662 GENMASK(priv->config.rings - 1, 0),
720 for (i = 0; i < priv->config.rings; i++) {
746 for (i = 0; i < priv->config.rings;
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_fence.c
468 * Not all asics have all rings, so each asic will only
469 * start the fence driver on the rings it has.
539 * for all possible rings.
543 * Init the fence driver for all possible rings (all asics).
544 * Not all asics have all rings, so each asic will only
545 * start the fence driver on the rings it has using
560 * Interrupts for rings that belong to GFX IP don't need to be restored
592 * for all possible rings.
596 * Tear down the fence driver for all possible rings (all asics).
603 struct amdgpu_ring *ring = adev->rings[
[all...]
/linux-master/drivers/net/ethernet/mellanox/mlx5/core/en/
hv_vhca_stats.c
103 block->rings = priv->stats_nch;

Completed in 245 milliseconds
