Searched refs:ring (Results 426 - 450 of 807) sorted by relevance

<< 11 12 13 14 15 16 17 18 19 20 >>

/linux-master/drivers/gpu/drm/i915/gt/
intel_migrate.c
318 ce->ring = NULL;
352 struct intel_ring *ring = rq->ring; local
354 pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
355 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
372 struct intel_ring *ring = rq->ring; local
422 ring->emit = (void *)cs - ring
[all...]
intel_lrc.c
1128 struct intel_ring *ring; local
1138 ring = intel_engine_create_ring(engine, ce->ring_size);
1139 if (IS_ERR(ring)) {
1140 err = PTR_ERR(ring);
1163 ce->ring = ring;
1169 intel_ring_put(ring);
1179 intel_ring_reset(ce->ring, ce->ring->emit);
1183 ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring
1535 struct intel_ring *ring = ce->ring; local
1580 const struct intel_ring *ring = ce->ring; local
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
cik_ih.c
38 * Starting with r6xx, interrupts are handled via a ring buffer.
44 * pointers are equal, the ring is idle. When the GPU
45 * writes vectors to the ring buffer, it increments the
54 * cik_ih_enable_interrupts - Enable the interrupt ring buffer
58 * Enable the interrupt ring buffer (CIK).
73 * cik_ih_disable_interrupts - Disable the interrupt ring buffer
77 * Disable the interrupt ring buffer (CIK).
96 * cik_ih_irq_init - init and enable the interrupt ring
100 * Allocate a ring buffer for the interrupt controller,
102 * ring buffe
[all...]
si_ih.c
116 dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
139 dw[0] = le32_to_cpu(ih->ring[ring_index + 0]);
140 dw[1] = le32_to_cpu(ih->ring[ring_index + 1]);
141 dw[2] = le32_to_cpu(ih->ring[ring_index + 2]);
142 dw[3] = le32_to_cpu(ih->ring[ring_index + 3]);
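
The cik_ih.c comments above describe the producer/consumer contract of the interrupt handler (IH) ring: the GPU advances a write pointer as it posts vectors, the host drains entries and advances its read pointer, and the ring is idle when the two are equal. Below is a minimal sketch of that contract with simplified stand-in types and field names (not the amdgpu structures); the four-dword entry size matches the si_ih.c reads above, and the power-of-two masking is an assumption.

#include <stdint.h>

struct ih_ring {
        volatile uint32_t *ring;  /* buffer the device writes vectors into */
        uint32_t rptr;            /* host read pointer, in dwords */
        uint32_t wptr;            /* device write pointer, in dwords */
        uint32_t size_dw;         /* ring size in dwords, power of two */
};

static void ih_drain(struct ih_ring *ih, void (*dispatch)(const uint32_t dw[4]))
{
        /* when the read and write pointers are equal the ring is idle */
        while (ih->rptr != ih->wptr) {
                uint32_t dw[4];
                int i;

                /* each interrupt vector occupies four consecutive dwords */
                for (i = 0; i < 4; i++)
                        dw[i] = ih->ring[(ih->rptr + i) & (ih->size_dw - 1)];

                dispatch(dw);

                /* advancing rptr tells the device the entry was consumed */
                ih->rptr = (ih->rptr + 4) & (ih->size_dw - 1);
        }
}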
amdgpu_gfx.h
129 struct amdgpu_ring *ring);
131 struct amdgpu_ring *ring,
135 struct amdgpu_ring *ring,
153 struct amdgpu_ring ring; member in struct:amdgpu_kiq
479 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring);
503 struct amdgpu_ring *ring);
505 struct amdgpu_ring *ring);
psp_v11_0.c
266 /* Write the ring destroy command*/
293 struct psp_ring *ring = &psp->km_ring; local
297 ring->ring_wptr = 0;
304 /* Write low address of the ring to C2PMSG_102 */
305 psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
307 /* Write high address of the ring to C2PMSG_103 */
308 psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
311 /* Write the ring initialization command to C2PMSG_101 */
323 /* Wait for sOS ready for ring creation */
327 DRM_ERROR("Failed to wait for sOS ready for ring creatio
361 struct psp_ring *ring = &psp->km_ring; local
[all...]
amdgpu_amdkfd_arcturus.c
291 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i]; local
293 if (!amdgpu_ring_sched_ready(ring))
296 /* stop scheduler and drain ring. */
298 drm_sched_stop(&ring->sched, NULL);
299 r = amdgpu_fence_wait_empty(ring);
303 drm_sched_start(&ring->sched, false);
/linux-master/drivers/net/ethernet/wangxun/libwx/
wx_lib.h
11 u16 wx_desc_unused(struct wx_ring *ring);
/linux-master/tools/testing/selftests/net/
psock_fanout.c
198 char *ring; local
203 perror("packetsock ring setsockopt version");
208 perror("packetsock ring setsockopt");
212 ring = mmap(0, req.tp_block_size * req.tp_block_nr,
214 if (ring == MAP_FAILED) {
215 perror("packetsock ring mmap");
219 return ring;
222 static int sock_fanout_read_ring(int fd, void *ring) argument
224 struct tpacket2_hdr *header = ring;
229 header = ring
[all...]
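
The psock_fanout.c helper above maps a TPACKET_V2 receive ring into user space with setsockopt() plus mmap() and then walks tpacket2_hdr frames. Here is a condensed sketch of that setup, assuming an already-open raw packet socket (which needs CAP_NET_RAW) and an arbitrary example block/frame geometry.

#include <linux/if_packet.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/socket.h>

static char *map_rx_ring(int fd, struct tpacket_req *req)
{
        int ver = TPACKET_V2;
        char *ring;

        if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)))
                return NULL;

        /* example geometry: 4 blocks of 4 KiB, 2 frames of 2 KiB per block */
        req->tp_block_size = 4096;
        req->tp_frame_size = 2048;
        req->tp_block_nr   = 4;
        req->tp_frame_nr   = req->tp_block_nr *
                             (req->tp_block_size / req->tp_frame_size);
        if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
                return NULL;

        ring = mmap(NULL, (size_t)req->tp_block_size * req->tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        return ring == MAP_FAILED ? NULL : ring;
}

/* consume one frame if the kernel has handed it to user space */
static int read_one(char *ring, const struct tpacket_req *req, unsigned *frame)
{
        struct tpacket2_hdr *hdr =
                (void *)(ring + (size_t)*frame * req->tp_frame_size);

        if (!(hdr->tp_status & TP_STATUS_USER))
                return 0;                       /* nothing pending */
        printf("frame %u: %u bytes\n", *frame, hdr->tp_len);
        hdr->tp_status = TP_STATUS_KERNEL;      /* hand the slot back */
        *frame = (*frame + 1) % req->tp_frame_nr;
        return 1;
}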
/linux-master/drivers/net/ethernet/synopsys/
dwc-xlgmac.h
95 #define XLGMAC_GET_DESC_DATA(ring, idx) ({ \
96 typeof(ring) _ring = (ring); \
388 struct xlgmac_ring *ring,
435 struct xlgmac_ring *ring);
638 struct xlgmac_ring *ring,
643 struct xlgmac_ring *ring,
/linux-master/drivers/net/wireless/ath/ath10k/
htt_tx.c
817 struct htt_rx_ring_setup_ring32 *ring; local
832 + (sizeof(*ring) * num_rx_ring);
840 ring = &cmd->rx_setup_32.rings[0];
866 ring->fw_idx_shadow_reg_paddr =
868 ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
869 ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
870 ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
871 ring->flags = __cpu_to_le16(flags);
872 ring->fw_idx_init_val = __cpu_to_le16(fw_idx);
874 ath10k_htt_fill_rx_desc_offset_32(hw, ring);
890 struct htt_rx_ring_setup_ring64 *ring; local
959 struct htt_rx_ring_setup_ring32 *ring; local
[all...]
/linux-master/drivers/gpu/drm/msm/
msm_gpu.h
61 void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
85 uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
94 bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
212 * TODO move to per-ring locking where feasible (ie. submit/retire
323 struct msm_ringbuffer *ring = gpu->rb[i]; local
325 if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
434 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
444 * Each ring is associated with its own scheduler instance. However, our
448 * is mapped to ring n
540 } ring[MSM_GPU_MAX_RINGS]; member in struct:msm_gpu_state
[all...]
/linux-master/drivers/thunderbolt/
dma_test.c
49 [DMA_TEST_DMA_ERROR] = "DMA ring setup failed",
73 * @rx_ring: Software ring holding RX frames
75 * @tx_ring: Software ring holding TX frames
141 struct tb_ring *ring; local
152 ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
154 if (!ring)
157 dt->tx_ring = ring;
158 e2e_tx_hop = ring->hop;
175 ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
178 if (!ring) {
230 dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame, bool canceled) argument
299 dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame, bool canceled) argument
[all...]
/linux-master/drivers/net/wireless/realtek/rtl818x/rtl8180/
dev.c
345 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; local
347 while (skb_queue_len(&ring->queue)) {
348 struct rtl8180_tx_desc *entry = &ring->desc[ring->idx];
356 ring->idx = (ring->idx + 1) % ring->entries;
357 skb = __skb_dequeue(&ring->queue);
371 if (ring->entries - skb_queue_len(&ring
461 struct rtl8180_tx_ring *ring; local
1079 struct rtl8180_tx_desc *ring; local
1107 struct rtl8180_tx_ring *ring = &priv->tx_ring[prio]; local
[all...]
/linux-master/drivers/xen/
evtchn.c
66 /* Notification ring, accessed via /dev/xen/evtchn. */
68 evtchn_port_t *ring; member in struct:per_user_data
73 /* Processes wait on this queue when ring is empty. */
91 static void evtchn_free_ring(evtchn_port_t *ring) argument
93 kvfree(ring);
105 return u->ring + evtchn_ring_offset(u, idx);
185 smp_wmb(); /* Ensure ring contents visible */
239 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
261 copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
324 * Ensure the ring i
[all...]
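
evtchn.c above copies pending entries to user space in at most two chunks, splitting the copy exactly where the ring wraps. A generic user-space sketch of that split follows, assuming a power-of-two ring of bytes (the real ring holds evtchn_port_t entries and uses copy_to_user()).

#include <stdint.h>
#include <string.h>

/*
 * Copy 'count' bytes starting at consumer index 'cons' out of a ring of
 * 'size' bytes (size must be a power of two). The copy is split into at
 * most two memcpy() calls, with the split at the ring wrap.
 */
static void ring_copy_out(uint8_t *dst, const uint8_t *ring, uint32_t size,
                          uint32_t cons, uint32_t count)
{
        uint32_t off = cons & (size - 1);
        uint32_t bytes1 = count < size - off ? count : size - off;
        uint32_t bytes2 = count - bytes1;   /* non-zero only if we wrapped */

        memcpy(dst, ring + off, bytes1);
        memcpy(dst + bytes1, ring, bytes2); /* second chunk starts at ring[0] */
}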
/linux-master/drivers/crypto/inside-secure/
safexcel_hash.c
227 int ring,
240 rdesc = safexcel_ring_next_rptr(priv, &priv->ring[ring].rdr);
249 safexcel_complete(priv, ring);
311 static int safexcel_ahash_send_req(struct crypto_async_request *async, int ring, argument
406 first_cdesc = safexcel_add_cdesc(priv, ring, 1,
446 cdesc = safexcel_add_cdesc(priv, ring, !n_cdesc,
480 rdesc = safexcel_add_rdesc(priv, ring, 1, 1, req->result_dma,
487 safexcel_rdr_req_set(priv, ring, rdesc, &areq->base);
505 safexcel_ring_rollback_wptr(priv, &priv->ring[rin
226 safexcel_handle_req_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *async, bool *should_complete, int *ret) argument
517 safexcel_handle_inv_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *async, bool *should_complete, int *ret) argument
567 safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring, struct crypto_async_request *async, bool *should_complete, int *ret) argument
589 safexcel_ahash_send_inv(struct crypto_async_request *async, int ring, int *commands, int *results) argument
607 safexcel_ahash_send(struct crypto_async_request *async, int ring, int *commands, int *results) argument
629 int ring = ctx->base.ring; local
694 int ret, ring; local
[all...]
safexcel.h
691 int ring; member in struct:safexcel_work_data
704 /* result ring crypto API request */
714 /* The ring is currently handling at least one request */
723 /* irq of this ring */
834 struct safexcel_ring *ring; member in struct:safexcel_crypto_priv
838 int (*send)(struct crypto_async_request *req, int ring,
840 int (*handle_result)(struct safexcel_crypto_priv *priv, int ring,
854 int ring; member in struct:safexcel_context
887 void safexcel_dequeue(struct safexcel_crypto_priv *priv, int ring);
890 void safexcel_complete(struct safexcel_crypto_priv *priv, int ring);
[all...]
/linux-master/include/linux/soc/mediatek/
mtk_wed.h
121 struct mtk_wed_ring ring; member in struct:mtk_wed_device::__anon2477
196 int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
198 int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
221 void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
223 void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
/linux-master/drivers/net/ethernet/pensando/ionic/
ionic_ethtool.c
610 struct ethtool_ringparam *ring,
616 ring->tx_max_pending = IONIC_MAX_TX_DESC;
617 ring->tx_pending = lif->ntxq_descs;
618 ring->rx_max_pending = IONIC_MAX_RX_DESC;
619 ring->rx_pending = lif->nrxq_descs;
625 struct ethtool_ringparam *ring,
638 if (ring->rx_mini_pending || ring->rx_jumbo_pending) {
643 if (!is_power_of_2(ring->tx_pending) ||
644 !is_power_of_2(ring
609 ionic_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) argument
624 ionic_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) argument
[all...]
/linux-master/tools/testing/selftests/mm/
gup_longterm.c
191 struct io_uring ring; local
203 ret = io_uring_queue_init(1, &ring, 0);
214 ret = io_uring_register_buffers(&ring, &iov, 1);
227 io_uring_unregister_buffers(&ring);
230 io_uring_queue_exit(&ring);
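
gup_longterm.c above uses liburing only to pin a buffer long-term via io_uring_register_buffers(). A stripped-down sketch of that register/unregister lifecycle is shown below (buffer size and error handling simplified; link with -luring).

#include <liburing.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>

int main(void)
{
        struct io_uring ring;
        struct iovec iov = { .iov_len = 4096 };
        int ret;

        iov.iov_base = malloc(iov.iov_len);
        if (!iov.iov_base)
                return 1;

        ret = io_uring_queue_init(1, &ring, 0);   /* a single SQ entry suffices */
        if (ret) {
                fprintf(stderr, "queue_init: %d\n", ret);
                return 1;
        }

        /* registering the buffer pins its pages long-term (the GUP path under test) */
        ret = io_uring_register_buffers(&ring, &iov, 1);
        if (ret)
                fprintf(stderr, "register_buffers: %d\n", ret);
        else
                io_uring_unregister_buffers(&ring);

        io_uring_queue_exit(&ring);
        free(iov.iov_base);
        return ret ? 1 : 0;
}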
/linux-master/drivers/dma/ioat/
dma.c
214 "Unable to start null desc - ring full\n");
376 struct ioat_ring_ent **ring; local
380 /* allocate the array to hold the software ring */
381 ring = kcalloc(total_descs, sizeof(*ring), flags);
382 if (!ring)
406 kfree(ring);
412 ring[i] = ioat_alloc_ring_ent(c, i, flags);
413 if (!ring[i]) {
417 ioat_free_ring_ent(ring[
[all...]
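
dma.c above builds the ioat software ring as a kcalloc'd array of per-descriptor entries and unwinds the partially built ring when an entry allocation fails. A generic sketch of that allocate-with-rollback pattern, with a hypothetical entry type and user-space allocators standing in for the kernel helpers:

#include <stdlib.h>

struct ring_ent { int idx; /* stand-in for per-descriptor bookkeeping */ };

/* allocate an array of 'total' entries, or NULL with everything freed on failure */
static struct ring_ent **alloc_ring(int total)
{
        struct ring_ent **ring = calloc(total, sizeof(*ring));
        int i;

        if (!ring)
                return NULL;

        for (i = 0; i < total; i++) {
                ring[i] = malloc(sizeof(*ring[i]));
                if (!ring[i]) {
                        while (i--)          /* roll back what was already built */
                                free(ring[i]);
                        free(ring);
                        return NULL;
                }
                ring[i]->idx = i;
        }
        return ring;
}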
/linux-master/drivers/tty/serial/
atmel_serial.c
94 * Be careful, the real size of the ring buffer is
95 * sizeof(atmel_uart_char) * ATMEL_SERIAL_RINGSIZE. It means that ring buffer
734 * Stores the incoming character in the ring buffer
741 struct circ_buf *ring = &atmel_port->rx_ring; local
744 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE))
748 c = &((struct atmel_uart_char *)ring->buf)[ring->head];
755 ring->head = (ring
1104 struct circ_buf *ring = &atmel_port->rx_ring; local
1185 struct circ_buf *ring; local
1509 struct circ_buf *ring = &atmel_port->rx_ring; local
[all...]
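
atmel_serial.c above stages received characters in a circular buffer guarded by CIRC_SPACE() before pushing them to the tty layer; the "real size" warning exists because each slot holds a struct, not a byte. Below is a user-space sketch of that producer side, using the same index arithmetic as linux/circ_buf.h and a hypothetical slot type.

#include <stdint.h>

#define RINGSIZE 1024                         /* must be a power of two */
/* same formulas as CIRC_CNT()/CIRC_SPACE() in linux/circ_buf.h */
#define CNT(head, tail)   (((head) - (tail)) & (RINGSIZE - 1))
#define SPACE(head, tail) CNT((tail), (head) + 1)

struct rx_char { uint8_t ch; uint8_t status; }; /* hypothetical slot type */

struct rx_ring {
        struct rx_char buf[RINGSIZE];  /* real size: sizeof(struct rx_char) * RINGSIZE */
        unsigned int head;             /* producer index */
        unsigned int tail;             /* consumer index */
};

/* store one character at head, dropping it if the consumer has not caught up */
static int rx_push(struct rx_ring *r, uint8_t ch, uint8_t status)
{
        if (!SPACE(r->head, r->tail))
                return -1;                        /* ring full, character dropped */
        r->buf[r->head] = (struct rx_char){ ch, status };
        r->head = (r->head + 1) & (RINGSIZE - 1); /* indices stay masked to size-1 */
        return 0;
}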
/linux-master/drivers/net/ethernet/aquantia/atlantic/
aq_nic.c
395 struct aq_ring_s *ring)
397 self->aq_ring_tx[idx] = ring;
577 struct aq_ring_s *ring)
581 unsigned int dx = ring->sw_tail;
589 dx_buff = &ring->buff_ring[dx];
630 dx = aq_ring_next_dx(ring, dx);
631 dx_buff = &ring->buff_ring[dx];
653 for (dx = ring->sw_tail;
655 --ret, dx = aq_ring_next_dx(ring, dx)) {
656 dx_buff = &ring
394 aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx, struct aq_ring_s *ring) argument
575 aq_nic_map_xdp(struct aq_nic_s *self, struct xdp_frame *xdpf, struct aq_ring_s *ring) argument
672 aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb, struct aq_ring_s *ring) argument
872 struct aq_ring_s *ring = NULL; local
[all...]
/linux-master/drivers/net/ethernet/fungible/funeth/
funeth_ethtool.c
553 struct ethtool_ringparam *ring,
563 ring->rx_max_pending = max_depth / 2;
564 ring->tx_max_pending = max_depth;
566 ring->rx_pending = fp->rq_depth;
567 ring->tx_pending = fp->sq_depth;
574 struct ethtool_ringparam *ring,
581 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
585 if (!is_power_of_2(ring->rx_pending) ||
586 !is_power_of_2(ring
552 fun_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kring, struct netlink_ext_ack *extack) argument
573 fun_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kring, struct netlink_ext_ack *extack) argument
[all...]
/linux-master/drivers/net/ethernet/intel/fm10k/
fm10k_ethtool.c
103 /* per-queue ring statistics */
284 struct fm10k_ring *ring; local
286 ring = interface->tx_ring[i];
287 fm10k_add_ethtool_stats(&data, ring,
290 ring = interface->rx_ring[i];
291 fm10k_add_ethtool_stats(&data, ring,
505 struct ethtool_ringparam *ring,
511 ring->rx_max_pending = FM10K_MAX_RXD;
512 ring->tx_max_pending = FM10K_MAX_TXD;
513 ring
504 fm10k_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) argument
521 fm10k_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) argument
[all...]
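
The ionic, funeth and fm10k blocks above all implement the same ethtool ringparam contract: get_ringparam reports current and maximum descriptor counts, and set_ringparam rejects mini/jumbo rings, non-power-of-two depths and out-of-range values before resizing the queues. A compact user-space sketch of those checks follows, using stand-in fields rather than the kernel's struct ethtool_ringparam.

#include <stdbool.h>
#include <stdint.h>

/* stand-in for the ethtool_ringparam fields the drivers above touch */
struct ringparam {
        uint32_t rx_pending, rx_max_pending;
        uint32_t tx_pending, tx_max_pending;
        uint32_t rx_mini_pending, rx_jumbo_pending;
};

static bool is_power_of_2(uint32_t n)
{
        return n && !(n & (n - 1));
}

/* mirrors the validation visible in the ionic and funeth set_ringparam snippets */
static int validate_ringparam(const struct ringparam *rp)
{
        if (rp->rx_mini_pending || rp->rx_jumbo_pending)
                return -1;              /* mini/jumbo rings not supported */
        if (!is_power_of_2(rp->rx_pending) || !is_power_of_2(rp->tx_pending))
                return -1;              /* descriptor counts must be powers of two */
        if (rp->rx_pending > rp->rx_max_pending ||
            rp->tx_pending > rp->tx_max_pending)
                return -1;              /* stay within the advertised maxima */
        return 0;
}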

Completed in 226 milliseconds
