Searched refs:ring (Results 276 - 300 of 804) sorted by relevance

<< 11 12 13 14 15 16 17 18 19 20 >>

/linux-master/drivers/scsi/snic/
vnic_wq.h  64 struct vnic_dev_ring ring; member in struct:vnic_wq
74 return wq->ring.desc_avail;
80 return wq->ring.desc_count - wq->ring.desc_avail - 1;
111 wq->ring.desc_avail--;
127 wq->ring.desc_avail++;
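
The vnic_wq.h hits above track ring occupancy with a single free-descriptor counter instead of comparing head and tail indices. A minimal sketch of that accounting (the toy_* names are illustrative, not the driver's):

struct toy_wq_ring {
    unsigned int desc_count;   /* total descriptors in the ring */
    unsigned int desc_avail;   /* descriptors currently free */
};

static inline unsigned int toy_wq_avail(const struct toy_wq_ring *r)
{
    return r->desc_avail;
}

/* Mirrors "desc_count - desc_avail - 1" above: one slot is held back, a
 * common way to keep a completely full ring distinguishable from empty. */
static inline unsigned int toy_wq_in_use(const struct toy_wq_ring *r)
{
    return r->desc_count - r->desc_avail - 1;
}

static inline void toy_wq_post(struct toy_wq_ring *r)      { r->desc_avail--; }  /* descriptor handed to HW */
static inline void toy_wq_complete(struct toy_wq_ring *r)  { r->desc_avail++; }  /* descriptor returned by HW */
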
vnic_dev.h  61 unsigned int svnic_dev_desc_ring_size(struct vnic_dev_ring *ring,
64 void svnic_dev_clear_desc_ring(struct vnic_dev_ring *ring);
65 int svnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
68 struct vnic_dev_ring *ring);
vnic_wq.c  25 return svnic_dev_alloc_desc_ring(vdev, &wq->ring, desc_count,
32 unsigned int i, j, count = wq->ring.desc_count;
48 buf->desc = (u8 *)wq->ring.descs +
49 wq->ring.desc_size * buf->index;
74 svnic_dev_free_desc_ring(vdev, &wq->ring);
146 unsigned int count = wq->ring.desc_count;
148 paddr = (u64)wq->ring.base_addr | VNIC_PADDR_TARGET;
213 wq->ring.desc_avail++;
222 svnic_dev_clear_desc_ring(&wq->ring);
/linux-master/drivers/crypto/intel/qat/qat_common/
adf_hw_arbiter.c  34 * ring flow control check enabled. */
48 void adf_update_ring_arb(struct adf_etr_ring_data *ring) argument
50 struct adf_accel_dev *accel_dev = ring->bank->accel_dev;
59 * Enable arbitration on a ring only if the TX half of the ring mask
65 arben_tx = (ring->bank->ring_mask & tx_ring_mask) >> 0;
66 arben_rx = (ring->bank->ring_mask & rx_ring_mask) >> shift;
69 csr_ops->write_csr_ring_srv_arb_en(ring->bank->csr_addr,
70 ring->bank->bank_number, arben);
adf_gen2_hw_data.c  119 static u32 read_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring) argument
121 return READ_CSR_RING_HEAD(csr_base_addr, bank, ring);
124 static void write_csr_ring_head(void __iomem *csr_base_addr, u32 bank, u32 ring, argument
127 WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value);
130 static u32 read_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring) argument
132 return READ_CSR_RING_TAIL(csr_base_addr, bank, ring);
135 static void write_csr_ring_tail(void __iomem *csr_base_addr, u32 bank, u32 ring, argument
138 WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value);
147 u32 ring, u32 value)
149 WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value);
146 write_csr_ring_config(void __iomem *csr_base_addr, u32 bank, u32 ring, u32 value) argument
152 write_csr_ring_base(void __iomem *csr_base_addr, u32 bank, u32 ring, dma_addr_t addr) argument
[all...]
/linux-master/drivers/net/wireless/ath/ath11k/
dp.c  51 /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
102 void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring) argument
104 if (!ring->vaddr_unaligned)
107 if (ring->cached) {
108 dma_unmap_single(ab->dev, ring->paddr_unaligned, ring->size,
110 kfree(ring->vaddr_unaligned);
112 dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
113 ring
224 ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring, enum hal_ring_type type, int ring_num, int mac_id, int num_entries) argument
648 ath11k_dp_link_desc_cleanup(struct ath11k_base *ab, struct dp_link_desc_bank *desc_bank, u32 ring_type, struct dp_srng *ring) argument
[all...]
/linux-master/drivers/net/ethernet/intel/fm10k/
fm10k_main.c  114 * @rx_ring: ring to place buffers on
161 /* update next to alloc since we have filled the ring */
177 * fm10k_reuse_rx_page - page flip buffer and store it back on the ring
178 * @rx_ring: rx descriptor ring to store buffers on
342 /* hand second half of page back to the ring */
356 static inline void fm10k_rx_checksum(struct fm10k_ring *ring, argument
363 if (!(ring->netdev->features & NETIF_F_RXCSUM))
372 ring->rx_stats.csum_err++;
384 ring->rx_stats.csum_good++;
393 static inline void fm10k_rx_hash(struct fm10k_ring *ring, argument
1103 fm10k_get_tx_completed(struct fm10k_ring *ring) argument
1113 fm10k_get_tx_pending(struct fm10k_ring *ring, bool in_sw) argument
1425 struct fm10k_ring *ring; local
1595 struct fm10k_ring *ring; local
1693 struct fm10k_ring *ring; local
[all...]
/linux-master/drivers/net/ethernet/hisilicon/hns/
hns_enet.h  34 struct hnae_ring *ring; member in struct:hns_nic_ring_data
45 void (*fill_desc)(struct hnae_ring *ring, void *priv,
50 int *bnum, struct hnae_ring *ring);
66 /* the cb for nic to manage the ring buffer, the first half of the
/linux-master/drivers/net/ethernet/intel/ice/
ice_txrx.h  312 /* descriptor ring, associated with a VSI */
315 void *desc; /* Descriptor ring memory */
317 struct net_device *netdev; /* netdev ring maps to */
321 u16 q_index; /* Queue number of ring */
324 u16 reg_idx; /* HW register index of the ring */
359 struct ice_rx_ring *next; /* pointer to next ring in q_vector */
362 dma_addr_t dma; /* physical address of ring */
364 u8 dcb_tc; /* Traffic class of ring */
375 struct ice_tx_ring *next; /* pointer to next ring in q_vector */
376 void *desc; /* Descriptor ring memory */
410 ice_ring_uses_build_skb(struct ice_rx_ring *ring) argument
415 ice_set_ring_build_skb_ena(struct ice_rx_ring *ring) argument
420 ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring) argument
425 ice_ring_ch_enabled(struct ice_tx_ring *ring) argument
430 ice_ring_is_xdp(struct ice_tx_ring *ring) argument
477 ice_rx_pg_order(struct ice_rx_ring *ring) argument
[all...]
/linux-master/drivers/net/ethernet/pasemi/
pasemi_mac.c  268 struct pasemi_mac_csring *ring; local
273 ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring),
276 if (!ring) {
281 chno = ring->chan.chno;
283 ring->size = CS_RING_SIZE;
284 ring->next_to_fill = 0;
287 if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE))
291 PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma));
292 val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32);
297 ring
366 struct pasemi_mac_rxring *ring; local
450 struct pasemi_mac_txring *ring; local
[all...]
/linux-master/drivers/gpu/drm/msm/adreno/
a5xx_gpu.h  87 * preemption, it fills out the record with the useful information (wptr, ring
89 * the preemption. When a ring is switched out, the CP will save the ringbuffer
151 #define shadowptr(a5xx_gpu, ring) ((a5xx_gpu)->shadow_iova + \
152 ((ring)->id * sizeof(uint32_t)))
154 bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
163 void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, bool sync);
/linux-master/drivers/scsi/fnic/
vnic_rq.c  18 unsigned int i, j, count = rq->ring.desc_count;
33 buf->desc = (u8 *)rq->ring.descs +
34 rq->ring.desc_size * buf->index;
60 vnic_dev_free_desc_ring(vdev, &rq->ring);
86 err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size);
106 paddr = (u64)rq->ring.base_addr | VNIC_PADDR_TARGET;
108 iowrite32(rq->ring.desc_count, &rq->ctrl->ring_size);
115 /* Use current fetch_index as the ring starting point */
168 rq->ring.desc_avail++;
171 /* Use current fetch_index as the ring starting point */
[all...]
/linux-master/drivers/net/ipa/
gsi_private.h  106 * gsi_ring_virt() - Return virtual address for a ring entry
107 * @ring: Ring whose address is to be translated
110 void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
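
The gsi_private.h hit only declares gsi_ring_virt(); a typical index-to-address translation for such a helper might look like the sketch below (struct layout and field names are assumptions, not the actual gsi_ring):

struct toy_gsi_ring {
    void *virt;              /* virtual base of the mapped ring buffer */
    unsigned int count;      /* number of entries in the ring */
    unsigned int elem_size;  /* size of one ring entry in bytes */
};

static void *toy_ring_virt(const struct toy_gsi_ring *ring, unsigned int index)
{
    /* wrap the free-running index into the ring, then scale to bytes */
    return (char *)ring->virt + (index % ring->count) * ring->elem_size;
}
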
/linux-master/drivers/crypto/cavium/nitrox/
nitrox_mbx.c  39 static inline u64 pf2vf_read_mbox(struct nitrox_device *ndev, int ring) argument
43 reg_addr = NPS_PKT_MBOX_VF_PF_PFDATAX(ring);
48 int ring)
52 reg_addr = NPS_PKT_MBOX_PF_VF_PFDATAX(ring);
96 pf2vf_write_mbox(ndev, msg.value, vfdev->ring);
136 /* get the vfno from ring */
139 vfdev->ring = i;
141 vfdev->msg.value = pf2vf_read_mbox(ndev, vfdev->ring);
159 /* get the vfno from ring */
162 vfdev->ring
47 pf2vf_write_mbox(struct nitrox_device *ndev, u64 value, int ring) argument
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
psp_v12_0.c  155 /* Change IH ring for VMC */
168 /* Change IH ring for UMC */
186 struct psp_ring *ring = &psp->km_ring; local
192 /* Write low address of the ring to C2PMSG_102 */
193 psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
195 /* Write high address of the ring to C2PMSG_103 */
196 psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
199 /* Write the ring initialization command to C2PMSG_101 */
211 /* Write low address of the ring to C2PMSG_69 */
212 psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
268 struct psp_ring *ring = &psp->km_ring; local
[all...]
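
The psp_v12_0.c hits program the ring's 64-bit memory address as two 32-bit register writes (low half, then high half). A self-contained sketch of that split, with write_reg() standing in for the real MMIO accessor and lo32()/hi32() mirroring the kernel's lower_32_bits()/upper_32_bits():

#include <stdint.h>

static inline uint32_t lo32(uint64_t v) { return (uint32_t)v; }
static inline uint32_t hi32(uint64_t v) { return (uint32_t)(v >> 32); }

/* Program a 64-bit ring base address as two 32-bit register writes. */
static void toy_program_ring_addr(void (*write_reg)(unsigned int reg, uint32_t val),
                                  unsigned int reg_lo, unsigned int reg_hi,
                                  uint64_t ring_addr)
{
    write_reg(reg_lo, lo32(ring_addr));   /* low 32 bits of the ring address */
    write_reg(reg_hi, hi32(ring_addr));   /* high 32 bits of the ring address */
}
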
psp_v13_0_4.c  201 /* Write the ring destroy command*/
210 /* Write the ring destroy command*/
228 struct psp_ring *ring = &psp->km_ring; local
238 /* Write low address of the ring to C2PMSG_102 */
239 psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr);
241 /* Write high address of the ring to C2PMSG_103 */
242 psp_ring_reg = upper_32_bits(ring->ring_mem_mc_addr);
245 /* Write the ring initialization command to C2PMSG_101 */
257 /* Wait for sOS ready for ring creation */
261 DRM_ERROR("Failed to wait for trust OS ready for ring creatio
294 struct psp_ring *ring = &psp->km_ring; local
[all...]
amdgpu_sdma.h  59 struct amdgpu_ring ring; member in struct:amdgpu_sdma_instance
157 amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
158 int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
159 uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
hdp_v6_0.c  35 struct amdgpu_ring *ring)
37 if (!ring || !ring->funcs->emit_wreg)
40 amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
34 hdp_v6_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) argument
hdp_v7_0.c  32 struct amdgpu_ring *ring)
34 if (!ring || !ring->funcs->emit_wreg)
37 amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
31 hdp_v7_0_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) argument
/linux-master/drivers/net/wireguard/
queueing.c  32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL);
37 ptr_ring_cleanup(&queue->ring, NULL);
46 WARN_ON(!purge && !__ptr_ring_empty(&queue->ring));
47 ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
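The queueing.c hits show the ptr_ring lifecycle used here: ptr_ring_init() allocates the ring and ptr_ring_cleanup() tears it down, with a destructor passed only when leftover skbs must be purged. A minimal kernel-style sketch of that pairing (struct toy_queue and the helper names are illustrative):

#include <linux/ptr_ring.h>
#include <linux/skb_array.h>

struct toy_queue {
    struct ptr_ring ring;
};

static int toy_queue_init(struct toy_queue *queue, unsigned int len)
{
    /* allocate storage for 'len' pointers, as in the queueing.c hit above */
    return ptr_ring_init(&queue->ring, len, GFP_KERNEL);
}

static void toy_queue_free(struct toy_queue *queue, bool purge)
{
    /* pass a destructor only when leftover skbs should be purged */
    ptr_ring_cleanup(&queue->ring, purge ? __skb_array_destroy_skb : NULL);
}
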
/linux-master/drivers/net/can/spi/mcp251xfd/
mcp251xfd.h  821 mcp251xfd_get_tx_obj_addr(const struct mcp251xfd_tx_ring *ring, u8 n)
823 return ring->base + ring->obj_size * n;
827 mcp251xfd_get_rx_obj_addr(const struct mcp251xfd_rx_ring *ring, u8 n)
829 return ring->base + ring->obj_size * n;
874 static inline u8 mcp251xfd_get_tx_head(const struct mcp251xfd_tx_ring *ring)
876 return ring->head & (ring->obj_num - 1);
879 static inline u8 mcp251xfd_get_tx_tail(const struct mcp251xfd_tx_ring *ring)
[all...]
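
The mcp251xfd.h hits index the ring with head & (obj_num - 1), i.e. head and tail are free-running counters and obj_num is a power of two. A small sketch of that pattern (types and names are illustrative):

#include <stdint.h>

struct toy_tx_ring {
    uint32_t head;     /* free-running producer counter */
    uint32_t tail;     /* free-running consumer counter */
    uint8_t  obj_num;  /* number of objects in the ring (power of two) */
};

static inline uint8_t toy_get_tx_head(const struct toy_tx_ring *ring)
{
    /* "& (obj_num - 1)" is the cheap equivalent of "% obj_num" */
    return ring->head & (ring->obj_num - 1);
}

static inline uint8_t toy_get_tx_tail(const struct toy_tx_ring *ring)
{
    return ring->tail & (ring->obj_num - 1);
}
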
/linux-master/drivers/net/ethernet/mediatek/
mtk_star_emac.c  301 static void mtk_star_ring_init(struct mtk_star_ring *ring, argument
304 memset(ring, 0, sizeof(*ring));
305 ring->descs = descs;
306 ring->head = 0;
307 ring->tail = 0;
310 static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring, argument
313 struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
324 desc_data->dma_addr = ring
340 mtk_star_ring_push_head(struct mtk_star_ring *ring, struct mtk_star_ring_desc_data *desc_data, unsigned int flags) argument
367 mtk_star_ring_push_head_rx(struct mtk_star_ring *ring, struct mtk_star_ring_desc_data *desc_data) argument
374 mtk_star_ring_push_head_tx(struct mtk_star_ring *ring, struct mtk_star_ring_desc_data *desc_data) argument
384 mtk_star_tx_ring_avail(struct mtk_star_ring *ring) argument
696 struct mtk_star_ring *ring = &priv->rx_ring; local
726 mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring, void (*unmap_func)(struct mtk_star_priv *, struct mtk_star_ring_desc_data *)) argument
747 struct mtk_star_ring *ring = &priv->rx_ring; local
754 struct mtk_star_ring *ring = &priv->tx_ring; local
1101 struct mtk_star_ring *ring = &priv->tx_ring; local
1142 struct mtk_star_ring *ring = &priv->tx_ring; local
1162 struct mtk_star_ring *ring = &priv->tx_ring; local
1272 struct mtk_star_ring *ring = &priv->rx_ring; local
[all...]
/linux-master/drivers/net/ethernet/intel/ixgbevf/
ixgbevf_main.c  190 static u64 ixgbevf_get_tx_completed(struct ixgbevf_ring *ring) argument
192 return ring->stats.packets;
195 static u32 ixgbevf_get_tx_pending(struct ixgbevf_ring *ring) argument
197 struct ixgbevf_adapter *adapter = netdev_priv(ring->netdev);
200 u32 head = IXGBE_READ_REG(hw, IXGBE_VFTDH(ring->reg_idx));
201 u32 tail = IXGBE_READ_REG(hw, IXGBE_VFTDT(ring->reg_idx));
205 tail - head : (tail + ring->count - head);
261 * @tx_ring: tx ring to clean
426 * @q_vector: structure containing interrupt and ring information
441 static inline void ixgbevf_rx_hash(struct ixgbevf_ring *ring, argument
467 ixgbevf_rx_checksum(struct ixgbevf_ring *ring, union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb) argument
971 ixgbevf_xmit_xdp_ring(struct ixgbevf_ring *ring, struct xdp_buff *xdp) argument
1275 struct ixgbevf_ring *ring; local
1360 struct ixgbevf_ring *ring; local
1684 ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) argument
1767 ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring, int index) argument
1801 ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) argument
1828 ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) argument
1905 ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter, struct ixgbevf_ring *ring) argument
2703 ixgbevf_add_ring(struct ixgbevf_ring *ring, struct ixgbevf_ring_container *head) argument
2731 struct ixgbevf_ring *ring; local
2852 struct ixgbevf_ring *ring; local
4352 ixgbevf_get_tx_ring_stats(struct rtnl_link_stats64 *stats, const struct ixgbevf_ring *ring) argument
4375 const struct ixgbevf_ring *ring; local
4448 struct ixgbevf_ring *ring = adapter->rx_ring[i]; local
[all...]
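
The ixgbevf_get_tx_pending() hit above derives the number of in-flight descriptors from the hardware head/tail registers, accounting for wraparound. A minimal sketch of that arithmetic (plain C, hypothetical helper name):

#include <stdint.h>

/* Pending descriptors between hardware head and tail indices; mirrors the
 * "tail - head : (tail + ring->count - head)" expression in the hit above. */
static uint32_t toy_tx_pending(uint32_t head, uint32_t tail, uint32_t count)
{
    if (head == tail)
        return 0;

    return head < tail ? tail - head : tail + count - head;
}
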
/linux-master/drivers/net/ethernet/hisilicon/hns3/
hns3_debugfs.c  582 static void hns3_dbg_tx_spare_info(struct hns3_enet_ring *ring, char *buf, argument
586 struct hns3_tx_spare *tx_spare = ring->tx_spare;
608 sprintf(result[j++], "%u", ring->tx_copybreak);
636 static void hns3_dump_rx_queue_info(struct hns3_enet_ring *ring, argument
645 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
648 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
651 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
654 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
657 sprintf(result[j++], "%u", readl_relaxed(ring->tqp->io_base +
660 sprintf(result[j++], "%u", readl_relaxed(ring
688 struct hns3_enet_ring *ring; local
738 hns3_dump_tx_queue_info(struct hns3_enet_ring *ring, struct hnae3_ae_dev *ae_dev, char **result, u32 index) argument
791 struct hns3_enet_ring *ring; local
922 struct hns3_enet_ring *ring; local
995 struct hns3_enet_ring *ring; local
1125 hns3_dump_page_pool_info(struct hns3_enet_ring *ring, char **result, u32 index) argument
1148 struct hns3_enet_ring *ring; local
[all...]
hns3_enet.h  252 /* hardware spec ring buffer format */
343 /* desc type, used by the ring user to mark the type of the priv data */
483 /* idx of lastest sent desc, the ring is empty when equal to
487 u32 flag; /* ring attribute */
491 /* for Tx ring */
499 /* for Rx ring */
543 struct hns3_enet_ring *ring; member in struct:hns3_enet_ring_group
579 * the cb for nic to manage the ring buffer, the first half of the
582 struct hns3_enet_ring *ring; member in struct:hns3_nic_priv
622 static inline int ring_space(struct hns3_enet_ring *ring) argument
634 hns3_tqp_read_reg(struct hns3_enet_ring *ring, u32 reg) argument
680 hns3_page_order(struct hns3_enet_ring *ring) argument
[all...]

Completed in 287 milliseconds
