Searched refs:ring (Results 51 - 75 of 804) sorted by relevance

/linux-master/drivers/net/wireless/ath/ath12k/
dbring.c
11 struct ath12k_dbring *ring,
23 srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
30 ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
31 paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
38 spin_lock_bh(&ring->idr_lock);
39 buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
40 spin_unlock_bh(&ring->idr_lock);
64 spin_lock_bh(&ring->idr_lock);
65 idr_remove(&ring
10 ath12k_dbring_bufs_replenish(struct ath12k *ar, struct ath12k_dbring *ring, struct ath12k_dbring_element *buff, gfp_t gfp) argument
75 ath12k_dbring_fill_bufs(struct ath12k *ar, struct ath12k_dbring *ring, gfp_t gfp) argument
116 ath12k_dbring_wmi_cfg_setup(struct ath12k *ar, struct ath12k_dbring *ring, enum wmi_direct_buffer_module id) argument
148 ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring, u32 num_resp_per_event, u32 event_timeout_ms, int (*handler)(struct ath12k *, struct ath12k_dbring_data *)) argument
163 ath12k_dbring_buf_setup(struct ath12k *ar, struct ath12k_dbring *ring, struct ath12k_dbring_cap *db_cap) argument
186 ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring, int ring_num, int num_entries) argument
233 struct ath12k_dbring *ring = NULL; local
337 ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring) argument
342 ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring) argument
[all...]
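The replenish path above follows a common DMA-buffer pattern: align the raw allocation, map it for device DMA, then publish it in an IDR under a spinlock so the completion path can look it up by buffer id and unmap it. A minimal sketch of that pattern, using hypothetical my_dbring/my_elem types rather than the ath12k structures:

#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>

struct my_dbring {                      /* hypothetical stand-in */
        struct idr bufs_idr;
        spinlock_t idr_lock;
        int bufs_max;
        u32 buf_sz;
        u32 buf_align;
};

struct my_elem {
        void *payload;
        dma_addr_t paddr;
};

static int my_dbring_replenish(struct device *dev, struct my_dbring *ring,
                               struct my_elem *buff, gfp_t gfp)
{
        void *aligned = PTR_ALIGN(buff->payload, ring->buf_align);
        dma_addr_t paddr;
        int buf_id;

        /* Map the aligned payload so the device can DMA into it. */
        paddr = dma_map_single(dev, aligned, ring->buf_sz, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, paddr))
                return -ENOMEM;

        /* Publish under the lock; the completion path finds the buffer
         * by this id and calls idr_remove() before freeing it. */
        spin_lock_bh(&ring->idr_lock);
        buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
        spin_unlock_bh(&ring->idr_lock);
        if (buf_id < 0) {
                dma_unmap_single(dev, paddr, ring->buf_sz, DMA_FROM_DEVICE);
                return buf_id;
        }

        buff->paddr = paddr;
        return 0;
}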
/linux-master/drivers/net/ethernet/hisilicon/hns/
hnae.c
38 static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) argument
40 unsigned int order = hnae_page_order(ring);
50 cb->length = hnae_page_size(ring);
56 static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) argument
63 else if (unlikely(is_rx_ring(ring)))
69 static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) argument
71 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
72 cb->length, ring_to_dma_dir(ring));
74 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
80 static void hnae_unmap_buffer(struct hnae_ring *ring, struc argument
121 hnae_free_buffers(struct hnae_ring *ring) argument
130 hnae_alloc_buffers(struct hnae_ring *ring) argument
149 hnae_free_desc(struct hnae_ring *ring) argument
160 hnae_alloc_desc(struct hnae_ring *ring) argument
181 hnae_fini_ring(struct hnae_ring *ring) argument
195 hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags) argument
[all...]
hnae.h
12 * ring buffer queue (rbq):
14 * ring:
16 * ring description (desc):
17 * an element in the ring with packet information
168 /* hardware spec ring buffer format */
224 /* desc type, used by the ring user to mark the type of the priv data */
232 #define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
233 #define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
234 #define is_rx_ring(ring) (!is_tx_rin
316 ring_dist(struct hnae_ring *ring, int begin, int end) argument
324 ring_space(struct hnae_ring *ring) argument
330 is_ring_empty(struct hnae_ring *ring) argument
586 hnae_reserve_buffer_map(struct hnae_ring *ring, struct hnae_desc_cb *cb) argument
608 hnae_alloc_buffer_attach(struct hnae_ring *ring, int i) argument
620 hnae_buffer_detach(struct hnae_ring *ring, int i) argument
626 hnae_free_buffer_detach(struct hnae_ring *ring, int i) argument
639 hnae_replace_buffer(struct hnae_ring *ring, int i, struct hnae_desc_cb *res_cb) argument
650 hnae_reuse_buffer(struct hnae_ring *ring, int i) argument
662 struct hnae_ring *ring; local
677 struct hnae_ring *ring; local
[all...]
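The ring_dist/ring_space/is_ring_empty helpers indexed above reduce to modular arithmetic on the producer (next_to_use) and consumer (next_to_clean) indices, with one slot kept empty so a full ring is distinguishable from an empty one. A generic reconstruction of that arithmetic (field names mirror hnae's, but this is a sketch, not the header itself):

struct ring_idx {                       /* illustrative subset of hnae_ring */
        int desc_num;                   /* total descriptors in the ring */
        int next_to_use;                /* producer index */
        int next_to_clean;              /* consumer index */
};

/* Forward distance from begin to end, with wrap-around. */
static inline int ring_dist(const struct ring_idx *r, int begin, int end)
{
        return (end - begin + r->desc_num) % r->desc_num;
}

/* Free slots; the -1 keeps one slot empty to disambiguate full/empty. */
static inline int ring_space(const struct ring_idx *r)
{
        return r->desc_num -
               ring_dist(r, r->next_to_clean, r->next_to_use) - 1;
}

static inline int is_ring_empty(const struct ring_idx *r)
{
        return r->next_to_use == r->next_to_clean;
}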
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ring_mux.c
44 struct amdgpu_ring *ring)
46 return ring->entry_index < mux->ring_entry_size ?
47 &mux->ring_entry[ring->entry_index] : NULL;
50 /* copy packages on sw ring range[begin, end) */
52 struct amdgpu_ring *ring,
58 start = s_start & ring->buf_mask;
59 end = s_end & ring->buf_mask;
62 DRM_ERROR("no more data copied from sw ring\n");
66 amdgpu_ring_alloc(real_ring, (ring->ring_size >> 2) + end - start);
67 amdgpu_ring_write_multiple(real_ring, (void *)&ring
43 amdgpu_ring_mux_sw_entry(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
51 amdgpu_ring_mux_copy_pkt_from_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 s_start, u64 s_end) argument
149 amdgpu_ring_mux_init(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, unsigned int entry_size) argument
194 amdgpu_ring_mux_add_sw_ring(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
212 amdgpu_ring_mux_set_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 wptr) argument
252 amdgpu_ring_mux_get_wptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
281 amdgpu_ring_mux_get_rptr(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
315 amdgpu_sw_ring_get_rptr_gfx(struct amdgpu_ring *ring) argument
324 amdgpu_sw_ring_get_wptr_gfx(struct amdgpu_ring *ring) argument
333 amdgpu_sw_ring_set_wptr_gfx(struct amdgpu_ring *ring) argument
343 amdgpu_sw_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) argument
363 struct amdgpu_ring *ring; local
392 amdgpu_sw_ring_ib_begin(struct amdgpu_ring *ring) argument
407 amdgpu_sw_ring_ib_end(struct amdgpu_ring *ring) argument
418 amdgpu_sw_ring_ib_mark_offset(struct amdgpu_ring *ring, enum amdgpu_ring_mux_offset_type type) argument
432 amdgpu_ring_mux_start_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
461 scan_and_remove_signaled_chunk(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
483 amdgpu_ring_mux_ib_mark_offset(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring, u64 offset, enum amdgpu_ring_mux_offset_type type) argument
518 amdgpu_ring_mux_end_ib(struct amdgpu_ring_mux *mux, struct amdgpu_ring *ring) argument
544 struct amdgpu_ring *ring = NULL; local
[all...]
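The copy helper above depends on the ring size being a power of two: masking a monotonically increasing position with buf_mask wraps it into the ring, and a wrapped [start, end) range is copied as two chunks. A standalone sketch of that technique over a generic u32 ring (not the amdgpu structures):

#include <linux/string.h>
#include <linux/types.h>

/* Copy dwords [s_start, s_end) out of a power-of-two ring.
 * buf_mask == (ring size in dwords) - 1. */
static void ring_copy_range(u32 *dst, const u32 *ring, u32 buf_mask,
                            u64 s_start, u64 s_end)
{
        u32 start = s_start & buf_mask;
        u32 end = s_end & buf_mask;

        if (start == end)
                return;                         /* nothing new to copy */

        if (start < end) {
                memcpy(dst, ring + start, (end - start) * sizeof(u32));
        } else {
                /* The range wraps: copy the ring's tail, then its head. */
                u32 tail = buf_mask + 1 - start;

                memcpy(dst, ring + start, tail * sizeof(u32));
                memcpy(dst + tail, ring, end * sizeof(u32));
        }
}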
uvd_v7_0.c
67 * @ring: amdgpu_ring pointer
71 static uint64_t uvd_v7_0_ring_get_rptr(struct amdgpu_ring *ring) argument
73 struct amdgpu_device *adev = ring->adev;
75 return RREG32_SOC15(UVD, ring->me, mmUVD_RBC_RB_RPTR);
81 * @ring: amdgpu_ring pointer
85 static uint64_t uvd_v7_0_enc_ring_get_rptr(struct amdgpu_ring *ring) argument
87 struct amdgpu_device *adev = ring->adev;
89 if (ring == &adev->uvd.inst[ring->me].ring_enc[0])
90 return RREG32_SOC15(UVD, ring
102 uvd_v7_0_ring_get_wptr(struct amdgpu_ring *ring) argument
116 uvd_v7_0_enc_ring_get_wptr(struct amdgpu_ring *ring) argument
136 uvd_v7_0_ring_set_wptr(struct amdgpu_ring *ring) argument
150 uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring) argument
175 uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) argument
216 uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, struct amdgpu_bo *bo, struct dma_fence **fence) argument
279 uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, struct amdgpu_bo *bo, struct dma_fence **fence) argument
339 uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) argument
400 struct amdgpu_ring *ring; local
520 struct amdgpu_ring *ring; local
789 struct amdgpu_ring *ring; local
955 struct amdgpu_ring *ring; local
1180 uvd_v7_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) argument
1221 uvd_v7_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) argument
1239 uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) argument
1251 uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring) argument
1292 struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); local
1320 uvd_v7_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) argument
1353 uvd_v7_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) argument
1367 uvd_v7_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) argument
1383 uvd_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) argument
1402 uvd_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) argument
1417 uvd_v7_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) argument
1430 uvd_v7_0_enc_ring_insert_end(struct amdgpu_ring *ring) argument
1435 uvd_v7_0_enc_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) argument
1445 uvd_v7_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) argument
1458 uvd_v7_0_enc_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) argument
[all...]
uvd_v6_0.c
73 * @ring: amdgpu_ring pointer
77 static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring) argument
79 struct amdgpu_device *adev = ring->adev;
87 * @ring: amdgpu_ring pointer
91 static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring) argument
93 struct amdgpu_device *adev = ring->adev;
95 if (ring == &adev->uvd.inst->ring_enc[0])
103 * @ring: amdgpu_ring pointer
107 static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring) argument
109 struct amdgpu_device *adev = ring
121 uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring) argument
138 uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring) argument
152 uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring) argument
170 uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring) argument
208 uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_bo *bo, struct dma_fence **fence) argument
271 uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct amdgpu_bo *bo, struct dma_fence **fence) argument
332 uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) argument
380 struct amdgpu_ring *ring; local
465 struct amdgpu_ring *ring = &adev->uvd.inst->ring; local
727 struct amdgpu_ring *ring = &adev->uvd.inst->ring; local
924 uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) argument
956 uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned flags) argument
973 uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) argument
985 uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring) argument
1023 uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) argument
1051 uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) argument
1065 uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) argument
1076 uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned vmid, uint64_t pd_addr) argument
1091 uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring) argument
1108 uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) argument
1120 uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring) argument
1131 uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring) argument
1136 uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) argument
[all...]
amdgpu_vpe.c
123 struct amdgpu_device *adev = vpe->ring.adev;
224 struct amdgpu_device *adev = vpe->ring.adev;
267 struct amdgpu_ring *ring = &vpe->ring; local
270 ring->ring_obj = NULL;
271 ring->use_doorbell = true;
272 ring->vm_hub = AMDGPU_MMHUB0(0);
273 ring->doorbell_index = (adev->doorbell_index.vpe_ring << 1);
274 snprintf(ring->name, 4, "vpe");
276 ret = amdgpu_ring_init(adev, ring, 102
445 vpe_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count) argument
457 vpe_get_csa_mc_addr(struct amdgpu_ring *ring, uint32_t vmid) argument
472 vpe_ring_emit_pred_exec(struct amdgpu_ring *ring, uint32_t device_select, uint32_t exec_count) argument
484 vpe_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job, struct amdgpu_ib *ib, uint32_t flags) argument
503 vpe_ring_emit_fence(struct amdgpu_ring *ring, uint64_t addr, uint64_t seq, unsigned int flags) argument
527 vpe_ring_emit_pipeline_sync(struct amdgpu_ring *ring) argument
547 vpe_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val) argument
556 vpe_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, uint32_t val, uint32_t mask) argument
573 vpe_ring_emit_vm_flush(struct amdgpu_ring *ring, unsigned int vmid, uint64_t pd_addr) argument
579 vpe_ring_init_cond_exec(struct amdgpu_ring *ring, uint64_t addr) argument
594 vpe_ring_preempt_ib(struct amdgpu_ring *ring) argument
662 vpe_ring_get_rptr(struct amdgpu_ring *ring) argument
681 vpe_ring_get_wptr(struct amdgpu_ring *ring) argument
700 vpe_ring_set_wptr(struct amdgpu_ring *ring) argument
734 vpe_ring_test_ring(struct amdgpu_ring *ring) argument
776 vpe_ring_test_ib(struct amdgpu_ring *ring, long timeout) argument
830 vpe_ring_begin_use(struct amdgpu_ring *ring) argument
855 vpe_ring_end_use(struct amdgpu_ring *ring) argument
[all...]
jpeg_v2_0.h
48 void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
49 void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
50 void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
52 void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_job *job,
54 void jpeg_v2_0_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
56 void jpeg_v2_0_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
58 void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
59 void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
jpeg_v4_0_3.h
51 void jpeg_v4_0_3_dec_ring_emit_ib(struct amdgpu_ring *ring,
55 void jpeg_v4_0_3_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
57 void jpeg_v4_0_3_dec_ring_emit_vm_flush(struct amdgpu_ring *ring,
59 void jpeg_v4_0_3_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count);
60 void jpeg_v4_0_3_dec_ring_insert_start(struct amdgpu_ring *ring);
61 void jpeg_v4_0_3_dec_ring_insert_end(struct amdgpu_ring *ring);
62 void jpeg_v4_0_3_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val);
63 void jpeg_v4_0_3_dec_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg,
/linux-master/tools/virtio/ringtest/
virtio_ring_0_9.c
22 struct vring ring; variable in typeref:struct:vring
24 /* enabling the below activates experimental ring polling code
26 * high bits of ring id ^ 0x8000).
30 * (which skips ring updates and reads and writes len in descriptor).
63 /* implemented by ring */
72 perror("Unable to allocate ring buffer.\n");
76 vring_init(&ring, ring_size, p, 0x1000);
86 ring.desc[i].next = i + 1;
117 desc = ring.desc;
136 ring
[all...]
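As the matches show, the test builds a legacy (virtio 0.9) ring from one page-aligned allocation carved up by vring_init(), then chains the free descriptors through desc[i].next. A trimmed userspace sketch of that setup (assumes a power-of-two ring_size, as the test requires):

#include <err.h>
#include <stdlib.h>
#include <string.h>
#include <linux/virtio_ring.h>

static struct vring ring;

static void alloc_ring(unsigned int ring_size)
{
        size_t sz = vring_size(ring_size, 0x1000);
        unsigned int i;
        void *p;

        if (posix_memalign(&p, 0x1000, sz))
                err(1, "Unable to allocate ring buffer");
        memset(p, 0, sz);

        /* Lays out desc[], avail and used within the one allocation. */
        vring_init(&ring, ring_size, p, 0x1000);

        /* Chain every descriptor into a free list. */
        for (i = 0; i < ring_size - 1; i++)
                ring.desc[i].next = i + 1;
}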
/linux-master/drivers/gpu/drm/radeon/
uvd_v2_2.c
37 * Write a fence and a trap command to the ring.
42 struct radeon_ring *ring = &rdev->ring[fence->ring]; local
43 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
45 radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
46 radeon_ring_write(ring, fence->seq);
47 radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
48 radeon_ring_write(ring, lower_32_bits(addr));
49 radeon_ring_write(ring, PACKET
72 uvd_v2_2_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) argument
[all...]
radeon_trace.h
34 __field(u32, ring)
40 __entry->ring = p->ring;
43 p->rdev, p->ring);
45 TP_printk("ring=%u, dw=%u, fences=%u",
46 __entry->ring, __entry->dw,
51 TP_PROTO(unsigned vmid, int ring),
52 TP_ARGS(vmid, ring),
55 __field(u32, ring)
60 __entry->ring
[all...]
radeon_ib.c
38 * command ring and the hw will fetch the commands from the IB
41 * put in IBs for execution by the requested ring.
49 * @ring: ring index the IB is associated with
58 int radeon_ib_get(struct radeon_device *rdev, int ring, argument
72 ib->ring = ring;
105 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
112 * Schedule an IB on the associated ring (all asics).
115 * On SI, there are two parallel engines fed from the primary ring,
128 struct radeon_ring *ring = &rdev->ring[ib->ring]; local
262 struct radeon_ring *ring = &rdev->ring[i]; local
[all...]
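Taken together, the helpers above form the usual IB round trip: get an IB, fill it with command packets, schedule it on a ring, and free it once done. A hedged sketch of that sequence for driver-internal code (the NOP packet and 256-byte size are placeholders; PACKET2() comes from the ASIC headers, and the signatures are quoted from memory, not verified against the tree):

static int example_ib_submit(struct radeon_device *rdev)
{
        struct radeon_ib ib;
        int r;

        /* Allocate a small IB associated with the GFX ring. */
        r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
        if (r)
                return r;

        ib.ptr[0] = PACKET2(0);         /* placeholder type-2 NOP */
        ib.length_dw = 1;

        /* Emits the IB onto its ring; the CP then fetches from it. */
        r = radeon_ib_schedule(rdev, &ib, NULL, true);

        radeon_ib_free(rdev, &ib);      /* the IB's fence guards reuse */
        return r;
}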
cik_sdma.c
42 * and each one supports 1 ring buffer used for gfx
46 * (ring buffer, IBs, etc.), but sDMA has its own
58 * @ring: radeon ring pointer
63 struct radeon_ring *ring)
68 rptr = rdev->wb.wb[ring->rptr_offs/4];
70 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
85 * @ring: radeon ring pointer
90 struct radeon_ring *ring)
62 cik_sdma_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
89 cik_sdma_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
110 cik_sdma_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
135 struct radeon_ring *ring = &rdev->ring[ib->ring]; local
171 struct radeon_ring *ring = &rdev->ring[ridx]; local
202 struct radeon_ring *ring = &rdev->ring[fence->ring]; local
227 cik_sdma_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) argument
366 struct radeon_ring *ring; local
586 struct radeon_ring *ring = &rdev->ring[ring_index]; local
644 cik_sdma_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) argument
701 cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) argument
774 cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) argument
944 cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, unsigned vm_id, uint64_t pd_addr) argument
[all...]
si_dma.c
35 * @ring: radeon_ring structure holding ring information
40 bool si_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) argument
45 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
51 radeon_ring_lockup_update(rdev, ring);
54 return radeon_ring_test_lockup(rdev, ring);
186 void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, argument
190 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
192 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
194 radeon_ring_write(ring, (
238 struct radeon_ring *ring = &rdev->ring[ring_index]; local
[all...]
radeon_semaphore.c
61 struct radeon_ring *ring = &rdev->ring[ridx]; local
65 if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
69 ring->last_semaphore_signal_addr = semaphore->gpu_addr;
78 struct radeon_ring *ring = &rdev->ring[ridx]; local
82 if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
86 ring->last_semaphore_wait_addr = semaphore->gpu_addr;
/linux-master/drivers/crypto/intel/qat/qat_common/
adf_transport_debug.c
15 struct adf_etr_ring_data *ring = sfile->private; local
21 if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
22 ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
25 return ring->base_addr +
26 (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
31 struct adf_etr_ring_data *ring = sfile->private; local
33 if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
34 ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
37 return ring->base_addr +
38 (ADF_MSG_SIZE_TO_BYTES(ring
43 struct adf_etr_ring_data *ring = sfile->private; local
90 adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name) argument
110 adf_ring_debugfs_rm(struct adf_etr_ring_data *ring) argument
155 struct adf_etr_ring_data *ring = &bank->rings[ring_id]; local
[all...]
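The start/next iterators above treat the ring as a flat array of fixed-size messages: position *pos is valid while it stays below ring_bytes / msg_bytes, and maps to base_addr + msg_bytes * pos. A compact generic sketch of that mapping:

#include <linux/types.h>

/* E.g. a 16 KiB ring of 128-byte messages exposes slots 0..127. */
static void *ring_msg_at(void *base, size_t ring_bytes, size_t msg_bytes,
                         loff_t pos)
{
        if (pos < 0 || (size_t)pos >= ring_bytes / msg_bytes)
                return NULL;            /* past the last message */
        return base + msg_bytes * pos;
}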
/linux-master/drivers/net/can/spi/mcp251xfd/
mcp251xfd-rx.c
21 const struct mcp251xfd_rx_ring *ring,
27 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
40 const struct mcp251xfd_rx_ring *ring,
46 err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
51 fifo_ua -= ring->base - MCP251XFD_RAM_START;
52 *rx_tail = fifo_ua / ring->obj_size;
59 const struct mcp251xfd_rx_ring *ring)
67 err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
71 rx_tail = mcp251xfd_get_rx_tail(ring);
84 struct mcp251xfd_rx_ring *ring)
20 mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring, u8 *rx_head, bool *fifo_empty) argument
39 mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring, u8 *rx_tail) argument
58 mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring) argument
83 mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, struct mcp251xfd_rx_ring *ring) argument
156 mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, struct mcp251xfd_rx_ring *ring, const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj) argument
184 mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring, struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj, const u8 offset, const u8 len) argument
201 mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, struct mcp251xfd_rx_ring *ring) argument
253 struct mcp251xfd_rx_ring *ring; local
[all...]
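The tail lookup above turns the FIFO user address (an offset into controller RAM) into an object index: subtract the ring's own offset from RAM start, then divide by the object size. With illustrative numbers, not values taken from the driver:

/* ring offset into RAM:  ring->base - MCP251XFD_RAM_START = 0x120
 * RX object size:        ring->obj_size                   = 0x48 (72 bytes)
 * FIFOUA readback:       fifo_ua                          = 0x1b0
 *
 * rx_tail = (0x1b0 - 0x120) / 0x48 = 0x90 / 0x48 = 2
 */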
/linux-master/drivers/bus/mhi/ep/
Makefile
2 mhi_ep-y := main.o mmio.o ring.o sm.o
/linux-master/drivers/net/ethernet/intel/i40e/
i40e_trace.h
107 /* Events related to a vsi & ring */
111 TP_PROTO(struct i40e_ring *ring,
115 TP_ARGS(ring, desc, buf),
125 __field(void*, ring)
128 __string(devname, ring->netdev->name)
132 __entry->ring = ring;
135 __assign_str(devname, ring->netdev->name);
139 "netdev: %s ring: %p desc: %p buf %p",
140 __get_str(devname), __entry->ring,
[all...]
/linux-master/tools/testing/selftests/net/
psock_tpacket.c
66 struct ring { struct
71 void (*walk)(int sock, struct ring *ring);
220 static void walk_v1_v2_rx(int sock, struct ring *ring) argument
227 bug_on(ring->type != PACKET_RX_RING);
239 while (__v1_v2_rx_kernel_ready(ring->rd[frame_num].iov_base,
240 ring->version)) {
241 ppd.raw = ring->rd[frame_num].iov_base;
243 switch (ring
354 get_next_frame(struct ring *ring, int n) argument
369 walk_tx(int sock, struct ring *ring) argument
500 walk_v1_v2(int sock, struct ring *ring) argument
581 walk_v3_rx(int sock, struct ring *ring) argument
622 walk_v3(int sock, struct ring *ring) argument
630 __v1_v2_fill(struct ring *ring, unsigned int blocks) argument
646 __v3_fill(struct ring *ring, unsigned int blocks, int type) argument
667 setup_ring(int sock, struct ring *ring, int version, int type) argument
708 mmap_ring(int sock, struct ring *ring) argument
726 bind_ring(int sock, struct ring *ring) argument
746 walk_ring(int sock, struct ring *ring) argument
751 unmap_ring(int sock, struct ring *ring) argument
805 struct ring ring; local
[all...]
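The selftest's rings come from the standard AF_PACKET sequence: select a version with PACKET_VERSION, size the ring with PACKET_RX_RING (or PACKET_TX_RING), mmap one contiguous region, then bind. A trimmed userspace sketch (block/frame sizes are illustrative; error handling omitted):

#include <linux/if_packet.h>
#include <sys/mman.h>
#include <sys/socket.h>

static void *setup_rx_ring(int sock)
{
        int ver = TPACKET_V2;
        struct tpacket_req req = {
                .tp_block_size = 1 << 12,       /* 4 KiB blocks */
                .tp_block_nr   = 64,
                .tp_frame_size = 1 << 11,       /* 2 KiB frames */
                .tp_frame_nr   = ((1 << 12) / (1 << 11)) * 64,
        };

        setsockopt(sock, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
        setsockopt(sock, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));

        /* One mapping covers every block back to back. */
        return mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
                    PROT_READ | PROT_WRITE, MAP_SHARED, sock, 0);
}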
/linux-master/drivers/net/ethernet/amd/xgbe/
xgbe-desc.c
123 struct xgbe_ring *ring)
128 if (!ring)
131 if (ring->rdata) {
132 for (i = 0; i < ring->rdesc_count; i++) {
133 rdata = XGBE_GET_DESC_DATA(ring, i);
137 kfree(ring->rdata);
138 ring->rdata = NULL;
141 if (ring->rx_hdr_pa.pages) {
142 dma_unmap_page(pdata->dev, ring->rx_hdr_pa.pages_dma,
143 ring
122 xgbe_free_ring(struct xgbe_prv_data *pdata, struct xgbe_ring *ring) argument
215 xgbe_init_ring(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, unsigned int rdesc_count) argument
356 xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, struct xgbe_ring *ring, struct xgbe_ring_data *rdata) argument
390 struct xgbe_ring *ring; local
431 struct xgbe_ring *ring; local
526 struct xgbe_ring *ring = channel->tx_ring; local
[all...]
/linux-master/drivers/net/ethernet/hisilicon/hns3/
hns3_trace.h
68 TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu),
69 TP_ARGS(ring, cur_ntu),
77 __string(devname, ring->tqp->handle->kinfo.netdev->name)
81 __entry->index = ring->tqp->tqp_index;
82 __entry->ntu = ring->next_to_use;
83 __entry->ntc = ring->next_to_clean;
84 __entry->desc_dma = ring->desc_dma_addr,
85 memcpy(__entry->desc, &ring->desc[cur_ntu],
87 __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
99 TP_PROTO(struct hns3_enet_ring *ring),
[all...]
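The radeon_trace.h, i40e_trace.h and hns3_trace.h hits above all share the TRACE_EVENT shape: TP_PROTO/TP_ARGS declare the trace site, TP_STRUCT__entry reserves record fields, TP_fast_assign fills them at the event, and TP_printk formats lazily at read time. A minimal sketch with a hypothetical event name (the usual CREATE_TRACE_POINTS header boilerplate is omitted):

#include <linux/tracepoint.h>

TRACE_EVENT(my_ring_update,                     /* hypothetical event */
        TP_PROTO(void *ring, int ntu, int ntc),
        TP_ARGS(ring, ntu, ntc),

        TP_STRUCT__entry(
                __field(void *, ring)
                __field(int, ntu)               /* next to use */
                __field(int, ntc)               /* next to clean */
        ),

        TP_fast_assign(
                __entry->ring = ring;
                __entry->ntu = ntu;
                __entry->ntc = ntc;
        ),

        /* Only evaluated when the trace buffer is read back. */
        TP_printk("ring: %p ntu: %d ntc: %d",
                  __entry->ring, __entry->ntu, __entry->ntc)
);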
/linux-master/drivers/block/xen-blkback/
H A Dblkback.c73 * to fill the ring, but since this might become too high, specially with
109 * Maximum order of pages to be used for the shared ring between front and
114 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
143 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags);
144 static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
147 static void make_response(struct xen_blkif_ring *ring, u64 id,
168 static int add_persistent_gnt(struct xen_blkif_ring *ring, argument
173 struct xen_blkif *blkif = ring->blkif;
175 if (ring->persistent_gnt_c >= max_pgrants) {
181 new = &ring
205 get_persistent_gnt(struct xen_blkif_ring *ring, grant_ref_t gref) argument
232 put_persistent_gnt(struct xen_blkif_ring *ring, struct persistent_gnt *persistent_gnt) argument
242 free_persistent_gnts(struct xen_blkif_ring *ring) argument
296 struct xen_blkif_ring *ring = container_of(work, typeof(*ring), persistent_purge_work); local
332 purge_persistent_gnt(struct xen_blkif_ring *ring) argument
414 alloc_req(struct xen_blkif_ring *ring) argument
433 free_req(struct xen_blkif_ring *ring, struct pending_req *req) argument
523 blkif_notify_work(struct xen_blkif_ring *ring) argument
539 print_stats(struct xen_blkif_ring *ring) argument
556 struct xen_blkif_ring *ring = arg; local
636 xen_blkbk_free_caches(struct xen_blkif_ring *ring) argument
645 xen_blkbk_unmap_prepare( struct xen_blkif_ring *ring, struct grant_page **pages, unsigned int num, struct gnttab_unmap_grant_ref *unmap_ops, struct page **unmap_pages) argument
674 struct xen_blkif_ring *ring = pending_req->ring; local
706 struct xen_blkif_ring *ring = req->ring; local
731 xen_blkbk_unmap(struct xen_blkif_ring *ring, struct grant_page *pages[], int num) argument
756 xen_blkbk_map(struct xen_blkif_ring *ring, struct grant_page *pages[], int num, bool ro) argument
919 struct xen_blkif_ring *ring = pending_req->ring; local
966 dispatch_discard_io(struct xen_blkif_ring *ring, struct blkif_request *req) argument
1010 dispatch_other_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req) argument
1020 xen_blk_drain_io(struct xen_blkif_ring *ring) argument
1187 __do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) argument
1271 do_block_io_op(struct xen_blkif_ring *ring, unsigned int *eoi_flags) argument
1290 dispatch_rw_block_io(struct xen_blkif_ring *ring, struct blkif_request *req, struct pending_req *pending_req) argument
1489 make_response(struct xen_blkif_ring *ring, u64 id, unsigned short op, int st) argument
[all...]
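The persistent-grant helpers indexed above keep grants in a per-ring red-black tree keyed by grant reference, so get_persistent_gnt() is the textbook rb-tree walk. A reconstruction in that style (the struct here is an illustrative subset, not blkback's own):

#include <linux/rbtree.h>
#include <xen/grant_table.h>

struct pgnt_example {
        grant_ref_t gnt;                /* key: the grant reference */
        struct rb_node node;
};

static struct pgnt_example *find_persistent_gnt(struct rb_root *root,
                                                grant_ref_t gref)
{
        struct rb_node *n = root->rb_node;

        while (n) {
                struct pgnt_example *data =
                        rb_entry(n, struct pgnt_example, node);

                if (gref < data->gnt)
                        n = n->rb_left;
                else if (gref > data->gnt)
                        n = n->rb_right;
                else
                        return data;    /* grant already mapped persistently */
        }
        return NULL;
}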
/linux-master/include/linux/
kvm_dirty_ring.h
7 * kvm_dirty_ring: KVM internal dirty ring structure
16 * limit, vcpu that owns this ring should exit to userspace
19 * @index: index of this dirty ring
45 static inline int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, argument
52 struct kvm_dirty_ring *ring)
62 static inline struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, argument
68 static inline void kvm_dirty_ring_free(struct kvm_dirty_ring *ring) argument
78 int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size);
84 int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
95 struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u3
51 kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring) argument
[all...]
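From userspace, each vcpu's dirty ring is an mmap'ed array of struct kvm_dirty_gfn entries. The consumer collects entries flagged KVM_DIRTY_GFN_F_DIRTY, flags them KVM_DIRTY_GFN_F_RESET, and then asks KVM to recycle them. A hedged sketch of that harvest loop (mark_dirty_in_bitmap is a hypothetical consumer; the load-acquire/store-release ordering a real consumer needs is omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>

extern void mark_dirty_in_bitmap(__u32 slot, __u64 offset); /* hypothetical */

static void harvest_dirty_ring(int vm_fd, struct kvm_dirty_gfn *ring,
                               __u32 size, __u32 *next)
{
        while (ring[*next % size].flags & KVM_DIRTY_GFN_F_DIRTY) {
                struct kvm_dirty_gfn *e = &ring[*next % size];

                mark_dirty_in_bitmap(e->slot, e->offset);
                e->flags |= KVM_DIRTY_GFN_F_RESET;  /* hand the entry back */
                (*next)++;
        }

        /* Let KVM reuse everything flagged for reset. */
        ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
}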
