Searched refs:ring (Results 51 - 75 of 807) sorted by last modified time


/linux-master/drivers/net/ethernet/wangxun/ngbe/
ngbe_main.c 123 /* set default ring sizes */
225 if (q_vector->tx.ring && q_vector->rx.ring)
ngbe_ethtool.c 48 struct ethtool_ringparam *ring,
57 new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD);
60 new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD);
47 ngbe_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) argument
/linux-master/drivers/net/ethernet/wangxun/libwx/
wx_type.h 143 /* ring assignment */
338 #define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */
867 /* iterator for handling rings in ring container */
869 for (posm = (headm).ring; posm; posm = posm->next)
872 struct wx_ring *ring; /* pointer to linked list of rings */ member in struct:wx_ring_container
876 u8 itr; /* current ITR setting for ring */
879 struct wx_ring *next; /* pointer to next ring in q_vector */
881 struct net_device *netdev; /* netdev ring belongs to */
884 void *desc; /* descriptor ring memory */
890 dma_addr_t dma; /* phys. address of descriptor ring */
[all...]
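A note on the wx_type.h hits: lines 869-879 describe a ring container whose rings form a singly linked list (head pointer at line 872, next link at line 879), walked by the iterator body at line 869. Below is a minimal user-space sketch of that pattern; the types are pared-down stand-ins and the macro name is assumed, since only the loop body appears in these results.

    /* Sketch of the list walk at wx_type.h:869; stand-in types only. */
    #include <stdio.h>

    struct wx_ring {
            struct wx_ring *next;   /* next ring in q_vector (line 879) */
            int reg_idx;            /* illustrative payload */
    };

    struct wx_ring_container {
            struct wx_ring *ring;   /* head of the linked list (line 872) */
    };

    /* Assumed name for: for (posm = (headm).ring; posm; posm = posm->next) */
    #define wx_for_each_ring(posm, headm) \
            for (posm = (headm).ring; posm; posm = posm->next)

    int main(void)
    {
            struct wx_ring r1 = { .next = NULL, .reg_idx = 1 };
            struct wx_ring r0 = { .next = &r1, .reg_idx = 0 };
            struct wx_ring_container rx = { .ring = &r0 };
            struct wx_ring *pos;

            wx_for_each_ring(pos, rx)
                    printf("ring reg_idx=%d\n", pos->reg_idx);
            return 0;
    }

An intrusive next pointer lets one q_vector service a variable number of rings without a separate array, which is why the container stores only a head pointer.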
wx_lib.h 11 u16 wx_desc_unused(struct wx_ring *ring);
wx_hw.c 1292 struct wx_ring *ring = wx->rx_ring[i]; local
1294 j = ring->reg_idx;
1419 void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring) argument
1421 u8 reg_idx = ring->reg_idx;
1442 static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring) argument
1444 u8 reg_idx = ring->reg_idx;
1479 struct wx_ring *ring)
1482 u8 reg_idx = ring->reg_idx;
1483 u64 tdba = ring->dma;
1496 ring
1478 wx_configure_tx_ring(struct wx *wx, struct wx_ring *ring) argument
1516 wx_configure_rx_ring(struct wx *wx, struct wx_ring *ring) argument
2181 wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring) argument
2192 wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring) argument
[all...]
/linux-master/drivers/net/ethernet/marvell/octeon_ep_vf/
octep_vf_mbox.c 25 int ring = 0; local
33 oct->hw_ops.setup_mbox_regs(oct, ring);
/linux-master/drivers/net/ethernet/marvell/octeon_ep/
octep_pfvf_mbox.c 243 int ring = 0; local
249 ring = rings_per_vf * i;
250 oct->mbox[ring] = vzalloc(sizeof(*oct->mbox[ring]));
252 if (!oct->mbox[ring])
255 memset(oct->mbox[ring], 0, sizeof(struct octep_mbox));
257 mutex_init(&oct->mbox[ring]->lock);
258 INIT_WORK(&oct->mbox[ring]->wk.work, octep_pfvf_mbox_work);
259 oct->mbox[ring]->wk.ctxptr = oct->mbox[ring];
282 int i = 0, ring = 0, vf_srn = 0; local
[all...]
/linux-master/drivers/net/ethernet/hisilicon/hns3/
hns3_enet.c 428 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
434 } else if (tqp_vectors->rx_group.ring) {
439 } else if (tqp_vectors->tx_group.ring) {
751 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; local
753 while (ring) {
758 ring->tqp->tqp_index);
763 ring = ring
962 hns3_tx_spare_space(struct hns3_enet_ring *ring) argument
982 hns3_tx_spare_update(struct hns3_enet_ring *ring) argument
997 hns3_can_use_tx_bounce(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 space) argument
1015 hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 space) argument
1032 hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) argument
1089 hns3_tx_spare_alloc(struct hns3_enet_ring *ring, unsigned int size, dma_addr_t *dma, u32 *cb_len) argument
1116 hns3_tx_spare_rollback(struct hns3_enet_ring *ring, u32 len) argument
1128 hns3_tx_spare_reclaim_cb(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) argument
1579 hns3_handle_vlan_info(struct hns3_enet_ring *ring, struct sk_buff *skb, struct hns3_desc_param *param) argument
1604 hns3_handle_csum_partial(struct hns3_enet_ring *ring, struct sk_buff *skb, struct hns3_desc_cb *desc_cb, struct hns3_desc_param *param) argument
1648 hns3_fill_skb_desc(struct hns3_enet_ring *ring, struct sk_buff *skb, struct hns3_desc *desc, struct hns3_desc_cb *desc_cb) argument
1680 hns3_fill_desc(struct hns3_enet_ring *ring, dma_addr_t dma, unsigned int size) argument
1723 hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv, unsigned int type) argument
1893 hns3_skb_linearize(struct hns3_enet_ring *ring, struct sk_buff *skb, unsigned int bd_num) argument
1922 hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring, struct net_device *netdev, struct sk_buff *skb) argument
1970 hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig) argument
2010 hns3_fill_skb_to_desc(struct hns3_enet_ring *ring, struct sk_buff *skb, unsigned int type) argument
2044 hns3_tx_push_bd(struct hns3_enet_ring *ring, int num) argument
2073 hns3_tx_mem_doorbell(struct hns3_enet_ring *ring) argument
2091 hns3_tx_doorbell(struct hns3_enet_ring *ring, int num, bool doorbell) argument
2144 hns3_handle_tx_bounce(struct hns3_enet_ring *ring, struct sk_buff *skb) argument
2197 hns3_handle_tx_sgl(struct hns3_enet_ring *ring, struct sk_buff *skb) argument
2249 hns3_handle_desc_filling(struct hns3_enet_ring *ring, struct sk_buff *skb) argument
2269 hns3_handle_skb_desc(struct hns3_enet_ring *ring, struct sk_buff *skb, struct hns3_desc_cb *desc_cb, int next_to_use_head) argument
2297 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; local
2490 hns3_fetch_stats(struct rtnl_link_stats64 *stats, struct hns3_enet_ring *ring, bool is_tx) argument
2540 struct hns3_enet_ring *ring; local
3363 hns3_alloc_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) argument
3400 hns3_free_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb, int budget) argument
3416 hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) argument
3427 hns3_unmap_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) argument
3441 hns3_buffer_detach(struct hns3_enet_ring *ring, int i) argument
3448 hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i, int budget) argument
3460 hns3_free_buffers(struct hns3_enet_ring *ring) argument
3469 hns3_free_desc(struct hns3_enet_ring *ring) argument
3482 hns3_alloc_desc(struct hns3_enet_ring *ring) argument
3494 hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb) argument
3515 hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) argument
3530 hns3_alloc_ring_buffers(struct hns3_enet_ring *ring) argument
3549 hns3_replace_buffer(struct hns3_enet_ring *ring, int i, struct hns3_desc_cb *res_cb) argument
3560 hns3_reuse_buffer(struct hns3_enet_ring *ring, int i) argument
3574 hns3_nic_reclaim_desc(struct hns3_enet_ring *ring, int *bytes, int *pkts, int budget) argument
3626 hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget) argument
3664 hns3_desc_unused(struct hns3_enet_ring *ring) argument
3676 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count) argument
3719 hns3_handle_rx_copybreak(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) argument
3747 hns3_nic_reuse_page(struct sk_buff *skb, int i, struct hns3_enet_ring *ring, int pull_len, struct hns3_desc_cb *desc_cb) argument
3869 hns3_checksum_complete(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 ptype, u16 csum) argument
3919 hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 l234info, u32 bd_base_info, u32 ol_info, u16 csum) argument
3956 hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) argument
3964 hns3_parse_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, u32 l234info, u16 *vlan_tag) argument
4017 hns3_rx_ring_move_fw(struct hns3_enet_ring *ring) argument
4028 hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, unsigned char *va) argument
4081 hns3_add_frag(struct hns3_enet_ring *ring) argument
4142 hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 l234info, u32 bd_base_info, u32 ol_info, u16 csum) argument
4184 hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, struct sk_buff *skb, u32 rss_hash, u32 l234info, u32 ol_info) argument
4232 hns3_handle_rx_vlan_tag(struct hns3_enet_ring *ring, struct hns3_desc *desc, struct sk_buff *skb, u32 l234info) argument
4251 hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) argument
4321 hns3_handle_rx_bd(struct hns3_enet_ring *ring) argument
4393 hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget, void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *)) argument
4467 struct hns3_enet_ring *ring; local
4524 struct hns3_enet_ring *ring; local
4596 hns3_add_ring_to_group(struct hns3_enet_ring_group *group, struct hns3_enet_ring *ring) argument
4872 struct hns3_enet_ring *ring; local
4936 hns3_alloc_page_pool(struct hns3_enet_ring *ring) argument
4958 hns3_alloc_ring_memory(struct hns3_enet_ring *ring) argument
4998 hns3_fini_ring(struct hns3_enet_ring *ring) argument
5051 hns3_init_ring_hw(struct hns3_enet_ring *ring) argument
5478 hns3_clear_tx_ring(struct hns3_enet_ring *ring) argument
5489 hns3_clear_rx_ring(struct hns3_enet_ring *ring) argument
5526 hns3_force_clear_rx_ring(struct hns3_enet_ring *ring) argument
5550 struct hns3_enet_ring *ring; local
[all...]
/linux-master/drivers/infiniband/hw/hns/
hns_roce_hw_v2.c 1146 struct hns_roce_v2_cmq_ring *ring)
1148 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
1150 ring->desc = dma_alloc_coherent(hr_dev->dev, size,
1151 &ring->desc_dma_addr, GFP_KERNEL);
1152 if (!ring->desc)
1159 struct hns_roce_v2_cmq_ring *ring)
1162 ring->desc_num * sizeof(struct hns_roce_cmq_desc),
1163 ring->desc, ring->desc_dma_addr);
1165 ring
1145 hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev, struct hns_roce_v2_cmq_ring *ring) argument
1158 hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev, struct hns_roce_v2_cmq_ring *ring) argument
[all...]
/linux-master/include/trace/events/
kvm.h 379 TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
380 TP_ARGS(ring, slot, offset),
391 __entry->index = ring->index;
392 __entry->dirty_index = ring->dirty_index;
393 __entry->reset_index = ring->reset_index;
398 TP_printk("ring %d: dirty 0x%x reset 0x%x "
406 TP_PROTO(struct kvm_dirty_ring *ring),
407 TP_ARGS(ring),
416 __entry->index = ring->index;
417 __entry->dirty_index = ring
[all...]
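A note on the kvm.h hits: the tracepoints capture three cursors of a dirty ring, index, dirty_index and reset_index. A hedged sketch of the usual two-cursor arithmetic follows; the semantics (dirty_index advances as entries are published, reset_index as userspace collects them) are inferred from the field names, not stated in these results.

    /* Hedged sketch of the cursors traced above; semantics assumed. */
    #include <stdio.h>
    #include <stdint.h>

    struct dirty_ring_sketch {
            uint32_t dirty_index;   /* bumped when an entry is published */
            uint32_t reset_index;   /* bumped when userspace collects one */
    };

    /* Unsigned subtraction stays correct across wraparound. */
    static uint32_t entries_pending(const struct dirty_ring_sketch *r)
    {
            return r->dirty_index - r->reset_index;
    }

    int main(void)
    {
            struct dirty_ring_sketch r = { .dirty_index = 7, .reset_index = 4 };
            printf("pending=%u\n", entries_pending(&r));  /* pending=3 */
            return 0;
    }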
/linux-master/drivers/gpu/drm/xe/
xe_lrc_types.h 14 * struct xe_lrc - Logical ring context (LRC) and submission ring object
18 * @bo: buffer object (memory) for logical ring context, per process HW
19 * status page, and submission ring.
29 /** @ring: submission ring state */
31 /** @ring.size: size of submission ring */
33 /** @ring.tail: tail of submission ring */
37 } ring; member in struct:xe_lrc
[all...]
xe_lrc.c 599 return lrc->ring.size;
660 DECL_MAP_ADDR_HELPERS(ring)
752 lrc->ring.size = ring_size;
753 lrc->ring.tail = 0;
791 xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
793 RING_CTL_SIZE(lrc->ring.size) | RING_VALID);
850 const u32 tail = lrc->ring.tail;
851 const u32 size = lrc->ring.size;
856 static void __xe_lrc_write_ring(struct xe_lrc *lrc, struct iosys_map ring, argument
861 iosys_map_incr(&ring, lr
869 struct iosys_map ring; local
[all...]
xe_guc_submit.c 622 FIELD_PREP(WQ_RING_TAIL_MASK, q->lrc->ring.tail / sizeof(u64));
627 wqi[i++] = lrc->ring.tail / sizeof(u64);
661 xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
1209 q->lrc[0].ring.size / MAX_JOB_SIZE_BYTES, 64,
1493 xe_lrc_set_ring_head(q->lrc + i, q->lrc[i].ring.tail);
xe_exec_queue.c 727 * xe_exec_queue_ring_full() - Whether an exec_queue's ring is full
730 * Return: True if the exec_queue's ring is full, false otherwise.
735 s32 max_job = lrc->ring.size / MAX_JOB_SIZE_BYTES;
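A note on the xe hits: xe_lrc_types.h documents a submission ring carrying size and tail, xe_exec_queue.c derives the job capacity as ring.size / MAX_JOB_SIZE_BYTES, and xe_guc_submit.c reports the tail in u64 units. A sketch tying the three together; the struct is a stand-in and the constant's value is an assumed placeholder.

    /* Illustrative capacity math from the xe hits above. */
    #include <stdio.h>
    #include <stdint.h>

    #define MAX_JOB_SIZE_BYTES 256          /* assumed placeholder value */

    struct lrc_sketch {
            struct {
                    uint32_t size;          /* size of submission ring */
                    uint32_t tail;          /* tail of submission ring */
            } ring;
    };

    int main(void)
    {
            struct lrc_sketch lrc = { .ring = { .size = 16 * 1024, .tail = 4096 } };
            int32_t max_job = lrc.ring.size / MAX_JOB_SIZE_BYTES;

            /* xe_guc_submit.c:627 divides the tail by sizeof(u64). */
            printf("max_job=%d tail_qwords=%u\n",
                   max_job, (unsigned)(lrc.ring.tail / sizeof(uint64_t)));
            return 0;
    }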
/linux-master/drivers/gpu/drm/radeon/
rv515.c 51 void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) argument
55 r = radeon_ring_lock(rdev, ring, 64);
59 radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
60 radeon_ring_write(ring,
65 radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
66 radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
67 radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
68 radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
69 radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
70 radeon_ring_write(ring,
[all...]
radeon_ttm.c 102 if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
171 if (!rdev->ring[ridx].ready) {
172 DRM_ERROR("Trying to move memory with ring turned off.\n");
226 if (rdev->ring[radeon_copy_ring_index(rdev)].ready &&
radeon_ring.c 39 * Most engines on the GPU are fed via ring buffers. Ring
45 * pointers are equal, the ring is idle. When the host
46 * writes commands to the ring buffer, it increments the
50 static void radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
53 * radeon_ring_supports_scratch_reg - check if the ring supports
57 * @ring: radeon_ring structure holding ring information
59 * Check if a specific ring supports writing to scratch registers (all asics).
60 * Returns true if the ring supports writing to scratch regs, false if not.
63 struct radeon_ring *ring)
62 radeon_ring_supports_scratch_reg(struct radeon_device *rdev, struct radeon_ring *ring) argument
83 radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring) argument
109 radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw) argument
145 radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw) argument
169 radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring, bool hdp_flush) argument
200 radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring, bool hdp_flush) argument
214 radeon_ring_undo(struct radeon_ring *ring) argument
227 radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring) argument
241 radeon_ring_lockup_update(struct radeon_device *rdev, struct radeon_ring *ring) argument
254 radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring) argument
285 radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, uint32_t **data) argument
349 radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, unsigned size, uint32_t *data) argument
383 radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size, unsigned rptr_offs, u32 nop) argument
439 radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring) argument
469 struct radeon_ring *ring = m->private; local
549 radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring) argument
[all...]
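A note on the radeon_ring.c hits: the file comment at lines 39-46 describes the scheme these functions implement: the host bumps a write pointer as it queues commands, the GPU bumps a read pointer as it fetches them, and equal pointers mean the ring is idle. A hedged sketch of that bookkeeping; the masking follows the common power-of-two ring idiom and is not copied from the driver.

    /* Sketch of the rptr/wptr scheme described at radeon_ring.c:39-46. */
    #include <stdio.h>
    #include <stdint.h>

    struct ring_sketch {
            uint32_t rptr, wptr;    /* read/write cursors, in dwords */
            uint32_t mask;          /* ring size (power of two) - 1 */
    };

    static int ring_idle(const struct ring_sketch *r)
    {
            return r->rptr == r->wptr;      /* pointers equal: idle */
    }

    /* Dwords the host may still queue without overtaking the GPU;
     * one slot stays free so full and empty remain distinguishable. */
    static uint32_t ring_free_dw(const struct ring_sketch *r)
    {
            return (r->rptr - r->wptr - 1) & r->mask;
    }

    int main(void)
    {
            struct ring_sketch r = { .rptr = 0, .wptr = 0, .mask = 1023 };
            printf("idle=%d free=%u\n", ring_idle(&r), ring_free_dw(&r));
            r.wptr = (r.wptr + 8) & r.mask;     /* host queued 8 dwords */
            printf("idle=%d free=%u\n", ring_idle(&r), ring_free_dw(&r));
            return 0;
    }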
radeon_pm.c 268 struct radeon_ring *ring = &rdev->ring[i]; local
269 if (!ring->ready) {
1150 struct radeon_ring *ring = &rdev->ring[i]; local
1151 if (ring->ready)
1232 /* XXX select vce level based on ring/task */
1867 struct radeon_ring *ring = &rdev->ring[i]; local
1869 if (ring
[all...]
radeon_ib.c 40 * command ring and the hw will fetch the commands from the IB
43 * put in IBs for execution by the requested ring.
51 * @ring: ring index the IB is associated with
60 int radeon_ib_get(struct radeon_device *rdev, int ring, argument
74 ib->ring = ring;
107 * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
114 * Schedule an IB on the associated ring (all asics).
117 * On SI, there are two parallel engines fed from the primary ring,
130 struct radeon_ring *ring = &rdev->ring[ib->ring]; local
264 struct radeon_ring *ring = &rdev->ring[i]; local
[all...]
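A note on the radeon_ib.c hits: the file comment describes indirect buffers: command dwords live in a separate IB, and the ring only carries a short request telling the hardware to fetch from that buffer. A hedged sketch of the flow; the types, names and the emit step are illustrative, not the driver's.

    /* Illustrative IB flow per the radeon_ib.c comment above. */
    #include <stdio.h>
    #include <stdint.h>

    struct ib_sketch {
            uint64_t gpu_addr;      /* where the hw fetches the commands */
            uint32_t length_dw;     /* command dwords in the IB */
            int      ring;          /* ring index the IB is associated with */
    };

    /* Stand-in for emitting the fetch request on the ring. */
    static void ring_emit_indirect(const struct ib_sketch *ib)
    {
            printf("ring %d: fetch %u dw from 0x%llx\n",
                   ib->ring, ib->length_dw, (unsigned long long)ib->gpu_addr);
    }

    int main(void)
    {
            struct ib_sketch ib = { .gpu_addr = 0x100000, .length_dw = 64,
                                    .ring = 0 };
            ring_emit_indirect(&ib);        /* schedule the IB on its ring */
            return 0;
    }

Keeping bulk commands out of the ring is what lets a small, fixed-size ring feed arbitrarily large command streams.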
radeon_fence.c 52 * are no longer in use by the associated ring on the GPU and
63 * @ring: ring index the fence is associated with
67 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring) argument
69 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
83 * @ring: ring index the fence is associated with
88 static u32 radeon_fence_read(struct radeon_device *rdev, int ring) argument
90 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
108 * @ring
112 radeon_fence_schedule_check(struct radeon_device *rdev, int ring) argument
133 radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring) argument
197 radeon_fence_activity(struct radeon_device *rdev, int ring) argument
271 int ring; local
319 radeon_fence_process(struct radeon_device *rdev, int ring) argument
339 radeon_fence_seq_signaled(struct radeon_device *rdev, u64 seq, unsigned int ring) argument
357 unsigned int ring = fence->ring; local
629 radeon_fence_wait_next(struct radeon_device *rdev, int ring) argument
659 radeon_fence_wait_empty(struct radeon_device *rdev, int ring) argument
719 radeon_fence_count_emitted(struct radeon_device *rdev, int ring) argument
808 radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) argument
858 radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring) argument
887 int ring; local
906 int ring, r; local
934 radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring) argument
[all...]
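A note on the radeon_fence.c hits: a fence carries the sequence number emitted on its ring (radeon_fence_write/read above move that value to and from the hardware), and radeon_fence_seq_signaled() at line 339 decides completion by comparing sequences. A hedged sketch of that test; 64-bit sequences sidestep wraparound here, while the driver's real bookkeeping is more involved.

    /* Illustrative sequence test behind radeon_fence_seq_signaled(). */
    #include <stdio.h>
    #include <stdint.h>

    static int fence_signaled(uint64_t fence_seq, uint64_t last_seq)
    {
            return last_seq >= fence_seq;   /* hw has passed this fence */
    }

    int main(void)
    {
            uint64_t last_seq = 41;         /* last value read back */
            printf("%d\n", fence_signaled(42, last_seq));   /* 0: pending  */
            last_seq = 42;                  /* hw wrote the fence's seq */
            printf("%d\n", fence_signaled(42, last_seq));   /* 1: signaled */
            return 0;
    }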
radeon.h 137 /* internal ring indices */
138 /* r1xx+ has gfx CP ring */
145 /* R600+ has an async dma ring */
147 /* cayman add a second async dma ring */
367 /* sync_seq is protected by ring emission lock */
380 unsigned ring; member in struct:radeon_fence
386 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
389 void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
390 int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
391 void radeon_fence_process(struct radeon_device *rdev, int ring);
785 int ring; member in struct:radeon_ib
795 volatile uint32_t *ring; member in struct:radeon_ring
931 volatile uint32_t *ring; member in struct:r600_ih
1044 u32 ring; member in struct:radeon_cs_parser
1879 const struct radeon_asic_ring *ring[RADEON_NUM_RINGS]; member in struct:radeon_asic
2370 struct radeon_ring ring[RADEON_NUM_RINGS]; member in struct:radeon_device
2674 radeon_ring_write(struct radeon_ring *ring, uint32_t v) argument
[all...]
r600.c 1916 * @ring: radeon_ring structure holding ring information
1921 bool r600_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) argument
1928 radeon_ring_lockup_update(rdev, ring);
1931 return radeon_ring_test_lockup(rdev, ring);
2429 rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
2619 struct radeon_ring *ring)
2624 rptr = rdev->wb.wb[ring->rptr_offs/4];
2632 struct radeon_ring *ring)
2638 struct radeon_ring *ring)
2618 r600_gfx_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
2631 r600_gfx_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
2637 r600_gfx_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
2688 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
2718 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
2780 r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size) argument
2802 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
2823 r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) argument
2870 struct radeon_ring *ring = &rdev->ring[fence->ring]; local
2926 r600_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) argument
2972 struct radeon_ring *ring = &rdev->ring[ring_index]; local
3089 struct radeon_ring *ring; local
3110 struct radeon_ring *ring; local
3368 struct radeon_ring *ring = &rdev->ring[ib->ring]; local
3396 r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) argument
[all...]
r420.c 210 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
219 r = radeon_ring_lock(rdev, ring, 8);
221 radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
222 radeon_ring_write(ring, rdev->config.r300.resync_scratch);
223 radeon_ring_write(ring, 0xDEADBEEF);
224 radeon_ring_unlock_commit(rdev, ring, false);
230 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
235 r = radeon_ring_lock(rdev, ring,
[all...]
r300.c 52 * However, scheduling such write to the ring seems harmless, i suspect
215 struct radeon_ring *ring = &rdev->ring[fence->ring]; local
220 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
221 radeon_ring_write(ring, 0);
222 radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
223 radeon_ring_write(ring, 0);
225 radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
226 radeon_ring_write(ring, R300_RB3D_DC_FLUS
246 r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) argument
[all...]
r100.c 854 * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
856 * @ring: ring buffer struct for emitting packets
858 static void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring) argument
860 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
861 radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
863 radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
864 radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
872 struct radeon_ring *ring = &rdev->ring[fenc local
891 r100_semaphore_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait) argument
907 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
993 r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) argument
1073 r100_gfx_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
1086 r100_gfx_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
1092 r100_gfx_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring) argument
1123 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
2527 r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring) argument
2953 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
3649 r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) argument
3691 struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]; local
3704 r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring) argument
[all...]

