Lines Matching refs:ring

47  * are no longer in use by the associated ring on the GPU and
58 * @ring: ring index the fence is associated with
62 static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
64 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
76 * @ring: ring index the fence is associated with
81 static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
83 struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
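The fragments at lines 62-83 show radeon_fence_write() and radeon_fence_read() both resolving a per-ring struct radeon_fence_driver. Below is a minimal userland model of that bookkeeping, assuming the driver's usual scheme of preferring a CPU-mapped writeback slot and falling back to a scratch register; the names, the NUM_RINGS value, and the scratch handling are simplified stand-ins, not the driver's actual code.

#include <stddef.h>
#include <stdint.h>

#define NUM_RINGS 5              /* stand-in for RADEON_NUM_RINGS */

struct fence_drv {
	uint32_t *cpu_addr;      /* CPU view of the writeback slot (line 835) */
	uint64_t  gpu_addr;      /* GPU address of the same slot (line 836) */
	uint32_t  scratch_value; /* stand-in for a scratch register */
};

static struct fence_drv fence_drv[NUM_RINGS];

static void fence_write(uint32_t seq, int ring)
{
	struct fence_drv *drv = &fence_drv[ring];

	if (drv->cpu_addr != NULL)
		*drv->cpu_addr = seq;      /* writeback memory */
	else
		drv->scratch_value = seq;  /* scratch register fallback */
}

static uint32_t fence_read(int ring)
{
	struct fence_drv *drv = &fence_drv[ring];

	return drv->cpu_addr != NULL ? *drv->cpu_addr : drv->scratch_value;
}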
95 * radeon_fence_emit - emit a fence on the requested ring
99 * @ring: ring index the fence is associated with
101 * Emits a fence command on the requested ring (all asics).
106 int ring)
108 /* we are protected by the ring emission mutex */
115 (*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
116 (*fence)->ring = ring;
117 radeon_fence_ring_emit(rdev, ring, *fence);
118 CTR2(KTR_DRM, "radeon fence: emit (ring=%d, seq=%d)", ring, (*fence)->seq);
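Lines 106-118 outline the emit path: take the next per-ring sequence number, record it and the ring in the new fence object, and have the asic write that number back once the fence command retires. A compressed sketch, reusing NUM_RINGS from the sketch above; the return value and the radeon_fence_ring_emit() call are only noted in comments.

#include <stdint.h>
#include <stdlib.h>

struct fence {
	uint64_t seq;
	int      ring;
};

static uint64_t sync_seq[NUM_RINGS];     /* newest emitted seq per ring */

static int fence_emit(struct fence **fence, int ring)
{
	*fence = malloc(sizeof(**fence));
	if (*fence == NULL)
		return -1;                       /* the driver returns -ENOMEM */
	(*fence)->seq  = ++sync_seq[ring];       /* line 115 */
	(*fence)->ring = ring;                   /* line 116 */
	/* line 117: radeon_fence_ring_emit(rdev, ring, *fence) would now ask
	 * the asic to write this seq back once the preceding work completes */
	return 0;
}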
126 * @ring: ring index the fence is associated with
131 void radeon_fence_process(struct radeon_device *rdev, int ring)
158 last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
160 last_emitted = rdev->fence_drv[ring].sync_seq[ring];
161 seq = radeon_fence_read(rdev, ring);
185 } while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
188 rdev->fence_drv[ring].last_activity = jiffies;
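The core of radeon_fence_process() (lines 158-185) is widening the 32-bit value the GPU writes back into the driver's 64-bit sequence space. A self-contained model of just that step; the surrounding retry loop with atomic64_xchg() and the plausibility checks are summarized in the comments.

#include <stdint.h>

/* hw_seq:       32-bit value just read back from the ring's fence slot
 * last_seq:     newest 64-bit sequence already seen as signaled
 * last_emitted: newest 64-bit sequence ever emitted on the ring */
static uint64_t extend_seq(uint32_t hw_seq, uint64_t last_seq,
    uint64_t last_emitted)
{
	uint64_t seq = (uint64_t)hw_seq | (last_seq & 0xffffffff00000000ULL);

	if (seq < last_seq) {
		/* the low 32 bits wrapped: borrow the upper half from the
		 * newest emitted sequence instead */
		seq = (uint64_t)hw_seq | (last_emitted & 0xffffffff00000000ULL);
	}
	/* the driver then discards results outside (last_seq, last_emitted]
	 * as noise and publishes the winner with atomic64_xchg() in a retry
	 * loop (line 185) */
	return seq;
}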
211 * @ring: ring index the fence is associated with
221 u64 seq, unsigned ring)
223 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
227 radeon_fence_process(rdev, ring);
228 if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
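Lines 221-228 show the two-step signaled test: a cheap compare against the cached last_seq, then one radeon_fence_process() pass and a re-check. Sketch below; read_last_seq() and fence_process() are illustrative stand-ins for the driver's atomic64 read and processing call.

#include <stdbool.h>
#include <stdint.h>

/* stand-ins for the driver's cached atomic64 last_seq and for
 * radeon_fence_process(); illustrative only */
uint64_t read_last_seq(int ring);
void     fence_process(int ring);

static bool fence_seq_signaled(uint64_t seq, int ring)
{
	if (read_last_seq(ring) >= seq)
		return true;                     /* fast path, no HW access */
	fence_process(ring);                     /* refresh from writeback */
	return read_last_seq(ring) >= seq;       /* line 228: re-check */
}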
250 if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
262 * @ring: ring index the fence is associated with
264 * @lock_ring: whether the ring should be locked or not
271 * -EDEADLK is returned when a GPU lockup has been detected and the ring is
276 unsigned ring, bool intr, bool lock_ring)
284 while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
285 if (!rdev->ring[ring].ready) {
290 if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
292 timeout = rdev->fence_drv[ring].last_activity - timeout;
299 seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
301 last_activity = rdev->fence_drv[ring].last_activity;
303 CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, seq=%d)",
304 ring, seq);
306 radeon_irq_kms_sw_irq_get(rdev, ring);
310 target_seq, ring))) {
330 rdev, target_seq, ring);
338 radeon_irq_kms_sw_irq_put(rdev, ring);
342 CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, seq=%d)",
343 ring, seq);
355 if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
364 if (last_activity != rdev->fence_drv[ring].last_activity) {
371 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
381 /* mark the ring as not ready any more */
382 rdev->ring[ring].ready = false;
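Lines 276-382 are the heart of radeon_fence_wait_seq(): wait in bounded slices and, when a slice expires without the sequence or the per-ring last_activity timestamp moving, ask the asic whether the ring is locked up before giving up with -EDEADLK. A condensed sketch of that control flow; every helper here is a stand-in for the corresponding driver call (the sw IRQ get/put at lines 306/338, the lockup test at line 371), and details such as interruptible waits, the lock_ring parameter, and timeout accounting are omitted.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

/* stand-ins for driver internals; names are assumptions for illustration */
uint64_t      read_last_seq(int ring);
unsigned long read_last_activity(int ring);
void          sw_irq_get(int ring);
void          sw_irq_put(int ring);
void          wait_for_seq_or_timeout(uint64_t target_seq, int ring);
bool          ring_is_lockup(int ring);
void          mark_ring_not_ready(int ring);

static int wait_seq(uint64_t target_seq, int ring)
{
	while (target_seq > read_last_seq(ring)) {
		uint64_t      seq      = read_last_seq(ring);
		unsigned long activity = read_last_activity(ring);

		sw_irq_get(ring);                       /* line 306 */
		wait_for_seq_or_timeout(target_seq, ring);
		sw_irq_put(ring);                       /* line 338 */

		if (seq != read_last_seq(ring))
			continue;   /* line 355: sequence advanced, keep going */
		if (activity != read_last_activity(ring))
			continue;   /* line 364: some other progress was made */
		if (ring_is_lockup(ring)) {             /* line 371 */
			mark_ring_not_ready(ring);      /* lines 381-382 */
			return -EDEADLK;
		}
	}
	return 0;
}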
418 fence->ring, intr, true);
439 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
445 * Wait for the requested sequence number(s) to be written by any ring
446 * (all asics). Sequence number array is indexed by ring id.
456 unsigned i, ring = RADEON_NUM_RINGS;
470 /* For lockup detection just pick the lowest ring we are
473 if (i < ring) {
474 ring = i;
479 if (ring == RADEON_NUM_RINGS) {
495 CTR2(KTR_DRM, "radeon fence: wait begin (ring=%d, target_seq=%d)",
496 ring, target_seq[ring]);
541 CTR2(KTR_DRM, "radeon fence: wait end (ring=%d, target_seq=%d)",
542 ring, target_seq[ring]);
566 if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
569 (uintmax_t)target_seq[ring]);
576 /* mark the ring as not ready any more */
577 rdev->ring[ring].ready = false;
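For the any-ring wait (lines 456-479), lockup detection needs a single ring to watch, and the fragments show the lowest-indexed ring with a non-zero target being picked, with RADEON_NUM_RINGS doubling as the "nothing to wait for" marker. A standalone sketch of that selection; NUM_RINGS is a stand-in value.

#include <stdint.h>

#define NUM_RINGS 5   /* stand-in for RADEON_NUM_RINGS */

static unsigned pick_lockup_ring(const uint64_t target_seq[NUM_RINGS])
{
	unsigned i, ring = NUM_RINGS;

	for (i = 0; i < NUM_RINGS; i++) {
		if (target_seq[i] != 0 && i < ring)
			ring = i;                  /* lines 473-474 */
	}
	return ring;   /* NUM_RINGS means "nothing to wait for" (line 479) */
}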
588 * radeon_fence_wait_any - wait for a fence to signal on any ring
595 * array is indexed by ring id. @intr selects whether to use
634 * @ring: ring index the fence is associated with
636 * Wait for the next fence on the requested ring to signal (all asics).
638 * Caller must hold ring lock.
640 int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
644 seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
645 if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
650 return radeon_fence_wait_seq(rdev, seq, ring, false, false);
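Lines 640-650: waiting for the next fence means targeting last_seq + 1, and bailing out early when that would exceed the newest emitted sequence. Sketch below; the helpers are stand-ins and the early-return errno is from memory of this driver generation, not from the fragments.

#include <errno.h>
#include <stdint.h>

uint64_t read_last_seq(int ring);    /* stand-ins, as above */
uint64_t read_sync_seq(int ring);    /* newest emitted seq on this ring */
int      wait_seq(uint64_t target_seq, int ring);

static int wait_next_locked(int ring)
{
	uint64_t seq = read_last_seq(ring) + 1ULL;   /* line 644 */

	if (seq >= read_sync_seq(ring))
		return -ENOENT;     /* nothing outstanding beyond last_seq;
				     * the exact errno is an assumption */
	return wait_seq(seq, ring);                  /* line 650 */
}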
657 * @ring: ring index the fence is associated with
659 * Wait for all fences on the requested ring to signal (all asics).
661 * Caller must hold ring lock.
663 int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
665 uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
668 r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
673 dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
674 ring, r);
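Lines 663-674: draining a ring is the same wait with the newest emitted sequence as the target, plus an error message if it fails. Sketch, with fprintf standing in for dev_err() and the same stand-in helpers as above.

#include <stdint.h>
#include <stdio.h>

uint64_t read_sync_seq(int ring);                  /* stand-ins, as above */
int      wait_seq(uint64_t target_seq, int ring);

static int wait_empty_locked(int ring)
{
	uint64_t seq = read_sync_seq(ring);   /* line 665: newest emitted seq */
	int r = wait_seq(seq, ring);          /* line 668 */

	if (r != 0)
		fprintf(stderr,
		    "error waiting for ring[%d] to become idle (%d)\n",
		    ring, r);                 /* dev_err() in the driver */
	return r;
}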
716 * @ring: ring index the fence is associated with
718 * Get the number of fences emitted on the requested ring (all asics).
719 * Returns the number of emitted fences on the ring. Used by the
720 * dynpm code to track ring activity.
722 unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
726 /* We are not protected by ring lock when reading the last sequence
729 radeon_fence_process(rdev, ring);
730 emitted = rdev->fence_drv[ring].sync_seq[ring]
731 - atomic64_read(&rdev->fence_drv[ring].last_seq);
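Lines 722-731: the number of in-flight fences is simply the distance between the newest emitted and the newest signaled sequence, refreshed first via radeon_fence_process() because the caller does not hold the ring lock. Minimal sketch with the usual stand-ins; any clamping the real code applies to the result is omitted here.

#include <stdint.h>

uint64_t read_last_seq(int ring);    /* stand-ins, as above */
uint64_t read_sync_seq(int ring);
void     fence_process(int ring);

static unsigned count_emitted(int ring)
{
	/* line 729: refresh last_seq first; the caller holds no ring lock */
	fence_process(ring);
	/* lines 730-731: in-flight fences = emitted - signaled */
	return (unsigned)(read_sync_seq(ring) - read_last_seq(ring));
}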
743 * @dst_ring: which ring to check against
745 * Check if the fence needs to be synced against another ring
747 * Returns true if we need to sync with another ring, false if
758 if (fence->ring == dst_ring) {
762 /* we are protected by the ring mutex */
764 if (fence->seq <= fdrv->sync_seq[fence->ring]) {
775 * @dst_ring: which ring to check against
778 * be synced with the requested ring (all asics).
789 if (fence->ring == dst_ring) {
793 /* we are protected by the ring mutex */
794 src = &fence->rdev->fence_drv[fence->ring];
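Lines 743-794 cover cross-ring synchronization: a fence only needs an explicit sync when it comes from a different ring and is newer than what the destination ring has already synced to, and the note_sync path updates that record. A self-contained sketch of the need_sync test, with the per-ring sync_seq[] arrays flattened into one 2-D array for clarity.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define NUM_RINGS 5   /* stand-in for RADEON_NUM_RINGS */

struct fence {
	uint64_t seq;
	int      ring;
};

/* synced_seq[dst][src]: newest sequence from ring src that ring dst has
 * already synchronized with; a flattened stand-in for the per-ring
 * fence_drv sync_seq[] arrays */
static uint64_t synced_seq[NUM_RINGS][NUM_RINGS];

static bool fence_need_sync(const struct fence *fence, int dst_ring)
{
	if (fence == NULL)
		return false;
	if (fence->ring == dst_ring)    /* line 758: same ring, implicit order */
		return false;
	/* line 764: already synced to something at least as new */
	return fence->seq > synced_seq[dst_ring][fence->ring];
}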
806 * ready for use on the requested ring.
809 * @ring: ring index to start the fence driver on
816 int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
821 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
822 if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
823 rdev->fence_drv[ring].scratch_reg = 0;
824 index = R600_WB_EVENT_OFFSET + ring * 4;
826 r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
832 rdev->fence_drv[ring].scratch_reg -
835 rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
836 rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
837 radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
838 rdev->fence_drv[ring].initialized = true;
839 dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016jx and cpu addr 0x%p\n",
840 ring, (uintmax_t)rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
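Lines 816-840 pick where the fence sequence lives: with writeback events (or on rings without scratch register support) a fixed per-ring offset into the writeback page is used, otherwise a scratch register is allocated and its offset converted into an index into the same page; the CPU and GPU addresses of that slot are then recorded and seeded with the current last_seq. A rough sketch of that decision; the offset parameter, the scratch-register math, and the helper names are all stand-ins.

#include <stdint.h>

/* stand-ins for the pieces not visible in the fragments */
unsigned scratch_index_for(int ring);   /* scratch-register path, lines 826-832 */
void     set_fence_location(int ring, uint32_t *cpu_addr, uint64_t gpu_addr);
void     fence_write(uint32_t seq, int ring);
uint64_t read_last_seq(int ring);

static int driver_start_ring(uint32_t *wb_page, uint64_t wb_gpu_addr,
    unsigned event_offset /* stand-in for R600_WB_EVENT_OFFSET */,
    int use_event, int ring)
{
	unsigned index;

	if (use_event)
		index = event_offset + ring * 4;   /* line 824 */
	else
		index = scratch_index_for(ring);   /* derived from scratch reg */

	/* lines 835-836: CPU and GPU addresses of the same writeback slot */
	set_fence_location(ring, &wb_page[index / 4], wb_gpu_addr + index);
	/* line 837: seed the slot so old fences still read as signaled */
	fence_write((uint32_t)read_last_seq(ring), ring);
	return 0;
}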
846 * for the requested ring.
849 * @ring: ring index to start the fence driver on
851 * Init the fence driver for the requested ring (all asics).
854 static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
858 rdev->fence_drv[ring].scratch_reg = -1;
859 rdev->fence_drv[ring].cpu_addr = NULL;
860 rdev->fence_drv[ring].gpu_addr = 0;
862 rdev->fence_drv[ring].sync_seq[i] = 0;
863 atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
864 rdev->fence_drv[ring].last_activity = jiffies;
865 rdev->fence_drv[ring].initialized = false;
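Lines 854-865 reset one ring's fence driver state to known defaults before the hardware side is brought up. The same fields, restated as a small self-contained struct and init function; last_activity gets the current time (jiffies in the driver) so lockup detection starts from a sane point.

#include <stddef.h>
#include <stdint.h>

#define NUM_RINGS 5   /* stand-in for RADEON_NUM_RINGS */

struct fence_drv_state {
	int           scratch_reg;
	uint32_t     *cpu_addr;
	uint64_t      gpu_addr;
	uint64_t      sync_seq[NUM_RINGS];
	uint64_t      last_seq;        /* atomic64_t in the driver */
	unsigned long last_activity;
	int           initialized;
};

static void driver_init_ring(struct fence_drv_state *drv, unsigned long now)
{
	int i;

	drv->scratch_reg = -1;           /* line 858 */
	drv->cpu_addr    = NULL;         /* line 859 */
	drv->gpu_addr    = 0;            /* line 860 */
	for (i = 0; i < NUM_RINGS; i++)
		drv->sync_seq[i] = 0;    /* line 862 */
	drv->last_seq      = 0;          /* line 863 */
	drv->last_activity = now;        /* line 864: jiffies in the driver */
	drv->initialized   = 0;          /* line 865 */
}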
882 int ring;
887 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
888 radeon_fence_driver_init_ring(rdev, ring);
906 int ring, r;
909 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
910 if (!rdev->fence_drv[ring].initialized)
912 r = radeon_fence_wait_empty_locked(rdev, ring);
918 radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
919 rdev->fence_drv[ring].initialized = false;
935 int ring;
937 for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
938 if (!rdev->fence_drv[ring].initialized)
940 radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
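The loop at lines 935-940 writes each initialized ring's newest emitted sequence straight into its fence location, which makes every outstanding fence read back as signaled. The fragments do not show which function this loop belongs to (it is the kind of thing done around suspend or reset), so the sketch below only models the loop itself, with the usual stand-in helpers.

#include <stdint.h>

#define NUM_RINGS 5   /* stand-in for RADEON_NUM_RINGS */

uint64_t read_sync_seq(int ring);      /* stand-ins, as above */
int      ring_initialized(int ring);
void     fence_write(uint32_t seq, int ring);

static void force_all_fences_signaled(void)
{
	int ring;

	for (ring = 0; ring < NUM_RINGS; ring++) {
		if (!ring_initialized(ring))
			continue;                            /* line 938 */
		/* line 940: store the newest emitted seq in the fence slot
		 * so every outstanding fence reads back as signaled */
		fence_write((uint32_t)read_sync_seq(ring), ring);
	}
}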
960 seq_printf(m, "--- ring %d ---\n", i);
968 seq_printf(m, "Last sync to ring %d 0x%016llx\n",