Lines Matching refs:ring

48  * are no longer in use by the associated ring on the GPU and
56 struct amdgpu_ring *ring;
94 * @ring: ring the fence is associated with
99 static void amdgpu_fence_write(struct amdgpu_ring *ring, u32 seq)
101 struct amdgpu_fence_driver *drv = &ring->fence_drv;
110 * @ring: ring the fence is associated with
115 static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
117 struct amdgpu_fence_driver *drv = &ring->fence_drv;
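The pair of helpers above (lines 99-117) simply proxy a 32-bit seqno word that the GPU writes and the CPU reads, falling back to the driver's cached atomic when no CPU mapping exists. A minimal userspace sketch of that pattern, with illustrative names rather than the kernel's own:

#include <stdatomic.h>
#include <stdint.h>

struct fence_drv {
	volatile uint32_t *cpu_addr;	/* CPU view of the GPU-written seqno */
	_Atomic uint32_t last_seq;	/* last seqno the CPU has observed */
};

static void fence_write(struct fence_drv *drv, uint32_t seq)
{
	if (drv->cpu_addr)
		*drv->cpu_addr = seq;	/* update the shared fence word */
}

static uint32_t fence_read(struct fence_drv *drv)
{
	if (drv->cpu_addr)
		return *drv->cpu_addr;
	/* no mapping yet: report the last seqno the CPU saw */
	return atomic_load(&drv->last_seq);
}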
129 * amdgpu_fence_emit - emit a fence on the requested ring
131 * @ring: ring the fence is associated with
136 * Emits a fence command on the requested ring (all asics).
139 int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
142 struct amdgpu_device *adev = ring->adev;
155 am_fence->ring = ring;
161 seq = ++ring->fence_drv.sync_seq;
170 &ring->fence_drv.lock,
171 adev->fence_context + ring->idx, seq);
176 &ring->fence_drv.lock,
177 adev->fence_context + ring->idx, seq);
181 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
185 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
204 * emitting the fence would mess up the hardware ring buffer.
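Line 185 is the core bookkeeping trick: because num_hw_submission is a power of two, masking the monotonically increasing seqno selects one of 2 * num_hw_submission fence slots without a modulo. A standalone sketch with hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t num_hw_submission = 4;			/* must be a power of two */
	uint32_t num_fences_mask = num_hw_submission * 2 - 1;	/* 0x7: eight slots */
	uint32_t sync_seq = 0;

	for (int i = 0; i < 10; i++) {
		uint32_t seq = ++sync_seq;		/* seqno of the new fence */
		printf("seq %u -> slot %u\n", seq, seq & num_fences_mask);
	}
	return 0;
}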
214 * amdgpu_fence_emit_polling - emit a fence on the requested ring
216 * @ring: ring the fence is associated with
220 * Emits a fence command on the requested ring (all asics).
224 int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s,
233 seq = ++ring->fence_drv.sync_seq;
234 r = amdgpu_fence_wait_polling(ring,
235 seq - ring->fence_drv.num_fences_mask,
240 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
251 * @ring: pointer to struct amdgpu_ring
255 static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
257 mod_timer(&ring->fence_drv.fallback_timer,
264 * @ring: pointer to struct amdgpu_ring
272 bool amdgpu_fence_process(struct amdgpu_ring *ring)
274 struct amdgpu_fence_driver *drv = &ring->fence_drv;
275 struct amdgpu_device *adev = ring->adev;
279 last_seq = atomic_read(&ring->fence_drv.last_seq);
280 seq = amdgpu_fence_read(ring);
284 if (del_timer(&ring->fence_drv.fallback_timer) &&
285 seq != ring->fence_drv.sync_seq)
286 amdgpu_fence_schedule_fallback(ring);
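amdgpu_fence_process advances last_seq to the value just read out of the fence word with a compare-and-swap loop, so concurrent callers each claim a disjoint range of seqnos to signal. A userspace approximation of that step (a sketch, not the kernel function):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool process_sketch(_Atomic uint32_t *last_seq, uint32_t hw_seq)
{
	uint32_t last = atomic_load(last_seq);

	do {
		if (last == hw_seq)
			return false;	/* nothing new has signaled */
	} while (!atomic_compare_exchange_weak(last_seq, &last, hw_seq));

	/* this caller now owns signaling fences in (last, hw_seq] */
	return true;
}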
321 * @t: timer context used to obtain the pointer to ring structure
327 struct amdgpu_ring *ring = from_timer(ring, t,
330 if (amdgpu_fence_process(ring))
331 DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name);
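The fallback callback receives only the timer pointer; from_timer() on line 327 is container_of() underneath, recovering the ring that embeds the timer. A plain-C illustration of the idiom:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct timer { int pending; };
struct ring { const char *name; struct timer fallback_timer; };

static void fallback_cb(struct timer *t)
{
	struct ring *ring = container_of(t, struct ring, fallback_timer);
	printf("fallback fired on ring %s\n", ring->name);
}

int main(void)
{
	struct ring r = { .name = "gfx" };
	fallback_cb(&r.fallback_timer);	/* simulate the timer expiring */
	return 0;
}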
337 * @ring: ring the fence is associated with
339 * Wait for all fences on the requested ring to signal (all asics).
342 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring)
344 uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq);
351 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
368 * @ring: ring the fence is associated with
372 * Wait for all fences on the requested ring to signal (all asics).
375 signed long amdgpu_fence_wait_polling(struct amdgpu_ring *ring,
380 while ((int32_t)(wait_seq - amdgpu_fence_read(ring)) > 0 && timeout > 0) {
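The cast on line 380 is what makes the busy-wait wraparound-safe: unsigned subtraction is modulo 2^32, so reinterpreting the difference as signed answers "is wait_seq still ahead of the hardware?" even across a 32-bit wrap. For example:

#include <stdint.h>
#include <stdio.h>

static int seq_ahead(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;	/* true iff a is newer than b */
}

int main(void)
{
	printf("%d\n", seq_ahead(5, 3));		/* 1: 5 is newer */
	printf("%d\n", seq_ahead(3, 5));		/* 0 */
	printf("%d\n", seq_ahead(2, 0xfffffffe));	/* 1: newer across wrap */
	return 0;
}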
389 * @ring: ring the fence is associated with
391 * Get the number of fences emitted on the requested ring (all asics).
392 * Returns the number of emitted fences on the ring. Used by the
393 * dynpm code to track ring activity.
395 unsigned int amdgpu_fence_count_emitted(struct amdgpu_ring *ring)
399 /* We are not protected by ring lock when reading the last sequence
403 emitted -= atomic_read(&ring->fence_drv.last_seq);
404 emitted += READ_ONCE(ring->fence_drv.sync_seq);
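Lines 403-404 use the same modulo arithmetic: the number of in-flight fences is the unsigned distance between the newest emitted seqno (sync_seq) and the newest retired one (last_seq), which stays correct across 32-bit wrap. Sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t count_emitted(uint32_t sync_seq, uint32_t last_seq)
{
	return sync_seq - last_seq;	/* in-flight fences, wrap-safe */
}

int main(void)
{
	printf("%u\n", count_emitted(10, 7));		/* 3 */
	printf("%u\n", count_emitted(1, 0xffffffff));	/* 2, across the wrap */
	return 0;
}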
410 * @ring: ring the fence is associated with
415 u64 amdgpu_fence_last_unsignaled_time_us(struct amdgpu_ring *ring)
417 struct amdgpu_fence_driver *drv = &ring->fence_drv;
421 last_seq = atomic_read(&ring->fence_drv.last_seq);
422 sync_seq = READ_ONCE(ring->fence_drv.sync_seq);
438 * @ring: ring the fence is associated with
446 void amdgpu_fence_update_start_timestamp(struct amdgpu_ring *ring, uint32_t seq, ktime_t timestamp)
448 struct amdgpu_fence_driver *drv = &ring->fence_drv;
461 * ready for use on the requested ring.
463 * @ring: ring to start the fence driver on
464 * @irq_src: interrupt source to use for this ring
465 * @irq_type: interrupt type to use for this ring
472 int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
476 struct amdgpu_device *adev = ring->adev;
479 if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
480 ring->fence_drv.cpu_addr = ring->fence_cpu_addr;
481 ring->fence_drv.gpu_addr = ring->fence_gpu_addr;
485 ring->fence_drv.cpu_addr = adev->uvd.inst[ring->me].cpu_addr + index;
486 ring->fence_drv.gpu_addr = adev->uvd.inst[ring->me].gpu_addr + index;
488 amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
490 ring->fence_drv.irq_src = irq_src;
491 ring->fence_drv.irq_type = irq_type;
492 ring->fence_drv.initialized = true;
494 DRM_DEV_DEBUG(adev->dev, "fence driver on ring %s use gpu addr 0x%016llx\n",
495 ring->name, ring->fence_drv.gpu_addr);
501 * for the requested ring.
503 * @ring: ring to init the fence driver on
505 * Init the fence driver for the requested ring (all asics).
508 int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
510 struct amdgpu_device *adev = ring->adev;
515 if (!is_power_of_2(ring->num_hw_submission))
518 ring->fence_drv.cpu_addr = NULL;
519 ring->fence_drv.gpu_addr = 0;
520 ring->fence_drv.sync_seq = 0;
521 atomic_set(&ring->fence_drv.last_seq, 0);
522 ring->fence_drv.initialized = false;
524 timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
526 ring->fence_drv.num_fences_mask = ring->num_hw_submission * 2 - 1;
527 spin_lock_init(&ring->fence_drv.lock);
528 ring->fence_drv.fences = kcalloc(ring->num_hw_submission * 2, sizeof(void *),
531 if (!ring->fence_drv.fences)
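The init path enforces the invariant the masking trick depends on: num_hw_submission must be a power of two, and the slot array holds twice that many entries. A toy model of the sizing logic (return values are illustrative; the kernel uses -EINVAL and -ENOMEM):

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct fence_drv_model {
	uint32_t num_fences_mask;
	void **fences;
};

static bool is_power_of_2(uint32_t n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static int init_ring_model(struct fence_drv_model *drv, uint32_t num_hw_submission)
{
	if (!is_power_of_2(num_hw_submission))
		return -1;			/* reject non-power-of-two sizes */

	drv->num_fences_mask = num_hw_submission * 2 - 1;
	drv->fences = calloc(num_hw_submission * 2, sizeof(void *));
	return drv->fences ? 0 : -1;
}

int main(void)
{
	struct fence_drv_model drv;
	return init_ring_model(&drv, 4);	/* mask becomes 0x7 */
}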
558  * @ring: ring to be checked
565 static bool amdgpu_fence_need_ring_interrupt_restore(struct amdgpu_ring *ring)
567 struct amdgpu_device *adev = ring->adev;
570 switch (ring->funcs->type) {
603 struct amdgpu_ring *ring = adev->rings[i];
605 if (!ring || !ring->fence_drv.initialized)
610 r = amdgpu_fence_wait_empty(ring);
615 amdgpu_fence_driver_force_completion(ring);
618 ring->fence_drv.irq_src &&
619 amdgpu_fence_need_ring_interrupt_restore(ring))
620 amdgpu_irq_put(adev, ring->fence_drv.irq_src,
621 ring->fence_drv.irq_type);
623 del_timer_sync(&ring->fence_drv.fallback_timer);
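Reassembled from the matches above, the suspend-time loop reads roughly as follows (paraphrased from the fragments, not the verbatim kernel source): drain each ring, force completion if draining fails, drop the interrupt reference, then kill the fallback timer.

for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
	struct amdgpu_ring *ring = adev->rings[i];

	if (!ring || !ring->fence_drv.initialized)
		continue;

	/* drain; if the GPU is wedged, force the fences to complete */
	r = amdgpu_fence_wait_empty(ring);
	if (r)
		amdgpu_fence_driver_force_completion(ring);

	/* quiesce the interrupt source and the fallback timer */
	if (ring->fence_drv.irq_src &&
	    amdgpu_fence_need_ring_interrupt_restore(ring))
		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
			       ring->fence_drv.irq_type);

	del_timer_sync(&ring->fence_drv.fallback_timer);
}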
633 struct amdgpu_ring *ring = adev->rings[i];
635 if (!ring || !ring->fence_drv.initialized || !ring->fence_drv.irq_src)
650 struct amdgpu_ring *ring = adev->rings[i];
652 if (!ring || !ring->fence_drv.initialized)
661 if (ring->sched.ops)
662 drm_sched_fini(&ring->sched);
664 for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
665 dma_fence_put(ring->fence_drv.fences[j]);
666 kfree(ring->fence_drv.fences);
667 ring->fence_drv.fences = NULL;
668 ring->fence_drv.initialized = false;
689 struct amdgpu_ring *ring = adev->rings[i];
691 if (!ring || !ring->fence_drv.initialized)
695 if (ring->fence_drv.irq_src &&
696 amdgpu_fence_need_ring_interrupt_restore(ring))
697 amdgpu_irq_get(adev, ring->fence_drv.irq_src,
698 ring->fence_drv.irq_type);
703 * amdgpu_fence_driver_clear_job_fences - clear job embedded fences of ring
705 * @ring: ring whose job-embedded fences are to be cleared
708 void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring)
713 for (i = 0; i <= ring->fence_drv.num_fences_mask; i++) {
714 ptr = &ring->fence_drv.fences[i];
734 * @ring: the ring which contains the fences
737 * Set an error code to all the fences pending on the ring.
739 void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error)
741 struct amdgpu_fence_driver *drv = &ring->fence_drv;
757 * amdgpu_fence_driver_force_completion - force signal latest fence of ring
759 * @ring: ring whose latest fence should be signaled
762 void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring)
764 amdgpu_fence_driver_set_error(ring, -ECANCELED);
765 amdgpu_fence_write(ring, ring->fence_drv.sync_seq);
766 amdgpu_fence_process(ring);
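Forced completion fools the normal path: after tagging every pending fence with -ECANCELED, it writes the last emitted seqno into the fence word as if the GPU had reached it, then runs the regular processing so everything signals. A self-contained toy of that ordering:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fence_word;		/* normally written by the GPU */
static _Atomic uint32_t last_seq;	/* newest seqno the CPU has seen */

static void force_completion_sketch(uint32_t sync_seq)
{
	fence_word = sync_seq;			/* pretend the GPU caught up */
	atomic_store(&last_seq, fence_word);	/* then process as usual */
}

int main(void)
{
	atomic_store(&last_seq, 3);	/* GPU stuck at seqno 3 */
	force_completion_sketch(9);	/* but 9 fences were emitted */
	printf("last_seq now %u\n", atomic_load(&last_seq));	/* 9 */
	return 0;
}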
780 return (const char *)to_amdgpu_fence(f)->ring->name;
800 if (!timer_pending(&to_amdgpu_fence(f)->ring->fence_drv.fallback_timer))
801 amdgpu_fence_schedule_fallback(to_amdgpu_fence(f)->ring);
903 struct amdgpu_ring *ring = adev->rings[i];
905 if (!ring || !ring->fence_drv.initialized)
908 amdgpu_fence_process(ring);
910 seq_printf(m, "--- ring %d (%s) ---\n", i, ring->name);
912 atomic_read(&ring->fence_drv.last_seq));
914 ring->fence_drv.sync_seq);
916 if (ring->funcs->type == AMDGPU_RING_TYPE_GFX ||
917 ring->funcs->type == AMDGPU_RING_TYPE_SDMA) {
919 le32_to_cpu(*ring->trail_fence_cpu_addr));
921 ring->trail_seq);
924 if (ring->funcs->type != AMDGPU_RING_TYPE_GFX)
929 le32_to_cpu(*(ring->fence_drv.cpu_addr + 2)));
932 le32_to_cpu(*(ring->fence_drv.cpu_addr + 4)));
935 le32_to_cpu(*(ring->fence_drv.cpu_addr + 6)));