Lines matching refs:ring in drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

/*
 * Most engines on the GPU are fed via ring buffers.  Ring
 * buffers are areas of GPU-accessible memory that the host fills
 * with commands.  When the rptr (read) and wptr (write)
 * pointers are equal, the ring is idle.  When the host
 * writes commands to the ring buffer, it increments the
 * wptr, and the GPU fetches commands until the pointers match again.
 */
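/*
 * The rptr/wptr protocol above is ordinary power-of-two ring-buffer
 * arithmetic.  A minimal standalone sketch (illustration only, not
 * driver API; the driver itself tracks remaining space in
 * ring->count_dw rather than recomputing it from the pointers):
 */
#include <stdbool.h>
#include <stdint.h>

/* Ring of (buf_mask + 1) dwords; pointers may run past the size and
 * are masked on use.  rptr == wptr means the ring is idle. */
static inline bool ring_idle(uint32_t rptr, uint32_t wptr)
{
	return rptr == wptr;
}

static inline uint32_t ring_used_dw(uint32_t rptr, uint32_t wptr,
				    uint32_t buf_mask)
{
	return (wptr - rptr) & buf_mask;
}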
/**
 * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission.
 *
 * @type: ring type for which to return the limit.
 */
/**
 * amdgpu_ring_alloc - allocate space on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ndw: number of dwords to allocate in the ring buffer
 *
 * Allocate @ndw dwords in the ring buffer (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)
{
	/* Align the requested size so the commit path can pad safely. */
	ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	/* Never allocate more than fits in one submission. */
	if (WARN_ON_ONCE(ndw > ring->max_dw))
		return -ENOMEM;

	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;

	if (ring->funcs->begin_use)
		ring->funcs->begin_use(ring);

	return 0;
}
/* amdgpu_ring_insert_nop - insert @count NOP packets
 * @ring: amdgpu_ring structure holding ring information */
void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	while (count--)
		amdgpu_ring_write(ring, ring->funcs->nop);
}
/* amdgpu_ring_generic_pad_ib - pad an IB with NOPs up to the ring's alignment
 * @ring: amdgpu_ring structure holding ring information
 * @ib: indirect buffer to pad */
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	while (ib->length_dw & ring->funcs->align_mask)
		ib->ptr[ib->length_dw++] = ring->funcs->nop;
}
/**
 * amdgpu_ring_commit - tell the GPU to execute the new
 * commands on the ring buffer
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Pad the ring to the fetch size and update the wptr to tell the GPU to
 * execute new commands on the ring buffer (all asics).
 */
void amdgpu_ring_commit(struct amdgpu_ring *ring)
{
	uint32_t count;

	/* We pad to match the fetch size. */
	count = ring->funcs->align_mask + 1 -
		(ring->wptr & ring->funcs->align_mask);
	count %= ring->funcs->align_mask + 1;
	ring->funcs->insert_nop(ring, count);

	mb();	/* make ring contents visible before publishing the wptr */
	amdgpu_ring_set_wptr(ring);

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
/* amdgpu_ring_undo - reset the wptr to its value before amdgpu_ring_alloc()
 * @ring: amdgpu_ring structure holding ring information */
void amdgpu_ring_undo(struct amdgpu_ring *ring)
{
	ring->wptr = ring->wptr_old;

	if (ring->funcs->end_use)
		ring->funcs->end_use(ring);
}
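/*
 * alloc/commit/undo form the submission pattern used throughout the
 * driver: reserve space, emit packets, then publish the wptr, or roll
 * it back if emission fails part-way.  A caller sketch (hypothetical
 * function, NOP payload chosen purely for illustration):
 */
static int example_submit(struct amdgpu_ring *ring)
{
	unsigned int i;
	int r;

	r = amdgpu_ring_alloc(ring, 4);		/* reserve 4 dwords */
	if (r)
		return r;

	for (i = 0; i < 4; i++)			/* placeholder packets */
		amdgpu_ring_write(ring, ring->funcs->nop);

	/* on a mid-stream error, call amdgpu_ring_undo() instead */
	amdgpu_ring_commit(ring);		/* pad and publish the wptr */
	return 0;
}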
#define amdgpu_ring_get_gpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (ring->mes_ctx->meta_data_gpu_addr + offset) :			\
	 (ring->adev->wb.gpu_addr + offset * 4))

#define amdgpu_ring_get_cpu_addr(ring, offset)				\
	(ring->is_mes_queue ?						\
	 (void *)((uint8_t *)(ring->mes_ctx->meta_data_ptr) + offset) :	\
	 (&ring->adev->wb.wb[offset]))
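/*
 * Note the unit difference the macros hide in the non-MES case: a
 * writeback offset indexes a u32 array on the CPU side but is scaled
 * by 4 into a byte offset on the GPU side.  A sketch of the same slot
 * seen from both sides (illustration only, using the fields above):
 */
static inline uint64_t wb_slot_gpu_addr(struct amdgpu_device *adev,
					uint32_t offs)
{
	return adev->wb.gpu_addr + offs * 4;	/* byte address for the GPU */
}

static inline volatile uint32_t *wb_slot_cpu_addr(struct amdgpu_device *adev,
						  uint32_t offs)
{
	return &adev->wb.wb[offs];		/* u32 index for the CPU */
}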
/**
 * amdgpu_ring_init - init driver ring struct.
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 * @max_dw: maximum number of dw for ring alloc
 * @irq_src: interrupt source to use for this ring
 * @irq_type: interrupt type to use for this ring
 * @hw_prio: ring priority (NORMAL/HIGH)
 * @sched_score: optional score atomic shared with other schedulers
 *
 * Initialize the driver information for the selected ring (all asics).
 * Returns 0 on success, error on failure.
 */
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
		     unsigned int max_dw, struct amdgpu_irq_src *irq_src,
		     unsigned int irq_type, unsigned int hw_prio,
		     atomic_t *sched_score)
{
	int sched_hw_submission = amdgpu_sched_hw_submission;
	unsigned int max_ibs_dw;
	u32 *num_sched;
	u32 hw_ip;
	int r;
	/* KIQ tasks get submitted directly to the ring, bypassing the
	 * scheduler, so give it a higher hw submission limit; the same
	 * applies to the SDMA page queue. */
	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		sched_hw_submission = max(sched_hw_submission, 256);
	else if (ring == &adev->sdma.instance[0].page)
		sched_hw_submission = 256;
	if (ring->adev == NULL) {
		if (adev->num_rings >= AMDGPU_MAX_RINGS)
			return -EINVAL;

		ring->adev = adev;
		ring->num_hw_submission = sched_hw_submission;
		ring->sched_score = sched_score;
		ring->vmid_wait = dma_fence_get_stub();

		if (!ring->is_mes_queue) {
			ring->idx = adev->num_rings++;
			adev->rings[ring->idx] = ring;
		}

		r = amdgpu_fence_driver_init_ring(ring);
		if (r)
			return r;
	}
	if (ring->is_mes_queue) {
		ring->rptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_RPTR_OFFS);
		ring->wptr_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_WPTR_OFFS);
		ring->fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_FENCE_OFFS);
		ring->trail_fence_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_TRAIL_FENCE_OFFS);
		ring->cond_exe_offs = amdgpu_mes_ctx_get_offs(ring,
				AMDGPU_MES_CTX_COND_EXE_OFFS);
	} else {
		r = amdgpu_device_wb_get(adev, &ring->rptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
			return r;
		}
		r = amdgpu_device_wb_get(adev, &ring->wptr_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
			return r;
		}
		r = amdgpu_device_wb_get(adev, &ring->fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
			return r;
		}
		r = amdgpu_device_wb_get(adev, &ring->trail_fence_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring trail_fence_offs wb alloc failed\n", r);
			return r;
		}
		r = amdgpu_device_wb_get(adev, &ring->cond_exe_offs);
		if (r) {
			dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
			return r;
		}
	}
	ring->fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->fence_offs);
	ring->fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->fence_offs);

	ring->rptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->rptr_offs);
	ring->rptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->rptr_offs);

	ring->wptr_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->wptr_offs);
	ring->wptr_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->wptr_offs);

	ring->trail_fence_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->trail_fence_offs);
	ring->trail_fence_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->trail_fence_offs);

	ring->cond_exe_gpu_addr =
		amdgpu_ring_get_gpu_addr(ring, ring->cond_exe_offs);
	ring->cond_exe_cpu_addr =
		amdgpu_ring_get_cpu_addr(ring, ring->cond_exe_offs);

	/* always set cond_exec_polling to CONTINUE */
	*ring->cond_exe_cpu_addr = 1;
	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
	if (r) {
		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
		return r;
	}
	max_ibs_dw = ring->funcs->emit_frame_size +
		     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size;
	max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask;

	if (WARN_ON(max_ibs_dw > max_dw))
		max_dw = max_ibs_dw;
	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);

	ring->buf_mask = (ring->ring_size / 4) - 1;
	ring->ptr_mask = ring->funcs->support_64bit_ptrs ?
		0xffffffffffffffff : ring->buf_mask;
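	/*
	 * Because ring_size is rounded up to a power of two, buf_mask
	 * reduces pointer wrap-around to a single AND.  Worked example
	 * with assumed numbers: max_dw = 1024 and sched_hw_submission = 2
	 * give ring_size = roundup_pow_of_two(1024 * 4 * 2) = 8192 bytes,
	 * i.e. 2048 dwords, so buf_mask = 0x7ff, and a wptr of 2050 lands
	 * at slot 2050 & 0x7ff = 2.
	 */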
	/* Allocate ring buffer */
	if (ring->is_mes_queue) {
		int offset = 0;

		BUG_ON(ring->ring_size > PAGE_SIZE * 4);

		offset = amdgpu_mes_ctx_get_offs(ring,
						 AMDGPU_MES_CTX_RING_OFFS);
		ring->gpu_addr = amdgpu_mes_ctx_get_offs_gpu_addr(ring, offset);
		ring->ring = amdgpu_mes_ctx_get_offs_cpu_addr(ring, offset);
		amdgpu_ring_clear_ring(ring);

	} else if (ring->ring_obj == NULL) {
		r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE,
					    AMDGPU_GEM_DOMAIN_GTT,
					    &ring->ring_obj,
					    &ring->gpu_addr,
					    (void **)&ring->ring);
		if (r) {
			dev_err(adev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		amdgpu_ring_clear_ring(ring);
	}
	ring->max_dw = max_dw;
	ring->hw_prio = hw_prio;

	if (!ring->no_scheduler) {
		hw_ip = ring->funcs->type;
		num_sched = &adev->gpu_sched[hw_ip][hw_prio].num_scheds;
		adev->gpu_sched[hw_ip][hw_prio].sched[(*num_sched)++] =
			&ring->sched;
	}

	return 0;
}
/**
 * amdgpu_ring_fini - tear down the driver ring struct.
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Tear down the driver information for the selected ring (all asics).
 */
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
	/* Don't tear down a ring that was never initialized. */
	if (!(ring->adev) ||
	    (!ring->is_mes_queue && !(ring->adev->rings[ring->idx])))
		return;
	ring->sched.ready = false;

	if (!ring->is_mes_queue) {
		amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
		amdgpu_device_wb_free(ring->adev, ring->cond_exe_offs);
		amdgpu_device_wb_free(ring->adev, ring->fence_offs);

		amdgpu_bo_free_kernel(&ring->ring_obj,
				      &ring->gpu_addr,
				      (void **)&ring->ring);
	}

	kfree(ring->fence_drv.fences);

	dma_fence_put(ring->vmid_wait);
	ring->vmid_wait = NULL;
	ring->me = 0;

	if (!ring->is_mes_queue)
		ring->adev->rings[ring->idx] = NULL;
}
/* amdgpu_ring_emit_reg_write_reg_wait_helper - ring helper
 * @ring: ring to write to; writes @ref to @reg0, then waits until
 * @reg1 & @mask matches, for rings without a combined packet. */
void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,
						uint32_t reg0, uint32_t reg1,
						uint32_t ref, uint32_t mask)
{
	amdgpu_ring_emit_wreg(ring, reg0, ref);
	amdgpu_ring_emit_reg_wait(ring, reg1, mask, mask);
}
/* amdgpu_ring_soft_recovery - try to soft recover a ring lockup
 * @ring: ring to try the recovery on
 * @vmid: VMID we try to get going again
 * @fence: timedout fence
 * Tries to get a ring proceeding again when it is stuck. */
bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,
			       struct dma_fence *fence)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(10);

	if (unlikely(ring->adev->debug_disable_soft_recovery))
		return false;
	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)
		return false;

	atomic_inc(&ring->adev->gpu_reset_counter);

	/* Poke the stuck VMID until the fence signals or we time out. */
	while (!dma_fence_is_signaled(fence) && time_after(deadline, jiffies))
		ring->funcs->soft_recovery(ring, vmid);

	return dma_fence_is_signaled(fence);
}
/* Layout of the debugfs ring file: a 12-byte header holding the rptr,
 * the wptr and the driver's copy of the wptr (one u32 each),
 * followed by n-words of ring data.
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	uint32_t value, result = 0, early[3];

	/* header: masked hardware rptr/wptr plus the driver's wptr copy */
	early[0] = amdgpu_ring_get_rptr(ring) & ring->buf_mask;
	early[1] = amdgpu_ring_get_wptr(ring) & ring->buf_mask;
	early[2] = ring->wptr & ring->buf_mask;
	/* ... copy header to userspace ... */

	/* body: ring contents, shifted by the 12-byte header */
	if (*pos >= (ring->ring_size + 12))
		return result;
	value = ring->ring[(*pos - 12) / 4];
	/* ... copy to userspace and advance *pos ... */
}
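/*
 * Given that layout, a userspace sketch that dumps the header.  The
 * path is an assumption: the file is created as "amdgpu_ring_<name>"
 * under the DRM debugfs root, whose exact location varies by device
 * and ring name.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t hdr[3];	/* rptr, wptr, driver's wptr */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", O_RDONLY);

	if (fd < 0)
		return 1;
	if (read(fd, hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) {
		close(fd);
		return 1;
	}
	printf("rptr=0x%08x wptr=0x%08x driver-wptr=0x%08x\n",
	       hdr[0], hdr[1], hdr[2]);
	close(fd);
	return 0;
}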
static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = file_inode(f)->i_private;
	u32 *kbuf, *mqd;
	int r, i;

	/* Snapshot the MQD into a kernel buffer so the BO can be unmapped
	 * again before copying out to userspace. */
	kbuf = kmalloc(ring->mqd_size, GFP_KERNEL);
	/* ... */
	r = amdgpu_bo_reserve(ring->mqd_obj, false);
	/* ... */
	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd);
	/* ... on error: amdgpu_bo_unreserve(ring->mqd_obj) and bail ... */

	for (i = 0; i < ring->mqd_size / sizeof(u32); i++)
		kbuf[i] = mqd[i];

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);

	/* copy kbuf to userspace; EOF once *pos >= ring->mqd_size */
	/* ... */
}
static int amdgpu_debugfs_ring_error(void *data, u64 val)
{
	struct amdgpu_ring *ring = data;

	amdgpu_fence_driver_set_error(ring, val);
	return 0;
}
void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
			      struct amdgpu_ring *ring)
{
	struct dentry *root = adev_to_drm(adev)->primary->debugfs_root;
	char name[32];

	sprintf(name, "amdgpu_ring_%s", ring->name);
	debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
				 &amdgpu_debugfs_ring_fops,
				 ring->ring_size + 12);

	if (ring->mqd_obj) {
		sprintf(name, "amdgpu_mqd_%s", ring->name);
		debugfs_create_file_size(name, S_IFREG | 0444, root, ring,
					 &amdgpu_debugfs_mqd_fops,
					 ring->mqd_size);
	}

	sprintf(name, "amdgpu_error_%s", ring->name);
	debugfs_create_file(name, 0200, root, ring,
			    /* ... write-only error-injection fops ... */);
}
/* amdgpu_ring_test_helper - test the ring and set sched readiness status
 * @ring: ring to test
 * Tests the ring and updates the scheduler readiness status accordingly. */
int amdgpu_ring_test_helper(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	r = amdgpu_ring_test_ring(ring);
	if (r)
		DRM_DEV_ERROR(adev->dev, "ring %s test failed (%d)\n",
			      ring->name, r);
	else
		DRM_DEV_DEBUG(adev->dev, "ring test on %s succeeded\n",
			      ring->name);

	ring->sched.ready = !r;

	return r;
}
static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring,
				    struct amdgpu_mqd_prop *prop)
{
	struct amdgpu_device *adev = ring->adev;
	bool is_high_prio_compute = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE &&
				    amdgpu_gfx_is_high_priority_compute_queue(adev, ring);
	bool is_high_prio_gfx = ring->funcs->type == AMDGPU_RING_TYPE_GFX &&
			        amdgpu_gfx_is_high_priority_graphics_queue(adev, ring);

	memset(prop, 0, sizeof(*prop));

	prop->mqd_gpu_addr = ring->mqd_gpu_addr;
	prop->hqd_base_gpu_addr = ring->gpu_addr;
	prop->rptr_gpu_addr = ring->rptr_gpu_addr;
	prop->wptr_gpu_addr = ring->wptr_gpu_addr;
	prop->queue_size = ring->ring_size;
	prop->eop_gpu_addr = ring->eop_gpu_addr;
	prop->use_doorbell = ring->use_doorbell;
	prop->doorbell_index = ring->doorbell_index;
	/* ... pipe/queue priority fields set from the two flags above ... */

	/* KIQ is started by the driver, not the scheduler */
	prop->hqd_active = ring->funcs->type == AMDGPU_RING_TYPE_KIQ;
}
int amdgpu_ring_init_mqd(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_mqd *mqd_mgr;
	struct amdgpu_mqd_prop prop;

	amdgpu_ring_to_mqd_prop(ring, &prop);
	ring->wptr = 0;

	if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
		mqd_mgr = &adev->mqds[AMDGPU_HW_IP_COMPUTE];
	else
		mqd_mgr = &adev->mqds[ring->funcs->type];

	return mqd_mgr->init_mqd(adev, ring->mqd_ptr, &prop);
}
void amdgpu_ring_ib_begin(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_begin(ring);
}

void amdgpu_ring_ib_end(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_end(ring);
}

void amdgpu_ring_ib_on_emit_cntl(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CONTROL);
}

void amdgpu_ring_ib_on_emit_ce(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_CE);
}

void amdgpu_ring_ib_on_emit_de(struct amdgpu_ring *ring)
{
	if (ring->is_sw_ring)
		amdgpu_sw_ring_ib_mark_offset(ring, AMDGPU_MUX_OFFSET_TYPE_DE);
}
bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring)
{
	if (!ring)
		return false;

	if (ring->no_scheduler || !drm_sched_wqueue_ready(&ring->sched))
		return false;

	return true;
}