Lines Matching refs:vpe (each entry below quotes a source line that references vpe, prefixed with its line number in the file)

121 int amdgpu_vpe_configure_dpm(struct amdgpu_vpe *vpe)
123 struct amdgpu_device *adev = vpe->ring.adev;
134 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
136 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
147 /* vpe dpm only cares 4 levels. */
190 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_pratio), pratio_ctl); /* PRatio */
191 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_request_interval), 24000); /* 1ms, unit=1/24MHz */
192 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_decision_threshold), 1200000); /* 50ms */
193 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_busy_clamp_threshold), 1200000);/* 50ms */
194 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_idle_clamp_threshold), 1200000);/* 50ms */
195 dev_dbg(adev->dev, "%s: configure vpe dpm pratio done!\n", __func__);
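
The interval and threshold values written just above are expressed in 24 MHz ticks, per the inline comments: 24000 ticks are 1 ms and 1200000 ticks are 50 ms. A self-contained conversion helper, purely for illustration (vpe_ms_to_24mhz_ticks() is not part of the driver):

    #include <stdint.h>

    /* Illustration only: convert milliseconds into the 1/24 MHz tick units
     * used by the DPM interval/threshold registers above. */
    static inline uint32_t vpe_ms_to_24mhz_ticks(uint32_t ms)
    {
            return ms * 24000;  /* 24,000 ticks per millisecond at 24 MHz */
    }

    /* vpe_ms_to_24mhz_ticks(1)  == 24000   -> dpm_request_interval
     * vpe_ms_to_24mhz_ticks(50) == 1200000 -> decision and clamp thresholds */
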
204 dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
206 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
207 dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
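
Lines 134/136 and 204/206 each bracket a read-modify-write of the DPM enable register; the bit manipulation in between does not contain "vpe" and therefore is not listed. A sketch of the implied pattern, where the enable flag stands in for the two separate code paths and the bit position (bit 0) is an assumption:

    dpm_ctl = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable));
    if (enable)
            dpm_ctl |= 0x1;    /* assumed: bit 0 turns VPE DPM on */
    else
            dpm_ctl &= ~0x1;   /* assumed: clearing bit 0 turns it off */
    WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
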
215 .mc_addr = adev->vpe.cmdbuf_gpu_addr,
222 int amdgpu_vpe_init_microcode(struct amdgpu_vpe *vpe)
224 struct amdgpu_device *adev = vpe->ring.adev;
232 ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name);
236 vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
237 adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
238 adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);
245 info->fw = adev->vpe.fw;
251 info->fw = adev->vpe.fw;
258 dev_err(adev->dev, "fail to initialize vpe microcode\n");
259 release_firmware(adev->vpe.fw);
260 adev->vpe.fw = NULL;
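
Taken together, lines 232-260 outline the usual firmware bring-up shape: request the blob, read the version fields out of its header, and drop the reference again on any failure. A condensed sketch of that flow (the control flow and the out label are inferred, not quoted from the file):

    ret = amdgpu_ucode_request(adev, &adev->vpe.fw, fw_name);
    if (ret)
            goto out;

    vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
    adev->vpe.fw_version = le32_to_cpu(vpe_hdr->header.ucode_version);
    adev->vpe.feature_version = le32_to_cpu(vpe_hdr->ucode_feature_version);
    return 0;

    out:
    dev_err(adev->dev, "fail to initialize vpe microcode\n");
    release_firmware(adev->vpe.fw);
    adev->vpe.fw = NULL;
    return ret;
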
264 int amdgpu_vpe_ring_init(struct amdgpu_vpe *vpe)
266 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
267 struct amdgpu_ring *ring = &vpe->ring;
274 snprintf(ring->name, 4, "vpe");
276 ret = amdgpu_ring_init(adev, ring, 1024, &vpe->trap_irq, 0,
284 int amdgpu_vpe_ring_fini(struct amdgpu_vpe *vpe)
286 amdgpu_ring_fini(&vpe->ring);
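
Line 266 works because struct amdgpu_vpe is embedded by value inside struct amdgpu_device (the listing uses &adev->vpe throughout), so container_of() can walk back from the member to its container; line 274's buffer length of 4 is exactly "vpe" plus the terminating NUL. A self-contained, user-space illustration of the container_of() idea with toy stand-in structures:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct toy_vpe    { int dummy; };
    struct toy_device { int id; struct toy_vpe vpe; };  /* vpe embedded by value */

    int main(void)
    {
            struct toy_device dev = { .id = 42 };
            struct toy_vpe *vpe = &dev.vpe;

            /* Recover the containing device from the embedded member. */
            struct toy_device *back = container_of(vpe, struct toy_device, vpe);
            printf("%d\n", back->id);  /* prints 42 */
            return 0;
    }
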
294 struct amdgpu_vpe *vpe = &adev->vpe;
298 vpe_v6_1_set_funcs(vpe);
301 vpe_v6_1_set_funcs(vpe);
302 vpe->collaborate_mode = true;
309 vpe_set_regs(vpe);
311 dev_info(adev->dev, "VPE: collaborate mode %s", vpe->collaborate_mode ? "true" : "false");
319 container_of(work, struct amdgpu_device, vpe.idle_work.work);
322 fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
327 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
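
Lines 319-327 are the familiar delayed-work idle pattern: recover the device from the work item, count fences still outstanding on the VPE ring, and re-arm the timer instead of idling the block while work remains. A sketch of the handler body (the power-down branch taken when no fences remain is not listed and is only assumed):

    struct amdgpu_device *adev =
            container_of(work, struct amdgpu_device, vpe.idle_work.work);
    unsigned int fences = 0;

    fences += amdgpu_fence_count_emitted(&adev->vpe.ring);
    if (fences)
            schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
    /* else: assumed to power-gate / idle the VPE block here */
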
330 static int vpe_common_init(struct amdgpu_vpe *vpe)
332 struct amdgpu_device *adev = container_of(vpe, struct amdgpu_device, vpe);
337 &adev->vpe.cmdbuf_obj,
338 &adev->vpe.cmdbuf_gpu_addr,
339 (void **)&adev->vpe.cmdbuf_cpu_addr);
345 vpe->context_started = false;
346 INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);
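
Lines 337-339 are the output-pointer arguments of a kernel buffer-object allocation for the VPE command buffer; the call itself does not contain "vpe" and so is not listed. A sketch of the likely call shape (size, alignment, and memory domain are assumptions here; the matching free appears at line 386):

    ret = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_GTT,      /* assumed domain */
                                  &adev->vpe.cmdbuf_obj,
                                  &adev->vpe.cmdbuf_gpu_addr,
                                  (void **)&adev->vpe.cmdbuf_cpu_addr);
    if (ret)
            return ret;

    vpe->context_started = false;
    INIT_DELAYED_WORK(&adev->vpe.idle_work, vpe_idle_work_handler);
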
354 struct amdgpu_vpe *vpe = &adev->vpe;
357 ret = vpe_common_init(vpe);
361 ret = vpe_irq_init(vpe);
365 ret = vpe_ring_init(vpe);
369 ret = vpe_init_microcode(vpe);
379 struct amdgpu_vpe *vpe = &adev->vpe;
381 release_firmware(vpe->fw);
382 vpe->fw = NULL;
384 vpe_ring_fini(vpe);
386 amdgpu_bo_free_kernel(&adev->vpe.cmdbuf_obj,
387 &adev->vpe.cmdbuf_gpu_addr,
388 (void **)&adev->vpe.cmdbuf_cpu_addr);
396 struct amdgpu_vpe *vpe = &adev->vpe;
405 ret = vpe_load_microcode(vpe);
409 ret = vpe_ring_start(vpe);
419 struct amdgpu_vpe *vpe = &adev->vpe;
421 vpe_ring_stop(vpe);
433 cancel_delayed_work_sync(&adev->vpe.idle_work);
476 if (!ring->adev->vpe.collaborate_mode)
597 struct amdgpu_vpe *vpe = &adev->vpe;
598 uint32_t preempt_reg = vpe->regs.queue0_preempt;
611 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);
627 WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);
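
Lines 611 and 627 bracket a preemption request on queue 0: the register is asserted and later deasserted once the queue has yielded; whatever wait sits in between does not match "vpe" and is not listed. The implied sequence:

    WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 1);  /* request preemption */
    /* ... assumed: wait for the queue to signal that it has preempted ... */
    WREG32(vpe_get_reg_offset(vpe, ring->me, preempt_reg), 0);  /* clear the request */
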
645 struct amdgpu_vpe *vpe = &adev->vpe;
654 vpe->context_started = false;
665 struct amdgpu_vpe *vpe = &adev->vpe;
672 rptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_hi));
674 rptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_rptr_lo));
684 struct amdgpu_vpe *vpe = &adev->vpe;
691 wptr = RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_hi));
693 wptr |= RREG32(vpe_get_reg_offset(vpe, ring->me, vpe->regs.queue0_rb_wptr_lo));
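
Lines 672/674 and 691/693 assemble a 64-bit ring pointer from two 32-bit register reads; the shift between the hi and lo reads does not contain "vpe" and is therefore missing from the listing. A self-contained illustration of the combine (vpe_combine_hi_lo() is a hypothetical helper, not driver code):

    #include <stdint.h>

    /* Combine the hi/lo halves read from queue0_rb_rptr_hi/lo (or wptr_hi/lo)
     * into one 64-bit pointer value. */
    static inline uint64_t vpe_combine_hi_lo(uint32_t hi, uint32_t lo)
    {
            return ((uint64_t)hi << 32) | lo;
    }
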
703 struct amdgpu_vpe *vpe = &adev->vpe;
715 if (vpe->collaborate_mode)
720 for (i = 0; i < vpe->num_instances; i++) {
726 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
728 WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
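
Lines 715-728 show the write-pointer propagation: when collaborate mode ties several VPE instances together, the same wptr is mirrored to every instance's queue0 registers as a lo/hi pair. A sketch of the loop body (the argument expressions are only partially listed; lower_32_bits()/upper_32_bits() and the shift are assumptions):

    for (i = 0; i < vpe->num_instances; i++) {
            WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_lo),
                   lower_32_bits(ring->wptr << 2));
            WREG32(vpe_get_reg_offset(vpe, i, vpe->regs.queue0_rb_wptr_hi),
                   upper_32_bits(ring->wptr << 2));
    }
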
833 struct amdgpu_vpe *vpe = &adev->vpe;
835 cancel_delayed_work_sync(&adev->vpe.idle_work);
838 if (!vpe->context_started) {
845 context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
850 WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
851 vpe->context_started = true;
859 schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
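
Lines 835-859 pair up as the ring's begin_use/end_use hooks: the idle timer is cancelled while the ring is in use, the context-indicator register is touched once for the very first submission, and the timer is re-armed on release. A sketch of that pairing (the bit flipped in context_notify is not listed and is left as an assumption):

    /* begin_use */
    cancel_delayed_work_sync(&adev->vpe.idle_work);
    if (!vpe->context_started) {
            context_notify = RREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator));
            /* ... assumed: toggle the context-switch notify bit ... */
            WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.context_indicator), context_notify);
            vpe->context_started = true;
    }

    /* end_use */
    schedule_delayed_work(&adev->vpe.idle_work, VPE_IDLE_TIMEOUT);
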
897 adev->vpe.ring.funcs = &vpe_ring_funcs;