Lines matching refs:inst — cross-reference hits in the amdgpu VCN 2.5 driver (drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c). The number at the start of each entry is the line number in that file.

132 VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
139 i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
146 VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
174 adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
176 adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
178 adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
180 adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
182 adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
184 ring = &adev->vcn.inst[j].ring_dec;
196 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
204 ring = &adev->vcn.inst[j].ring_enc[i];
217 &adev->vcn.inst[j].irq, 0,
223 fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
227 amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
263 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
303 adev->vcn.inst[j].ring_enc[0].sched.ready = true;
304 adev->vcn.inst[j].ring_enc[1].sched.ready = false;
305 adev->vcn.inst[j].ring_enc[2].sched.ready = false;
306 adev->vcn.inst[j].ring_dec.sched.ready = true;
309 ring = &adev->vcn.inst[j].ring_dec;
319 ring = &adev->vcn.inst[j].ring_enc[i];
359 amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
433 lower_32_bits(adev->vcn.inst[i].gpu_addr));
435 upper_32_bits(adev->vcn.inst[i].gpu_addr));
444 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
446 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
452 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
454 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
460 lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
462 upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
497 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
500 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
518 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
521 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
538 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
541 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
550 lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
553 upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
821 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
835 adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
917 ring = &adev->vcn.inst[inst_idx].ring_dec;
1045 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
1109 ring = &adev->vcn.inst[i].ring_dec;
1135 ring = &adev->vcn.inst[i].ring_enc[0];
1144 ring = &adev->vcn.inst[i].ring_enc[1];
1260 lower_32_bits(adev->vcn.inst[i].gpu_addr));
1264 upper_32_bits(adev->vcn.inst[i].gpu_addr));
1277 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1281 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
1291 lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1296 upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
1305 ring = &adev->vcn.inst[i].ring_enc[0];
1318 ring = &adev->vcn.inst[i].ring_dec;
1456 if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
1458 adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
1467 volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
1485 ring = &adev->vcn.inst[inst_idx].ring_enc[0];
1495 ring = &adev->vcn.inst[inst_idx].ring_enc[1];
1517 adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
1614 if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
1631 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1655 if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
1708 adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
1709 adev->vcn.inst[i].ring_dec.me = i;
1722 adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
1723 adev->vcn.inst[j].ring_enc[i].me = j;
1841 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
1844 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
1847 amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
1875 adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
1876 adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
1878 adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
1879 adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
1964 uint32_t inst, sub;
1967 for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
1970 vcn_v2_6_query_poison_by_instance(adev, inst, sub);
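
Two access patterns account for most of the hits above. The first is the lower_32_bits()/upper_32_bits() pairs (source lines 433-462, 497-553 and 1260-1296): each 64-bit GPU address held in adev->vcn.inst[] (firmware BO, stack offset, fw_shared buffer) is split across a LOW/HIGH pair of 32-bit registers. The sketch below is a minimal, standalone illustration; the macro bodies mirror the kernel's helpers from linux/kernel.h, while the address value and the printout are made up for demonstration.

#include <stdint.h>
#include <stdio.h>

/* Same definitions as the kernel's helpers; the (>> 16 >> 16) form in
 * upper_32_bits() avoids a shift as wide as the type if the argument
 * happens to be only 32 bits. */
#define lower_32_bits(n) ((uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
	/* hypothetical VCN firmware BO address, for illustration only */
	uint64_t gpu_addr = 0x0000008000123000ULL;

	/* the driver writes these two halves into the corresponding
	 * LOW/HIGH register pair of the instance being programmed */
	printf("low:  0x%08x\n", lower_32_bits(gpu_addr));
	printf("high: 0x%08x\n", upper_32_bits(gpu_addr));
	return 0;
}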
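The second pattern is the per-instance indexing itself: every ring, IRQ source and register offset is reached through the adev->vcn.inst[] array, and the set_ring_funcs hits (source lines 1708-1723) show each ring being stamped with its owning instance in the "me" field, which the ring and IRQ paths (lines 1614-1655, 1841-1847) later use to index back into the array. Below is a simplified stand-in model under assumed types; none of these struct or function names are the real amdgpu ones.

#include <stdio.h>

#define NUM_INST      2   /* illustrative instance count */
#define NUM_ENC_RINGS 3   /* covers the enc[0..2] indices seen above */

struct ring {                     /* stand-in for struct amdgpu_ring */
	int me;                   /* index of the owning VCN instance */
	int sched_ready;
};

struct vcn_inst {                 /* stand-in for adev->vcn.inst[i] */
	struct ring ring_dec;
	struct ring ring_enc[NUM_ENC_RINGS];
};

/* Mirror of the set_ring_funcs pattern: walk every instance and tag
 * each of its rings with the owner index, so that a handler holding
 * only a ring pointer can recompute &inst[ring->me]. */
static void tag_ring_owners(struct vcn_inst *inst, int num_inst)
{
	int i, r;

	for (i = 0; i < num_inst; i++) {
		inst[i].ring_dec.me = i;
		for (r = 0; r < NUM_ENC_RINGS; r++)
			inst[i].ring_enc[r].me = i;
	}
}

int main(void)
{
	struct vcn_inst inst[NUM_INST] = {0};

	tag_ring_owners(inst, NUM_INST);
	printf("enc ring 0 of inst 1 is owned by instance %d\n",
	       inst[1].ring_enc[0].me);
	return 0;
}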