/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_vcn.h"
#include "amdgpu_pm.h"
#include "soc15.h"
#include "soc15d.h"
#include "vcn_v2_0.h"
#include "mmsch_v1_0.h"
#include "vcn_v2_5.h"

#include "vcn/vcn_2_5_offset.h"
#include "vcn/vcn_2_5_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"

#define VCN_VID_SOC_ADDRESS_2_0					0x1fa00
#define VCN1_VID_SOC_ADDRESS_3_0				0x48200

#define mmUVD_CONTEXT_ID_INTERNAL_OFFSET			0x27
#define mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET			0x0f
#define mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET			0x10
#define mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET			0x11
#define mmUVD_NO_OP_INTERNAL_OFFSET				0x29
#define mmUVD_GP_SCRATCH8_INTERNAL_OFFSET			0x66
#define mmUVD_SCRATCH9_INTERNAL_OFFSET				0xc01d

#define mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET			0x431
#define mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET		0x3b4
#define mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET		0x3b5
#define mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET			0x25c

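/* Arcturus (VCN 2.5) carries two VCN hardware instances. */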
#define VCN25_MAX_HW_INSTANCES_ARCTURUS			2

static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
static int vcn_v2_5_set_powergating_state(void *handle,
				enum amd_powergating_state state);
static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state);
static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);

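/* One IH client ID per VCN instance, indexed the same as adev->vcn.inst[]. */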
static int amdgpu_ih_clientid_vcns[] = {
	SOC15_IH_CLIENTID_VCN,
	SOC15_IH_CLIENTID_VCN1
};

/**
 * vcn_v2_5_early_init - set function pointers and load microcode
 *
 * @handle: amdgpu_device pointer
 *
 * Set ring and irq function pointers
 * Load microcode from filesystem
 */
static int vcn_v2_5_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev)) {
		adev->vcn.num_vcn_inst = 2;
		adev->vcn.harvest_config = 0;
		adev->vcn.num_enc_rings = 1;
	} else {
		u32 harvest;
		int i;

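		/*
		 * Read the harvest fuses: a set UVD_DISABLE bit means that
		 * instance has been fused off and must be skipped.
		 */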
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
				adev->vcn.harvest_config |= 1 << i;
		}
		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
					AMDGPU_VCN_HARVEST_VCN1))
			/* both instances are harvested, disable the block */
			return -ENOENT;

		adev->vcn.num_enc_rings = 2;
	}

	vcn_v2_5_set_dec_ring_funcs(adev);
	vcn_v2_5_set_enc_ring_funcs(adev);
	vcn_v2_5_set_irq_funcs(adev);
	vcn_v2_5_set_ras_funcs(adev);

	return amdgpu_vcn_early_init(adev);
}

/**
 * vcn_v2_5_sw_init - sw init for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Load firmware and sw initialization
 */
static int vcn_v2_5_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, j, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		/* VCN DEC TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->vcn.inst[j].irq);
		if (r)
			return r;

		/* VCN ENC TRAP */
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
			if (r)
				return r;
		}

		/* VCN POISON TRAP */
		r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
		if (r)
			return r;
	}

	r = amdgpu_vcn_sw_init(adev);
	if (r)
		return r;

	amdgpu_vcn_setup_ucode(adev);

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
		volatile struct amdgpu_fw_shared *fw_shared;

		if (adev->vcn.harvest_config & (1 << j))
			continue;
		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;

		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);

		ring = &adev->vcn.inst[j].ring_dec;
		ring->use_doorbell = true;

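		/*
		 * Doorbell layout (in 32-bit doorbell slots): under SR-IOV
		 * each instance takes two slots (dec, then one enc ring); on
		 * bare metal instances are spaced eight slots apart, with dec
		 * at slot 0 and the enc rings starting at slot 2.
		 */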
		ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
				(amdgpu_sriov_vf(adev) ? 2*j : 8*j);

		if (amdgpu_ip_version(adev, UVD_HWIP, 0) == IP_VERSION(2, 5, 0))
			ring->vm_hub = AMDGPU_MMHUB1(0);
		else
			ring->vm_hub = AMDGPU_MMHUB0(0);

		sprintf(ring->name, "vcn_dec_%d", j);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq,
				     0, AMDGPU_RING_PRIO_DEFAULT, NULL);
		if (r)
			return r;

		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);

			ring = &adev->vcn.inst[j].ring_enc[i];
			ring->use_doorbell = true;

			ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) +
					(amdgpu_sriov_vf(adev) ? (1 + i + 2*j) : (2 + i + 8*j));

			if (amdgpu_ip_version(adev, UVD_HWIP, 0) ==
			    IP_VERSION(2, 5, 0))
				ring->vm_hub = AMDGPU_MMHUB1(0);
			else
				ring->vm_hub = AMDGPU_MMHUB0(0);

			sprintf(ring->name, "vcn_enc_%d.%d", j, i);
			r = amdgpu_ring_init(adev, ring, 512,
					     &adev->vcn.inst[j].irq, 0,
					     hw_prio, NULL);
			if (r)
				return r;
		}

		fw_shared = adev->vcn.inst[j].fw_shared.cpu_addr;
		fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG);

		if (amdgpu_vcnfw_log)
			amdgpu_vcn_fwlog_init(&adev->vcn.inst[j]);
	}

	if (amdgpu_sriov_vf(adev)) {
		r = amdgpu_virt_alloc_mm_table(adev);
		if (r)
			return r;
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;

	r = amdgpu_vcn_ras_sw_init(adev);
	if (r)
		return r;

	return 0;
}

/**
 * vcn_v2_5_sw_fini - sw fini for VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * VCN suspend and free up sw allocation
 */
static int vcn_v2_5_sw_fini(void *handle)
{
	int i, r, idx;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	volatile struct amdgpu_fw_shared *fw_shared;

	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
			fw_shared->present_flag_0 = 0;
		}
		drm_dev_exit(idx);
	}

	if (amdgpu_sriov_vf(adev))
		amdgpu_virt_free_mm_table(adev);

	r = amdgpu_vcn_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vcn_sw_fini(adev);

	return r;
}

/**
 * vcn_v2_5_hw_init - start and test VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int vcn_v2_5_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring;
	int i, j, r = 0;

	if (amdgpu_sriov_vf(adev))
		r = vcn_v2_5_sriov_start(adev);

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;

		if (amdgpu_sriov_vf(adev)) {
			adev->vcn.inst[j].ring_enc[0].sched.ready = true;
			adev->vcn.inst[j].ring_enc[1].sched.ready = false;
			adev->vcn.inst[j].ring_enc[2].sched.ready = false;
			adev->vcn.inst[j].ring_dec.sched.ready = true;
		} else {
			ring = &adev->vcn.inst[j].ring_dec;

			adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
						     ring->doorbell_index, j);

			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;

			for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
				ring = &adev->vcn.inst[j].ring_enc[i];
				r = amdgpu_ring_test_helper(ring);
				if (r)
					goto done;
			}
		}
	}

done:
	if (!r)
		DRM_INFO("VCN decode and encode initialized successfully (under %s).\n",
			(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ? "DPG Mode" : "SPG Mode");

	return r;
}

/**
 * vcn_v2_5_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the VCN block, mark ring as not ready any more
 */
static int vcn_v2_5_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	cancel_delayed_work_sync(&adev->vcn.idle_work);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
			vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
	}

	return 0;
}

/**
 * vcn_v2_5_suspend - suspend VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * HW fini and suspend VCN block
 */
static int vcn_v2_5_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vcn_v2_5_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vcn_suspend(adev);

	return r;
}

/**
 * vcn_v2_5_resume - resume VCN block
 *
 * @handle: amdgpu_device pointer
 *
 * Resume firmware and hw init VCN block
 */
static int vcn_v2_5_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vcn_resume(adev);
	if (r)
		return r;

	r = vcn_v2_5_hw_init(adev);

	return r;
}

/**
 * vcn_v2_5_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCN memory controller know its offsets
 */
static void vcn_v2_5_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size;
	uint32_t offset;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
		/* cache window 0: fw */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi));
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0, 0);
			offset = 0;
		} else {
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET0,
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE0, size);

		/* cache window 1: stack */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET1, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE);

		/* cache window 2: context */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_OFFSET2, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE);

		/* non-cache window */
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
			lower_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
			upper_32_bits(adev->vcn.inst[i].fw_shared.gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
		WREG32_SOC15(VCN, i, mmUVD_VCPU_NONCACHE_SIZE0,
			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
	}
}

static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[inst_idx]->size + 4);
	uint32_t offset;

	/* cache window 0: fw */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		if (!indirect) {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				(adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		} else {
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect);
			WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
				VCN, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect);
		}
		offset = 0;
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect);
		offset = size;
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET0),
			AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect);
	}

	if (!indirect)
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect);
	else
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect);

	/* cache window 1: stack */
	if (!indirect) {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	} else {
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect);
		WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
			VCN, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect);
	}
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect);

	/* cache window 2: context */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect);

	/* non-cache window */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_NONCACHE_SIZE0),
		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)), 0, indirect);

	/* VCN global tiling registers */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect);
}

/**
 * vcn_v2_5_disable_clock_gating - disable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Disable clock gating for VCN block
 */
static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* UVD disable CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_GATE);
		data &= ~(UVD_CGC_GATE__SYS_MASK
			| UVD_CGC_GATE__UDEC_MASK
			| UVD_CGC_GATE__MPEG2_MASK
			| UVD_CGC_GATE__REGS_MASK
			| UVD_CGC_GATE__RBC_MASK
			| UVD_CGC_GATE__LMI_MC_MASK
			| UVD_CGC_GATE__LMI_UMC_MASK
			| UVD_CGC_GATE__IDCT_MASK
			| UVD_CGC_GATE__MPRD_MASK
			| UVD_CGC_GATE__MPC_MASK
			| UVD_CGC_GATE__LBSI_MASK
			| UVD_CGC_GATE__LRBBM_MASK
			| UVD_CGC_GATE__UDEC_RE_MASK
			| UVD_CGC_GATE__UDEC_CM_MASK
			| UVD_CGC_GATE__UDEC_IT_MASK
			| UVD_CGC_GATE__UDEC_DB_MASK
			| UVD_CGC_GATE__UDEC_MP_MASK
			| UVD_CGC_GATE__WCB_MASK
			| UVD_CGC_GATE__VCPU_MASK
			| UVD_CGC_GATE__MMSCH_MASK);

		WREG32_SOC15(VCN, i, mmUVD_CGC_GATE, data);

		SOC15_WAIT_ON_RREG(VCN, i, mmUVD_CGC_GATE, 0, 0xFFFFFFFF);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK
			| UVD_CGC_CTRL__MMSCH_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		/* turn on SUVD clocks */
		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE);
		data |= (UVD_SUVD_CGC_GATE__SRE_MASK
			| UVD_SUVD_CGC_GATE__SIT_MASK
			| UVD_SUVD_CGC_GATE__SMP_MASK
			| UVD_SUVD_CGC_GATE__SCM_MASK
			| UVD_SUVD_CGC_GATE__SDB_MASK
			| UVD_SUVD_CGC_GATE__SRE_H264_MASK
			| UVD_SUVD_CGC_GATE__SRE_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SIT_H264_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCM_H264_MASK
			| UVD_SUVD_CGC_GATE__SCM_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SDB_H264_MASK
			| UVD_SUVD_CGC_GATE__SDB_HEVC_MASK
			| UVD_SUVD_CGC_GATE__SCLR_MASK
			| UVD_SUVD_CGC_GATE__UVD_SC_MASK
			| UVD_SUVD_CGC_GATE__ENT_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_DEC_MASK
			| UVD_SUVD_CGC_GATE__SIT_HEVC_ENC_MASK
			| UVD_SUVD_CGC_GATE__SITE_MASK
			| UVD_SUVD_CGC_GATE__SRE_VP9_MASK
			| UVD_SUVD_CGC_GATE__SCM_VP9_MASK
			| UVD_SUVD_CGC_GATE__SIT_VP9_DEC_MASK
			| UVD_SUVD_CGC_GATE__SDB_VP9_MASK
			| UVD_SUVD_CGC_GATE__IME_HEVC_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_GATE, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

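/*
 * DPG variant of the clock-gating setup above: the same UVD_CGC_CTRL
 * programming, but issued through WREG32_SOC15_DPG_MODE so it can be staged
 * into the indirect SRAM when indirect mode is selected.
 */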
static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
		uint8_t sram_sel, int inst_idx, uint8_t indirect)
{
	uint32_t reg_data = 0;

	/* enable sw clock gating control */
	if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
		reg_data = 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	else
		reg_data = 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
	reg_data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
	reg_data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
	reg_data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
		 UVD_CGC_CTRL__SYS_MODE_MASK |
		 UVD_CGC_CTRL__UDEC_MODE_MASK |
		 UVD_CGC_CTRL__MPEG2_MODE_MASK |
		 UVD_CGC_CTRL__REGS_MODE_MASK |
		 UVD_CGC_CTRL__RBC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_MC_MODE_MASK |
		 UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
		 UVD_CGC_CTRL__IDCT_MODE_MASK |
		 UVD_CGC_CTRL__MPRD_MODE_MASK |
		 UVD_CGC_CTRL__MPC_MODE_MASK |
		 UVD_CGC_CTRL__LBSI_MODE_MASK |
		 UVD_CGC_CTRL__LRBBM_MODE_MASK |
		 UVD_CGC_CTRL__WCB_MODE_MASK |
		 UVD_CGC_CTRL__VCPU_MODE_MASK |
		 UVD_CGC_CTRL__MMSCH_MODE_MASK);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect);

	/* turn off clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect);

	/* turn on SUVD clock gating */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect);

	/* turn on sw mode in UVD_SUVD_CGC_CTRL */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect);
}

/**
 * vcn_v2_5_enable_clock_gating - enable VCN clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Enable clock gating for VCN block
 */
static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data = 0;
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable UVD CGC */
		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
			data |= 1 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		else
			data |= 0 << UVD_CGC_CTRL__DYN_CLOCK_MODE__SHIFT;
		data |= 1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT;
		data |= 4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_CGC_CTRL);
		data |= (UVD_CGC_CTRL__UDEC_RE_MODE_MASK
			| UVD_CGC_CTRL__UDEC_CM_MODE_MASK
			| UVD_CGC_CTRL__UDEC_IT_MODE_MASK
			| UVD_CGC_CTRL__UDEC_DB_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MP_MODE_MASK
			| UVD_CGC_CTRL__SYS_MODE_MASK
			| UVD_CGC_CTRL__UDEC_MODE_MASK
			| UVD_CGC_CTRL__MPEG2_MODE_MASK
			| UVD_CGC_CTRL__REGS_MODE_MASK
			| UVD_CGC_CTRL__RBC_MODE_MASK
			| UVD_CGC_CTRL__LMI_MC_MODE_MASK
			| UVD_CGC_CTRL__LMI_UMC_MODE_MASK
			| UVD_CGC_CTRL__IDCT_MODE_MASK
			| UVD_CGC_CTRL__MPRD_MODE_MASK
			| UVD_CGC_CTRL__MPC_MODE_MASK
			| UVD_CGC_CTRL__LBSI_MODE_MASK
			| UVD_CGC_CTRL__LRBBM_MODE_MASK
			| UVD_CGC_CTRL__WCB_MODE_MASK
			| UVD_CGC_CTRL__VCPU_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_CGC_CTRL, data);

		data = RREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL);
		data |= (UVD_SUVD_CGC_CTRL__SRE_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SIT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SMP_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCM_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SDB_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SCLR_MODE_MASK
			| UVD_SUVD_CGC_CTRL__UVD_SC_MODE_MASK
			| UVD_SUVD_CGC_CTRL__ENT_MODE_MASK
			| UVD_SUVD_CGC_CTRL__IME_MODE_MASK
			| UVD_SUVD_CGC_CTRL__SITE_MODE_MASK);
		WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
	}
}

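/* RAS interrupt wiring only exists on VCN 2.6 (Aldebaran); plain 2.5 parts
 * return early.
 */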
static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
				bool indirect)
{
	uint32_t tmp;

	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
		return;

	tmp = VCN_RAS_CNTL__VCPU_VCODEC_REARM_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_IH_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_PMI_EN_MASK |
	      VCN_RAS_CNTL__VCPU_VCODEC_STALL_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmVCN_RAS_CNTL),
			      tmp, 0, indirect);

	tmp = UVD_VCPU_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_VCPU_INT_EN),
			      tmp, 0, indirect);

	tmp = UVD_SYS_INT_EN__RASCNTL_VCPU_VCODEC_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx,
			      SOC15_DPG_MODE_OFFSET(VCN, 0, mmUVD_SYS_INT_EN),
			      tmp, 0, indirect);
}

static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
{
	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;

	/* disable register anti-hang mechanism */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 1,
		~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	/* enable dynamic power gating mode */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS);
	tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK;
	tmp |= UVD_POWER_STATUS__UVD_PG_EN_MASK;
	WREG32_SOC15(VCN, inst_idx, mmUVD_POWER_STATUS, tmp);

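	/*
	 * In indirect mode the WREG32_SOC15_DPG_MODE calls below do not touch
	 * the hardware; they are staged into the per-instance dpg_sram buffer
	 * and committed later through amdgpu_vcn_psp_update_sram().
	 */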
	if (indirect)
		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;

	/* enable clock gating */
	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);

	/* enable VCPU clock */
	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	tmp |= UVD_VCPU_CNTL__BLK_RST_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* disable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN), 0, 0, indirect);

	/* setup mmUVD_LMI_CTRL */
	tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__CRC_RESET_MASK |
		UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		(8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		0x00100000L);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL), tmp, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_CNTL),
		0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXA0),
		((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUXB0),
		((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
		 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
		 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MPC_SET_MUX),
		((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);

	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);

	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect);

	/* enable LMI MC and UMC channels */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);

	vcn_v2_6_enable_ras(adev, inst_idx, indirect);

	/* unblock VCPU register access */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect);

	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
	tmp |= UVD_VCPU_CNTL__CLK_EN_MASK;
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect);

	/* enable master interrupt */
	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
		VCN, 0, mmUVD_MASTINT_EN),
		UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect);

	if (indirect)
		amdgpu_vcn_psp_update_sram(adev, inst_idx, 0);

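	/*
	 * From here on the writes go straight to MMIO: the staged (or direct)
	 * VCPU bring-up sequence above is committed, so the decode ring
	 * buffer registers are programmed directly.
	 */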
	ring = &adev->vcn.inst[inst_idx].ring_dec;
	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_CNTL, tmp);

	/* Stall DPG before WPTR/RPTR reset */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
		~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
	fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;

	/* set the write pointer delay */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR_ADDR,
		(upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
		lower_32_bits(ring->gpu_addr));
	WREG32_SOC15(VCN, inst_idx, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
		upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR, 0);

	WREG32_SOC15(VCN, inst_idx, mmUVD_SCRATCH2, 0);

	ring->wptr = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_RPTR);
	WREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR,
		lower_32_bits(ring->wptr));

	fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;
	/* Unstall DPG */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
		0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

	return 0;
}

static int vcn_v2_5_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t rb_bufsz, tmp;
	int i, j, k, r;

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, true);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
			continue;
		}

		/* disable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

		/* set uvd status busy */
		tmp = RREG32_SOC15(VCN, i, mmUVD_STATUS) | UVD_STATUS__UVD_BUSY;
		WREG32_SOC15(VCN, i, mmUVD_STATUS, tmp);
	}

	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
		return 0;

	/* SW clock gating */
	vcn_v2_5_disable_clock_gating(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* enable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK);

		/* disable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN), 0,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* setup mmUVD_LMI_CTRL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL);
		tmp &= ~0xff;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL, tmp | 0x8 |
			UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
			UVD_LMI_CTRL__MASK_MC_URGENT_MASK |
			UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
			UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK);

		/* setup mmUVD_MPC_CNTL */
		tmp = RREG32_SOC15(VCN, i, mmUVD_MPC_CNTL);
		tmp &= ~UVD_MPC_CNTL__REPLACEMENT_MODE_MASK;
		tmp |= 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT;
		WREG32_SOC15(VCN, i, mmUVD_MPC_CNTL, tmp);

		/* setup UVD_MPC_SET_MUXA0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXA0,
			((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)));

		/* setup UVD_MPC_SET_MUXB0 */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUXB0,
			((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) |
			(0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) |
			(0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)));

		/* setup mmUVD_MPC_SET_MUX */
		WREG32_SOC15(VCN, i, mmUVD_MPC_SET_MUX,
			((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) |
			(0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
			(0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
	}

	vcn_v2_5_mc_resume(adev);

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;

		if (adev->vcn.harvest_config & (1 << i))
			continue;
		/* VCN global tiling registers */
		WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
			adev->gfx.config.gb_addr_config);

		/* enable LMI MC and UMC channels */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_LMI_CTRL2), 0,
			~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);

		/* unblock VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL), 0,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

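		/*
		 * Firmware boot handshake: poll mmUVD_STATUS for the 0x2 bit
		 * (set once the VCPU comes up), retrying with a VCPU block
		 * reset up to 10 times, 100 polls each.
		 */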
		for (k = 0; k < 10; ++k) {
			uint32_t status;

			for (j = 0; j < 100; ++j) {
				status = RREG32_SOC15(VCN, i, mmUVD_STATUS);
				if (status & 2)
					break;
				if (amdgpu_emu_mode == 1)
					msleep(500);
				else
					mdelay(10);
			}
			r = 0;
			if (status & 2)
				break;

			DRM_ERROR("VCN decode not responding, trying to reset the VCPU!!!\n");
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
				UVD_VCPU_CNTL__BLK_RST_MASK,
				~UVD_VCPU_CNTL__BLK_RST_MASK);
			mdelay(10);
			WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
				~UVD_VCPU_CNTL__BLK_RST_MASK);

			mdelay(10);
			r = -1;
		}

		if (r) {
			DRM_ERROR("VCN decode not responding, giving up!!!\n");
			return r;
		}

		/* enable master interrupt */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_MASTINT_EN),
			UVD_MASTINT_EN__VCPU_EN_MASK,
			~UVD_MASTINT_EN__VCPU_EN_MASK);

		/* clear the busy bit of VCN_STATUS */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS), 0,
			~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_VMID, 0);

		ring = &adev->vcn.inst[i].ring_dec;
		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);

		fw_shared->multi_queue.decode_queue_mode |= FW_QUEUE_RING_RESET;
		/* program the RB_BASE for ring buffer */
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

		/* Initialize the ring buffer's read and write pointers */
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR, 0);

		ring->wptr = RREG32_SOC15(VCN, i, mmUVD_RBC_RB_RPTR);
		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_WPTR,
				lower_32_bits(ring->wptr));
		fw_shared->multi_queue.decode_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[0];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE, ring->ring_size / 4);
		fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

		fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
		ring = &adev->vcn.inst[i].ring_enc[1];
		WREG32_SOC15(VCN, i, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32_SOC15(VCN, i, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32_SOC15(VCN, i, mmUVD_RB_SIZE2, ring->ring_size / 4);
		fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;
	}

	return 0;
}

static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev,
				struct amdgpu_mm_table *table)
{
	uint32_t data = 0, loop = 0, size = 0;
	uint64_t addr = table->gpu_addr;
	struct mmsch_v1_1_init_header *header = NULL;

	header = (struct mmsch_v1_1_init_header *)table->cpu_addr;
	size = header->total_size;

	/*
	 * 1, write the GPU mc addr of the memory descriptor location to
	 * MMSCH_VF_CTX_ADDR_LO/HI
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));

	/* 2, update vmid of descriptor */
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID);
	data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
	/* use domain0 for MM scheduler */
	data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_VMID, data);

	/* 3, notify mmsch about the size of this descriptor */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_CTX_SIZE, size);

	/* 4, set resp to zero */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP, 0);

	/*
	 * 5, kick off the initialization and wait until
	 * MMSCH_VF_MAILBOX_RESP becomes non-zero
	 */
	WREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);

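	/*
	 * Poll the response register; the handshake is considered complete
	 * once both bits of 0x10000002 are set.
	 */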
	data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
	loop = 10;
	while ((data & 0x10000002) != 0x10000002) {
		udelay(100);
		data = RREG32_SOC15(VCN, 0, mmMMSCH_VF_MAILBOX_RESP);
		loop--;
		if (!loop)
			break;
	}

	if (!loop) {
		dev_err(adev->dev,
			"failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n",
			data);
		return -EBUSY;
	}

	return 0;
}

static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	uint32_t offset, size, tmp, i, rb_bufsz;
	uint32_t table_size = 0;
	struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } };
	struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } };
	struct mmsch_v1_0_cmd_end end = { { 0 } };
	uint32_t *init_table = adev->virt.mm_table.cpu_addr;
	struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table;

	direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
	direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
	end.cmd_header.command_type = MMSCH_COMMAND__END;

	header->version = MMSCH_VERSION;
	header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2;
	init_table += header->total_size;

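	/*
	 * Build one register init table per instance; each entry records its
	 * offset and size in the shared header so the MMSCH firmware can
	 * replay the writes on behalf of the VF.
	 */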
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		header->eng[i].table_offset = header->total_size;
		header->eng[i].init_status = 0;
		header->eng[i].table_size = 0;

		table_size = 0;

		MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_STATUS),
			~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY);

		size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw[i]->size + 4);
		/* mc resume */
		if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
			offset = 0;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0), 0);
		} else {
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
				lower_32_bits(adev->vcn.inst[i].gpu_addr));
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i,
					mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
				upper_32_bits(adev->vcn.inst[i].gpu_addr));
			offset = size;
			MMSCH_V1_0_INSERT_DIRECT_WT(
				SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET0),
				AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
		}

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE0),
			size);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET1),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE1),
			AMDGPU_VCN_STACK_SIZE);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
			lower_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
			upper_32_bits(adev->vcn.inst[i].gpu_addr + offset +
				AMDGPU_VCN_STACK_SIZE));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_OFFSET2),
			0);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CACHE_SIZE2),
			AMDGPU_VCN_CONTEXT_SIZE);

		ring = &adev->vcn.inst[i].ring_enc[0];
		ring->wptr = 0;

		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_LO),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_BASE_HI),
			upper_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RB_SIZE),
			ring->ring_size / 4);

		ring = &adev->vcn.inst[i].ring_dec;
		ring->wptr = 0;
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
			lower_32_bits(ring->gpu_addr));
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i,
				mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
			upper_32_bits(ring->gpu_addr));

		/* force RBC into idle state */
		rb_bufsz = order_base_2(ring->ring_size);
		tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
		MMSCH_V1_0_INSERT_DIRECT_WT(
			SOC15_REG_OFFSET(VCN, i, mmUVD_RBC_RB_CNTL), tmp);

		/* add end packet */
		memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end));
		table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4;
		init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4;

		/* refine header */
		header->eng[i].table_size = table_size;
		header->total_size += table_size;
	}

	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
}

static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

	/* Wait for power status to be 1 */
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

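	/*
	 * Drain the rings: wait for every read pointer to catch up with its
	 * write pointer before dynamic power gating is turned off.
	 */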
	/* wait for read ptr to be equal to write ptr */
	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2);
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RB_RPTR2, tmp, 0xFFFFFFFF);

	tmp = RREG32_SOC15(VCN, inst_idx, mmUVD_RBC_RB_WPTR) & 0x7FFFFFFF;
	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_RBC_RB_RPTR, tmp, 0xFFFFFFFF);

	SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 1,
		UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS), 0,
			~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	return 0;
}

static int vcn_v2_5_stop(struct amdgpu_device *adev)
{
	uint32_t tmp;
	int i, r = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v2_5_stop_dpg_mode(adev, i);
			continue;
		}

		/* wait for vcn idle */
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
		if (r)
			return r;

		tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__READ_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_MASK |
			UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block LMI UMC channel */
		tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
		tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK;
		WREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2, tmp);

		tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK |
			UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
		r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
		if (r)
			return r;

		/* block VCPU register access */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
			UVD_RB_ARB_CTRL__VCPU_DIS_MASK,
			~UVD_RB_ARB_CTRL__VCPU_DIS_MASK);

		/* reset VCPU */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
			UVD_VCPU_CNTL__BLK_RST_MASK,
			~UVD_VCPU_CNTL__BLK_RST_MASK);

		/* disable VCPU clock */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL), 0,
			~(UVD_VCPU_CNTL__CLK_EN_MASK));

		/* clear status */
		WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);

		vcn_v2_5_enable_clock_gating(adev);

		/* enable register anti-hang mechanism */
		WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
			UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
			~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
	}

	if (adev->pm.dpm_enabled)
		amdgpu_dpm_enable_uvd(adev, false);

	return 0;
}

static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
				int inst_idx, struct dpg_pause_state *new_state)
{
	struct amdgpu_ring *ring;
	uint32_t reg_data = 0;
	int ret_code = 0;

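	/*
	 * Pause protocol: request the pause through mmUVD_DPG_PAUSE, wait
	 * for the firmware ACK, stall DPG power-up while the encode ring
	 * pointers are reset, then unstall.
	 */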
	/* pause/unpause if state is changed */
	if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
		DRM_DEBUG("dpg pause state changed %d -> %d",
			adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
		reg_data = RREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE) &
			(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

		if (new_state->fw_based == VCN_DPG_STATE__PAUSE) {
			ret_code = SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);

			if (!ret_code) {
				volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;

				/* pause DPG */
				reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
				WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);

				/* wait for ACK */
				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_DPG_PAUSE,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK,
					   UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);

				/* Stall DPG before WPTR/RPTR reset */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
					   ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				/* Restore */
				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_generalpurpose_queue_mode &= ~FW_QUEUE_RING_RESET;

				fw_shared->multi_queue.encode_lowlatency_queue_mode |= FW_QUEUE_RING_RESET;
				ring = &adev->vcn.inst[inst_idx].ring_enc[1];
				ring->wptr = 0;
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_LO2, ring->gpu_addr);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_SIZE2, ring->ring_size / 4);
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
				WREG32_SOC15(VCN, inst_idx, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
				fw_shared->multi_queue.encode_lowlatency_queue_mode &= ~FW_QUEUE_RING_RESET;

				/* Unstall DPG */
				WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, mmUVD_POWER_STATUS),
					   0, ~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);

				SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS,
					   UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
			}
		} else {
			reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
			WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_PAUSE, reg_data);
			SOC15_WAIT_ON_RREG(VCN, inst_idx, mmUVD_POWER_STATUS, 0x1,
				UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
		}
		adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
	}

	return 0;
}

/**
 * vcn_v2_5_dec_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_RPTR);
}

/**
 * vcn_v2_5_dec_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vcn_v2_5_dec_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell)
		return *ring->wptr_cpu_addr;
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR);
}

/**
 * vcn_v2_5_dec_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vcn_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
	} else {
		WREG32_SOC15(VCN, ring->me, mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_DEC,
	.align_mask = 0xf,
	.secure_submission_supported = true,
	.get_rptr = vcn_v2_5_dec_ring_get_rptr,
	.get_wptr = vcn_v2_5_dec_ring_get_wptr,
	.set_wptr = vcn_v2_5_dec_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
		8 + /* vcn_v2_0_dec_ring_emit_vm_flush */
		14 + 14 + /* vcn_v2_0_dec_ring_emit_fence x2 vm fence */
		6,
	.emit_ib_size = 8, /* vcn_v2_0_dec_ring_emit_ib */
	.emit_ib = vcn_v2_0_dec_ring_emit_ib,
	.emit_fence = vcn_v2_0_dec_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush,
	.test_ring = vcn_v2_0_dec_ring_test_ring,
	.test_ib = amdgpu_vcn_dec_ring_test_ib,
	.insert_nop = vcn_v2_0_dec_ring_insert_nop,
	.insert_start = vcn_v2_0_dec_ring_insert_start,
	.insert_end = vcn_v2_0_dec_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_dec_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

/**
 * vcn_v2_5_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0])
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR);
	else
		return RREG32_SOC15(VCN, ring->me, mmUVD_RB_RPTR2);
}

/**
 * vcn_v2_5_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t vcn_v2_5_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR);
	} else {
		if (ring->use_doorbell)
			return *ring->wptr_cpu_addr;
		else
			return RREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2);
	}
}

/**
 * vcn_v2_5_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void vcn_v2_5_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vcn.inst[ring->me].ring_enc[0]) {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		}
	} else {
		if (ring->use_doorbell) {
			*ring->wptr_cpu_addr = lower_32_bits(ring->wptr);
			WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
		} else {
			WREG32_SOC15(VCN, ring->me, mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		}
	}
}

static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_VCN_ENC,
	.align_mask = 0x3f,
	.nop = VCN_ENC_CMD_NO_OP,
	.get_rptr = vcn_v2_5_enc_ring_get_rptr,
	.get_wptr = vcn_v2_5_enc_ring_get_wptr,
	.set_wptr = vcn_v2_5_enc_ring_set_wptr,
	.emit_frame_size =
		SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 +
		SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 +
		4 + /* vcn_v2_0_enc_ring_emit_vm_flush */
		5 + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */
		1, /* vcn_v2_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */
	.emit_ib = vcn_v2_0_enc_ring_emit_ib,
	.emit_fence = vcn_v2_0_enc_ring_emit_fence,
	.emit_vm_flush = vcn_v2_0_enc_ring_emit_vm_flush,
	.test_ring = amdgpu_vcn_enc_ring_test_ring,
	.test_ib = amdgpu_vcn_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = vcn_v2_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vcn_ring_begin_use,
	.end_use = amdgpu_vcn_ring_end_use,
	.emit_wreg = vcn_v2_0_enc_ring_emit_wreg,
	.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait,
	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
};

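/**
 * vcn_v2_5_set_dec_ring_funcs - set dec ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set the decode ring functions for every unharvested VCN instance
 */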
static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
		adev->vcn.inst[i].ring_dec.me = i;
		DRM_INFO("VCN(%d) decode is enabled in VM mode\n", i);
	}
}

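/**
 * vcn_v2_5_set_enc_ring_funcs - set enc ring functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set the encode ring functions for every unharvested VCN instance
 */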
static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i, j;

	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
		if (adev->vcn.harvest_config & (1 << j))
			continue;
		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
			adev->vcn.inst[j].ring_enc[i].me = j;
		}
		DRM_INFO("VCN(%d) encode is enabled in VM mode\n", j);
	}
}

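/**
 * vcn_v2_5_is_idle - check VCN block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Returns true when every unharvested instance reports UVD_STATUS__IDLE
 */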
static bool vcn_v2_5_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 1;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret &= (RREG32_SOC15(VCN, i, mmUVD_STATUS) == UVD_STATUS__IDLE);
	}

	return ret;
}

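/**
 * vcn_v2_5_wait_for_idle - wait for VCN block idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll every unharvested instance until it reports UVD_STATUS__IDLE
 * or the wait times out
 */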
static int vcn_v2_5_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		ret = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE,
			UVD_STATUS__IDLE);
		if (ret)
			return ret;
	}

	return ret;
}

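/**
 * vcn_v2_5_set_clockgating_state - set VCN clock gating state
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to set
 *
 * Enable or disable VCN clock gating; gating is only applied while
 * the block is idle, and nothing is done under SR-IOV
 */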
static int vcn_v2_5_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (enable) {
		if (!vcn_v2_5_is_idle(handle))
			return -EBUSY;
		vcn_v2_5_enable_clock_gating(adev);
	} else {
		vcn_v2_5_disable_clock_gating(adev);
	}

	return 0;
}

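/**
 * vcn_v2_5_set_powergating_state - set VCN power gating state
 *
 * @handle: amdgpu_device pointer
 * @state: power gating state to set
 *
 * Stop the block when gating and start it when ungating, caching the
 * new state on success; nothing is done under SR-IOV
 */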
static int vcn_v2_5_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret;

	if (amdgpu_sriov_vf(adev))
		return 0;

	if (state == adev->vcn.cur_state)
		return 0;

	if (state == AMD_PG_STATE_GATE)
		ret = vcn_v2_5_stop(adev);
	else
		ret = vcn_v2_5_start(adev);

	if (!ret)
		adev->vcn.cur_state = state;

	return ret;
}

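/**
 * vcn_v2_5_set_interrupt_state - set VCN interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: interrupt state to set
 *
 * Stub for the mandatory .set callback; no interrupt state is
 * programmed here
 */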
static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

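/**
 * vcn_v2_6_set_ras_interrupt_state - set VCN RAS poison interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: interrupt state to set
 *
 * Stub for the mandatory .set callback; no interrupt state is
 * programmed for the RAS poison interrupt
 */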
static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned int type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

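/**
 * vcn_v2_5_process_interrupt - process VCN IRQ
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Map the IH client id to a VCN instance and kick fence processing
 * on the ring that raised the interrupt
 */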
static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	uint32_t ip_instance;

	switch (entry->client_id) {
	case SOC15_IH_CLIENTID_VCN:
		ip_instance = 0;
		break;
	case SOC15_IH_CLIENTID_VCN1:
		ip_instance = 1;
		break;
	default:
		DRM_ERROR("Unhandled client id: %d\n", entry->client_id);
		return 0;
	}

	DRM_DEBUG("IH: VCN TRAP\n");

	switch (entry->src_id) {
	case VCN_2_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_dec);
		break;
	case VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
		break;
	case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
		amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
	.set = vcn_v2_5_set_interrupt_state,
	.process = vcn_v2_5_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
	.set = vcn_v2_6_set_ras_interrupt_state,
	.process = amdgpu_vcn_process_poison_irq,
};

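/**
 * vcn_v2_5_set_irq_funcs - set IRQ functions
 *
 * @adev: amdgpu_device pointer
 *
 * Set the interrupt and RAS poison interrupt functions for every
 * unharvested VCN instance
 */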
static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;
		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;

		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
	}
}

static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
	.name = "vcn_v2_5",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
	.name = "vcn_v2_6",
	.early_init = vcn_v2_5_early_init,
	.late_init = NULL,
	.sw_init = vcn_v2_5_sw_init,
	.sw_fini = vcn_v2_5_sw_fini,
	.hw_init = vcn_v2_5_hw_init,
	.hw_fini = vcn_v2_5_hw_fini,
	.suspend = vcn_v2_5_suspend,
	.resume = vcn_v2_5_resume,
	.is_idle = vcn_v2_5_is_idle,
	.wait_for_idle = vcn_v2_5_wait_for_idle,
	.check_soft_reset = NULL,
	.pre_soft_reset = NULL,
	.soft_reset = NULL,
	.post_soft_reset = NULL,
	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
	.set_powergating_state = vcn_v2_5_set_powergating_state,
};

const struct amdgpu_ip_block_version vcn_v2_5_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 5,
	.rev = 0,
	.funcs = &vcn_v2_5_ip_funcs,
};

const struct amdgpu_ip_block_version vcn_v2_6_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_VCN,
	.major = 2,
	.minor = 6,
	.rev = 0,
	.funcs = &vcn_v2_6_ip_funcs,
};

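/**
 * vcn_v2_6_query_poison_by_instance - query poison status of a sub block
 *
 * @adev: amdgpu_device pointer
 * @instance: VCN instance to query
 * @sub_block: sub block to query
 *
 * Read the RAS status register for the sub block and report whether
 * its POISONED_PF field is set
 */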
static uint32_t vcn_v2_6_query_poison_by_instance(struct amdgpu_device *adev,
			uint32_t instance, uint32_t sub_block)
{
	uint32_t poison_stat = 0, reg_value = 0;

	switch (sub_block) {
	case AMDGPU_VCN_V2_6_VCPU_VCODEC:
		reg_value = RREG32_SOC15(VCN, instance, mmUVD_RAS_VCPU_VCODEC_STATUS);
		poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF);
		break;
	default:
		break;
	}

	if (poison_stat)
		dev_info(adev->dev, "Poison detected in VCN%d, sub_block %d\n",
			instance, sub_block);

	return poison_stat;
}

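/**
 * vcn_v2_6_query_poison_status - query poison status of all instances
 *
 * @adev: amdgpu_device pointer
 *
 * Returns true if any sub block of any VCN instance reports poison
 */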
static bool vcn_v2_6_query_poison_status(struct amdgpu_device *adev)
{
	uint32_t inst, sub;
	uint32_t poison_stat = 0;

	for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++)
		for (sub = 0; sub < AMDGPU_VCN_V2_6_MAX_SUB_BLOCK; sub++)
			poison_stat +=
				vcn_v2_6_query_poison_by_instance(adev, inst, sub);

	return !!poison_stat;
}

const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
	.query_poison_status = vcn_v2_6_query_poison_status,
};

static struct amdgpu_vcn_ras vcn_v2_6_ras = {
	.ras_block = {
		.hw_ops = &vcn_v2_6_ras_hw_ops,
		.ras_late_init = amdgpu_vcn_ras_late_init,
	},
};

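/**
 * vcn_v2_5_set_ras_funcs - set RAS functions
 *
 * @adev: amdgpu_device pointer
 *
 * Hook up the VCN RAS block; only IP v2.6.0 has RAS support here
 */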
static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev)
{
	switch (amdgpu_ip_version(adev, VCN_HWIP, 0)) {
	case IP_VERSION(2, 6, 0):
		adev->vcn.ras = &vcn_v2_6_ras;
		break;
	default:
		break;
	}
}
