/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "gfxhub_v1_2.h"
#include "gfxhub_v1_1.h"

#include "gc/gc_9_4_3_offset.h"
#include "gc/gc_9_4_3_sh_mask.h"
#include "vega10_enum.h"

#include "soc15_common.h"

#define regVM_L2_CNTL3_DEFAULT	0x80100007
#define regVM_L2_CNTL4_DEFAULT	0x000000c1

static u64 gfxhub_v1_2_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_FB_OFFSET) << 24;
}

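/* Program the page table base address of the given VMID on each GC
 * instance selected by xcc_mask.
 */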
static void gfxhub_v1_2_xcc_setup_vm_pt_regs(struct amdgpu_device *adev,
					     uint32_t vmid,
					     uint64_t page_table_base,
					     uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];
		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
				    hub->ctx_addr_distance * vmid,
				    lower_32_bits(page_table_base));

		WREG32_SOC15_OFFSET(GC, GET_INST(GC, i),
				    regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
				    hub->ctx_addr_distance * vmid,
				    upper_32_bits(page_table_base));
	}
}

static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev,
					 uint32_t vmid,
					 uint64_t page_table_base)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, vmid, page_table_base, xcc_mask);
}

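/* Point VMID0 at the PDB0 page table when present, otherwise at the GART
 * table, and program the VMID0 aperture start/end on each selected GC
 * instance.
 */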
static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev,
						    uint32_t xcc_mask)
{
	uint64_t pt_base;
	int i;

	if (adev->gmc.pdb0_bo)
		pt_base = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo);
	else
		pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	gfxhub_v1_2_xcc_setup_vm_pt_regs(adev, 0, pt_base, xcc_mask);

	/* If GART is used for FB translation, the vmid0 page table covers
	 * both vram and system memory (gart).
	 */
	for_each_inst(i, xcc_mask) {
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.fb_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.fb_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		} else {
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
				     (u32)(adev->gmc.gart_start >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
				     (u32)(adev->gmc.gart_start >> 44));

			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
				     (u32)(adev->gmc.gart_end >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i),
				     regVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
				     (u32)(adev->gmc.gart_end >> 44));
		}
	}
}

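/* Program the AGP aperture, the system aperture, the default page and the
 * protection fault default address on each selected GC instance.
 */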
static void
gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint64_t value;
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Program the AGP BAR */
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BASE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

		if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) {
			/* Program the system aperture low logical page number. */
			WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
				min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);

			if (adev->apu_flags & (AMD_APU_IS_RAVEN2 |
					       AMD_APU_IS_RENOIR |
					       AMD_APU_IS_GREEN_SARDINE))
			       /*
				* Raven2 has a HW issue that prevents it from
				* using the vram that lies beyond
				* MC_VM_SYSTEM_APERTURE_HIGH_ADDR. The workaround
				* is to increase the system aperture high address
				* by 1 to get rid of the VM fault and hardware
				* hang.
				*/
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
						 regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
						 max((adev->gmc.fb_end >> 18) + 0x1,
						     adev->gmc.agp_end >> 18));
			else
				WREG32_SOC15_RLC(GC, GET_INST(GC, i),
					regMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
					max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

			/* Set default page address. */
			value = amdgpu_gmc_vram_mc2pa(adev, adev->mem_scratch.gpu_addr);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
				     (u32)(value >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
				     (u32)(value >> 44));

			/* Program "protection fault". */
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
				     (u32)(adev->dummy_page_addr >> 12));
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
				     (u32)((u64)adev->dummy_page_addr >> 44));

			tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL2,
					    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
			WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL2, tmp);
		}

		/* When squeezing vram into the GART aperture, the FB aperture
		 * and AGP aperture are not used. Disable them.
		 */
		if (adev->gmc.pdb0_bo) {
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_BOT, 0xFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_LOW_ADDR, 0x3FFFFFFF);
			WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 0);
		}
	}
}

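/* Enable the L1 TLB and configure its access modes on each selected GC
 * instance.
 */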
static void gfxhub_v1_2_xcc_init_tlb_regs(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL);

		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_L1_TLB, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_ACCESS_MODE, 3);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    ENABLE_ADVANCED_DRIVER_MODEL, 1);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL,
				    MTYPE, MTYPE_UC); /* XXX for emulation. */
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ATC_EN, 1);

		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regMC_VM_MX_L1_TLB_CNTL, tmp);
	}
}

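/* Program the VM L2 cache controls (VM_L2_CNTL through VM_L2_CNTL4) on each
 * selected GC instance.
 */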
static void gfxhub_v1_2_xcc_init_cache_regs(struct amdgpu_device *adev,
					    uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		/* Setup L2 cache */
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
		/* XXX for emulation, refer to closed source code. */
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
				    0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL, tmp);

		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_CNTL2);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
		tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL2, tmp);

		tmp = regVM_L2_CNTL3_DEFAULT;
		if (adev->gmc.translate_further) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 12);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 9);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
					    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL3, tmp);

		tmp = regVM_L2_CNTL4_DEFAULT;
		/* For AMD APP APUs setup WC memory */
		if (adev->gmc.xgmi.connected_to_cpu || adev->gmc.is_app_apu) {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 1);
		} else {
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
		}
		WREG32_SOC15_RLC(GC, GET_INST(GC, i), regVM_L2_CNTL4, tmp);
	}
}

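/* Enable VM context 0 (the system domain) with the VMID0 page table depth
 * and block size.
 */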
static void gfxhub_v1_2_xcc_enable_system_domain(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	uint32_t tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH,
				adev->gmc.vmid0_page_table_depth);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_BLOCK_SIZE,
				adev->gmc.vmid0_page_table_block_size);
		tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL, tmp);
	}
}

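/* Disable the context1 identity aperture by programming its low address
 * above its high address so that no request matches it.
 */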
static void
gfxhub_v1_2_xcc_disable_identity_aperture(struct amdgpu_device *adev,
					  uint32_t xcc_mask)
{
	int i;

	for_each_inst(i, xcc_mask) {
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
			     0xFFFFFFFF);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
			     0x0000000F);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32,
			     0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32,
			     0);

		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32, 0);
		WREG32_SOC15(GC, GET_INST(GC, i),
			     regVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32, 0);
	}
}

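/* Program VM contexts 1-15 (user VMIDs): page table depth, block size,
 * fault handling and the per-context address range on each selected GC
 * instance.
 */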
static void gfxhub_v1_2_xcc_setup_vmid_config(struct amdgpu_device *adev,
					      uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int num_level, block_size;
	uint32_t tmp;
	int i, j;

	num_level = adev->vm_manager.num_level;
	block_size = adev->vm_manager.block_size;
	if (adev->gmc.translate_further)
		num_level -= 1;
	else
		block_size -= 9;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		for (i = 0; i <= 14; i++) {
			tmp = RREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					i * hub->ctx_distance);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
					    num_level);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
					    1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
					    PAGE_TABLE_BLOCK_SIZE,
					    block_size);
			/* Send no-retry XNACK on fault to suppress VM fault storm.
			 * On 9.4.2 and 9.4.3, XNACK can be enabled in
			 * the SQ per-process.
			 * Retry faults need to be enabled for that to work.
			 */
			tmp = REG_SET_FIELD(
				tmp, VM_CONTEXT1_CNTL,
				RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				!adev->gmc.noretry ||
					amdgpu_ip_version(adev, GC_HWIP, 0) ==
						IP_VERSION(9, 4, 2) ||
					amdgpu_ip_version(adev, GC_HWIP, 0) ==
						IP_VERSION(9, 4, 3));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT1_CNTL,
					    i * hub->ctx_distance, tmp);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
					    i * hub->ctx_addr_distance, 0);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
					    i * hub->ctx_addr_distance,
					    lower_32_bits(adev->vm_manager.max_pfn - 1));
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j),
					    regVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
					    i * hub->ctx_addr_distance,
					    upper_32_bits(adev->vm_manager.max_pfn - 1));
		}
	}
}

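/* Initialize the address range of all 18 VM invalidation engines to cover
 * the full address space.
 */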
static void gfxhub_v1_2_xcc_program_invalidation(struct amdgpu_device *adev,
						 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	unsigned int i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];

		for (i = 0; i < 18; ++i) {
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
					    i * hub->eng_addr_distance, 0xffffffff);
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
					    i * hub->eng_addr_distance, 0x1f);
		}
	}
}

static int gfxhub_v1_2_xcc_gart_enable(struct amdgpu_device *adev,
				       uint32_t xcc_mask)
{
	/* GART Enable. */
	gfxhub_v1_2_xcc_init_gart_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_system_aperture_regs(adev, xcc_mask);
	gfxhub_v1_2_xcc_init_tlb_regs(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_init_cache_regs(adev, xcc_mask);

	gfxhub_v1_2_xcc_enable_system_domain(adev, xcc_mask);
	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_disable_identity_aperture(adev, xcc_mask);
	gfxhub_v1_2_xcc_setup_vmid_config(adev, xcc_mask);
	gfxhub_v1_2_xcc_program_invalidation(adev, xcc_mask);

	return 0;
}

static int gfxhub_v1_2_gart_enable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	return gfxhub_v1_2_xcc_gart_enable(adev, xcc_mask);
}

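/* Disable all VM contexts, the L1 TLB and, on bare metal, the L2 cache on
 * each selected GC instance.
 */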
static void gfxhub_v1_2_xcc_gart_disable(struct amdgpu_device *adev,
					 uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	u32 tmp;
	u32 i, j;

	for_each_inst(j, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(j)];
		/* Disable all tables */
		for (i = 0; i < 16; i++)
			WREG32_SOC15_OFFSET(GC, GET_INST(GC, j), regVM_CONTEXT0_CNTL,
					    i * hub->ctx_distance, 0);

		/* Setup TLB control */
		tmp = RREG32_SOC15(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL);
		tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
		tmp = REG_SET_FIELD(tmp,
					MC_VM_MX_L1_TLB_CNTL,
					ENABLE_ADVANCED_DRIVER_MODEL,
					0);
		WREG32_SOC15_RLC(GC, GET_INST(GC, j), regMC_VM_MX_L1_TLB_CNTL, tmp);

		/* Setup L2 cache */
		if (!amdgpu_sriov_vf(adev)) {
			tmp = RREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL);
			tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
			WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL, tmp);
			WREG32_SOC15(GC, GET_INST(GC, j), regVM_L2_CNTL3, 0);
		}
	}
}

static void gfxhub_v1_2_gart_disable(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_gart_disable(adev, xcc_mask);
}

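/* Per-instance implementation of gfxhub_v1_2_set_fault_enable_default():
 * when value is false, faults are not redirected to the default page and
 * the CRASH_ON_*_FAULT bits are set instead.
 */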
static void gfxhub_v1_2_xcc_set_fault_enable_default(struct amdgpu_device *adev,
						     bool value,
						     uint32_t xcc_mask)
{
	u32 tmp;
	int i;

	for_each_inst(i, xcc_mask) {
		tmp = RREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp,
				VM_L2_PROTECTION_FAULT_CNTL,
				TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
				value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
				EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
		if (!value) {
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_NO_RETRY_FAULT, 1);
			tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
					CRASH_ON_RETRY_FAULT, 1);
		}
		WREG32_SOC15(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL, tmp);
	}
}

/**
 * gfxhub_v1_2_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gfxhub_v1_2_set_fault_enable_default(struct amdgpu_device *adev,
						 bool value)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, xcc_mask);
}

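/* Cache the per-hub register offsets and register distances that the
 * generic VM code uses for each selected GC instance.
 */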
static void gfxhub_v1_2_xcc_init(struct amdgpu_device *adev, uint32_t xcc_mask)
{
	struct amdgpu_vmhub *hub;
	int i;

	for_each_inst(i, xcc_mask) {
		hub = &adev->vmhub[AMDGPU_GFXHUB(i)];

		hub->ctx0_ptb_addr_lo32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
		hub->ctx0_ptb_addr_hi32 =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
		hub->vm_inv_eng0_sem =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_SEM);
		hub->vm_inv_eng0_req =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_REQ);
		hub->vm_inv_eng0_ack =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_INVALIDATE_ENG0_ACK);
		hub->vm_context0_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_CONTEXT0_CNTL);
		hub->vm_l2_pro_fault_status =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i),
				regVM_L2_PROTECTION_FAULT_STATUS);
		hub->vm_l2_pro_fault_cntl =
			SOC15_REG_OFFSET(GC, GET_INST(GC, i), regVM_L2_PROTECTION_FAULT_CNTL);

		hub->ctx_distance = regVM_CONTEXT1_CNTL -
				regVM_CONTEXT0_CNTL;
		hub->ctx_addr_distance =
				regVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
				regVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
		hub->eng_distance = regVM_INVALIDATE_ENG1_REQ -
				regVM_INVALIDATE_ENG0_REQ;
		hub->eng_addr_distance =
				regVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
				regVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;
	}
}

static void gfxhub_v1_2_init(struct amdgpu_device *adev)
{
	uint32_t xcc_mask;

	xcc_mask = GENMASK(NUM_XCC(adev->gfx.xcc_mask) - 1, 0);
	gfxhub_v1_2_xcc_init(adev, xcc_mask);
}

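/* Derive the XGMI topology (number of physical nodes, this node's ID and
 * the per-node segment size) from the MC_VM_XGMI_LFB registers.
 */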
static int gfxhub_v1_2_get_xgmi_info(struct amdgpu_device *adev)
{
	u32 max_num_physical_nodes;
	u32 max_physical_node_id;
	u32 xgmi_lfb_cntl;
	u32 max_region;
	u64 seg_size;

	xgmi_lfb_cntl = RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_CNTL);
	seg_size = REG_GET_FIELD(
		RREG32_SOC15(GC, GET_INST(GC, 0), regMC_VM_XGMI_LFB_SIZE),
		MC_VM_XGMI_LFB_SIZE, PF_LFB_SIZE) << 24;
	max_region =
		REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL, PF_MAX_REGION);

	max_num_physical_nodes   = 8;
	max_physical_node_id     = 7;

	/* PF_MAX_REGION=0 means xgmi is disabled */
	if (max_region || adev->gmc.xgmi.connected_to_cpu) {
		adev->gmc.xgmi.num_physical_nodes = max_region + 1;

		if (adev->gmc.xgmi.num_physical_nodes > max_num_physical_nodes)
			return -EINVAL;

		adev->gmc.xgmi.physical_node_id =
			REG_GET_FIELD(xgmi_lfb_cntl, MC_VM_XGMI_LFB_CNTL,
					PF_LFB_REGION);

		if (adev->gmc.xgmi.physical_node_id > max_physical_node_id)
			return -EINVAL;

		adev->gmc.xgmi.node_segment_size = seg_size;
	}

	return 0;
}

const struct amdgpu_gfxhub_funcs gfxhub_v1_2_funcs = {
	.get_mc_fb_offset = gfxhub_v1_2_get_mc_fb_offset,
	.setup_vm_pt_regs = gfxhub_v1_2_setup_vm_pt_regs,
	.gart_enable = gfxhub_v1_2_gart_enable,
	.gart_disable = gfxhub_v1_2_gart_disable,
	.set_fault_enable_default = gfxhub_v1_2_set_fault_enable_default,
	.init = gfxhub_v1_2_init,
	.get_xgmi_info = gfxhub_v1_2_get_xgmi_info,
};

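/* XCP (partition) callbacks: re-program fault handling and, on bare metal,
 * re-enable GART for the instances in inst_mask when a partition resumes.
 */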
static int gfxhub_v1_2_xcp_resume(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool value;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		value = false;
	else
		value = true;

	gfxhub_v1_2_xcc_set_fault_enable_default(adev, value, inst_mask);

	if (!amdgpu_sriov_vf(adev))
		return gfxhub_v1_2_xcc_gart_enable(adev, inst_mask);

	return 0;
}

static int gfxhub_v1_2_xcp_suspend(void *handle, uint32_t inst_mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_sriov_vf(adev))
		gfxhub_v1_2_xcc_gart_disable(adev, inst_mask);

	return 0;
}

struct amdgpu_xcp_ip_funcs gfxhub_v1_2_xcp_funcs = {
	.suspend = &gfxhub_v1_2_xcp_suspend,
	.resume = &gfxhub_v1_2_xcp_resume
};