1/*
2 * Copyright 2017 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Xiangliang.Yu@amd.com
23 */
24
25#include "amdgpu.h"
26#include "vi.h"
27#include "bif/bif_5_0_d.h"
28#include "bif/bif_5_0_sh_mask.h"
29#include "vid.h"
30#include "gca/gfx_8_0_d.h"
31#include "gca/gfx_8_0_sh_mask.h"
32#include "gmc_v8_0.h"
33#include "gfx_v8_0.h"
34#include "sdma_v3_0.h"
35#include "tonga_ih.h"
36#include "gmc/gmc_8_2_d.h"
37#include "gmc/gmc_8_2_sh_mask.h"
38#include "oss/oss_3_0_d.h"
39#include "oss/oss_3_0_sh_mask.h"
40#include "dce/dce_10_0_d.h"
41#include "dce/dce_10_0_sh_mask.h"
42#include "smu/smu_7_1_3_d.h"
43#include "mxgpu_vi.h"
44
45#include "amdgpu_reset.h"
46
47/* VI golden setting */
/*
 * Fiji MGCG/CGCG (clock gating) init sequence, consumed by
 * amdgpu_device_program_register_sequence() as {register, AND mask,
 * OR value} triplets: reg = (reg & ~mask) | (value & mask).
 */
static const u32 xgpu_fiji_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE, 0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL, 0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3, 0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4, 0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL, 0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL, 0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0, 0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL, 0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL, 0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
	mmPCIE_DATA, 0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4, 0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4, 0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100,
};
97
/*
 * Fiji A10 golden settings: {register, AND mask, OR value} triplets
 * applied once at init to bring the hardware to validated defaults.
 */
static const u32 xgpu_fiji_golden_settings_a10[] = {
	mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000000ff,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
125
/*
 * Fiji common golden settings (raster config, resource reserves),
 * {register, AND mask, OR value} triplets.
 */
static const u32 xgpu_fiji_golden_common_all[] = {
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x3a00161a,
	mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x0000002e,
	mmGB_ADDR_CONFIG, 0xffffffff, 0x22011003,
	mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
	mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
	mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
	mmSPI_CONFIG_CNTL_1, 0x0000000f, 0x00000009,
};
138
/*
 * Tonga MGCG/CGCG (clock gating) init sequence, consumed by
 * amdgpu_device_program_register_sequence() as {register, AND mask,
 * OR value} triplets.  Includes the per-CU CGTS control programming
 * (CU0..CU7) that Fiji does not need.
 */
static const u32 xgpu_tonga_mgcg_cgcg_init[] = {
	mmRLC_CGTT_MGCG_OVERRIDE,   0xffffffff, 0xffffffff,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCB_CGTT_SCLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_BCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CP_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_CPC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_CPF_CLK_CTRL,        0xffffffff, 0x40000100,
	mmCGTT_DRM_CLK_CTRL0,       0xffffffff, 0x00600100,
	mmCGTT_GDS_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_IA_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PA_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_WD_CLK_CTRL,         0xffffffff, 0x06000100,
	mmCGTT_PC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_RLC_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SC_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SPI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SQ_CLK_CTRL,         0xffffffff, 0x00000100,
	mmCGTT_SQG_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL0,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL1,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL2,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL3,        0xffffffff, 0x00000100,
	mmCGTT_SX_CLK_CTRL4,        0xffffffff, 0x00000100,
	mmCGTT_TCI_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_TCP_CLK_CTRL,        0xffffffff, 0x00000100,
	mmCGTT_VGT_CLK_CTRL,        0xffffffff, 0x06000100,
	mmDB_CGTT_CLK_CTRL_0,       0xffffffff, 0x00000100,
	mmTA_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmTCA_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTCC_CGTT_SCLK_CTRL,       0xffffffff, 0x00000100,
	mmTD_CGTT_CTRL,             0xffffffff, 0x00000100,
	mmGRBM_GFX_INDEX,           0xffffffff, 0xe0000000,
	mmCGTS_CU0_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU0_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU0_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU0_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU0_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU1_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU1_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU1_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU1_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU1_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU2_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU2_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU2_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU2_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU2_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU3_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU3_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU3_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU3_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU3_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU4_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU4_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU4_TA_SQC_CTRL_REG, 0xffffffff, 0x00040007,
	mmCGTS_CU4_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU4_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU5_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU5_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU5_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU5_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU5_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU6_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU6_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU6_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU6_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU6_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_CU7_SP0_CTRL_REG,    0xffffffff, 0x00010000,
	mmCGTS_CU7_LDS_SQ_CTRL_REG, 0xffffffff, 0x00030002,
	mmCGTS_CU7_TA_CTRL_REG,     0xffffffff, 0x00040007,
	mmCGTS_CU7_SP1_CTRL_REG,    0xffffffff, 0x00060005,
	mmCGTS_CU7_TD_TCP_CTRL_REG, 0xffffffff, 0x00090008,
	mmCGTS_SM_CTRL_REG,         0xffffffff, 0x96e00200,
	mmCP_RB_WPTR_POLL_CNTL,     0xffffffff, 0x00900100,
	mmRLC_CGCG_CGLS_CTRL,       0xffffffff, 0x0020003c,
	mmPCIE_INDEX,               0xffffffff, 0x0140001c,
	mmPCIE_DATA,                0x000f0000, 0x00000000,
	mmSMC_IND_INDEX_4,          0xffffffff, 0xC060000C,
	mmSMC_IND_DATA_4,           0xc0000fff, 0x00000100,
	mmXDMA_CLOCK_GATING_CNTL,   0xffffffff, 0x00000100,
	mmXDMA_MEM_POWER_CNTL,      0x00000101, 0x00000000,
	mmMC_MEM_POWER_LS,          0xffffffff, 0x00000104,
	mmCGTT_DRM_CLK_CTRL0,       0xff000fff, 0x00000100,
	mmHDP_XDP_CGTT_BLK_CTRL,    0xc0000fff, 0x00000104,
	mmCP_MEM_SLP_CNTL,          0x00000001, 0x00000001,
	mmSDMA0_CLK_CTRL,           0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL,           0xff000ff0, 0x00000100,
};
228
/*
 * Tonga A11 golden settings: {register, AND mask, OR value} triplets
 * applied once at init to bring the hardware to validated defaults.
 */
static const u32 xgpu_tonga_golden_settings_a11[] = {
	mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
	mmCB_HW_CONTROL_3, 0x00000040, 0x00000040,
	mmDB_DEBUG2, 0xf00fffff, 0x00000400,
	mmDCI_CLK_CNTL, 0x00000080, 0x00000000,
	mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070,
	mmFBC_MISC, 0x1f311fff, 0x12300000,
	mmGB_GPU_ID, 0x0000000f, 0x00000000,
	mmHDMI_CONTROL, 0x31000111, 0x00000011,
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
	mmPA_SC_FIFO_DEPTH_CNTL, 0x000003ff, 0x000000fc,
	mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
	mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0000003c,
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSQ_RANDOM_WAVE_PRI, 0x001fffff, 0x000006fd,
	mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
	mmTCC_CTRL, 0x00100000, 0xf31fff7f,
	mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
	mmTCP_ADDR_CONFIG, 0x000003ff, 0x000002fb,
	mmTCP_CHAN_STEER_HI, 0xffffffff, 0x0000543b,
	mmTCP_CHAN_STEER_LO, 0xffffffff, 0xa9210876,
	mmVGT_RESET_DEBUG, 0x00000004, 0x00000004,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};
268
/*
 * Tonga common golden settings (raster config, resource reserves),
 * {register, AND mask, OR value} triplets.
 */
static const u32 xgpu_tonga_golden_common_all[] = {
	mmGRBM_GFX_INDEX,               0xffffffff, 0xe0000000,
	mmPA_SC_RASTER_CONFIG,          0xffffffff, 0x16000012,
	mmPA_SC_RASTER_CONFIG_1,        0xffffffff, 0x0000002A,
	mmGB_ADDR_CONFIG,               0xffffffff, 0x22011002,
	mmSPI_RESOURCE_RESERVE_CU_0,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_CU_1,    0xffffffff, 0x00000800,
	mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
};
278
279void xgpu_vi_init_golden_registers(struct amdgpu_device *adev)
280{
281	switch (adev->asic_type) {
282	case CHIP_FIJI:
283		amdgpu_device_program_register_sequence(adev,
284							xgpu_fiji_mgcg_cgcg_init,
285							ARRAY_SIZE(
286								xgpu_fiji_mgcg_cgcg_init));
287		amdgpu_device_program_register_sequence(adev,
288							xgpu_fiji_golden_settings_a10,
289							ARRAY_SIZE(
290								xgpu_fiji_golden_settings_a10));
291		amdgpu_device_program_register_sequence(adev,
292							xgpu_fiji_golden_common_all,
293							ARRAY_SIZE(
294								xgpu_fiji_golden_common_all));
295		break;
296	case CHIP_TONGA:
297		amdgpu_device_program_register_sequence(adev,
298							xgpu_tonga_mgcg_cgcg_init,
299							ARRAY_SIZE(
300								xgpu_tonga_mgcg_cgcg_init));
301		amdgpu_device_program_register_sequence(adev,
302							xgpu_tonga_golden_settings_a11,
303							ARRAY_SIZE(
304								xgpu_tonga_golden_settings_a11));
305		amdgpu_device_program_register_sequence(adev,
306							xgpu_tonga_golden_common_all,
307							ARRAY_SIZE(
308								xgpu_tonga_golden_common_all));
309		break;
310	default:
311		BUG_ON("Doesn't support chip type.\n");
312		break;
313	}
314}
315
316/*
317 * Mailbox communication between GPU hypervisor and VFs
318 */
319static void xgpu_vi_mailbox_send_ack(struct amdgpu_device *adev)
320{
321	u32 reg;
322	int timeout = VI_MAILBOX_TIMEDOUT;
323	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
324
325	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
326	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL, RCV_MSG_ACK, 1);
327	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
328
329	/*Wait for RCV_MSG_VALID to be 0*/
330	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
331	while (reg & mask) {
332		if (timeout <= 0) {
333			pr_err("RCV_MSG_VALID is not cleared\n");
334			break;
335		}
336		mdelay(1);
337		timeout -= 1;
338
339		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
340	}
341}
342
343static void xgpu_vi_mailbox_set_valid(struct amdgpu_device *adev, bool val)
344{
345	u32 reg;
346
347	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
348	reg = REG_SET_FIELD(reg, MAILBOX_CONTROL,
349			    TRN_MSG_VALID, val ? 1 : 0);
350	WREG32_NO_KIQ(mmMAILBOX_CONTROL, reg);
351}
352
353static void xgpu_vi_mailbox_trans_msg(struct amdgpu_device *adev,
354				      enum idh_request req)
355{
356	u32 reg;
357
358	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0);
359	reg = REG_SET_FIELD(reg, MAILBOX_MSGBUF_TRN_DW0,
360			    MSGBUF_DATA, req);
361	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, reg);
362
363	xgpu_vi_mailbox_set_valid(adev, true);
364}
365
366static int xgpu_vi_mailbox_rcv_msg(struct amdgpu_device *adev,
367				   enum idh_event event)
368{
369	u32 reg;
370	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, RCV_MSG_VALID);
371
372	/* workaround: host driver doesn't set VALID for CMPL now */
373	if (event != IDH_FLR_NOTIFICATION_CMPL) {
374		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
375		if (!(reg & mask))
376			return -ENOENT;
377	}
378
379	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
380	if (reg != event)
381		return -ENOENT;
382
383	/* send ack to PF */
384	xgpu_vi_mailbox_send_ack(adev);
385
386	return 0;
387}
388
389static int xgpu_vi_poll_ack(struct amdgpu_device *adev)
390{
391	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
392	u32 mask = REG_FIELD_MASK(MAILBOX_CONTROL, TRN_MSG_ACK);
393	u32 reg;
394
395	reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
396	while (!(reg & mask)) {
397		if (timeout <= 0) {
398			pr_err("Doesn't get ack from pf.\n");
399			r = -ETIME;
400			break;
401		}
402		mdelay(5);
403		timeout -= 5;
404
405		reg = RREG32_NO_KIQ(mmMAILBOX_CONTROL);
406	}
407
408	return r;
409}
410
411static int xgpu_vi_poll_msg(struct amdgpu_device *adev, enum idh_event event)
412{
413	int r = 0, timeout = VI_MAILBOX_TIMEDOUT;
414
415	r = xgpu_vi_mailbox_rcv_msg(adev, event);
416	while (r) {
417		if (timeout <= 0) {
418			pr_err("Doesn't get ack from pf.\n");
419			r = -ETIME;
420			break;
421		}
422		mdelay(5);
423		timeout -= 5;
424
425		r = xgpu_vi_mailbox_rcv_msg(adev, event);
426	}
427
428	return r;
429}
430
431static int xgpu_vi_send_access_requests(struct amdgpu_device *adev,
432					enum idh_request request)
433{
434	int r;
435
436	xgpu_vi_mailbox_trans_msg(adev, request);
437
438	/* start to poll ack */
439	r = xgpu_vi_poll_ack(adev);
440	if (r)
441		return r;
442
443	xgpu_vi_mailbox_set_valid(adev, false);
444
445	/* start to check msg if request is idh_req_gpu_init_access */
446	if (request == IDH_REQ_GPU_INIT_ACCESS ||
447		request == IDH_REQ_GPU_FINI_ACCESS ||
448		request == IDH_REQ_GPU_RESET_ACCESS) {
449		r = xgpu_vi_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
450		if (r) {
451			pr_err("Doesn't get ack from pf, give up\n");
452			return r;
453		}
454	}
455
456	return 0;
457}
458
/* Ask the hypervisor to reset this VF's GPU (amdgpu_virt_ops.reset_gpu). */
static int xgpu_vi_request_reset(struct amdgpu_device *adev)
{
	return xgpu_vi_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}
463
/* Block until the host signals FLR completion (amdgpu_virt_ops.wait_reset). */
static int xgpu_vi_wait_reset_cmpl(struct amdgpu_device *adev)
{
	return xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL);
}
468
469static int xgpu_vi_request_full_gpu_access(struct amdgpu_device *adev,
470					   bool init)
471{
472	enum idh_request req;
473
474	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
475	return xgpu_vi_send_access_requests(adev, req);
476}
477
478static int xgpu_vi_release_full_gpu_access(struct amdgpu_device *adev,
479					   bool init)
480{
481	enum idh_request req;
482	int r = 0;
483
484	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
485	r = xgpu_vi_send_access_requests(adev, req);
486
487	return r;
488}
489
490/* add support mailbox interrupts */
/* add support mailbox interrupts */
/* IRQ handler for the host ack interrupt: intentionally a no-op. */
static int xgpu_vi_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}
498
499static int xgpu_vi_set_mailbox_ack_irq(struct amdgpu_device *adev,
500				       struct amdgpu_irq_src *src,
501				       unsigned type,
502				       enum amdgpu_interrupt_state state)
503{
504	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
505
506	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, ACK_INT_EN,
507			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
508	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
509
510	return 0;
511}
512
513static void xgpu_vi_mailbox_flr_work(struct work_struct *work)
514{
515	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
516	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
517
518	/* wait until RCV_MSG become 3 */
519	if (xgpu_vi_poll_msg(adev, IDH_FLR_NOTIFICATION_CMPL)) {
520		pr_err("failed to receive FLR_CMPL\n");
521		return;
522	}
523
524	/* Trigger recovery due to world switch failure */
525	if (amdgpu_device_should_recover_gpu(adev)) {
526		struct amdgpu_reset_context reset_context;
527		memset(&reset_context, 0, sizeof(reset_context));
528
529		reset_context.method = AMD_RESET_METHOD_NONE;
530		reset_context.reset_req_dev = adev;
531		clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
532
533		amdgpu_device_gpu_recover(adev, NULL, &reset_context);
534	}
535}
536
537static int xgpu_vi_set_mailbox_rcv_irq(struct amdgpu_device *adev,
538				       struct amdgpu_irq_src *src,
539				       unsigned type,
540				       enum amdgpu_interrupt_state state)
541{
542	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
543
544	tmp = REG_SET_FIELD(tmp, MAILBOX_INT_CNTL, VALID_INT_EN,
545			    (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
546	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
547
548	return 0;
549}
550
551static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev,
552				   struct amdgpu_irq_src *source,
553				   struct amdgpu_iv_entry *entry)
554{
555	int r;
556
557	/* trigger gpu-reset by hypervisor only if TDR disabled */
558	if (!amdgpu_gpu_recovery) {
559		/* see what event we get */
560		r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION);
561
562		/* only handle FLR_NOTIFY now */
563		if (!r && !amdgpu_in_reset(adev))
564			WARN_ONCE(!amdgpu_reset_domain_schedule(adev->reset_domain,
565								&adev->virt.flr_work),
566				  "Failed to queue work! at %s",
567				  __func__);
568	}
569
570	return 0;
571}
572
/* IRQ source callbacks for the mailbox ACK interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_ack_irq_funcs = {
	.set = xgpu_vi_set_mailbox_ack_irq,
	.process = xgpu_vi_mailbox_ack_irq,
};
577
/* IRQ source callbacks for the mailbox message-valid interrupt. */
static const struct amdgpu_irq_src_funcs xgpu_vi_mailbox_rcv_irq_funcs = {
	.set = xgpu_vi_set_mailbox_rcv_irq,
	.process = xgpu_vi_mailbox_rcv_irq,
};
582
583void xgpu_vi_mailbox_set_irq_funcs(struct amdgpu_device *adev)
584{
585	adev->virt.ack_irq.num_types = 1;
586	adev->virt.ack_irq.funcs = &xgpu_vi_mailbox_ack_irq_funcs;
587	adev->virt.rcv_irq.num_types = 1;
588	adev->virt.rcv_irq.funcs = &xgpu_vi_mailbox_rcv_irq_funcs;
589}
590
591int xgpu_vi_mailbox_add_irq_id(struct amdgpu_device *adev)
592{
593	int r;
594
595	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 135, &adev->virt.rcv_irq);
596	if (r)
597		return r;
598
599	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 138, &adev->virt.ack_irq);
600	if (r) {
601		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
602		return r;
603	}
604
605	return 0;
606}
607
608int xgpu_vi_mailbox_get_irq(struct amdgpu_device *adev)
609{
610	int r;
611
612	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
613	if (r)
614		return r;
615	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
616	if (r) {
617		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
618		return r;
619	}
620
621	INIT_WORK(&adev->virt.flr_work, xgpu_vi_mailbox_flr_work);
622
623	return 0;
624}
625
/* Drop the references taken by xgpu_vi_mailbox_get_irq() on teardown. */
void xgpu_vi_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}
631
/* SR-IOV virtualization callbacks for VI-family VFs. */
const struct amdgpu_virt_ops xgpu_vi_virt_ops = {
	.req_full_gpu		= xgpu_vi_request_full_gpu_access,
	.rel_full_gpu		= xgpu_vi_release_full_gpu_access,
	.reset_gpu		= xgpu_vi_request_reset,
	.wait_reset             = xgpu_vi_wait_reset_cmpl,
	.trans_msg		= NULL, /* Does not need to trans VF errors to host. */
};
639