1/*
2 * Copyright 2021 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#define SWSMU_CODE_LAYER_L2
25
26#include <linux/firmware.h>
27#include "amdgpu.h"
28#include "amdgpu_smu.h"
29#include "atomfirmware.h"
30#include "amdgpu_atomfirmware.h"
31#include "amdgpu_atombios.h"
32#include "smu_v13_0_6_pmfw.h"
33#include "smu13_driver_if_v13_0_6.h"
34#include "smu_v13_0_6_ppsmc.h"
35#include "soc15_common.h"
36#include "atom.h"
37#include "power_state.h"
38#include "smu_v13_0.h"
39#include "smu_v13_0_6_ppt.h"
40#include "nbio/nbio_7_4_offset.h"
41#include "nbio/nbio_7_4_sh_mask.h"
42#include "thm/thm_11_0_2_offset.h"
43#include "thm/thm_11_0_2_sh_mask.h"
44#include "amdgpu_xgmi.h"
45#include <linux/pci.h>
46#include "amdgpu_ras.h"
47#include "smu_cmn.h"
48#include "mp/mp_13_0_6_offset.h"
49#include "mp/mp_13_0_6_sh_mask.h"
50
51#undef MP1_Public
52#undef smnMP1_FIRMWARE_FLAGS
53
54/* TODO: Check final register offsets */
55#define MP1_Public 0x03b00000
56#define smnMP1_FIRMWARE_FLAGS 0x3010028
57/*
58 * DO NOT use these for err/warn/info/debug messages.
59 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
60 * They are more MGPU friendly.
61 */
62#undef pr_err
63#undef pr_warn
64#undef pr_info
65#undef pr_debug
66
67#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
68
69#define SMU_13_0_6_FEA_MAP(smu_feature, smu_13_0_6_feature)                    \
70	[smu_feature] = { 1, (smu_13_0_6_feature) }
71
72#define FEATURE_MASK(feature) (1ULL << feature)
73#define SMC_DPM_FEATURE                                                        \
74	(FEATURE_MASK(FEATURE_DATA_CALCULATION) |                              \
75	 FEATURE_MASK(FEATURE_DPM_GFXCLK) | FEATURE_MASK(FEATURE_DPM_UCLK) |   \
76	 FEATURE_MASK(FEATURE_DPM_SOCCLK) | FEATURE_MASK(FEATURE_DPM_FCLK) |   \
77	 FEATURE_MASK(FEATURE_DPM_LCLK) | FEATURE_MASK(FEATURE_DPM_XGMI) |     \
78	 FEATURE_MASK(FEATURE_DPM_VCN))
79
80/* possible frequency drift (1Mhz) */
81#define EPSILON 1
82
83#define smnPCIE_ESM_CTRL 0x93D0
84#define smnPCIE_LC_LINK_WIDTH_CNTL 0x1a340288
85#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
86#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
87#define MAX_LINK_WIDTH 6
88
89#define smnPCIE_LC_SPEED_CNTL                   0x1a340290
90#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xE0
91#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0x5
92#define LINK_SPEED_MAX				4
93
94static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COUNT] = {
95	MSG_MAP(TestMessage,			     PPSMC_MSG_TestMessage,			0),
96	MSG_MAP(GetSmuVersion,			     PPSMC_MSG_GetSmuVersion,			1),
97	MSG_MAP(GetDriverIfVersion,		     PPSMC_MSG_GetDriverIfVersion,		1),
98	MSG_MAP(EnableAllSmuFeatures,		     PPSMC_MSG_EnableAllSmuFeatures,		0),
99	MSG_MAP(DisableAllSmuFeatures,		     PPSMC_MSG_DisableAllSmuFeatures,		0),
100	MSG_MAP(RequestI2cTransaction,		     PPSMC_MSG_RequestI2cTransaction,		0),
101	MSG_MAP(GetMetricsTable,		     PPSMC_MSG_GetMetricsTable,			1),
102	MSG_MAP(GetEnabledSmuFeaturesHigh,	     PPSMC_MSG_GetEnabledSmuFeaturesHigh,	1),
103	MSG_MAP(GetEnabledSmuFeaturesLow,	     PPSMC_MSG_GetEnabledSmuFeaturesLow,	1),
104	MSG_MAP(SetDriverDramAddrHigh,		     PPSMC_MSG_SetDriverDramAddrHigh,		1),
105	MSG_MAP(SetDriverDramAddrLow,		     PPSMC_MSG_SetDriverDramAddrLow,		1),
106	MSG_MAP(SetToolsDramAddrHigh,		     PPSMC_MSG_SetToolsDramAddrHigh,		0),
107	MSG_MAP(SetToolsDramAddrLow,		     PPSMC_MSG_SetToolsDramAddrLow,		0),
108	MSG_MAP(SetSoftMinByFreq,		     PPSMC_MSG_SetSoftMinByFreq,		0),
109	MSG_MAP(SetSoftMaxByFreq,		     PPSMC_MSG_SetSoftMaxByFreq,		0),
110	MSG_MAP(GetMinDpmFreq,			     PPSMC_MSG_GetMinDpmFreq,			1),
111	MSG_MAP(GetMaxDpmFreq,			     PPSMC_MSG_GetMaxDpmFreq,			1),
112	MSG_MAP(GetDpmFreqByIndex,		     PPSMC_MSG_GetDpmFreqByIndex,		1),
113	MSG_MAP(SetPptLimit,			     PPSMC_MSG_SetPptLimit,			0),
114	MSG_MAP(GetPptLimit,			     PPSMC_MSG_GetPptLimit,			1),
115	MSG_MAP(GfxDeviceDriverReset,		     PPSMC_MSG_GfxDriverReset,			0),
116	MSG_MAP(DramLogSetDramAddrHigh,		     PPSMC_MSG_DramLogSetDramAddrHigh,		0),
117	MSG_MAP(DramLogSetDramAddrLow,		     PPSMC_MSG_DramLogSetDramAddrLow,		0),
118	MSG_MAP(DramLogSetDramSize,		     PPSMC_MSG_DramLogSetDramSize,		0),
119	MSG_MAP(GetDebugData,			     PPSMC_MSG_GetDebugData,			0),
120	MSG_MAP(SetNumBadHbmPagesRetired,	     PPSMC_MSG_SetNumBadHbmPagesRetired,	0),
121	MSG_MAP(DFCstateControl,		     PPSMC_MSG_DFCstateControl,			0),
122	MSG_MAP(GetGmiPwrDnHyst,		     PPSMC_MSG_GetGmiPwrDnHyst,			0),
123	MSG_MAP(SetGmiPwrDnHyst,		     PPSMC_MSG_SetGmiPwrDnHyst,			0),
124	MSG_MAP(GmiPwrDnControl,		     PPSMC_MSG_GmiPwrDnControl,			0),
125	MSG_MAP(EnterGfxoff,			     PPSMC_MSG_EnterGfxoff,			0),
126	MSG_MAP(ExitGfxoff,			     PPSMC_MSG_ExitGfxoff,			0),
127	MSG_MAP(EnableDeterminism,		     PPSMC_MSG_EnableDeterminism,		0),
128	MSG_MAP(DisableDeterminism,		     PPSMC_MSG_DisableDeterminism,		0),
129	MSG_MAP(GfxDriverResetRecovery,		     PPSMC_MSG_GfxDriverResetRecovery,		0),
130	MSG_MAP(GetMinGfxclkFrequency,               PPSMC_MSG_GetMinGfxDpmFreq,                1),
131	MSG_MAP(GetMaxGfxclkFrequency,               PPSMC_MSG_GetMaxGfxDpmFreq,                1),
132	MSG_MAP(SetSoftMinGfxclk,                    PPSMC_MSG_SetSoftMinGfxClk,                0),
133	MSG_MAP(SetSoftMaxGfxClk,                    PPSMC_MSG_SetSoftMaxGfxClk,                0),
134	MSG_MAP(PrepareMp1ForUnload,                 PPSMC_MSG_PrepareForDriverUnload,          0),
135	MSG_MAP(GetCTFLimit,                         PPSMC_MSG_GetCTFLimit,                     0),
136};
137
138static const struct cmn2asic_mapping smu_v13_0_6_clk_map[SMU_CLK_COUNT] = {
139	CLK_MAP(SOCCLK, PPCLK_SOCCLK),
140	CLK_MAP(FCLK, PPCLK_FCLK),
141	CLK_MAP(UCLK, PPCLK_UCLK),
142	CLK_MAP(MCLK, PPCLK_UCLK),
143	CLK_MAP(DCLK, PPCLK_DCLK),
144	CLK_MAP(VCLK, PPCLK_VCLK),
145	CLK_MAP(LCLK, PPCLK_LCLK),
146};
147
148static const struct cmn2asic_mapping smu_v13_0_6_feature_mask_map[SMU_FEATURE_COUNT] = {
149	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DATA_CALCULATIONS_BIT, 		FEATURE_DATA_CALCULATION),
150	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_GFXCLK_BIT, 			FEATURE_DPM_GFXCLK),
151	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_UCLK_BIT, 			FEATURE_DPM_UCLK),
152	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_SOCCLK_BIT, 			FEATURE_DPM_SOCCLK),
153	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_FCLK_BIT, 			FEATURE_DPM_FCLK),
154	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_LCLK_BIT, 			FEATURE_DPM_LCLK),
155	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_VCLK_BIT,			FEATURE_DPM_VCN),
156	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_DCLK_BIT,			FEATURE_DPM_VCN),
157	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DPM_XGMI_BIT, 			FEATURE_DPM_XGMI),
158	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_GFXCLK_BIT, 			FEATURE_DS_GFXCLK),
159	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_SOCCLK_BIT, 			FEATURE_DS_SOCCLK),
160	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_LCLK_BIT, 			FEATURE_DS_LCLK),
161	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DS_FCLK_BIT, 			FEATURE_DS_FCLK),
162	SMU_13_0_6_FEA_MAP(SMU_FEATURE_VCN_DPM_BIT, 			FEATURE_DPM_VCN),
163	SMU_13_0_6_FEA_MAP(SMU_FEATURE_PPT_BIT, 			FEATURE_PPT),
164	SMU_13_0_6_FEA_MAP(SMU_FEATURE_TDC_BIT, 			FEATURE_TDC),
165	SMU_13_0_6_FEA_MAP(SMU_FEATURE_APCC_DFLL_BIT, 			FEATURE_APCC_DFLL),
166	SMU_13_0_6_FEA_MAP(SMU_FEATURE_MP1_CG_BIT, 			FEATURE_SMU_CG),
167	SMU_13_0_6_FEA_MAP(SMU_FEATURE_GFXOFF_BIT, 			FEATURE_GFXOFF),
168	SMU_13_0_6_FEA_MAP(SMU_FEATURE_FW_CTF_BIT, 			FEATURE_FW_CTF),
169	SMU_13_0_6_FEA_MAP(SMU_FEATURE_THERMAL_BIT, 			FEATURE_THERMAL),
170	SMU_13_0_6_FEA_MAP(SMU_FEATURE_XGMI_PER_LINK_PWR_DWN_BIT,	FEATURE_XGMI_PER_LINK_PWR_DOWN),
171	SMU_13_0_6_FEA_MAP(SMU_FEATURE_DF_CSTATE_BIT, 			FEATURE_DF_CSTATE),
172};
173
174#define TABLE_PMSTATUSLOG             0
175#define TABLE_SMU_METRICS             1
176#define TABLE_I2C_COMMANDS            2
177#define TABLE_COUNT                   3
178
179static const struct cmn2asic_mapping smu_v13_0_6_table_map[SMU_TABLE_COUNT] = {
180	TAB_MAP(PMSTATUSLOG),
181	TAB_MAP(SMU_METRICS),
182	TAB_MAP(I2C_COMMANDS),
183};
184
185static const uint8_t smu_v13_0_6_throttler_map[] = {
186	[THROTTLER_PPT_BIT]		= (SMU_THROTTLER_PPT0_BIT),
187	[THROTTLER_THERMAL_SOCKET_BIT]	= (SMU_THROTTLER_TEMP_GPU_BIT),
188	[THROTTLER_THERMAL_HBM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
189	[THROTTLER_THERMAL_VR_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
190	[THROTTLER_PROCHOT_BIT]		= (SMU_THROTTLER_PROCHOT_GFX_BIT),
191};
192
193struct PPTable_t {
194	uint32_t MaxSocketPowerLimit;
195	uint32_t MaxGfxclkFrequency;
196	uint32_t MinGfxclkFrequency;
197	uint32_t FclkFrequencyTable[4];
198	uint32_t UclkFrequencyTable[4];
199	uint32_t SocclkFrequencyTable[4];
200	uint32_t VclkFrequencyTable[4];
201	uint32_t DclkFrequencyTable[4];
202	uint32_t LclkFrequencyTable[4];
203	uint32_t MaxLclkDpmRange;
204	uint32_t MinLclkDpmRange;
205	uint64_t PublicSerialNumber_AID;
206	bool Init;
207};
208
209#define SMUQ10_TO_UINT(x) ((x) >> 10)
210
211struct smu_v13_0_6_dpm_map {
212	enum smu_clk_type clk_type;
213	uint32_t feature_num;
214	struct smu_13_0_dpm_table *dpm_table;
215	uint32_t *freq_table;
216};
217
218static int smu_v13_0_6_tables_init(struct smu_context *smu)
219{
220	struct smu_table_context *smu_table = &smu->smu_table;
221	struct smu_table *tables = smu_table->tables;
222	struct amdgpu_device *adev = smu->adev;
223
224	if (!(adev->flags & AMD_IS_APU))
225		SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU13_TOOL_SIZE,
226			       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
227
228	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(MetricsTable_t),
229		       PAGE_SIZE,
230		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
231
232	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
233		       PAGE_SIZE,
234		       AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT);
235
236	smu_table->metrics_table = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
237	if (!smu_table->metrics_table)
238		return -ENOMEM;
239	smu_table->metrics_time = 0;
240
241	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
242	smu_table->gpu_metrics_table =
243		kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
244	if (!smu_table->gpu_metrics_table) {
245		kfree(smu_table->metrics_table);
246		return -ENOMEM;
247	}
248
249	smu_table->driver_pptable =
250		kzalloc(sizeof(struct PPTable_t), GFP_KERNEL);
251	if (!smu_table->driver_pptable) {
252		kfree(smu_table->metrics_table);
253		kfree(smu_table->gpu_metrics_table);
254		return -ENOMEM;
255	}
256
257	return 0;
258}
259
260static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
261{
262	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
263
264	smu_dpm->dpm_context =
265		kzalloc(sizeof(struct smu_13_0_dpm_context), GFP_KERNEL);
266	if (!smu_dpm->dpm_context)
267		return -ENOMEM;
268	smu_dpm->dpm_context_size = sizeof(struct smu_13_0_dpm_context);
269
270	return 0;
271}
272
273static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
274{
275	int ret = 0;
276
277	ret = smu_v13_0_6_tables_init(smu);
278	if (ret)
279		return ret;
280
281	ret = smu_v13_0_6_allocate_dpm_context(smu);
282
283	return ret;
284}
285
286static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
287						uint32_t *feature_mask,
288						uint32_t num)
289{
290	if (num > 2)
291		return -EINVAL;
292
293	/* pptable will handle the features to enable */
294	memset(feature_mask, 0xFF, sizeof(uint32_t) * num);
295
296	return 0;
297}
298
299static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
300					 void *metrics_table, bool bypass_cache)
301{
302	struct smu_table_context *smu_table = &smu->smu_table;
303	uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size;
304	struct smu_table *table = &smu_table->driver_table;
305	int ret;
306
307	if (bypass_cache || !smu_table->metrics_time ||
308	    time_after(jiffies,
309		       smu_table->metrics_time + msecs_to_jiffies(1))) {
310		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
311		if (ret) {
312			dev_info(smu->adev->dev,
313				 "Failed to export SMU metrics table!\n");
314			return ret;
315		}
316
317		amdgpu_asic_invalidate_hdp(smu->adev, NULL);
318		memcpy(smu_table->metrics_table, table->cpu_addr, table_size);
319
320		smu_table->metrics_time = jiffies;
321	}
322
323	if (metrics_table)
324		memcpy(metrics_table, smu_table->metrics_table, table_size);
325
326	return 0;
327}
328
329static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
330{
331	struct smu_table_context *smu_table = &smu->smu_table;
332	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
333	struct PPTable_t *pptable =
334		(struct PPTable_t *)smu_table->driver_pptable;
335	int ret, i, retry = 100;
336
337	/* Store one-time values in driver PPTable */
338	if (!pptable->Init) {
339		while (--retry) {
340			ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
341			if (ret)
342				return ret;
343
344			/* Ensure that metrics have been updated */
345			if (metrics->AccumulationCounter)
346				break;
347
348			usleep_range(1000, 1100);
349		}
350
351		if (!retry)
352			return -ETIME;
353
354		pptable->MaxSocketPowerLimit =
355			SMUQ10_TO_UINT(metrics->MaxSocketPowerLimit);
356		pptable->MaxGfxclkFrequency =
357			SMUQ10_TO_UINT(metrics->MaxGfxclkFrequency);
358		pptable->MinGfxclkFrequency =
359			SMUQ10_TO_UINT(metrics->MinGfxclkFrequency);
360
361		for (i = 0; i < 4; ++i) {
362			pptable->FclkFrequencyTable[i] =
363				SMUQ10_TO_UINT(metrics->FclkFrequencyTable[i]);
364			pptable->UclkFrequencyTable[i] =
365				SMUQ10_TO_UINT(metrics->UclkFrequencyTable[i]);
366			pptable->SocclkFrequencyTable[i] = SMUQ10_TO_UINT(
367				metrics->SocclkFrequencyTable[i]);
368			pptable->VclkFrequencyTable[i] =
369				SMUQ10_TO_UINT(metrics->VclkFrequencyTable[i]);
370			pptable->DclkFrequencyTable[i] =
371				SMUQ10_TO_UINT(metrics->DclkFrequencyTable[i]);
372			pptable->LclkFrequencyTable[i] =
373				SMUQ10_TO_UINT(metrics->LclkFrequencyTable[i]);
374		}
375
376		/* use AID0 serial number by default */
377		pptable->PublicSerialNumber_AID = metrics->PublicSerialNumber_AID[0];
378
379		pptable->Init = true;
380	}
381
382	return 0;
383}
384
385static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
386					     enum smu_clk_type clk_type,
387					     uint32_t *min, uint32_t *max)
388{
389	struct smu_table_context *smu_table = &smu->smu_table;
390	struct PPTable_t *pptable =
391		(struct PPTable_t *)smu_table->driver_pptable;
392	uint32_t clock_limit = 0, param;
393	int ret = 0, clk_id = 0;
394
395	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
396		switch (clk_type) {
397		case SMU_MCLK:
398		case SMU_UCLK:
399			if (pptable->Init)
400				clock_limit = pptable->UclkFrequencyTable[0];
401			break;
402		case SMU_GFXCLK:
403		case SMU_SCLK:
404			if (pptable->Init)
405				clock_limit = pptable->MinGfxclkFrequency;
406			break;
407		case SMU_SOCCLK:
408			if (pptable->Init)
409				clock_limit = pptable->SocclkFrequencyTable[0];
410			break;
411		case SMU_FCLK:
412			if (pptable->Init)
413				clock_limit = pptable->FclkFrequencyTable[0];
414			break;
415		case SMU_VCLK:
416			if (pptable->Init)
417				clock_limit = pptable->VclkFrequencyTable[0];
418			break;
419		case SMU_DCLK:
420			if (pptable->Init)
421				clock_limit = pptable->DclkFrequencyTable[0];
422			break;
423		default:
424			break;
425		}
426
427		if (min)
428			*min = clock_limit;
429
430		if (max)
431			*max = clock_limit;
432
433		return 0;
434	}
435
436	if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) {
437		clk_id = smu_cmn_to_asic_specific_index(
438			smu, CMN2ASIC_MAPPING_CLK, clk_type);
439		if (clk_id < 0) {
440			ret = -EINVAL;
441			goto failed;
442		}
443		param = (clk_id & 0xffff) << 16;
444	}
445
446	if (max) {
447		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
448			ret = smu_cmn_send_smc_msg(
449				smu, SMU_MSG_GetMaxGfxclkFrequency, max);
450		else
451			ret = smu_cmn_send_smc_msg_with_param(
452				smu, SMU_MSG_GetMaxDpmFreq, param, max);
453		if (ret)
454			goto failed;
455	}
456
457	if (min) {
458		if (clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)
459			ret = smu_cmn_send_smc_msg(
460				smu, SMU_MSG_GetMinGfxclkFrequency, min);
461		else
462			ret = smu_cmn_send_smc_msg_with_param(
463				smu, SMU_MSG_GetMinDpmFreq, param, min);
464	}
465
466failed:
467	return ret;
468}
469
470static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
471					  enum smu_clk_type clk_type,
472					  uint32_t *levels)
473{
474	int ret;
475
476	ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
477	if (!ret)
478		++(*levels);
479
480	return ret;
481}
482
483static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
484{
485	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
486	struct smu_table_context *smu_table = &smu->smu_table;
487	struct smu_13_0_dpm_table *dpm_table = NULL;
488	struct PPTable_t *pptable =
489		(struct PPTable_t *)smu_table->driver_pptable;
490	uint32_t gfxclkmin, gfxclkmax, levels;
491	int ret = 0, i, j;
492	struct smu_v13_0_6_dpm_map dpm_map[] = {
493		{ SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT,
494		  &dpm_context->dpm_tables.soc_table,
495		  pptable->SocclkFrequencyTable },
496		{ SMU_UCLK, SMU_FEATURE_DPM_UCLK_BIT,
497		  &dpm_context->dpm_tables.uclk_table,
498		  pptable->UclkFrequencyTable },
499		{ SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT,
500		  &dpm_context->dpm_tables.fclk_table,
501		  pptable->FclkFrequencyTable },
502		{ SMU_VCLK, SMU_FEATURE_DPM_VCLK_BIT,
503		  &dpm_context->dpm_tables.vclk_table,
504		  pptable->VclkFrequencyTable },
505		{ SMU_DCLK, SMU_FEATURE_DPM_DCLK_BIT,
506		  &dpm_context->dpm_tables.dclk_table,
507		  pptable->DclkFrequencyTable },
508	};
509
510	smu_v13_0_6_setup_driver_pptable(smu);
511
512	/* gfxclk dpm table setup */
513	dpm_table = &dpm_context->dpm_tables.gfx_table;
514	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
515		/* In the case of gfxclk, only fine-grained dpm is honored.
516		 * Get min/max values from FW.
517		 */
518		ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
519							&gfxclkmin, &gfxclkmax);
520		if (ret)
521			return ret;
522
523		dpm_table->count = 2;
524		dpm_table->dpm_levels[0].value = gfxclkmin;
525		dpm_table->dpm_levels[0].enabled = true;
526		dpm_table->dpm_levels[1].value = gfxclkmax;
527		dpm_table->dpm_levels[1].enabled = true;
528		dpm_table->min = dpm_table->dpm_levels[0].value;
529		dpm_table->max = dpm_table->dpm_levels[1].value;
530	} else {
531		dpm_table->count = 1;
532		dpm_table->dpm_levels[0].value = pptable->MinGfxclkFrequency;
533		dpm_table->dpm_levels[0].enabled = true;
534		dpm_table->min = dpm_table->dpm_levels[0].value;
535		dpm_table->max = dpm_table->dpm_levels[0].value;
536	}
537
538	for (j = 0; j < ARRAY_SIZE(dpm_map); j++) {
539		dpm_table = dpm_map[j].dpm_table;
540		levels = 1;
541		if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
542			ret = smu_v13_0_6_get_dpm_level_count(
543				smu, dpm_map[j].clk_type, &levels);
544			if (ret)
545				return ret;
546		}
547		dpm_table->count = levels;
548		for (i = 0; i < dpm_table->count; ++i) {
549			dpm_table->dpm_levels[i].value =
550				dpm_map[j].freq_table[i];
551			dpm_table->dpm_levels[i].enabled = true;
552
553		}
554		dpm_table->min = dpm_table->dpm_levels[0].value;
555		dpm_table->max = dpm_table->dpm_levels[levels - 1].value;
556
557	}
558
559	return 0;
560}
561
562static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
563{
564	struct smu_table_context *table_context = &smu->smu_table;
565
566	/* TODO: PPTable is not available.
567	 * 1) Find an alternate way to get 'PPTable values' here.
568	 * 2) Check if there is SW CTF
569	 */
570	table_context->thermal_controller_type = 0;
571
572	return 0;
573}
574
575static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
576{
577	struct amdgpu_device *adev = smu->adev;
578	uint32_t mp1_fw_flags;
579
580	mp1_fw_flags =
581		RREG32_PCIE(MP1_Public | (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
582
583	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
584	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
585		return 0;
586
587	return -EIO;
588}
589
590static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
591{
592	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
593	struct smu_13_0_dpm_table *gfx_table =
594		&dpm_context->dpm_tables.gfx_table;
595	struct smu_13_0_dpm_table *mem_table =
596		&dpm_context->dpm_tables.uclk_table;
597	struct smu_13_0_dpm_table *soc_table =
598		&dpm_context->dpm_tables.soc_table;
599	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
600
601	pstate_table->gfxclk_pstate.min = gfx_table->min;
602	pstate_table->gfxclk_pstate.peak = gfx_table->max;
603	pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
604	pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
605
606	pstate_table->uclk_pstate.min = mem_table->min;
607	pstate_table->uclk_pstate.peak = mem_table->max;
608	pstate_table->uclk_pstate.curr.min = mem_table->min;
609	pstate_table->uclk_pstate.curr.max = mem_table->max;
610
611	pstate_table->socclk_pstate.min = soc_table->min;
612	pstate_table->socclk_pstate.peak = soc_table->max;
613	pstate_table->socclk_pstate.curr.min = soc_table->min;
614	pstate_table->socclk_pstate.curr.max = soc_table->max;
615
616	if (gfx_table->count > SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL &&
617	    mem_table->count > SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL &&
618	    soc_table->count > SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL) {
619		pstate_table->gfxclk_pstate.standard =
620			gfx_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_GFXCLK_LEVEL].value;
621		pstate_table->uclk_pstate.standard =
622			mem_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_MCLK_LEVEL].value;
623		pstate_table->socclk_pstate.standard =
624			soc_table->dpm_levels[SMU_13_0_6_UMD_PSTATE_SOCCLK_LEVEL].value;
625	} else {
626		pstate_table->gfxclk_pstate.standard =
627			pstate_table->gfxclk_pstate.min;
628		pstate_table->uclk_pstate.standard =
629			pstate_table->uclk_pstate.min;
630		pstate_table->socclk_pstate.standard =
631			pstate_table->socclk_pstate.min;
632	}
633
634	return 0;
635}
636
637static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
638				     struct pp_clock_levels_with_latency *clocks,
639				     struct smu_13_0_dpm_table *dpm_table)
640{
641	int i, count;
642
643	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS :
644						      dpm_table->count;
645	clocks->num_levels = count;
646
647	for (i = 0; i < count; i++) {
648		clocks->data[i].clocks_in_khz =
649			dpm_table->dpm_levels[i].value * 1000;
650		clocks->data[i].latency_in_us = 0;
651	}
652
653	return 0;
654}
655
656static int smu_v13_0_6_freqs_in_same_level(int32_t frequency1,
657					   int32_t frequency2)
658{
659	return (abs(frequency1 - frequency2) <= EPSILON);
660}
661
662static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
663{
664	struct smu_power_context *smu_power = &smu->smu_power;
665	struct smu_13_0_power_context *power_context = smu_power->power_context;
666	uint32_t  throttler_status = 0;
667
668	throttler_status = atomic_read(&power_context->throttle_status);
669	dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status);
670
671	return throttler_status;
672}
673
674static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
675					    MetricsMember_t member,
676					    uint32_t *value)
677{
678	struct smu_table_context *smu_table = &smu->smu_table;
679	MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
680	struct amdgpu_device *adev = smu->adev;
681	uint32_t smu_version;
682	int ret = 0;
683	int xcc_id;
684
685	ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
686	if (ret)
687		return ret;
688
689	/* For clocks with multiple instances, only report the first one */
690	switch (member) {
691	case METRICS_CURR_GFXCLK:
692	case METRICS_AVERAGE_GFXCLK:
693		smu_cmn_get_smc_version(smu, NULL, &smu_version);
694		if (smu_version >= 0x552F00) {
695			xcc_id = GET_INST(GC, 0);
696			*value = SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc_id]);
697		} else {
698			*value = 0;
699		}
700		break;
701	case METRICS_CURR_SOCCLK:
702	case METRICS_AVERAGE_SOCCLK:
703		*value = SMUQ10_TO_UINT(metrics->SocclkFrequency[0]);
704		break;
705	case METRICS_CURR_UCLK:
706	case METRICS_AVERAGE_UCLK:
707		*value = SMUQ10_TO_UINT(metrics->UclkFrequency);
708		break;
709	case METRICS_CURR_VCLK:
710		*value = SMUQ10_TO_UINT(metrics->VclkFrequency[0]);
711		break;
712	case METRICS_CURR_DCLK:
713		*value = SMUQ10_TO_UINT(metrics->DclkFrequency[0]);
714		break;
715	case METRICS_CURR_FCLK:
716		*value = SMUQ10_TO_UINT(metrics->FclkFrequency);
717		break;
718	case METRICS_AVERAGE_GFXACTIVITY:
719		*value = SMUQ10_TO_UINT(metrics->SocketGfxBusy);
720		break;
721	case METRICS_AVERAGE_MEMACTIVITY:
722		*value = SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
723		break;
724	case METRICS_CURR_SOCKETPOWER:
725		*value = SMUQ10_TO_UINT(metrics->SocketPower) << 8;
726		break;
727	case METRICS_TEMPERATURE_HOTSPOT:
728		*value = SMUQ10_TO_UINT(metrics->MaxSocketTemperature) *
729			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
730		break;
731	case METRICS_TEMPERATURE_MEM:
732		*value = SMUQ10_TO_UINT(metrics->MaxHbmTemperature) *
733			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
734		break;
735	/* This is the max of all VRs and not just SOC VR.
736	 * No need to define another data type for the same.
737	 */
738	case METRICS_TEMPERATURE_VRSOC:
739		*value = SMUQ10_TO_UINT(metrics->MaxVrTemperature) *
740			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
741		break;
742	default:
743		*value = UINT_MAX;
744		break;
745	}
746
747	return ret;
748}
749
750static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
751						     enum smu_clk_type clk_type,
752						     uint32_t *value)
753{
754	MetricsMember_t member_type;
755
756	if (!value)
757		return -EINVAL;
758
759	switch (clk_type) {
760	case SMU_GFXCLK:
761		member_type = METRICS_CURR_GFXCLK;
762		break;
763	case SMU_UCLK:
764		member_type = METRICS_CURR_UCLK;
765		break;
766	case SMU_SOCCLK:
767		member_type = METRICS_CURR_SOCCLK;
768		break;
769	case SMU_VCLK:
770		member_type = METRICS_CURR_VCLK;
771		break;
772	case SMU_DCLK:
773		member_type = METRICS_CURR_DCLK;
774		break;
775	case SMU_FCLK:
776		member_type = METRICS_CURR_FCLK;
777		break;
778	default:
779		return -EINVAL;
780	}
781
782	return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
783}
784
785static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
786					enum smu_clk_type type, char *buf)
787{
788	int i, now, size = 0;
789	int ret = 0;
790	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
791	struct pp_clock_levels_with_latency clocks;
792	struct smu_13_0_dpm_table *single_dpm_table;
793	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
794	struct smu_13_0_dpm_context *dpm_context = NULL;
795	uint32_t min_clk, max_clk;
796
797	smu_cmn_get_sysfs_buf(&buf, &size);
798
799	if (amdgpu_ras_intr_triggered()) {
800		size += sysfs_emit_at(buf, size, "unavailable\n");
801		return size;
802	}
803
804	dpm_context = smu_dpm->dpm_context;
805
806	switch (type) {
807	case SMU_OD_SCLK:
808		size += sysfs_emit_at(buf, size, "%s:\n", "GFXCLK");
809		fallthrough;
810	case SMU_SCLK:
811		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
812								&now);
813		if (ret) {
814			dev_err(smu->adev->dev,
815				"Attempt to get current gfx clk Failed!");
816			return ret;
817		}
818
819		min_clk = pstate_table->gfxclk_pstate.curr.min;
820		max_clk = pstate_table->gfxclk_pstate.curr.max;
821
822		if (!smu_v13_0_6_freqs_in_same_level(now, min_clk) &&
823		    !smu_v13_0_6_freqs_in_same_level(now, max_clk)) {
824			size += sysfs_emit_at(buf, size, "0: %uMhz\n",
825					      min_clk);
826			size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
827					      now);
828			size += sysfs_emit_at(buf, size, "2: %uMhz\n",
829					      max_clk);
830		} else {
831			size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
832					      min_clk,
833					      smu_v13_0_6_freqs_in_same_level(now, min_clk) ? "*" : "");
834			size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
835					      max_clk,
836					      smu_v13_0_6_freqs_in_same_level(now, max_clk) ? "*" : "");
837		}
838
839		break;
840
841	case SMU_OD_MCLK:
842		size += sysfs_emit_at(buf, size, "%s:\n", "MCLK");
843		fallthrough;
844	case SMU_MCLK:
845		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
846								&now);
847		if (ret) {
848			dev_err(smu->adev->dev,
849				"Attempt to get current mclk Failed!");
850			return ret;
851		}
852
853		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
854		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
855		if (ret) {
856			dev_err(smu->adev->dev,
857				"Attempt to get memory clk levels Failed!");
858			return ret;
859		}
860
861		for (i = 0; i < clocks.num_levels; i++)
862			size += sysfs_emit_at(
863				buf, size, "%d: %uMhz %s\n", i,
864				clocks.data[i].clocks_in_khz / 1000,
865				(clocks.num_levels == 1) ?
866					"*" :
867					(smu_v13_0_6_freqs_in_same_level(
868						 clocks.data[i].clocks_in_khz /
869							 1000,
870						 now) ?
871						 "*" :
872						 ""));
873		break;
874
875	case SMU_SOCCLK:
876		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
877								&now);
878		if (ret) {
879			dev_err(smu->adev->dev,
880				"Attempt to get current socclk Failed!");
881			return ret;
882		}
883
884		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
885		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
886		if (ret) {
887			dev_err(smu->adev->dev,
888				"Attempt to get socclk levels Failed!");
889			return ret;
890		}
891
892		for (i = 0; i < clocks.num_levels; i++)
893			size += sysfs_emit_at(
894				buf, size, "%d: %uMhz %s\n", i,
895				clocks.data[i].clocks_in_khz / 1000,
896				(clocks.num_levels == 1) ?
897					"*" :
898					(smu_v13_0_6_freqs_in_same_level(
899						 clocks.data[i].clocks_in_khz /
900							 1000,
901						 now) ?
902						 "*" :
903						 ""));
904		break;
905
906	case SMU_FCLK:
907		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
908								&now);
909		if (ret) {
910			dev_err(smu->adev->dev,
911				"Attempt to get current fclk Failed!");
912			return ret;
913		}
914
915		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
916		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
917		if (ret) {
918			dev_err(smu->adev->dev,
919				"Attempt to get fclk levels Failed!");
920			return ret;
921		}
922
923		for (i = 0; i < single_dpm_table->count; i++)
924			size += sysfs_emit_at(
925				buf, size, "%d: %uMhz %s\n", i,
926				single_dpm_table->dpm_levels[i].value,
927				(clocks.num_levels == 1) ?
928					"*" :
929					(smu_v13_0_6_freqs_in_same_level(
930						 clocks.data[i].clocks_in_khz /
931							 1000,
932						 now) ?
933						 "*" :
934						 ""));
935		break;
936
937	case SMU_VCLK:
938		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
939								&now);
940		if (ret) {
941			dev_err(smu->adev->dev,
942				"Attempt to get current vclk Failed!");
943			return ret;
944		}
945
946		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
947		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
948		if (ret) {
949			dev_err(smu->adev->dev,
950				"Attempt to get vclk levels Failed!");
951			return ret;
952		}
953
954		for (i = 0; i < single_dpm_table->count; i++)
955			size += sysfs_emit_at(
956				buf, size, "%d: %uMhz %s\n", i,
957				single_dpm_table->dpm_levels[i].value,
958				(clocks.num_levels == 1) ?
959					"*" :
960					(smu_v13_0_6_freqs_in_same_level(
961						 clocks.data[i].clocks_in_khz /
962							 1000,
963						 now) ?
964						 "*" :
965						 ""));
966		break;
967
968	case SMU_DCLK:
969		ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
970							       &now);
971		if (ret) {
972			dev_err(smu->adev->dev,
973				"Attempt to get current dclk Failed!");
974			return ret;
975		}
976
977		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
978		ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
979		if (ret) {
980			dev_err(smu->adev->dev,
981				"Attempt to get dclk levels Failed!");
982			return ret;
983		}
984
985		for (i = 0; i < single_dpm_table->count; i++)
986			size += sysfs_emit_at(
987				buf, size, "%d: %uMhz %s\n", i,
988				single_dpm_table->dpm_levels[i].value,
989				(clocks.num_levels == 1) ?
990					"*" :
991					(smu_v13_0_6_freqs_in_same_level(
992						 clocks.data[i].clocks_in_khz /
993							 1000,
994						 now) ?
995						 "*" :
996						 ""));
997		break;
998
999	default:
1000		break;
1001	}
1002
1003	return size;
1004}
1005
1006static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
1007					uint32_t feature_mask, uint32_t level)
1008{
1009	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
1010	uint32_t freq;
1011	int ret = 0;
1012
1013	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
1014	    (feature_mask & FEATURE_MASK(FEATURE_DPM_GFXCLK))) {
1015		freq = dpm_context->dpm_tables.gfx_table.dpm_levels[level].value;
1016		ret = smu_cmn_send_smc_msg_with_param(
1017			smu,
1018			(max ? SMU_MSG_SetSoftMaxGfxClk :
1019			       SMU_MSG_SetSoftMinGfxclk),
1020			freq & 0xffff, NULL);
1021		if (ret) {
1022			dev_err(smu->adev->dev,
1023				"Failed to set soft %s gfxclk !\n",
1024				max ? "max" : "min");
1025			return ret;
1026		}
1027	}
1028
1029	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
1030	    (feature_mask & FEATURE_MASK(FEATURE_DPM_UCLK))) {
1031		freq = dpm_context->dpm_tables.uclk_table.dpm_levels[level]
1032			       .value;
1033		ret = smu_cmn_send_smc_msg_with_param(
1034			smu,
1035			(max ? SMU_MSG_SetSoftMaxByFreq :
1036			       SMU_MSG_SetSoftMinByFreq),
1037			(PPCLK_UCLK << 16) | (freq & 0xffff), NULL);
1038		if (ret) {
1039			dev_err(smu->adev->dev,
1040				"Failed to set soft %s memclk !\n",
1041				max ? "max" : "min");
1042			return ret;
1043		}
1044	}
1045
1046	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
1047	    (feature_mask & FEATURE_MASK(FEATURE_DPM_SOCCLK))) {
1048		freq = dpm_context->dpm_tables.soc_table.dpm_levels[level].value;
1049		ret = smu_cmn_send_smc_msg_with_param(
1050			smu,
1051			(max ? SMU_MSG_SetSoftMaxByFreq :
1052			       SMU_MSG_SetSoftMinByFreq),
1053			(PPCLK_SOCCLK << 16) | (freq & 0xffff), NULL);
1054		if (ret) {
1055			dev_err(smu->adev->dev,
1056				"Failed to set soft %s socclk !\n",
1057				max ? "max" : "min");
1058			return ret;
1059		}
1060	}
1061
1062	return ret;
1063}
1064
1065static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
1066					enum smu_clk_type type, uint32_t mask)
1067{
1068	struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
1069	struct smu_13_0_dpm_table *single_dpm_table = NULL;
1070	uint32_t soft_min_level, soft_max_level;
1071	int ret = 0;
1072
1073	soft_min_level = mask ? (ffs(mask) - 1) : 0;
1074	soft_max_level = mask ? (fls(mask) - 1) : 0;
1075
1076	switch (type) {
1077	case SMU_SCLK:
1078		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
1079		if (soft_max_level >= single_dpm_table->count) {
1080			dev_err(smu->adev->dev,
1081				"Clock level specified %d is over max allowed %d\n",
1082				soft_max_level, single_dpm_table->count - 1);
1083			ret = -EINVAL;
1084			break;
1085		}
1086
1087		ret = smu_v13_0_6_upload_dpm_level(
1088			smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
1089			soft_min_level);
1090		if (ret) {
1091			dev_err(smu->adev->dev,
1092				"Failed to upload boot level to lowest!\n");
1093			break;
1094		}
1095
1096		ret = smu_v13_0_6_upload_dpm_level(
1097			smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
1098			soft_max_level);
1099		if (ret)
1100			dev_err(smu->adev->dev,
1101				"Failed to upload dpm max level to highest!\n");
1102
1103		break;
1104
1105	case SMU_MCLK:
1106	case SMU_SOCCLK:
1107	case SMU_FCLK:
1108		/*
1109		 * Should not arrive here since smu_13_0_6 does not
1110		 * support mclk/socclk/fclk softmin/softmax settings
1111		 */
1112		ret = -EINVAL;
1113		break;
1114
1115	default:
1116		break;
1117	}
1118
1119	return ret;
1120}
1121
1122static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
1123						    enum amd_pp_sensors sensor,
1124						    uint32_t *value)
1125{
1126	int ret = 0;
1127
1128	if (!value)
1129		return -EINVAL;
1130
1131	switch (sensor) {
1132	case AMDGPU_PP_SENSOR_GPU_LOAD:
1133		ret = smu_v13_0_6_get_smu_metrics_data(
1134			smu, METRICS_AVERAGE_GFXACTIVITY, value);
1135		break;
1136	case AMDGPU_PP_SENSOR_MEM_LOAD:
1137		ret = smu_v13_0_6_get_smu_metrics_data(
1138			smu, METRICS_AVERAGE_MEMACTIVITY, value);
1139		break;
1140	default:
1141		dev_err(smu->adev->dev,
1142			"Invalid sensor for retrieving clock activity\n");
1143		return -EINVAL;
1144	}
1145
1146	return ret;
1147}
1148
1149static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
1150					       enum amd_pp_sensors sensor,
1151					       uint32_t *value)
1152{
1153	int ret = 0;
1154
1155	if (!value)
1156		return -EINVAL;
1157
1158	switch (sensor) {
1159	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1160		ret = smu_v13_0_6_get_smu_metrics_data(
1161			smu, METRICS_TEMPERATURE_HOTSPOT, value);
1162		break;
1163	case AMDGPU_PP_SENSOR_MEM_TEMP:
1164		ret = smu_v13_0_6_get_smu_metrics_data(
1165			smu, METRICS_TEMPERATURE_MEM, value);
1166		break;
1167	default:
1168		dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1169		return -EINVAL;
1170	}
1171
1172	return ret;
1173}
1174
1175static int smu_v13_0_6_read_sensor(struct smu_context *smu,
1176				   enum amd_pp_sensors sensor, void *data,
1177				   uint32_t *size)
1178{
1179	int ret = 0;
1180
1181	if (amdgpu_ras_intr_triggered())
1182		return 0;
1183
1184	if (!data || !size)
1185		return -EINVAL;
1186
1187	switch (sensor) {
1188	case AMDGPU_PP_SENSOR_MEM_LOAD:
1189	case AMDGPU_PP_SENSOR_GPU_LOAD:
1190		ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
1191							       (uint32_t *)data);
1192		*size = 4;
1193		break;
1194	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
1195		ret = smu_v13_0_6_get_smu_metrics_data(smu,
1196						       METRICS_CURR_SOCKETPOWER,
1197						       (uint32_t *)data);
1198		*size = 4;
1199		break;
1200	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
1201	case AMDGPU_PP_SENSOR_MEM_TEMP:
1202		ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
1203							  (uint32_t *)data);
1204		*size = 4;
1205		break;
1206	case AMDGPU_PP_SENSOR_GFX_MCLK:
1207		ret = smu_v13_0_6_get_current_clk_freq_by_table(
1208			smu, SMU_UCLK, (uint32_t *)data);
1209		/* the output clock frequency in 10K unit */
1210		*(uint32_t *)data *= 100;
1211		*size = 4;
1212		break;
1213	case AMDGPU_PP_SENSOR_GFX_SCLK:
1214		ret = smu_v13_0_6_get_current_clk_freq_by_table(
1215			smu, SMU_GFXCLK, (uint32_t *)data);
1216		*(uint32_t *)data *= 100;
1217		*size = 4;
1218		break;
1219	case AMDGPU_PP_SENSOR_VDDGFX:
1220		ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
1221		*size = 4;
1222		break;
1223	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
1224	default:
1225		ret = -EOPNOTSUPP;
1226		break;
1227	}
1228
1229	return ret;
1230}
1231
1232static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
1233				       uint32_t *current_power_limit,
1234				       uint32_t *default_power_limit,
1235				       uint32_t *max_power_limit)
1236{
1237	struct smu_table_context *smu_table = &smu->smu_table;
1238	struct PPTable_t *pptable =
1239		(struct PPTable_t *)smu_table->driver_pptable;
1240	uint32_t power_limit = 0;
1241	int ret;
1242
1243	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
1244
1245	if (ret) {
1246		dev_err(smu->adev->dev, "Couldn't get PPT limit");
1247		return -EINVAL;
1248	}
1249
1250	if (current_power_limit)
1251		*current_power_limit = power_limit;
1252	if (default_power_limit)
1253		*default_power_limit = power_limit;
1254
1255	if (max_power_limit) {
1256		*max_power_limit = pptable->MaxSocketPowerLimit;
1257	}
1258
1259	return 0;
1260}
1261
1262static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
1263				       enum smu_ppt_limit_type limit_type,
1264				       uint32_t limit)
1265{
1266	return smu_v13_0_set_power_limit(smu, limit_type, limit);
1267}
1268
1269static int smu_v13_0_6_irq_process(struct amdgpu_device *adev,
1270				   struct amdgpu_irq_src *source,
1271				   struct amdgpu_iv_entry *entry)
1272{
1273	struct smu_context *smu = adev->powerplay.pp_handle;
1274	struct smu_power_context *smu_power = &smu->smu_power;
1275	struct smu_13_0_power_context *power_context = smu_power->power_context;
1276	uint32_t client_id = entry->client_id;
1277	uint32_t ctxid = entry->src_data[0];
1278	uint32_t src_id = entry->src_id;
1279	uint32_t data;
1280
1281	if (client_id == SOC15_IH_CLIENTID_MP1) {
1282		if (src_id == IH_INTERRUPT_ID_TO_DRIVER) {
1283			/* ACK SMUToHost interrupt */
1284			data = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
1285			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
1286			WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, data);
1287			/*
1288			 * ctxid is used to distinguish different events for SMCToHost
1289			 * interrupt.
1290			 */
1291			switch (ctxid) {
1292			case IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
1293				/*
1294				 * Increment the throttle interrupt counter
1295				 */
1296				atomic64_inc(&smu->throttle_int_counter);
1297
1298				if (!atomic_read(&adev->throttling_logging_enabled))
1299					return 0;
1300
1301				/* This uses the new method which fixes the
1302				 * incorrect throttling status reporting
1303				 * through metrics table. For older FWs,
1304				 * it will be ignored.
1305				 */
1306				if (__ratelimit(&adev->throttling_logging_rs)) {
1307					atomic_set(
1308						&power_context->throttle_status,
1309							entry->src_data[1]);
1310					schedule_work(&smu->throttling_logging_work);
1311				}
1312
1313				break;
1314			}
1315		}
1316	}
1317
1318	return 0;
1319}
1320
1321static int smu_v13_0_6_set_irq_state(struct amdgpu_device *adev,
1322			      struct amdgpu_irq_src *source,
1323			      unsigned tyep,
1324			      enum amdgpu_interrupt_state state)
1325{
1326	uint32_t val = 0;
1327
1328	switch (state) {
1329	case AMDGPU_IRQ_STATE_DISABLE:
1330		/* For MP1 SW irqs */
1331		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
1332		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
1333		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
1334
1335		break;
1336	case AMDGPU_IRQ_STATE_ENABLE:
1337		/* For MP1 SW irqs */
1338		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT);
1339		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
1340		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
1341		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val);
1342
1343		val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL);
1344		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
1345		WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val);
1346
1347		break;
1348	default:
1349		break;
1350	}
1351
1352	return 0;
1353}
1354
1355static const struct amdgpu_irq_src_funcs smu_v13_0_6_irq_funcs = {
1356	.set = smu_v13_0_6_set_irq_state,
1357	.process = smu_v13_0_6_irq_process,
1358};
1359
1360static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
1361{
1362	struct amdgpu_device *adev = smu->adev;
1363	struct amdgpu_irq_src *irq_src = &smu->irq_source;
1364	int ret = 0;
1365
1366	if (amdgpu_sriov_vf(adev))
1367		return 0;
1368
1369	irq_src->num_types = 1;
1370	irq_src->funcs = &smu_v13_0_6_irq_funcs;
1371
1372	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
1373				IH_INTERRUPT_ID_TO_DRIVER,
1374				irq_src);
1375	if (ret)
1376		return ret;
1377
1378	return ret;
1379}
1380
1381static int smu_v13_0_6_notify_unload(struct smu_context *smu)
1382{
1383	uint32_t smu_version;
1384
1385	smu_cmn_get_smc_version(smu, NULL, &smu_version);
1386	if (smu_version <= 0x553500)
1387		return 0;
1388
1389	dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
1390	/* Ignore return, just intimate FW that driver is not going to be there */
1391	smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
1392
1393	return 0;
1394}
1395
1396static int smu_v13_0_6_system_features_control(struct smu_context *smu,
1397					       bool enable)
1398{
1399	struct amdgpu_device *adev = smu->adev;
1400	int ret = 0;
1401
1402	if (amdgpu_sriov_vf(adev))
1403		return 0;
1404
1405	if (enable) {
1406		if (!(adev->flags & AMD_IS_APU))
1407			ret = smu_v13_0_system_features_control(smu, enable);
1408	} else {
1409		/* Notify FW that the device is no longer driver managed */
1410		smu_v13_0_6_notify_unload(smu);
1411	}
1412
1413	return ret;
1414}
1415
1416static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
1417						       uint32_t min,
1418						       uint32_t max)
1419{
1420	int ret;
1421
1422	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1423					      max & 0xffff, NULL);
1424	if (ret)
1425		return ret;
1426
1427	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
1428					      min & 0xffff, NULL);
1429
1430	return ret;
1431}
1432
1433static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
1434					     enum amd_dpm_forced_level level)
1435{
1436	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1437	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1438	struct smu_13_0_dpm_table *gfx_table =
1439		&dpm_context->dpm_tables.gfx_table;
1440	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1441	int ret;
1442
1443	/* Disable determinism if switching to another mode */
1444	if ((smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) &&
1445	    (level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)) {
1446		smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
1447		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
1448	}
1449
1450	switch (level) {
1451	case AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM:
1452		return 0;
1453
1454	case AMD_DPM_FORCED_LEVEL_AUTO:
1455		if ((gfx_table->min == pstate_table->gfxclk_pstate.curr.min) &&
1456		    (gfx_table->max == pstate_table->gfxclk_pstate.curr.max))
1457			return 0;
1458
1459		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(
1460			smu, gfx_table->min, gfx_table->max);
1461		if (ret)
1462			return ret;
1463
1464		pstate_table->gfxclk_pstate.curr.min = gfx_table->min;
1465		pstate_table->gfxclk_pstate.curr.max = gfx_table->max;
1466		return 0;
1467	case AMD_DPM_FORCED_LEVEL_MANUAL:
1468		return 0;
1469	default:
1470		break;
1471	}
1472
1473	return -EINVAL;
1474}
1475
1476static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
1477						   enum smu_clk_type clk_type,
1478						   uint32_t min, uint32_t max)
1479{
1480	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1481	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1482	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1483	struct amdgpu_device *adev = smu->adev;
1484	uint32_t min_clk;
1485	uint32_t max_clk;
1486	int ret = 0;
1487
1488	if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK)
1489		return -EINVAL;
1490
1491	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
1492	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
1493		return -EINVAL;
1494
1495	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
1496		if (min >= max) {
1497			dev_err(smu->adev->dev,
1498				"Minimum GFX clk should be less than the maximum allowed clock\n");
1499			return -EINVAL;
1500		}
1501
1502		if ((min == pstate_table->gfxclk_pstate.curr.min) &&
1503		    (max == pstate_table->gfxclk_pstate.curr.max))
1504			return 0;
1505
1506		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min, max);
1507		if (!ret) {
1508			pstate_table->gfxclk_pstate.curr.min = min;
1509			pstate_table->gfxclk_pstate.curr.max = max;
1510		}
1511
1512		return ret;
1513	}
1514
1515	if (smu_dpm->dpm_level == AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
1516		if (!max || (max < dpm_context->dpm_tables.gfx_table.min) ||
1517		    (max > dpm_context->dpm_tables.gfx_table.max)) {
1518			dev_warn(
1519				adev->dev,
1520				"Invalid max frequency %d MHz specified for determinism\n",
1521				max);
1522			return -EINVAL;
1523		}
1524
1525		/* Restore default min/max clocks and enable determinism */
1526		min_clk = dpm_context->dpm_tables.gfx_table.min;
1527		max_clk = dpm_context->dpm_tables.gfx_table.max;
1528		ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
1529								 max_clk);
1530		if (!ret) {
1531			usleep_range(500, 1000);
1532			ret = smu_cmn_send_smc_msg_with_param(
1533				smu, SMU_MSG_EnableDeterminism, max, NULL);
1534			if (ret) {
1535				dev_err(adev->dev,
1536					"Failed to enable determinism at GFX clock %d MHz\n",
1537					max);
1538			} else {
1539				pstate_table->gfxclk_pstate.curr.min = min_clk;
1540				pstate_table->gfxclk_pstate.curr.max = max;
1541			}
1542		}
1543	}
1544
1545	return ret;
1546}
1547
1548static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
1549					  enum PP_OD_DPM_TABLE_COMMAND type,
1550					  long input[], uint32_t size)
1551{
1552	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1553	struct smu_13_0_dpm_context *dpm_context = smu_dpm->dpm_context;
1554	struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1555	uint32_t min_clk;
1556	uint32_t max_clk;
1557	int ret = 0;
1558
1559	/* Only allowed in manual or determinism mode */
1560	if ((smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) &&
1561	    (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM))
1562		return -EINVAL;
1563
1564	switch (type) {
1565	case PP_OD_EDIT_SCLK_VDDC_TABLE:
1566		if (size != 2) {
1567			dev_err(smu->adev->dev,
1568				"Input parameter number not correct\n");
1569			return -EINVAL;
1570		}
1571
1572		if (input[0] == 0) {
1573			if (input[1] < dpm_context->dpm_tables.gfx_table.min) {
1574				dev_warn(
1575					smu->adev->dev,
1576					"Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
1577					input[1],
1578					dpm_context->dpm_tables.gfx_table.min);
1579				pstate_table->gfxclk_pstate.custom.min =
1580					pstate_table->gfxclk_pstate.curr.min;
1581				return -EINVAL;
1582			}
1583
1584			pstate_table->gfxclk_pstate.custom.min = input[1];
1585		} else if (input[0] == 1) {
1586			if (input[1] > dpm_context->dpm_tables.gfx_table.max) {
1587				dev_warn(
1588					smu->adev->dev,
1589					"Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
1590					input[1],
1591					dpm_context->dpm_tables.gfx_table.max);
1592				pstate_table->gfxclk_pstate.custom.max =
1593					pstate_table->gfxclk_pstate.curr.max;
1594				return -EINVAL;
1595			}
1596
1597			pstate_table->gfxclk_pstate.custom.max = input[1];
1598		} else {
1599			return -EINVAL;
1600		}
1601		break;
1602	case PP_OD_RESTORE_DEFAULT_TABLE:
1603		if (size != 0) {
1604			dev_err(smu->adev->dev,
1605				"Input parameter number not correct\n");
1606			return -EINVAL;
1607		} else {
1608			/* Use the default frequencies for manual and determinism mode */
1609			min_clk = dpm_context->dpm_tables.gfx_table.min;
1610			max_clk = dpm_context->dpm_tables.gfx_table.max;
1611
1612			return smu_v13_0_6_set_soft_freq_limited_range(
1613				smu, SMU_GFXCLK, min_clk, max_clk);
1614		}
1615		break;
1616	case PP_OD_COMMIT_DPM_TABLE:
1617		if (size != 0) {
1618			dev_err(smu->adev->dev,
1619				"Input parameter number not correct\n");
1620			return -EINVAL;
1621		} else {
1622			if (!pstate_table->gfxclk_pstate.custom.min)
1623				pstate_table->gfxclk_pstate.custom.min =
1624					pstate_table->gfxclk_pstate.curr.min;
1625
1626			if (!pstate_table->gfxclk_pstate.custom.max)
1627				pstate_table->gfxclk_pstate.custom.max =
1628					pstate_table->gfxclk_pstate.curr.max;
1629
1630			min_clk = pstate_table->gfxclk_pstate.custom.min;
1631			max_clk = pstate_table->gfxclk_pstate.custom.max;
1632
1633			return smu_v13_0_6_set_soft_freq_limited_range(
1634				smu, SMU_GFXCLK, min_clk, max_clk);
1635		}
1636		break;
1637	default:
1638		return -ENOSYS;
1639	}
1640
1641	return ret;
1642}
1643
1644static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
1645					uint64_t *feature_mask)
1646{
1647	uint32_t smu_version;
1648	int ret;
1649
1650	smu_cmn_get_smc_version(smu, NULL, &smu_version);
1651	ret = smu_cmn_get_enabled_mask(smu, feature_mask);
1652
1653	if (ret == -EIO && smu_version < 0x552F00) {
1654		*feature_mask = 0;
1655		ret = 0;
1656	}
1657
1658	return ret;
1659}
1660
1661static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
1662{
1663	int ret;
1664	uint64_t feature_enabled;
1665
1666	ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
1667
1668	if (ret)
1669		return false;
1670
1671	return !!(feature_enabled & SMC_DPM_FEATURE);
1672}
1673
1674static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
1675					void *table_data)
1676{
1677	struct smu_table_context *smu_table = &smu->smu_table;
1678	struct smu_table *table = &smu_table->driver_table;
1679	struct amdgpu_device *adev = smu->adev;
1680	uint32_t table_size;
1681	int ret = 0;
1682
1683	if (!table_data)
1684		return -EINVAL;
1685
1686	table_size = smu_table->tables[SMU_TABLE_I2C_COMMANDS].size;
1687
1688	memcpy(table->cpu_addr, table_data, table_size);
1689	/* Flush hdp cache */
1690	amdgpu_asic_flush_hdp(adev, NULL);
1691	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
1692					  NULL);
1693
1694	return ret;
1695}
1696
1697static int smu_v13_0_6_i2c_xfer(struct i2c_adapter *i2c_adap,
1698				struct i2c_msg *msg, int num_msgs)
1699{
1700	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
1701	struct amdgpu_device *adev = smu_i2c->adev;
1702	struct smu_context *smu = adev->powerplay.pp_handle;
1703	struct smu_table_context *smu_table = &smu->smu_table;
1704	struct smu_table *table = &smu_table->driver_table;
1705	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
1706	int i, j, r, c;
1707	u16 dir;
1708
1709	if (!adev->pm.dpm_enabled)
1710		return -EBUSY;
1711
1712	req = kzalloc(sizeof(*req), GFP_KERNEL);
1713	if (!req)
1714		return -ENOMEM;
1715
1716	req->I2CcontrollerPort = smu_i2c->port;
1717	req->I2CSpeed = I2C_SPEED_FAST_400K;
1718	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
1719	dir = msg[0].flags & I2C_M_RD;
1720
1721	for (c = i = 0; i < num_msgs; i++) {
1722		for (j = 0; j < msg[i].len; j++, c++) {
1723			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
1724
1725			if (!(msg[i].flags & I2C_M_RD)) {
1726				/* write */
1727				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
1728				cmd->ReadWriteData = msg[i].buf[j];
1729			}
1730
1731			if ((dir ^ msg[i].flags) & I2C_M_RD) {
1732				/* The direction changes.
1733				 */
1734				dir = msg[i].flags & I2C_M_RD;
1735				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
1736			}
1737
1738			req->NumCmds++;
1739
1740			/*
1741			 * Insert STOP if we are at the last byte of either last
1742			 * message for the transaction or the client explicitly
1743			 * requires a STOP at this particular message.
1744			 */
1745			if ((j == msg[i].len - 1) &&
1746			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
1747				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
1748				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
1749			}
1750		}
1751	}
1752	mutex_lock(&adev->pm.mutex);
1753	r = smu_v13_0_6_request_i2c_xfer(smu, req);
1754	if (r)
1755		goto fail;
1756
1757	for (c = i = 0; i < num_msgs; i++) {
1758		if (!(msg[i].flags & I2C_M_RD)) {
1759			c += msg[i].len;
1760			continue;
1761		}
1762		for (j = 0; j < msg[i].len; j++, c++) {
1763			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
1764
1765			msg[i].buf[j] = cmd->ReadWriteData;
1766		}
1767	}
1768	r = num_msgs;
1769fail:
1770	mutex_unlock(&adev->pm.mutex);
1771	kfree(req);
1772	return r;
1773}
1774
1775static u32 smu_v13_0_6_i2c_func(struct i2c_adapter *adap)
1776{
1777	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
1778}
1779
1780static const struct i2c_algorithm smu_v13_0_6_i2c_algo = {
1781	.master_xfer = smu_v13_0_6_i2c_xfer,
1782	.functionality = smu_v13_0_6_i2c_func,
1783};
1784
1785static const struct i2c_adapter_quirks smu_v13_0_6_i2c_control_quirks = {
1786	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
1787	.max_read_len = MAX_SW_I2C_COMMANDS,
1788	.max_write_len = MAX_SW_I2C_COMMANDS,
1789	.max_comb_1st_msg_len = 2,
1790	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
1791};
1792
1793static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
1794{
1795	struct amdgpu_device *adev = smu->adev;
1796	int res, i;
1797
1798	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
1799		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1800		struct i2c_adapter *control = &smu_i2c->adapter;
1801
1802		smu_i2c->adev = adev;
1803		smu_i2c->port = i;
1804		rw_init(&smu_i2c->mutex, "1306iic");
1805#ifdef __linux__
1806		control->owner = THIS_MODULE;
1807		control->class = I2C_CLASS_SPD;
1808		control->dev.parent = &adev->pdev->dev;
1809#endif
1810		control->algo = &smu_v13_0_6_i2c_algo;
1811		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
1812		control->quirks = &smu_v13_0_6_i2c_control_quirks;
1813		i2c_set_adapdata(control, smu_i2c);
1814
1815		res = i2c_add_adapter(control);
1816		if (res) {
1817			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
1818			goto Out_err;
1819		}
1820	}
1821
1822	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1823	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
1824
1825	return 0;
1826Out_err:
1827	for ( ; i >= 0; i--) {
1828		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1829		struct i2c_adapter *control = &smu_i2c->adapter;
1830
1831		i2c_del_adapter(control);
1832	}
1833	return res;
1834}
1835
1836static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
1837{
1838	struct amdgpu_device *adev = smu->adev;
1839	int i;
1840
1841	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
1842		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
1843		struct i2c_adapter *control = &smu_i2c->adapter;
1844
1845		i2c_del_adapter(control);
1846	}
1847	adev->pm.ras_eeprom_i2c_bus = NULL;
1848	adev->pm.fru_eeprom_i2c_bus = NULL;
1849}
1850
1851static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
1852{
1853	struct amdgpu_device *adev = smu->adev;
1854	struct smu_table_context *smu_table = &smu->smu_table;
1855	struct PPTable_t *pptable =
1856		(struct PPTable_t *)smu_table->driver_pptable;
1857
1858	adev->unique_id = pptable->PublicSerialNumber_AID;
1859	if (adev->serial[0] == '\0')
1860		snprintf(adev->serial, sizeof(adev->serial), "%016llx", adev->unique_id);
1861}
1862
1863static bool smu_v13_0_6_is_baco_supported(struct smu_context *smu)
1864{
1865	/* smu_13_0_6 does not support baco */
1866
1867	return false;
1868}
1869
1870static int smu_v13_0_6_set_df_cstate(struct smu_context *smu,
1871				     enum pp_df_cstate state)
1872{
1873	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl,
1874					       state, NULL);
1875}
1876
1877static int smu_v13_0_6_allow_xgmi_power_down(struct smu_context *smu, bool en)
1878{
1879	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GmiPwrDnControl,
1880					       en ? 0 : 1, NULL);
1881}
1882
1883static const char *const throttling_logging_label[] = {
1884	[THROTTLER_PROCHOT_BIT] = "Prochot",
1885	[THROTTLER_PPT_BIT] = "PPT",
1886	[THROTTLER_THERMAL_SOCKET_BIT] = "SOC",
1887	[THROTTLER_THERMAL_VR_BIT] = "VR",
1888	[THROTTLER_THERMAL_HBM_BIT] = "HBM"
1889};
1890
1891static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
1892{
1893	int throttler_idx, throtting_events = 0, buf_idx = 0;
1894	struct amdgpu_device *adev = smu->adev;
1895	uint32_t throttler_status;
1896	char log_buf[256];
1897
1898	throttler_status = smu_v13_0_6_get_throttler_status(smu);
1899	if (!throttler_status)
1900		return;
1901
1902	memset(log_buf, 0, sizeof(log_buf));
1903	for (throttler_idx = 0;
1904	     throttler_idx < ARRAY_SIZE(throttling_logging_label);
1905	     throttler_idx++) {
1906		if (throttler_status & (1U << throttler_idx)) {
1907			throtting_events++;
1908			buf_idx += snprintf(
1909				log_buf + buf_idx, sizeof(log_buf) - buf_idx,
1910				"%s%s", throtting_events > 1 ? " and " : "",
1911				throttling_logging_label[throttler_idx]);
1912			if (buf_idx >= sizeof(log_buf)) {
1913				dev_err(adev->dev, "buffer overflow!\n");
1914				log_buf[sizeof(log_buf) - 1] = '\0';
1915				break;
1916			}
1917		}
1918	}
1919
1920	dev_warn(adev->dev,
1921		 "WARN: GPU is throttled, expect performance decrease. %s.\n",
1922		 log_buf);
1923	kgd2kfd_smi_event_throttle(
1924		smu->adev->kfd.dev,
1925		smu_cmn_get_indep_throttler_status(throttler_status,
1926						   smu_v13_0_6_throttler_map));
1927}
1928
1929static int
1930smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
1931{
1932	struct amdgpu_device *adev = smu->adev;
1933
1934	return REG_GET_FIELD(RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL),
1935			     PCIE_LC_LINK_WIDTH_CNTL, LC_LINK_WIDTH_RD);
1936}
1937
1938static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
1939{
1940	struct amdgpu_device *adev = smu->adev;
1941	uint32_t speed_level;
1942	uint32_t esm_ctrl;
1943
1944	/* TODO: confirm this on real target */
1945	esm_ctrl = RREG32_PCIE(smnPCIE_ESM_CTRL);
1946	if ((esm_ctrl >> 15) & 0x1)
1947		return (((esm_ctrl >> 8) & 0x7F) + 128);
1948
1949	speed_level = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
1950		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
1951		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
1952	if (speed_level > LINK_SPEED_MAX)
1953		speed_level = 0;
1954
1955	return pcie_gen_to_speed(speed_level + 1);
1956}
1957
1958static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
1959{
1960	struct smu_table_context *smu_table = &smu->smu_table;
1961	struct gpu_metrics_v1_3 *gpu_metrics =
1962		(struct gpu_metrics_v1_3 *)smu_table->gpu_metrics_table;
1963	struct amdgpu_device *adev = smu->adev;
1964	int ret = 0, inst0, xcc0;
1965	MetricsTable_t *metrics;
1966	u16 link_width_level;
1967
1968	inst0 = adev->sdma.instance[0].aid_id;
1969	xcc0 = GET_INST(GC, 0);
1970
1971	metrics = kzalloc(sizeof(MetricsTable_t), GFP_KERNEL);
1972	ret = smu_v13_0_6_get_metrics_table(smu, metrics, true);
1973	if (ret) {
1974		kfree(metrics);
1975		return ret;
1976	}
1977
1978	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
1979
1980	gpu_metrics->temperature_hotspot =
1981		SMUQ10_TO_UINT(metrics->MaxSocketTemperature);
1982	/* Individual HBM stack temperature is not reported */
1983	gpu_metrics->temperature_mem =
1984		SMUQ10_TO_UINT(metrics->MaxHbmTemperature);
1985	/* Reports max temperature of all voltage rails */
1986	gpu_metrics->temperature_vrsoc =
1987		SMUQ10_TO_UINT(metrics->MaxVrTemperature);
1988
1989	gpu_metrics->average_gfx_activity =
1990		SMUQ10_TO_UINT(metrics->SocketGfxBusy);
1991	gpu_metrics->average_umc_activity =
1992		SMUQ10_TO_UINT(metrics->DramBandwidthUtilization);
1993
1994	gpu_metrics->average_socket_power =
1995		SMUQ10_TO_UINT(metrics->SocketPower);
1996	/* Energy counter reported in 15.259uJ (2^-16) units */
1997	gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
1998
1999	gpu_metrics->current_gfxclk =
2000		SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
2001	gpu_metrics->current_socclk =
2002		SMUQ10_TO_UINT(metrics->SocclkFrequency[inst0]);
2003	gpu_metrics->current_uclk = SMUQ10_TO_UINT(metrics->UclkFrequency);
2004	gpu_metrics->current_vclk0 =
2005		SMUQ10_TO_UINT(metrics->VclkFrequency[inst0]);
2006	gpu_metrics->current_dclk0 =
2007		SMUQ10_TO_UINT(metrics->DclkFrequency[inst0]);
2008
2009	gpu_metrics->average_gfxclk_frequency = gpu_metrics->current_gfxclk;
2010	gpu_metrics->average_socclk_frequency = gpu_metrics->current_socclk;
2011	gpu_metrics->average_uclk_frequency = gpu_metrics->current_uclk;
2012	gpu_metrics->average_vclk0_frequency = gpu_metrics->current_vclk0;
2013	gpu_metrics->average_dclk0_frequency = gpu_metrics->current_dclk0;
2014
2015	/* Throttle status is not reported through metrics now */
2016	gpu_metrics->throttle_status = 0;
2017
2018	if (!(adev->flags & AMD_IS_APU)) {
2019		link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
2020		if (link_width_level > MAX_LINK_WIDTH)
2021			link_width_level = 0;
2022
2023		gpu_metrics->pcie_link_width =
2024			DECODE_LANE_WIDTH(link_width_level);
2025		gpu_metrics->pcie_link_speed =
2026			smu_v13_0_6_get_current_pcie_link_speed(smu);
2027	}
2028
2029	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
2030
2031	gpu_metrics->gfx_activity_acc =
2032		SMUQ10_TO_UINT(metrics->SocketGfxBusyAcc);
2033	gpu_metrics->mem_activity_acc =
2034		SMUQ10_TO_UINT(metrics->DramBandwidthUtilizationAcc);
2035
2036	gpu_metrics->firmware_timestamp = metrics->Timestamp;
2037
2038	*table = (void *)gpu_metrics;
2039	kfree(metrics);
2040
2041	return sizeof(struct gpu_metrics_v1_3);
2042}
2043
2044static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
2045{
2046	int ret = 0, index;
2047	struct amdgpu_device *adev = smu->adev;
2048	int timeout = 10;
2049
2050	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2051					       SMU_MSG_GfxDeviceDriverReset);
2052
2053	mutex_lock(&smu->message_lock);
2054
2055	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
2056					       SMU_RESET_MODE_2);
2057
2058	/* This is similar to FLR, wait till max FLR timeout */
2059	drm_msleep(100);
2060
2061	dev_dbg(smu->adev->dev, "restore config space...\n");
2062	/* Restore the config space saved during init */
2063	amdgpu_device_load_pci_state(adev->pdev);
2064
2065	dev_dbg(smu->adev->dev, "wait for reset ack\n");
2066	do {
2067		ret = smu_cmn_wait_for_response(smu);
2068		/* Wait a bit more time for getting ACK */
2069		if (ret == -ETIME) {
2070			--timeout;
2071			usleep_range(500, 1000);
2072			continue;
2073		}
2074
2075		if (ret) {
2076			dev_err(adev->dev,
2077				"failed to send mode2 message \tparam: 0x%08x error code %d\n",
2078				SMU_RESET_MODE_2, ret);
2079			goto out;
2080		}
2081	} while (ret == -ETIME && timeout);
2082
2083out:
2084	mutex_unlock(&smu->message_lock);
2085
2086	return ret;
2087}
2088
2089static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
2090						     struct smu_temperature_range *range)
2091{
2092	struct amdgpu_device *adev = smu->adev;
2093	u32 aid_temp, xcd_temp, mem_temp;
2094	uint32_t smu_version;
2095	u32 ccd_temp = 0;
2096	int ret;
2097
2098	if (amdgpu_sriov_vf(smu->adev))
2099		return 0;
2100
2101	if (!range)
2102		return -EINVAL;
2103
2104	/*Check smu version, GetCtfLimit message only supported for smu version 85.69 or higher */
2105	smu_cmn_get_smc_version(smu, NULL, &smu_version);
2106	if (smu_version < 0x554500)
2107		return 0;
2108
2109	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2110					      PPSMC_AID_THM_TYPE, &aid_temp);
2111	if (ret)
2112		goto failed;
2113
2114	if (adev->flags & AMD_IS_APU) {
2115		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2116						      PPSMC_CCD_THM_TYPE, &ccd_temp);
2117		if (ret)
2118			goto failed;
2119	}
2120
2121	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2122					      PPSMC_XCD_THM_TYPE, &xcd_temp);
2123	if (ret)
2124		goto failed;
2125
2126	range->hotspot_crit_max = max3(aid_temp, xcd_temp, ccd_temp) *
2127				       SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2128	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2129					      PPSMC_HBM_THM_TYPE, &mem_temp);
2130	if (ret)
2131		goto failed;
2132
2133	range->mem_crit_max = mem_temp * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
2134failed:
2135	return ret;
2136}
2137
2138static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
2139{
2140	struct amdgpu_device *adev = smu->adev;
2141	struct amdgpu_ras *ras;
2142	u32 fatal_err, param;
2143	int ret = 0;
2144
2145	ras = amdgpu_ras_get_context(adev);
2146	fatal_err = 0;
2147	param = SMU_RESET_MODE_1;
2148
2149	/* fatal error triggered by ras, PMFW supports the flag */
2150	if (ras && atomic_read(&ras->in_recovery))
2151		fatal_err = 1;
2152
2153	param |= (fatal_err << 16);
2154	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
2155					      param, NULL);
2156
2157	if (!ret)
2158		drm_msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
2159
2160	return ret;
2161}
2162
2163static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
2164{
2165	return true;
2166}
2167
2168static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
2169{
2170	return true;
2171}
2172
2173static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
2174						 uint32_t size)
2175{
2176	int ret = 0;
2177
2178	/* message SMU to update the bad page number on SMUBUS */
2179	ret = smu_cmn_send_smc_msg_with_param(
2180		smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
2181	if (ret)
2182		dev_err(smu->adev->dev,
2183			"[%s] failed to message SMU to update HBM bad pages number\n",
2184			__func__);
2185
2186	return ret;
2187}
2188
2189static const struct pptable_funcs smu_v13_0_6_ppt_funcs = {
2190	/* init dpm */
2191	.get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask,
2192	/* dpm/clk tables */
2193	.set_default_dpm_table = smu_v13_0_6_set_default_dpm_table,
2194	.populate_umd_state_clk = smu_v13_0_6_populate_umd_state_clk,
2195	.print_clk_levels = smu_v13_0_6_print_clk_levels,
2196	.force_clk_levels = smu_v13_0_6_force_clk_levels,
2197	.read_sensor = smu_v13_0_6_read_sensor,
2198	.set_performance_level = smu_v13_0_6_set_performance_level,
2199	.get_power_limit = smu_v13_0_6_get_power_limit,
2200	.is_dpm_running = smu_v13_0_6_is_dpm_running,
2201	.get_unique_id = smu_v13_0_6_get_unique_id,
2202	.init_smc_tables = smu_v13_0_6_init_smc_tables,
2203	.fini_smc_tables = smu_v13_0_fini_smc_tables,
2204	.init_power = smu_v13_0_init_power,
2205	.fini_power = smu_v13_0_fini_power,
2206	.check_fw_status = smu_v13_0_6_check_fw_status,
2207	/* pptable related */
2208	.check_fw_version = smu_v13_0_check_fw_version,
2209	.set_driver_table_location = smu_v13_0_set_driver_table_location,
2210	.set_tool_table_location = smu_v13_0_set_tool_table_location,
2211	.notify_memory_pool_location = smu_v13_0_notify_memory_pool_location,
2212	.system_features_control = smu_v13_0_6_system_features_control,
2213	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
2214	.send_smc_msg = smu_cmn_send_smc_msg,
2215	.get_enabled_mask = smu_v13_0_6_get_enabled_mask,
2216	.feature_is_enabled = smu_cmn_feature_is_enabled,
2217	.set_power_limit = smu_v13_0_6_set_power_limit,
2218	.set_xgmi_pstate = smu_v13_0_set_xgmi_pstate,
2219	.register_irq_handler = smu_v13_0_6_register_irq_handler,
2220	.enable_thermal_alert = smu_v13_0_enable_thermal_alert,
2221	.disable_thermal_alert = smu_v13_0_disable_thermal_alert,
2222	.setup_pptable = smu_v13_0_6_setup_pptable,
2223	.baco_is_support = smu_v13_0_6_is_baco_supported,
2224	.get_dpm_ultimate_freq = smu_v13_0_6_get_dpm_ultimate_freq,
2225	.set_soft_freq_limited_range = smu_v13_0_6_set_soft_freq_limited_range,
2226	.od_edit_dpm_table = smu_v13_0_6_usr_edit_dpm_table,
2227	.set_df_cstate = smu_v13_0_6_set_df_cstate,
2228	.allow_xgmi_power_down = smu_v13_0_6_allow_xgmi_power_down,
2229	.log_thermal_throttling_event = smu_v13_0_6_log_thermal_throttling_event,
2230	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
2231	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
2232	.get_gpu_metrics = smu_v13_0_6_get_gpu_metrics,
2233	.get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range,
2234	.mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported,
2235	.mode2_reset_is_support = smu_v13_0_6_is_mode2_reset_supported,
2236	.mode1_reset = smu_v13_0_6_mode1_reset,
2237	.mode2_reset = smu_v13_0_6_mode2_reset,
2238	.wait_for_event = smu_v13_0_wait_for_event,
2239	.i2c_init = smu_v13_0_6_i2c_control_init,
2240	.i2c_fini = smu_v13_0_6_i2c_control_fini,
2241	.send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num,
2242};
2243
2244void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
2245{
2246	smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
2247	smu->message_map = smu_v13_0_6_message_map;
2248	smu->clock_map = smu_v13_0_6_clk_map;
2249	smu->feature_map = smu_v13_0_6_feature_mask_map;
2250	smu->table_map = smu_v13_0_6_table_map;
2251	smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
2252	smu_v13_0_set_smu_mailbox_registers(smu);
2253}
2254