/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include <linux/firmware.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v14_0.h"
#include "smu14_driver_if_v14_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "smu_v14_0_2_ppt.h"
#include "smu_v14_0_2_pptable.h"
#include "smu_v14_0_2_ppsmc.h"
#include "mp/mp_14_0_2_offset.h"
#include "mp/mp_14_0_2_sh_mask.h"

#include "smu_cmn.h"
#include "amdgpu_ras.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))

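/*
 * FEATURE_MASK() turns a FEATURE_*_BIT index into a 64-bit mask.
 * SMC_DPM_FEATURE collects the core DPM feature bits; it is what
 * smu_v14_0_2_is_dpm_running() tests to decide whether any DPM
 * domain is active.
 */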
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_UCLK_BIT)	 | \
	FEATURE_MASK(FEATURE_DPM_LINK_BIT)       | \
	FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT)     | \
	FEATURE_MASK(FEATURE_DPM_FCLK_BIT))

#define MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE	0x4000

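/*
 * Message map: translates the generic SMU_MSG_* indices used by the
 * common SMU code into the ASIC-specific PPSMC_MSG_* opcodes. The
 * third column marks messages that are also permitted when running
 * as an SR-IOV VF (valid_in_vf).
 */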
static struct cmn2asic_msg_mapping smu_v14_0_2_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,			PPSMC_MSG_TestMessage,                 1),
	MSG_MAP(GetSmuVersion,			PPSMC_MSG_GetSmuVersion,               1),
	MSG_MAP(GetDriverIfVersion,		PPSMC_MSG_GetDriverIfVersion,          1),
	MSG_MAP(SetAllowedFeaturesMaskLow,	PPSMC_MSG_SetAllowedFeaturesMaskLow,   0),
	MSG_MAP(SetAllowedFeaturesMaskHigh,	PPSMC_MSG_SetAllowedFeaturesMaskHigh,  0),
	MSG_MAP(EnableAllSmuFeatures,		PPSMC_MSG_EnableAllSmuFeatures,        0),
	MSG_MAP(DisableAllSmuFeatures,		PPSMC_MSG_DisableAllSmuFeatures,       0),
	MSG_MAP(EnableSmuFeaturesLow,		PPSMC_MSG_EnableSmuFeaturesLow,        1),
	MSG_MAP(EnableSmuFeaturesHigh,		PPSMC_MSG_EnableSmuFeaturesHigh,       1),
	MSG_MAP(DisableSmuFeaturesLow,		PPSMC_MSG_DisableSmuFeaturesLow,       1),
	MSG_MAP(DisableSmuFeaturesHigh,		PPSMC_MSG_DisableSmuFeaturesHigh,      1),
	MSG_MAP(GetEnabledSmuFeaturesLow,       PPSMC_MSG_GetRunningSmuFeaturesLow,    1),
	MSG_MAP(GetEnabledSmuFeaturesHigh,	PPSMC_MSG_GetRunningSmuFeaturesHigh,   1),
	MSG_MAP(SetWorkloadMask,		PPSMC_MSG_SetWorkloadMask,             1),
	MSG_MAP(SetPptLimit,			PPSMC_MSG_SetPptLimit,                 0),
	MSG_MAP(SetDriverDramAddrHigh,		PPSMC_MSG_SetDriverDramAddrHigh,       1),
	MSG_MAP(SetDriverDramAddrLow,		PPSMC_MSG_SetDriverDramAddrLow,        1),
	MSG_MAP(SetToolsDramAddrHigh,		PPSMC_MSG_SetToolsDramAddrHigh,        0),
	MSG_MAP(SetToolsDramAddrLow,		PPSMC_MSG_SetToolsDramAddrLow,         0),
	MSG_MAP(TransferTableSmu2Dram,		PPSMC_MSG_TransferTableSmu2Dram,       1),
	MSG_MAP(TransferTableDram2Smu,		PPSMC_MSG_TransferTableDram2Smu,       0),
	MSG_MAP(UseDefaultPPTable,		PPSMC_MSG_UseDefaultPPTable,           0),
	MSG_MAP(RunDcBtc,			PPSMC_MSG_RunDcBtc,                    0),
	MSG_MAP(EnterBaco,			PPSMC_MSG_EnterBaco,                   0),
	MSG_MAP(ExitBaco,			PPSMC_MSG_ExitBaco,                    0),
	MSG_MAP(SetSoftMinByFreq,		PPSMC_MSG_SetSoftMinByFreq,            1),
	MSG_MAP(SetSoftMaxByFreq,		PPSMC_MSG_SetSoftMaxByFreq,            1),
	MSG_MAP(SetHardMinByFreq,		PPSMC_MSG_SetHardMinByFreq,            1),
	MSG_MAP(SetHardMaxByFreq,		PPSMC_MSG_SetHardMaxByFreq,            0),
	MSG_MAP(GetMinDpmFreq,			PPSMC_MSG_GetMinDpmFreq,               1),
	MSG_MAP(GetMaxDpmFreq,			PPSMC_MSG_GetMaxDpmFreq,               1),
	MSG_MAP(GetDpmFreqByIndex,		PPSMC_MSG_GetDpmFreqByIndex,           1),
	MSG_MAP(PowerUpVcn,			PPSMC_MSG_PowerUpVcn,                  0),
	MSG_MAP(PowerDownVcn,			PPSMC_MSG_PowerDownVcn,                0),
	MSG_MAP(PowerUpJpeg,			PPSMC_MSG_PowerUpJpeg,                 0),
	MSG_MAP(PowerDownJpeg,			PPSMC_MSG_PowerDownJpeg,               0),
	MSG_MAP(GetDcModeMaxDpmFreq,		PPSMC_MSG_GetDcModeMaxDpmFreq,         1),
	MSG_MAP(OverridePcieParameters,		PPSMC_MSG_OverridePcieParameters,      0),
	MSG_MAP(DramLogSetDramAddrHigh,		PPSMC_MSG_DramLogSetDramAddrHigh,      0),
	MSG_MAP(DramLogSetDramAddrLow,		PPSMC_MSG_DramLogSetDramAddrLow,       0),
	MSG_MAP(DramLogSetDramSize,		PPSMC_MSG_DramLogSetDramSize,          0),
	MSG_MAP(AllowGfxOff,			PPSMC_MSG_AllowGfxOff,                 0),
	MSG_MAP(DisallowGfxOff,			PPSMC_MSG_DisallowGfxOff,              0),
	MSG_MAP(SetMGpuFanBoostLimitRpm,	PPSMC_MSG_SetMGpuFanBoostLimitRpm,     0),
	MSG_MAP(GetPptLimit,			PPSMC_MSG_GetPptLimit,                 0),
	MSG_MAP(NotifyPowerSource,		PPSMC_MSG_NotifyPowerSource,           0),
	MSG_MAP(Mode1Reset,			PPSMC_MSG_Mode1Reset,                  0),
	MSG_MAP(PrepareMp1ForUnload,		PPSMC_MSG_PrepareMp1ForUnload,         0),
	MSG_MAP(DFCstateControl,		PPSMC_MSG_SetExternalClientDfCstateAllow, 0),
	MSG_MAP(ArmD3,				PPSMC_MSG_ArmD3,                       0),
	MSG_MAP(SetNumBadMemoryPagesRetired,	PPSMC_MSG_SetNumBadMemoryPagesRetired,   0),
	MSG_MAP(SetBadMemoryPagesRetiredFlagsPerChannel,
			    PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,   0),
	MSG_MAP(AllowIHHostInterrupt,		PPSMC_MSG_AllowIHHostInterrupt,       0),
	MSG_MAP(ReenableAcDcInterrupt,		PPSMC_MSG_ReenableAcDcInterrupt,       0),
};

static struct cmn2asic_mapping smu_v14_0_2_clk_map[SMU_CLK_COUNT] = {
	CLK_MAP(GFXCLK,		PPCLK_GFXCLK),
	CLK_MAP(SCLK,		PPCLK_GFXCLK),
	CLK_MAP(SOCCLK,		PPCLK_SOCCLK),
	CLK_MAP(FCLK,		PPCLK_FCLK),
	CLK_MAP(UCLK,		PPCLK_UCLK),
	CLK_MAP(MCLK,		PPCLK_UCLK),
	CLK_MAP(VCLK,		PPCLK_VCLK_0),
	CLK_MAP(DCLK,		PPCLK_DCLK_0),
};

static struct cmn2asic_mapping smu_v14_0_2_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(FW_DATA_READ),
	FEA_MAP(DPM_GFXCLK),
	FEA_MAP(DPM_GFX_POWER_OPTIMIZER),
	FEA_MAP(DPM_UCLK),
	FEA_MAP(DPM_FCLK),
	FEA_MAP(DPM_SOCCLK),
	FEA_MAP(DPM_LINK),
	FEA_MAP(DPM_DCN),
	FEA_MAP(VMEMP_SCALING),
	FEA_MAP(VDDIO_MEM_SCALING),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_UCLK),
	FEA_MAP(GFX_ULV),
	FEA_MAP(FW_DSTATE),
	FEA_MAP(GFXOFF),
	FEA_MAP(BACO),
	FEA_MAP(MM_DPM),
	FEA_MAP(SOC_MPCLK_DS),
	FEA_MAP(BACO_MPCLK_DS),
	FEA_MAP(THROTTLERS),
	FEA_MAP(SMARTSHIFT),
	FEA_MAP(GTHR),
	FEA_MAP(ACDC),
	FEA_MAP(VR0HOT),
	FEA_MAP(FW_CTF),
	FEA_MAP(FAN_CONTROL),
	FEA_MAP(GFX_DCS),
	FEA_MAP(GFX_READ_MARGIN),
	FEA_MAP(LED_DISPLAY),
	FEA_MAP(GFXCLK_SPREAD_SPECTRUM),
	FEA_MAP(OUT_OF_BAND_MONITOR),
	FEA_MAP(OPTIMIZED_VMIN),
	FEA_MAP(GFX_IMU),
	FEA_MAP(BOOT_TIME_CAL),
	FEA_MAP(GFX_PCC_DFLL),
	FEA_MAP(SOC_CG),
	FEA_MAP(DF_CSTATE),
	FEA_MAP(GFX_EDC),
	FEA_MAP(BOOT_POWER_OPT),
	FEA_MAP(CLOCK_POWER_DOWN_BYPASS),
	FEA_MAP(DS_VCN),
	FEA_MAP(BACO_CG),
	FEA_MAP(MEM_TEMP_READ),
	FEA_MAP(ATHUB_MMHUB_PG),
	FEA_MAP(SOC_PCC),
	[SMU_FEATURE_DPM_VCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_DPM_DCLK_BIT] = {1, FEATURE_MM_DPM_BIT},
	[SMU_FEATURE_PPT_BIT] = {1, FEATURE_THROTTLERS_BIT},
};

static struct cmn2asic_mapping smu_v14_0_2_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP(PPTABLE),
	TAB_MAP(WATERMARKS),
	TAB_MAP(AVFS_PSM_DEBUG),
	TAB_MAP(PMSTATUSLOG),
	TAB_MAP(SMU_METRICS),
	TAB_MAP(DRIVER_SMU_CONFIG),
	TAB_MAP(ACTIVITY_MONITOR_COEFF),
	[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
	TAB_MAP(I2C_COMMANDS),
	TAB_MAP(ECCINFO),
};

static struct cmn2asic_mapping smu_v14_0_2_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
	PWR_MAP(AC),
	PWR_MAP(DC),
};

static struct cmn2asic_mapping smu_v14_0_2_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT,	WORKLOAD_PPLIB_DEFAULT_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D,		WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_POWERSAVING,		WORKLOAD_PPLIB_POWER_SAVING_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO,		WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR,			WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE,		WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM,		WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_WINDOW3D,		WORKLOAD_PPLIB_WINDOW_3D_BIT),
};

#if 0
static const uint8_t smu_v14_0_2_throttler_map[] = {
	[THROTTLER_PPT0_BIT]		= (SMU_THROTTLER_PPT0_BIT),
	[THROTTLER_PPT1_BIT]		= (SMU_THROTTLER_PPT1_BIT),
	[THROTTLER_PPT2_BIT]		= (SMU_THROTTLER_PPT2_BIT),
	[THROTTLER_PPT3_BIT]		= (SMU_THROTTLER_PPT3_BIT),
	[THROTTLER_TDC_GFX_BIT]		= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_TDC_SOC_BIT]		= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_TEMP_EDGE_BIT]	= (SMU_THROTTLER_TEMP_EDGE_BIT),
	[THROTTLER_TEMP_HOTSPOT_BIT]	= (SMU_THROTTLER_TEMP_HOTSPOT_BIT),
	[THROTTLER_TEMP_MEM_BIT]	= (SMU_THROTTLER_TEMP_MEM_BIT),
	[THROTTLER_TEMP_VR_GFX_BIT]	= (SMU_THROTTLER_TEMP_VR_GFX_BIT),
	[THROTTLER_TEMP_VR_SOC_BIT]	= (SMU_THROTTLER_TEMP_VR_SOC_BIT),
	[THROTTLER_TEMP_VR_MEM0_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM0_BIT),
	[THROTTLER_TEMP_VR_MEM1_BIT]	= (SMU_THROTTLER_TEMP_VR_MEM1_BIT),
	[THROTTLER_TEMP_LIQUID0_BIT]	= (SMU_THROTTLER_TEMP_LIQUID0_BIT),
	[THROTTLER_TEMP_LIQUID1_BIT]	= (SMU_THROTTLER_TEMP_LIQUID1_BIT),
	[THROTTLER_GFX_APCC_PLUS_BIT]	= (SMU_THROTTLER_APCC_BIT),
	[THROTTLER_FIT_BIT]		= (SMU_THROTTLER_FIT_BIT),
};
#endif

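/*
 * Build the mask of features the driver allows PMFW to enable. The
 * two 32-bit words are treated as one 64-bit mask (hence the
 * uint64_t casts below); everything is allowed by default via the
 * 0xff memset, and the per-feature carve-outs are currently
 * compiled out under #if 0.
 */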
static int
smu_v14_0_2_get_allowed_feature_mask(struct smu_context *smu,
				  uint32_t *feature_mask, uint32_t num)
{
	struct amdgpu_device *adev = smu->adev;
	/*u32 smu_version;*/

	if (num > 2)
		return -EINVAL;

	memset(feature_mask, 0xff, sizeof(uint32_t) * num);

	if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
		*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
	}
#if 0
	if (!(adev->pg_flags & AMD_PG_SUPPORT_ATHUB) ||
	    !(adev->pg_flags & AMD_PG_SUPPORT_MMHUB))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_ATHUB_MMHUB_PG_BIT);

	if (!(adev->pm.pp_feature & PP_SOCCLK_DPM_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT);

	/* PMFW 78.58 contains a critical fix for gfxoff feature */
	smu_cmn_get_smc_version(smu, NULL, &smu_version);
	if ((smu_version < 0x004e3a00) ||
	     !(adev->pm.pp_feature & PP_GFXOFF_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFXOFF_BIT);

	if (!(adev->pm.pp_feature & PP_MCLK_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VMEMP_SCALING_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_VDDIO_MEM_SCALING_BIT);
	}

	if (!(adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);

	if (!(adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DPM_LINK_BIT);
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_DS_LCLK_BIT);
	}

	if (!(adev->pm.pp_feature & PP_ULV_MASK))
		*(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_GFX_ULV_BIT);
#endif

	return 0;
}

static int smu_v14_0_2_check_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_14_0_2_powerplay_table *powerplay_table =
		table_context->power_play_table;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	PPTable_t *pptable = smu->smu_table.driver_pptable;
	const OverDriveLimits_t * const overdrive_upperlimits =
				&pptable->SkuTable.OverDriveLimitsBasicMax;
	const OverDriveLimits_t * const overdrive_lowerlimits =
				&pptable->SkuTable.OverDriveLimitsBasicMin;

	if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_HARDWAREDC)
		smu->dc_controlled_by_gpio = true;

	if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_BACO) {
		smu_baco->platform_support = true;

		if (powerplay_table->platform_caps & SMU_14_0_2_PP_PLATFORM_CAP_MACO)
			smu_baco->maco_support = true;
	}

	if (!overdrive_lowerlimits->FeatureCtrlMask ||
	    !overdrive_upperlimits->FeatureCtrlMask)
		smu->od_enabled = false;

	table_context->thermal_controller_type =
		powerplay_table->thermal_controller_type;

	/*
	 * Instead of having its own buffer space and getting the
	 * overdrive_table copied, smu->od_settings just points to the
	 * actual overdrive_table.
	 */
	smu->od_settings = &powerplay_table->overdrive_table;

	smu->adev->pm.no_fan =
		!(pptable->PFE_Settings.FeaturesToRun[0] & (1 << FEATURE_FAN_CONTROL_BIT));

	return 0;
}

static int smu_v14_0_2_store_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_14_0_2_powerplay_table *powerplay_table =
		table_context->power_play_table;

	memcpy(table_context->driver_pptable, &powerplay_table->smc_pptable,
	       sizeof(PPTable_t));

	return 0;
}

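/*
 * Board-specific parameters are carried in the smc_dpm_info VBIOS
 * data table. smu_v14_0_2_append_powerplay_table() below fetches
 * that table through atombios and overwrites the BoardTable portion
 * of the driver pptable with it.
 */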
#ifndef atom_smc_dpm_info_table_14_0_0
struct atom_smc_dpm_info_table_14_0_0 {
	struct atom_common_table_header table_header;
	BoardTable_t BoardTable;
};
#endif

static int smu_v14_0_2_append_powerplay_table(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	struct atom_smc_dpm_info_table_14_0_0 *smc_dpm_table;
	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
	int index, ret;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smc_dpm_info);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
					     (uint8_t **)&smc_dpm_table);
	if (ret)
		return ret;

	memcpy(BoardTable, &smc_dpm_table->BoardTable, sizeof(BoardTable_t));

	return 0;
}

#if 0
static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_14_0_powerplay_table);

	return 0;
}
#endif

static int smu_v14_0_2_get_pptable_from_pmfw(struct smu_context *smu,
					     void **table,
					     uint32_t *size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	void *combo_pptable = smu_table->combo_pptable;
	int ret = 0;

	ret = smu_cmn_get_combo_pptable(smu);
	if (ret)
		return ret;

	*table = combo_pptable;
	*size = sizeof(struct smu_14_0_2_powerplay_table);

	return 0;
}

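/*
 * Two pptable sources: with SCPM enabled, the signed combo pptable
 * is fetched from PMFW; otherwise the common smu_v14_0_setup_pptable()
 * path is used. SR-IOV VFs skip the whole sequence, presumably since
 * the host side owns the pptable.
 */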
static int smu_v14_0_2_setup_pptable(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (amdgpu_sriov_vf(smu->adev))
		return 0;

	if (!adev->scpm_enabled)
		ret = smu_v14_0_setup_pptable(smu);
	else
		ret = smu_v14_0_2_get_pptable_from_pmfw(smu,
							&smu_table->power_play_table,
							&smu_table->power_play_table_size);
	if (ret)
		return ret;

	ret = smu_v14_0_2_store_powerplay_table(smu);
	if (ret)
		return ret;

	/*
	 * With SCPM enabled, the operation below will be handled
	 * by PSP. Driver involvement is unnecessary and useless.
	 */
	if (!adev->scpm_enabled) {
		ret = smu_v14_0_2_append_powerplay_table(smu);
		if (ret)
			return ret;
	}

	ret = smu_v14_0_2_check_powerplay_table(smu);
	if (ret)
		return ret;

	return ret;
}

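/*
 * Allocate the SMU/driver shared tables (VRAM-backed, page aligned)
 * plus the driver-side shadow buffers. Any allocation failure unwinds
 * the shadows already allocated via the err*_out labels.
 */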
static int smu_v14_0_2_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetricsExternal_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_OVERDRIVE, sizeof(OverDriveTable_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
		       sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
		       AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_COMBO_PPTABLE, MP0_MP1_DATA_REGION_SIZE_COMBOPPTABLE,
			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
			PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v1_3);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
	if (!smu_table->ecc_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

static int smu_v14_0_2_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_14_0_dpm_context);

	return 0;
}

static int smu_v14_0_2_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v14_0_2_tables_init(smu);
	if (ret)
		return ret;

	ret = smu_v14_0_2_allocate_dpm_context(smu);
	if (ret)
		return ret;

	return smu_v14_0_init_smc_tables(smu);
}

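/*
 * Populate the default DPM tables. For each clock domain: if its DPM
 * feature is enabled, pull the real level list from PMFW; otherwise
 * fall back to a single fixed level derived from the VBIOS boot value
 * (boot values appear to be in 10 kHz units, hence the /100 to MHz).
 */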
static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	SkuTable_t *skutable = &pptable->SkuTable;
	struct smu_14_0_dpm_table *dpm_table;
	struct smu_14_0_pcie_table *pcie_table;
	uint32_t link_level;
	int ret = 0;

	/* socclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.soc_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_SOCCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* gfxclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.gfx_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_GFXCLK,
						     dpm_table);
		if (ret)
			return ret;

		/*
		 * Update the reported maximum shader clock to the value
		 * which is guaranteed to be achievable on all cards. This
		 * is aligned with the Windows setting. Since that value may
		 * not be the peak frequency the card can achieve, it is
		 * normal for the real-time clock frequency to exceed this
		 * labelled maximum clock frequency (for example, in the
		 * pp_dpm_sclk sysfs output).
		 */
		if (skutable->DriverReportedClocks.GameClockAc &&
		    (dpm_table->dpm_levels[dpm_table->count - 1].value >
		    skutable->DriverReportedClocks.GameClockAc)) {
			dpm_table->dpm_levels[dpm_table->count - 1].value =
				skutable->DriverReportedClocks.GameClockAc;
			dpm_table->max = skutable->DriverReportedClocks.GameClockAc;
		}
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* uclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.uclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_UCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* fclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.fclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_FCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* vclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.vclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_VCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_VCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* dclk dpm table setup */
	dpm_table = &dpm_context->dpm_tables.dclk_table;
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCLK_BIT)) {
		ret = smu_v14_0_set_single_dpm_table(smu,
						     SMU_DCLK,
						     dpm_table);
		if (ret)
			return ret;
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
		dpm_table->dpm_levels[0].enabled = true;
		dpm_table->min = dpm_table->dpm_levels[0].value;
		dpm_table->max = dpm_table->dpm_levels[0].value;
	}

	/* lclk dpm table setup */
	pcie_table = &dpm_context->dpm_tables.pcie_table;
	pcie_table->num_of_link_levels = 0;
	for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) {
		if (!skutable->PcieGenSpeed[link_level] &&
		    !skutable->PcieLaneCount[link_level] &&
		    !skutable->LclkFreq[link_level])
			continue;

		pcie_table->pcie_gen[pcie_table->num_of_link_levels] =
					skutable->PcieGenSpeed[link_level];
		pcie_table->pcie_lane[pcie_table->num_of_link_levels] =
					skutable->PcieLaneCount[link_level];
		pcie_table->clk_freq[pcie_table->num_of_link_levels] =
					skutable->LclkFreq[link_level];
		pcie_table->num_of_link_levels++;
	}

	return 0;
}

static bool smu_v14_0_2_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

static void smu_v14_0_2_dump_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *pptable = table_context->driver_pptable;
	PFE_Settings_t *PFEsettings = &pptable->PFE_Settings;

	dev_info(smu->adev->dev, "Dumped PPTable:\n");

	dev_info(smu->adev->dev, "Version = 0x%08x\n", PFEsettings->Version);
	dev_info(smu->adev->dev, "FeaturesToRun[0] = 0x%08x\n", PFEsettings->FeaturesToRun[0]);
	dev_info(smu->adev->dev, "FeaturesToRun[1] = 0x%08x\n", PFEsettings->FeaturesToRun[1]);
}

static uint32_t smu_v14_0_2_get_throttler_status(SmuMetrics_t *metrics)
{
	uint32_t throttler_status = 0;
	int i;

	for (i = 0; i < THROTTLER_COUNT; i++)
		throttler_status |=
			(metrics->ThrottlingPercentage[i] ? 1U << i : 0);

	return throttler_status;
}

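/*
 * When the average activity is at or below SMU_14_0_2_BUSY_THRESHOLD
 * percent, report the post-deep-sleep frequency; otherwise report the
 * pre-deep-sleep one, which better reflects the clock while busy.
 */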
#define SMU_14_0_2_BUSY_THRESHOLD	5
static int smu_v14_0_2_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu,
					NULL,
					false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->CurrClock[PPCLK_GFXCLK];
		break;
	case METRICS_CURR_SOCCLK:
		*value = metrics->CurrClock[PPCLK_SOCCLK];
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->CurrClock[PPCLK_UCLK];
		break;
	case METRICS_CURR_VCLK:
		*value = metrics->CurrClock[PPCLK_VCLK_0];
		break;
	case METRICS_CURR_DCLK:
		*value = metrics->CurrClock[PPCLK_DCLK_0];
		break;
	case METRICS_CURR_FCLK:
		*value = metrics->CurrClock[PPCLK_FCLK];
		break;
	case METRICS_CURR_DCEFCLK:
		*value = metrics->CurrClock[PPCLK_DCFCLK];
		break;
	case METRICS_AVERAGE_GFXCLK:
		if (metrics->AverageGfxActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageGfxclkFrequencyPostDs;
		else
			*value = metrics->AverageGfxclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_FCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageFclkFrequencyPostDs;
		else
			*value = metrics->AverageFclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_UCLK:
		if (metrics->AverageUclkActivity <= SMU_14_0_2_BUSY_THRESHOLD)
			*value = metrics->AverageMemclkFrequencyPostDs;
		else
			*value = metrics->AverageMemclkFrequencyPreDs;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->AverageVclk0Frequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->AverageDclk0Frequency;
		break;
	case METRICS_AVERAGE_VCLK1:
		*value = metrics->AverageVclk1Frequency;
		break;
	case METRICS_AVERAGE_DCLK1:
		*value = metrics->AverageDclk1Frequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->AverageGfxActivity;
		break;
	case METRICS_AVERAGE_MEMACTIVITY:
		*value = metrics->AverageUclkActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = metrics->AverageSocketPower << 8;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->AvgTemperature[TEMP_EDGE] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->AvgTemperature[TEMP_HOTSPOT] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_MEM:
		*value = metrics->AvgTemperature[TEMP_MEM] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRGFX:
		*value = metrics->AvgTemperature[TEMP_VR_GFX] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_VRSOC:
		*value = metrics->AvgTemperature[TEMP_VR_SOC] *
			SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = smu_v14_0_2_get_throttler_status(metrics);
		break;
	case METRICS_CURR_FANSPEED:
		*value = metrics->AvgFanRpm;
		break;
	case METRICS_CURR_FANPWM:
		*value = metrics->AvgFanPwm;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->AvgVoltage[SVI_PLANE_VDD_GFX];
		break;
	case METRICS_PCIE_RATE:
		*value = metrics->PcieRate;
		break;
	case METRICS_PCIE_WIDTH:
		*value = metrics->PcieWidth;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int smu_v14_0_2_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	struct smu_14_0_dpm_context *dpm_context =
		smu->smu_dpm.dpm_context;
	struct smu_14_0_dpm_table *dpm_table;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		/* uclk dpm table */
		dpm_table = &dpm_context->dpm_tables.uclk_table;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* gfxclk dpm table */
		dpm_table = &dpm_context->dpm_tables.gfx_table;
		break;
	case SMU_SOCCLK:
		/* socclk dpm table */
		dpm_table = &dpm_context->dpm_tables.soc_table;
		break;
	case SMU_FCLK:
		/* fclk dpm table */
		dpm_table = &dpm_context->dpm_tables.fclk_table;
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		/* vclk dpm table */
		dpm_table = &dpm_context->dpm_tables.vclk_table;
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		/* dclk dpm table */
		dpm_table = &dpm_context->dpm_tables.dclk_table;
		break;
	default:
		dev_err(smu->adev->dev, "Unsupported clock type!\n");
		return -EINVAL;
	}

	if (min)
		*min = dpm_table->min;
	if (max)
		*max = dpm_table->max;

	return 0;
}

static int smu_v14_0_2_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data,
				   uint32_t *size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	PPTable_t *smc_pptable = table_context->driver_pptable;
	int ret = 0;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
		*(uint16_t *)data = smc_pptable->CustomSkuTable.FanMaximumRpm;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_MEMACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_MEM,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_CURR_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v14_0_2_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int smu_v14_0_2_get_current_clk_freq_by_table(struct smu_context *smu,
						     enum smu_clk_type clk_type,
						     uint32_t *value)
{
	MetricsMember_t member_type;
	int clk_id = 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return -EINVAL;

	switch (clk_id) {
	case PPCLK_GFXCLK:
		member_type = METRICS_AVERAGE_GFXCLK;
		break;
	case PPCLK_UCLK:
		member_type = METRICS_CURR_UCLK;
		break;
	case PPCLK_FCLK:
		member_type = METRICS_CURR_FCLK;
		break;
	case PPCLK_SOCCLK:
		member_type = METRICS_CURR_SOCCLK;
		break;
	case PPCLK_VCLK_0:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case PPCLK_DCLK_0:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	default:
		return -EINVAL;
	}

	return smu_v14_0_2_get_smu_metrics_data(smu,
						member_type,
						value);
}

static int smu_v14_0_2_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					char *buf)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_14_0_dpm_table *single_dpm_table;
	uint32_t curr_freq;
	int i, size = 0;
	int ret = 0;

	smu_cmn_get_sysfs_buf(&buf, &size);

	if (amdgpu_ras_intr_triggered()) {
		size += sysfs_emit_at(buf, size, "unavailable\n");
		return size;
	}

	switch (clk_type) {
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		ret = smu_v14_0_2_get_current_clk_freq_by_table(smu, clk_type, &curr_freq);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to get current clock freq!\n");
			return ret;
		}

		if (single_dpm_table->is_fine_grained) {
			/*
			 * For fine grained dpms, there are only two dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> max clock freq
			 * And the current clock frequency can be any value between them.
			 * So, if the current clock frequency is not at level 0 or level 1,
			 * we will fake it as three dpm levels:
			 *   - level 0 -> min clock freq
			 *   - level 1 -> current actual clock freq
			 *   - level 2 -> max clock freq
			 */
			if ((single_dpm_table->dpm_levels[0].value != curr_freq) &&
			     (single_dpm_table->dpm_levels[1].value != curr_freq)) {
				size += sysfs_emit_at(buf, size, "0: %uMhz\n",
						single_dpm_table->dpm_levels[0].value);
				size += sysfs_emit_at(buf, size, "1: %uMhz *\n",
						curr_freq);
				size += sysfs_emit_at(buf, size, "2: %uMhz\n",
						single_dpm_table->dpm_levels[1].value);
			} else {
				size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
						single_dpm_table->dpm_levels[0].value,
						single_dpm_table->dpm_levels[0].value == curr_freq ? "*" : "");
				size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
						single_dpm_table->dpm_levels[1].value,
						single_dpm_table->dpm_levels[1].value == curr_freq ? "*" : "");
			}
		} else {
			for (i = 0; i < single_dpm_table->count; i++)
				size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
						i, single_dpm_table->dpm_levels[i].value,
						single_dpm_table->dpm_levels[i].value == curr_freq ? "*" : "");
		}
		break;
	case SMU_PCIE:
		// TODO
		break;

	default:
		break;
	}

	return size;
}

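/*
 * Restrict a clock domain to the DPM levels selected in @mask: bit n
 * set means level n is allowed. The lowest and highest set bits become
 * a soft min/max frequency pair, e.g. mask 0x6 pins the clock between
 * the level-1 and level-2 frequencies.
 */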
static int smu_v14_0_2_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
	struct smu_14_0_dpm_context *dpm_context = smu_dpm->dpm_context;
	struct smu_14_0_dpm_table *single_dpm_table;
	uint32_t soft_min_level, soft_max_level;
	uint32_t min_freq, max_freq;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		single_dpm_table = &(dpm_context->dpm_tables.gfx_table);
		break;
	case SMU_MCLK:
	case SMU_UCLK:
		single_dpm_table = &(dpm_context->dpm_tables.uclk_table);
		break;
	case SMU_SOCCLK:
		single_dpm_table = &(dpm_context->dpm_tables.soc_table);
		break;
	case SMU_FCLK:
		single_dpm_table = &(dpm_context->dpm_tables.fclk_table);
		break;
	case SMU_VCLK:
	case SMU_VCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.vclk_table);
		break;
	case SMU_DCLK:
	case SMU_DCLK1:
		single_dpm_table = &(dpm_context->dpm_tables.dclk_table);
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_VCLK1:
	case SMU_DCLK:
	case SMU_DCLK1:
		if (single_dpm_table->is_fine_grained) {
			/* There are only two levels for fine-grained DPM */
			soft_max_level = (soft_max_level >= 1 ? 1 : 0);
			soft_min_level = (soft_min_level >= 1 ? 1 : 0);
		} else {
			if ((soft_max_level >= single_dpm_table->count) ||
			    (soft_min_level >= single_dpm_table->count))
				return -EINVAL;
		}

		min_freq = single_dpm_table->dpm_levels[soft_min_level].value;
		max_freq = single_dpm_table->dpm_levels[soft_max_level].value;

		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    clk_type,
							    min_freq,
							    max_freq);
		break;
	case SMU_DCEFCLK:
	case SMU_PCIE:
	default:
		break;
	}

	return ret;
}

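/*
 * Clamp each PCIe link level to the platform caps and hand it to PMFW.
 * The message argument packs the level index, gen and lane count as
 * smu_pcie_arg[31:16] = level, [15:8] = gen, [7:0] = lanes.
 */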
static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu,
					      uint8_t pcie_gen_cap,
					      uint8_t pcie_width_cap)
{
	struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
	struct smu_14_0_pcie_table *pcie_table =
				&dpm_context->dpm_tables.pcie_table;
	uint32_t smu_pcie_arg;
	int ret, i;

	for (i = 0; i < pcie_table->num_of_link_levels; i++) {
		if (pcie_table->pcie_gen[i] > pcie_gen_cap)
			pcie_table->pcie_gen[i] = pcie_gen_cap;
		if (pcie_table->pcie_lane[i] > pcie_width_cap)
			pcie_table->pcie_lane[i] = pcie_width_cap;

		smu_pcie_arg = i << 16;
		smu_pcie_arg |= pcie_table->pcie_gen[i] << 8;
		smu_pcie_arg |= pcie_table->pcie_lane[i];

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_OverridePcieParameters,
						      smu_pcie_arg,
						      NULL);
		if (ret)
			return ret;
	}

	return 0;
}

static int smu_v14_0_2_get_thermal_temperature_range(struct smu_context *smu,
						     struct smu_temperature_range *range)
{
	// TODO

	return 0;
}

static int smu_v14_0_2_populate_umd_state_clk(struct smu_context *smu)
{
	// TODO

	return 0;
}

static void smu_v14_0_2_get_unique_id(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics =
		&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
	struct amdgpu_device *adev = smu->adev;
	uint32_t upper32 = 0, lower32 = 0;
	int ret;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		goto out;

	upper32 = metrics->PublicSerialNumberUpper;
	lower32 = metrics->PublicSerialNumberLower;

out:
	adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
}

static int smu_v14_0_2_get_power_limit(struct smu_context *smu,
				       uint32_t *current_power_limit,
				       uint32_t *default_power_limit,
				       uint32_t *max_power_limit,
				       uint32_t *min_power_limit)
{
	// TODO

	return 0;
}

static int smu_v14_0_2_get_power_profile_mode(struct smu_context *smu,
					      char *buf)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int16_t workload_type = 0;
	uint32_t i, size = 0;
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sysfs_emit_at(buf, size, "%16s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9]);

	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);
		if (workload_type == -ENOTSUPP)
			continue;
		else if (workload_type < 0)
			return -EINVAL;

		result = smu_cmn_update_table(smu,
					      SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					      workload_type,
					      (void *)(&activity_monitor_external),
					      false);
		if (result) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return result;
		}

		size += sysfs_emit_at(buf, size, "%2d %14s%s:\n",
			i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor->Gfx_FPS,
			activity_monitor->Gfx_MinActiveFreqType,
			activity_monitor->Gfx_MinActiveFreq,
			activity_monitor->Gfx_BoosterFreqType,
			activity_monitor->Gfx_BoosterFreq,
			activity_monitor->Gfx_PD_Data_limit_c,
			activity_monitor->Gfx_PD_Data_error_coeff,
			activity_monitor->Gfx_PD_Data_error_rate_coeff);

		size += sysfs_emit_at(buf, size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"FCLK",
			activity_monitor->Fclk_FPS,
			activity_monitor->Fclk_MinActiveFreqType,
			activity_monitor->Fclk_MinActiveFreq,
			activity_monitor->Fclk_BoosterFreqType,
			activity_monitor->Fclk_BoosterFreq,
			activity_monitor->Fclk_PD_Data_limit_c,
			activity_monitor->Fclk_PD_Data_error_coeff,
			activity_monitor->Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}

static int smu_v14_0_2_set_power_profile_mode(struct smu_context *smu,
					      long *input,
					      uint32_t size)
{
	DpmActivityMonitorCoeffIntExternal_t activity_monitor_external;
	DpmActivityMonitorCoeffInt_t *activity_monitor =
		&(activity_monitor_external.DpmActivityMonitorCoeffInt);
	int workload_type, ret = 0;

	smu->power_profile_mode = input[size];

	if (smu->power_profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", smu->power_profile_mode);
		return -EINVAL;
	}

	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					   WORKLOAD_PPLIB_CUSTOM_BIT,
					   (void *)(&activity_monitor_external),
					   false);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to get activity monitor!", __func__);
			return ret;
		}

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor->Gfx_FPS = input[1];
			activity_monitor->Gfx_MinActiveFreqType = input[2];
			activity_monitor->Gfx_MinActiveFreq = input[3];
			activity_monitor->Gfx_BoosterFreqType = input[4];
			activity_monitor->Gfx_BoosterFreq = input[5];
			activity_monitor->Gfx_PD_Data_limit_c = input[6];
			activity_monitor->Gfx_PD_Data_error_coeff = input[7];
			activity_monitor->Gfx_PD_Data_error_rate_coeff = input[8];
			break;
		case 1: /* Fclk */
			activity_monitor->Fclk_FPS = input[1];
			activity_monitor->Fclk_MinActiveFreqType = input[2];
			activity_monitor->Fclk_MinActiveFreq = input[3];
			activity_monitor->Fclk_BoosterFreqType = input[4];
			activity_monitor->Fclk_BoosterFreq = input[5];
			activity_monitor->Fclk_PD_Data_limit_c = input[6];
			activity_monitor->Fclk_PD_Data_error_coeff = input[7];
			activity_monitor->Fclk_PD_Data_error_rate_coeff = input[8];
			break;
		}

		ret = smu_cmn_update_table(smu,
					   SMU_TABLE_ACTIVITY_MONITOR_COEFF,
					   WORKLOAD_PPLIB_CUSTOM_BIT,
					   (void *)(&activity_monitor_external),
					   true);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] Failed to set activity monitor!", __func__);
			return ret;
		}
	}

	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       smu->power_profile_mode);
	if (workload_type < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetWorkloadMask,
					       1 << workload_type,
					       NULL);
}

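/*
 * In runtime PM with an enabled audio function, arm the D3 sequence
 * (BAMACO when MACO is supported, BACO otherwise) rather than entering
 * BACO directly, presumably so PMFW can coordinate the Dstate change
 * with the audio function.
 */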
static int smu_v14_0_2_baco_enter(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev))
		return smu_v14_0_baco_set_armd3_sequence(smu,
				smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO);
	else
		return smu_v14_0_baco_enter(smu);
}

static int smu_v14_0_2_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) {
		/* Wait for PMFW handling for the Dstate change */
		usleep_range(10000, 11000);
		return smu_v14_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
	} else {
		return smu_v14_0_baco_exit(smu);
	}
}

static bool smu_v14_0_2_is_mode1_reset_supported(struct smu_context *smu)
{
	// TODO

	return true;
}

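/*
 * Software I2C: the whole transaction is packed into a SwI2cRequest_t
 * and executed by PMFW through the I2C_COMMANDS table. A RESTART is
 * marked wherever the transfer direction flips, a STOP at the final
 * byte (or where I2C_M_STOP demands one), and read data is copied back
 * from the response left in the driver table.
 */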
static int smu_v14_0_2_i2c_xfer(struct i2c_adapter *i2c_adap,
				   struct i2c_msg *msg, int num_msgs)
{
	struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
	struct amdgpu_device *adev = smu_i2c->adev;
	struct smu_context *smu = adev->powerplay.pp_handle;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *table = &smu_table->driver_table;
	SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
	int i, j, r, c;
	u16 dir;

	if (!adev->pm.dpm_enabled)
		return -EBUSY;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->I2CcontrollerPort = smu_i2c->port;
	req->I2CSpeed = I2C_SPEED_FAST_400K;
	req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
	dir = msg[0].flags & I2C_M_RD;

	for (c = i = 0; i < num_msgs; i++) {
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &req->SwI2cCmds[c];

			if (!(msg[i].flags & I2C_M_RD)) {
				/* write */
				cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
				cmd->ReadWriteData = msg[i].buf[j];
			}

			if ((dir ^ msg[i].flags) & I2C_M_RD) {
				/* The direction changes. */
				dir = msg[i].flags & I2C_M_RD;
				cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
			}

			req->NumCmds++;

			/*
			 * Insert STOP if we are at the last byte of either
			 * the last message of the transaction or a message
			 * for which the client explicitly requires a STOP.
			 */
			if ((j == msg[i].len - 1) &&
			    ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
				cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
				cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
			}
		}
	}
	mutex_lock(&adev->pm.mutex);
	r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
	mutex_unlock(&adev->pm.mutex);
	if (r)
		goto fail;

	for (c = i = 0; i < num_msgs; i++) {
		if (!(msg[i].flags & I2C_M_RD)) {
			c += msg[i].len;
			continue;
		}
		for (j = 0; j < msg[i].len; j++, c++) {
			SwI2cCmd_t *cmd = &res->SwI2cCmds[c];

			msg[i].buf[j] = cmd->ReadWriteData;
		}
	}
	r = num_msgs;
fail:
	kfree(req);
	return r;
}

static u32 smu_v14_0_2_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm smu_v14_0_2_i2c_algo = {
	.master_xfer = smu_v14_0_2_i2c_xfer,
	.functionality = smu_v14_0_2_i2c_func,
};

static const struct i2c_adapter_quirks smu_v14_0_2_i2c_control_quirks = {
	.flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
	.max_read_len  = MAX_SW_I2C_COMMANDS,
	.max_write_len = MAX_SW_I2C_COMMANDS,
	.max_comb_1st_msg_len = 2,
	.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};

static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int res, i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		smu_i2c->adev = adev;
		smu_i2c->port = i;
		mutex_init(&smu_i2c->mutex);
		control->owner = THIS_MODULE;
		control->dev.parent = &adev->pdev->dev;
		control->algo = &smu_v14_0_2_i2c_algo;
		snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
		control->quirks = &smu_v14_0_2_i2c_control_quirks;
		i2c_set_adapdata(control, smu_i2c);

		res = i2c_add_adapter(control);
		if (res) {
			DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
			goto Out_err;
		}
	}

	/* assign the buses used for the FRU EEPROM and RAS EEPROM */
	/* XXX ideally this would be something in a vbios data table */
	adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
	adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;

	return 0;
Out_err:
	for ( ; i >= 0; i--) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	return res;
}

static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int i;

	for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
		struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
		struct i2c_adapter *control = &smu_i2c->adapter;

		i2c_del_adapter(control);
	}
	adev->pm.ras_eeprom_i2c_bus = NULL;
	adev->pm.fru_eeprom_i2c_bus = NULL;
}

static int smu_v14_0_2_set_mp1_state(struct smu_context *smu,
				     enum pp_mp1_state mp1_state)
{
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		ret = smu_cmn_set_mp1_state(smu, mp1_state);
		break;
	default:
		/* Ignore others */
		ret = 0;
	}

	return ret;
}

static int smu_v14_0_2_set_df_cstate(struct smu_context *smu,
				     enum pp_df_cstate state)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_DFCstateControl,
					       state,
					       NULL);
}

static int smu_v14_0_2_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	// TODO

	return ret;
}

static int smu_v14_0_2_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	// TODO

	return ret;
}

static int smu_v14_0_2_enable_gfx_features(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(14, 0, 2))
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableAllSmuFeatures,
										   FEATURE_PWR_GFX, NULL);
	else
		return -EOPNOTSUPP;
}

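/*
 * Driver-to-PMFW mailbox: the message opcode, its parameter and the
 * PMFW response travel through the MP1 C2PMSG scratch registers set
 * up below.
 */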
static void smu_v14_0_2_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, regMP1_SMN_C2PMSG_90);
}

static int smu_v14_0_2_smu_send_bad_mem_page_num(struct smu_context *smu,
		uint32_t size)
{
	int ret = 0;

	/* message SMU to update the bad page number on SMUBUS */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetNumBadMemoryPagesRetired,
					  size, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			  "[%s] failed to message SMU to update bad memory pages number\n",
			  __func__);

	return ret;
}

static int smu_v14_0_2_send_bad_mem_channel_flag(struct smu_context *smu,
		uint32_t size)
{
	int ret = 0;

	/* message SMU to update the bad channel info on SMUBUS */
	ret = smu_cmn_send_smc_msg_with_param(smu,
				  SMU_MSG_SetBadMemoryPagesRetiredFlagsPerChannel,
				  size, NULL);
	if (ret)
		dev_err(smu->adev->dev,
			  "[%s] failed to message SMU to update bad memory pages channel info\n",
			  __func__);

	return ret;
}

static ssize_t smu_v14_0_2_get_ecc_info(struct smu_context *smu,
					void *table)
{
	int ret = 0;

	// TODO

	return ret;
}

static const struct pptable_funcs smu_v14_0_2_ppt_funcs = {
	.get_allowed_feature_mask = smu_v14_0_2_get_allowed_feature_mask,
	.set_default_dpm_table = smu_v14_0_2_set_default_dpm_table,
	.i2c_init = smu_v14_0_2_i2c_control_init,
	.i2c_fini = smu_v14_0_2_i2c_control_fini,
	.is_dpm_running = smu_v14_0_2_is_dpm_running,
	.dump_pptable = smu_v14_0_2_dump_pptable,
	.init_microcode = smu_v14_0_init_microcode,
	.load_microcode = smu_v14_0_load_microcode,
	.fini_microcode = smu_v14_0_fini_microcode,
	.init_smc_tables = smu_v14_0_2_init_smc_tables,
	.fini_smc_tables = smu_v14_0_fini_smc_tables,
	.init_power = smu_v14_0_init_power,
	.fini_power = smu_v14_0_fini_power,
	.check_fw_status = smu_v14_0_check_fw_status,
	.setup_pptable = smu_v14_0_2_setup_pptable,
	.check_fw_version = smu_v14_0_check_fw_version,
	.write_pptable = smu_cmn_write_pptable,
	.set_driver_table_location = smu_v14_0_set_driver_table_location,
	.system_features_control = smu_v14_0_system_features_control,
	.set_allowed_mask = smu_v14_0_set_allowed_mask,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.dpm_set_vcn_enable = smu_v14_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v14_0_set_jpeg_enable,
	.get_dpm_ultimate_freq = smu_v14_0_2_get_dpm_ultimate_freq,
	.get_vbios_bootup_values = smu_v14_0_get_vbios_bootup_values,
	.read_sensor = smu_v14_0_2_read_sensor,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.print_clk_levels = smu_v14_0_2_print_clk_levels,
	.force_clk_levels = smu_v14_0_2_force_clk_levels,
	.update_pcie_parameters = smu_v14_0_2_update_pcie_parameters,
	.get_thermal_temperature_range = smu_v14_0_2_get_thermal_temperature_range,
	.register_irq_handler = smu_v14_0_register_irq_handler,
	.notify_memory_pool_location = smu_v14_0_notify_memory_pool_location,
	.set_soft_freq_limited_range = smu_v14_0_set_soft_freq_limited_range,
	.init_pptable_microcode = smu_v14_0_init_pptable_microcode,
	.populate_umd_state_clk = smu_v14_0_2_populate_umd_state_clk,
	.set_performance_level = smu_v14_0_set_performance_level,
	.gfx_off_control = smu_v14_0_gfx_off_control,
	.get_unique_id = smu_v14_0_2_get_unique_id,
	.get_power_limit = smu_v14_0_2_get_power_limit,
	.set_power_limit = smu_v14_0_set_power_limit,
	.set_power_source = smu_v14_0_set_power_source,
	.get_power_profile_mode = smu_v14_0_2_get_power_profile_mode,
	.set_power_profile_mode = smu_v14_0_2_set_power_profile_mode,
	.run_btc = smu_v14_0_run_btc,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
	.set_tool_table_location = smu_v14_0_set_tool_table_location,
	.deep_sleep_control = smu_v14_0_deep_sleep_control,
	.gfx_ulv_control = smu_v14_0_gfx_ulv_control,
	.get_bamaco_support = smu_v14_0_get_bamaco_support,
	.baco_get_state = smu_v14_0_baco_get_state,
	.baco_set_state = smu_v14_0_baco_set_state,
	.baco_enter = smu_v14_0_2_baco_enter,
	.baco_exit = smu_v14_0_2_baco_exit,
	.mode1_reset_is_support = smu_v14_0_2_is_mode1_reset_supported,
	.mode1_reset = smu_v14_0_2_mode1_reset,
	.mode2_reset = smu_v14_0_2_mode2_reset,
	.enable_gfx_features = smu_v14_0_2_enable_gfx_features,
	.set_mp1_state = smu_v14_0_2_set_mp1_state,
	.set_df_cstate = smu_v14_0_2_set_df_cstate,
	.send_hbm_bad_pages_num = smu_v14_0_2_smu_send_bad_mem_page_num,
	.send_hbm_bad_channel_flag = smu_v14_0_2_send_bad_mem_channel_flag,
	.gpo_control = smu_v14_0_gpo_control,
	.get_ecc_info = smu_v14_0_2_get_ecc_info,
};

void smu_v14_0_2_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &smu_v14_0_2_ppt_funcs;
	smu->message_map = smu_v14_0_2_message_map;
	smu->clock_map = smu_v14_0_2_clk_map;
	smu->feature_map = smu_v14_0_2_feature_mask_map;
	smu->table_map = smu_v14_0_2_table_map;
	smu->pwr_src_map = smu_v14_0_2_pwr_src_map;
	smu->workload_map = smu_v14_0_2_workload_map;
	smu_v14_0_2_set_smu_mailbox_registers(smu);
}