/*	$NetBSD: amdgpu_dpm.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_dpm.c,v 1.6 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"

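/*
 * Decode the ATOM PPLib classification bitfields of a power state and
 * print them to the kernel log: first the user-visible UI class
 * (none/battery/balanced/performance), then every internal class flag
 * set in "class" and "class2".
 */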
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

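/*
 * Walk the mode configuration's CRTC list and cache which CRTCs are
 * enabled as a bitmask (new_active_crtcs) plus a count
 * (new_active_crtc_count), for the DPM code to consume on the next
 * power-state transition.
 */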
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}


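/*
 * Return the vertical blanking interval of the first active display in
 * microseconds, or 0xffffffff ("infinite") when no display is enabled.
 * The blanking interval is the number of pixels scanned out between the
 * end of the visible area and the end of the frame,
 * htotal * (vblank_end - vdisplay + 2 * v_border), divided by the pixel
 * clock in kHz and thereby scaled to microseconds.
 *
 * A rough worked example (not taken from this file): a CEA 1080p60 mode
 * has htotal = 2200, 45 blanking lines, and a 148500 kHz pixel clock,
 * giving 2200 * 45 * 1000 / 148500 ~= 666 us of vblank time.
 */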
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

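/*
 * Report whether a thermal sensor type is handled entirely by the
 * on-die controller.  The ADT7473/EMC2103 "with internal" combinations
 * return false because their external fan-control component needs
 * special handling.
 */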
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

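/*
 * Copy an ATOM clock/voltage dependency table from the video BIOS into
 * a freshly allocated native table.  Each 24-bit clock is stored in the
 * BIOS as a little-endian 16-bit low word plus an 8-bit high byte and
 * is reassembled here.  Returns -ENOMEM if the allocation fails.
 */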
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

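/*
 * Cache the platform capability flags and the backbias/voltage response
 * times from the PowerPlayInfo ATOM data table.  Returns -EINVAL if the
 * table header cannot be parsed.
 */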
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

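/*
 * Parse the optional sub-tables of the PPLib powerplay table into
 * adev->pm.dpm.dyn_state: the fan table, the clock/voltage dependency
 * and limit tables, phase shedding limits, CAC leakage data, and the
 * extended-header VCE/UVD/SAMU/ACP/PPM/PowerTune tables, each guarded
 * by the table size or extended-header revision that introduced it.
 * On any allocation failure the tables parsed so far are released and
 * a negative errno is returned.
 */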
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}
	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
			ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
					states->numEntries > AMD_MAX_VCE_LEVELS ?
					AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
			ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
			ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
			ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
			ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
				ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

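/*
 * Free every table allocated by amdgpu_parse_extended_power_table().
 * Safe to call on a partially parsed state, since kfree() of a
 * never-allocated (NULL) entry pointer is a no-op.
 */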
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

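/*
 * Decode the thermal controller descriptor embedded in the powerplay
 * table: record fan parameters (tach pulses per revolution, min/max
 * RPM), classify internal controllers by ASIC family, and for known
 * external I2C sensor chips look up the matching I2C bus and register
 * an i2c device for the chip.
 */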
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

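/*
 * Choose the PCIe generation DPM should program.  An explicit asic_gen
 * request (gen 1/2/3) is returned unchanged; otherwise default_gen is
 * honored only when the CAIL system support mask confirms the link can
 * run at that speed, falling back to gen 1.
 */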
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}

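/*
 * Query the current graphics core clock; "low" selects the minimum DPM
 * level, otherwise the maximum is reported.  The software SMU returns
 * MHz, which is scaled by 100 to match the 10 kHz clock units used
 * elsewhere in powerplay.  amdgpu_dpm_get_mclk() below is the
 * memory-clock counterpart.
 */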
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL,
					     true);
		if (ret)
			return 0;
		return clk_freq * 100;

	} else {
		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
	}
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;
	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL,
					     true);
		if (ret)
			return 0;
		return clk_freq * 100;

	} else {
		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
	}
}

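/*
 * Gate or ungate an IP block's power via the SMU, routing through
 * either the software SMU or the powerplay backend.  Only the UVD/VCE
 * paths take adev->pm.mutex (see the deadlock note below); unknown
 * block types are ignored and return 0.
 */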
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	bool swsmu = is_support_sw_smu(adev);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (swsmu) {
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		} else if (adev->powerplay.pp_funcs &&
			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 *     INFO: task ocltst:2028 blocked for more than 120 seconds.
			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 *     "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 *     ocltst         D    0  2028   2026 0x00000000
			 *     Call Trace:
			 *     __schedule+0x2c0/0x870
			 *     schedule+0x2c/0x70
			 *     schedule_preempt_disabled+0xe/0x10
			 *     __mutex_lock.isra.9+0x26d/0x4e0
			 *     __mutex_lock_slowpath+0x13/0x20
			 *     ? __mutex_lock_slowpath+0x13/0x20
			 *     mutex_lock+0x2f/0x40
			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else if (adev->powerplay.pp_funcs &&
			 adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	return ret;
}

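/*
 * Enter BACO ("Bus Active, Chip Off"): the chip is powered down while
 * its PCIe bus interface stays active, so the device remains visible
 * on the bus.  Returns -ENOENT when neither the software SMU nor the
 * powerplay backend can control the BACO state.
 */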
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	}

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_exit(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
	}

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_set_mp1_state(&adev->smu, mp1_state);
	} else if (adev->powerplay.pp_funcs &&
		   adev->powerplay.pp_funcs->set_mp1_state) {
		ret = adev->powerplay.pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	bool baco_cap;

	if (is_support_sw_smu(adev)) {
		return smu_baco_is_support(smu);
	} else {
		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
			return false;

		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
			return false;

		return baco_cap ? true : false;
	}
}

int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev)) {
		return smu_mode2_reset(smu);
	} else {
		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
			return -ENOENT;

		return pp_funcs->asic_reset_mode_2(pp_handle);
	}
}

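/*
 * Reset the GPU by cycling it through BACO: enter the BACO state and
 * immediately exit it again, which power-cycles the chip while keeping
 * the PCIe link up.
 */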
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	dev_info(adev->dev, "GPU BACO reset\n");

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;

		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!pp_funcs
		    || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
		if (ret)
			return ret;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
		if (ret)
			return ret;
	}

	return 0;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_switch_power_profile(&adev->smu, type, en);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->switch_power_profile)
		ret = adev->powerplay.pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);

	return ret;
}

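/*
 * Request a performance state for the XGMI inter-GPU link.  Handled by
 * the software SMU where it supports XGMI, otherwise by the powerplay
 * set_xgmi_pstate() hook; with neither available, 0 is returned without
 * doing anything.
 */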
int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	int ret = 0;

	if (is_support_sw_smu_xgmi(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->set_xgmi_pstate)
		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);

	return ret;
}
