/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/reboot.h>

#define SMU_11_0_PARTIAL_PPTABLE
#define SWSMU_CODE_LAYER_L3

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_atombios.h"
#include "smu_v11_0.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"
#include "smu_cmn.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");
MODULE_FIRMWARE("amdgpu/sienna_cichlid_smc.bin");
MODULE_FIRMWARE("amdgpu/navy_flounder_smc.bin");
MODULE_FIRMWARE("amdgpu/dimgrey_cavefish_smc.bin");
MODULE_FIRMWARE("amdgpu/beige_goby_smc.bin");

#define SMU11_VOLTAGE_SCALE 4

#define SMU11_MODE1_RESET_WAIT_TIME_IN_MS 500 /* 500 ms */

#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK 0x00000070L
#define PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT 0x4
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK 0xC000
#define PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT 0xE

#define mmTHM_BACO_CNTL_ARCT			0xA7
#define mmTHM_BACO_CNTL_ARCT_BASE_IDX		0

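/*
 * Wait for BACO exit to complete: poll THM_BACO_CNTL until bit 8
 * (0x100) clears, checking roughly every 1ms for up to 100 attempts.
 */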
static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t data, loop = 0;

	do {
		usleep_range(1000, 1100);
		data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
	} while ((data & 0x100) && (++loop < 100));
}

int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char ucode_prefix[30];
	char fw_name[SMU_FW_NAME_LEN];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	if (amdgpu_sriov_vf(adev) &&
	    ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 9)) ||
	     (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 7))))
		return 0;

	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

void smu_v11_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = le32_to_cpu(hdr->header.ucode_size_bytes);

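	/*
	 * Copy the firmware image into MP1 SRAM one dword at a time,
	 * skipping the first and last dwords of the image, then pulse the
	 * MP1 reset and wait for the firmware to report that its
	 * interrupts are enabled.
	 */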
	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
			MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}

int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

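	/*
	 * The SMU firmware version is packed one byte per field:
	 * program id (31:24), major (23:16), minor (15:8), debug (7:0).
	 */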
	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case IP_VERSION(11, 0, 9):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
		break;
	case IP_VERSION(11, 0, 5):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	case IP_VERSION(11, 0, 7):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
		break;
	case IP_VERSION(11, 0, 11):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
		break;
	case IP_VERSION(11, 5, 0):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
		break;
	case IP_VERSION(11, 0, 12):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
		break;
	case IP_VERSION(11, 0, 13):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
		break;
	case IP_VERSION(11, 0, 8):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
		break;
	case IP_VERSION(11, 0, 2):
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	default:
		dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. An if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But those are
	 * visible only with the paired driver.
	 * Considering the above, we just log a message for the user
	 * instead of halting driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
			smu->smc_driver_if_version, if_version,
			smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(smu->adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

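/*
 * Prefer a driver-carried soft pptable embedded in the SMC firmware image
 * (v2.x headers) when one is requested via pp_table_id; otherwise fall
 * back to the powerplay table provided by the VBIOS.
 */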
int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	if (!amdgpu_sriov_vf(adev)) {
		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		version_major = le16_to_cpu(hdr->header.header_version_major);
		version_minor = le16_to_cpu(hdr->header.header_version_minor);
		if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
			dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
			switch (version_minor) {
			case 0:
				ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
				break;
			case 1:
				ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
								smu->smu_table.boot_values.pp_table_id);
				break;
			default:
				ret = -EINVAL;
				break;
			}
			if (ret)
				return ret;
			goto out;
		}
	}

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
						(uint8_t **)&table);
	if (ret)
		return ret;
	size = atom_table_size;

out:
	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Arcturus does not support OVERDRIVE */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}

		smu_table->user_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->user_overdrive_table) {
			ret = -ENOMEM;
			goto err4_out;
		}
	}

	return 0;

err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->user_overdrive_table);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	kfree(smu_table->clocks_table);
	smu_table->gpu_metrics_table = NULL;
	smu_table->user_overdrive_table = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	smu_table->clocks_table = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->driver_smu_config_table);
	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->driver_smu_config_table = NULL;
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

int smu_v11_0_init_power(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_power_context *smu_power = &smu->smu_power;
	size_t size = amdgpu_ip_version(adev, MP1_HWIP, 0) ==
				      IP_VERSION(11, 5, 0) ?
			      sizeof(struct smu_11_5_power_context) :
			      sizeof(struct smu_11_0_power_context);

	smu_power->power_context = kzalloc(size, GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = size;

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

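/*
 * Query a bootup clock from the VBIOS by executing the getsmuclockinfo
 * command table. The result is reported in Hz; dividing by 10000 yields
 * the 10KHz units used by the boot_values fields.
 */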
static int smu_v11_0_atom_get_smu_clockinfo(struct amdgpu_device *adev,
					    uint8_t clk_id,
					    uint8_t syspll_id,
					    uint32_t *clk_freq)
{
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;
	int ret, index;

	input.clk_id = clk_id;
	input.syspll_id = syspll_id;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input, sizeof(input));
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	*clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	return 0;
}

int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version for smu11!\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
		break;
	case 3:
	case 4:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_SOCCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.socclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dcefclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_ECLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.eclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_VCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.vclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL0_DCLK_ID,
					 (uint8_t)0,
					 &smu->smu_table.boot_values.dclk);

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2))
		smu_v11_0_atom_get_smu_clockinfo(smu->adev,
						 (uint8_t)SMU11_SYSPLL1_0_FCLK_ID,
						 (uint8_t)SMU11_SYSPLL1_2_ID,
						 &smu->smu_table.boot_values.fclk);

	smu_v11_0_atom_get_smu_clockinfo(smu->adev,
					 (uint8_t)SMU11_SYSPLL3_1_LCLK_ID,
					 (uint8_t)SMU11_SYSPLL3_1_ID,
					 &smu->smu_table.boot_values.lclk);

	return 0;
}

int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

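	/*
	 * The SMU takes each 64-bit address as a pair of 32-bit message
	 * arguments: first the system (CPU) address of the pool, then the
	 * MC address used for DRAM logging.
	 */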
	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high,
					  NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low,
					  NULL);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low  = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
	if (ret)
		dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK failed!");

	return ret;
}

int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address),
				NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrLow,
				lower_32_bits(driver_table->mc_address),
				NULL);
	}

	return ret;
}

int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address),
				NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrLow,
				lower_32_bits(tool_table->mc_address),
				NULL);
	}

	return ret;
}

int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	struct amdgpu_device *adev = smu->adev;

	/*
	 * Vangogh/Navy_Flounder/Dimgrey_Cavefish/Beige_Goby do not
	 * currently support changing the display count.
	 */
	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 11) ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 5, 0) ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 12) ||
	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 13))
		return 0;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NumOfDisplays,
					       count,
					       NULL);
}
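/*
 * The 64-bit allowed-feature bitmap is handed to the SMU as two 32-bit
 * message arguments, high word first.
 */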
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) {
		ret = -EINVAL;
		goto failed;
	}

	bitmap_to_arr32(feature_mask, feature->allowed, 64);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1], NULL);
	if (ret)
		goto failed;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0], NULL);
	if (ret)
		goto failed;

failed:
	return ret;
}

int smu_v11_0_system_features_control(struct smu_context *smu,
					     bool en)
{
	return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
					  SMU_MSG_DisableAllSmuFeatures), NULL);
}

int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);

	return ret;
}

static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16, clock);
	if (ret) {
		dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	return 0;
}

int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks =
			smu->smu_table.max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}

int smu_v11_0_get_current_power_limit(struct smu_context *smu,
				      uint32_t *power_limit)
{
	int power_src;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
		return -EINVAL;

	power_src = smu_cmn_to_asic_specific_index(smu,
					CMN2ASIC_MAPPING_PWR,
					smu->adev->pm.ac_power ?
					SMU_POWER_SOURCE_AC :
					SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 */
	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_GetPptLimit,
					  (0 << 24) | (power_src << 16),
					  power_limit);
	if (ret)
		dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);

	return ret;
}

int smu_v11_0_set_power_limit(struct smu_context *smu,
			      enum smu_ppt_limit_type limit_type,
			      uint32_t limit)
{
	int power_src;
	int ret = 0;
	uint32_t limit_param;

	if (limit_type != SMU_DEFAULT_PPT_LIMIT)
		return -EINVAL;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	power_src = smu_cmn_to_asic_specific_index(smu,
					CMN2ASIC_MAPPING_PWR,
					smu->adev->pm.ac_power ?
					SMU_POWER_SOURCE_AC :
					SMU_POWER_SOURCE_DC);
	if (power_src < 0)
		return -EINVAL;

	/*
	 * BIT 24-31: ControllerId (only PPT0 is supported for now)
	 * BIT 16-23: PowerSource
	 * BIT 0-15: PowerLimit
	 */
	limit_param  = (limit & 0xFFFF);
	limit_param |= 0 << 24;
	limit_param |= (power_src) << 16;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set power limit failed!\n", __func__);
		return ret;
	}

	smu->current_power_limit = limit;

	return 0;
}

static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu,
				SMU_MSG_ReenableAcDcInterrupt,
				NULL);
}

static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
{
	int ret = 0;

	if (smu->dc_controlled_by_gpio &&
	    smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
		ret = smu_v11_0_ack_ac_dc_interrupt(smu);

	return ret;
}

void smu_v11_0_interrupt_work(struct smu_context *smu)
{
	if (smu_v11_0_ack_ac_dc_interrupt(smu))
		dev_err(smu->adev->dev, "Ack AC/DC interrupt failed!\n");
}

int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	int ret = 0;

	if (smu->smu_table.thermal_controller_type) {
		ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
		if (ret)
			return ret;
	}

	/*
	 * Interrupts (e.g. AC/DC) triggered before the driver registered
	 * its interrupt handler may have been missed; process any that
	 * are pending now.
	 */
	return smu_v11_0_process_pending_interrupt(smu);
}

int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
{
	return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
}

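/*
 * Decode a voltage VID (as read back from SVI telemetry) into
 * millivolts: VDDC (mV) = (6200 - vid * 25) / 4 = 1550 - vid * 6.25.
 */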
static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}

int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}

int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
		smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}

int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
		else
			ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
		break;
	default:
		break;
	}

	return ret;
}

uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_AUTO;
	else
		return smu->user_dpm_profile.fan_mode;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}

static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}

int
smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	speed = min_t(uint32_t, speed, 255);

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 255);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}

int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	/*
	 * The crystal_clock_freq used for fan speed RPM calculation is
	 * always 25MHz, so hardcode it as 2500 (in 10KHz units).
	 */
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_period;

	if (speed == 0)
		return -EINVAL;
	/*
	 * To prevent possible overheating, some ASICs have a minimum
	 * fan speed requirement:
	 * - For some NV10 SKUs, the fan speed cannot be set lower than
	 *   700 RPM.
	 * - For some Sienna Cichlid SKUs, the fan speed cannot be set
	 *   lower than 500 RPM.
	 */
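	/*
	 * Convert RPM to a tach period in crystal-clock cycles:
	 * period = (60 s/min * 25MHz) / (8 * RPM); the factor of 8
	 * presumably reflects the tach signal edges counted per
	 * revolution.
	 */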
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
}

int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
				uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	/*
	 * For pre-Sienna Cichlid ASICs, a 0 RPM state may not be correctly
	 * detected via register reads. To work around this, report the
	 * fan speed as 0 PWM if that is what the user last requested.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
	     && !smu->user_dpm_profile.fan_speed_pwm) {
		*speed = 0;
		return 0;
	}

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	duty = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_THERMAL_STATUS),
				CG_THERMAL_STATUS, FDO_PWM_DUTY);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)duty * 255;
	do_div(tmp64, duty100);
	*speed = min_t(uint32_t, tmp64, 255);

	return 0;
}

int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
				uint32_t *speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t crystal_clock_freq = 2500;
	uint32_t tach_status;
	uint64_t tmp64;

	/*
	 * For pre-Sienna Cichlid ASICs, a 0 RPM state may not be correctly
	 * detected via register reads. To work around this, report the
	 * fan speed as 0 RPM if that is what the user last requested.
	 */
	if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
	     && !smu->user_dpm_profile.fan_speed_rpm) {
		*speed = 0;
		return 0;
	}

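	/*
	 * CG_TACH_STATUS reports the measured tach period in crystal-clock
	 * cycles, so RPM = 60 s/min * 25MHz / tach_status.
	 */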
	tmp64 = (uint64_t)crystal_clock_freq * 60 * 10000;

	tach_status = RREG32_SOC15(THM, 0, mmCG_TACH_STATUS);
	if (tach_status) {
		do_div(tmp64, tach_status);
		*speed = (uint32_t)tmp64;
	} else {
		dev_warn_once(adev->dev, "Got zero output on CG_TACH_STATUS reading!\n");
		*speed = 0;
	}

	return 0;
}

int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		if (!ret)
			ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		dev_err(smu->adev->dev, "[%s] Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}

int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
				     uint32_t pstate)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetXgmiMode,
					       pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
					       NULL);
}

static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   unsigned int type,
				   enum amdgpu_interrupt_state state)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t low, high;
	uint32_t val = 0;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* For THM irqs */
		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 1);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* For THM irqs */
		low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
				smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
		high = min(SMU_THERMAL_MAXIMUM_ALERT_TEMP,
				smu->thermal_range.software_shutdown_temp);

		val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
		val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
		val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

		val = (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
		val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);
		WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

		/* For MP1 SW irqs */
		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT, val);

		val = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
		val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0);
		WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, val);

		break;
	default:
		break;
	}

	return 0;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H		0		/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH  */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L		1		/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL  */

#define SMUIO_11_0__SRCID__SMUIO_GPIO19			83

static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	struct smu_context *smu = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;
	/*
	 * ctxid is used to distinguish different
	 * events for SMCToHost interrupt.
	 */
	uint32_t ctxid = entry->src_data[0];
	uint32_t data;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			schedule_delayed_work(&smu->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected\n");
			break;
		default:
			dev_emerg(adev->dev, "ERROR: unknown THM interrupt src id (%d)\n",
				src_id);
			break;
		}
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	} else if (client_id == SOC15_IH_CLIENTID_MP1) {
		if (src_id == SMU_IH_INTERRUPT_ID_TO_DRIVER) {
			/* ACK SMUToHost interrupt */
			data = RREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL);
			data = REG_SET_FIELD(data, MP1_SMN_IH_SW_INT_CTRL, INT_ACK, 1);
			WREG32_SOC15(MP1, 0, mmMP1_SMN_IH_SW_INT_CTRL, data);

			switch (ctxid) {
			case SMU_IH_INTERRUPT_CONTEXT_ID_AC:
				dev_dbg(adev->dev, "Switched to AC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = true;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_DC:
				dev_dbg(adev->dev, "Switched to DC mode!\n");
				schedule_work(&smu->interrupt_work);
				adev->pm.ac_power = false;
				break;
			case SMU_IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING:
				/*
				 * Increment the throttle interrupt counter
				 */
				atomic64_inc(&smu->throttle_int_counter);

				if (!atomic_read(&adev->throttling_logging_enabled))
					return 0;

				if (__ratelimit(&adev->throttling_logging_rs))
					schedule_work(&smu->throttling_logging_work);

				break;
			default:
				dev_dbg(adev->dev, "Unhandled context id %d from client:%d!\n",
									ctxid, client_id);
				break;
			}
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = {
	.set = smu_v11_0_set_irq_state,
	.process = smu_v11_0_irq_process,
};

int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = &smu->irq_source;
	int ret = 0;

	irq_src->num_types = 1;
	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	/* Register CTF(GPIO_19) interrupt */
	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_ROM_SMUIO,
				SMUIO_11_0__SRCID__SMUIO_GPIO19,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1,
				SMU_IH_INTERRUPT_ID_TO_DRIVER,
				irq_src);
	if (ret)
		return ret;

	return ret;
}

int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
		struct pp_smu_nv_clock_table *max_clocks)
{
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_max_sustainable_clocks *sustainable_clocks = NULL;

	if (!max_clocks || !table_context->max_sustainable_clocks)
		return -EINVAL;

	sustainable_clocks = table_context->max_sustainable_clocks;

	max_clocks->dcfClockInKhz =
			(unsigned int) sustainable_clocks->dcef_clock * 1000;
	max_clocks->displayClockInKhz =
			(unsigned int) sustainable_clocks->display_clock * 1000;
	max_clocks->phyClockInKhz =
			(unsigned int) sustainable_clocks->phy_clock * 1000;
	max_clocks->pixelClockInKhz =
			(unsigned int) sustainable_clocks->pixel_clock * 1000;
	max_clocks->uClockInKhz =
			(unsigned int) sustainable_clocks->uclock * 1000;
	max_clocks->socClockInKhz =
			(unsigned int) sustainable_clocks->soc_clock * 1000;
	max_clocks->dscClockInKhz = 0;
	max_clocks->dppClockInKhz = 0;
	max_clocks->fabricClockInKhz = 0;

	return 0;
}

int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
}

int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_baco_seq baco_seq)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}

bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
		return false;

	/* return true if ASIC is in BACO state already */
	if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return true;

	/* Arcturus does not support this bit mask */
	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	   !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}

enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	return smu_baco->state;
}

#define D3HOT_BACO_SEQUENCE 0
#define D3HOT_BAMACO_SEQUENCE 2

int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

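	/*
	 * On entry, Sienna Cichlid and derivatives choose between the plain
	 * BACO sequence and BAMACO (when amdgpu_runtime_pm == 2); other
	 * parts set bit 31 of THM_BACO_CNTL directly unless RAS is active,
	 * in which case EnterBaco is sent with a different argument.
	 */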
	if (state == SMU_BACO_STATE_ENTER) {
		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
		case IP_VERSION(11, 0, 7):
		case IP_VERSION(11, 0, 11):
		case IP_VERSION(11, 0, 12):
		case IP_VERSION(11, 0, 13):
			if (amdgpu_runtime_pm == 2)
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BAMACO_SEQUENCE,
								      NULL);
			else
				ret = smu_cmn_send_smc_msg_with_param(smu,
								      SMU_MSG_EnterBaco,
								      D3HOT_BACO_SEQUENCE,
								      NULL);
			break;
		default:
			if (!ras || !adev->ras_enabled ||
			    adev->gmc.xgmi.pending_reset) {
				if (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
				    IP_VERSION(11, 0, 2)) {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
				} else {
					data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
					data |= 0x80000000;
					WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
				}

				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
			} else {
				ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
			}
			break;
		}

	} else {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
		if (ret)
			return ret;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}

	if (!ret)
		smu_baco->state = state;

	return ret;
}

int smu_v11_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v11_0_baco_exit(struct smu_context *smu)
{
	int ret;

	ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
	if (!ret) {
		/*
		 * Poll BACO exit status to ensure FW has completed
		 * BACO exit process to avoid timing issues.
		 */
		smu_v11_0_poll_baco_exit(smu);
	}

	return ret;
}

int smu_v11_0_mode1_reset(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
	if (!ret)
		msleep(SMU11_MODE1_RESET_WAIT_TIME_IN_MS);

	return ret;
}

int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
	int ret = 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL);

	return ret;
}

int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
						 uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;
	uint32_t clock_limit;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
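
	/* The clock id goes in the upper 16 bits of the message argument. */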
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

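	/*
	 * Message argument layout: clock id in the upper 16 bits, target
	 * frequency in MHz in the lower 16 bits.
	 */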
1780	if (max > 0) {
1781		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
1782		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1783						  param, NULL);
1784		if (ret)
1785			goto out;
1786	}
1787
1788	if (min > 0) {
1789		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
1790		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1791						  param, NULL);
1792		if (ret)
1793			goto out;
1794	}
1795
1796out:
1797	return ret;
1798}
1799
1800int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
1801					  enum smu_clk_type clk_type,
1802					  uint32_t min,
1803					  uint32_t max)
1804{
1805	int ret = 0, clk_id = 0;
1806	uint32_t param;
1807
1808	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

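/*
 * Translate a forced performance level into soft frequency ranges for
 * GFXCLK, MCLK and SOCCLK, taken either from the DPM table extremes or
 * from the precomputed UMD pstate values.
 */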
int smu_v11_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	struct smu_11_0_dpm_context *dpm_context =
				smu->smu_dpm.dpm_context;
	struct smu_11_0_dpm_table *gfx_table =
				&dpm_context->dpm_tables.gfx_table;
	struct smu_11_0_dpm_table *mem_table =
				&dpm_context->dpm_tables.uclk_table;
	struct smu_11_0_dpm_table *soc_table =
				&dpm_context->dpm_tables.soc_table;
	struct smu_umd_pstate_table *pstate_table =
				&smu->pstate_table;
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t mclk_min = 0, mclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		sclk_min = sclk_max = gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/*
	 * Separate MCLK and SOCCLK soft min/max settings are not allowed
	 * on Arcturus.
	 */
	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
		mclk_min = mclk_max = 0;
		socclk_min = socclk_max = 0;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v11_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;
	}

	return ret;
}

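/*
 * Notify the firmware which power source (e.g. AC vs. DC) is active so
 * it can apply the matching limits; the generic source id is first
 * translated to the ASIC-specific index.
 */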
int smu_v11_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					SMU_MSG_NotifyPowerSource,
					pwr_source,
					NULL);
}

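/*
 * Read the frequency of one DPM level.  The payload packs the clock id
 * into the upper 16 bits and the level index into the lower 16 bits.
 */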
int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
				    enum smu_clk_type clk_type,
				    uint16_t level,
				    uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					  SMU_MSG_GetDpmFreqByIndex,
					  param,
					  value);
	if (ret)
		return ret;

	/*
	 * BIT31 of the reply flags the DPM mode: 0 - fine-grained DPM,
	 * 1 - discrete DPM.  The driver does not consume that flag here,
	 * so mask it off to leave just the frequency.
	 */
	*value = *value & 0x7fffffff;

	return ret;
}

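/*
 * Level index 0xff is a special request: instead of a level's frequency,
 * the firmware returns the number of DPM levels for the clock.
 */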
int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *value)
{
	return smu_v11_0_get_dpm_freq_by_index(smu,
					       clk_type,
					       0xff,
					       value);
}

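/*
 * Populate a smu_11_0_dpm_table by querying the level count and then
 * each level's frequency; level 0 and the last level double as the
 * table's min and max.
 */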
int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_11_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v11_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}

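/*
 * Min comes from level 0 and max from the last level; either pointer
 * may be NULL when the caller only needs one bound.
 */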
int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
				  enum smu_clk_type clk_type,
				  uint32_t *min_value,
				  uint32_t *max_value)
{
	uint32_t level_count = 0;
	int ret = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, use the level 0 clock value as the min */
		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      0,
						      min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_v11_0_get_dpm_level_count(smu,
						    clk_type,
						    &level_count);
		if (ret)
			return ret;

		ret = smu_v11_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      level_count - 1,
						      max_value);
		if (ret)
			return ret;
	}

	return ret;
}

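/*
 * The LC_LINK_WIDTH_RD field holds an index into this driver's
 * link_width[] lane table, not a raw lane count; out-of-range values
 * are clamped to index 0 by the caller below.
 */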
int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
		>> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
}

uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
{
	uint32_t width_level;

	width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
	if (width_level > LINK_WIDTH_MAX)
		width_level = 0;

	return link_width[width_level];
}

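/*
 * Likewise, LC_CURRENT_DATA_RATE is an index into link_speed[] (PCIe
 * generation), with invalid values mapped to index 0.
 */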
int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	return (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
		>> PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
}

uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
{
	uint32_t speed_level;

	speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
	if (speed_level > LINK_SPEED_MAX)
		speed_level = 0;

	return link_speed[speed_level];
}

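/*
 * GFX ULV (ultra-low voltage) is toggled through its feature bit, and
 * only when the firmware advertises support for it.
 */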
int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}

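/*
 * Deep sleep is controlled per clock domain: each supported DS feature
 * bit (GFXCLK, UCLK, FCLK, SOCCLK, LCLK) is toggled in turn, bailing
 * out on the first failure.
 */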
int smu_v11_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	return ret;
}

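/*
 * Re-apply the cached user overdrive table to the firmware, e.g. after
 * the live settings have been lost to a reset.
 */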
int smu_v11_0_restore_user_od_settings(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	void *user_od_table = table_context->user_overdrive_table;
	int ret = 0;

	ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true);
	if (ret)
		dev_err(smu->adev->dev, "Failed to import overdrive table!\n");

	return ret;
}

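/*
 * Record the MP1 C2PMSG mailbox register offsets used for message,
 * argument and response traffic with the SMU.
 */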
void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
