/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/pci.h>
#include <linux/reboot.h>

#include "hwmgr.h"
#include "pp_debug.h"
#include "ppatomctrl.h"
#include "ppsmc.h"
#include "atom.h"
#include "ivsrcid/thm/irqsrcs_thm_9_0.h"
#include "ivsrcid/smuio/irqsrcs_smuio_9_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"

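/*
 * SVI2 VID <-> voltage conversion. Assuming VOLTAGE_SCALE == 4 (its value
 * in hwmgr.h), 6200 corresponds to 1.55 V in 0.25 mV units and each VID
 * step is 6.25 mV:
 *
 *   vid  = (1550 mV - vddc) / 6.25 mV
 *   vddc = 1550 mV - vid * 6.25 mV
 *
 * Worked example: vddc = 1000 mV gives vid = (6200 - 4000) / 25 = 88,
 * and convert_to_vddc(88) returns (6200 - 2200) / 4 = 1000 mV.
 */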
uint8_t convert_to_vid(uint16_t vddc)
{
	return (uint8_t)((6200 - (vddc * VOLTAGE_SCALE)) / 25);
}

uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t)((6200 - (vid * 25)) / VOLTAGE_SCALE);
}

int phm_copy_clock_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t power_saving_clock_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * power_saving_clock_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (i = 0; i < power_saving_clock_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

int phm_copy_overdrive_settings_limits_array(
	struct pp_hwmgr *hwmgr,
	uint32_t **pptable_info_array,
	const uint32_t *pptable_array,
	uint32_t od_setting_count)
{
	uint32_t array_size, i;
	uint32_t *table;

	array_size = sizeof(uint32_t) * od_setting_count;
	table = kzalloc(array_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	for (i = 0; i < od_setting_count; i++)
		table[i] = le32_to_cpu(pptable_array[i]);

	*pptable_info_array = table;

	return 0;
}

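/*
 * Insert an 8- or 16-bit field into a 32-bit register image. The byte
 * offset selects the lane within the dword; e.g. offset 2 with a
 * uint16_t field clears and rewrites bits 16-31. A minimal usage
 * sketch (hypothetical values):
 *
 *   data = phm_set_field_to_u32(2, data, 0xABCD, sizeof(uint16_t));
 */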
uint32_t phm_set_field_to_u32(u32 offset, u32 original_data, u32 field, u32 size)
{
	u32 mask = 0;
	u32 shift = 0;

	shift = (offset % 4) << 3;
	if (size == sizeof(uint8_t))
		mask = 0xFF << shift;
	else if (size == sizeof(uint16_t))
		mask = 0xFFFF << shift;

	original_data &= ~mask;
	original_data |= (field << shift);
	return original_data;
}

/*
 * Returns once the part of the register indicated by the mask has
 * reached the given value, or -ETIME if the timeout elapses first.
 */
int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index,
			 uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) == (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

/*
 * Returns once the part of the register indicated by the mask has
 * reached the given value. The indirect space is described by giving
 * the memory-mapped index of the indirect index register.
 */
int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr,
				uint32_t indirect_port,
				uint32_t index,
				uint32_t value,
				uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL) {
		pr_err("Invalid Hardware Manager!");
		return -EINVAL;
	}

	cgs_write_register(hwmgr->device, indirect_port, index);
	/* pass value before mask, matching phm_wait_on_register() */
	return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask);
}

int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr,
					uint32_t index,
					uint32_t value, uint32_t mask)
{
	uint32_t i;
	uint32_t cur_value;

	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	for (i = 0; i < hwmgr->usec_timeout; i++) {
		cur_value = cgs_read_register(hwmgr->device, index);
		if ((cur_value & mask) != (value & mask))
			break;
		udelay(1);
	}

	/* timeout means wrong logic */
	if (i == hwmgr->usec_timeout)
		return -ETIME;
	return 0;
}

int phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr,
						uint32_t indirect_port,
						uint32_t index,
						uint32_t value,
						uint32_t mask)
{
	if (hwmgr == NULL || hwmgr->device == NULL)
		return -EINVAL;

	cgs_write_register(hwmgr->device, indirect_port, index);
	return phm_wait_for_register_unequal(hwmgr, indirect_port + 1,
						value, mask);
}

bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			       PHM_PlatformCaps_UVDPowerGating);
}

bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr)
{
	return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			       PHM_PlatformCaps_VCEPowerGating);
}

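/*
 * Collapse duplicate voltage entries in place, keeping the first
 * occurrence of each value. For example, entries of 800, 800, 900 mV
 * are trimmed to 800, 900 mV with count updated to 2.
 */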
int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"Voltage Table empty.", return -EINVAL);

	table = kzalloc(sizeof(struct pp_atomctrl_voltage_table),
			GFP_KERNEL);

	if (!table)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table));
	kfree(table);

	return 0;
}

int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].mvdd;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim MVDD table.", return result);

	return 0;
}

int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint32_t i;
	int result;

	PP_ASSERT_WITH_CODE((0 != dep_table->count),
			"Voltage Dependency Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	result = phm_trim_voltage_table(vol_table);
	PP_ASSERT_WITH_CODE((0 == result),
			"Failed to trim VDDCI table.", return result);

	return 0;
}

int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table,
		phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE((0 != lookup_table->count),
			"Voltage Lookup Table empty.", return -EINVAL);

	PP_ASSERT_WITH_CODE((NULL != vol_table),
			"vol_table empty.", return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;

	vol_table->count = lookup_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = lookup_table->entries[i].us_vdd;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}

/* Drop the lowest entries so that at most max_vol_steps remain. */
void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps,
				struct pp_atomctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}

int phm_reset_single_dpm_table(void *table,
				uint32_t count, int max)
{
	int i;

	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->count = count > max ? max : count;

	for (i = 0; i < dpm_table->count; i++)
		dpm_table->dpm_level[i].enabled = false;

	return 0;
}

void phm_setup_pcie_table_entry(
	void *table,
	uint32_t index, uint32_t pcie_gen,
	uint32_t pcie_lanes)
{
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	dpm_table->dpm_level[index].value = pcie_gen;
	dpm_table->dpm_level[index].param1 = pcie_lanes;
	dpm_table->dpm_level[index].enabled = true;
}

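/*
 * Build a bitmask of enabled DPM levels, with bit i representing level i.
 * For example, three levels with only levels 0 and 2 enabled yield 0x5.
 */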
int32_t phm_get_dpm_level_enable_mask_value(void *table)
{
	int32_t i;
	int32_t mask = 0;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = dpm_table->count; i > 0; i--) {
		mask <<= 1;
		if (dpm_table->dpm_level[i - 1].enabled)
			mask |= 0x1;
	}

	return mask;
}

uint8_t phm_get_voltage_index(
		struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage)
{
	uint8_t count;
	uint8_t i;

	PP_ASSERT_WITH_CODE((NULL != lookup_table),
			"Lookup Table empty.", return 0);

	count = (uint8_t)(lookup_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
			"Lookup Table empty.", return 0);

	for (i = 0; i < lookup_table->count; i++) {
		/* find the first voltage equal to or bigger than requested */
		if (lookup_table->entries[i].us_vdd >= voltage)
			return i;
	}
	/* the requested voltage is bigger than the max voltage in the table */
	return i - 1;
}

uint8_t phm_get_voltage_id(pp_atomctrl_voltage_table *voltage_table,
		uint32_t voltage)
{
	uint8_t count;
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((NULL != voltage_table),
		"Voltage Table empty.", return 0);

	count = (uint8_t)(voltage_table->count);
	PP_ASSERT_WITH_CODE((0 != count),
		"Voltage Table empty.", return 0);

	for (i = 0; i < count; i++) {
		/* find the first voltage equal to or bigger than requested */
		if (voltage_table->entries[i].value >= voltage)
			return i;
	}

	/* the requested voltage is bigger than the max voltage in the table */
	return i - 1;
}

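/*
 * Return the smallest VDDCI table value that is >= the requested value,
 * falling back to the largest entry when the request exceeds the table.
 */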
uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci)
{
	uint32_t i;

	for (i = 0; i < vddci_table->count; i++) {
		if (vddci_table->entries[i].value >= vddci)
			return vddci_table->entries[i].value;
	}

	pr_debug("vddci is larger than max value in vddci_table\n");
	return vddci_table->entries[i - 1].value;
}

int phm_find_boot_level(void *table,
		uint32_t value, uint32_t *boot_level)
{
	int result = -EINVAL;
	uint32_t i;
	struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table;

	for (i = 0; i < dpm_table->count; i++) {
		if (value == dpm_table->dpm_level[i].value) {
			*boot_level = i;
			result = 0;
		}
	}

	return result;
}

int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *sclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and the matching sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_sclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	if (entry_id >= table_info->vdd_dep_on_sclk->count) {
		pr_debug("Can't find requested voltage id in vdd_dep_on_sclk table\n");
		return -EINVAL;
	}

	*sclk = table_info->vdd_dep_on_sclk->entries[entry_id].clk;

	return 0;
}

/**
 * phm_initializa_dynamic_state_adjustment_rule_settings - Initialize Dynamic State Adjustment Rule Settings
 *
 * @hwmgr:  the address of the powerplay hardware manager.
 */
int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr)
{
	struct phm_clock_voltage_dependency_table *table_clk_vlt;
	struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);

	/* initialize vddc_dep_on_dal_pwrl table */
	table_clk_vlt = kzalloc(struct_size(table_clk_vlt, entries, 4),
				GFP_KERNEL);

	if (!table_clk_vlt) {
		pr_err("Cannot allocate space for vddc_dep_on_dal_pwrl!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 4;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW;
	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM)
		table_clk_vlt->entries[0].v = 700;
	else
		table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW;
	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM)
		table_clk_vlt->entries[1].v = 740;
	else
		table_clk_vlt->entries[1].v = 720;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL;
	if (hwmgr->chip_id >= CHIP_POLARIS10 &&
	    hwmgr->chip_id <= CHIP_VEGAM)
		table_clk_vlt->entries[2].v = 800;
	else
		table_clk_vlt->entries[2].v = 810;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE;
	table_clk_vlt->entries[3].v = 900;
	if (pptable_info != NULL)
		pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

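/*
 * Return the index of the lowest set bit in @mask, e.g. 0x18 -> 3.
 * The caller must pass a non-zero mask; a zero mask would shift past
 * bit 31 and never terminate.
 */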
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	while (!(mask & (1 << level)))
		level++;

	return level;
}

void phm_apply_dal_min_voltage_request(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_information *table_info =
			(struct phm_ppt_v1_information *)hwmgr->pptable;
	struct phm_clock_voltage_dependency_table *table =
				table_info->vddc_dep_on_dal_pwrl;
	struct phm_ppt_v1_clock_voltage_dependency_table *vddc_table;
	enum PP_DAL_POWERLEVEL dal_power_level = hwmgr->dal_power_level;
	uint32_t req_vddc = 0, req_volt, i;

	if (!table || table->count <= 0
		|| dal_power_level < PP_DAL_POWERLEVEL_ULTRALOW
		|| dal_power_level > PP_DAL_POWERLEVEL_PERFORMANCE)
		return;

	for (i = 0; i < table->count; i++) {
		if (dal_power_level == table->entries[i].clk) {
			req_vddc = table->entries[i].v;
			break;
		}
	}

	vddc_table = table_info->vdd_dep_on_sclk;
	for (i = 0; i < vddc_table->count; i++) {
		if (req_vddc <= vddc_table->entries[i].vddc) {
			req_volt = (((uint32_t)vddc_table->entries[i].vddc) * VOLTAGE_SCALE);
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_VddC_Request,
					req_volt,
					NULL);
			return;
		}
	}
	pr_err("DAL requested level cannot be matched to an available voltage in the VDDC DPM table\n");
}

int phm_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
				uint32_t sclk, uint16_t id, uint16_t *voltage)
{
	uint32_t vol;
	int ret = 0;

	if (hwmgr->chip_id < CHIP_TONGA) {
		ret = atomctrl_get_voltage_evv(hwmgr, id, voltage);
	} else if (hwmgr->chip_id < CHIP_POLARIS10) {
		ret = atomctrl_get_voltage_evv_on_sclk(hwmgr, voltage_type, sclk, id, voltage);
		/* clamp implausible readings to a safe 1150 mV default */
		if (*voltage >= 2000 || *voltage == 0)
			*voltage = 1150;
	} else {
		ret = atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, voltage_type, sclk, id, &vol);
		*voltage = (uint16_t)(vol / 100);
	}
	return ret;
}

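/*
 * Thermal interrupt handler shared by pre-SOC15 (legacy client ID) and
 * SOC15 parts. Low-to-high temperature trips schedule the software CTF
 * worker; high-to-low trips are logged; a hardware CTF (GPIO_19 on
 * legacy, the SMUIO client on SOC15) forces an orderly shutdown to
 * protect the part.
 */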
int phm_irq_process(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == AMDGPU_IRQ_CLIENTID_LEGACY) {
		if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_LOW_TO_HIGH) {
			schedule_delayed_work(&hwmgr->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
		} else if (src_id == VISLANDS30_IV_SRCID_CG_TSS_THERMAL_HIGH_TO_LOW) {
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
		} else if (src_id == VISLANDS30_IV_SRCID_GPIO_19) {
			dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
			/*
			 * HW CTF just occurred. Shutdown to prevent further damage.
			 */
			dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
			orderly_poweroff(true);
		}
	} else if (client_id == SOC15_IH_CLIENTID_THM) {
		if (src_id == 0)
			schedule_delayed_work(&hwmgr->swctf_delayed_work,
					      msecs_to_jiffies(AMDGPU_SWCTF_EXTRA_DELAY));
		else
			dev_emerg(adev->dev, "ERROR: GPU under temperature range detected!\n");
	} else if (client_id == SOC15_IH_CLIENTID_ROM_SMUIO) {
		dev_emerg(adev->dev, "ERROR: GPU HW Critical Temperature Fault(aka CTF) detected!\n");
		/*
		 * HW CTF just occurred. Shutdown to prevent further damage.
		 */
		dev_emerg(adev->dev, "ERROR: System is going to shutdown due to GPU HW CTF!\n");
		orderly_poweroff(true);
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu9_irq_funcs = {
	.process = phm_irq_process,
};

int smu9_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);

	if (!source)
		return -ENOMEM;

	source->funcs = &smu9_irq_funcs;

	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_L2H,
			source);
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_THM,
			THM_9_0__SRCID__THM_DIG_THERM_H2L,
			source);

	/* Register the CTF (GPIO_19) interrupt */
	amdgpu_irq_add_id((struct amdgpu_device *)(hwmgr->adev),
			SOC15_IH_CLIENTID_ROM_SMUIO,
			SMUIO_9_0__SRCID__SMUIO_GPIO19,
			source);

	return 0;
}

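/*
 * Look up an ATOM BIOS data table by index and return a pointer into
 * the BIOS image, or NULL if the header cannot be parsed. @size, @frev
 * and @crev are filled with the table length and format/content revisions.
 */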
void *smu_atom_get_data_table(void *dev, uint32_t table, uint16_t *size,
						uint8_t *frev, uint8_t *crev)
{
	struct amdgpu_device *adev = dev;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t *)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}

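/*
 * Copy a ppt_v1 clock/voltage dependency table entry by entry. The
 * caller is expected to have sized dep_table to hold at least
 * allowed_dep_table->count entries.
 */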
int smu_get_voltage_dependency_table_ppt_v1(
			const struct phm_ppt_v1_clock_voltage_dependency_table *allowed_dep_table,
			struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	uint8_t i = 0;

	PP_ASSERT_WITH_CODE((0 != allowed_dep_table->count),
				"Voltage Dependency Table empty",
				return -EINVAL);

	dep_table->count = allowed_dep_table->count;
	for (i = 0; i < dep_table->count; i++) {
		dep_table->entries[i].clk = allowed_dep_table->entries[i].clk;
		dep_table->entries[i].vddInd = allowed_dep_table->entries[i].vddInd;
		dep_table->entries[i].vdd_offset = allowed_dep_table->entries[i].vdd_offset;
		dep_table->entries[i].vddc = allowed_dep_table->entries[i].vddc;
		dep_table->entries[i].vddgfx = allowed_dep_table->entries[i].vddgfx;
		dep_table->entries[i].vddci = allowed_dep_table->entries[i].vddci;
		dep_table->entries[i].mvdd = allowed_dep_table->entries[i].mvdd;
		dep_table->entries[i].phases = allowed_dep_table->entries[i].phases;
		dep_table->entries[i].cks_enable = allowed_dep_table->entries[i].cks_enable;
		dep_table->entries[i].cks_voffset = allowed_dep_table->entries[i].cks_voffset;
	}

	return 0;
}

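/*
 * Translate DC watermark ranges into the SMU watermarks table. Row 1
 * holds the DMIF (DCFCLK-based) sets and row 0 the MCIF (SOCCLK-based)
 * sets; clocks arrive in kHz and are stored as little-endian MHz. At
 * most four sets of each type fit in the table.
 */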
int smu_set_watermarks_for_clocks_ranges(void *wt_table,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
{
	uint32_t i;
	struct watermarks *table = wt_table;

	if (!table || !wm_with_clock_ranges)
		return -EINVAL;

	if (wm_with_clock_ranges->num_wm_dmif_sets > 4 ||
	    wm_with_clock_ranges->num_wm_mcif_sets > 4)
		return -EINVAL;

	for (i = 0; i < wm_with_clock_ranges->num_wm_dmif_sets; i++) {
		table->WatermarkRow[1][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[1][i].WmSetting = (uint8_t)
				wm_with_clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id;
	}

	for (i = 0; i < wm_with_clock_ranges->num_wm_mcif_sets; i++) {
		table->WatermarkRow[0][i].MinClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxClock =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MinUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].MaxUclk =
			cpu_to_le16((uint16_t)
			(wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz /
			1000));
		table->WatermarkRow[0][i].WmSetting = (uint8_t)
				wm_with_clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
	}
	return 0;
}