/*	$NetBSD: amdgpu_smu7_smumgr.c,v 1.2 2021/12/18 23:45:27 riastradh Exp $	*/

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_smu7_smumgr.c,v 1.2 2021/12/18 23:45:27 riastradh Exp $");

#include "pp_debug.h"
#include "smumgr.h"
#include "smu_ucode_xfer_vi.h"
#include "ppatomctrl.h"
#include "cgs_common.h"
#include "smu7_ppsmc.h"
#include "smu7_smumgr.h"
#include "smu7_common.h"

#include "polaris10_pwrvirus.h"

#define SMU7_SMC_SIZE 0x20000

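/*
 * Select an SMC SRAM address for a subsequent indirect access through
 * SMC_IND_DATA_11.  The address must be 4-byte aligned and within the
 * caller-supplied limit; auto-increment is disabled so each access targets
 * exactly one dword.
 */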
static int smu7_set_smc_sram_address(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t limit)
{
	PP_ASSERT_WITH_CODE((0 == (3 & smc_addr)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_addr + 3)), "SMC addr is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, smc_addr);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0); /* on ci, SMC_IND_ACCESS_CNTL is different */
	return 0;
}


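/*
 * Copy byte_count bytes out of SMC SRAM starting at smc_start_address into
 * dest.  Whole dwords are read first; any 1-3 byte tail is read as a full
 * dword and then copied out byte by byte so the destination buffer is not
 * overrun.
 */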
int smu7_copy_bytes_from_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address, uint32_t *dest, uint32_t byte_count, uint32_t limit)
{
	uint32_t data;
	uint32_t addr;
	uint8_t *dest_byte;
	uint8_t i, data_byte[4] = {0};
	uint32_t *pdata = (uint32_t *)&data_byte;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);

		*dest = PP_SMC_TO_HOST_UL(data);

		dest += 1;
		byte_count -= 4;
		addr += 4;
	}

	if (byte_count) {
		smu7_read_smc_sram_dword(hwmgr, addr, &data, limit);
		*pdata = PP_SMC_TO_HOST_UL(data);
	/* Cast dest into byte type in dest_byte.  This way, we don't overflow if the allocated memory is not 4-byte aligned. */
		dest_byte = (uint8_t *)dest;
		for (i = 0; i < byte_count; i++)
			dest_byte[i] = data_byte[i];
	}

	return 0;
}


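/*
 * Copy byte_count bytes from src into SMC SRAM at smc_start_address.  Full
 * dwords are written MSB first; a trailing partial dword is merged with the
 * existing SMC contents so the bytes beyond byte_count are preserved.
 */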
int smu7_copy_bytes_to_smc(struct pp_hwmgr *hwmgr, uint32_t smc_start_address,
				const uint8_t *src, uint32_t byte_count, uint32_t limit)
{
	int result;
	uint32_t data = 0;
	uint32_t original_data;
	uint32_t addr = 0;
	uint32_t extra_shift;

	PP_ASSERT_WITH_CODE((0 == (3 & smc_start_address)), "SMC address must be 4 byte aligned.", return -EINVAL);
	PP_ASSERT_WITH_CODE((limit > (smc_start_address + byte_count)), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	addr = smc_start_address;

	while (byte_count >= 4) {
	/* Bytes are written into the SMC address space with the MSB first. */
		data = src[0] * 0x1000000 + src[1] * 0x10000 + src[2] * 0x100 + src[3];

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	if (0 != byte_count) {

		data = 0;

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;


		original_data = cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* Bytes are written into the SMC address space with the MSB first. */
			data = (0x100 * data) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		data |= (original_data & ~((~0UL) << extra_shift));

		result = smu7_set_smc_sram_address(hwmgr, addr, limit);

		if (0 != result)
			return result;

		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, data);
	}

	return 0;
}


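/* Place the 4-byte jump-on-start sequence at SMC address 0x0. */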
int smu7_program_jump_on_start(struct pp_hwmgr *hwmgr)
{
	static const unsigned char data[4] = { 0xE0, 0x00, 0x80, 0x40 };

	smu7_copy_bytes_to_smc(hwmgr, 0x0, data, 4, sizeof(data)+1);

	return 0;
}

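/*
 * The SMC is considered running when its clock is not disabled and its
 * program counter has advanced past 0x20100.
 */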
bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
{
	return ((0 == PHM_READ_VFPF_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SMC_SYSCON_CLOCK_CNTL_0, ck_disable))
	&& (0x20100 <= cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
}

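/*
 * Send a message to the SMC and wait for its response.  The status of the
 * previous message is checked and logged first, then the new message is
 * posted and its completion status is read back; failures are only logged,
 * not propagated.
 */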
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	int ret;

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret == 0xFE)
		pr_debug("last message was not supported\n");
	else if (ret != 1)
		pr_info("\n last message was failed ret is %d\n", ret);

	cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);

	if (ret == 0xFE)
		pr_debug("message %x was not supported\n", msg);
	else if (ret != 1)
		pr_info("\n failed to send message %x ret is %d \n",  msg, ret);

	return 0;
}

int smu7_send_msg_to_smc_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg)
{
	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);

	return 0;
}

int smu7_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc(hwmgr, msg);
}

int smu7_send_msg_to_smc_with_parameter_without_waiting(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t parameter)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, parameter);

	return smu7_send_msg_to_smc_without_waiting(hwmgr, msg);
}

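/* Send PPSMC_MSG_Test with argument 0x20000 and log an error if the SMC does not acknowledge it. */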
int smu7_send_msg_to_smc_offset(struct pp_hwmgr *hwmgr)
{
	cgs_write_register(hwmgr->device, mmSMC_MSG_ARG_0, 0x20000);

	cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, PPSMC_MSG_Test);

	PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);

	if (1 != PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP))
		pr_info("Failed to send Message.\n");

	return 0;
}

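/* Map a SMU firmware id (UCODE_ID_*) onto the matching CGS ucode id. */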
enum cgs_ucode_id smu7_convert_fw_type_to_cgs(uint32_t fw_type)
{
	enum cgs_ucode_id result = CGS_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case UCODE_ID_SMU:
		result = CGS_UCODE_ID_SMU;
		break;
	case UCODE_ID_SMU_SK:
		result = CGS_UCODE_ID_SMU_SK;
		break;
	case UCODE_ID_SDMA0:
		result = CGS_UCODE_ID_SDMA0;
		break;
	case UCODE_ID_SDMA1:
		result = CGS_UCODE_ID_SDMA1;
		break;
	case UCODE_ID_CP_CE:
		result = CGS_UCODE_ID_CP_CE;
		break;
	case UCODE_ID_CP_PFP:
		result = CGS_UCODE_ID_CP_PFP;
		break;
	case UCODE_ID_CP_ME:
		result = CGS_UCODE_ID_CP_ME;
		break;
	case UCODE_ID_CP_MEC:
		result = CGS_UCODE_ID_CP_MEC;
		break;
	case UCODE_ID_CP_MEC_JT1:
		result = CGS_UCODE_ID_CP_MEC_JT1;
		break;
	case UCODE_ID_CP_MEC_JT2:
		result = CGS_UCODE_ID_CP_MEC_JT2;
		break;
	case UCODE_ID_RLC_G:
		result = CGS_UCODE_ID_RLC_G;
		break;
	case UCODE_ID_MEC_STORAGE:
		result = CGS_UCODE_ID_STORAGE;
		break;
	default:
		break;
	}

	return result;
}


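/* Read or write a single dword of SMC SRAM through the indirect index/data register pair. */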
int smu7_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t *value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	*value = result ? 0 : cgs_read_register(hwmgr->device, mmSMC_IND_DATA_11);

	return result;
}

int smu7_write_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, uint32_t value, uint32_t limit)
{
	int result;

	result = smu7_set_smc_sram_address(hwmgr, smc_addr, limit);

	if (result)
		return result;

	cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, value);

	return 0;
}

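/*
 * Fill one SMU_Entry of the firmware TOC with the version, MC address and
 * size of the requested ucode, as reported by cgs_get_firmware_info().
 */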
static int smu7_populate_single_firmware_entry(struct pp_hwmgr *hwmgr,
						uint32_t fw_type,
						struct SMU_Entry *entry)
{
	int result = 0;
	struct cgs_firmware_info info = {0};

	result = cgs_get_firmware_info(hwmgr->device,
				smu7_convert_fw_type_to_cgs(fw_type),
				&info);

	if (!result) {
		entry->version = info.fw_version;
		entry->id = (uint16_t)fw_type;
		entry->image_addr_high = upper_32_bits(info.mc_addr);
		entry->image_addr_low = lower_32_bits(info.mc_addr);
		entry->meta_data_addr_high = 0;
		entry->meta_data_addr_low = 0;

		/* the digest needs to be excluded */
		if (!hwmgr->not_vf)
			info.image_size -= 20;
		entry->data_size_byte = info.image_size;
		entry->num_register_entries = 0;
	}

	if ((fw_type == UCODE_ID_RLC_G)
		|| (fw_type == UCODE_ID_CP_MEC))
		entry->flags = 1;
	else
		entry->flags = 0;

	return 0;
}

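/*
 * Build the firmware TOC (once), hand its DRAM address to the SMC and ask
 * the SMC to load the listed ucodes, then wait for the load-status register
 * to report completion.
 */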
int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t fw_to_load;
	int r = 0;

	amdgpu_ucode_init_bo(hwmgr->adev);

	if (smu_data->soft_regs_start)
		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					0x0);

	if (hwmgr->chip_id > CHIP_TOPAZ) { /* add support for Topaz */
		if (hwmgr->not_vf) {
			smu7_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_HI,
						upper_32_bits(smu_data->smu_buffer.mc_addr));
			smu7_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SMU_DRAM_ADDR_LO,
						lower_32_bits(smu_data->smu_buffer.mc_addr));
		}
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK;
	} else {
		fw_to_load = UCODE_ID_RLC_G_MASK
			   + UCODE_ID_SDMA0_MASK
			   + UCODE_ID_SDMA1_MASK
			   + UCODE_ID_CP_CE_MASK
			   + UCODE_ID_CP_ME_MASK
			   + UCODE_ID_CP_PFP_MASK
			   + UCODE_ID_CP_MEC_MASK
			   + UCODE_ID_CP_MEC_JT1_MASK
			   + UCODE_ID_CP_MEC_JT2_MASK;
	}

	if (!smu_data->toc) {
		struct SMU_DRAMData_TOC *toc;

		smu_data->toc = kzalloc(sizeof(struct SMU_DRAMData_TOC), GFP_KERNEL);
		if (!smu_data->toc)
			return -ENOMEM;
		toc = smu_data->toc;
		toc->num_entries = 0;
		toc->structure_version = 1;

		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
		if (!hwmgr->not_vf)
			PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
				UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
				"Failed to Get Firmware Entry.", r = -EINVAL; goto failed);
	}
	memcpy_toio(smu_data->header_buffer.kaddr, smu_data->toc,
		    sizeof(struct SMU_DRAMData_TOC));
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));

	smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_LoadUcodes, fw_to_load);

	r = smu7_check_fw_load_finish(hwmgr, fw_to_load);
	if (!r)
		return 0;

	pr_err("SMU load firmware failed\n");

failed:
	kfree(smu_data->toc);
	smu_data->toc = NULL;
	return r;
}

/* Check whether the FW has been loaded; the SMU does not report completion until loading has finished. */
int smu7_check_fw_load_finish(struct pp_hwmgr *hwmgr, uint32_t fw_type)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	uint32_t ret;

	ret = phm_wait_on_indirect_register(hwmgr, mmSMC_IND_INDEX_11,
					smu_data->soft_regs_start + smum_get_offsetof(hwmgr,
					SMU_SoftRegisters, UcodeLoadStatus),
					fw_type, fw_type);
	return ret;
}

int smu7_reload_firmware(struct pp_hwmgr *hwmgr)
{
	return hwmgr->smumgr_funcs->start_smu(hwmgr);
}

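/*
 * Stream the SMC firmware image into SMC SRAM at offset 0x20000 using the
 * auto-incrementing indirect data register.  The image length must be a
 * multiple of 4 bytes and must not exceed the given limit.
 */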
static int smu7_upload_smc_firmware_data(struct pp_hwmgr *hwmgr, uint32_t length, uint32_t *src, uint32_t limit)
{
	uint32_t byte_count = length;

	PP_ASSERT_WITH_CODE((limit >= byte_count), "SMC address is beyond the SMC RAM area.", return -EINVAL);

	cgs_write_register(hwmgr->device, mmSMC_IND_INDEX_11, 0x20000);
	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 1);

	for (; byte_count >= 4; byte_count -= 4)
		cgs_write_register(hwmgr->device, mmSMC_IND_DATA_11, *src++);

	PHM_WRITE_FIELD(hwmgr->device, SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_11, 0);

	PP_ASSERT_WITH_CODE((0 == byte_count), "SMC size must be divisible by 4.", return -EINVAL);

	return 0;
}


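/*
 * Upload the SMU firmware image selected by the security hard key
 * (UCODE_ID_SMU or UCODE_ID_SMU_SK) and record its version information.
 */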
int smu7_upload_smu_firmware_image(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	struct cgs_firmware_info info = {0};

	if (smu_data->security_hard_key == 1)
		cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU), &info);
	else
		cgs_get_firmware_info(hwmgr->device,
			smu7_convert_fw_type_to_cgs(UCODE_ID_SMU_SK), &info);

	hwmgr->is_kicker = info.is_kicker;
	hwmgr->smu_version = info.version;
	result = smu7_upload_smc_firmware_data(hwmgr, info.image_size, (uint32_t *)info.kptr, SMU7_SMC_SIZE);

	return result;
}

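/*
 * Helpers for the power-virus workload: write out a register/data table
 * (terminated by a 0xffffffff register) and replay a CP DFY section into
 * the CP_DFY_* registers.
 */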
static void execute_pwr_table(struct pp_hwmgr *hwmgr, const PWR_Command_Table *pvirus, int size)
{
	int i;
	uint32_t reg, data;

	for (i = 0; i < size; i++) {
		reg  = pvirus->reg;
		data = pvirus->data;
		if (reg != 0xffffffff)
			cgs_write_register(hwmgr->device, reg, data);
		else
			break;
		pvirus++;
	}
}

static void execute_pwr_dfy_table(struct pp_hwmgr *hwmgr, const PWR_DFY_Section *section)
{
	int i;

	cgs_write_register(hwmgr->device, mmCP_DFY_CNTL, section->dfy_cntl);
	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_HI, section->dfy_addr_hi);
	cgs_write_register(hwmgr->device, mmCP_DFY_ADDR_LO, section->dfy_addr_lo);
	for (i = 0; i < section->dfy_size; i++)
		cgs_write_register(hwmgr->device, mmCP_DFY_DATA_0, section->dfy_data[i]);
}

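/* Program the power-virus sequence: the pre table, the six DFY sections, then the post table. */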
int smu7_setup_pwr_virus(struct pp_hwmgr *hwmgr)
{
	execute_pwr_table(hwmgr, pwr_virus_table_pre, ARRAY_SIZE(pwr_virus_table_pre));
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section1);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section2);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section3);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section4);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section5);
	execute_pwr_dfy_table(hwmgr, &pwr_virus_section6);
	execute_pwr_table(hwmgr, pwr_virus_table_post, ARRAY_SIZE(pwr_virus_table_post));

	return 0;
}

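/*
 * Allocate the VRAM buffers used to communicate with the SMC: the TOC
 * header buffer and, on bare metal, the SMU DRAM buffer whose address is
 * passed to the SMC before firmware loading.
 */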
int smu7_init(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data;
	int r;
	/* Allocate memory for backend private data */
	smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);
	smu_data->header_buffer.data_size =
			((sizeof(struct SMU_DRAMData_TOC) / 4096) + 1) * 4096;

/* Allocate FW image data structure and header buffer and
 * send the header buffer address to SMU */
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->header_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->header_buffer.handle,
		&smu_data->header_buffer.mc_addr,
		&smu_data->header_buffer.kaddr);

	if (r)
		return -EINVAL;

	if (!hwmgr->not_vf)
		return 0;

	smu_data->smu_buffer.data_size = 200*4096;
	r = amdgpu_bo_create_kernel((struct amdgpu_device *)hwmgr->adev,
		smu_data->smu_buffer.data_size,
		PAGE_SIZE,
		AMDGPU_GEM_DOMAIN_VRAM,
		&smu_data->smu_buffer.handle,
		&smu_data->smu_buffer.mc_addr,
		&smu_data->smu_buffer.kaddr);

	if (r) {
		amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);
		return -EINVAL;
	}

	if (smum_is_hw_avfs_present(hwmgr) &&
	    (hwmgr->feature_mask & PP_AVFS_MASK))
		hwmgr->avfs_supported = true;

	return 0;
}


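/* Release the buffers, the cached TOC and the backend private data. */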
int smu7_smu_fini(struct pp_hwmgr *hwmgr)
{
	struct smu7_smumgr *smu_data = (struct smu7_smumgr *)(hwmgr->smu_backend);

	amdgpu_bo_free_kernel(&smu_data->header_buffer.handle,
					&smu_data->header_buffer.mc_addr,
					&smu_data->header_buffer.kaddr);

	if (hwmgr->not_vf)
		amdgpu_bo_free_kernel(&smu_data->smu_buffer.handle,
					&smu_data->smu_buffer.mc_addr,
					&smu_data->smu_buffer.kaddr);


	kfree(smu_data->toc);
	smu_data->toc = NULL;
	kfree(hwmgr->smu_backend);
	hwmgr->smu_backend = NULL;
	return 0;
}