1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * AMD Platform Management Framework Driver
4 *
5 * Copyright (c) 2022, Advanced Micro Devices, Inc.
6 * All Rights Reserved.
7 *
8 * Author: Shyam Sundar S K <Shyam-sundar.S-k@amd.com>
9 */
10
11#include <asm/amd_nb.h>
12#include <linux/debugfs.h>
13#include <linux/iopoll.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16#include <linux/platform_device.h>
17#include <linux/power_supply.h>
18#include "pmf.h"
19
20/* PMF-SMU communication registers */
21#define AMD_PMF_REGISTER_MESSAGE	0xA18
22#define AMD_PMF_REGISTER_RESPONSE	0xA78
23#define AMD_PMF_REGISTER_ARGUMENT	0xA58
24
25/* Base address of SMU for mapping physical address to virtual address */
26#define AMD_PMF_MAPPING_SIZE		0x01000
27#define AMD_PMF_BASE_ADDR_OFFSET	0x10000
28#define AMD_PMF_BASE_ADDR_LO		0x13B102E8
29#define AMD_PMF_BASE_ADDR_HI		0x13B102EC
30#define AMD_PMF_BASE_ADDR_LO_MASK	GENMASK(15, 0)
31#define AMD_PMF_BASE_ADDR_HI_MASK	GENMASK(31, 20)
32
33/* SMU Response Codes */
34#define AMD_PMF_RESULT_OK                    0x01
35#define AMD_PMF_RESULT_CMD_REJECT_BUSY       0xFC
36#define AMD_PMF_RESULT_CMD_REJECT_PREREQ     0xFD
37#define AMD_PMF_RESULT_CMD_UNKNOWN           0xFE
38#define AMD_PMF_RESULT_FAILED                0xFF
39
40/* List of supported CPU ids */
41#define AMD_CPU_ID_RMB			0x14b5
42#define AMD_CPU_ID_PS			0x14e8
43#define PCI_DEVICE_ID_AMD_1AH_M20H_ROOT	0x1507
44
45#define PMF_MSG_DELAY_MIN_US		50
46#define RESPONSE_REGISTER_LOOP_MAX	20000
47
48#define DELAY_MIN_US	2000
49#define DELAY_MAX_US	3000
50
51/* override Metrics Table sample size time (in ms) */
52static int metrics_table_loop_ms = 1000;
53module_param(metrics_table_loop_ms, int, 0644);
54MODULE_PARM_DESC(metrics_table_loop_ms, "Metrics Table sample size time (default = 1000ms)");
55
56/* Force load on supported older platforms */
57static bool force_load;
58module_param(force_load, bool, 0444);
59MODULE_PARM_DESC(force_load, "Force load this driver on supported older platforms (experimental)");
60
61static int amd_pmf_pwr_src_notify_call(struct notifier_block *nb, unsigned long event, void *data)
62{
63	struct amd_pmf_dev *pmf = container_of(nb, struct amd_pmf_dev, pwr_src_notifier);
64
65	if (event != PSY_EVENT_PROP_CHANGED)
66		return NOTIFY_OK;
67
68	if (is_apmf_func_supported(pmf, APMF_FUNC_AUTO_MODE) ||
69	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_DC) ||
70	    is_apmf_func_supported(pmf, APMF_FUNC_DYN_SLIDER_AC)) {
71		if ((pmf->amt_enabled || pmf->cnqf_enabled) && is_pprof_balanced(pmf))
72			return NOTIFY_DONE;
73	}
74
75	if (is_apmf_func_supported(pmf, APMF_FUNC_STATIC_SLIDER_GRANULAR))
76		amd_pmf_set_sps_power_limits(pmf);
77
78	if (is_apmf_func_supported(pmf, APMF_FUNC_OS_POWER_SLIDER_UPDATE))
79		amd_pmf_power_slider_update_event(pmf);
80
81	return NOTIFY_OK;
82}
83
84static int current_power_limits_show(struct seq_file *seq, void *unused)
85{
86	struct amd_pmf_dev *dev = seq->private;
87	struct amd_pmf_static_slider_granular table;
88	int mode, src = 0;
89
90	mode = amd_pmf_get_pprof_modes(dev);
91	if (mode < 0)
92		return mode;
93
94	src = amd_pmf_get_power_source();
95	amd_pmf_update_slider(dev, SLIDER_OP_GET, mode, &table);
96	seq_printf(seq, "spl:%u fppt:%u sppt:%u sppt_apu_only:%u stt_min:%u stt[APU]:%u stt[HS2]: %u\n",
97		   table.prop[src][mode].spl,
98		   table.prop[src][mode].fppt,
99		   table.prop[src][mode].sppt,
100		   table.prop[src][mode].sppt_apu_only,
101		   table.prop[src][mode].stt_min,
102		   table.prop[src][mode].stt_skin_temp[STT_TEMP_APU],
103		   table.prop[src][mode].stt_skin_temp[STT_TEMP_HS2]);
104	return 0;
105}
106DEFINE_SHOW_ATTRIBUTE(current_power_limits);
107
/* Remove the "amd_pmf" debugfs directory and everything under it */
static void amd_pmf_dbgfs_unregister(struct amd_pmf_dev *dev)
{
	debugfs_remove_recursive(dev->dbgfs_dir);
}
112
113static void amd_pmf_dbgfs_register(struct amd_pmf_dev *dev)
114{
115	dev->dbgfs_dir = debugfs_create_dir("amd_pmf", NULL);
116	if (dev->pmf_if_version == PMF_IF_V1)
117		debugfs_create_file("current_power_limits", 0644, dev->dbgfs_dir, dev,
118				    &current_power_limits_fops);
119}
120
121int amd_pmf_get_power_source(void)
122{
123	if (power_supply_is_system_supplied() > 0)
124		return POWER_SOURCE_AC;
125	else
126		return POWER_SOURCE_DC;
127}
128
/*
 * Periodic worker: fetch a fresh metrics table from PMFW and feed it to the
 * Auto Mode / CnQF state machines, then re-arm itself. Serialized against
 * other table users by dev->update_mutex.
 */
static void amd_pmf_get_metrics(struct work_struct *work)
{
	struct amd_pmf_dev *dev = container_of(work, struct amd_pmf_dev, work_buffer.work);
	ktime_t time_elapsed_ms;
	int socket_power;

	mutex_lock(&dev->update_mutex);
	/* Transfer table contents */
	memset(dev->buf, 0, sizeof(dev->m_table));
	/* NOTE(review): arg 7 appears to select the metrics table — confirm against PMFW spec */
	amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
	memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));

	/* Time since the previous sample, used as the transition window */
	time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
	/* Calculate the avg SoC power consumption */
	socket_power = dev->m_table.apu_power + dev->m_table.dgpu_power;

	if (dev->amt_enabled) {
		/* Apply the Auto Mode transition */
		amd_pmf_trans_automode(dev, socket_power, time_elapsed_ms);
	}

	if (dev->cnqf_enabled) {
		/* Apply the CnQF transition */
		amd_pmf_trans_cnqf(dev, socket_power, time_elapsed_ms);
	}

	dev->start_time = ktime_to_ms(ktime_get());
	/* Re-arm the sampling loop; period is the metrics_table_loop_ms module param */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms));
	mutex_unlock(&dev->update_mutex);
}
159
/* MMIO read from the PMF-SMU mailbox region mapped at dev->regbase */
static inline u32 amd_pmf_reg_read(struct amd_pmf_dev *dev, int reg_offset)
{
	return ioread32(dev->regbase + reg_offset);
}
164
/* MMIO write to the PMF-SMU mailbox region mapped at dev->regbase */
static inline void amd_pmf_reg_write(struct amd_pmf_dev *dev, int reg_offset, u32 val)
{
	iowrite32(val, dev->regbase + reg_offset);
}
169
170static void __maybe_unused amd_pmf_dump_registers(struct amd_pmf_dev *dev)
171{
172	u32 value;
173
174	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_RESPONSE);
175	dev_dbg(dev->dev, "AMD_PMF_REGISTER_RESPONSE:%x\n", value);
176
177	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
178	dev_dbg(dev->dev, "AMD_PMF_REGISTER_ARGUMENT:%d\n", value);
179
180	value = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_MESSAGE);
181	dev_dbg(dev->dev, "AMD_PMF_REGISTER_MESSAGE:%x\n", value);
182}
183
/**
 * amd_pmf_send_cmd - issue one mailbox command to the PMFW (SMU)
 * @dev: PMF device instance
 * @message: command ID written to the message register
 * @get: when true, read the command result back from the argument register
 * @arg: argument value for the command
 * @data: result destination, dereferenced only when @get is true
 *
 * Protocol: wait for the response register to go non-zero (previous command
 * retired), clear it, write argument then message, poll for the response,
 * then decode the SMU result code. Serialized by dev->lock.
 *
 * Return: 0 on success, -ETIMEDOUT/-EBUSY/-EINVAL/-EIO on failure.
 */
int amd_pmf_send_cmd(struct amd_pmf_dev *dev, u8 message, bool get, u32 arg, u32 *data)
{
	int rc;
	u32 val;

	mutex_lock(&dev->lock);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "failed to talk to SMU\n");
		goto out_unlock;
	}

	/* Write zero to response register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_RESPONSE, 0);

	/* Write argument into argument register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_ARGUMENT, arg);

	/* Write message ID to message ID register */
	amd_pmf_reg_write(dev, AMD_PMF_REGISTER_MESSAGE, message);

	/* Wait until we get a valid response */
	rc = readx_poll_timeout(ioread32, dev->regbase + AMD_PMF_REGISTER_RESPONSE,
				val, val != 0, PMF_MSG_DELAY_MIN_US,
				PMF_MSG_DELAY_MIN_US * RESPONSE_REGISTER_LOOP_MAX);
	if (rc) {
		dev_err(dev->dev, "SMU response timed out\n");
		goto out_unlock;
	}

	/* Map the SMU result code to an errno; rc is 0 here on the OK path */
	switch (val) {
	case AMD_PMF_RESULT_OK:
		if (get) {
			/* PMFW may take longer time to return back the data */
			usleep_range(DELAY_MIN_US, 10 * DELAY_MAX_US);
			*data = amd_pmf_reg_read(dev, AMD_PMF_REGISTER_ARGUMENT);
		}
		break;
	case AMD_PMF_RESULT_CMD_REJECT_BUSY:
		dev_err(dev->dev, "SMU not ready. err: 0x%x\n", val);
		rc = -EBUSY;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_UNKNOWN:
		dev_err(dev->dev, "SMU cmd unknown. err: 0x%x\n", val);
		rc = -EINVAL;
		goto out_unlock;
	case AMD_PMF_RESULT_CMD_REJECT_PREREQ:
	case AMD_PMF_RESULT_FAILED:
	default:
		dev_err(dev->dev, "SMU cmd failed. err: 0x%x\n", val);
		rc = -EIO;
		goto out_unlock;
	}

out_unlock:
	mutex_unlock(&dev->lock);
	/* No-op unless dynamic debug enables it (__maybe_unused helper) */
	amd_pmf_dump_registers(dev);
	return rc;
}
247
/* Root-complex PCI IDs of the SoCs this driver supports; matched in probe */
static const struct pci_device_id pmf_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_RMB) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, AMD_CPU_ID_PS) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_1AH_M20H_ROOT) },
	{ }
};
254
255int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
256{
257	u64 phys_addr;
258	u32 hi, low;
259
260	/* Get Metrics Table Address */
261	if (alloc_buffer) {
262		dev->buf = kzalloc(sizeof(dev->m_table), GFP_KERNEL);
263		if (!dev->buf)
264			return -ENOMEM;
265	}
266
267	phys_addr = virt_to_phys(dev->buf);
268	hi = phys_addr >> 32;
269	low = phys_addr & GENMASK(31, 0);
270
271	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
272	amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
273
274	return 0;
275}
276
/*
 * Allocate the shared metrics buffer, hand its address to PMFW and start the
 * periodic metrics worker. Returns 0 on success or a negative errno.
 */
int amd_pmf_init_metrics_table(struct amd_pmf_dev *dev)
{
	int ret;

	INIT_DELAYED_WORK(&dev->work_buffer, amd_pmf_get_metrics);

	ret = amd_pmf_set_dram_addr(dev, true);
	if (ret)
		return ret;

	/*
	 * Start collecting the metrics data after a small delay
	 * or else, we might end up getting stale values from PMFW.
	 */
	schedule_delayed_work(&dev->work_buffer, msecs_to_jiffies(metrics_table_loop_ms * 3));

	return 0;
}
295
296static int amd_pmf_suspend_handler(struct device *dev)
297{
298	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
299
300	if (pdev->smart_pc_enabled)
301		cancel_delayed_work_sync(&pdev->pb_work);
302
303	if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
304		amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_SUSPEND);
305
306	return 0;
307}
308
309static int amd_pmf_resume_handler(struct device *dev)
310{
311	struct amd_pmf_dev *pdev = dev_get_drvdata(dev);
312	int ret;
313
314	if (pdev->buf) {
315		ret = amd_pmf_set_dram_addr(pdev, false);
316		if (ret)
317			return ret;
318	}
319
320	if (is_apmf_func_supported(pdev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
321		amd_pmf_notify_sbios_heartbeat_event_v2(pdev, ON_RESUME);
322
323	if (pdev->smart_pc_enabled)
324		schedule_delayed_work(&pdev->pb_work, msecs_to_jiffies(2000));
325
326	return 0;
327}
328
329static DEFINE_SIMPLE_DEV_PM_OPS(amd_pmf_pm, amd_pmf_suspend_handler, amd_pmf_resume_handler);
330
/*
 * Probe-time feature enablement, gated on the APMF functions the BIOS
 * advertises. Smart PC supersedes Auto Mode and CnQF; Auto Mode and CnQF are
 * mutually exclusive with Auto Mode taking precedence.
 */
static void amd_pmf_init_features(struct amd_pmf_dev *dev)
{
	int ret;

	/* Enable Static Slider */
	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
		amd_pmf_init_sps(dev);
		dev->pwr_src_notifier.notifier_call = amd_pmf_pwr_src_notify_call;
		power_supply_reg_notifier(&dev->pwr_src_notifier);
		dev_dbg(dev->dev, "SPS enabled and Platform Profiles registered\n");
	}

	amd_pmf_init_smart_pc(dev);
	if (dev->smart_pc_enabled) {
		dev_dbg(dev->dev, "Smart PC Solution Enabled\n");
		/* If Smart PC is enabled, no need to check for other features */
		return;
	}

	if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
		amd_pmf_init_auto_mode(dev);
		dev_dbg(dev->dev, "Auto Mode Init done\n");
	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
			  is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
		/* CnQF failure is non-fatal; the driver stays loaded without it */
		ret = amd_pmf_init_cnqf(dev);
		if (ret)
			dev_warn(dev->dev, "CnQF Init failed\n");
	}
}
361
362static void amd_pmf_deinit_features(struct amd_pmf_dev *dev)
363{
364	if (is_apmf_func_supported(dev, APMF_FUNC_STATIC_SLIDER_GRANULAR) ||
365	    is_apmf_func_supported(dev, APMF_FUNC_OS_POWER_SLIDER_UPDATE)) {
366		power_supply_unreg_notifier(&dev->pwr_src_notifier);
367		amd_pmf_deinit_sps(dev);
368	}
369
370	if (dev->smart_pc_enabled) {
371		amd_pmf_deinit_smart_pc(dev);
372	} else if (is_apmf_func_supported(dev, APMF_FUNC_AUTO_MODE)) {
373		amd_pmf_deinit_auto_mode(dev);
374	} else if (is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_AC) ||
375			  is_apmf_func_supported(dev, APMF_FUNC_DYN_SLIDER_DC)) {
376		amd_pmf_deinit_cnqf(dev);
377	}
378}
379
/* ACPI IDs of PMF devices; driver_data 0x100 marks an older, opt-in platform */
static const struct acpi_device_id amd_pmf_acpi_ids[] = {
	{"AMDI0100", 0x100},
	{"AMDI0102", 0},
	{"AMDI0103", 0},
	{ }
};
MODULE_DEVICE_TABLE(acpi, amd_pmf_acpi_ids);
387
388static int amd_pmf_probe(struct platform_device *pdev)
389{
390	const struct acpi_device_id *id;
391	struct amd_pmf_dev *dev;
392	struct pci_dev *rdev;
393	u32 base_addr_lo;
394	u32 base_addr_hi;
395	u64 base_addr;
396	u32 val;
397	int err;
398
399	id = acpi_match_device(amd_pmf_acpi_ids, &pdev->dev);
400	if (!id)
401		return -ENODEV;
402
403	if (id->driver_data == 0x100 && !force_load)
404		return -ENODEV;
405
406	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
407	if (!dev)
408		return -ENOMEM;
409
410	dev->dev = &pdev->dev;
411
412	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
413	if (!rdev || !pci_match_id(pmf_pci_ids, rdev)) {
414		pci_dev_put(rdev);
415		return -ENODEV;
416	}
417
418	dev->cpu_id = rdev->device;
419
420	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_LO, &val);
421	if (err) {
422		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_LO);
423		pci_dev_put(rdev);
424		return pcibios_err_to_errno(err);
425	}
426
427	base_addr_lo = val & AMD_PMF_BASE_ADDR_HI_MASK;
428
429	err = amd_smn_read(0, AMD_PMF_BASE_ADDR_HI, &val);
430	if (err) {
431		dev_err(dev->dev, "error in reading from 0x%x\n", AMD_PMF_BASE_ADDR_HI);
432		pci_dev_put(rdev);
433		return pcibios_err_to_errno(err);
434	}
435
436	base_addr_hi = val & AMD_PMF_BASE_ADDR_LO_MASK;
437	pci_dev_put(rdev);
438	base_addr = ((u64)base_addr_hi << 32 | base_addr_lo);
439
440	dev->regbase = devm_ioremap(dev->dev, base_addr + AMD_PMF_BASE_ADDR_OFFSET,
441				    AMD_PMF_MAPPING_SIZE);
442	if (!dev->regbase)
443		return -ENOMEM;
444
445	mutex_init(&dev->lock);
446	mutex_init(&dev->update_mutex);
447
448	amd_pmf_quirks_init(dev);
449	apmf_acpi_init(dev);
450	platform_set_drvdata(pdev, dev);
451	amd_pmf_dbgfs_register(dev);
452	amd_pmf_init_features(dev);
453	apmf_install_handler(dev);
454	if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
455		amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_LOAD);
456
457	dev_info(dev->dev, "registered PMF device successfully\n");
458
459	return 0;
460}
461
/* Driver removal: tear down features and resources in reverse probe order */
static void amd_pmf_remove(struct platform_device *pdev)
{
	struct amd_pmf_dev *dev = platform_get_drvdata(pdev);

	amd_pmf_deinit_features(dev);
	/* Tell SBIOS the driver is going away before ACPI teardown */
	if (is_apmf_func_supported(dev, APMF_FUNC_SBIOS_HEARTBEAT_V2))
		amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
	apmf_acpi_deinit(dev);
	amd_pmf_dbgfs_unregister(dev);
	mutex_destroy(&dev->lock);
	mutex_destroy(&dev->update_mutex);
	/* Metrics table buffer allocated in amd_pmf_set_dram_addr() */
	kfree(dev->buf);
}
475
/* sysfs attribute groups exposed on the platform device (CnQF toggle) */
static const struct attribute_group *amd_pmf_driver_groups[] = {
	&cnqf_feature_attribute_group,
	NULL,
};

static struct platform_driver amd_pmf_driver = {
	.driver = {
		.name = "amd-pmf",
		.acpi_match_table = amd_pmf_acpi_ids,
		.dev_groups = amd_pmf_driver_groups,
		.pm = pm_sleep_ptr(&amd_pmf_pm),
	},
	.probe = amd_pmf_probe,
	.remove_new = amd_pmf_remove,
};
module_platform_driver(amd_pmf_driver);
492
493MODULE_LICENSE("GPL");
494MODULE_DESCRIPTION("AMD Platform Management Framework Driver");
495MODULE_SOFTDEP("pre: amdtee");
496