// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Uncore Frequency Setting
 * Copyright (c) 2022, Intel Corporation.
 * All rights reserved.
 *
 * Provide interface to set MSR 620 at a granularity of per die. On CPU online,
 * one control CPU is identified per die to read/write limit. This control CPU
 * is changed, if the CPU state is changed to offline. When the last CPU is
 * offline in a die then remove the sysfs object for that die.
 * The majority of actual code is related to sysfs create and read/write
 * attributes.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
16
17#include <linux/cpu.h>
18#include <linux/module.h>
19#include <linux/slab.h>
20#include <linux/suspend.h>
21#include <asm/cpu_device_id.h>
22#include <asm/intel-family.h>
23
24#include "uncore-frequency-common.h"
25
/* Max instances for uncore data, one for each die */
static int uncore_max_entries __read_mostly;
/* Storage for uncore data for all instances */
static struct uncore_data *uncore_instances;
/* Stores the CPU mask of the target CPUs to use during uncore read/write */
static cpumask_t uncore_cpu_mask;
/* CPU online callback register instance */
static enum cpuhp_state uncore_hp_state __read_mostly;

/* MSR 0x620 holds the min/max ratio limits, MSR 0x621 the current ratio */
#define MSR_UNCORE_RATIO_LIMIT	0x620
#define MSR_UNCORE_PERF_STATUS	0x621
/* MSR ratio fields are in 100 MHz units; sysfs exposes frequencies in kHz */
#define UNCORE_FREQ_KHZ_MULTIPLIER	100000
38
39static int uncore_read_control_freq(struct uncore_data *data, unsigned int *min,
40				    unsigned int *max)
41{
42	u64 cap;
43	int ret;
44
45	if (data->control_cpu < 0)
46		return -ENXIO;
47
48	ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
49	if (ret)
50		return ret;
51
52	*max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
53	*min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER;
54
55	return 0;
56}
57
58static int uncore_write_control_freq(struct uncore_data *data, unsigned int input,
59				     unsigned int min_max)
60{
61	int ret;
62	u64 cap;
63
64	input /= UNCORE_FREQ_KHZ_MULTIPLIER;
65	if (!input || input > 0x7F)
66		return -EINVAL;
67
68	if (data->control_cpu < 0)
69		return -ENXIO;
70
71	ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap);
72	if (ret)
73		return ret;
74
75	if (min_max) {
76		cap &= ~0x7F;
77		cap |= input;
78	} else  {
79		cap &= ~GENMASK(14, 8);
80		cap |= (input << 8);
81	}
82
83	ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap);
84	if (ret)
85		return ret;
86
87	data->stored_uncore_data = cap;
88
89	return 0;
90}
91
92static int uncore_read_freq(struct uncore_data *data, unsigned int *freq)
93{
94	u64 ratio;
95	int ret;
96
97	if (data->control_cpu < 0)
98		return -ENXIO;
99
100	ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_PERF_STATUS, &ratio);
101	if (ret)
102		return ret;
103
104	*freq = (ratio & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER;
105
106	return 0;
107}
108
109/* Caller provides protection */
110static struct uncore_data *uncore_get_instance(unsigned int cpu)
111{
112	int id = topology_logical_die_id(cpu);
113
114	if (id >= 0 && id < uncore_max_entries)
115		return &uncore_instances[id];
116
117	return NULL;
118}
119
120static int uncore_event_cpu_online(unsigned int cpu)
121{
122	struct uncore_data *data;
123	int target;
124
125	/* Check if there is an online cpu in the package for uncore MSR */
126	target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu));
127	if (target < nr_cpu_ids)
128		return 0;
129
130	/* Use this CPU on this die as a control CPU */
131	cpumask_set_cpu(cpu, &uncore_cpu_mask);
132
133	data = uncore_get_instance(cpu);
134	if (!data)
135		return 0;
136
137	data->package_id = topology_physical_package_id(cpu);
138	data->die_id = topology_die_id(cpu);
139	data->domain_id = UNCORE_DOMAIN_ID_INVALID;
140
141	return uncore_freq_add_entry(data, cpu);
142}
143
144static int uncore_event_cpu_offline(unsigned int cpu)
145{
146	struct uncore_data *data;
147	int target;
148
149	data = uncore_get_instance(cpu);
150	if (!data)
151		return 0;
152
153	/* Check if existing cpu is used for uncore MSRs */
154	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
155		return 0;
156
157	/* Find a new cpu to set uncore MSR */
158	target = cpumask_any_but(topology_die_cpumask(cpu), cpu);
159
160	if (target < nr_cpu_ids) {
161		cpumask_set_cpu(target, &uncore_cpu_mask);
162		uncore_freq_add_entry(data, target);
163	} else {
164		uncore_freq_remove_die_entry(data);
165	}
166
167	return 0;
168}
169
170static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode,
171			    void *_unused)
172{
173	int i;
174
175	switch (mode) {
176	case PM_POST_HIBERNATION:
177	case PM_POST_RESTORE:
178	case PM_POST_SUSPEND:
179		for (i = 0; i < uncore_max_entries; ++i) {
180			struct uncore_data *data = &uncore_instances[i];
181
182			if (!data || !data->valid || !data->stored_uncore_data)
183				return 0;
184
185			wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT,
186				      data->stored_uncore_data);
187		}
188		break;
189	default:
190		break;
191	}
192	return 0;
193}
194
/* Restores uncore limits after resume from system-wide sleep states */
static struct notifier_block uncore_pm_nb = {
	.notifier_call = uncore_pm_notify,
};
198
/* CPU models known to expose MSR 0x620/0x621 uncore ratio controls */
static const struct x86_cpu_id intel_uncore_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_G,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(CANNONLAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(ARROWLAKE_H, NULL),
	X86_MATCH_INTEL_FAM6_MODEL(LUNARLAKE_M, NULL),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_uncore_cpu_ids);
231
/*
 * Module init: verify the CPU is supported, allocate one instance per
 * (package, die) pair, register the common sysfs interface, the CPU
 * hotplug callbacks and the PM notifier. Each failure path unwinds
 * everything set up before it, in reverse order.
 */
static int __init intel_uncore_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	/* The ratio-limit MSR is not virtualized; refuse to load in a guest */
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(intel_uncore_cpu_ids);
	if (!id)
		return -ENODEV;

	/* One uncore instance per logical die across all packages */
	uncore_max_entries = topology_max_packages() *
					topology_max_dies_per_package();
	uncore_instances = kcalloc(uncore_max_entries,
				   sizeof(*uncore_instances), GFP_KERNEL);
	if (!uncore_instances)
		return -ENOMEM;

	ret = uncore_freq_common_init(uncore_read_control_freq, uncore_write_control_freq,
				      uncore_read_freq);
	if (ret)
		goto err_free;

	/* Online callback runs for already-online CPUs, electing control CPUs */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/uncore-freq:online",
				uncore_event_cpu_online,
				uncore_event_cpu_offline);
	if (ret < 0)
		goto err_rem_kobj;

	/* Dynamic state id is needed later for cpuhp_remove_state() */
	uncore_hp_state = ret;

	ret = register_pm_notifier(&uncore_pm_nb);
	if (ret)
		goto err_rem_state;

	return 0;

err_rem_state:
	cpuhp_remove_state(uncore_hp_state);
err_rem_kobj:
	uncore_freq_common_exit();
err_free:
	kfree(uncore_instances);

	return ret;
}
module_init(intel_uncore_init)
281
282static void __exit intel_uncore_exit(void)
283{
284	int i;
285
286	unregister_pm_notifier(&uncore_pm_nb);
287	cpuhp_remove_state(uncore_hp_state);
288	for (i = 0; i < uncore_max_entries; ++i)
289		uncore_freq_remove_die_entry(&uncore_instances[i]);
290	uncore_freq_common_exit();
291	kfree(uncore_instances);
292}
293module_exit(intel_uncore_exit)
294
/* uncore_freq_* helpers live in a namespaced common module */
MODULE_IMPORT_NS(INTEL_UNCORE_FREQUENCY);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver");
298