// SPDX-License-Identifier: GPL-2.0
/*
 * Intel Turbo Boost Max Technology 3.0 legacy (non HWP) enumeration driver
 * Copyright (c) 2017, Intel Corporation.
 * All rights reserved.
 *
 * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpufeature.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>

#define MSR_OC_MAILBOX			0x150
#define MSR_OC_MAILBOX_CMD_OFFSET	32
#define MSR_OC_MAILBOX_RSP_OFFSET	32
#define MSR_OC_MAILBOX_BUSY_BIT		63
#define OC_MAILBOX_FC_CONTROL_CMD	0x1C

/*
 * Typical latency for a mailbox response is ~3 us, and it takes a further
 * ~3 us to process the mailbox read after issuing the mailbox write on a
 * Broadwell 3.4 GHz system. So most of the time the first mailbox read
 * already has the response, but retry twice to cover boundary cases.
 */
#define OC_MAILBOX_RETRY_COUNT		2

static int get_oc_core_priority(unsigned int cpu)
{
	u64 value, cmd = OC_MAILBOX_FC_CONTROL_CMD;
	int ret, i;

	/* Issue favored core read command */
	value = cmd << MSR_OC_MAILBOX_CMD_OFFSET;
	/* Set the busy bit to indicate OS is trying to issue command */
	value |= BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT);
	ret = wrmsrl_safe(MSR_OC_MAILBOX, value);
	if (ret) {
		pr_debug("cpu %d OC mailbox write failed\n", cpu);
		return ret;
	}

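	/*
	 * Poll the mailbox for the response; the busy bit is cleared once
	 * the command has been processed.
	 */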
	for (i = 0; i < OC_MAILBOX_RETRY_COUNT; ++i) {
		ret = rdmsrl_safe(MSR_OC_MAILBOX, &value);
		if (ret) {
			pr_debug("cpu %d OC mailbox read failed\n", cpu);
			break;
		}

		if (value & BIT_ULL(MSR_OC_MAILBOX_BUSY_BIT)) {
			pr_debug("cpu %d OC mailbox still processing\n", cpu);
			ret = -EBUSY;
			continue;
		}

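		/* A non-zero response code in bits 39:32 means the command failed */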
		if ((value >> MSR_OC_MAILBOX_RSP_OFFSET) & 0xff) {
			pr_debug("cpu %d OC mailbox cmd failed\n", cpu);
			ret = -ENXIO;
			break;
		}

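		/* Bits 7:0 of the response carry this core's favored-core priority */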
		ret = value & 0xff;
		pr_debug("cpu %d max_ratio %d\n", cpu, ret);
		break;
	}

	return ret;
}

/*
 * The work item is needed to avoid CPU hotplug locking issues. The function
 * itmt_legacy_cpu_online() is the CPU online callback, so it can't call
 * sched_set_itmt_support() directly as that function will acquire hotplug
 * locks in its path.
 */
static void itmt_legacy_work_fn(struct work_struct *work)
{
	sched_set_itmt_support();
}

static DECLARE_WORK(sched_itmt_work, itmt_legacy_work_fn);

static int itmt_legacy_cpu_online(unsigned int cpu)
{
	static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
	int priority;

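	/* A mailbox failure is not fatal; the CPU simply gets no ITMT priority */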
	priority = get_oc_core_priority(cpu);
	if (priority < 0)
		return 0;

	sched_set_itmt_core_prio(priority, cpu);

	/* Enable ITMT feature when a core with different priority is found */
	if (max_highest_perf <= min_highest_perf) {
		if (priority > max_highest_perf)
			max_highest_perf = priority;

		if (priority < min_highest_perf)
			min_highest_perf = priority;

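		/* Priorities differ across cores, so asymmetric packing is useful */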
		if (max_highest_perf > min_highest_perf)
			schedule_work(&sched_itmt_work);
	}

	return 0;
}

static const struct x86_cpu_id itmt_legacy_cpu_ids[] = {
	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,	NULL),
	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X,	NULL),
	{}
};

static int __init itmt_legacy_init(void)
{
	const struct x86_cpu_id *id;
	int ret;

	id = x86_match_cpu(itmt_legacy_cpu_ids);
	if (!id)
		return -ENODEV;

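	/* Re-read each CPU's favored-core priority whenever it comes online */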
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"platform/x86/turbo_max_3:online",
				itmt_legacy_cpu_online, NULL);
	if (ret < 0)
		return ret;

	return 0;
}
late_initcall(itmt_legacy_init)