1/*
2 * Copyright 2020-2022, Jérôme Duval, jerome.duval@gmail.com.
3 * Copyright 2013, Haiku, Inc. All Rights Reserved.
4 * Distributed under the terms of the MIT License.
5 *
6 * Authors:
7 *		Paweł Dziepak, <pdziepak@quarnos.org>
8 */
9
10
11#include <cpufreq.h>
12#include <KernelExport.h>
13
14#include <arch_cpu.h>
15#include <cpu.h>
16#include <smp.h>
17#include <util/AutoLock.h>
18
19
#define AMD_PSTATES_MODULE_NAME	CPUFREQ_MODULES_PREFIX "/amd_pstates/v1"


// Performance levels decoded from MSR_AMD_CPPC_CAP1 in set_normal_pstate().
static uint32 sHWPLowest;		// lowest supported performance level
static uint32 sHWPGuaranteed;	// nominal (guaranteed, non-boost) level
static uint32 sHWPEfficient;	// lowest non-linear (most efficient) level
static uint32 sHWPHighest;		// highest (boost) level

// When true (power-saving scheduler mode) the requested maximum performance
// is capped at the guaranteed level, i.e. boost is avoided.
static bool sAvoidBoost = true;


// Forward declaration: referenced by pstates_set_scheduler_mode() before its
// definition below.
static void set_normal_pstate(void* /* dummy */, int cpu);
32
33
34static void
35pstates_set_scheduler_mode(scheduler_mode mode)
36{
37	sAvoidBoost = mode == SCHEDULER_MODE_POWER_SAVING;
38	call_all_cpus(set_normal_pstate, NULL);
39}
40
41
42static status_t
43pstates_increase_performance(int delta)
44{
45	return B_NOT_SUPPORTED;
46}
47
48
49static status_t
50pstates_decrease_performance(int delta)
51{
52	return B_NOT_SUPPORTED;
53}
54
55
56static bool
57is_cpu_model_supported(cpu_ent* cpu)
58{
59	if (cpu->arch.vendor != VENDOR_AMD)
60		return false;
61
62	return true;
63}
64
65
static void
set_normal_pstate(void* /* dummy */, int cpu)
{
	// Runs on each CPU in turn (via call_all_cpus*): enables CPPC and
	// programs the performance request window for the current scheduler
	// mode (sAvoidBoost).
	x86_write_msr(MSR_AMD_CPPC_ENABLE, 1);

	// Decode the capability levels from this CPU's CAP1 MSR.
	// NOTE(review): these globals are written concurrently by every CPU;
	// presumably all CPUs report identical values so the race is benign —
	// confirm on heterogeneous parts.
	uint64 cap1 = x86_read_msr(MSR_AMD_CPPC_CAP1);
	sHWPLowest = AMD_CPPC_LOWEST_PERF(cap1);
	sHWPEfficient = AMD_CPPC_LOWNONLIN_PERF(cap1);
	sHWPGuaranteed = AMD_CPPC_NOMINAL_PERF(cap1);
	sHWPHighest = AMD_CPPC_HIGHEST_PERF(cap1);

	// Build the request: minimum at the most efficient level; maximum at
	// the nominal (non-boost) level when avoiding boost, else the highest
	// (boost) level; energy/performance preference chosen to match.
	uint64 request = AMD_CPPC_MIN_PERF(sHWPEfficient);
	request |= AMD_CPPC_MAX_PERF(sAvoidBoost ? sHWPGuaranteed : sHWPHighest);
	request |= AMD_CPPC_EPP_PERF(
		sAvoidBoost ? AMD_CPPC_EPP_BALANCE_PERFORMANCE : AMD_CPPC_EPP_PERFORMANCE);
	// Only the low 32 bits of the request value are written here.
	x86_write_msr(MSR_AMD_CPPC_REQ, request & 0xffffffff);
}
83
84
85static status_t
86init_pstates()
87{
88	if (!x86_check_feature(IA32_FEATURE_CPPC, FEATURE_EXT_8_EBX))
89		return B_ERROR;
90
91	int32 cpuCount = smp_get_num_cpus();
92	for (int32 i = 0; i < cpuCount; i++) {
93		if (!is_cpu_model_supported(&gCPU[i]))
94			return B_ERROR;
95	}
96
97	dprintf("using AMD P-States (capabilities: 0x%08" B_PRIx64 "\n",
98		x86_read_msr(MSR_AMD_CPPC_CAP1));
99
100	pstates_set_scheduler_mode(SCHEDULER_MODE_LOW_LATENCY);
101
102	call_all_cpus_sync(set_normal_pstate, NULL);
103	return B_OK;
104}
105
106
107static status_t
108uninit_pstates()
109{
110	call_all_cpus_sync(set_normal_pstate, NULL);
111
112	return B_OK;
113}
114
115
116static status_t
117std_ops(int32 op, ...)
118{
119	switch (op) {
120		case B_MODULE_INIT:
121			return init_pstates();
122
123		case B_MODULE_UNINIT:
124			uninit_pstates();
125			return B_OK;
126	}
127
128	return B_ERROR;
129}
130
131
// cpufreq module descriptor: module_info header, rank, and the hooks the
// scheduler uses to steer CPU frequency.
static cpufreq_module_info sAMDPStates = {
	{
		AMD_PSTATES_MODULE_NAME,
		0,
		std_ops,
	},

	1.0f,	// rank, used to pick among available cpufreq modules

	pstates_set_scheduler_mode,

	pstates_increase_performance,
	pstates_decrease_performance,
};
146
147
// Module export table: the kernel discovers this cpufreq module through this
// NULL-terminated list.
module_info* modules[] = {
	(module_info*)&sAMDPStates,
	NULL
};
152
153