/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ENERGY_MODEL_H
#define _LINUX_ENERGY_MODEL_H
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/jump_label.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/rcupdate.h>
#include <linux/sched/cpufreq.h>
#include <linux/sched/topology.h>
#include <linux/types.h>

/**
 * struct em_perf_state - Performance state of a performance domain
 * @performance:	CPU performance (capacity) at a given frequency
 * @frequency:	The frequency in KHz, for consistency with CPUFreq
 * @power:	The power consumed at this level (by 1 CPU or by a registered
 *		device). It can be a total power: static and dynamic.
 * @cost:	The cost coefficient associated with this level, used during
 *		energy calculation. Equal to: power * max_frequency / frequency
 * @flags:	see "em_perf_state flags" description below.
 */
struct em_perf_state {
	unsigned long performance;
	unsigned long frequency;
	unsigned long power;
	unsigned long cost;
	unsigned long flags;
};
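
/*
 * Worked example (illustrative, hypothetical numbers, not part of the
 * original header): in a domain whose highest frequency is 2000000 KHz,
 * a state running at 1000000 KHz and consuming 300000 uW would get:
 *
 *   cost = power * max_frequency / frequency
 *        = 300000 * 2000000 / 1000000
 *        = 600000
 *
 * The cost is precomputed so that the scheduler's energy estimate only
 * needs one multiplication per performance domain (see em_cpu_energy()).
 */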

/*
 * em_perf_state flags:
 *
 * EM_PERF_STATE_INEFFICIENT: The performance state is inefficient. There is,
 * in this em_perf_domain, another performance state with a higher frequency
 * but a lower or equal power cost. Such inefficient states are ignored when
 * using the em_pd_get_efficient_*() functions.
 */
#define EM_PERF_STATE_INEFFICIENT BIT(0)

/**
 * struct em_perf_table - Performance states table
 * @rcu:	RCU used for safe access and destruction
 * @kref:	Reference counter to track the users
 * @state:	List of performance states, in ascending order
 */
struct em_perf_table {
	struct rcu_head rcu;
	struct kref kref;
	struct em_perf_state state[];
};

/**
 * struct em_perf_domain - Performance domain
 * @em_table:		Pointer to the runtime modifiable em_perf_table
 * @nr_perf_states:	Number of performance states
 * @flags:		See "em_perf_domain flags"
 * @cpus:		Cpumask covering the CPUs of the domain. It's here
 *			for performance reasons to avoid potential cache
 *			misses during energy calculations in the scheduler
 *			and simplifies allocating/freeing that memory region.
 *
 * In the case of a CPU device, a "performance domain" represents a group of
 * CPUs whose performance is scaled together. All CPUs of a performance domain
 * must have the same micro-architecture. Performance domains often have
 * a 1-to-1 mapping with CPUFreq policies. In the case of other devices the
 * @cpus field is unused.
 */
struct em_perf_domain {
	struct em_perf_table __rcu *em_table;
	int nr_perf_states;
	unsigned long flags;
	unsigned long cpus[];
};

/*
 *  em_perf_domain flags:
 *
 *  EM_PERF_DOMAIN_MICROWATTS: The power values are in micro-Watts
 *  (as opposed to some other, abstract scale).
 *
 *  EM_PERF_DOMAIN_SKIP_INEFFICIENCIES: Skip inefficient states when estimating
 *  energy consumption.
 *
 *  EM_PERF_DOMAIN_ARTIFICIAL: The power values are artificial and might have
 *  been created by a platform missing real power information.
 */
#define EM_PERF_DOMAIN_MICROWATTS BIT(0)
#define EM_PERF_DOMAIN_SKIP_INEFFICIENCIES BIT(1)
#define EM_PERF_DOMAIN_ARTIFICIAL BIT(2)

#define em_span_cpus(em) (to_cpumask((em)->cpus))
#define em_is_artificial(em) ((em)->flags & EM_PERF_DOMAIN_ARTIFICIAL)
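
/*
 * Illustrative sketch (not part of the original header): how a CPU's
 * performance domain could be inspected with em_cpu_get() and the helper
 * macros above. The function name is a hypothetical placeholder.
 *
 *	static void em_example_print_domain(int cpu)
 *	{
 *		struct em_perf_domain *pd = em_cpu_get(cpu);
 *
 *		if (!pd)
 *			return;
 *
 *		pr_info("CPU%d: %d perf states, artificial=%d, CPUs=%*pbl\n",
 *			cpu, pd->nr_perf_states, !!em_is_artificial(pd),
 *			cpumask_pr_args(em_span_cpus(pd)));
 *	}
 */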

#ifdef CONFIG_ENERGY_MODEL
/*
 * The max power value in micro-Watts. The limit of 64 Watts is set as
 * a safety net so that multiplications do not overflow on 32bit platforms.
 * The 32bit value limit for the total Perf Domain power implies a limit of
 * at most 64 CPUs in such a domain.
 */
#define EM_MAX_POWER (64000000) /* 64 Watts */

/*
 * To avoid possible energy estimation overflow on 32bit machines, limit
 * the number of CPUs in the Perf. Domain.
 * 64bit machines are safe, thus a big number can be used.
 */
#ifdef CONFIG_64BIT
#define EM_MAX_NUM_CPUS 4096
#else
#define EM_MAX_NUM_CPUS 16
#endif
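
/*
 * Worked example of the headroom these limits give (illustrative): with each
 * CPU capped at EM_MAX_POWER = 64,000,000 uW, a domain-wide sum stays below
 * the 32bit limit (4,294,967,295) for up to
 *
 *   4,294,967,295 / 64,000,000 ~= 67 CPUs,
 *
 * hence the "at most 64 CPUs" mentioned above. The stricter
 * EM_MAX_NUM_CPUS = 16 on 32bit keeps the sum at no more than
 * 16 * 64,000,000 = 1,024,000,000, well below the overflow point.
 */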

struct em_data_callback {
	/**
	 * active_power() - Provide power at the next performance state of
	 *		a device
	 * @dev		: Device for which we do this operation (can be a CPU)
	 * @power	: Active power at the performance state
	 *		(modified)
	 * @freq	: Frequency at the performance state in kHz
	 *		(modified)
	 *
	 * active_power() must find the lowest performance state of 'dev' above
	 * 'freq' and update 'power' and 'freq' to the matching active power
	 * and frequency.
	 *
	 * In case of CPUs, the power is the one of a single CPU in the domain,
	 * expressed in micro-Watts or an abstract scale. It is expected to
	 * fit in the [0, EM_MAX_POWER] range.
	 *
	 * Return 0 on success.
	 */
	int (*active_power)(struct device *dev, unsigned long *power,
			    unsigned long *freq);

	/**
	 * get_cost() - Provide the cost at the given performance state of
	 *		a device
	 * @dev		: Device for which we do this operation (can be a CPU)
	 * @freq	: Frequency at the performance state in kHz
	 * @cost	: The cost value for the performance state
	 *		(modified)
	 *
	 * In case of CPUs, the cost is the one of a single CPU in the domain.
	 * It is expected to fit in the [0, EM_MAX_POWER] range due to internal
	 * usage in EAS calculation.
	 *
	 * Return 0 on success, or appropriate error value in case of failure.
	 */
	int (*get_cost)(struct device *dev, unsigned long freq,
			unsigned long *cost);
};
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) ((em_cb).active_power = cb)
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb)	\
	{ .active_power = _active_power_cb,		\
	  .get_cost = _cost_cb }
#define EM_DATA_CB(_active_power_cb)			\
		EM_ADV_DATA_CB(_active_power_cb, NULL)
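
/*
 * Illustrative sketch (not part of the original header): a driver could
 * wire an active_power() callback into the EM core via EM_DATA_CB() and
 * em_dev_register_perf_domain(). All names and values below are
 * hypothetical placeholders.
 *
 *	static int example_active_power(struct device *dev,
 *					unsigned long *power,
 *					unsigned long *freq)
 *	{
 *		// Look up the lowest perf state at or above *freq, then
 *		// report its frequency (kHz) and per-CPU power (uW here).
 *		*freq = 1000000;
 *		*power = 300000;
 *		return 0;
 *	}
 *
 *	static struct em_data_callback em_cb = EM_DATA_CB(example_active_power);
 *
 *	// nr_states perf states, CPUs in 'cpus', power expressed in uW:
 *	ret = em_dev_register_perf_domain(cpu_dev, nr_states, &em_cb,
 *					  cpus, true);
 */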

struct em_perf_domain *em_cpu_get(int cpu);
struct em_perf_domain *em_pd_get(struct device *dev);
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table __rcu *new_table);
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *span,
				bool microwatts);
void em_dev_unregister_perf_domain(struct device *dev);
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd);
void em_table_free(struct em_perf_table __rcu *table);
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states);
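
/*
 * Illustrative sketch (not part of the original header) of a runtime update
 * of the performance states table using the helpers above: allocate a new
 * table, fill in updated power values, recompute the costs, then publish the
 * table. Error handling is trimmed and example_new_power() is hypothetical.
 *
 *	struct em_perf_domain *pd = em_pd_get(dev);
 *	struct em_perf_table __rcu *new_table;
 *	struct em_perf_state *table;
 *	int i, ret;
 *
 *	new_table = em_table_alloc(pd);
 *	if (!new_table)
 *		return -ENOMEM;
 *
 *	table = new_table->state;
 *	rcu_read_lock();
 *	memcpy(table, em_perf_state_from_pd(pd),
 *	       sizeof(*table) * pd->nr_perf_states);
 *	rcu_read_unlock();
 *
 *	for (i = 0; i < pd->nr_perf_states; i++)
 *		table[i].power = example_new_power(i);
 *
 *	ret = em_dev_compute_costs(dev, table, pd->nr_perf_states);
 *	if (!ret)
 *		ret = em_dev_update_perf_domain(dev, new_table);
 *
 *	em_table_free(new_table);
 */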

/**
 * em_pd_get_efficient_state() - Get an efficient performance state from the EM
 * @table:		List of performance states, in ascending order
 * @nr_perf_states:	Number of performance states
 * @max_util:		Max utilization to map with the EM
 * @pd_flags:		Performance Domain flags
 *
 * It is called from the scheduler code quite frequently and as a consequence
 * doesn't implement any checks.
 *
 * Return: An efficient performance state id, high enough to meet @max_util
 * requirement.
 */
static inline int
em_pd_get_efficient_state(struct em_perf_state *table, int nr_perf_states,
			  unsigned long max_util, unsigned long pd_flags)
{
	struct em_perf_state *ps;
	int i;

	for (i = 0; i < nr_perf_states; i++) {
		ps = &table[i];
		if (ps->performance >= max_util) {
			if (pd_flags & EM_PERF_DOMAIN_SKIP_INEFFICIENCIES &&
			    ps->flags & EM_PERF_STATE_INEFFICIENT)
				continue;
			return i;
		}
	}

	return nr_perf_states - 1;
}

/**
 * em_cpu_energy() - Estimates the energy consumed by the CPUs of a
 *		performance domain
 * @pd		: performance domain for which energy has to be estimated
 * @max_util	: highest utilization among CPUs of the domain
 * @sum_util	: sum of the utilization of all CPUs in the domain
 * @allowed_cpu_cap	: maximum allowed CPU capacity for the @pd, which
 *			  might reflect reduced frequency (due to thermal)
 *
 * This function must be used only for CPU devices. There is no validation
 * that the EM is of CPU type or that a cpumask is allocated. It is called
 * from the scheduler code quite frequently, which is why there are no checks.
 *
 * Return: the sum of the energy consumed by the CPUs of the domain assuming
 * a capacity state satisfying the max utilization of the domain.
 */
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
				unsigned long max_util, unsigned long sum_util,
				unsigned long allowed_cpu_cap)
{
	struct em_perf_table *em_table;
	struct em_perf_state *ps;
	int i;

#ifdef CONFIG_SCHED_DEBUG
	WARN_ONCE(!rcu_read_lock_held(), "EM: rcu read lock needed\n");
#endif

	if (!sum_util)
		return 0;

	/*
	 * In order to predict the performance state, map the utilization of
	 * the most utilized CPU of the performance domain to a requested
	 * performance, like schedutil. Take also into account that the real
	 * performance might be set lower (due to thermal capping). Thus, clamp
	 * max utilization to the allowed CPU capacity before calculating
	 * effective performance.
	 */
	max_util = min(max_util, allowed_cpu_cap);

	/*
	 * Find the lowest performance state of the Energy Model above the
	 * requested performance.
	 */
	em_table = rcu_dereference(pd->em_table);
	i = em_pd_get_efficient_state(em_table->state, pd->nr_perf_states,
				      max_util, pd->flags);
	ps = &em_table->state[i];

	/*
	 * The performance (capacity) of a CPU in the domain at the performance
	 * state (ps) can be computed as:
	 *
	 *                     ps->freq * scale_cpu
	 *   ps->performance = --------------------                  (1)
	 *                         cpu_max_freq
	 *
	 * So, ignoring the costs of idle states (which are not available in
	 * the EM), the energy consumed by this CPU at that performance state
	 * is estimated as:
	 *
	 *             ps->power * cpu_util
	 *   cpu_nrg = --------------------                          (2)
	 *               ps->performance
	 *
	 * since 'cpu_util / ps->performance' represents its percentage of busy
	 * time.
	 *
	 *   NOTE: Although the result of this computation actually is in
	 *         units of power, it can be manipulated as an energy value
	 *         over a scheduling period, since it is assumed to be
	 *         constant during that interval.
	 *
	 * By injecting (1) in (2), 'cpu_nrg' can be re-expressed as a product
	 * of two terms:
	 *
	 *             ps->power * cpu_max_freq
	 *   cpu_nrg = ------------------------ * cpu_util           (3)
	 *               ps->freq * scale_cpu
	 *
	 * The first term is static, and is stored in the em_perf_state struct
	 * as 'ps->cost'.
	 *
	 * Since all CPUs of the domain have the same micro-architecture, they
	 * share the same 'ps->cost', and the same CPU capacity. Hence, the
	 * total energy of the domain (which is the simple sum of the energy of
	 * all of its CPUs) can be factorized as:
	 *
	 *   pd_nrg = ps->cost * \Sum cpu_util                       (4)
	 */
	return ps->cost * sum_util;
}
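
/*
 * Worked example (illustrative, hypothetical numbers, not part of the
 * original header): with ps->cost = 600000 for the selected state and
 * sum_util = 800, the estimate from (4) is
 *
 *   pd_nrg = 600000 * 800 = 480000000
 *
 * The value is meaningful relative to other such estimates on the same
 * scale, which is how EAS compares candidate task placements.
 */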

/**
 * em_pd_nr_perf_states() - Get the number of performance states of a perf.
 *				domain
 * @pd		: performance domain for which this must be done
 *
 * Return: the number of performance states in the performance domain table
 */
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return pd->nr_perf_states;
}

/**
 * em_perf_state_from_pd() - Get the performance states table of perf.
 *				domain
 * @pd		: performance domain for which this must be done
 *
 * To use this function, rcu_read_lock() must be held. After the usage
 * of the performance states table is finished, rcu_read_unlock() should
 * be called.
 *
 * Return: the pointer to performance states table of the performance domain
 */
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
	return rcu_dereference(pd->em_table)->state;
}
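
/*
 * Illustrative sketch (not part of the original header): accessing the table
 * under the required RCU read-side critical section. The function name is a
 * hypothetical placeholder.
 *
 *	static unsigned long em_example_max_power(struct em_perf_domain *pd)
 *	{
 *		struct em_perf_state *table;
 *		unsigned long power;
 *
 *		rcu_read_lock();
 *		table = em_perf_state_from_pd(pd);
 *		power = table[pd->nr_perf_states - 1].power;
 *		rcu_read_unlock();
 *
 *		return power;
 *	}
 */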

#else
struct em_data_callback {};
#define EM_ADV_DATA_CB(_active_power_cb, _cost_cb) { }
#define EM_DATA_CB(_active_power_cb) { }
#define EM_SET_ACTIVE_POWER_CB(em_cb, cb) do { } while (0)

static inline
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *span,
				bool microwatts)
{
	return -EINVAL;
}
static inline void em_dev_unregister_perf_domain(struct device *dev)
{
}
static inline struct em_perf_domain *em_cpu_get(int cpu)
{
	return NULL;
}
static inline struct em_perf_domain *em_pd_get(struct device *dev)
{
	return NULL;
}
static inline unsigned long em_cpu_energy(struct em_perf_domain *pd,
			unsigned long max_util, unsigned long sum_util,
			unsigned long allowed_cpu_cap)
{
	return 0;
}
static inline int em_pd_nr_perf_states(struct em_perf_domain *pd)
{
	return 0;
}
static inline
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
{
	return NULL;
}
static inline void em_table_free(struct em_perf_table __rcu *table) {}
static inline
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table __rcu *new_table)
{
	return -EINVAL;
}
static inline
struct em_perf_state *em_perf_state_from_pd(struct em_perf_domain *pd)
{
	return NULL;
}
static inline
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states)
{
	return -EINVAL;
}
#endif

#endif