// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/devfreq/governor_passive.c
 *
 * Copyright (C) 2016 Samsung Electronics
 * Author: Chanwoo Choi <cw00.choi@samsung.com>
 * Author: MyungJoo Ham <myungjoo.ham@samsung.com>
 */

#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/devfreq.h>
#include <linux/units.h>
#include "governor.h"
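/*
 * Look up the devfreq_cpu_data entry that matches the given cpufreq policy.
 * Entries are keyed by the first CPU of the policy's related_cpus mask.
 */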
static struct devfreq_cpu_data *
get_parent_cpu_data(struct devfreq_passive_data *p_data,
		    struct cpufreq_policy *policy)
{
	struct devfreq_cpu_data *parent_cpu_data;

	if (!p_data || !policy)
		return NULL;

	list_for_each_entry(parent_cpu_data, &p_data->cpu_data_list, node)
		if (parent_cpu_data->first_cpu == cpumask_first(policy->related_cpus))
			return parent_cpu_data;

	return NULL;
}
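/*
 * Release every devfreq_cpu_data entry on the list, dropping the OPP table
 * reference taken when the entry was created.
 */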
static void delete_parent_cpu_data(struct devfreq_passive_data *p_data)
{
	struct devfreq_cpu_data *parent_cpu_data, *tmp;

	list_for_each_entry_safe(parent_cpu_data, tmp, &p_data->cpu_data_list, node) {
		list_del(&parent_cpu_data->node);

		if (parent_cpu_data->opp_table)
			dev_pm_opp_put_opp_table(parent_cpu_data->opp_table);

		kfree(parent_cpu_data);
	}
}
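/*
 * Translate the parent device's frequency into this device's frequency via
 * the required-opps link between the two OPP tables. Returns 0 when the
 * translation is not possible, so callers can fall back to interpolation.
 */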
static unsigned long get_target_freq_by_required_opp(struct device *p_dev,
						struct opp_table *p_opp_table,
						struct opp_table *opp_table,
						unsigned long *freq)
{
	struct dev_pm_opp *opp = NULL, *p_opp = NULL;
	unsigned long target_freq;

	if (!p_dev || !p_opp_table || !opp_table || !freq)
		return 0;

	p_opp = devfreq_recommended_opp(p_dev, freq, 0);
	if (IS_ERR(p_opp))
		return 0;

	opp = dev_pm_opp_xlate_required_opp(p_opp_table, opp_table, p_opp);
	dev_pm_opp_put(p_opp);

	if (IS_ERR(opp))
		return 0;

	target_freq = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);

	return target_freq;
}
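/*
 * Derive the device's target frequency from the current frequency of each
 * online CPU and take the maximum. Prefer the required-opps translation;
 * if that yields nothing, interpolate the CPU's position within its
 * [min, max] range onto the device's frequency range.
 */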
static int get_target_freq_with_cpufreq(struct devfreq *devfreq,
					unsigned long *target_freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	unsigned long cpu, cpu_cur, cpu_min, cpu_max, cpu_percent;
	unsigned long dev_min, dev_max;
	unsigned long freq = 0;
	int ret = 0;

	for_each_online_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EINVAL;
			continue;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (!parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Get the target frequency via required-opps */
		cpu_cur = parent_cpu_data->cur_freq * HZ_PER_KHZ;
		freq = get_target_freq_by_required_opp(parent_cpu_data->dev,
					parent_cpu_data->opp_table,
					devfreq->opp_table, &cpu_cur);
		if (freq) {
			*target_freq = max(freq, *target_freq);
			cpufreq_cpu_put(policy);
			continue;
		}

		/* Use interpolation if required-opps are not available */
		devfreq_get_freq_range(devfreq, &dev_min, &dev_max);

		cpu_min = parent_cpu_data->min_freq;
		cpu_max = parent_cpu_data->max_freq;
		cpu_cur = parent_cpu_data->cur_freq;

		cpu_percent = ((cpu_cur - cpu_min) * 100) / (cpu_max - cpu_min);
		freq = dev_min + mult_frac(dev_max - dev_min, cpu_percent, 100);

		*target_freq = max(freq, *target_freq);
		cpufreq_cpu_put(policy);
	}

	return ret;
}
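/*
 * Derive the target frequency from a parent devfreq device. Prefer the
 * required-opps translation; otherwise map the parent's frequency-table
 * index onto this device's frequency table, clamping to the last entry.
 */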
static int get_target_freq_with_devfreq(struct devfreq *devfreq,
					unsigned long *freq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent_devfreq = (struct devfreq *)p_data->parent;
	unsigned long child_freq = ULONG_MAX;
	int i, count;

	/* Get the target frequency via required-opps */
	child_freq = get_target_freq_by_required_opp(parent_devfreq->dev.parent,
						parent_devfreq->opp_table,
						devfreq->opp_table, freq);
	if (child_freq)
		goto out;

	/* Use interpolation if required-opps are not available */
	for (i = 0; i < parent_devfreq->max_state; i++)
		if (parent_devfreq->freq_table[i] == *freq)
			break;

	if (i == parent_devfreq->max_state)
		return -EINVAL;

	if (i < devfreq->max_state) {
		child_freq = devfreq->freq_table[i];
	} else {
		count = devfreq->max_state;
		child_freq = devfreq->freq_table[count - 1];
	}

out:
	*freq = child_freq;

	return 0;
}
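/*
 * Governor entry point for choosing the next frequency: use the driver's
 * own get_target_freq() callback when one is provided, otherwise follow
 * the parent devfreq device or the parent cpufreq policies.
 */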
static int devfreq_passive_get_target_freq(struct devfreq *devfreq,
					   unsigned long *freq)
{
	struct devfreq_passive_data *p_data =
				(struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (!p_data)
		return -EINVAL;

	/*
	 * If the devfreq device using the passive governor provides its own
	 * method to determine the next frequency, use the get_target_freq()
	 * callback of struct devfreq_passive_data instead.
	 */
	if (p_data->get_target_freq)
		return p_data->get_target_freq(devfreq, freq);

	switch (p_data->parent_type) {
	case DEVFREQ_PARENT_DEV:
		ret = get_target_freq_with_devfreq(devfreq, freq);
		break;
	case CPUFREQ_PARENT_DEV:
		ret = get_target_freq_with_cpufreq(devfreq, freq);
		break;
	default:
		ret = -EINVAL;
		dev_err(&devfreq->dev, "Invalid parent type\n");
		break;
	}

	return ret;
}
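/*
 * cpufreq transition notifier: on CPUFREQ_POSTCHANGE, record the new CPU
 * frequency and re-evaluate the passive device's target frequency. The
 * cached frequency is rolled back if the update fails.
 */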
static int cpufreq_passive_notifier_call(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct devfreq_passive_data *p_data =
			container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)p_data->this;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_freqs *freqs = ptr;
	unsigned int cur_freq;
	int ret;

	if (event != CPUFREQ_POSTCHANGE || !freqs)
		return 0;

	parent_cpu_data = get_parent_cpu_data(p_data, freqs->policy);
	if (!parent_cpu_data || parent_cpu_data->cur_freq == freqs->new)
		return 0;

	cur_freq = parent_cpu_data->cur_freq;
	parent_cpu_data->cur_freq = freqs->new;

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, freqs->new);
	mutex_unlock(&devfreq->lock);
	if (ret) {
		parent_cpu_data->cur_freq = cur_freq;
		dev_err(&devfreq->dev, "failed to update the frequency.\n");
		return ret;
	}

	return 0;
}
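/* Tear down the cpufreq transition notifier and the per-policy CPU data. */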
static int cpufreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret;

	if (p_data->nb.notifier_call) {
		ret = cpufreq_unregister_notifier(&p_data->nb,
					CPUFREQ_TRANSITION_NOTIFIER);
		if (ret < 0)
			return ret;
	}

	delete_parent_cpu_data(p_data);

	return 0;
}
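/*
 * Register a cpufreq transition notifier and build one devfreq_cpu_data
 * entry per cpufreq policy (keyed by its first related CPU), caching the
 * policy's current, min and max frequencies and the CPU's OPP table.
 * Returns -EPROBE_DEFER while a CPU's policy is not yet available.
 */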
static int cpufreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct device *dev = devfreq->dev.parent;
	struct opp_table *opp_table = NULL;
	struct devfreq_cpu_data *parent_cpu_data;
	struct cpufreq_policy *policy;
	struct device *cpu_dev;
	unsigned int cpu;
	int ret;

	p_data->cpu_data_list
		= (struct list_head)LIST_HEAD_INIT(p_data->cpu_data_list);

	p_data->nb.notifier_call = cpufreq_passive_notifier_call;
	ret = cpufreq_register_notifier(&p_data->nb, CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		dev_err(dev, "failed to register cpufreq notifier\n");
		p_data->nb.notifier_call = NULL;
		goto err;
	}

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			ret = -EPROBE_DEFER;
			goto err;
		}

		parent_cpu_data = get_parent_cpu_data(p_data, policy);
		if (parent_cpu_data) {
			cpufreq_cpu_put(policy);
			continue;
		}

		parent_cpu_data = kzalloc(sizeof(*parent_cpu_data),
						GFP_KERNEL);
		if (!parent_cpu_data) {
			ret = -ENOMEM;
			goto err_put_policy;
		}

		cpu_dev = get_cpu_device(cpu);
		if (!cpu_dev) {
			dev_err(dev, "failed to get cpu device\n");
			ret = -ENODEV;
			goto err_free_cpu_data;
		}

		opp_table = dev_pm_opp_get_opp_table(cpu_dev);
		if (IS_ERR(opp_table)) {
			dev_err(dev, "failed to get opp_table of cpu%d\n", cpu);
			ret = PTR_ERR(opp_table);
			goto err_free_cpu_data;
		}

		parent_cpu_data->dev = cpu_dev;
		parent_cpu_data->opp_table = opp_table;
		parent_cpu_data->first_cpu = cpumask_first(policy->related_cpus);
		parent_cpu_data->cur_freq = policy->cur;
		parent_cpu_data->min_freq = policy->cpuinfo.min_freq;
		parent_cpu_data->max_freq = policy->cpuinfo.max_freq;

		list_add_tail(&parent_cpu_data->node, &p_data->cpu_data_list);
		cpufreq_cpu_put(policy);
	}

	mutex_lock(&devfreq->lock);
	ret = devfreq_update_target(devfreq, 0L);
	mutex_unlock(&devfreq->lock);
	if (ret)
		dev_err(dev, "failed to update the frequency\n");

	return ret;

err_free_cpu_data:
	kfree(parent_cpu_data);
err_put_policy:
	cpufreq_cpu_put(policy);
err:

	return ret;
}
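/*
 * devfreq transition notifier for a parent devfreq device: the passive
 * device follows the parent's new frequency before the parent lowers its
 * frequency (DEVFREQ_PRECHANGE) and after the parent raises it
 * (DEVFREQ_POSTCHANGE).
 */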
static int devfreq_passive_notifier_call(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct devfreq_passive_data *data
			= container_of(nb, struct devfreq_passive_data, nb);
	struct devfreq *devfreq = (struct devfreq *)data->this;
	struct devfreq *parent = (struct devfreq *)data->parent;
	struct devfreq_freqs *freqs = (struct devfreq_freqs *)ptr;
	unsigned long freq = freqs->new;
	int ret = 0;

	mutex_lock_nested(&devfreq->lock, SINGLE_DEPTH_NESTING);
	switch (event) {
	case DEVFREQ_PRECHANGE:
		if (parent->previous_freq > freq)
			ret = devfreq_update_target(devfreq, freq);

		break;
	case DEVFREQ_POSTCHANGE:
		if (parent->previous_freq < freq)
			ret = devfreq_update_target(devfreq, freq);
		break;
	}
	mutex_unlock(&devfreq->lock);

	if (ret < 0)
		dev_warn(&devfreq->dev,
			"failed to update devfreq using passive governor\n");

	return NOTIFY_DONE;
}

static int devfreq_passive_unregister_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	return devfreq_unregister_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}
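/*
 * Hook this device up to the parent devfreq device's transition notifier
 * chain. Returns -EPROBE_DEFER when no parent has been set yet.
 */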
static int devfreq_passive_register_notifier(struct devfreq *devfreq)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	struct devfreq *parent = (struct devfreq *)p_data->parent;
	struct notifier_block *nb = &p_data->nb;

	if (!parent)
		return -EPROBE_DEFER;

	nb->notifier_call = devfreq_passive_notifier_call;
	return devfreq_register_notifier(parent, nb, DEVFREQ_TRANSITION_NOTIFIER);
}
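/*
 * Governor event handler: register the appropriate notifier (devfreq or
 * cpufreq, depending on the parent type) on DEVFREQ_GOV_START and
 * unregister it on DEVFREQ_GOV_STOP.
 */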
static int devfreq_passive_event_handler(struct devfreq *devfreq,
				unsigned int event, void *data)
{
	struct devfreq_passive_data *p_data
			= (struct devfreq_passive_data *)devfreq->data;
	int ret = 0;

	if (!p_data)
		return -EINVAL;

	p_data->this = devfreq;

	switch (event) {
	case DEVFREQ_GOV_START:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			ret = devfreq_passive_register_notifier(devfreq);
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			ret = cpufreq_passive_register_notifier(devfreq);
		break;
	case DEVFREQ_GOV_STOP:
		if (p_data->parent_type == DEVFREQ_PARENT_DEV)
			WARN_ON(devfreq_passive_unregister_notifier(devfreq));
		else if (p_data->parent_type == CPUFREQ_PARENT_DEV)
			WARN_ON(cpufreq_passive_unregister_notifier(devfreq));
		break;
	default:
		break;
	}

	return ret;
}

static struct devfreq_governor devfreq_passive = {
	.name = DEVFREQ_GOV_PASSIVE,
	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE,
	.get_target_freq = devfreq_passive_get_target_freq,
	.event_handler = devfreq_passive_event_handler,
};

static int __init devfreq_passive_init(void)
{
	return devfreq_add_governor(&devfreq_passive);
}
subsys_initcall(devfreq_passive_init);

static void __exit devfreq_passive_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&devfreq_passive);
	if (ret)
		pr_err("%s: failed to remove governor %d\n", __func__, ret);
}
module_exit(devfreq_passive_exit);

MODULE_AUTHOR("Chanwoo Choi <cw00.choi@samsung.com>");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_DESCRIPTION("DEVFREQ Passive governor");
MODULE_LICENSE("GPL v2");