1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2018, The Linux Foundation. All rights reserved.
3
4#include <linux/kernel.h>
5#include <linux/init.h>
6#include <linux/module.h>
7#include <linux/platform_device.h>
8#include <linux/property.h>
9#include <linux/err.h>
10#include <linux/io.h>
11#include <linux/of.h>
12#include <linux/clk.h>
13#include <linux/clk-provider.h>
14#include <linux/slab.h>
15
16#include "clk-krait.h"
17
/* Indices into the clks[] array handed out by the krait_of_get() provider. */
enum {
	cpu0_mux = 0,
	cpu1_mux,
	cpu2_mux,
	cpu3_mux,
	l2_mux,

	clks_max,	/* array-size sentinel, not a real clock */
};
27
/*
 * Register select encodings for the secondary mux, indexed by clk parent
 * index: [0] = qsb -> 2, [1] = aux source -> 0 (parent order follows
 * sec_mux_list in krait_add_sec_mux()).
 */
static unsigned int sec_mux_map[] = {
	2,
	0,
};

/*
 * Register select encodings for the primary mux, indexed by clk parent
 * index: [0] = hfpll -> 1, [1] = hfpll/2 divider -> 2, [2] = secondary
 * mux -> 0 (parent order follows p_data[] in krait_add_pri_mux()).
 */
static unsigned int pri_mux_map[] = {
	1,
	2,
	0,
};
38
39/*
40 * Notifier function for switching the muxes to safe parent
41 * while the hfpll is getting reprogrammed.
42 */
43static int krait_notifier_cb(struct notifier_block *nb,
44			     unsigned long event,
45			     void *data)
46{
47	int ret = 0;
48	struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk,
49						 clk_nb);
50	/* Switch to safe parent */
51	if (event == PRE_RATE_CHANGE) {
52		mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw);
53		ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel);
54		mux->reparent = false;
55	/*
56	 * By the time POST_RATE_CHANGE notifier is called,
57	 * clk framework itself would have changed the parent for the new rate.
58	 * Only otherwise, put back to the old parent.
59	 */
60	} else if (event == POST_RATE_CHANGE) {
61		if (!mux->reparent)
62			ret = krait_mux_clk_ops.set_parent(&mux->hw,
63							   mux->old_index);
64	}
65
66	return notifier_from_errno(ret);
67}
68
69static int krait_notifier_register(struct device *dev, struct clk *clk,
70				   struct krait_mux_clk *mux)
71{
72	int ret = 0;
73
74	mux->clk_nb.notifier_call = krait_notifier_cb;
75	ret = devm_clk_notifier_register(dev, clk, &mux->clk_nb);
76	if (ret)
77		dev_err(dev, "failed to register clock notifier: %d\n", ret);
78
79	return ret;
80}
81
/*
 * Register the "hfpll<s>_div" divide-by-2 clock that sits between the
 * HFPLL and the primary mux.
 *
 * @dev:    owning device, used for devm allocation/registration
 * @id:     physical CPU number, or < 0 for the L2 clock
 * @s:      name suffix ("0".."3" for CPUs, "_l2" for the L2 cluster)
 * @offset: register offset of the mux/divider control word
 *
 * Returns the registered clk_hw on success or an ERR_PTR on failure.
 */
static struct clk_hw *
krait_add_div(struct device *dev, int id, const char *s, unsigned int offset)
{
	struct krait_div2_clk *div;
	/*
	 * NOTE(review): static storage is shared across calls; this relies on
	 * the clk core copying the parent data during registration — confirm.
	 */
	static struct clk_parent_data p_data[1];
	struct clk_init_data init = {
		.num_parents = ARRAY_SIZE(p_data),
		.ops = &krait_div2_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk_hw *clk;
	char *parent_name;
	int cpu, ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	/* 2-bit divider field at bit 6 of the control word */
	div->width = 2;
	div->shift = 6;
	div->lpl = id >= 0;	/* set for per-CPU instances, not for L2 */
	div->offset = offset;
	div->hw.init = &init;

	init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	init.parent_data = p_data;
	parent_name = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!parent_name) {
		clk = ERR_PTR(-ENOMEM);
		goto err_parent_name;
	}

	/* Sole parent is the matching HFPLL, resolved by fw_name or name. */
	p_data[0].fw_name = parent_name;
	p_data[0].name = parent_name;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err_clk;
	}

	clk = &div->hw;

	/* clk-krait ignore any rate change if mux is not flagged as enabled */
	if (id < 0)
		for_each_online_cpu(cpu)
			clk_prepare_enable(div->hw.clk);
	else
		clk_prepare_enable(div->hw.clk);

	/* Name strings are duplicated by the clk core; free our copies. */
err_clk:
	kfree(parent_name);
err_parent_name:
	kfree(init.name);

	return clk;
}
142
143static struct clk_hw *
144krait_add_sec_mux(struct device *dev, int id, const char *s,
145		  unsigned int offset, bool unique_aux)
146{
147	int cpu, ret;
148	struct krait_mux_clk *mux;
149	static struct clk_parent_data sec_mux_list[2] = {
150		{ .name = "qsb", .fw_name = "qsb" },
151		{},
152	};
153	struct clk_init_data init = {
154		.parent_data = sec_mux_list,
155		.num_parents = ARRAY_SIZE(sec_mux_list),
156		.ops = &krait_mux_clk_ops,
157		.flags = CLK_SET_RATE_PARENT,
158	};
159	struct clk_hw *clk;
160	char *parent_name;
161
162	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
163	if (!mux)
164		return ERR_PTR(-ENOMEM);
165
166	mux->offset = offset;
167	mux->lpl = id >= 0;
168	mux->mask = 0x3;
169	mux->shift = 2;
170	mux->parent_map = sec_mux_map;
171	mux->hw.init = &init;
172	mux->safe_sel = 0;
173
174	/* Checking for qcom,krait-cc-v1 or qcom,krait-cc-v2 is not
175	 * enough to limit this to apq/ipq8064. Directly check machine
176	 * compatible to correctly handle this errata.
177	 */
178	if (of_machine_is_compatible("qcom,ipq8064") ||
179	    of_machine_is_compatible("qcom,apq8064"))
180		mux->disable_sec_src_gating = true;
181
182	init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
183	if (!init.name)
184		return ERR_PTR(-ENOMEM);
185
186	if (unique_aux) {
187		parent_name = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
188		if (!parent_name) {
189			clk = ERR_PTR(-ENOMEM);
190			goto err_aux;
191		}
192		sec_mux_list[1].fw_name = parent_name;
193		sec_mux_list[1].name = parent_name;
194	} else {
195		sec_mux_list[1].name = "apu_aux";
196	}
197
198	ret = devm_clk_hw_register(dev, &mux->hw);
199	if (ret) {
200		clk = ERR_PTR(ret);
201		goto err_clk;
202	}
203
204	clk = &mux->hw;
205
206	ret = krait_notifier_register(dev, mux->hw.clk, mux);
207	if (ret) {
208		clk = ERR_PTR(ret);
209		goto err_clk;
210	}
211
212	/* clk-krait ignore any rate change if mux is not flagged as enabled */
213	if (id < 0)
214		for_each_online_cpu(cpu)
215			clk_prepare_enable(mux->hw.clk);
216	else
217		clk_prepare_enable(mux->hw.clk);
218
219err_clk:
220	if (unique_aux)
221		kfree(parent_name);
222err_aux:
223	kfree(init.name);
224	return clk;
225}
226
/*
 * Register the "krait<s>_pri_mux" primary mux for one CPU (or the L2).
 *
 * Parent order (matches pri_mux_map): 0 = HFPLL, 1 = HFPLL/2 divider,
 * 2 = secondary mux output.
 *
 * @dev:       owning device for devm allocation/registration
 * @hfpll_div: divider clk_hw created by krait_add_div()
 * @sec_mux:   secondary mux clk_hw created by krait_add_sec_mux()
 * @id:        physical CPU number, or < 0 for the L2 clock
 * @s:         name suffix ("0".."3" or "_l2")
 * @offset:    register offset of the mux control word
 *
 * Returns the registered clk_hw on success or an ERR_PTR on failure.
 */
static struct clk_hw *
krait_add_pri_mux(struct device *dev, struct clk_hw *hfpll_div, struct clk_hw *sec_mux,
		  int id, const char *s, unsigned int offset)
{
	int ret;
	struct krait_mux_clk *mux;
	/*
	 * NOTE(review): static storage is shared across calls; this relies on
	 * the clk core copying the parent data during registration — confirm.
	 */
	static struct clk_parent_data p_data[3];
	struct clk_init_data init = {
		.parent_data = p_data,
		.num_parents = ARRAY_SIZE(p_data),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk_hw *clk;
	char *hfpll_name;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	/* 2-bit select field at bit 0 of the control word */
	mux->mask = 0x3;
	mux->shift = 0;
	mux->offset = offset;
	mux->lpl = id >= 0;	/* set for per-CPU instances, not for L2 */
	mux->parent_map = pri_mux_map;
	mux->hw.init = &init;
	/* Parent index 2 (sec_mux) is the safe parent for the notifier. */
	mux->safe_sel = 2;

	init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	hfpll_name = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!hfpll_name) {
		clk = ERR_PTR(-ENOMEM);
		goto err_hfpll;
	}

	/* Parent 0: the HFPLL itself, resolved by fw_name or legacy name. */
	p_data[0].fw_name = hfpll_name;
	p_data[0].name = hfpll_name;

	p_data[1].hw = hfpll_div;
	p_data[2].hw = sec_mux;

	ret = devm_clk_hw_register(dev, &mux->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err_clk;
	}

	clk = &mux->hw;

	/* Park on the safe parent while the HFPLL is reprogrammed. */
	ret = krait_notifier_register(dev, mux->hw.clk, mux);
	if (ret)
		clk = ERR_PTR(ret);

err_clk:
	/* Name strings are duplicated by the clk core; free our copies. */
	kfree(hfpll_name);
err_hfpll:
	kfree(init.name);
	return clk;
}
289
290/* id < 0 for L2, otherwise id == physical CPU number */
291static struct clk_hw *krait_add_clks(struct device *dev, int id, bool unique_aux)
292{
293	struct clk_hw *hfpll_div, *sec_mux, *pri_mux;
294	unsigned int offset;
295	void *p = NULL;
296	const char *s;
297
298	if (id >= 0) {
299		offset = 0x4501 + (0x1000 * id);
300		s = p = kasprintf(GFP_KERNEL, "%d", id);
301		if (!s)
302			return ERR_PTR(-ENOMEM);
303	} else {
304		offset = 0x500;
305		s = "_l2";
306	}
307
308	hfpll_div = krait_add_div(dev, id, s, offset);
309	if (IS_ERR(hfpll_div)) {
310		pri_mux = hfpll_div;
311		goto err;
312	}
313
314	sec_mux = krait_add_sec_mux(dev, id, s, offset, unique_aux);
315	if (IS_ERR(sec_mux)) {
316		pri_mux = sec_mux;
317		goto err;
318	}
319
320	pri_mux = krait_add_pri_mux(dev, hfpll_div, sec_mux, id, s, offset);
321
322err:
323	kfree(p);
324	return pri_mux;
325}
326
327static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
328{
329	unsigned int idx = clkspec->args[0];
330	struct clk **clks = data;
331
332	if (idx >= clks_max) {
333		pr_err("%s: invalid clock index %d\n", __func__, idx);
334		return ERR_PTR(-EINVAL);
335	}
336
337	return clks[idx] ? : ERR_PTR(-ENODEV);
338}
339
static const struct of_device_id krait_cc_match_table[] = {
	/* Non-NULL match data => per-CPU aux clocks (unique_aux in probe). */
	{ .compatible = "qcom,krait-cc-v1", (void *)1UL },
	{ .compatible = "qcom,krait-cc-v2" },
	{}
};
MODULE_DEVICE_TABLE(of, krait_cc_match_table);
346
347static int krait_cc_probe(struct platform_device *pdev)
348{
349	struct device *dev = &pdev->dev;
350	unsigned long cur_rate, aux_rate;
351	int cpu;
352	struct clk_hw *mux, *l2_pri_mux;
353	struct clk *clk, **clks;
354	bool unique_aux = !!device_get_match_data(dev);
355
356	/* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
357	clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1);
358	if (IS_ERR(clk))
359		return PTR_ERR(clk);
360
361	if (!unique_aux) {
362		clk = clk_register_fixed_factor(dev, "acpu_aux",
363						"gpll0_vote", 0, 1, 2);
364		if (IS_ERR(clk))
365			return PTR_ERR(clk);
366	}
367
368	/* Krait configurations have at most 4 CPUs and one L2 */
369	clks = devm_kcalloc(dev, clks_max, sizeof(*clks), GFP_KERNEL);
370	if (!clks)
371		return -ENOMEM;
372
373	for_each_possible_cpu(cpu) {
374		mux = krait_add_clks(dev, cpu, unique_aux);
375		if (IS_ERR(mux))
376			return PTR_ERR(mux);
377		clks[cpu] = mux->clk;
378	}
379
380	l2_pri_mux = krait_add_clks(dev, -1, unique_aux);
381	if (IS_ERR(l2_pri_mux))
382		return PTR_ERR(l2_pri_mux);
383	clks[l2_mux] = l2_pri_mux->clk;
384
385	/*
386	 * We don't want the CPU or L2 clocks to be turned off at late init
387	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
388	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
389	 * that the clocks have already been prepared and enabled by the time
390	 * they take over.
391	 */
392	for_each_online_cpu(cpu) {
393		clk_prepare_enable(clks[l2_mux]);
394		WARN(clk_prepare_enable(clks[cpu]),
395		     "Unable to turn on CPU%d clock", cpu);
396	}
397
398	/*
399	 * Force reinit of HFPLLs and muxes to overwrite any potential
400	 * incorrect configuration of HFPLLs and muxes by the bootloader.
401	 * While at it, also make sure the cores are running at known rates
402	 * and print the current rate.
403	 *
404	 * The clocks are set to aux clock rate first to make sure the
405	 * secondary mux is not sourcing off of QSB. The rate is then set to
406	 * two different rates to force a HFPLL reinit under all
407	 * circumstances.
408	 */
409	cur_rate = clk_get_rate(clks[l2_mux]);
410	aux_rate = 384000000;
411	if (cur_rate < aux_rate) {
412		pr_info("L2 @ Undefined rate. Forcing new rate.\n");
413		cur_rate = aux_rate;
414	}
415	clk_set_rate(clks[l2_mux], aux_rate);
416	clk_set_rate(clks[l2_mux], 2);
417	clk_set_rate(clks[l2_mux], cur_rate);
418	pr_info("L2 @ %lu KHz\n", clk_get_rate(clks[l2_mux]) / 1000);
419	for_each_possible_cpu(cpu) {
420		clk = clks[cpu];
421		cur_rate = clk_get_rate(clk);
422		if (cur_rate < aux_rate) {
423			pr_info("CPU%d @ Undefined rate. Forcing new rate.\n", cpu);
424			cur_rate = aux_rate;
425		}
426
427		clk_set_rate(clk, aux_rate);
428		clk_set_rate(clk, 2);
429		clk_set_rate(clk, cur_rate);
430		pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
431	}
432
433	of_clk_add_provider(dev->of_node, krait_of_get, clks);
434
435	return 0;
436}
437
/* Platform driver bound via the qcom,krait-cc-v1/v2 DT compatibles. */
static struct platform_driver krait_cc_driver = {
	.probe = krait_cc_probe,
	.driver = {
		.name = "krait-cc",
		.of_match_table = krait_cc_match_table,
	},
};
module_platform_driver(krait_cc_driver);

MODULE_DESCRIPTION("Krait CPU Clock Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:krait-cc");
450