// SPDX-License-Identifier: GPL-2.0-only
/*
 * drivers/clk/tegra/clk-emc.c
 *
 * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
 *
 * Author:
 *	Mikko Perttunen <mperttunen@nvidia.com>
 */

#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sort.h>
#include <linux/string.h>

#include <soc/tegra/fuse.h>

#include "clk.h"

#define CLK_SOURCE_EMC 0x19c

#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT 0
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK 0xff
#define CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK) << \
					      CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_SHIFT)

#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT 29
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK 0x7
#define CLK_SOURCE_EMC_EMC_2X_CLK_SRC(x) (((x) & CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK) << \
					  CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)

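/*
 * Parent names in the order of the EMC_2X_CLK_SRC mux field; the raw index
 * read back by emc_get_parent() selects an entry from this list.
 */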
static const char * const emc_parent_clk_names[] = {
	"pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud",
	"pll_c2", "pll_c3", "pll_c_ud"
};

/*
 * List of clock sources for the various parents the EMC clock can have.
 * When switching to a timing whose parent uses the same clock source as
 * the current parent (but at a different rate), we must first switch to a
 * backup timing that uses a different clock source.
 */

#define EMC_SRC_PLL_M 0
#define EMC_SRC_PLL_C 1
#define EMC_SRC_PLL_P 2
#define EMC_SRC_CLK_M 3
#define EMC_SRC_PLL_C2 4
#define EMC_SRC_PLL_C3 5

static const char emc_parent_clk_sources[] = {
	EMC_SRC_PLL_M, EMC_SRC_PLL_C, EMC_SRC_PLL_P, EMC_SRC_CLK_M,
	EMC_SRC_PLL_M, EMC_SRC_PLL_C2, EMC_SRC_PLL_C3, EMC_SRC_PLL_C
};

struct emc_timing {
	unsigned long rate, parent_rate;
	u8 parent_index;
	struct clk *parent;
	u32 ram_code;
};

struct tegra_clk_emc {
	struct clk_hw hw;
	void __iomem *clk_regs;
	struct clk *prev_parent;
	bool changing_timing;

	struct device_node *emc_node;
	struct tegra_emc *emc;

	int num_timings;
	struct emc_timing *timings;
	spinlock_t *lock;

	tegra124_emc_prepare_timing_change_cb *prepare_timing_change;
	tegra124_emc_complete_timing_change_cb *complete_timing_change;
};

/* Common clock framework callback implementations */

static unsigned long emc_recalc_rate(struct clk_hw *hw,
				     unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	u32 val, div;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	/*
	 * CCF wrongly assumes that the parent won't change during set_rate,
	 * so get the parent rate explicitly.
	 */
	parent_rate = clk_hw_get_rate(clk_hw_get_parent(hw));

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);
	div = val & CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR_MASK;

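	/* The divisor field counts half steps: rate = parent_rate * 2 / (div + 2). */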
	return parent_rate / (div + 2) * 2;
}

/*
 * Round up to the next supported rate, unless no higher rate exists, in
 * which case round down. This is the safer choice since clients may have
 * set EMC rate floors. Also don't touch parent_rate, since we don't want
 * the CCF to play with our parent clocks.
 */
static int emc_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
{
	struct tegra_clk_emc *tegra;
	u8 ram_code = tegra_read_ram_code();
	struct emc_timing *timing = NULL;
	int i, k, t;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

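	/* Find the first timing entry for the currently strapped RAM code. */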
	for (k = 0; k < tegra->num_timings; k++) {
		if (tegra->timings[k].ram_code == ram_code)
			break;
	}

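	/* Find the entry one past the last timing for this RAM code. */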
	for (t = k; t < tegra->num_timings; t++) {
		if (tegra->timings[t].ram_code != ram_code)
			break;
	}

	for (i = k; i < t; i++) {
		timing = tegra->timings + i;

		if (timing->rate < req->rate && i != t - 1)
			continue;

		if (timing->rate > req->max_rate) {
			i = max(i, k + 1);
			req->rate = tegra->timings[i - 1].rate;
			return 0;
		}

		if (timing->rate < req->min_rate)
			continue;

		req->rate = timing->rate;
		return 0;
	}

	if (timing) {
		req->rate = timing->rate;
		return 0;
	}

	req->rate = clk_hw_get_rate(hw);
	return 0;
}

static u8 emc_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra;
	u32 val;

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	val = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	return (val >> CLK_SOURCE_EMC_EMC_2X_CLK_SRC_SHIFT)
		& CLK_SOURCE_EMC_EMC_2X_CLK_SRC_MASK;
}

static struct tegra_emc *emc_ensure_emc_driver(struct tegra_clk_emc *tegra)
{
	struct platform_device *pdev;

	if (tegra->emc)
		return tegra->emc;

	if (!tegra->prepare_timing_change || !tegra->complete_timing_change)
		return NULL;

	if (!tegra->emc_node)
		return NULL;

	pdev = of_find_device_by_node(tegra->emc_node);
	if (!pdev) {
		pr_err("%s: could not get external memory controller\n",
		       __func__);
		return NULL;
	}

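	/* The EMC device is known now; the node reference is no longer needed. */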
	of_node_put(tegra->emc_node);
	tegra->emc_node = NULL;

	tegra->emc = platform_get_drvdata(pdev);
	if (!tegra->emc) {
		put_device(&pdev->dev);
		pr_err("%s: cannot find EMC driver\n", __func__);
		return NULL;
	}

	return tegra->emc;
}

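/*
 * Switch to a new timing: bring the parent to the required rate, let the EMC
 * driver prepare the DRAM for the frequency change, reprogram the CAR source
 * and divisor, then complete the change and release the previous parent.
 */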
static int emc_set_timing(struct tegra_clk_emc *tegra,
			  struct emc_timing *timing)
{
	int err;
	u8 div;
	u32 car_value;
	unsigned long flags = 0;
	struct tegra_emc *emc = emc_ensure_emc_driver(tegra);

	if (!emc)
		return -ENOENT;

	pr_debug("going to rate %ld prate %ld p %s\n", timing->rate,
		 timing->parent_rate, __clk_get_name(timing->parent));

	if (emc_get_parent(&tegra->hw) == timing->parent_index &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		WARN_ONCE(1, "parent %s rate mismatch %lu %lu\n",
			  __clk_get_name(timing->parent),
			  clk_get_rate(timing->parent),
			  timing->parent_rate);
		return -EINVAL;
	}

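	/*
	 * Flag the change so that the set_rate calls that the CCF propagates
	 * down from the parent rate change below are ignored (see
	 * emc_set_rate()).
	 */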
	tegra->changing_timing = true;

	err = clk_set_rate(timing->parent, timing->parent_rate);
	if (err) {
		pr_err("cannot change parent %s rate to %ld: %d\n",
		       __clk_get_name(timing->parent), timing->parent_rate,
		       err);

		return err;
	}

	err = clk_prepare_enable(timing->parent);
	if (err) {
		pr_err("cannot enable parent clock: %d\n", err);
		return err;
	}

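	/* Pick a divisor such that parent_rate * 2 / (div + 2) == timing->rate. */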
	div = timing->parent_rate / (timing->rate / 2) - 2;

	err = tegra->prepare_timing_change(emc, timing->rate);
	if (err) {
		clk_disable_unprepare(timing->parent);
		return err;
	}

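	/* Update the clock source and divisor atomically under the CAR register lock. */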
	spin_lock_irqsave(tegra->lock, flags);

	car_value = readl(tegra->clk_regs + CLK_SOURCE_EMC);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_SRC(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_SRC(timing->parent_index);

	car_value &= ~CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(~0);
	car_value |= CLK_SOURCE_EMC_EMC_2X_CLK_DIVISOR(div);

	writel(car_value, tegra->clk_regs + CLK_SOURCE_EMC);

	spin_unlock_irqrestore(tegra->lock, flags);

	tegra->complete_timing_change(emc, timing->rate);

	clk_hw_reparent(&tegra->hw, __clk_get_hw(timing->parent));
	clk_disable_unprepare(tegra->prev_parent);

	tegra->prev_parent = timing->parent;
	tegra->changing_timing = false;

	return 0;
}

/*
 * Get backup timing to use as an intermediate step when a change between
 * two timings with the same clock source has been requested. First try to
 * find a timing with a higher clock rate to avoid a rate below any set rate
 * floors. If that is not possible, find a lower rate.
 */
static struct emc_timing *get_backup_timing(struct tegra_clk_emc *tegra,
					    int timing_index)
{
	int i;
	u32 ram_code = tegra_read_ram_code();
	struct emc_timing *timing;

	for (i = timing_index + 1; i < tegra->num_timings; i++) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	for (i = timing_index - 1; i >= 0; --i) {
		timing = tegra->timings + i;
		if (timing->ram_code != ram_code)
			break;

		if (emc_parent_clk_sources[timing->parent_index] !=
		    emc_parent_clk_sources[
		      tegra->timings[timing_index].parent_index])
			return timing;
	}

	return NULL;
}

static int emc_set_rate(struct clk_hw *hw, unsigned long rate,
			unsigned long parent_rate)
{
	struct tegra_clk_emc *tegra;
	struct emc_timing *timing = NULL;
	int i, err;
	u32 ram_code = tegra_read_ram_code();

	tegra = container_of(hw, struct tegra_clk_emc, hw);

	if (clk_hw_get_rate(hw) == rate)
		return 0;

	/*
	 * When emc_set_timing changes the parent rate, CCF will propagate
	 * that downward to us, so ignore any set_rate calls while a rate
	 * change is already going on.
	 */
	if (tegra->changing_timing)
		return 0;

	for (i = 0; i < tegra->num_timings; i++) {
		if (tegra->timings[i].rate == rate &&
		    tegra->timings[i].ram_code == ram_code) {
			timing = tegra->timings + i;
			break;
		}
	}

	if (!timing) {
		pr_err("cannot switch to rate %ld without emc table\n", rate);
		return -EINVAL;
	}

	if (emc_parent_clk_sources[emc_get_parent(hw)] ==
	    emc_parent_clk_sources[timing->parent_index] &&
	    clk_get_rate(timing->parent) != timing->parent_rate) {
		/*
		 * The parent clock source has not changed, but the parent
		 * rate has, so we must temporarily switch to another parent
		 * before changing to the target timing.
		 */

		struct emc_timing *backup_timing;

		backup_timing = get_backup_timing(tegra, i);
		if (!backup_timing) {
			pr_err("cannot find backup timing\n");
			return -EINVAL;
		}

		pr_debug("using %ld as backup rate when going to %ld\n",
			 backup_timing->rate, rate);

		err = emc_set_timing(tegra, backup_timing);
		if (err) {
			pr_err("cannot set backup timing: %d\n", err);
			return err;
		}
	}

	return emc_set_timing(tegra, timing);
}

/* Initialization and deinitialization */

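/*
 * The timing description parsed by this file is expected to look roughly
 * like the following. This is only an illustrative sketch, not copied from
 * any particular device tree, and the parent clock is chosen arbitrarily:
 *
 *	emc-timings-3 {
 *		nvidia,ram-code = <3>;
 *
 *		timing-12750000 {
 *			clock-frequency = <12750000>;
 *			nvidia,parent-clock-frequency = <408000000>;
 *			clocks = <&tegra_car TEGRA124_CLK_PLL_P>;
 *			clock-names = "emc-parent";
 *		};
 *	};
 */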
static int load_one_timing_from_dt(struct tegra_clk_emc *tegra,
				   struct emc_timing *timing,
				   struct device_node *node)
{
	int err, i;
	u32 tmp;

	err = of_property_read_u32(node, "clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read rate\n", node);
		return err;
	}

	timing->rate = tmp;

	err = of_property_read_u32(node, "nvidia,parent-clock-frequency", &tmp);
	if (err) {
		pr_err("timing %pOF: failed to read parent rate\n", node);
		return err;
	}

	timing->parent_rate = tmp;

	timing->parent = of_clk_get_by_name(node, "emc-parent");
	if (IS_ERR(timing->parent)) {
		pr_err("timing %pOF: failed to get parent clock\n", node);
		return PTR_ERR(timing->parent);
	}

	timing->parent_index = 0xff;
	i = match_string(emc_parent_clk_names, ARRAY_SIZE(emc_parent_clk_names),
			 __clk_get_name(timing->parent));
	if (i < 0) {
		pr_err("timing %pOF: %s is not a valid parent\n",
		       node, __clk_get_name(timing->parent));
		clk_put(timing->parent);
		return -EINVAL;
	}

	timing->parent_index = i;
	return 0;
}

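/* Sort helper: order timings by ascending rate. */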
static int cmp_timings(const void *_a, const void *_b)
{
	const struct emc_timing *a = _a;
	const struct emc_timing *b = _b;

	if (a->rate < b->rate)
		return -1;
	else if (a->rate == b->rate)
		return 0;
	else
		return 1;
}

static int load_timings_from_dt(struct tegra_clk_emc *tegra,
				struct device_node *node,
				u32 ram_code)
{
	struct emc_timing *timings_ptr;
	struct device_node *child;
	int child_count = of_get_child_count(node);
	int i = 0, err;
	size_t size;

	size = (tegra->num_timings + child_count) * sizeof(struct emc_timing);

	tegra->timings = krealloc(tegra->timings, size, GFP_KERNEL);
	if (!tegra->timings)
		return -ENOMEM;

	timings_ptr = tegra->timings + tegra->num_timings;
	tegra->num_timings += child_count;

	for_each_child_of_node(node, child) {
		struct emc_timing *timing = timings_ptr + (i++);

		err = load_one_timing_from_dt(tegra, timing, child);
		if (err) {
			of_node_put(child);
			kfree(tegra->timings);
			return err;
		}

		timing->ram_code = ram_code;
	}

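	/*
	 * Keep the timings for this ram code sorted by rate;
	 * emc_determine_rate() and get_backup_timing() rely on the ordering.
	 */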
	sort(timings_ptr, child_count, sizeof(struct emc_timing),
	     cmp_timings, NULL);

	return 0;
}

static const struct clk_ops tegra_clk_emc_ops = {
	.recalc_rate = emc_recalc_rate,
	.determine_rate = emc_determine_rate,
	.set_rate = emc_set_rate,
	.get_parent = emc_get_parent,
};

struct clk *tegra124_clk_register_emc(void __iomem *base, struct device_node *np,
				      spinlock_t *lock)
{
	struct tegra_clk_emc *tegra;
	struct clk_init_data init;
	struct device_node *node;
	u32 node_ram_code;
	struct clk *clk;
	int err;

	tegra = kcalloc(1, sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return ERR_PTR(-ENOMEM);

	tegra->clk_regs = base;
	tegra->lock = lock;

	tegra->num_timings = 0;

	for_each_child_of_node(np, node) {
		err = of_property_read_u32(node, "nvidia,ram-code",
					   &node_ram_code);
		if (err)
			continue;

		/*
		 * Store timings for all ram codes as we cannot read the
		 * fuses until the apbmisc driver is loaded.
		 */
		err = load_timings_from_dt(tegra, node, node_ram_code);
		if (err) {
			of_node_put(node);
			kfree(tegra);
			return ERR_PTR(err);
		}
	}

	if (tegra->num_timings == 0)
		pr_warn("%s: no memory timings registered\n", __func__);

	tegra->emc_node = of_parse_phandle(np,
			"nvidia,external-memory-controller", 0);
	if (!tegra->emc_node)
		pr_warn("%s: couldn't find node for EMC driver\n", __func__);

	init.name = "emc";
	init.ops = &tegra_clk_emc_ops;
	init.flags = CLK_IS_CRITICAL;
	init.parent_names = emc_parent_clk_names;
	init.num_parents = ARRAY_SIZE(emc_parent_clk_names);

	tegra->hw.init = &init;

	clk = clk_register(NULL, &tegra->hw);
	if (IS_ERR(clk))
		return clk;

	tegra->prev_parent = clk_hw_get_parent_by_index(
		&tegra->hw, emc_get_parent(&tegra->hw))->clk;
	tegra->changing_timing = false;

	/* Allow debugging tools to see the EMC clock */
	clk_register_clkdev(clk, "emc", "tegra-clk-debug");

	return clk;
}

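/*
 * Intended to be called by the EMC driver once it has probed. Until both
 * callbacks are set, emc_ensure_emc_driver() fails and rate changes are
 * rejected with -ENOENT.
 */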
void tegra124_clk_set_emc_callbacks(tegra124_emc_prepare_timing_change_cb *prep_cb,
				    tegra124_emc_complete_timing_change_cb *complete_cb)
{
	struct clk *clk = __clk_lookup("emc");
	struct tegra_clk_emc *tegra;
	struct clk_hw *hw;

	if (clk) {
		hw = __clk_get_hw(clk);
		tegra = container_of(hw, struct tegra_clk_emc, hw);

		tegra->prepare_timing_change = prep_cb;
		tegra->complete_timing_change = complete_cb;
	}
}
EXPORT_SYMBOL_GPL(tegra124_clk_set_emc_callbacks);

bool tegra124_clk_emc_driver_available(struct clk_hw *hw)
{
	struct tegra_clk_emc *tegra = container_of(hw, struct tegra_clk_emc, hw);

	return tegra->prepare_timing_change && tegra->complete_timing_change;
}
