// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/clk-provider.h>

#include "clk.h"

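/*
 * The super clock source register carries one parent-select field per CPU
 * state (IDLE/RUN/IRQ/FIQ).  Bits 31:28 report which state is currently
 * active, and each state's field is 'width' bits wide, starting at
 * bit (state * width).  The macros below decode that layout.
 */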
#define SUPER_STATE_IDLE 0
#define SUPER_STATE_RUN 1
#define SUPER_STATE_IRQ 2
#define SUPER_STATE_FIQ 3

#define SUPER_STATE_SHIFT 28
#define SUPER_STATE_MASK ((BIT(SUPER_STATE_IDLE) | BIT(SUPER_STATE_RUN) | \
			   BIT(SUPER_STATE_IRQ) | BIT(SUPER_STATE_FIQ))	\
			  << SUPER_STATE_SHIFT)

#define SUPER_LP_DIV2_BYPASS (1 << 16)

#define super_state(s) (BIT(s) << SUPER_STATE_SHIFT)
#define super_state_to_src_shift(m, s) ((m)->width * (s))
#define super_state_to_src_mask(m) ((1 << (m)->width) - 1)

#define CCLK_SRC_PLLP_OUT0 4
#define CCLK_SRC_PLLP_OUT4 5

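/*
 * clk_super_get_parent() - decode the active parent from the source register
 *
 * Only the RUN and IDLE state fields are expected to be in use here, hence
 * the BUG_ON() below.  If the LP divider is engaged (DIV2_BYPASS clear) and
 * PLLX is selected, the effective parent is reported as the PLLX/2 input.
 */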
static u8 clk_super_get_parent(struct clk_hw *hw)
{
	struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
	u32 val, state;
	u8 source, shift;

	val = readl_relaxed(mux->reg);

	state = val & SUPER_STATE_MASK;

	BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
	       (state != super_state(SUPER_STATE_IDLE)));
	shift = (state == super_state(SUPER_STATE_IDLE)) ?
		super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
		super_state_to_src_shift(mux, SUPER_STATE_RUN);

	source = (val >> shift) & super_state_to_src_mask(mux);

	/*
	 * If LP_DIV2_BYPASS is not set and PLLX is the current parent, then
	 * PLLX/2 is the input source to CCLKLP.
	 */
	if ((mux->flags & TEGRA_DIVIDER_2) && !(val & SUPER_LP_DIV2_BYPASS) &&
	    (source == mux->pllx_index))
		source = mux->div2_index;

	return source;
}

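/*
 * clk_super_set_parent() - program a new parent into the active state field
 *
 * As in clk_super_get_parent(), only the RUN and IDLE states are handled.
 * Switching between the PLLX direct and PLLX/2 inputs is done by toggling
 * the DIV2_BYPASS bit while a different source is selected; for Tegra210
 * CPU clocks the PLLP branches to the CPU are enabled before PLLP is
 * selected and disabled again once PLLP is no longer used.
 */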
static int clk_super_set_parent(struct clk_hw *hw, u8 index)
{
	struct tegra_clk_super_mux *mux = to_clk_super_mux(hw);
	u32 val, state;
	int err = 0;
	u8 parent_index, shift;
	unsigned long flags = 0;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, flags);

	val = readl_relaxed(mux->reg);
	state = val & SUPER_STATE_MASK;
	BUG_ON((state != super_state(SUPER_STATE_RUN)) &&
	       (state != super_state(SUPER_STATE_IDLE)));
	shift = (state == super_state(SUPER_STATE_IDLE)) ?
		super_state_to_src_shift(mux, SUPER_STATE_IDLE) :
		super_state_to_src_shift(mux, SUPER_STATE_RUN);

	/*
	 * For the LP-mode super clock, switching between the PLLX direct
	 * and divided-by-2 outputs is only allowed while a source other
	 * than PLLX is the current parent.
	 */
	if ((mux->flags & TEGRA_DIVIDER_2) && ((index == mux->div2_index) ||
					       (index == mux->pllx_index))) {
		parent_index = clk_super_get_parent(hw);
		if ((parent_index == mux->div2_index) ||
		    (parent_index == mux->pllx_index)) {
			err = -EINVAL;
			goto out;
		}

		val ^= SUPER_LP_DIV2_BYPASS;
		writel_relaxed(val, mux->reg);
		udelay(2);

		if (index == mux->div2_index)
			index = mux->pllx_index;
	}

	/* enable PLLP branches to CPU before selecting PLLP source */
	if ((mux->flags & TEGRA210_CPU_CLK) &&
	    (index == CCLK_SRC_PLLP_OUT0 || index == CCLK_SRC_PLLP_OUT4))
		tegra_clk_set_pllp_out_cpu(true);

	val &= ~((super_state_to_src_mask(mux)) << shift);
	val |= (index & (super_state_to_src_mask(mux))) << shift;

	writel_relaxed(val, mux->reg);
	udelay(2);

	/* disable PLLP branches to CPU if not used */
	if ((mux->flags & TEGRA210_CPU_CLK) &&
	    index != CCLK_SRC_PLLP_OUT0 && index != CCLK_SRC_PLLP_OUT4)
		tegra_clk_set_pllp_out_cpu(false);

out:
	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, flags);

	return err;
}

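/*
 * Re-program the parent recorded by the clock framework after a context
 * loss (e.g. across suspend/resume), since the hardware selection may no
 * longer match the cached parent index.
 */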
static void clk_super_mux_restore_context(struct clk_hw *hw)
{
	int parent_id;

	parent_id = clk_hw_get_parent_index(hw);
	if (WARN_ON(parent_id < 0))
		return;

	clk_super_set_parent(hw, parent_id);
}

static const struct clk_ops tegra_clk_super_mux_ops = {
	.determine_rate = clk_hw_determine_rate_no_reparent,
	.get_parent = clk_super_get_parent,
	.set_parent = clk_super_set_parent,
	.restore_context = clk_super_mux_restore_context,
};

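/*
 * The rate ops below forward to the built-in fractional divider
 * (super->frac_div) through the divider ops in super->div_ops;
 * __clk_hw_set_clk() points the divider's clk_hw at the super clock
 * so the framework sees a single combined mux + divider clock.
 */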
static int clk_super_determine_rate(struct clk_hw *hw,
				    struct clk_rate_request *req)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;
	long rate;

	__clk_hw_set_clk(div_hw, hw);

	/* round_rate() returns a negative errno on failure */
	rate = super->div_ops->round_rate(div_hw, req->rate,
					  &req->best_parent_rate);
	if (rate < 0)
		return rate;

	req->rate = rate;
	return 0;
}

static unsigned long clk_super_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;

	__clk_hw_set_clk(div_hw, hw);

	return super->div_ops->recalc_rate(div_hw, parent_rate);
}

static int clk_super_set_rate(struct clk_hw *hw, unsigned long rate,
			      unsigned long parent_rate)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;

	__clk_hw_set_clk(div_hw, hw);

	return super->div_ops->set_rate(div_hw, rate, parent_rate);
}

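/* Restore the divider setting first, then re-select the cached parent. */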
static void clk_super_restore_context(struct clk_hw *hw)
{
	struct tegra_clk_super_mux *super = to_clk_super_mux(hw);
	struct clk_hw *div_hw = &super->frac_div.hw;
	int parent_id;

	parent_id = clk_hw_get_parent_index(hw);
	if (WARN_ON(parent_id < 0))
		return;

	super->div_ops->restore_context(div_hw);
	clk_super_set_parent(hw, parent_id);
}

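/* Ops for the full super clock: parent mux plus built-in fractional divider. */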
const struct clk_ops tegra_clk_super_ops = {
	.get_parent = clk_super_get_parent,
	.set_parent = clk_super_set_parent,
	.set_rate = clk_super_set_rate,
	.determine_rate = clk_super_determine_rate,
	.recalc_rate = clk_super_recalc_rate,
	.restore_context = clk_super_restore_context,
};

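/**
 * tegra_clk_register_super_mux() - register a super clock source mux
 * @name: clock name
 * @parent_names: array of possible parent names
 * @num_parents: number of entries in @parent_names
 * @flags: common clock framework flags (CLK_*)
 * @clk_super_flags: Tegra-specific flags (TEGRA_DIVIDER_2, TEGRA210_CPU_CLK, ...)
 * @reg: MMIO address of the source (burst policy) register
 * @width: width in bits of one per-state source field
 * @pllx_index: parent index of the PLLX input
 * @div2_index: parent index representing the PLLX/2 input
 * @lock: optional register lock shared with other users of @reg
 *
 * A hypothetical registration call could look like the following (register
 * offset, parent list and indices are illustrative only):
 *
 *	clk = tegra_clk_register_super_mux("cclk_lp", cclk_lp_parents,
 *					   ARRAY_SIZE(cclk_lp_parents),
 *					   CLK_SET_RATE_PARENT,
 *					   clk_base + CCLKLP_BURST_POLICY,
 *					   TEGRA_DIVIDER_2, 4, pllx_idx,
 *					   div2_idx, NULL);
 */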
struct clk *tegra_clk_register_super_mux(const char *name,
		const char **parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 clk_super_flags,
		u8 width, u8 pllx_index, u8 div2_index, spinlock_t *lock)
{
	struct tegra_clk_super_mux *super;
	struct clk *clk;
	struct clk_init_data init;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &tegra_clk_super_mux_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	super->reg = reg;
	super->pllx_index = pllx_index;
	super->div2_index = div2_index;
	super->lock = lock;
	super->width = width;
	super->flags = clk_super_flags;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	super->hw.init = &init;

	clk = tegra_clk_dev_register(&super->hw);
	if (IS_ERR(clk))
		kfree(super);

	return clk;
}

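/**
 * tegra_clk_register_super_clk() - register a super clock with divider
 * @name: clock name
 * @parent_names: array of possible parent names
 * @num_parents: number of entries in @parent_names
 * @flags: common clock framework flags (CLK_*)
 * @reg: MMIO address of the source (burst policy) register; the fractional
 *	 divider register is at @reg + 4
 * @clk_super_flags: Tegra-specific flags
 * @lock: optional register lock shared with other users of the registers
 *
 * Unlike tegra_clk_register_super_mux(), this variant also wires up the
 * built-in 8.1-bit fractional divider, so the resulting clock supports
 * set_rate()/recalc_rate() through tegra_clk_super_ops.
 */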
struct clk *tegra_clk_register_super_clk(const char *name,
		const char * const *parent_names, u8 num_parents,
		unsigned long flags, void __iomem *reg, u8 clk_super_flags,
		spinlock_t *lock)
{
	struct tegra_clk_super_mux *super;
	struct clk *clk;
	struct clk_init_data init;

	super = kzalloc(sizeof(*super), GFP_KERNEL);
	if (!super)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &tegra_clk_super_ops;
	init.flags = flags;
	init.parent_names = parent_names;
	init.num_parents = num_parents;

	super->reg = reg;
	super->lock = lock;
	super->width = 4;
	super->flags = clk_super_flags;
	super->frac_div.reg = reg + 4;
	super->frac_div.shift = 16;
	super->frac_div.width = 8;
	super->frac_div.frac_width = 1;
	super->frac_div.lock = lock;
	super->div_ops = &tegra_clk_frac_div_ops;

	/* Data in .init is copied by clk_register(), so stack variable OK */
	super->hw.init = &init;

	clk = clk_register(NULL, &super->hw);
	if (IS_ERR(clk))
		kfree(super);

	return clk;
}