// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018 Fuzhou Rockchip Electronics Co., Ltd
 */

#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "clk.h"

#define div_mask(width)	((1 << (width)) - 1)

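/*
 * Check whether the candidate rate @now approximates the requested @rate
 * better than @best, the best rate seen so far: with
 * CLK_DIVIDER_ROUND_CLOSEST the closest rate wins, otherwise only rates
 * at or below the request are acceptable.
 */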
static bool _is_best_half_div(unsigned long rate, unsigned long now,
			      unsigned long best, unsigned long flags)
{
	if (flags & CLK_DIVIDER_ROUND_CLOSEST)
		return abs(rate - now) < abs(rate - best);

	return now <= rate && now > best;
}

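/*
 * A register field value of n selects the fractional divisor n + 1.5,
 * so the output rate is parent_rate * 2 / (n * 2 + 3); for example a
 * field value of 2 divides by 3.5, and the smallest possible divisor
 * is 1.5.
 */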
static unsigned long clk_half_divider_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= div_mask(divider->width);
	val = val * 2 + 3;

	return DIV_ROUND_UP_ULL(((u64)parent_rate * 2), val);
}

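/*
 * Find the register value giving the best approximation of @rate. When
 * the parent rate must stay fixed, directly compute the nearest setting
 * and clamp it to the field width. Otherwise walk every divider value,
 * let the parent round the rate each candidate would need, and keep the
 * combination judged best by _is_best_half_div().
 */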
static int clk_half_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
				    unsigned long *best_parent_rate, u8 width,
				    unsigned long flags)
{
	unsigned int i, bestdiv = 0;
	unsigned long parent_rate, best = 0, now, maxdiv;
	unsigned long parent_rate_saved = *best_parent_rate;

	if (!rate)
		rate = 1;

	maxdiv = div_mask(width);

	if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) {
		parent_rate = *best_parent_rate;
		bestdiv = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
		if (bestdiv < 3)
			bestdiv = 0;
		else
			bestdiv = (bestdiv - 3) / 2;
		bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
		return bestdiv;
	}

	/*
	 * The maximum divider we can use without overflowing
	 * unsigned long in rate * i below
	 */
	maxdiv = min(ULONG_MAX / rate, maxdiv);

	for (i = 0; i <= maxdiv; i++) {
		if (((u64)rate * (i * 2 + 3)) == ((u64)parent_rate_saved * 2)) {
			/*
			 * It's the most ideal case if the requested rate can be
			 * divided from parent clock without needing to change
			 * parent rate, so return the divider immediately.
			 */
			*best_parent_rate = parent_rate_saved;
			return i;
		}
		parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
						((u64)rate * (i * 2 + 3)) / 2);
		now = DIV_ROUND_UP_ULL(((u64)parent_rate * 2),
				       (i * 2 + 3));

		if (_is_best_half_div(rate, now, best, flags)) {
			bestdiv = i;
			best = now;
			*best_parent_rate = parent_rate;
		}
	}

	if (!bestdiv) {
		bestdiv = div_mask(width);
		*best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw), 1);
	}

	return bestdiv;
}

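/*
 * clk_ops.round_rate: pick the best divider (possibly adjusting the
 * parent rate through *prate) and return the rate that setting will
 * actually produce.
 */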
static long clk_half_divider_round_rate(struct clk_hw *hw, unsigned long rate,
					unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	int div;

	div = clk_half_divider_bestdiv(hw, rate, prate,
				       divider->width,
				       divider->flags);

	return DIV_ROUND_UP_ULL(((u64)*prate * 2), div * 2 + 3);
}

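/*
 * clk_ops.set_rate: invert the n + 1.5 mapping to get the register value
 * for @rate, clamp it to the field width and write it back, using the
 * hiword write-mask scheme when CLK_DIVIDER_HIWORD_MASK is set.
 */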
static int clk_half_divider_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int value;
	unsigned long flags = 0;
	u32 val;

	value = DIV_ROUND_UP_ULL(((u64)parent_rate * 2), rate);
	value = (value - 3) / 2;
	value = min_t(unsigned int, value, div_mask(divider->width));

	if (divider->lock)
		spin_lock_irqsave(divider->lock, flags);
	else
		__acquire(divider->lock);

	if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
		val = div_mask(divider->width) << (divider->shift + 16);
	} else {
		val = readl(divider->reg);
		val &= ~(div_mask(divider->width) << divider->shift);
	}
	val |= value << divider->shift;
	writel(val, divider->reg);

	if (divider->lock)
		spin_unlock_irqrestore(divider->lock, flags);
	else
		__release(divider->lock);

	return 0;
}

static const struct clk_ops clk_half_divider_ops = {
	.recalc_rate = clk_half_divider_recalc_rate,
	.round_rate = clk_half_divider_round_rate,
	.set_rate = clk_half_divider_set_rate,
};

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
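/*
 * For illustration only (not part of this driver): a SoC clock driver
 * might register such a branch roughly as below; the name, parent array,
 * offsets, shifts and widths are made-up example values.
 *
 *	clk = rockchip_clk_register_halfdiv("hclk_example", example_parents,
 *					    ARRAY_SIZE(example_parents), base,
 *					    0x100, 14, 1, 0, 0, 5, 0,
 *					    0x110, 3, 0, CLK_SET_RATE_PARENT,
 *					    &example_lock);
 */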
struct clk *rockchip_clk_register_halfdiv(const char *name,
					  const char *const *parent_names,
					  u8 num_parents, void __iomem *base,
					  int muxdiv_offset, u8 mux_shift,
					  u8 mux_width, u8 mux_flags,
					  u8 div_shift, u8 div_width,
					  u8 div_flags, int gate_offset,
					  u8 gate_shift, u8 gate_flags,
					  unsigned long flags,
					  spinlock_t *lock)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM);
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate)
			goto err_gate;

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div)
			goto err_div;

		div->flags = div_flags;
		div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div_ops = &clk_half_divider_ops;
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux ? &mux->hw : NULL, mux_ops,
				       div ? &div->hw : NULL, div_ops,
				       gate ? &gate->hw : NULL, gate_ops,
				       flags);
	if (IS_ERR(hw))
		goto err_composite;

	return hw->clk;
err_composite:
	kfree(div);
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_CAST(hw);
}