1#include <linux/clk.h>
2#include <linux/compiler.h>
3#include <linux/slab.h>
4#include <linux/io.h>
5#include <linux/sh_clk.h>
6
7static int sh_clk_mstp32_enable(struct clk *clk)
8{
9	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
10		     clk->enable_reg);
11	return 0;
12}
13
14static void sh_clk_mstp32_disable(struct clk *clk)
15{
16	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
17		     clk->enable_reg);
18}
19
/* MSTP32 gate clocks: pure on/off gates whose rate follows the parent. */
static struct clk_ops sh_clk_mstp32_clk_ops = {
	.enable		= sh_clk_mstp32_enable,
	.disable	= sh_clk_mstp32_disable,
	.recalc		= followparent_recalc,
};
25
26int __init sh_clk_mstp32_register(struct clk *clks, int nr)
27{
28	struct clk *clkp;
29	int ret = 0;
30	int k;
31
32	for (k = 0; !ret && (k < nr); k++) {
33		clkp = clks + k;
34		clkp->ops = &sh_clk_mstp32_clk_ops;
35		ret |= clk_register(clkp);
36	}
37
38	return ret;
39}
40
/*
 * Common round_rate helper for divider clocks: snap @rate to the
 * nearest entry in the clock's pre-built frequency table.
 */
static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
{
	return clk_rate_table_round(clk, clk->freq_table, rate);
}
45
/* DIV6 divisors 1..64; the 6-bit hardware field encodes (divisor - 1). */
static int sh_clk_div6_divisors[64] = {
	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
52
/* Shared divisor table for all DIV6 clocks (no multipliers). */
static struct clk_div_mult_table sh_clk_div6_table = {
	.divisors = sh_clk_div6_divisors,
	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
};
57
58static unsigned long sh_clk_div6_recalc(struct clk *clk)
59{
60	struct clk_div_mult_table *table = &sh_clk_div6_table;
61	unsigned int idx;
62
63	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
64			     table, NULL);
65
66	idx = __raw_readl(clk->enable_reg) & 0x003f;
67
68	return clk->freq_table[idx].frequency;
69}
70
/*
 * Switch a DIV6 clock to a new parent.
 *
 * The new parent must appear in clk->parent_table; its index there is
 * written into the src field of the control register.  The internal
 * reparent (clk_reparent) happens before the register write, and the
 * frequency table is rebuilt afterwards so cached rates match the new
 * parent.  Returns 0 on success, -EINVAL if no parent table is set,
 * -ENODEV if @parent is not a valid parent, or the clk_reparent()
 * error code.
 */
static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div_mult_table *table = &sh_clk_div6_table;
	u32 value;
	int ret, i;

	if (!clk->parent_table || !clk->parent_num)
		return -EINVAL;

	/* Search the parent */
	for (i = 0; i < clk->parent_num; i++)
		if (clk->parent_table[i] == parent)
			break;

	if (i == clk->parent_num)
		return -ENODEV;

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	/* Clear the src field, then program the new parent index. */
	value = __raw_readl(clk->enable_reg) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);

	__raw_writel(value | (i << clk->src_shift), clk->enable_reg);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
103
104static int sh_clk_div6_set_rate(struct clk *clk,
105				unsigned long rate, int algo_id)
106{
107	unsigned long value;
108	int idx;
109
110	idx = clk_rate_table_find(clk, clk->freq_table, rate);
111	if (idx < 0)
112		return idx;
113
114	value = __raw_readl(clk->enable_reg);
115	value &= ~0x3f;
116	value |= idx;
117	__raw_writel(value, clk->enable_reg);
118	return 0;
119}
120
121static int sh_clk_div6_enable(struct clk *clk)
122{
123	unsigned long value;
124	int ret;
125
126	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
127	if (ret == 0) {
128		value = __raw_readl(clk->enable_reg);
129		value &= ~0x100; /* clear stop bit to enable clock */
130		__raw_writel(value, clk->enable_reg);
131	}
132	return ret;
133}
134
135static void sh_clk_div6_disable(struct clk *clk)
136{
137	unsigned long value;
138
139	value = __raw_readl(clk->enable_reg);
140	value |= 0x100; /* stop clock */
141	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
142	__raw_writel(value, clk->enable_reg);
143}
144
/* DIV6 clocks with a fixed parent. */
static struct clk_ops sh_clk_div6_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
};
152
/* DIV6 clocks with a runtime-selectable parent. */
static struct clk_ops sh_clk_div6_reparent_clk_ops = {
	.recalc		= sh_clk_div6_recalc,
	.round_rate	= sh_clk_div_round_rate,
	.set_rate	= sh_clk_div6_set_rate,
	.enable		= sh_clk_div6_enable,
	.disable	= sh_clk_div6_disable,
	.set_parent	= sh_clk_div6_set_parent,
};
161
/*
 * Register @nr DIV6 clocks with the given @ops.
 *
 * One kzalloc'd region holds a per-clock cpufreq frequency table
 * (nr_divisors entries plus a CPUFREQ_TABLE_END terminator each);
 * each clock's freq_table points into its slice.  Stops at the first
 * clk_register() failure.
 *
 * NOTE(review): on partial failure the region is not freed -- earlier
 * clocks already registered still reference it, so freeing here would
 * leave dangling freq_table pointers.  The allocation is intentionally
 * left alive.
 */
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
					   struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = sh_clk_div6_table.nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* +1 entry per clock for the CPUFREQ_TABLE_END sentinel */
	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div6_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->id = -1;
		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
192
/* Register @nr fixed-parent DIV6 clocks. */
int __init sh_clk_div6_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr, &sh_clk_div6_clk_ops);
}
197
/* Register @nr DIV6 clocks that support parent switching. */
int __init sh_clk_div6_reparent_register(struct clk *clks, int nr)
{
	return sh_clk_div6_register_ops(clks, nr,
					&sh_clk_div6_reparent_clk_ops);
}
203
204static unsigned long sh_clk_div4_recalc(struct clk *clk)
205{
206	struct clk_div4_table *d4t = clk->priv;
207	struct clk_div_mult_table *table = d4t->div_mult_table;
208	unsigned int idx;
209
210	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
211			     table, &clk->arch_flags);
212
213	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
214
215	return clk->freq_table[idx].frequency;
216}
217
/*
 * Switch a DIV4 clock between its internal and external parent via
 * bit 7 of the control register, then rebuild the frequency table.
 * Returns 0 on success or the clk_reparent() error code.
 */
static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
{
	struct clk_div4_table *d4t = clk->priv;
	struct clk_div_mult_table *table = d4t->div_mult_table;
	u32 value;
	int ret;

	/* we really need a better way to determine parent index, but for
	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
	 * no CLK_ENABLE_ON_INIT means external clock...
	 */

	if (parent->flags & CLK_ENABLE_ON_INIT)
		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
	else
		value = __raw_readl(clk->enable_reg) | (1 << 7);

	ret = clk_reparent(clk, parent);
	if (ret < 0)
		return ret;

	__raw_writel(value, clk->enable_reg);

	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);

	return 0;
}
247
248static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
249{
250	struct clk_div4_table *d4t = clk->priv;
251	unsigned long value;
252	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
253	if (idx < 0)
254		return idx;
255
256	value = __raw_readl(clk->enable_reg);
257	value &= ~(0xf << clk->enable_bit);
258	value |= (idx << clk->enable_bit);
259	__raw_writel(value, clk->enable_reg);
260
261	if (d4t->kick)
262		d4t->kick(clk);
263
264	return 0;
265}
266
267static int sh_clk_div4_enable(struct clk *clk)
268{
269	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
270	return 0;
271}
272
273static void sh_clk_div4_disable(struct clk *clk)
274{
275	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
276}
277
/* DIV4 clocks that are always running (no gate). */
static struct clk_ops sh_clk_div4_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
};
283
/* DIV4 clocks with a stop bit (gateable). */
static struct clk_ops sh_clk_div4_enable_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
};
291
/* Gateable DIV4 clocks that also support parent switching. */
static struct clk_ops sh_clk_div4_reparent_clk_ops = {
	.recalc		= sh_clk_div4_recalc,
	.set_rate	= sh_clk_div4_set_rate,
	.round_rate	= sh_clk_div_round_rate,
	.enable		= sh_clk_div4_enable,
	.disable	= sh_clk_div4_disable,
	.set_parent	= sh_clk_div4_set_parent,
};
300
/*
 * Register @nr DIV4 clocks with the given @ops and per-family divisor
 * @table (stored in clk->priv for recalc/set_rate/set_parent).
 *
 * One kzalloc'd region holds a per-clock cpufreq frequency table
 * (nr_divisors entries plus a CPUFREQ_TABLE_END terminator each);
 * each clock's freq_table points into its slice.  Stops at the first
 * clk_register() failure.
 *
 * NOTE(review): on partial failure the region is not freed -- earlier
 * clocks already registered still reference it, so freeing here would
 * leave dangling freq_table pointers.  The allocation is intentionally
 * left alive.
 */
static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
			struct clk_div4_table *table, struct clk_ops *ops)
{
	struct clk *clkp;
	void *freq_table;
	int nr_divs = table->div_mult_table->nr_divisors;
	int freq_table_size = sizeof(struct cpufreq_frequency_table);
	int ret = 0;
	int k;

	/* +1 entry per clock for the CPUFREQ_TABLE_END sentinel */
	freq_table_size *= (nr_divs + 1);
	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
	if (!freq_table) {
		pr_err("sh_clk_div4_register: unable to alloc memory\n");
		return -ENOMEM;
	}

	for (k = 0; !ret && (k < nr); k++) {
		clkp = clks + k;

		clkp->ops = ops;
		clkp->id = -1;
		clkp->priv = table;

		clkp->freq_table = freq_table + (k * freq_table_size);
		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;

		ret = clk_register(clkp);
	}

	return ret;
}
333
/* Register @nr always-on DIV4 clocks. */
int __init sh_clk_div4_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
}

/* Register @nr gateable DIV4 clocks (with enable/disable). */
int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_enable_clk_ops);
}

/* Register @nr gateable DIV4 clocks that support parent switching. */
int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
				struct clk_div4_table *table)
{
	return sh_clk_div4_register_ops(clks, nr, table,
					&sh_clk_div4_reparent_clk_ops);
}
353