// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mach-omap1/clock.c
 *
 *  Copyright (C) 2004 - 2005, 2009-2010 Nokia Corporation
 *  Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
 *
 *  Modified to use omap shared clock framework by
 *  Tony Lindgren <tony@atomide.com>
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/soc/ti/omap1-io.h>
#include <linux/spinlock.h>

#include <asm/mach-types.h>

#include "hardware.h"
#include "soc.h"
#include "iomap.h"
#include "clock.h"
#include "opp.h"
#include "sram.h"

__u32 arm_idlect1_mask;
/* provide direct internal access (not via clk API) to some clocks */
struct omap1_clk *api_ck_p, *ck_dpll1_p, *ck_ref_p;

/* protect registers shared among clk_enable/disable() and clk_set_rate() operations */
static DEFINE_SPINLOCK(arm_ckctl_lock);
static DEFINE_SPINLOCK(arm_idlect2_lock);
static DEFINE_SPINLOCK(mod_conf_ctrl_0_lock);
static DEFINE_SPINLOCK(mod_conf_ctrl_1_lock);
static DEFINE_SPINLOCK(swd_clk_div_ctrl_sel_lock);

/*
 * Omap1 specific clock functions
 */

unsigned long omap1_uart_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	unsigned int val = __raw_readl(clk->enable_reg);

	return (val & (1 << clk->enable_bit)) ? 48000000 : 12000000;
}

unsigned long omap1_sossi_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	u32 div = omap_readl(MOD_CONF_CTRL_1);

	div = (div >> 17) & 0x7;
	div++;

	return p_rate / div;
}

static void omap1_clk_allow_idle(struct omap1_clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
		arm_idlect1_mask |= 1 << iclk->idlect_shift;
}

static void omap1_clk_deny_idle(struct omap1_clk *clk)
{
	struct arm_idlect1_clk *iclk = (struct arm_idlect1_clk *)clk;

	if (!(clk->flags & CLOCK_IDLE_CONTROL))
		return;

	if (iclk->no_idle_count++ == 0)
		arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
}
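
/*
 * The ARM_IDLECT1 handling above is reference counted: each
 * omap1_clk_deny_idle() call clears the clock's idlect bit in
 * arm_idlect1_mask, and the bit is set again only once a matching number
 * of omap1_clk_allow_idle() calls has brought no_idle_count back to zero.
 */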

static __u16 verify_ckctl_value(__u16 newval)
{
	/* This function checks for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 *
	 * In addition, the following rules are enforced:
	 * LCD_CK <= TC_CK
	 * ARMPER_CK <= TC_CK
	 *
	 * However, maximum frequencies are not checked for!
	 */
	__u8 per_exp;
	__u8 lcd_exp;
	__u8 arm_exp;
	__u8 dsp_exp;
	__u8 tc_exp;
	__u8 dspmmu_exp;

	per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
	lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
	arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
	dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
	tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
	dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;

	if (dspmmu_exp < dsp_exp)
		dspmmu_exp = dsp_exp;
	if (dspmmu_exp > dsp_exp + 1)
		dspmmu_exp = dsp_exp + 1;
	if (tc_exp < arm_exp)
		tc_exp = arm_exp;
	if (tc_exp < dspmmu_exp)
		tc_exp = dspmmu_exp;
	if (tc_exp > lcd_exp)
		lcd_exp = tc_exp;
	if (tc_exp > per_exp)
		per_exp = tc_exp;

	newval &= 0xf000;
	newval |= per_exp << CKCTL_PERDIV_OFFSET;
	newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
	newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
	newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
	newval |= tc_exp << CKCTL_TCDIV_OFFSET;
	newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;

	return newval;
}
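
/*
 * Worked example for verify_ckctl_value(): if the requested value has
 * arm_exp = 1 (ARM_CK divided by 2) but tc_exp = 0, TC_CK would run faster
 * than ARM_CK, so tc_exp is bumped to 1; lcd_exp and per_exp are then
 * raised to at least 1 as well so that LCD_CK and ARMPER_CK stay at or
 * below TC_CK.
 */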

static int calc_dsor_exp(unsigned long rate, unsigned long realrate)
{
	/* Note: If the target frequency is too low, this function will return 4,
	 * which is an invalid value. Caller must check for this value and act
	 * accordingly.
	 *
	 * Note: This function does not check for the following limitations set
	 * by the hardware (all conditions must be true):
	 * DSPMMU_CK == DSP_CK  or  DSPMMU_CK == DSP_CK/2
	 * ARM_CK >= TC_CK
	 * DSP_CK >= TC_CK
	 * DSPMMU_CK >= TC_CK
	 */
	unsigned dsor_exp;

	if (unlikely(realrate == 0))
		return -EIO;

	for (dsor_exp = 0; dsor_exp < 4; dsor_exp++) {
		if (realrate <= rate)
			break;

		realrate /= 2;
	}

	return dsor_exp;
}
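
/*
 * For example, calc_dsor_exp(60000000, 120000000) returns 1 (divide by 2),
 * while any target rate below realrate / 8 yields 4, which the caller must
 * reject as out of range.
 */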

unsigned long omap1_ckctl_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	/* Calculate divisor encoded as 2-bit exponent */
	int dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));

	/* update locally maintained rate, required by arm_ck for omap1_show_rates() */
	clk->rate = p_rate / dsor;
	return clk->rate;
}
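
/*
 * Each CKCTL divider field is a 2-bit exponent, so a field value of 3
 * decodes to a divisor of 8 and the resulting rate is p_rate / 8.
 */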

static int omap1_clk_is_enabled(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw);
	bool api_ck_was_enabled = true;
	__u32 regval32;
	int ret;

	if (!clk->ops)	/* no gate -- always enabled */
		return 1;

	if (clk->ops == &clkops_dspck) {
		api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
		if (!api_ck_was_enabled)
			if (api_ck_p->ops->enable(api_ck_p) < 0)
				return 0;
	}

	if (clk->flags & ENABLE_REG_32BIT)
		regval32 = __raw_readl(clk->enable_reg);
	else
		regval32 = __raw_readw(clk->enable_reg);

	ret = regval32 & (1 << clk->enable_bit);

	if (!api_ck_was_enabled)
		api_ck_p->ops->disable(api_ck_p);

	return ret;
}

unsigned long omap1_ckctl_recalc_dsp_domain(struct omap1_clk *clk, unsigned long p_rate)
{
	bool api_ck_was_enabled;
	int dsor;

	/* Calculate divisor encoded as 2-bit exponent
	 *
	 * The clock control bits are in the DSP domain,
	 * so api_ck is needed for access.
	 * Note that DSP_CKCTL virt addr = phys addr, so
	 * we must use __raw_readw() instead of omap_readw().
	 */
	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
	if (!api_ck_was_enabled)
		api_ck_p->ops->enable(api_ck_p);
	dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
	if (!api_ck_was_enabled)
		api_ck_p->ops->disable(api_ck_p);

	return p_rate / dsor;
}

/* MPU virtual clock functions */
int omap1_select_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	/* Find the highest supported frequency <= rate and switch to it */
	struct mpu_rate *ptr;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	if (!ptr->rate)
		return -EINVAL;

	/*
	 * In most cases we should not need to reprogram the DPLL.
	 * Reprogramming the DPLL is tricky; it must be done from SRAM.
	 */
	omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);

	/* XXX Do we need to recalculate the tree below DPLL1 at this point? */
	ck_dpll1_p->rate = ptr->pll_rate;

	return 0;
}

int omap1_clk_set_rate_dsp_domain(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(rate, p_rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	regval = __raw_readw(DSP_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	__raw_writew(regval, DSP_CKCTL);
	clk->rate = p_rate / (1 << dsor_exp);

	return 0;
}

long omap1_clk_round_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate,
				    unsigned long *p_rate)
{
	int dsor_exp = calc_dsor_exp(rate, *p_rate);

	if (dsor_exp < 0)
		return dsor_exp;
	if (dsor_exp > 3)
		dsor_exp = 3;

	return *p_rate / (1 << dsor_exp);
}
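
/*
 * Because the exponent is clamped to 3 above, the slowest rate this
 * round_rate handler can offer is p_rate / 8: e.g. asking for 10 MHz with
 * a 120 MHz parent rounds to 15 MHz.
 */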

int omap1_clk_set_rate_ckctl_arm(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	int dsor_exp;
	u16 regval;

	dsor_exp = calc_dsor_exp(rate, p_rate);
	if (dsor_exp > 3)
		dsor_exp = -EINVAL;
	if (dsor_exp < 0)
		return dsor_exp;

	/* protect ARM_CKCTL register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&arm_ckctl_lock, flags);

	regval = omap_readw(ARM_CKCTL);
	regval &= ~(3 << clk->rate_offset);
	regval |= dsor_exp << clk->rate_offset;
	regval = verify_ckctl_value(regval);
	omap_writew(regval, ARM_CKCTL);
	clk->rate = p_rate / (1 << dsor_exp);

	spin_unlock_irqrestore(&arm_ckctl_lock, flags);

	return 0;
}

long omap1_round_to_table_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	/* Find the highest supported frequency <= rate */
	struct mpu_rate *ptr;
	long highest_rate;
	unsigned long ref_rate;

	ref_rate = ck_ref_p->rate;

	highest_rate = -EINVAL;

	for (ptr = omap1_rate_table; ptr->rate; ptr++) {
		if (!(ptr->flags & cpu_mask))
			continue;

		if (ptr->xtal != ref_rate)
			continue;

		highest_rate = ptr->rate;

		/* Can check only after xtal frequency check */
		if (ptr->rate <= rate)
			break;
	}

	return highest_rate;
}

static unsigned calc_ext_dsor(unsigned long rate)
{
	unsigned dsor;

	/* MCLK and BCLK divisor selection is not linear:
	 * freq = 96MHz / dsor
	 *
	 * RATIO_SEL range: dsor <-> RATIO_SEL
	 * 0..6: (RATIO_SEL+2) <-> (dsor-2)
	 * 6..48:  (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
	 * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
	 * cannot be used.
	 */
	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}
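
/*
 * For example, a requested rate of 12 MHz gives dsor = 8 (96 MHz / 8),
 * while 11 MHz falls through to dsor = 10 (9.6 MHz) because odd divisors
 * above 8 are skipped.
 */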

/* XXX Only needed on 1510 */
long omap1_round_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	return rate > 24000000 ? 48000000 : 12000000;
}

int omap1_set_uart_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	unsigned int val;

	if (rate == 12000000)
		val = 0;
	else if (rate == 48000000)
		val = 1 << clk->enable_bit;
	else
		return -EINVAL;

	/* protect MOD_CONF_CTRL_0 register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);

	val |= __raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit);
	__raw_writel(val, clk->enable_reg);

	spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);

	clk->rate = rate;

	return 0;
}

/* External clock (MCLK & BCLK) functions */
int omap1_set_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	unsigned dsor;
	__u16 ratio_bits;

	dsor = calc_ext_dsor(rate);
	clk->rate = 96000000 / dsor;
	if (dsor > 8)
		ratio_bits = ((dsor - 8) / 2 + 6) << 2;
	else
		ratio_bits = (dsor - 2) << 2;

	/* protect SWD_CLK_DIV_CTRL_SEL register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);

	ratio_bits |= __raw_readw(clk->enable_reg) & ~0xfd;
	__raw_writew(ratio_bits, clk->enable_reg);

	spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);

	return 0;
}

static int calc_div_sossi(unsigned long rate, unsigned long p_rate)
{
	int div;

	/* Round towards slower frequency */
	div = (p_rate + rate - 1) / rate;

	return --div;
}
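
/*
 * calc_div_sossi() rounds towards the slower frequency: a 50 MHz target
 * from a 120 MHz parent gives div = 2, i.e. divide by 3 for 40 MHz.
 */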

long omap1_round_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	int div;

	div = calc_div_sossi(rate, *p_rate);
	if (div < 0)
		div = 0;
	else if (div > 7)
		div = 7;

	return *p_rate / (div + 1);
}

int omap1_set_sossi_rate(struct omap1_clk *clk, unsigned long rate, unsigned long p_rate)
{
	unsigned long flags;
	u32 l;
	int div;

	div = calc_div_sossi(rate, p_rate);
	if (div < 0 || div > 7)
		return -EINVAL;

	/* protect MOD_CONF_CTRL_1 register from concurrent access via clk_enable/disable() */
	spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);

	l = omap_readl(MOD_CONF_CTRL_1);
	l &= ~(7 << 17);
	l |= div << 17;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk->rate = p_rate / (div + 1);

	spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);

	return 0;
}

long omap1_round_ext_clk_rate(struct omap1_clk *clk, unsigned long rate, unsigned long *p_rate)
{
	return 96000000 / calc_ext_dsor(rate);
}

int omap1_init_ext_clk(struct omap1_clk *clk)
{
	unsigned dsor;
	__u16 ratio_bits;

	/* Determine current rate and ensure clock is based on 96MHz APLL */
	ratio_bits = __raw_readw(clk->enable_reg) & ~1;
	__raw_writew(ratio_bits, clk->enable_reg);

	ratio_bits = (ratio_bits & 0xfc) >> 2;
	if (ratio_bits > 6)
		dsor = (ratio_bits - 6) * 2 + 8;
	else
		dsor = ratio_bits + 2;

	clk->rate = 96000000 / dsor;

	return 0;
}
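
/*
 * The RATIO_SEL decoding above is the inverse of the encoding used in
 * omap1_set_ext_clk_rate(): e.g. a field value of 7 maps back to dsor = 10,
 * giving an initial rate of 9.6 MHz.
 */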

static int omap1_clk_enable(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));
	int ret = 0;

	if (parent && clk->flags & CLOCK_NO_IDLE_PARENT)
		omap1_clk_deny_idle(parent);

	if (clk->ops && !(WARN_ON(!clk->ops->enable)))
		ret = clk->ops->enable(clk);

	return ret;
}

static void omap1_clk_disable(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw), *parent = to_omap1_clk(clk_hw_get_parent(hw));

	if (clk->ops && !(WARN_ON(!clk->ops->disable)))
		clk->ops->disable(clk);

	if (likely(parent) && clk->flags & CLOCK_NO_IDLE_PARENT)
		omap1_clk_allow_idle(parent);
}

static int omap1_clk_enable_generic(struct omap1_clk *clk)
{
	unsigned long flags;
	__u16 regval16;
	__u32 regval32;

	if (unlikely(clk->enable_reg == NULL)) {
		printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
		       clk_hw_get_name(&clk->hw));
		return -EINVAL;
	}

	/* protect clk->enable_reg from concurrent access via clk_set_rate() */
	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_lock_irqsave(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_lock_irqsave(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 |= (1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 |= (1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_unlock_irqrestore(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_unlock_irqrestore(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);

	return 0;
}

static void omap1_clk_disable_generic(struct omap1_clk *clk)
{
	unsigned long flags;
	__u16 regval16;
	__u32 regval32;

	if (clk->enable_reg == NULL)
		return;

	/* protect clk->enable_reg from concurrent access via clk_set_rate() */
	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_lock_irqsave(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_lock_irqsave(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_lock_irqsave(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_lock_irqsave(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_lock_irqsave(&swd_clk_div_ctrl_sel_lock, flags);

	if (clk->flags & ENABLE_REG_32BIT) {
		regval32 = __raw_readl(clk->enable_reg);
		regval32 &= ~(1 << clk->enable_bit);
		__raw_writel(regval32, clk->enable_reg);
	} else {
		regval16 = __raw_readw(clk->enable_reg);
		regval16 &= ~(1 << clk->enable_bit);
		__raw_writew(regval16, clk->enable_reg);
	}

	if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_CKCTL))
		spin_unlock_irqrestore(&arm_ckctl_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(ARM_IDLECT2))
		spin_unlock_irqrestore(&arm_idlect2_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_0))
		spin_unlock_irqrestore(&mod_conf_ctrl_0_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(MOD_CONF_CTRL_1))
		spin_unlock_irqrestore(&mod_conf_ctrl_1_lock, flags);
	else if (clk->enable_reg == OMAP1_IO_ADDRESS(SWD_CLK_DIV_CTRL_SEL))
		spin_unlock_irqrestore(&swd_clk_div_ctrl_sel_lock, flags);
}

const struct clkops clkops_generic = {
	.enable		= omap1_clk_enable_generic,
	.disable	= omap1_clk_disable_generic,
};

static int omap1_clk_enable_dsp_domain(struct omap1_clk *clk)
{
	bool api_ck_was_enabled;
	int retval = 0;

	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
	if (!api_ck_was_enabled)
		retval = api_ck_p->ops->enable(api_ck_p);

	if (!retval) {
		retval = omap1_clk_enable_generic(clk);

		if (!api_ck_was_enabled)
			api_ck_p->ops->disable(api_ck_p);
	}

	return retval;
}

static void omap1_clk_disable_dsp_domain(struct omap1_clk *clk)
{
	bool api_ck_was_enabled;

	api_ck_was_enabled = omap1_clk_is_enabled(&api_ck_p->hw);
	if (!api_ck_was_enabled)
		if (api_ck_p->ops->enable(api_ck_p) < 0)
			return;

	omap1_clk_disable_generic(clk);

	if (!api_ck_was_enabled)
		api_ck_p->ops->disable(api_ck_p);
}

const struct clkops clkops_dspck = {
	.enable		= omap1_clk_enable_dsp_domain,
	.disable	= omap1_clk_disable_dsp_domain,
};

/* XXX SYSC register handling does not belong in the clock framework */
static int omap1_clk_enable_uart_functional_16xx(struct omap1_clk *clk)
{
	int ret;
	struct uart_clk *uclk;

	ret = omap1_clk_enable_generic(clk);
	if (ret == 0) {
		/* Set smart idle acknowledgement mode */
		uclk = (struct uart_clk *)clk;
		omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
			    uclk->sysc_addr);
	}

	return ret;
}

/* XXX SYSC register handling does not belong in the clock framework */
static void omap1_clk_disable_uart_functional_16xx(struct omap1_clk *clk)
{
	struct uart_clk *uclk;

	/* Set force idle acknowledgement mode */
	uclk = (struct uart_clk *)clk;
	omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);

	omap1_clk_disable_generic(clk);
}

/* XXX SYSC register handling does not belong in the clock framework */
const struct clkops clkops_uart_16xx = {
	.enable		= omap1_clk_enable_uart_functional_16xx,
	.disable	= omap1_clk_disable_uart_functional_16xx,
};

static unsigned long omap1_clk_recalc_rate(struct clk_hw *hw, unsigned long p_rate)
{
	struct omap1_clk *clk = to_omap1_clk(hw);

	if (clk->recalc)
		return clk->recalc(clk, p_rate);

	return clk->rate;
}

static long omap1_clk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *p_rate)
{
	struct omap1_clk *clk = to_omap1_clk(hw);

	if (clk->round_rate != NULL)
		return clk->round_rate(clk, rate, p_rate);

	return omap1_clk_recalc_rate(hw, *p_rate);
}

static int omap1_clk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long p_rate)
{
	struct omap1_clk *clk = to_omap1_clk(hw);
	int ret = -EINVAL;

	if (clk->set_rate)
		ret = clk->set_rate(clk, rate, p_rate);

	return ret;
}

/*
 * Omap1 clock reset and init functions
 */

static int omap1_clk_init_op(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw);

	if (clk->init)
		return clk->init(clk);

	return 0;
}

#ifdef CONFIG_OMAP_RESET_CLOCKS

static void omap1_clk_disable_unused(struct clk_hw *hw)
{
	struct omap1_clk *clk = to_omap1_clk(hw);
	const char *name = clk_hw_get_name(hw);

	/* Clocks in the DSP domain need api_ck. Just assume the bootloader
	 * has not enabled any DSP clocks */
	if (clk->enable_reg == DSP_IDLECT2) {
		pr_info("Skipping reset check for DSP domain clock \"%s\"\n", name);
		return;
	}

	pr_info("Disabling unused clock \"%s\"... ", name);
	omap1_clk_disable(hw);
	printk(" done\n");
}

#endif

const struct clk_ops omap1_clk_gate_ops = {
	.enable		= omap1_clk_enable,
	.disable	= omap1_clk_disable,
	.is_enabled	= omap1_clk_is_enabled,
#ifdef CONFIG_OMAP_RESET_CLOCKS
	.disable_unused	= omap1_clk_disable_unused,
#endif
};

const struct clk_ops omap1_clk_rate_ops = {
	.recalc_rate	= omap1_clk_recalc_rate,
	.round_rate	= omap1_clk_round_rate,
	.set_rate	= omap1_clk_set_rate,
	.init		= omap1_clk_init_op,
};

const struct clk_ops omap1_clk_full_ops = {
	.enable		= omap1_clk_enable,
	.disable	= omap1_clk_disable,
	.is_enabled	= omap1_clk_is_enabled,
#ifdef CONFIG_OMAP_RESET_CLOCKS
	.disable_unused	= omap1_clk_disable_unused,
#endif
	.recalc_rate	= omap1_clk_recalc_rate,
	.round_rate	= omap1_clk_round_rate,
	.set_rate	= omap1_clk_set_rate,
	.init		= omap1_clk_init_op,
};

/*
 * OMAP specific clock functions shared between omap1 and omap2
 */

/* Used for clocks that always have the same value as the parent clock */
unsigned long followparent_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	return p_rate;
}

/*
 * Used for clocks that have the same value as the parent clock,
 * divided by some factor
 */
unsigned long omap_fixed_divisor_recalc(struct omap1_clk *clk, unsigned long p_rate)
{
	WARN_ON(!clk->fixed_div);

	return p_rate / clk->fixed_div;
}

/* Propagate rate to children */
void propagate_rate(struct omap1_clk *tclk)
{
	struct clk *clkp;

	/* depend on CCF ability to recalculate new rates across the whole clock subtree */
	if (WARN_ON(!(clk_hw_get_flags(&tclk->hw) & CLK_GET_RATE_NOCACHE)))
		return;

	clkp = clk_get_sys(NULL, clk_hw_get_name(&tclk->hw));
	if (WARN_ON(!clkp))
		return;

	clk_get_rate(clkp);
	clk_put(clkp);
}

const struct clk_ops omap1_clk_null_ops = {
};

/*
 * Dummy clock
 *
 * Used for clock aliases that are needed on some OMAPs, but not others
 */
struct omap1_clk dummy_ck __refdata = {
	.hw.init	= CLK_HW_INIT_NO_PARENT("dummy", &omap1_clk_null_ops, 0),
};