/*-
 * Copyright (c) 2017 Emmanuel Vadot <manu@freebsd.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>

#include <dev/extres/clk/clk.h>

#include <arm/allwinner/clkng/aw_clk.h>
#include <arm/allwinner/clkng/aw_clk_nm.h>

#include "clkdev_if.h"

/*
 * clknode for clocks matching the formula:
 *
 * clk = clkin / n / m
 *
 */
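
/*
 * Illustrative sketch only: how a clock provider might describe an NM
 * clock with struct aw_clk_nm_def and hand it to aw_clk_nm_register().
 * The clock name, parents, register offset and field layout below are
 * hypothetical and not taken from any real SoC; the authoritative
 * definition of struct aw_clk_nm_def is in aw_clk_nm.h.
 *
 *	static const char *mod_parents[] = { "osc24M", "pll_periph" };
 *
 *	static struct aw_clk_nm_def mod_clk = {
 *		.clkdef = {
 *			.id = 42,
 *			.name = "mod-clk",
 *			.parent_names = mod_parents,
 *			.parent_cnt = nitems(mod_parents),
 *		},
 *		.offset = 0x88,
 *		.n = { .shift = 16, .width = 2,
 *		    .flags = AW_CLK_FACTOR_POWER_OF_TWO },
 *		.m = { .shift = 0, .width = 4 },
 *		.mux_shift = 24,
 *		.mux_width = 2,
 *		.gate_shift = 31,
 *		.flags = AW_CLK_HAS_MUX | AW_CLK_HAS_GATE,
 *	};
 *
 *	error = aw_clk_nm_register(clkdom, &mod_clk);
 *
 * The resulting node divides the selected parent by the decoded n and m
 * factors, i.e. clk = clkin / n / m as described above.
 */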

struct aw_clk_nm_sc {
	uint32_t	offset;

	struct aw_clk_factor	m;
	struct aw_clk_factor	n;
	struct aw_clk_factor	prediv;

	uint32_t	mux_shift;
	uint32_t	mux_mask;
	uint32_t	gate_shift;
	uint32_t	lock_shift;
	uint32_t	lock_retries;

	uint32_t	flags;
};

#define	WRITE4(_clk, off, val)						\
	CLKDEV_WRITE_4(clknode_get_device(_clk), off, val)
#define	READ4(_clk, off, val)						\
	CLKDEV_READ_4(clknode_get_device(_clk), off, val)
#define	DEVICE_LOCK(_clk)						\
	CLKDEV_DEVICE_LOCK(clknode_get_device(_clk))
#define	DEVICE_UNLOCK(_clk)						\
	CLKDEV_DEVICE_UNLOCK(clknode_get_device(_clk))

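/*
 * Discover which parent the mux currently selects (if this clock has a
 * mux at all) so the clk framework starts from the hardware state.
 */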
static int
aw_clk_nm_init(struct clknode *clk, device_t dev)
{
	struct aw_clk_nm_sc *sc;
	uint32_t val, idx;

	sc = clknode_get_softc(clk);

	idx = 0;
	if ((sc->flags & AW_CLK_HAS_MUX) != 0) {
		DEVICE_LOCK(clk);
		READ4(clk, sc->offset, &val);
		DEVICE_UNLOCK(clk);

		idx = (val & sc->mux_mask) >> sc->mux_shift;
	}

	clknode_init_parent_idx(clk, idx);
	return (0);
}

static int
aw_clk_nm_set_gate(struct clknode *clk, bool enable)
{
	struct aw_clk_nm_sc *sc;
	uint32_t val;

	sc = clknode_get_softc(clk);

	if ((sc->flags & AW_CLK_HAS_GATE) == 0)
		return (0);

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);
	if (enable)
		val |= (1 << sc->gate_shift);
	else
		val &= ~(1 << sc->gate_shift);
	WRITE4(clk, sc->offset, val);
	DEVICE_UNLOCK(clk);

	return (0);
}

static int
aw_clk_nm_set_mux(struct clknode *clk, int index)
{
	struct aw_clk_nm_sc *sc;
	uint32_t val;

	sc = clknode_get_softc(clk);

	if ((sc->flags & AW_CLK_HAS_MUX) == 0)
		return (0);

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);
	val &= ~sc->mux_mask;
	val |= index << sc->mux_shift;
	WRITE4(clk, sc->offset, val);
	DEVICE_UNLOCK(clk);

	return (0);
}

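/*
 * Walk every (n, m) pair permitted by the factor definitions (stepping by
 * powers of two where a factor requires it) and return the achievable
 * output frequency closest to *fout, reporting the chosen factors through
 * *factor_n and *factor_m.
 */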
static uint64_t
aw_clk_nm_find_best(struct aw_clk_nm_sc *sc, uint64_t fparent, uint64_t *fout,
    uint32_t *factor_n, uint32_t *factor_m)
{
	uint64_t cur, best;
	uint32_t m, n, max_m, max_n, min_m, min_n;

	*factor_n = *factor_m = 0;
	/* Start with no candidate so the first computed frequency wins. */
	best = 0;

	max_m = aw_clk_factor_get_max(&sc->m);
	max_n = aw_clk_factor_get_max(&sc->n);
	min_m = aw_clk_factor_get_min(&sc->m);
	min_n = aw_clk_factor_get_min(&sc->n);

	for (m = min_m; m <= max_m; ) {
		for (n = min_n; n <= max_n; ) {
			cur = fparent / n / m;
			if (clk_freq_diff(*fout, cur) <
			    clk_freq_diff(*fout, best)) {
				best = cur;
				*factor_n = n;
				*factor_m = m;
			}

			if ((sc->n.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
				n <<= 1;
			else
				n++;
		}
		if ((sc->m.flags & AW_CLK_FACTOR_POWER_OF_TWO) != 0)
			m <<= 1;
		else
			m++;
	}

	return (best);
}

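/*
 * Program the divisors (and, when AW_CLK_REPARENT is set, possibly a new
 * parent) that bring the output closest to *fout, honouring the round
 * up/down and dry-run flags, then wait for the lock bit if the clock has
 * one.
 */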
static int
aw_clk_nm_set_freq(struct clknode *clk, uint64_t fparent, uint64_t *fout,
    int flags, int *stop)
{
	struct aw_clk_nm_sc *sc;
	struct clknode *p_clk;
	const char **p_names;
	uint64_t cur, best;
	uint32_t val, m, n, best_m, best_n;
	int p_idx, best_parent, retry;

	sc = clknode_get_softc(clk);

	best = cur = 0;
	best_parent = 0;

	if ((sc->flags & AW_CLK_REPARENT) != 0) {
		p_names = clknode_get_parent_names(clk);
		for (p_idx = 0; p_idx != clknode_get_parents_num(clk); p_idx++) {
			p_clk = clknode_find_by_name(p_names[p_idx]);
			clknode_get_freq(p_clk, &fparent);

			cur = aw_clk_nm_find_best(sc, fparent, fout, &n, &m);
			if (clk_freq_diff(*fout, cur) <
			    clk_freq_diff(*fout, best)) {
				best = cur;
				best_parent = p_idx;
				best_n = n;
				best_m = m;
			}
		}

		p_idx = clknode_get_parent_idx(clk);
		p_clk = clknode_get_parent(clk);
		clknode_get_freq(p_clk, &fparent);
	} else {
		best = aw_clk_nm_find_best(sc, fparent, fout,
		    &best_n, &best_m);
	}

	if ((flags & CLK_SET_DRYRUN) != 0) {
		*fout = best;
		*stop = 1;
		return (0);
	}

	if ((best < *fout) &&
	  ((flags & CLK_SET_ROUND_DOWN) == 0)) {
		*stop = 1;
		printf("best freq (%ju) < requested freq (%ju)\n",
		    best, *fout);
		return (ERANGE);
	}
	if ((best > *fout) &&
	  ((flags & CLK_SET_ROUND_UP) == 0)) {
		*stop = 1;
		printf("best freq (%ju) > requested freq (%ju)\n",
		    best, *fout);
		return (ERANGE);
	}

	if ((sc->flags & AW_CLK_REPARENT) != 0 && p_idx != best_parent)
		clknode_set_parent_by_idx(clk, best_parent);

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);

	n = aw_clk_factor_get_value(&sc->n, best_n);
	m = aw_clk_factor_get_value(&sc->m, best_m);
	val &= ~sc->n.mask;
	val &= ~sc->m.mask;
	val |= n << sc->n.shift;
	val |= m << sc->m.shift;

	WRITE4(clk, sc->offset, val);
	DEVICE_UNLOCK(clk);

	if ((sc->flags & AW_CLK_HAS_LOCK) != 0) {
		for (retry = 0; retry < sc->lock_retries; retry++) {
			READ4(clk, sc->offset, &val);
			if ((val & (1 << sc->lock_shift)) != 0)
				break;
			DELAY(1000);
		}
	}

	*fout = best;
	*stop = 1;

	return (0);
}

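/*
 * Derive the output frequency from the parent frequency passed in *freq
 * and the divisor fields currently programmed in the register.
 */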
static int
aw_clk_nm_recalc(struct clknode *clk, uint64_t *freq)
{
	struct aw_clk_nm_sc *sc;
	uint32_t val, m, n, prediv;

	sc = clknode_get_softc(clk);

	DEVICE_LOCK(clk);
	READ4(clk, sc->offset, &val);
	DEVICE_UNLOCK(clk);

	m = aw_clk_get_factor(val, &sc->m);
	n = aw_clk_get_factor(val, &sc->n);
	if (sc->flags & AW_CLK_HAS_PREDIV)
		prediv = aw_clk_get_factor(val, &sc->prediv);
	else
		prediv = 1;

	*freq = *freq / prediv / n / m;

	return (0);
}

static clknode_method_t aw_nm_clknode_methods[] = {
	/* clknode interface */
	CLKNODEMETHOD(clknode_init,		aw_clk_nm_init),
	CLKNODEMETHOD(clknode_set_gate,		aw_clk_nm_set_gate),
	CLKNODEMETHOD(clknode_set_mux,		aw_clk_nm_set_mux),
	CLKNODEMETHOD(clknode_recalc_freq,	aw_clk_nm_recalc),
	CLKNODEMETHOD(clknode_set_freq,		aw_clk_nm_set_freq),
	CLKNODEMETHOD_END
};

DEFINE_CLASS_1(aw_nm_clknode, aw_nm_clknode_class, aw_nm_clknode_methods,
    sizeof(struct aw_clk_nm_sc), clknode_class);

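/*
 * Instantiate an NM clknode from a static definition, expanding the field
 * widths declared there into the shifted masks the methods above operate
 * on, then register the node with the clock domain.
 */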
int
aw_clk_nm_register(struct clkdom *clkdom, struct aw_clk_nm_def *clkdef)
{
	struct clknode *clk;
	struct aw_clk_nm_sc *sc;

	clk = clknode_create(clkdom, &aw_nm_clknode_class, &clkdef->clkdef);
	if (clk == NULL)
		return (1);

	sc = clknode_get_softc(clk);

	sc->offset = clkdef->offset;

	sc->m.shift = clkdef->m.shift;
	sc->m.width = clkdef->m.width;
	sc->m.mask = ((1 << sc->m.width) - 1) << sc->m.shift;
	sc->m.value = clkdef->m.value;
	sc->m.flags = clkdef->m.flags;

	sc->n.shift = clkdef->n.shift;
	sc->n.width = clkdef->n.width;
	sc->n.mask = ((1 << sc->n.width) - 1) << sc->n.shift;
	sc->n.value = clkdef->n.value;
	sc->n.flags = clkdef->n.flags;

	sc->prediv.shift = clkdef->prediv.shift;
	sc->prediv.width = clkdef->prediv.width;
	sc->prediv.mask = ((1 << sc->prediv.width) - 1) << sc->prediv.shift;
	sc->prediv.value = clkdef->prediv.value;
	sc->prediv.flags = clkdef->prediv.flags;
	sc->prediv.cond_shift = clkdef->prediv.cond_shift;
	if (clkdef->prediv.cond_width != 0)
		sc->prediv.cond_mask = ((1 << clkdef->prediv.cond_width) - 1) <<
		    sc->prediv.cond_shift;
	else
		sc->prediv.cond_mask = clkdef->prediv.cond_mask;
	sc->prediv.cond_value = clkdef->prediv.cond_value;

	sc->mux_shift = clkdef->mux_shift;
	sc->mux_mask = ((1 << clkdef->mux_width) - 1) << sc->mux_shift;

	sc->gate_shift = clkdef->gate_shift;

	sc->lock_shift = clkdef->lock_shift;
	sc->lock_retries = clkdef->lock_retries;

	sc->flags = clkdef->flags;

	clknode_register(clkdom, clk);

	return (0);
}
