// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 MaxLinear, Inc.
 * Copyright (C) 2020 Intel Corporation.
 * Zhu Yixin <yzhu@maxlinear.com>
 * Rahul Tanwar <rtanwar@maxlinear.com>
 */

#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/of.h>

#include "clk-cgu.h"

#define to_lgm_clk_pll(_hw)	container_of(_hw, struct lgm_clk_pll, hw)
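/* mult/div fields live in a second register, 0x08 above the PLL control register */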
#define PLL_REF_DIV(x)		((x) + 0x08)

/*
 * Calculate the PLL output rate using the formula:
 * rate = (prate * mult + (prate * frac) / frac_div) / div
 */
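/* e.g. prate = 40 MHz, mult = 30, frac = 0, div = 1 gives 1200 MHz (illustrative values only) */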
static unsigned long
lgm_pll_calc_rate(unsigned long prate, unsigned int mult,
		  unsigned int div, unsigned int frac, unsigned int frac_div)
{
	u64 crate, frate, rate64;

	rate64 = prate;
	crate = rate64 * mult;
	frate = rate64 * frac;
	do_div(frate, frac_div);
	crate += frate;
	do_div(crate, div);

	return crate;
}

static unsigned long lgm_pll_recalc_rate(struct clk_hw *hw, unsigned long prate)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	unsigned int div, mult, frac;

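	/*
	 * mult lives in bits [11:0] and div in bits [23:18] of the REF_DIV
	 * register; frac is a 24-bit fraction in bits [25:2] of the control
	 * register (hence frac_div = 2^24 below).
	 */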
	mult = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 0, 12);
	div = lgm_get_clk_val(pll->membase, PLL_REF_DIV(pll->reg), 18, 6);
	frac = lgm_get_clk_val(pll->membase, pll->reg, 2, 24);

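	/* LJPLL output is further divided by a fixed factor of 4 */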
	if (pll->type == TYPE_LJPLL)
		div *= 4;

	return lgm_pll_calc_rate(prate, mult, div, frac, BIT(24));
}

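/* Bit 0 of the PLL control register is the enable bit */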
static int lgm_pll_is_enabled(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);

	return lgm_get_clk_val(pll->membase, pll->reg, 0, 1);
}

static int lgm_pll_enable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);
	u32 val;
	int ret;

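	/*
	 * Set the enable bit, then poll (1us interval, 100us timeout)
	 * until the hardware reports it set.
	 */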
	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 1);
	ret = regmap_read_poll_timeout_atomic(pll->membase, pll->reg,
					      val, (val & 0x1), 1, 100);

	return ret;
}

static void lgm_pll_disable(struct clk_hw *hw)
{
	struct lgm_clk_pll *pll = to_lgm_clk_pll(hw);

	lgm_set_clk_val(pll->membase, pll->reg, 0, 1, 0);
}

static const struct clk_ops lgm_pll_ops = {
	.recalc_rate = lgm_pll_recalc_rate,
	.is_enabled = lgm_pll_is_enabled,
	.enable = lgm_pll_enable,
	.disable = lgm_pll_disable,
};

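/*
 * The clk core copies the init data during registration, so a stack-local
 * clk_init_data is sufficient here.
 */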
static struct clk_hw *
lgm_clk_register_pll(struct lgm_clk_provider *ctx,
		     const struct lgm_pll_clk_data *list)
{
	struct clk_init_data init = {};
	struct lgm_clk_pll *pll;
	struct device *dev = ctx->dev;
	struct clk_hw *hw;
	int ret;

	init.ops = &lgm_pll_ops;
	init.name = list->name;
	init.flags = list->flags;
	init.parent_data = list->parent_data;
	init.num_parents = list->num_parents;

	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	pll->membase = ctx->membase;
	pll->reg = list->reg;
	pll->flags = list->flags;
	pll->type = list->type;
	pll->hw.init = &init;

	hw = &pll->hw;
	ret = devm_clk_hw_register(dev, hw);
	if (ret)
		return ERR_PTR(ret);

	return hw;
}

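/*
 * Register every PLL described in @list and store the resulting clk_hw
 * pointers in the provider's clk_data table, indexed by clock id.
 */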
int lgm_clk_register_plls(struct lgm_clk_provider *ctx,
			  const struct lgm_pll_clk_data *list,
			  unsigned int nr_clk)
{
	struct clk_hw *hw;
	int i;

	for (i = 0; i < nr_clk; i++, list++) {
		hw = lgm_clk_register_pll(ctx, list);
		if (IS_ERR(hw)) {
			dev_err(ctx->dev, "failed to register pll: %s\n",
				list->name);
			return PTR_ERR(hw);
		}
		ctx->clk_data.hws[list->id] = hw;
	}

	return 0;
}