// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Intel Corporation.
 * Lei Chuanhua <Chuanhua.lei@intel.com>
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>

#define RCU_RST_STAT	0x0024
#define RCU_RST_REQ	0x0048

#define REG_OFFSET_MASK	GENMASK(31, 16)
#define BIT_OFFSET_MASK	GENMASK(15, 8)
#define STAT_BIT_OFFSET_MASK	GENMASK(7, 0)
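
/*
 * A reset control id packs both register and bit offsets: bits 31:16 hold
 * the reset request register offset, bits 15:8 the request bit position
 * and, on legacy SoCs only, bits 7:0 the status bit position (see
 * intel_reset_xlate()).
 */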

#define to_reset_data(x)	container_of(x, struct intel_reset_data, rcdev)

struct intel_reset_soc {
	bool legacy;
	u32 reset_cell_count;
};

struct intel_reset_data {
	struct reset_controller_dev rcdev;
	struct notifier_block restart_nb;
	const struct intel_reset_soc *soc_data;
	struct regmap *regmap;
	struct device *dev;
	u32 reboot_id;
};

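/* MMIO regmap over the RCU registers; fast_io selects spinlock locking */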
static const struct regmap_config intel_rcu_regmap_config = {
	.name =		"intel-reset",
	.reg_bits =	32,
	.reg_stride =	4,
	.val_bits =	32,
	.fast_io =	true,
};

/*
 * The reset status register normally sits at offset X + 4 relative to the
 * reset control register at offset X; the exception is the legacy global
 * reset request register (RCU_RST_REQ), whose status bits live in
 * RCU_RST_STAT.
 */
static u32 id_to_reg_and_bit_offsets(struct intel_reset_data *data,
				     unsigned long id, u32 *rst_req,
				     u32 *req_bit, u32 *stat_bit)
{
	*rst_req = FIELD_GET(REG_OFFSET_MASK, id);
	*req_bit = FIELD_GET(BIT_OFFSET_MASK, id);

	if (data->soc_data->legacy)
		*stat_bit = FIELD_GET(STAT_BIT_OFFSET_MASK, id);
	else
		*stat_bit = *req_bit;

	if (data->soc_data->legacy && *rst_req == RCU_RST_REQ)
		return RCU_RST_STAT;
	else
		return *rst_req + 0x4;
}

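/*
 * Set or clear the request bit in the reset request register, then poll
 * the matching status register (20us interval, 200us timeout) until the
 * status bit reflects the requested state.
 */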
static int intel_set_clr_bits(struct intel_reset_data *data, unsigned long id,
			      bool set)
{
	u32 rst_req, req_bit, rst_stat, stat_bit, val;
	int ret;

	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
					     &req_bit, &stat_bit);

	val = set ? BIT(req_bit) : 0;
	ret = regmap_update_bits(data->regmap, rst_req, BIT(req_bit), val);
	if (ret)
		return ret;

	return regmap_read_poll_timeout(data->regmap, rst_stat, val,
					set == !!(val & BIT(stat_bit)), 20,
					200);
}

static int intel_assert_device(struct reset_controller_dev *rcdev,
			       unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	int ret;

	ret = intel_set_clr_bits(data, id, true);
	if (ret)
		dev_err(data->dev, "Reset assert failed %d\n", ret);

	return ret;
}

static int intel_deassert_device(struct reset_controller_dev *rcdev,
				 unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	int ret;

	ret = intel_set_clr_bits(data, id, false);
	if (ret)
		dev_err(data->dev, "Reset deassert failed %d\n", ret);

	return ret;
}

static int intel_reset_status(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	u32 rst_req, req_bit, rst_stat, stat_bit, val;
	int ret;

	rst_stat = id_to_reg_and_bit_offsets(data, id, &rst_req,
					     &req_bit, &stat_bit);
	ret = regmap_read(data->regmap, rst_stat, &val);
	if (ret)
		return ret;

	return !!(val & BIT(stat_bit));
}

static const struct reset_control_ops intel_reset_ops = {
	.assert =	intel_assert_device,
	.deassert =	intel_deassert_device,
	.status	=	intel_reset_status,
};

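/*
 * Translate a DT reset specifier into the packed id described above.
 * LGM uses two cells <register-offset request-bit>; legacy xRX200 uses
 * three cells <register-offset request-bit status-bit>, e.g. (with a
 * hypothetical controller label and values) resets = <&rcu0 0x10 7 14>;
 */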
static int intel_reset_xlate(struct reset_controller_dev *rcdev,
			     const struct of_phandle_args *spec)
{
	struct intel_reset_data *data = to_reset_data(rcdev);
	u32 id;

	if (spec->args[1] > 31)
		return -EINVAL;

	id = FIELD_PREP(REG_OFFSET_MASK, spec->args[0]);
	id |= FIELD_PREP(BIT_OFFSET_MASK, spec->args[1]);

	if (data->soc_data->legacy) {
		if (spec->args[2] > 31)
			return -EINVAL;

		id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, spec->args[2]);
	}

	return id;
}

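/*
 * Reboot by asserting the global reset line described by the
 * "intel,global-reset" property, which probe packs into reboot_id.
 */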
static int intel_reset_restart_handler(struct notifier_block *nb,
				       unsigned long action, void *data)
{
	struct intel_reset_data *reset_data;

	reset_data = container_of(nb, struct intel_reset_data, restart_nb);
	intel_assert_device(&reset_data->rcdev, reset_data->reboot_id);

	return NOTIFY_DONE;
}

static int intel_reset_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct intel_reset_data *data;
	void __iomem *base;
	u32 rb_id[3];
	int ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->soc_data = of_device_get_match_data(dev);
	if (!data->soc_data)
		return -ENODEV;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	data->regmap = devm_regmap_init_mmio(dev, base,
					     &intel_rcu_regmap_config);
	if (IS_ERR(data->regmap)) {
		dev_err(dev, "regmap initialization failed\n");
		return PTR_ERR(data->regmap);
	}

	ret = device_property_read_u32_array(dev, "intel,global-reset", rb_id,
					     data->soc_data->reset_cell_count);
	if (ret) {
		dev_err(dev, "Failed to get global reset offset!\n");
		return ret;
	}

	data->dev =			dev;
	data->rcdev.of_node =		np;
	data->rcdev.owner =		dev->driver->owner;
	data->rcdev.ops	=		&intel_reset_ops;
	data->rcdev.of_xlate =		intel_reset_xlate;
	data->rcdev.of_reset_n_cells =	data->soc_data->reset_cell_count;
	ret = devm_reset_controller_register(&pdev->dev, &data->rcdev);
	if (ret)
		return ret;

	data->reboot_id = FIELD_PREP(REG_OFFSET_MASK, rb_id[0]);
	data->reboot_id |= FIELD_PREP(BIT_OFFSET_MASK, rb_id[1]);

	if (data->soc_data->legacy)
		data->reboot_id |= FIELD_PREP(STAT_BIT_OFFSET_MASK, rb_id[2]);

	data->restart_nb.notifier_call =	intel_reset_restart_handler;
	data->restart_nb.priority =		128;
	register_restart_handler(&data->restart_nb);

	return 0;
}

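/*
 * Legacy SoCs (xRX200) take an extra status bit cell in the reset
 * specifier; LGM derives the status bit from the request bit.
 */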
static const struct intel_reset_soc xrx200_data = {
	.legacy =		true,
	.reset_cell_count =	3,
};

static const struct intel_reset_soc lgm_data = {
	.legacy =		false,
	.reset_cell_count =	2,
};

static const struct of_device_id intel_reset_match[] = {
	{ .compatible = "intel,rcu-lgm", .data = &lgm_data },
	{ .compatible = "intel,rcu-xrx200", .data = &xrx200_data },
	{}
};

static struct platform_driver intel_reset_driver = {
	.probe = intel_reset_probe,
	.driver = {
		.name = "intel-reset",
		.of_match_table = intel_reset_match,
	},
};

static int __init intel_reset_init(void)
{
	return platform_driver_register(&intel_reset_driver);
}

/*
 * The RCU is a system core entity in the Always On domain; its clocks and
 * resources are initialized during system core initialization. Most
 * platform- and architecture-specific devices need to perform a reset as
 * part of their own initialization, so register this driver at postcore
 * initcall level.
 */
postcore_initcall(intel_reset_init);