// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Combiner irqchip for EXYNOS
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>

#define COMBINER_ENABLE_SET	0x0
#define COMBINER_ENABLE_CLEAR	0x4
#define COMBINER_INT_STATUS	0xC

#define IRQ_IN_COMBINER		8
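
/*
 * Register layout as used below: combiner groups are packed four to a
 * register block, and blocks are laid out at a 0x10 stride.  Group n owns
 * the 8-bit field at bits 8*(n%4)..8*(n%4)+7 of the ENABLE_SET /
 * ENABLE_CLEAR / INT_STATUS registers in block n/4, and fans in
 * IRQ_IN_COMBINER (8) interrupt sources onto a single parent interrupt.
 */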

static DEFINE_SPINLOCK(irq_controller_lock);

struct combiner_chip_data {
	unsigned int hwirq_offset;
	unsigned int irq_mask;
	void __iomem *base;
	unsigned int parent_irq;
#ifdef CONFIG_PM
	u32 pm_save;
#endif
};

static struct combiner_chip_data *combiner_data;
static struct irq_domain *combiner_irq_domain;
static unsigned int max_nr = 20;

static inline void __iomem *combiner_base(struct irq_data *data)
{
	struct combiner_chip_data *combiner_data =
		irq_data_get_irq_chip_data(data);

	return combiner_data->base;
}

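/*
 * The enable bits have dedicated set/clear registers, so masking and
 * unmasking are plain single-bit writes and need no read-modify-write
 * (and hence no locking) here.
 */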
static void combiner_mask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
}

static void combiner_unmask_irq(struct irq_data *data)
{
	u32 mask = 1 << (data->hwirq % 32);

	writel_relaxed(mask, combiner_base(data) + COMBINER_ENABLE_SET);
}

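/*
 * Chained handler for the parent interrupt of one combiner group: read
 * the shared status register, keep only this group's byte lane, and
 * forward the lowest pending input into the combiner irq domain.
 */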
static void combiner_handle_cascade_irq(struct irq_desc *desc)
{
	struct combiner_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int combiner_irq;
	unsigned long status;
	int ret;

	chained_irq_enter(chip, desc);

	spin_lock(&irq_controller_lock);
	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
	spin_unlock(&irq_controller_lock);
	status &= chip_data->irq_mask;

	if (status == 0)
		goto out;

	combiner_irq = chip_data->hwirq_offset + __ffs(status);
	ret = generic_handle_domain_irq(combiner_irq_domain, combiner_irq);
	if (unlikely(ret))
		handle_bad_irq(desc);

 out:
	chained_irq_exit(chip, desc);
}

#ifdef CONFIG_SMP
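/*
 * The combiner has no notion of CPU affinity of its own; all inputs of a
 * group are funnelled into one parent interrupt, so affinity requests are
 * simply delegated to the parent's irqchip.
 */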
static int combiner_set_affinity(struct irq_data *d,
				 const struct cpumask *mask_val, bool force)
{
	struct combiner_chip_data *chip_data = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_get_chip(chip_data->parent_irq);
	struct irq_data *data = irq_get_irq_data(chip_data->parent_irq);

	if (chip && chip->irq_set_affinity)
		return chip->irq_set_affinity(data, mask_val, force);
	else
		return -EINVAL;
}
#endif

static struct irq_chip combiner_chip = {
	.name			= "COMBINER",
	.irq_mask		= combiner_mask_irq,
	.irq_unmask		= combiner_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= combiner_set_affinity,
#endif
};

static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
					unsigned int irq)
{
	irq_set_chained_handler_and_data(irq, combiner_handle_cascade_irq,
					 combiner_data);
}

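/*
 * Per-group initialization.  As an illustrative example, combiner group 5
 * lives in register block 5/4 = 1 (offset 0x10), uses byte lane 5%4 = 1
 * (irq_mask = 0xff00) and starts at hwirq_offset = (5 & ~3) * 8 = 32, so
 * its pending bits 8..15 translate to hwirqs 40..47, matching the
 * group * 8 + pin encoding used by the xlate callback below.
 */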
static void __init combiner_init_one(struct combiner_chip_data *combiner_data,
				     unsigned int combiner_nr,
				     void __iomem *base, unsigned int irq)
{
	combiner_data->base = base;
	combiner_data->hwirq_offset = (combiner_nr & ~3) * IRQ_IN_COMBINER;
	combiner_data->irq_mask = 0xff << ((combiner_nr % 4) << 3);
	combiner_data->parent_irq = irq;

	/* Disable all interrupts */
	writel_relaxed(combiner_data->irq_mask, base + COMBINER_ENABLE_CLEAR);
}

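/*
 * Translate a two-cell devicetree interrupt specifier <group pin> into a
 * linear hwirq, group * 8 + pin.  An illustrative (not board-specific)
 * consumer would look like:
 *
 *	interrupt-parent = <&combiner>;
 *	interrupts = <5 2>;	/\* input 2 of group 5, i.e. hwirq 42 *\/
 */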
static int combiner_irq_domain_xlate(struct irq_domain *d,
				     struct device_node *controller,
				     const u32 *intspec, unsigned int intsize,
				     unsigned long *out_hwirq,
				     unsigned int *out_type)
{
	if (irq_domain_get_of_node(d) != controller)
		return -EINVAL;

	if (intsize < 2)
		return -EINVAL;

	*out_hwirq = intspec[0] * IRQ_IN_COMBINER + intspec[1];
	*out_type = 0;

	return 0;
}

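/*
 * hw >> 3 recovers the owning combiner group from the linear hwirq, since
 * each group contributes exactly IRQ_IN_COMBINER (8) inputs.
 */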
static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
				   irq_hw_number_t hw)
{
	struct combiner_chip_data *combiner_data = d->host_data;

	irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
	irq_set_chip_data(irq, &combiner_data[hw >> 3]);
	irq_set_probe(irq);

	return 0;
}

static const struct irq_domain_ops combiner_irq_domain_ops = {
	.xlate	= combiner_irq_domain_xlate,
	.map	= combiner_irq_domain_map,
};

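/*
 * Allocate the per-group state, register one linear irq domain covering
 * max_nr * 8 hwirqs, and chain every combiner group onto the parent
 * interrupt described by the corresponding "interrupts" entry of the DT
 * node.
 */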
static void __init combiner_init(void __iomem *combiner_base,
				 struct device_node *np)
{
	int i, irq;
	unsigned int nr_irq;

	nr_irq = max_nr * IRQ_IN_COMBINER;

	combiner_data = kcalloc(max_nr, sizeof(*combiner_data), GFP_KERNEL);
	if (!combiner_data)
		return;

	combiner_irq_domain = irq_domain_add_linear(np, nr_irq,
				&combiner_irq_domain_ops, combiner_data);
	if (WARN_ON(!combiner_irq_domain)) {
		pr_warn("%s: irq domain init failed\n", __func__);
		return;
	}

	for (i = 0; i < max_nr; i++) {
		irq = irq_of_parse_and_map(np, i);

		combiner_init_one(&combiner_data[i], i,
				  combiner_base + (i >> 2) * 0x10, irq);
		combiner_cascade_irq(&combiner_data[i], irq);
	}
}

#ifdef CONFIG_PM

/**
 * combiner_suspend - save interrupt combiner state before suspend
 *
 * Save the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state.
 *
 */
static int combiner_suspend(void)
{
	int i;

	for (i = 0; i < max_nr; i++)
		combiner_data[i].pm_save =
			readl_relaxed(combiner_data[i].base + COMBINER_ENABLE_SET);

	return 0;
}

/**
 * combiner_resume - restore interrupt combiner state after resume
 *
 * Restore the interrupt enable set register for all combiner groups since
 * the state is lost when the system enters into a sleep state on suspend.
 *
 */
static void combiner_resume(void)
{
	int i;

	for (i = 0; i < max_nr; i++) {
		writel_relaxed(combiner_data[i].irq_mask,
			       combiner_data[i].base + COMBINER_ENABLE_CLEAR);
		writel_relaxed(combiner_data[i].pm_save,
			       combiner_data[i].base + COMBINER_ENABLE_SET);
	}
}

#else
#define combiner_suspend	NULL
#define combiner_resume		NULL
#endif

static struct syscore_ops combiner_syscore_ops = {
	.suspend	= combiner_suspend,
	.resume		= combiner_resume,
};

static int __init combiner_of_init(struct device_node *np,
				   struct device_node *parent)
{
	void __iomem *combiner_base;

	combiner_base = of_iomap(np, 0);
	if (!combiner_base) {
		pr_err("%s: failed to map combiner registers\n", __func__);
		return -ENXIO;
	}

	if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr))
		pr_info("%s: number of combiners not specified, setting default as %d.\n",
			__func__, max_nr);

	combiner_init(combiner_base, np);

	register_syscore_ops(&combiner_syscore_ops);

	return 0;
}
IRQCHIP_DECLARE(exynos4210_combiner, "samsung,exynos4210-combiner",
		combiner_of_init);