// SPDX-License-Identifier: GPL-2.0-only
/*
 * Freescale SCFG MSI(-X) support
 *
 * Copyright (C) 2016 Freescale Semiconductor.
 *
 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/spinlock.h>

#define MSI_IRQS_PER_MSIR	32
#define MSI_MSIR_OFFSET		4

#define MSI_LS1043V1_1_IRQS_PER_MSIR	8
#define MSI_LS1043V1_1_MSIR_OFFSET	0x10

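/*
 * Per-SoC parameters of the SCFG MSI block, as modelled by this driver:
 * a 32-bit write to MSIIR raises an MSI, and the written value encodes
 * an interrupt bit select (IBS, which MSIR bit gets set) and a shared
 * interrupt register select (SRS, which MSIR register / output
 * interrupt is used). The driver linearizes both fields into a single
 * hwirq number:
 *
 *	hwirq = (ibs << ibs_shift) | srs
 *
 * so the SRS field occupies the low ibs_shift bits. This is a sketch of
 * the encoding as implemented below; the exact register-level field
 * widths come from the respective SoC reference manuals.
 */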
struct ls_scfg_msi_cfg {
	u32 ibs_shift; /* Shift of interrupt bit select */
	u32 msir_irqs; /* Number of IRQs per MSIR register */
	u32 msir_base; /* Offset of the first MSIR register */
};

struct ls_scfg_msir {
	struct ls_scfg_msi *msi_data;
	unsigned int index;
	unsigned int gic_irq;
	unsigned int bit_start;
	unsigned int bit_end;
	unsigned int srs; /* Shared interrupt register select */
	void __iomem *reg;
};

struct ls_scfg_msi {
	spinlock_t		lock;
	struct platform_device	*pdev;
	struct irq_domain	*parent;
	struct irq_domain	*msi_domain;
	void __iomem		*regs;
	phys_addr_t		msiir_addr;
	struct ls_scfg_msi_cfg	*cfg;
	u32			msir_num;
	struct ls_scfg_msir	*msir;
	u32			irqs_num;
	unsigned long		*used;
};

static struct irq_chip ls_scfg_msi_irq_chip = {
	.name = "MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS |
		   MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &ls_scfg_msi_irq_chip,
};

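/*
 * "Affinity mode" steers each MSI to the CPU named in its effective
 * affinity mask by encoding that CPU number in the SRS field (MSIR n
 * is affined to CPU n in ls_scfg_msi_setup_hwirq()). It is enabled by
 * default and can be disabled with the "lsmsi=no-affinity" kernel
 * command-line option, in which case every MSIR keeps a fixed SRS and
 * interrupts cannot be retargeted.
 */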
static int msi_affinity_flag = 1;

static int __init early_parse_ls_scfg_msi(char *p)
{
	if (p && strncmp(p, "no-affinity", 11) == 0)
		msi_affinity_flag = 0;
	else
		msi_affinity_flag = 1;

	return 0;
}
early_param("lsmsi", early_parse_ls_scfg_msi);

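/*
 * The MSI message always targets MSIIR; the payload is simply the
 * hwirq (IBS | SRS). With affinity mode enabled the SRS bits of the
 * hwirq are zero (see ls_scfg_msi_setup_hwirq()), so ORing in the
 * first CPU of the effective affinity mask selects the MSIR (and
 * therefore the output interrupt) affined to that CPU.
 */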
static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
	msg->data = data->hwirq;

	if (msi_affinity_flag) {
		const struct cpumask *mask;

		mask = irq_data_get_effective_affinity_mask(data);
		msg->data |= cpumask_first(mask);
	}

	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(data), msg);
}

static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(irq_data);
	u32 cpu;

	if (!msi_affinity_flag)
		return -EINVAL;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);

	if (cpu >= msi_data->msir_num)
		return -EINVAL;

	if (msi_data->msir[cpu].gic_irq <= 0) {
		pr_warn("cannot bind the irq to cpu%d\n", cpu);
		return -EINVAL;
	}

	irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static struct irq_chip ls_scfg_msi_parent_chip = {
	.name			= "SCFG",
	.irq_compose_msi_msg	= ls_scfg_msi_compose_msg,
	.irq_set_affinity	= ls_scfg_msi_set_affinity,
};

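/*
 * Parent-domain alloc/free: hwirqs are handed out from the "used"
 * bitmap (only bits cleared in ls_scfg_msi_setup_hwirq() are
 * allocatable), and the MSI doorbell address is registered with the
 * IOMMU layer so that devices behind an SMMU can reach MSIIR.
 */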
static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	msi_alloc_info_t *info = args;
	struct ls_scfg_msi *msi_data = domain->host_data;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);

	spin_lock(&msi_data->lock);
	pos = find_first_zero_bit(msi_data->used, msi_data->irqs_num);
	if (pos < msi_data->irqs_num)
		__set_bit(pos, msi_data->used);
	else
		err = -ENOSPC;
	spin_unlock(&msi_data->lock);

	if (err)
		return err;

	err = iommu_dma_prepare_msi(info->desc, msi_data->msiir_addr);
	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &ls_scfg_msi_parent_chip, msi_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}

static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
	int pos;

	pos = d->hwirq;
	if (pos < 0 || pos >= msi_data->irqs_num) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
		return;
	}

	spin_lock(&msi_data->lock);
	__clear_bit(pos, msi_data->used);
	spin_unlock(&msi_data->lock);
}

static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc	= ls_scfg_msi_domain_irq_alloc,
	.free	= ls_scfg_msi_domain_irq_free,
};

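/*
 * Chained handler for one MSIR: read the pending bits (big-endian) and
 * convert each set bit back into a hwirq. Bit positions run from MSB
 * to LSB within the MSIR's slice (IBS 0 corresponds to bit_end, hence
 * "bit_end - pos"), and the MSIR's own SRS value supplies the low bits
 * of the hwirq.
 */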
static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msir *msir = irq_desc_get_handler_data(desc);
	struct ls_scfg_msi *msi_data = msir->msi_data;
	unsigned long val;
	int pos, size, hwirq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msir->reg);

	pos = msir->bit_start;
	size = msir->bit_end + 1;

	for_each_set_bit_from(pos, &val, size) {
		hwirq = ((msir->bit_end - pos) << msi_data->cfg->ibs_shift) |
			msir->srs;
		generic_handle_domain_irq(msi_data->parent, hwirq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

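/*
 * Two-level domain setup: a linear parent domain owns the raw hwirq
 * space, and a PCI/MSI domain created on top of it exposes the
 * msi_domain_info above to PCI devices.
 */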
static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
	/* Initialize MSI domain parent */
	msi_data->parent = irq_domain_add_linear(NULL,
						 msi_data->irqs_num,
						 &ls_scfg_msi_domain_ops,
						 msi_data);
	if (!msi_data->parent) {
		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_data->msi_domain = pci_msi_create_irq_domain(
				of_node_to_fwnode(msi_data->pdev->dev.of_node),
				&ls_scfg_msi_domain_info,
				msi_data->parent);
	if (!msi_data->msi_domain) {
		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
		irq_domain_remove(msi_data->parent);
		return -ENOMEM;
	}

	return 0;
}

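/*
 * Per-MSIR setup: claim the GIC interrupt as a chained handler, work
 * out which bits of its register this MSIR uses (LS1043A v1.1 exposes
 * only an 8-bit slice per MSIR, at a position depending on the index),
 * pick the SRS value, and release this MSIR's hwirqs from the
 * all-reserved bitmap. In affinity mode only MSIR0's hwirqs are
 * released, because the SRS bits are filled in per interrupt from the
 * target CPU instead.
 */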
static int ls_scfg_msi_setup_hwirq(struct ls_scfg_msi *msi_data, int index)
{
	struct ls_scfg_msir *msir;
	int virq, i, hwirq;

	virq = platform_get_irq(msi_data->pdev, index);
	if (virq <= 0)
		return -ENODEV;

	msir = &msi_data->msir[index];
	msir->index = index;
	msir->msi_data = msi_data;
	msir->gic_irq = virq;
	msir->reg = msi_data->regs + msi_data->cfg->msir_base + 4 * index;

	if (msi_data->cfg->msir_irqs == MSI_LS1043V1_1_IRQS_PER_MSIR) {
		msir->bit_start = 32 - ((msir->index + 1) *
				  MSI_LS1043V1_1_IRQS_PER_MSIR);
		msir->bit_end = msir->bit_start +
				MSI_LS1043V1_1_IRQS_PER_MSIR - 1;
	} else {
		msir->bit_start = 0;
		msir->bit_end = msi_data->cfg->msir_irqs - 1;
	}

	irq_set_chained_handler_and_data(msir->gic_irq,
					 ls_scfg_msi_irq_handler,
					 msir);

	if (msi_affinity_flag) {
		/* Associate MSIR interrupt to the cpu */
		irq_set_affinity(msir->gic_irq, get_cpu_mask(index));
		msir->srs = 0; /* SRS is supplied per MSI from the target CPU */
	} else
		msir->srs = index;

	/* Release the hwirqs corresponding to this MSIR */
	if (!msi_affinity_flag || msir->index == 0) {
		for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
			hwirq = i << msi_data->cfg->ibs_shift | msir->index;
			bitmap_clear(msi_data->used, hwirq, 1);
		}
	}

	return 0;
}

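/*
 * Undo ls_scfg_msi_setup_hwirq(): detach the chained handler and mark
 * this MSIR's hwirqs as used again so they can no longer be allocated.
 */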
static int ls_scfg_msi_teardown_hwirq(struct ls_scfg_msir *msir)
{
	struct ls_scfg_msi *msi_data = msir->msi_data;
	int i, hwirq;

	if (msir->gic_irq > 0)
		irq_set_chained_handler_and_data(msir->gic_irq, NULL, NULL);

	for (i = 0; i < msi_data->cfg->msir_irqs; i++) {
		hwirq = i << msi_data->cfg->ibs_shift | msir->index;
		bitmap_set(msi_data->used, hwirq, 1);
	}

	return 0;
}

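/*
 * SoC match data. The hwirq space allocated in probe is
 * 32 << ibs_shift entries (MSI_IRQS_PER_MSIR possible IBS values times
 * 2^ibs_shift SRS values), even on LS1043A v1.1 where each MSIR only
 * reports 8 of those bits.
 */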
static struct ls_scfg_msi_cfg ls1021_msi_cfg = {
	.ibs_shift = 3,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1046_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_IRQS_PER_MSIR,
	.msir_base = MSI_MSIR_OFFSET,
};

static struct ls_scfg_msi_cfg ls1043_v1_1_msi_cfg = {
	.ibs_shift = 2,
	.msir_irqs = MSI_LS1043V1_1_IRQS_PER_MSIR,
	.msir_base = MSI_LS1043V1_1_MSIR_OFFSET,
};

static const struct of_device_id ls_scfg_msi_id[] = {
	/* The following two misspelled compatibles are obsolete */
	{ .compatible = "fsl,1s1021a-msi", .data = &ls1021_msi_cfg},
	{ .compatible = "fsl,1s1043a-msi", .data = &ls1021_msi_cfg},

	{ .compatible = "fsl,ls1012a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1021a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-msi", .data = &ls1021_msi_cfg },
	{ .compatible = "fsl,ls1043a-v1.1-msi", .data = &ls1043_v1_1_msi_cfg },
	{ .compatible = "fsl,ls1046a-msi", .data = &ls1046_msi_cfg },
	{},
};
MODULE_DEVICE_TABLE(of, ls_scfg_msi_id);

static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int i, ret;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	msi_data->cfg = (struct ls_scfg_msi_cfg *)device_get_match_data(&pdev->dev);
	if (!msi_data->cfg)
		return -ENODEV;

	msi_data->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}
	msi_data->msiir_addr = res->start;

	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);

	msi_data->irqs_num = MSI_IRQS_PER_MSIR *
			     (1 << msi_data->cfg->ibs_shift);
	msi_data->used = devm_bitmap_zalloc(&pdev->dev, msi_data->irqs_num, GFP_KERNEL);
	if (!msi_data->used)
		return -ENOMEM;
	/*
	 * Reserve all the hwirqs
	 * The available hwirqs will be released in ls_scfg_msi_setup_hwirq()
	 */
	bitmap_set(msi_data->used, 0, msi_data->irqs_num);

	msi_data->msir_num = of_irq_count(pdev->dev.of_node);

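	/*
	 * Affinity mode needs a 1:1 MSIR-to-CPU mapping; if the DT
	 * provides fewer MSIR interrupts than there are possible CPUs,
	 * fall back to non-affinity operation.
	 */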
	if (msi_affinity_flag) {
		u32 cpu_num;

		cpu_num = num_possible_cpus();
		if (msi_data->msir_num >= cpu_num)
			msi_data->msir_num = cpu_num;
		else
			msi_affinity_flag = 0;
	}

	msi_data->msir = devm_kcalloc(&pdev->dev, msi_data->msir_num,
				      sizeof(*msi_data->msir),
				      GFP_KERNEL);
	if (!msi_data->msir)
		return -ENOMEM;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_setup_hwirq(msi_data, i);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, msi_data);

	return 0;
}

static void ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < msi_data->msir_num; i++)
		ls_scfg_msi_teardown_hwirq(&msi_data->msir[i]);

	irq_domain_remove(msi_data->msi_domain);
	irq_domain_remove(msi_data->parent);

	platform_set_drvdata(pdev, NULL);
}

static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name		= "ls-scfg-msi",
		.of_match_table	= ls_scfg_msi_id,
	},
	.probe		= ls_scfg_msi_probe,
	.remove_new	= ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");