// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007, Michael Ellerman, IBM Corporation.
 */


#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include <asm/dcr.h>
#include <asm/machdep.h>

#include "cell.h"

/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6


/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)
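/* e.g. for the 64KB FIFO above: (16 - 15) << 8 == 0x100, i.e. bits 8-9 = 01 */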

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)
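/* For the 64KB FIFO this mask works out to 0xFFF0 */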

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10
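/*
 * With the 64KB FIFO that is 4096 entries. The cascade handler below only
 * ever reads the first 32-bit word of each entry, and only the low 16 bits
 * of that word are used as the irq number.
 */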


struct axon_msic {
	struct irq_domain *irq_domain;
	__le32 *fifo_virt;
	dma_addr_t fifo_phys;
	dcr_host_t dcr_host;
	u32 read_offset;
#ifdef DEBUG
	u32 __iomem *trigger;
#endif
};

#ifdef DEBUG
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);
#else
static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
#endif


static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
{
	pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", dcr_n, val);

	dcr_write(msic->dcr_host, dcr_n, val);
}

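/*
 * Chained handler for the MSIC's cascade interrupt. The MSIC DMAs each
 * incoming MSI message into the FIFO; we drain every entry between our cached
 * read offset and the hardware write offset, treat the low 16 bits as a virq
 * and invoke its handler. Because the write offset register can be read
 * before the DMA of the corresponding entry has landed, a stale entry is
 * retried briefly before we give up and skip it.
 */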
static void axon_msi_cascade(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_desc_get_handler_data(desc);
	u32 write_offset, msi;
	int idx;
	int retry = 0;

	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	while (msic->read_offset != write_offset && retry < 100) {
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);
		msi &= 0xFFFF;

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			  write_offset, msic->read_offset, msi);

		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
			generic_handle_irq(msi);
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
		} else {
			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			udelay(1);
			retry++;
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);
			continue;
		}

		if (retry) {
			pr_devel("axon_msi: late irq 0x%x, retry %d\n",
				 msi, retry);
			retry = 0;
		}

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	if (retry) {
		printk(KERN_WARNING "axon_msi: irq timed out\n");

		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;
	}

	chip->irq_eoi(&desc->irq_data);
}

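/*
 * Walk up the device tree from the PCI device looking for an "msi-translator"
 * phandle, resolve it to the MSIC node, and return the axon_msic hanging off
 * that node's irq_domain. Returns NULL if no translator can be found.
 */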
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
{
	struct irq_domain *irq_domain;
	struct device_node *dn, *tmp;
	const phandle *ph;
	struct axon_msic *msic = NULL;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return NULL;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);
		if (ph)
			break;
	}

	if (!ph) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-translator property found\n");
		goto out_error;
	}

	tmp = dn;
	dn = of_find_node_by_phandle(*ph);
	of_node_put(tmp);
	if (!dn) {
		dev_dbg(&dev->dev,
			"axon_msi: msi-translator doesn't point to a node\n");
		goto out_error;
	}

	irq_domain = irq_find_host(dn);
	if (!irq_domain) {
		dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %pOF\n",
			dn);
		goto out_error;
	}

	msic = irq_domain->host_data;

out_error:
	of_node_put(dn);

	return msic;
}

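/*
 * Walk up the device tree looking for an "msi-address-64" property (only if
 * the device can take a 64-bit MSI address) or an "msi-address-32" property,
 * and use it to fill in the address half of the MSI message.
 */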
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
{
	struct device_node *dn;
	int len;
	const u32 *prop;

	dn = of_node_get(pci_device_to_OF_node(dev));
	if (!dn) {
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");
		return -ENODEV;
	}

	for (; dn; dn = of_get_next_parent(dn)) {
		if (!dev->no_64bit_msi) {
			prop = of_get_property(dn, "msi-address-64", &len);
			if (prop)
				break;
		}

		prop = of_get_property(dn, "msi-address-32", &len);
		if (prop)
			break;
	}

	if (!prop) {
		dev_dbg(&dev->dev,
			"axon_msi: no msi-address-(32|64) properties found\n");
		of_node_put(dn);
		return -ENOENT;
	}

	switch (len) {
	case 8:
		msg->address_hi = prop[0];
		msg->address_lo = prop[1];
		break;
	case 4:
		msg->address_hi = 0;
		msg->address_lo = prop[0];
		break;
	default:
		dev_dbg(&dev->dev,
			"axon_msi: malformed msi-address-(32|64) property\n");
		of_node_put(dn);
		return -EINVAL;
	}

	of_node_put(dn);

	return 0;
}

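/*
 * For each unassociated MSI descriptor, create a direct (nomap) mapping in
 * the MSIC's irq_domain and program the device with a message whose data
 * field is the virq number itself; the cascade handler above recovers the
 * virq from the FIFO entry, which is why virqs must fit in 16 bits.
 */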
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int virq;
	int rc;
	struct msi_desc *entry;
	struct msi_msg msg;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);
	if (!msic)
		return -ENODEV;

	rc = setup_msi_msg_address(dev, &msg);
	if (rc)
		return rc;

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_NOTASSOCIATED) {
		virq = irq_create_direct_mapping(msic->irq_domain);
		if (!virq) {
			dev_warn(&dev->dev,
				 "axon_msi: virq allocation failed!\n");
			return -1;
		}
		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		msg.data = virq;
		pci_write_msi_msg(virq, &msg);
	}

	return 0;
}

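/* Undo the direct mappings created above when the device's MSIs are freed. */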
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ASSOCIATED) {
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		entry->irq = 0;
	}
}

static struct irq_chip msic_irq_chip = {
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
	.irq_shutdown	= pci_msi_mask_irq,
	.name		= "AXON-MSI",
};

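/*
 * Domain map hook: stash the owning MSIC in the chip data (the cascade
 * handler uses it to validate FIFO entries) and use handle_simple_irq, as
 * there is no per-MSI ack/eoi to perform here.
 */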
static int msic_host_map(struct irq_domain *h, unsigned int virq,
			 irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);

	return 0;
}

static const struct irq_domain_ops msic_host_ops = {
	.map	= msic_host_map,
};

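/*
 * Platform shutdown hook: clear the enable and interrupt-enable bits to
 * quiesce the MSIC.
 */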
static void axon_msi_shutdown(struct platform_device *device)
{
	struct axon_msic *msic = dev_get_drvdata(&device->dev);
	u32 tmp;

	pr_devel("axon_msi: disabling %pOF\n",
		 irq_domain_get_of_node(msic->irq_domain));
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
}

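/*
 * Probe: map the MSIC's DCRs, allocate the coherent FIFO buffer, hook the
 * cascade interrupt, create the nomap irq_domain, program the FIFO base
 * address and control register, then install the MSI setup/teardown ops on
 * the Cell PCI controller.
 */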
static int axon_msi_probe(struct platform_device *device)
{
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	unsigned int virq;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %pOF\n", dn);

	msic = kzalloc(sizeof(*msic), GFP_KERNEL);
	if (!msic) {
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %pOF\n",
		       dn);
		goto out;
	}

	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		printk(KERN_ERR
		       "axon_msi: couldn't parse dcr properties on %pOF\n",
			dn);
		goto out_free_msic;
	}

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %pOF\n",
		       dn);
		goto out_free_msic;
	}

	virq = irq_of_parse_and_map(dn, 0);
	if (!virq) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %pOF\n",
		       dn);
		goto out_free_fifo;
	}
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
	if (!msic->irq_domain) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %pOF\n",
		       dn);
		goto out_free_fifo;
	}

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
				  msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
			MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
			MSIC_CTRL_FIFO_SIZE);

	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	cell_pci_controller_ops.setup_msi_irqs = axon_msi_setup_msi_irqs;
	cell_pci_controller_ops.teardown_msi_irqs = axon_msi_teardown_msi_irqs;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %pOF\n", dn);

	return 0;

out_free_fifo:
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
			  msic->fifo_phys);
out_free_msic:
	kfree(msic);
out:

	return -1;
}

static const struct of_device_id axon_msi_device_id[] = {
	{
		.compatible	= "ibm,axon-msic"
	},
	{}
};

static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
	.driver = {
		.name = "axon-msi",
		.of_match_table = axon_msi_device_id,
	},
};

static int __init axon_msi_init(void)
{
	return platform_driver_register(&axon_msi_driver);
}
subsys_initcall(axon_msi_init);


#ifdef DEBUG
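/*
 * Debugfs hook: writing to the msic_<nid> file stores the value into the
 * MSIC's trigger register (ioremapped from the node's first "reg" entry),
 * presumably as a way to inject a test MSI; reads just return 0.
 */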
static int msic_set(void *data, u64 val)
{
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);
	return 0;
}

static int msic_get(void *data, u64 *val)
{
	*val = 0;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");

void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
{
	char name[8];
	struct resource res;

	if (of_address_to_resource(dn, 0, &res)) {
		pr_devel("axon_msi: couldn't get reg property\n");
		return;
	}

	msic->trigger = ioremap(res.start, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");
		return;
	}

	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	debugfs_create_file(name, 0600, arch_debugfs_dir, msic, &fops_msic);
}
#endif /* DEBUG */