1/*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2020 Ruslan Bukin <br@bsdpad.com>
5 *
6 * This software was developed by SRI International and the University of
7 * Cambridge Computer Laboratory (Department of Computer Science and
8 * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
9 * DARPA SSITH research programme.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 *    notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 *    notice, this list of conditions and the following disclaimer in the
18 *    documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33#include "opt_platform.h"
34
35#include <sys/cdefs.h>
36__FBSDID("$FreeBSD$");
37
38#include <sys/param.h>
39#include <sys/systm.h>
40#include <sys/malloc.h>
41#include <sys/types.h>
42#include <sys/sysctl.h>
43#include <sys/kernel.h>
44#include <sys/rman.h>
45#include <sys/module.h>
46#include <sys/bus.h>
47#include <sys/endian.h>
48#include <sys/cpuset.h>
49#include <sys/mutex.h>
50#include <sys/proc.h>
51
52#include <machine/intr.h>
53#include <machine/bus.h>
54
55#include <vm/vm.h>
56#include <vm/vm_extern.h>
57#include <vm/vm_kern.h>
58#include <vm/pmap.h>
59
60#include <dev/ofw/openfirm.h>
61#include <dev/ofw/ofw_bus.h>
62#include <dev/ofw/ofw_bus_subr.h>
63
64#include <dev/pci/pcireg.h>
65#include <dev/pci/pcivar.h>
66#include <dev/pci/pci_host_generic.h>
67#include <dev/pci/pci_host_generic_fdt.h>
68#include <dev/pci/pcib_private.h>
69
70#include "xlnx_pcib.h"
71
72#include "ofw_bus_if.h"
73#include "msi_if.h"
74#include "pcib_if.h"
75#include "pic_if.h"
76
77#define	XLNX_PCIB_MAX_MSI	64
78
79static int xlnx_pcib_fdt_attach(device_t);
80static int xlnx_pcib_fdt_probe(device_t);
81static int xlnx_pcib_fdt_get_id(device_t, device_t, enum pci_id_type,
82    uintptr_t *);
83static void xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc,
84    bool mask);
85
/* Per-device state; embeds the generic FDT PCIe softc as its first member. */
struct xlnx_pcib_softc {
	/* Must be first: generic core/FDT host bridge state. */
	struct generic_pcie_fdt_softc	fdt_sc;
	/* res[0]: bridge registers; res[1..3]: MISC, MSI0, MSI1 IRQs. */
	struct resource			*res[4];
	/* Protects MSI vector allocation (isrcs[].flags). */
	struct mtx			mtx;
	/* KVA of the page whose physical address is the MSI doorbell. */
	vm_offset_t			msi_page;
	/* XLNX_PCIB_MAX_MSI sources, indexed by MSI vector number. */
	struct xlnx_pcib_irqsrc		*isrcs;
	device_t			dev;
	/* Cookies for the handlers established on res[1..3]. */
	void				*intr_cookie[3];
};
95
/* Bridge register window plus the MISC, MSI0 and MSI1 interrupt lines. */
static struct resource_spec xlnx_pcib_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		1,	RF_ACTIVE },
	{ SYS_RES_IRQ,		2,	RF_ACTIVE },
	{ -1, 0 }
};
103
/* One INTRNG interrupt source per MSI vector. */
struct xlnx_pcib_irqsrc {
	struct intr_irqsrc	isrc;	/* Generic source; must be first. */
	u_int			irq;	/* MSI vector number (0..63). */
#define	XLNX_IRQ_FLAG_USED	(1 << 0)
	u_int			flags;	/* XLNX_IRQ_FLAG_USED when allocated. */
};
110
/*
 * Report and dismiss a pending root-port error.  Reads the error FIFO
 * register and, when it holds a valid entry, logs the requester ID and
 * writes the register back with all ones (presumably write-one-to-clear —
 * confirm against the XDMA bridge register reference).
 */
static void
xlnx_pcib_clear_err_interrupts(struct generic_pcie_core_softc *sc)
{
	uint32_t reg;

	reg = bus_read_4(sc->res, XLNX_PCIE_RPERRFRR);

	if (reg & RPERRFRR_VALID) {
		device_printf(sc->dev, "Requested ID: %x\n",
		    reg & RPERRFRR_REQ_ID_M);
		bus_write_4(sc->res, XLNX_PCIE_RPERRFRR, ~0U);
	}
}
124
125static int
126xlnx_pcib_intr(void *arg)
127{
128	struct generic_pcie_fdt_softc *fdt_sc;
129	struct generic_pcie_core_softc *sc;
130	struct xlnx_pcib_softc *xlnx_sc;
131	uint32_t val, mask, status;
132
133	xlnx_sc = arg;
134	fdt_sc = &xlnx_sc->fdt_sc;
135	sc = &fdt_sc->base;
136
137	val = bus_read_4(sc->res, XLNX_PCIE_IDR);
138	mask = bus_read_4(sc->res, XLNX_PCIE_IMR);
139
140	status = val & mask;
141	if (!status)
142		return (FILTER_HANDLED);
143
144	if (status & IMR_LINK_DOWN)
145		device_printf(sc->dev, "Link down");
146
147	if (status & IMR_HOT_RESET)
148		device_printf(sc->dev, "Hot reset");
149
150	if (status & IMR_CORRECTABLE)
151		xlnx_pcib_clear_err_interrupts(sc);
152
153	if (status & IMR_FATAL)
154		xlnx_pcib_clear_err_interrupts(sc);
155
156	if (status & IMR_NON_FATAL)
157		xlnx_pcib_clear_err_interrupts(sc);
158
159	if (status & IMR_MSI) {
160		device_printf(sc->dev, "MSI interrupt");
161
162		/* FIFO mode MSI not implemented. */
163	}
164
165	if (status & IMR_INTX) {
166		device_printf(sc->dev, "INTx received");
167
168		/* Not implemented. */
169	}
170
171	if (status & IMR_SLAVE_UNSUPP_REQ)
172		device_printf(sc->dev, "Slave unsupported request");
173
174	if (status & IMR_SLAVE_UNEXP_COMPL)
175		device_printf(sc->dev, "Slave unexpected completion");
176
177	if (status & IMR_SLAVE_COMPL_TIMOUT)
178		device_printf(sc->dev, "Slave completion timeout");
179
180	if (status & IMR_SLAVE_ERROR_POISON)
181		device_printf(sc->dev, "Slave error poison");
182
183	if (status & IMR_SLAVE_COMPL_ABORT)
184		device_printf(sc->dev, "Slave completion abort");
185
186	if (status & IMR_SLAVE_ILLEG_BURST)
187		device_printf(sc->dev, "Slave illegal burst");
188
189	if (status & IMR_MASTER_DECERR)
190		device_printf(sc->dev, "Master decode error");
191
192	if (status & IMR_MASTER_SLVERR)
193		device_printf(sc->dev, "Master slave error");
194
195	bus_write_4(sc->res, XLNX_PCIE_IDR, val);
196
197	return (FILTER_HANDLED);
198}
199
/*
 * Drain and dispatch the MSI vectors latched in the given decode
 * register (XLNX_PCIE_RPMSIID1 covers vectors 0-31, XLNX_PCIE_RPMSIID2
 * vectors 32-63).  Re-reads the register until it comes back zero so
 * vectors arriving while we dispatch are not lost.
 */
static void
xlnx_pcib_handle_msi_intr(void *arg, int msireg)
{
	struct generic_pcie_fdt_softc *fdt_sc;
	struct generic_pcie_core_softc *sc;
	struct xlnx_pcib_softc *xlnx_sc;
	struct xlnx_pcib_irqsrc *xi;
	struct trapframe *tf;
	int irq;
	int reg;
	int i;

	xlnx_sc = arg;
	fdt_sc = &xlnx_sc->fdt_sc;
	sc = &fdt_sc->base;
	tf = curthread->td_intr_frame;

	do {
		reg = bus_read_4(sc->res, msireg);

		for (i = 0; i < sizeof(uint32_t) * 8; i++) {
			if (reg & (1 << i)) {
				/* Ack this vector before dispatching it. */
				bus_write_4(sc->res, msireg, (1 << i));

				/* RPMSIID2 holds the upper 32 vectors. */
				irq = i;
				if (msireg == XLNX_PCIE_RPMSIID2)
					irq += 32;

				xi = &xlnx_sc->isrcs[irq];
				if (intr_isrc_dispatch(&xi->isrc, tf) != 0) {
					/* Disable stray. */
					xlnx_pcib_msi_mask(sc->dev,
					    &xi->isrc, 1);
					device_printf(sc->dev,
					    "Stray irq %u disabled\n", irq);
				}
			}
		}
	} while (reg != 0);
}
240
/* Filter for the MSI0 line: services vectors 0-31 (XLNX_PCIE_RPMSIID1). */
static int
xlnx_pcib_msi0_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID1);

	return (FILTER_HANDLED);
}
249
/* Filter for the MSI1 line: services vectors 32-63 (XLNX_PCIE_RPMSIID2). */
static int
xlnx_pcib_msi1_intr(void *arg)
{

	xlnx_pcib_handle_msi_intr(arg, XLNX_PCIE_RPMSIID2);

	return (FILTER_HANDLED);
}
258
259static int
260xlnx_pcib_register_msi(struct xlnx_pcib_softc *sc)
261{
262	const char *name;
263	int error;
264	int irq;
265
266	sc->isrcs = malloc(sizeof(*sc->isrcs) * XLNX_PCIB_MAX_MSI, M_DEVBUF,
267	    M_WAITOK | M_ZERO);
268
269	name = device_get_nameunit(sc->dev);
270
271	for (irq = 0; irq < XLNX_PCIB_MAX_MSI; irq++) {
272		sc->isrcs[irq].irq = irq;
273		error = intr_isrc_register(&sc->isrcs[irq].isrc,
274		    sc->dev, 0, "%s,%u", name, irq);
275		if (error != 0)
276			return (error); /* XXX deregister ISRCs */
277	}
278
279	if (intr_msi_register(sc->dev,
280	    OF_xref_from_node(ofw_bus_get_node(sc->dev))) != 0)
281		return (ENXIO);
282
283	return (0);
284}
285
/*
 * One-time bridge bring-up: quiesce and acknowledge stale interrupts,
 * program the MSI doorbell page, enable the bridge, then unmask the
 * interrupt conditions the MISC filter handles.
 */
static void
xlnx_pcib_init(struct xlnx_pcib_softc *sc)
{
	bus_addr_t addr;
	int reg;

	/* Disable interrupts. */
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, 0);

	/* Clear pending interrupts (IDR is acked by writing it back). */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_IDR);
	bus_write_4(sc->res[0], XLNX_PCIE_IDR, reg);

	/*
	 * Setup an MSI page: one physical page whose address is handed to
	 * devices (via msi_map_msi) as the MSI write target.
	 */
	sc->msi_page = kmem_alloc_contig(PAGE_SIZE, M_WAITOK, 0,
	    BUS_SPACE_MAXADDR, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	addr = vtophys(sc->msi_page);
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR1, (addr >> 32));
	bus_write_4(sc->res[0], XLNX_PCIE_RPMSIBR2, (addr >>  0));

	/* Enable the bridge. */
	reg = bus_read_4(sc->res[0], XLNX_PCIE_RPSCR);
	reg |= RPSCR_BE;
	bus_write_4(sc->res[0], XLNX_PCIE_RPSCR, reg);

	/* Enable interrupts. */
	reg = IMR_LINK_DOWN
		| IMR_HOT_RESET
		| IMR_CFG_COMPL_STATUS_M
		| IMR_CFG_TIMEOUT
		| IMR_CORRECTABLE
		| IMR_NON_FATAL
		| IMR_FATAL
		| IMR_INTX
		| IMR_MSI
		| IMR_SLAVE_UNSUPP_REQ
		| IMR_SLAVE_UNEXP_COMPL
		| IMR_SLAVE_COMPL_TIMOUT
		| IMR_SLAVE_ERROR_POISON
		| IMR_SLAVE_COMPL_ABORT
		| IMR_SLAVE_ILLEG_BURST
		| IMR_MASTER_DECERR
		| IMR_MASTER_SLVERR;
	bus_write_4(sc->res[0], XLNX_PCIE_IMR, reg);
}
331
332static int
333xlnx_pcib_fdt_probe(device_t dev)
334{
335
336	if (!ofw_bus_status_okay(dev))
337		return (ENXIO);
338
339	if (ofw_bus_is_compatible(dev, "xlnx,xdma-host-3.00")) {
340		device_set_desc(dev, "Xilinx XDMA PCIe Controller");
341		return (BUS_PROBE_DEFAULT);
342	}
343
344	return (ENXIO);
345}
346
347static int
348xlnx_pcib_fdt_attach(device_t dev)
349{
350	struct xlnx_pcib_softc *sc;
351	int error;
352
353	sc = device_get_softc(dev);
354	sc->dev = dev;
355
356	mtx_init(&sc->mtx, "msi_mtx", NULL, MTX_DEF);
357
358	if (bus_alloc_resources(dev, xlnx_pcib_spec, sc->res)) {
359		device_printf(dev, "could not allocate resources\n");
360		return (ENXIO);
361	}
362
363	/* Setup MISC interrupt handler. */
364	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
365	    xlnx_pcib_intr, NULL, sc, &sc->intr_cookie[0]);
366	if (error != 0) {
367		device_printf(dev, "could not setup interrupt handler.\n");
368		return (ENXIO);
369	}
370
371	/* Setup MSI0 interrupt handler. */
372	error = bus_setup_intr(dev, sc->res[2], INTR_TYPE_MISC | INTR_MPSAFE,
373	    xlnx_pcib_msi0_intr, NULL, sc, &sc->intr_cookie[1]);
374	if (error != 0) {
375		device_printf(dev, "could not setup interrupt handler.\n");
376		return (ENXIO);
377	}
378
379	/* Setup MSI1 interrupt handler. */
380	error = bus_setup_intr(dev, sc->res[3], INTR_TYPE_MISC | INTR_MPSAFE,
381	    xlnx_pcib_msi1_intr, NULL, sc, &sc->intr_cookie[2]);
382	if (error != 0) {
383		device_printf(dev, "could not setup interrupt handler.\n");
384		return (ENXIO);
385	}
386
387	xlnx_pcib_init(sc);
388
389	/*
390	 * Allow the core driver to map registers.
391	 * We will be accessing the device memory using core_softc.
392	 */
393	bus_release_resources(dev, xlnx_pcib_spec, sc->res);
394
395	error = xlnx_pcib_register_msi(sc);
396	if (error)
397		return (error);
398
399	return (pci_host_generic_attach(dev));
400}
401
402static int
403xlnx_pcib_fdt_get_id(device_t pci, device_t child, enum pci_id_type type,
404    uintptr_t *id)
405{
406	phandle_t node;
407	int bsf;
408
409	if (type != PCI_ID_MSI)
410		return (pcib_get_id(pci, child, type, id));
411
412	node = ofw_bus_get_node(pci);
413	if (OF_hasprop(node, "msi-map"))
414		return (generic_pcie_get_id(pci, child, type, id));
415
416	bsf = pci_get_rid(child);
417	*id = (pci_get_domain(child) << PCI_RID_DOMAIN_SHIFT) | bsf;
418
419	return (0);
420}
421
422static int
423xlnx_pcib_req_valid(struct generic_pcie_core_softc *sc,
424    u_int bus, u_int slot, u_int func, u_int reg)
425{
426	bus_space_handle_t h;
427	bus_space_tag_t t;
428	uint32_t val;
429
430	t = sc->bst;
431	h = sc->bsh;
432
433	if ((bus < sc->bus_start) || (bus > sc->bus_end))
434		return (0);
435	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
436	    (reg > PCIE_REGMAX))
437		return (0);
438
439	if (bus == 0 && slot > 0)
440		return (0);
441
442	val = bus_space_read_4(t, h, XLNX_PCIE_PHYSCR);
443	if ((val & PHYSCR_LINK_UP) == 0) {
444		/* Link is down */
445		return (0);
446	}
447
448	/* Valid */
449
450	return (1);
451}
452
453static uint32_t
454xlnx_pcib_read_config(device_t dev, u_int bus, u_int slot,
455    u_int func, u_int reg, int bytes)
456{
457	struct generic_pcie_fdt_softc *fdt_sc;
458	struct xlnx_pcib_softc *xlnx_sc;
459	struct generic_pcie_core_softc *sc;
460	bus_space_handle_t h;
461	bus_space_tag_t t;
462	uint64_t offset;
463	uint32_t data;
464
465	xlnx_sc = device_get_softc(dev);
466	fdt_sc = &xlnx_sc->fdt_sc;
467	sc = &fdt_sc->base;
468
469	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
470		return (~0U);
471
472	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
473	t = sc->bst;
474	h = sc->bsh;
475
476	data = bus_space_read_4(t, h, offset & ~3);
477
478	switch (bytes) {
479	case 1:
480		data >>= (offset & 3) * 8;
481		data &= 0xff;
482		break;
483	case 2:
484		data >>= (offset & 3) * 8;
485		data = le16toh(data);
486		break;
487	case 4:
488		data = le32toh(data);
489		break;
490	default:
491		return (~0U);
492	}
493
494	return (data);
495}
496
497static void
498xlnx_pcib_write_config(device_t dev, u_int bus, u_int slot,
499    u_int func, u_int reg, uint32_t val, int bytes)
500{
501	struct generic_pcie_fdt_softc *fdt_sc;
502	struct xlnx_pcib_softc *xlnx_sc;
503	struct generic_pcie_core_softc *sc;
504	bus_space_handle_t h;
505	bus_space_tag_t t;
506	uint64_t offset;
507	uint32_t data;
508
509	xlnx_sc = device_get_softc(dev);
510	fdt_sc = &xlnx_sc->fdt_sc;
511	sc = &fdt_sc->base;
512
513	if (!xlnx_pcib_req_valid(sc, bus, slot, func, reg))
514		return;
515
516	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);
517
518	t = sc->bst;
519	h = sc->bsh;
520
521	/*
522	 * 32-bit access used due to a bug in the Xilinx bridge that
523	 * requires to write primary and secondary buses in one blast.
524	 *
525	 * TODO: This is probably wrong on big-endian.
526	 */
527	switch (bytes) {
528	case 1:
529		data = bus_space_read_4(t, h, offset & ~3);
530		data &= ~(0xff << ((offset & 3) * 8));
531		data |= (val & 0xff) << ((offset & 3) * 8);
532		bus_space_write_4(t, h, offset & ~3, htole32(data));
533		break;
534	case 2:
535		data = bus_space_read_4(t, h, offset & ~3);
536		data &= ~(0xffff << ((offset & 3) * 8));
537		data |= (val & 0xffff) << ((offset & 3) * 8);
538		bus_space_write_4(t, h, offset & ~3, htole32(data));
539		break;
540	case 4:
541		bus_space_write_4(t, h, offset, htole32(val));
542		break;
543	default:
544		return;
545	}
546}
547
548static int
549xlnx_pcib_alloc_msi(device_t pci, device_t child, int count, int maxcount,
550    int *irqs)
551{
552	phandle_t msi_parent;
553
554	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
555	    NULL);
556	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
557	return (intr_alloc_msi(pci, child, msi_parent, count, maxcount,
558	    irqs));
559}
560
561static int
562xlnx_pcib_release_msi(device_t pci, device_t child, int count, int *irqs)
563{
564	phandle_t msi_parent;
565
566	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
567	    NULL);
568	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
569	return (intr_release_msi(pci, child, msi_parent, count, irqs));
570}
571
572static int
573xlnx_pcib_map_msi(device_t pci, device_t child, int irq, uint64_t *addr,
574    uint32_t *data)
575{
576	phandle_t msi_parent;
577
578	ofw_bus_msimap(ofw_bus_get_node(pci), pci_get_rid(child), &msi_parent,
579	    NULL);
580	msi_parent = OF_xref_from_node(ofw_bus_get_node(pci));
581	return (intr_map_msi(pci, child, msi_parent, irq, addr, data));
582}
583
584static int
585xlnx_pcib_msi_alloc_msi(device_t dev, device_t child, int count, int maxcount,
586    device_t *pic, struct intr_irqsrc **srcs)
587{
588	struct xlnx_pcib_softc *sc;
589	int irq, end_irq, i;
590	bool found;
591
592	sc = device_get_softc(dev);
593
594	mtx_lock(&sc->mtx);
595
596	found = false;
597
598	for (irq = 0; (irq + count - 1) < XLNX_PCIB_MAX_MSI; irq++) {
599		/* Assume the range is valid. */
600		found = true;
601
602		/* Check this range is valid. */
603		for (end_irq = irq; end_irq < irq + count; end_irq++) {
604			if (sc->isrcs[end_irq].flags & XLNX_IRQ_FLAG_USED) {
605				/* This is already used. */
606				found = false;
607				break;
608			}
609		}
610
611		if (found)
612			break;
613	}
614
615	if (!found || irq == (XLNX_PCIB_MAX_MSI - 1)) {
616		/* Not enough interrupts were found. */
617		mtx_unlock(&sc->mtx);
618		return (ENXIO);
619	}
620
621	/* Mark the interrupt as used. */
622	for (i = 0; i < count; i++)
623		sc->isrcs[irq + i].flags |= XLNX_IRQ_FLAG_USED;
624
625	mtx_unlock(&sc->mtx);
626
627	for (i = 0; i < count; i++)
628		srcs[i] = (struct intr_irqsrc *)&sc->isrcs[irq + i];
629
630	*pic = device_get_parent(dev);
631
632	return (0);
633}
634
635static int
636xlnx_pcib_msi_release_msi(device_t dev, device_t child, int count,
637    struct intr_irqsrc **isrc)
638{
639	struct xlnx_pcib_softc *sc;
640	struct xlnx_pcib_irqsrc *xi;
641	int i;
642
643	sc = device_get_softc(dev);
644	mtx_lock(&sc->mtx);
645	for (i = 0; i < count; i++) {
646		xi = (struct xlnx_pcib_irqsrc *)isrc[i];
647
648		KASSERT(xi->flags & XLNX_IRQ_FLAG_USED,
649		    ("%s: Releasing an unused MSI interrupt", __func__));
650
651		xi->flags &= ~XLNX_IRQ_FLAG_USED;
652	}
653
654	mtx_unlock(&sc->mtx);
655	return (0);
656}
657
658static int
659xlnx_pcib_msi_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
660    uint64_t *addr, uint32_t *data)
661{
662	struct xlnx_pcib_softc *sc;
663	struct xlnx_pcib_irqsrc *xi;
664
665	sc = device_get_softc(dev);
666	xi = (struct xlnx_pcib_irqsrc *)isrc;
667
668	*addr = vtophys(sc->msi_page);
669	*data = xi->irq;
670
671	return (0);
672}
673
674static void
675xlnx_pcib_msi_mask(device_t dev, struct intr_irqsrc *isrc, bool mask)
676{
677	struct generic_pcie_fdt_softc *fdt_sc;
678	struct generic_pcie_core_softc *sc;
679	struct xlnx_pcib_softc *xlnx_sc;
680	struct xlnx_pcib_irqsrc *xi;
681	uint32_t msireg, irq;
682	uint32_t reg;
683
684	xlnx_sc = device_get_softc(dev);
685	fdt_sc = &xlnx_sc->fdt_sc;
686	sc = &fdt_sc->base;
687
688	xi = (struct xlnx_pcib_irqsrc *)isrc;
689
690	irq = xi->irq;
691	if (irq < 32)
692		msireg = XLNX_PCIE_RPMSIID1_MASK;
693	else
694		msireg = XLNX_PCIE_RPMSIID2_MASK;
695
696	reg = bus_read_4(sc->res, msireg);
697	if (mask)
698		reg &= ~(1 << irq);
699	else
700		reg |= (1 << irq);
701	bus_write_4(sc->res, msireg, reg);
702}
703
/* pic_disable_intr: mask the vector so it no longer raises interrupts. */
static void
xlnx_pcib_msi_disable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}
710
/* pic_enable_intr: unmask the vector. */
static void
xlnx_pcib_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}
717
/* pic_post_filter: nothing to do — vectors are acked in the MSI loop. */
static void
xlnx_pcib_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
{

}
723
/* pic_post_ithread: re-enable the vector after ithread processing. */
static void
xlnx_pcib_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, false);
}
730
/* pic_pre_ithread: mask the vector while its ithread runs. */
static void
xlnx_pcib_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{

	xlnx_pcib_msi_mask(dev, isrc, true);
}
737
/* pic_setup_intr: no per-vector setup is required. */
static int
xlnx_pcib_msi_setup_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
745
/* pic_teardown_intr: no per-vector teardown is required. */
static int
xlnx_pcib_msi_teardown_intr(device_t dev, struct intr_irqsrc *isrc,
    struct resource *res, struct intr_map_data *data)
{

	return (0);
}
753
/* kobj method table: device, pcib, MSI and interrupt-controller glue. */
static device_method_t xlnx_pcib_fdt_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		xlnx_pcib_fdt_probe),
	DEVMETHOD(device_attach,	xlnx_pcib_fdt_attach),

	/* pcib interface */
	DEVMETHOD(pcib_get_id,		xlnx_pcib_fdt_get_id),
	DEVMETHOD(pcib_read_config,	xlnx_pcib_read_config),
	DEVMETHOD(pcib_write_config,	xlnx_pcib_write_config),
	DEVMETHOD(pcib_alloc_msi,	xlnx_pcib_alloc_msi),
	DEVMETHOD(pcib_release_msi,	xlnx_pcib_release_msi),
	DEVMETHOD(pcib_map_msi,		xlnx_pcib_map_msi),

	/* MSI interface */
	DEVMETHOD(msi_alloc_msi,		xlnx_pcib_msi_alloc_msi),
	DEVMETHOD(msi_release_msi,		xlnx_pcib_msi_release_msi),
	DEVMETHOD(msi_map_msi,			xlnx_pcib_msi_map_msi),

	/* Interrupt controller interface */
	DEVMETHOD(pic_disable_intr,		xlnx_pcib_msi_disable_intr),
	DEVMETHOD(pic_enable_intr,		xlnx_pcib_msi_enable_intr),
	DEVMETHOD(pic_setup_intr,		xlnx_pcib_msi_setup_intr),
	DEVMETHOD(pic_teardown_intr,		xlnx_pcib_msi_teardown_intr),
	DEVMETHOD(pic_post_filter,		xlnx_pcib_msi_post_filter),
	DEVMETHOD(pic_post_ithread,		xlnx_pcib_msi_post_ithread),
	DEVMETHOD(pic_pre_ithread,		xlnx_pcib_msi_pre_ithread),

	/* End */
	DEVMETHOD_END
};
784
/*
 * Subclass of the generic FDT PCIe host driver; struct xlnx_pcib_softc
 * embeds struct generic_pcie_fdt_softc as its first member.
 */
DEFINE_CLASS_1(pcib, xlnx_pcib_fdt_driver, xlnx_pcib_fdt_methods,
    sizeof(struct xlnx_pcib_softc), generic_pcie_fdt_driver);

static devclass_t xlnx_pcib_fdt_devclass;

/* Attach instances enumerated on either simplebus or ofwbus. */
DRIVER_MODULE(xlnx_pcib, simplebus, xlnx_pcib_fdt_driver,
    xlnx_pcib_fdt_devclass, 0, 0);
DRIVER_MODULE(xlnx_pcib, ofwbus, xlnx_pcib_fdt_driver,
    xlnx_pcib_fdt_devclass, 0, 0);
794